content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cni_auc.R
\name{evaluate_ranking_direct}
\alias{evaluate_ranking_direct}
\title{Evaluate a ranking}
\usage{
evaluate_ranking_direct(
values,
are_true,
num_positive_interactions,
num_possible_interactions,
extend_by = 10000
)
}
\arguments{
\item{values}{A vector of importance values of predicted interactions.}
\item{are_true}{A vector denoting whether the corresponding predicted interactions are true.}
\item{num_positive_interactions}{The total number of positives.}
\item{num_possible_interactions}{The total number ranked values.}
\item{extend_by}{The number of steps with which to fill the ranking as if random, if only a part of the ranking is given.}
}
\value{
A list containing two items, the ranked evaluation and the area under the curve scores
}
\description{
Evaluate a ranking
}
|
/package/man/evaluate_ranking_direct.Rd
|
permissive
|
dynverse/dyngen_manuscript
|
R
| false
| true
| 885
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cni_auc.R
\name{evaluate_ranking_direct}
\alias{evaluate_ranking_direct}
\title{Evaluate a ranking}
\usage{
evaluate_ranking_direct(
values,
are_true,
num_positive_interactions,
num_possible_interactions,
extend_by = 10000
)
}
\arguments{
\item{values}{A vector of importance values of predicted interactions.}
\item{are_true}{A vector denoting whether the corresponding predicted interactions are true.}
\item{num_positive_interactions}{The total number of positives.}
\item{num_possible_interactions}{The total number ranked values.}
\item{extend_by}{The number of steps with which to fill the ranking as if random, if only a part of the ranking is given.}
}
\value{
A list containing two items, the ranked evaluation and the area under the curve scores
}
\description{
Evaluate a ranking
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classes.R
\name{enm.ncoefs}
\alias{enm.ncoefs}
\alias{enm.ncoefs<-}
\alias{enm.ncoefs,ENMdetails-method}
\alias{enm.ncoefs<-,ENMdetails-method}
\title{enm.ncoefs generic for ENMdetails object}
\usage{
enm.ncoefs(x)
enm.ncoefs(x) <- value
\S4method{enm.ncoefs}{ENMdetails}(x)
\S4method{enm.ncoefs}{ENMdetails}(x) <- value
}
\arguments{
\item{x}{ENMdetails object}
\item{value}{input value}
}
\description{
enm.ncoefs generic for ENMdetails object
}
|
/man/enm.ncoefs.Rd
|
no_license
|
jamiemkass/ENMeval
|
R
| false
| true
| 530
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classes.R
\name{enm.ncoefs}
\alias{enm.ncoefs}
\alias{enm.ncoefs<-}
\alias{enm.ncoefs,ENMdetails-method}
\alias{enm.ncoefs<-,ENMdetails-method}
\title{enm.ncoefs generic for ENMdetails object}
\usage{
enm.ncoefs(x)
enm.ncoefs(x) <- value
\S4method{enm.ncoefs}{ENMdetails}(x)
\S4method{enm.ncoefs}{ENMdetails}(x) <- value
}
\arguments{
\item{x}{ENMdetails object}
\item{value}{input value}
}
\description{
enm.ncoefs generic for ENMdetails object
}
|
\name{pnsdrm}
\alias{pnsdrm}
\alias{pnsdrm.calc}
\alias{pns.plot1}
\title{Parametric, non-parametric or semi-parametric dose-response modelling}
\description{
Parametric, non-parametric or semi-parametric dose-response modelling of both continuous and quantal data.
}
\usage{
pnsdrm(predictor, response, weights, type = c("continuous", "binomial"),
model = c("semi-parametric", "non-parametric", "parametric"),
fct = NULL, robust = FALSE, respLev = c(10, 20, 50),
reference = NULL, level = 0.95, logex = FALSE)
pnsdrm.calc(predictor, response, weights, type = c("continuous", "binomial"),
model = c("semi-parametric", "non-parametric", "parametric"),
fct = NULL, robust = FALSE, respLev = c(10, 20, 50),
reference = NULL, level = 0.95, logex = FALSE)
}
\arguments{
\item{predictor}{numeric vector of concentrations/doses.}
\item{response}{numeric vector of response values (proportions in case of quantal data).}
\item{weights}{numeric vector of weights needed for quantal data.}
\item{type}{character string specifying the type of response.}
\item{model}{character string specifying the model to be fit.}
\item{fct}{a built-in function or a list of built-in functions from the package 'drc'.}
\item{robust}{logical specifying whether or not a robust approach should be used. Only for the
semi-parametric approach.}
\item{respLev}{numeric vector of requested ED level.}
\item{reference}{optional reference value for the lower limit.}
\item{level}{numeric specifying the confidence level.}
\item{logex}{logical indicating whether or not a logarithmic x axis should be used.}
}
\details{
The parametric estimation is based on the model fitting function \code{\link[drc]{drm}} in the package 'drc'.
The non-parametric estimation relies on the 'locfit' package.
The semi-parametric approach is mainly based on the development in Nottingham and Birch (2000), whereas the
non-parametric approach uses on the package 'EffectiveDose' which implements the method introduced in
Dette \emph{et al} (2004).
\code{plot} and \code{print} methods are available.
}
\value{
A list containing the requested ED values and additional information about the underlying
model fit(s).
}
\references{
Dette, H., Neumeyer, N. and Pilz, K. F. (2004) A Note on Nonparametric Estimation of the Effective Dose
in Quantal Bioassay, \emph{J. Amer. Statist. Assoc.}, \bold{100}, 503--510.
Nottingham, Q. and Birch, J. B. (2000) A Semiparametric Approach to Analysing Dose-Response Data,
\emph{Statist. Med.}, \bold{19}, 389--404.
}
\author{
Christian Ritz (wrapper functions)
Mads Jeppe Tarp-Johansen (internal functions)
}
%\note{
% The implementation of this function as well as all other functions in the package 'mrdrc' has been funded by
% European Centre for the Validation of Alternative Methods, EU Joint Research Centre under lot 3 of the
% project "Quality assessment and novel statistical analysis techniques for toxicological data".
%}
%\seealso{
% More examples are found in the help pages for \code{\link{bin.mat}} and \code{\link{exp.a}}.
%}
\examples{
## Analysing deguelin (in the package 'drc')
## Semi-parametric model
deguelin.mrr1 <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = LL.2())
deguelin.mrr1
plot(deguelin.mrr1)
## The same
gmFct <- getMeanFunctions(fname = "LL.2")
deguelin.mrr1b <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = gmFct)
deguelin.mrr1b
plot(deguelin.mrr1b)
## The same again
deguelin.mrr1c <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = list(LL2.2()))
deguelin.mrr1c
plot(deguelin.mrr1c)
deguelin.mrr1d <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = W1.2())
deguelin.mrr1d
plot(deguelin.mrr1d)
## The same
gmFct <- getMeanFunctions(fname = "W1.2")
deguelin.mrr1e <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = gmFct)
deguelin.mrr1e
plot(deguelin.mrr1e)
### Parametric models
#deguelin.mrr2 <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
#model = "parametric", fct = list(LL.2(), W1.2(), W2.2()))
#deguelin.mrr2
#plot(deguelin.mrr2)
### The same parametric models
#deguelin.mrr2b <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
#model = "parametric", fct = list(W2.2(), LL.2(), W1.2()))
#deguelin.mrr2b
#plot(deguelin.mrr2b)
## Non-parametric approach -- currently not available
#deguelin.mrr3 <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
#model = "non-parametric")
#deguelin.mrr3
#plot(deguelin.mrr3)
## Semi-parametric model with reference level 0.3
deguelin.mrr4 <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = LL.2(), reference = 0.3)
deguelin.mrr4
plot(deguelin.mrr4)
## Semi-parametric models
deguelin.mrr5 <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = list(LL.2(), W1.2(), W2.2()))
deguelin.mrr5
plot(deguelin.mrr5)
## Analysing ryegrass (in the package 'drc')
ryegrass.mrr1 <- pnsdrm(ryegrass$conc, ryegrass$rootl, type = "continuous",
model = "semi-parametric", fct = LL.5())
ryegrass.mrr1
plot(ryegrass.mrr1)
plot(ryegrass.mrr1, log = "x")
ryegrass.mrr2 <- pnsdrm(ryegrass$conc, ryegrass$rootl, type = "continuous",
model = "semi-parametric", fct = list(LL.3(), LL.4(), LL.5()))
ryegrass.mrr2
plot(ryegrass.mrr2)
#ryegrass.mrr3 <- pnsdrm(ryegrass$conc, ryegrass$rootl, type = "continuous",
#model = "parametric", fct = list(LL.3(), LL.4(), LL.5()))
#ryegrass.mrr3
#plot(ryegrass.mrr3)
ryegrass.mrr4 <- pnsdrm(ryegrass$conc, ryegrass$rootl, type = "continuous",
model = "semi-parametric", fct = list(L.4(), LL.4(), W1.4(), W2.4()))
ryegrass.mrr4
plot(ryegrass.mrr4)
## Analysing lettuce (in the package 'drc')
lettuce.mrr1 <- pnsdrm(lettuce$conc, lettuce$weight, type = "continuous",
model = "semi-parametric", fct = LL.3())
lettuce.mrr1
plot(lettuce.mrr1)
lettuce.mrr2 <- pnsdrm(lettuce$conc, lettuce$weight, type = "continuous",
model = "semi-parametric", fct = BC.4())
lettuce.mrr2
plot(lettuce.mrr2)
#lettuce.mrr3 <- pnsdrm(lettuce$conc, lettuce$weight, type = "continuous",
#model = "semi-parametric", fct = LL.3(), robust = TRUE)
#lettuce.mrr3
#plot(lettuce.mrr3)
}
\keyword{models}
\keyword{nonlinear}
|
/man/pnsdrm.Rd
|
no_license
|
cran/mrdrc
|
R
| false
| false
| 6,681
|
rd
|
\name{pnsdrm}
\alias{pnsdrm}
\alias{pnsdrm.calc}
\alias{pns.plot1}
\title{Parametric, non-parametric or semi-parametric dose-response modelling}
\description{
Parametric, non-parametric or semi-parametric dose-response modelling of both continuous and quantal data.
}
\usage{
pnsdrm(predictor, response, weights, type = c("continuous", "binomial"),
model = c("semi-parametric", "non-parametric", "parametric"),
fct = NULL, robust = FALSE, respLev = c(10, 20, 50),
reference = NULL, level = 0.95, logex = FALSE)
pnsdrm.calc(predictor, response, weights, type = c("continuous", "binomial"),
model = c("semi-parametric", "non-parametric", "parametric"),
fct = NULL, robust = FALSE, respLev = c(10, 20, 50),
reference = NULL, level = 0.95, logex = FALSE)
}
\arguments{
\item{predictor}{numeric vector of concentrations/doses.}
\item{response}{numeric vector of response values (proportions in case of quantal data).}
\item{weights}{numeric vector of weights needed for quantal data.}
\item{type}{character string specifying the type of response.}
\item{model}{character string specifying the model to be fit.}
\item{fct}{a built-in function or a list of built-in functions from the package 'drc'.}
\item{robust}{logical specifying whether or not a robust approach should be used. Only for the
semi-parametric approach.}
\item{respLev}{numeric vector of requested ED level.}
\item{reference}{optional reference value for the lower limit.}
\item{level}{numeric specifying the confidence level.}
\item{logex}{logical indicating whether or not a logarithmic x axis should be used.}
}
\details{
The parametric estimation is based on the model fitting function \code{\link[drc]{drm}} in the package 'drc'.
The non-parametric estimation relies on the 'locfit' package.
The semi-parametric approach is mainly based on the development in Nottingham and Birch (2000), whereas the
non-parametric approach uses on the package 'EffectiveDose' which implements the method introduced in
Dette \emph{et al} (2004).
\code{plot} and \code{print} methods are available.
}
\value{
A list containing the requested ED values and additional information about the underlying
model fit(s).
}
\references{
Dette, H., Neumeyer, N. and Pilz, K. F. (2004) A Note on Nonparametric Estimation of the Effective Dose
in Quantal Bioassay, \emph{J. Amer. Statist. Assoc.}, \bold{100}, 503--510.
Nottingham, Q. and Birch, J. B. (2000) A Semiparametric Approach to Analysing Dose-Response Data,
\emph{Statist. Med.}, \bold{19}, 389--404.
}
\author{
Christian Ritz (wrapper functions)
Mads Jeppe Tarp-Johansen (internal functions)
}
%\note{
% The implementation of this function as well as all other functions in the package 'mrdrc' has been funded by
% European Centre for the Validation of Alternative Methods, EU Joint Research Centre under lot 3 of the
% project "Quality assessment and novel statistical analysis techniques for toxicological data".
%}
%\seealso{
% More examples are found in the help pages for \code{\link{bin.mat}} and \code{\link{exp.a}}.
%}
\examples{
## Analysing deguelin (in the package 'drc')
## Semi-parametric model
deguelin.mrr1 <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = LL.2())
deguelin.mrr1
plot(deguelin.mrr1)
## The same
gmFct <- getMeanFunctions(fname = "LL.2")
deguelin.mrr1b <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = gmFct)
deguelin.mrr1b
plot(deguelin.mrr1b)
## The same again
deguelin.mrr1c <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = list(LL2.2()))
deguelin.mrr1c
plot(deguelin.mrr1c)
deguelin.mrr1d <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = W1.2())
deguelin.mrr1d
plot(deguelin.mrr1d)
## The same
gmFct <- getMeanFunctions(fname = "W1.2")
deguelin.mrr1e <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = gmFct)
deguelin.mrr1e
plot(deguelin.mrr1e)
### Parametric models
#deguelin.mrr2 <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
#model = "parametric", fct = list(LL.2(), W1.2(), W2.2()))
#deguelin.mrr2
#plot(deguelin.mrr2)
### The same parametric models
#deguelin.mrr2b <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
#model = "parametric", fct = list(W2.2(), LL.2(), W1.2()))
#deguelin.mrr2b
#plot(deguelin.mrr2b)
## Non-parametric approach -- currently not available
#deguelin.mrr3 <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
#model = "non-parametric")
#deguelin.mrr3
#plot(deguelin.mrr3)
## Semi-parametric model with reference level 0.3
deguelin.mrr4 <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = LL.2(), reference = 0.3)
deguelin.mrr4
plot(deguelin.mrr4)
## Semi-parametric models
deguelin.mrr5 <- pnsdrm(deguelin$dose, deguelin$r, deguelin$n, type = "binomial",
model = "semi-parametric", fct = list(LL.2(), W1.2(), W2.2()))
deguelin.mrr5
plot(deguelin.mrr5)
## Analysing ryegrass (in the package 'drc')
ryegrass.mrr1 <- pnsdrm(ryegrass$conc, ryegrass$rootl, type = "continuous",
model = "semi-parametric", fct = LL.5())
ryegrass.mrr1
plot(ryegrass.mrr1)
plot(ryegrass.mrr1, log = "x")
ryegrass.mrr2 <- pnsdrm(ryegrass$conc, ryegrass$rootl, type = "continuous",
model = "semi-parametric", fct = list(LL.3(), LL.4(), LL.5()))
ryegrass.mrr2
plot(ryegrass.mrr2)
#ryegrass.mrr3 <- pnsdrm(ryegrass$conc, ryegrass$rootl, type = "continuous",
#model = "parametric", fct = list(LL.3(), LL.4(), LL.5()))
#ryegrass.mrr3
#plot(ryegrass.mrr3)
ryegrass.mrr4 <- pnsdrm(ryegrass$conc, ryegrass$rootl, type = "continuous",
model = "semi-parametric", fct = list(L.4(), LL.4(), W1.4(), W2.4()))
ryegrass.mrr4
plot(ryegrass.mrr4)
## Analysing lettuce (in the package 'drc')
lettuce.mrr1 <- pnsdrm(lettuce$conc, lettuce$weight, type = "continuous",
model = "semi-parametric", fct = LL.3())
lettuce.mrr1
plot(lettuce.mrr1)
lettuce.mrr2 <- pnsdrm(lettuce$conc, lettuce$weight, type = "continuous",
model = "semi-parametric", fct = BC.4())
lettuce.mrr2
plot(lettuce.mrr2)
#lettuce.mrr3 <- pnsdrm(lettuce$conc, lettuce$weight, type = "continuous",
#model = "semi-parametric", fct = LL.3(), robust = TRUE)
#lettuce.mrr3
#plot(lettuce.mrr3)
}
\keyword{models}
\keyword{nonlinear}
|
limit <- 1
custom_query <- list(keya="aa")
verbose <- TRUE
timeout <- 20
lat <- "latit"
long <- "longit"
api_url <- "http://www.mapquestapi.com/geocoding/v1/batch"
address_df <- tibble::tribble(~address, "Madrid, ES", "hahuauhauauhu", "Segovia")
#address_df <- tibble::tibble(address = mapSpain::esp_munic.sf[1:101,]$name)
if (is.null(api_url)) api_url <- "http://www.mapquestapi.com/geocoding/v1/batch"
NA_value <- get_na_value(lat, long, rows = nrow(address_df)) # filler result to return if needed
# Construct query - for display only
query_parameters <- get_api_query("mapquest", list(limit = limit, api_key = get_key("mapquest")),
custom_parameters = custom_query
)
if (verbose == TRUE) display_query(api_url, query_parameters)
# https://developer.mapquest.com/documentation/geocoding-api/batch/post/
# Construct POST query
# A. Only certain parameters should be in the POST call----
body_params <- query_parameters[!names(query_parameters) %in% c("key", "callback")]
query_parameters <- query_parameters[names(query_parameters) %in% c("key", "callback")]
# B. Construct Body----
address_list <- list(
locations = address_df[["address"]],
options = body_params
)
# Query API
query_results <- query_api(api_url, query_parameters, mode = "list", input_list = address_list, timeout = timeout)
# Error handling----
# Parse result code
if (jsonlite::validate(query_results$content)){
status_code = jsonlite::fromJSON(query_results$content, flatten = TRUE)$info$statuscode
} else {
status_code = query_results$status
}
# Succesful status_code is 0
if (status_code == "0") status_code <- "200"
status_code <- as.character(status_code)
if (verbose == TRUE) message(paste0('HTTP Status Code: ', as.character(status_code)))
## Extract results -----------------------------------------------------------------------------------
# if there were problems with the results then return NA
if (status_code != "200") {
if (!jsonlite::validate(query_results$content)) {
# in cases like this, display the raw content but limit the length
# in case it is really long.
message(paste0('Error: ', strtrim(as.character(query_results$content), 100)))
} else {
content <- jsonlite::fromJSON(query_results$content, flatten = TRUE)
if (!is.null(content$info$messages)) message(paste0('Error: ', content$info$messages))
}
return(NA_value)
}
# End error handling-----
# Note that flatten here is necessary in order to get rid of the
# nested dataframes that would cause dplyr::bind_rows (or rbind) to fail
content <- jsonlite::fromJSON(query_results$content, flatten = TRUE)
# combine list of dataframes into a single tibble. Column names may differ between the dataframes
# MapQuest always return a default value (lat:39.4 long:-99.1) for non-found addresses
results <- dplyr::bind_rows(content$results$locations)
# rename lat/long columns
names(results)[names(results) == 'latLng.lat'] <- lat
names(results)[names(results) == 'latLng.lng'] <- long
# Prepare output----
if (full_results == FALSE) return(results[c(lat, long)])
else return(cbind(results[c(lat, long)], results[!names(results) %in% c(lat, long)]))
# Live test -----
library(tibble)
tidygeocoder::geo(
address = c("Plaza Mayor", "George Street"),
method = "mapquest",
lat = "latt",
long = "longgg",
verbose = TRUE
)
tidygeocoder::geo(
address = c("Plaza Mayor", "George Street"),
method = "mapquest",
limit = 5,
full_results = TRUE,
return_addresses = FALSE,
verbose = TRUE
)
ss <- tidygeocoder::geo(
address = c("Plaza Mayor", "xxxxxxxxx", "George Street"),
method = "mapquest",
lat = "latitude",
long = "longitude",
full_results = TRUE,
verbose = TRUE,
custom_query = list(language = "de-DE")
)
glimpse(ss)
tidygeocoder::geo(
address = c("Plaza Mayor", "George Street"),
method = "mapquest",
limit = 1,
mapquest_open = TRUE,
full_results = TRUE,
return_addresses = FALSE,
verbose = TRUE
)
params <- tidygeocoder::geo(
address = c("Plaza Mayor", "George Street"),
method = "mapquest",
limit = 1,
full_results = TRUE,
return_addresses = FALSE,
verbose = TRUE,
custom_query = list(thumbMaps ="false", ignoreLatLngInput = TRUE)
)
glimpse(params)
# Silent
tidygeocoder::geo(
address = c("Plaza Mayor", "George Street"),
method = "mapquest",
limit = 1,
full_results = TRUE,
return_addresses = TRUE,
verbose = FALSE
)
# Try single result
tidygeocoder::geo(
address = c("Plaza Mayor"),
method = "mapquest",
mode = "batch",
full_results = TRUE,
lat = "latt",
long = "longgg",
verbose = TRUE
)
# Error for limit
library(mapSpain)
library(tibble)
library(dplyr)
address <- tibble(direcciones = mapSpain::esp_munic.sf$name) %>%
slice(1:101)
err <- address %>%
geocode(
address = "direcciones", method = "mapquest", full_results = TRUE,
verbose = TRUE, lat = "latitude"
)
err
# Error for api key
tidygeocoder::geo(
address = c("Plaza Mayor", "George Street"),
method = "mapquest",
limit = 1,
full_results = TRUE,
return_addresses = TRUE,
verbose = TRUE,
custom_query = list(key="xxxx")
)
# Error on bad parameter
tidygeocoder::geo(
address = c("Plaza Mayor", "George Street"),
method = "mapquest",
limit = 1,
full_results = TRUE,
return_addresses = TRUE,
verbose = TRUE,
custom_query = list(thumbMaps ="xxxx")
)
# Full batch test
address_ok <- tibble(direcciones = mapSpain::esp_munic.sf$name) %>%
slice(1:100)
full_batch <- address_ok %>%
geocode(
address = "direcciones", method = "mapquest", full_results = TRUE,
verbose = TRUE, lat = "latitude"
)
full_batch
|
/sandbox/query_debugging/mapquest_batch.R
|
permissive
|
jessecambon/tidygeocoder
|
R
| false
| false
| 5,646
|
r
|
limit <- 1
custom_query <- list(keya="aa")
verbose <- TRUE
timeout <- 20
lat <- "latit"
long <- "longit"
api_url <- "http://www.mapquestapi.com/geocoding/v1/batch"
address_df <- tibble::tribble(~address, "Madrid, ES", "hahuauhauauhu", "Segovia")
#address_df <- tibble::tibble(address = mapSpain::esp_munic.sf[1:101,]$name)
if (is.null(api_url)) api_url <- "http://www.mapquestapi.com/geocoding/v1/batch"
NA_value <- get_na_value(lat, long, rows = nrow(address_df)) # filler result to return if needed
# Construct query - for display only
query_parameters <- get_api_query("mapquest", list(limit = limit, api_key = get_key("mapquest")),
custom_parameters = custom_query
)
if (verbose == TRUE) display_query(api_url, query_parameters)
# https://developer.mapquest.com/documentation/geocoding-api/batch/post/
# Construct POST query
# A. Only certain parameters should be in the POST call----
body_params <- query_parameters[!names(query_parameters) %in% c("key", "callback")]
query_parameters <- query_parameters[names(query_parameters) %in% c("key", "callback")]
# B. Construct Body----
address_list <- list(
locations = address_df[["address"]],
options = body_params
)
# Query API
query_results <- query_api(api_url, query_parameters, mode = "list", input_list = address_list, timeout = timeout)
# Error handling----
# Parse result code
if (jsonlite::validate(query_results$content)){
status_code = jsonlite::fromJSON(query_results$content, flatten = TRUE)$info$statuscode
} else {
status_code = query_results$status
}
# Succesful status_code is 0
if (status_code == "0") status_code <- "200"
status_code <- as.character(status_code)
if (verbose == TRUE) message(paste0('HTTP Status Code: ', as.character(status_code)))
## Extract results -----------------------------------------------------------------------------------
# if there were problems with the results then return NA
if (status_code != "200") {
if (!jsonlite::validate(query_results$content)) {
# in cases like this, display the raw content but limit the length
# in case it is really long.
message(paste0('Error: ', strtrim(as.character(query_results$content), 100)))
} else {
content <- jsonlite::fromJSON(query_results$content, flatten = TRUE)
if (!is.null(content$info$messages)) message(paste0('Error: ', content$info$messages))
}
return(NA_value)
}
# End error handling-----
# Note that flatten here is necessary in order to get rid of the
# nested dataframes that would cause dplyr::bind_rows (or rbind) to fail
content <- jsonlite::fromJSON(query_results$content, flatten = TRUE)
# combine list of dataframes into a single tibble. Column names may differ between the dataframes
# MapQuest always return a default value (lat:39.4 long:-99.1) for non-found addresses
results <- dplyr::bind_rows(content$results$locations)
# rename lat/long columns
names(results)[names(results) == 'latLng.lat'] <- lat
names(results)[names(results) == 'latLng.lng'] <- long
# Prepare output----
if (full_results == FALSE) return(results[c(lat, long)])
else return(cbind(results[c(lat, long)], results[!names(results) %in% c(lat, long)]))
# Live test -----
library(tibble)
tidygeocoder::geo(
address = c("Plaza Mayor", "George Street"),
method = "mapquest",
lat = "latt",
long = "longgg",
verbose = TRUE
)
tidygeocoder::geo(
address = c("Plaza Mayor", "George Street"),
method = "mapquest",
limit = 5,
full_results = TRUE,
return_addresses = FALSE,
verbose = TRUE
)
ss <- tidygeocoder::geo(
address = c("Plaza Mayor", "xxxxxxxxx", "George Street"),
method = "mapquest",
lat = "latitude",
long = "longitude",
full_results = TRUE,
verbose = TRUE,
custom_query = list(language = "de-DE")
)
glimpse(ss)
tidygeocoder::geo(
address = c("Plaza Mayor", "George Street"),
method = "mapquest",
limit = 1,
mapquest_open = TRUE,
full_results = TRUE,
return_addresses = FALSE,
verbose = TRUE
)
params <- tidygeocoder::geo(
address = c("Plaza Mayor", "George Street"),
method = "mapquest",
limit = 1,
full_results = TRUE,
return_addresses = FALSE,
verbose = TRUE,
custom_query = list(thumbMaps ="false", ignoreLatLngInput = TRUE)
)
glimpse(params)
# Silent
tidygeocoder::geo(
address = c("Plaza Mayor", "George Street"),
method = "mapquest",
limit = 1,
full_results = TRUE,
return_addresses = TRUE,
verbose = FALSE
)
# Try single result
tidygeocoder::geo(
address = c("Plaza Mayor"),
method = "mapquest",
mode = "batch",
full_results = TRUE,
lat = "latt",
long = "longgg",
verbose = TRUE
)
# Error for limit
library(mapSpain)
library(tibble)
library(dplyr)
address <- tibble(direcciones = mapSpain::esp_munic.sf$name) %>%
slice(1:101)
err <- address %>%
geocode(
address = "direcciones", method = "mapquest", full_results = TRUE,
verbose = TRUE, lat = "latitude"
)
err
# Error for api key
tidygeocoder::geo(
address = c("Plaza Mayor", "George Street"),
method = "mapquest",
limit = 1,
full_results = TRUE,
return_addresses = TRUE,
verbose = TRUE,
custom_query = list(key="xxxx")
)
# Error on bad parameter
tidygeocoder::geo(
address = c("Plaza Mayor", "George Street"),
method = "mapquest",
limit = 1,
full_results = TRUE,
return_addresses = TRUE,
verbose = TRUE,
custom_query = list(thumbMaps ="xxxx")
)
# Full batch test
address_ok <- tibble(direcciones = mapSpain::esp_munic.sf$name) %>%
slice(1:100)
full_batch <- address_ok %>%
geocode(
address = "direcciones", method = "mapquest", full_results = TRUE,
verbose = TRUE, lat = "latitude"
)
full_batch
|
set.seed(1)
m= 6
n = 5
#couleurs <- sample(colors(), m)
couleurs <- sprintf("Couleur%i", seq_len(m))
couleurs
jeu <- sample(couleurs, n, replace = TRUE)
jeu
proposition <- sample(couleurs, n, replace = TRUE)
proposition
resultat <- reponse(proposition, jeu)
resultat
score(proposition, jeu)
# Retourne le nombre de fiches blanches et le nombre de fiches noires :
# Fiches noires = nombre de fiches bien placées
# Nombre de fiches de la bonne couleur mais mal placées
reponse <- function(proposition, jeu){
c("Fiches noires" = nb_fiches_noires(proposition, jeu),
"Fiches blanches" = nb_fiches_blanches(proposition, jeu))
}
nb_fiches_noires <- function(proposition, jeu){
sum(proposition == jeu)
}
nb_fiches_blanches <- function(proposition, jeu){
# On enlève les bien placés
sous_prop <- proposition[proposition != jeu]
sous_jeu <- jeu[proposition != jeu]
if(length(sous_prop) == 0)
return(0)
# Pour chaque couleur de sous_prop, on regarde si elle est dans jeu
mal_places <- sapply(sous_prop, function(x){
length(grep(x,sous_jeu))>0
})
sum(mal_places)
}
# Fonction utilisée pour calculer la performance :
# une fiche noire compte double car c'est plus important
score <- function(proposition, jeu){
resultat <- reponse(proposition, jeu)
resultat["Fiches noires"] * 2 + resultat["Fiches blanches"]
}
|
/Brouillons/Alain/Code.R
|
no_license
|
ARKEnsae/Mastermind_Simulation
|
R
| false
| false
| 1,343
|
r
|
# Mastermind-like simulation setup: draw a random secret combination ("jeu")
# and a random guess ("proposition"), then evaluate the guess.
set.seed(1)
# m = number of available colours, n = number of pegs per combination.
m= 6
n = 5
#couleurs <- sample(colors(), m)
couleurs <- sprintf("Couleur%i", seq_len(m))
couleurs
# Secret combination, drawn with replacement (duplicate colours allowed).
jeu <- sample(couleurs, n, replace = TRUE)
jeu
# A random guess to evaluate against the secret.
proposition <- sample(couleurs, n, replace = TRUE)
proposition
# NOTE(review): reponse() and score() are defined further down in this file;
# sourcing the script strictly top-to-bottom would fail here unless the
# functions were loaded beforehand — confirm intended execution order.
resultat <- reponse(proposition, jeu)
resultat
score(proposition, jeu)
# Compare a guess ("proposition") against the secret combination ("jeu"):
#  - "Fiches noires" (black pegs): right colour in the right position
#  - "Fiches blanches" (white pegs): right colour, wrong position
reponse <- function(proposition, jeu){
  noires <- nb_fiches_noires(proposition, jeu)
  blanches <- nb_fiches_blanches(proposition, jeu)
  c("Fiches noires" = noires, "Fiches blanches" = blanches)
}
# Count "black pegs": positions where the guess equals the secret exactly.
nb_fiches_noires <- function(proposition, jeu){
  bien_places <- proposition == jeu
  sum(bien_places)
}
# Count "white pegs": colours present in the secret but at the wrong position.
# Fixes two defects of the original implementation:
#  * grep() did regex *substring* matching, so e.g. "Couleur1" also matched
#    "Couleur10"; colours are now compared for exact equality.
#  * every misplaced guess peg matched any occurrence in the secret, so
#    duplicated colours were over-counted; standard Mastermind counts
#    min(#occurrences in guess, #occurrences in secret) per colour.
nb_fiches_blanches <- function(proposition, jeu){
  # Drop the exactly-matched (black-peg) positions first.
  mal_places <- proposition != jeu
  sous_prop <- proposition[mal_places]
  sous_jeu <- jeu[mal_places]
  if(length(sous_prop) == 0)
    return(0)
  # For each colour present on both sides, count the smaller multiplicity.
  couleurs_communes <- intersect(sous_prop, sous_jeu)
  if(length(couleurs_communes) == 0)
    return(0)
  sum(vapply(couleurs_communes, function(couleur){
    min(sum(sous_prop == couleur), sum(sous_jeu == couleur))
  }, numeric(1)))
}
# Performance metric for a guess: black pegs count double because a
# well-placed colour is more informative than a misplaced one.
score <- function(proposition, jeu){
  fiches <- reponse(proposition, jeu)
  fiches["Fiches noires"] * 2 + fiches["Fiches blanches"]
}
|
#' query \code{problemInstance}-objects depending on argument \code{type}
#'
#' @param object an object of class \code{problemInstance}
#' @param type a character vector of length 1 defining what to calculate|return|modify. Allowed types are:
#' \itemize{
#' \item strID: vector of unique IDs for each table cell
#' \item nrVars: total number of table cells
#' \item freq: vector of frequencies
#' \item w: a vector of weights used in the linear problem (or NULL)
#' \item numVars: a list containing numeric vectors containing values for numerical variables for each table cell (or NULL)
#' \item sdcStatus: a vector containing the suppression state for each cell (possible values are 'u': primary suppression, 'x': secondary suppression, 'z': forced for publication, 's': publishable cell, 'w': dummy cells that are considered only when applying the simple greedy heuristic to protect the table)
#' \item lb: lower bound assumed to be known by attackers for each table cell
#' \item ub: upper bound assumed to be known by attackers for each table cell
#' \item LPL: lower protection level required to protect table cells
#' \item UPL: upper protection level required to protect table cells
#' \item SPL: sliding protection level required to protect table cells
#' \item primSupps: vector of indices of primary sensitive cells
#' \item secondSupps: vector of indices of secondary suppressed cells
#' \item forcedCells: vector of indices of cells that must not be suppressed
#' \item hasPrimSupps: shows if \code{object} has primary suppressions or not
#' \item hasSecondSupps: shows if \code{object} has secondary suppressions or not
#' \item hasForcedCells: shows if \code{object} has cells that must not be suppressed
#' \item weight: gives weight that is used the suppression procedures
#' \item suppPattern: gives the current suppression pattern
#' }
#'
#' @return information from objects of class \code{problemInstance} depending on argument \code{type}
#' \itemize{
#' \item a list (or NULL) if argument \code{type} matches 'numVars'
#' \item numeric vector if argument \code{type} matches 'freq', 'lb', 'ub', 'LPL', 'UPL', 'SPL', 'weight', 'suppPattern'
#' \item numeric vector (or NULL) if argument \code{type} matches 'w', 'primSupps', 'secondSupps', 'forcedCells'
#' \item character vector if argument \code{type} matches 'strID', 'sdcStatus', ''
#' \item logical vector of length 1 if argument \code{type} matches 'hasPrimSupps', 'hasSecondSupps', 'hasForcedCells'
#' \item numerical vector of length 1 if argument \code{type} matches 'nrVars'
#' }
#'
#' @export
#' @docType methods
#' @rdname get.problemInstance-method
#'
#' @note internal function
#' @author Bernhard Meindl \email{bernhard.meindl@@statistik.gv.at}
setGeneric("get.problemInstance", function(object, type) {
  standardGeneric("get.problemInstance")
})
#' modify \code{problemInstance}-objects depending on argument \code{type}
#'
#' @param object an object of class \code{problemInstance}
#' @param type a character vector of length 1 defining what to calculate|return|modify. Allowed types are:
#' \itemize{
#' \item lb: set assumed to be known lower bounds
#' \item ub: set assumed to be known upper bounds
#' \item LPL: set lower protection levels
#' \item UPL: set upper protection levels
#' \item SPL: set sliding protection levels
#' \item sdcStatus: change anonymization status
#' }
#' @param input a list with elements 'indices' and 'values'.
#'
#' \itemize{
#' \item element 'indices': numeric vector defining the indices of the cells that should be modified
#' \item element 'values': numeric vector whose values are going to replace current values for cells defined by 'indices' depending on argument \code{type}
#' }
#'
#' @return an object of class \code{problemInstance}
#'
#' @export
#' @docType methods
#' @rdname set.problemInstance-method
#'
#' @note internal function
#' @author Bernhard Meindl \email{bernhard.meindl@@statistik.gv.at}
setGeneric("set.problemInstance", function(object, type, input) {
  standardGeneric("set.problemInstance")
})
#' perform calculations on \code{problemInstance}-objects depending on argument \code{type}
#'
#' @param object an object of class \code{problemInstance}
#' @param type a character vector of length 1 defining what to calculate|return|modify. Allowed types are:
#' \itemize{
#' \item makeMasterProblem: create the master problem that is the core of the secondary cell suppression problem
#' \item isProtectedSolution: check if a solution violates any required (upper|lower|sliding) protection levels
#' }
#' @param input a list depending on argument \code{type}.
#'
#' \itemize{
#' \item type==makeMasterProblem: input is not used (empty list)
#' \item type==isProtectedSolution: input is a list of length 2 with elements 'input1' and 'input2'
#' \itemize{
#' \item element 'input1': numeric vector of calculated known lower cell bounds (from attacker's problem)
#' \item element 'input2': numeric vector of known upper cell bounds (from attacker's problem) }
#' }
#'
#' @return information from objects of class \code{problemInstance} depending on argument \code{type}
#' \itemize{
#' \item an object of class \code{linProb} if argument \code{type} matches 'makeMasterProblem'
#' \item logical vector of length 1 if argument \code{type} matches 'isProtectedSolution' with TRUE if all primary suppressed cells are adequately protected, FALSE otherwise }
#'
#' @keywords internal
#' @docType methods
#' @rdname calc.problemInstance-method
#'
#' @note internal function
#' @author Bernhard Meindl \email{bernhard.meindl@@statistik.gv.at}
setGeneric("calc.problemInstance", function(object, type, input) {
  standardGeneric("calc.problemInstance")
})
# --- Accessor ("get") generics for problemInstance-objects -----------------
setGeneric("g_sdcStatus", function(object) standardGeneric("g_sdcStatus"))
setGeneric("g_primSupps", function(object) standardGeneric("g_primSupps"))
setGeneric("g_secondSupps", function(object) standardGeneric("g_secondSupps"))
setGeneric("g_forcedCells", function(object) standardGeneric("g_forcedCells"))
setGeneric("g_type", function(object) standardGeneric("g_type"))
setGeneric("g_freq", function(object) standardGeneric("g_freq"))
setGeneric("g_strID", function(object) standardGeneric("g_strID"))
setGeneric("g_UPL", function(object) standardGeneric("g_UPL"))
setGeneric("g_LPL", function(object) standardGeneric("g_LPL"))
setGeneric("g_SPL", function(object) standardGeneric("g_SPL"))
setGeneric("g_nrVars", function(object) standardGeneric("g_nrVars"))
setGeneric("g_lb", function(object) standardGeneric("g_lb"))
setGeneric("g_ub", function(object) standardGeneric("g_ub"))
setGeneric("g_w", function(object) standardGeneric("g_w"))
setGeneric("g_numVars", function(object) standardGeneric("g_numVars"))
setGeneric("g_hasPrimSupps", function(object) standardGeneric("g_hasPrimSupps"))
setGeneric("g_hasSecondSupps", function(object) standardGeneric("g_hasSecondSupps"))
setGeneric("g_hasForcedCells", function(object) standardGeneric("g_hasForcedCells"))
setGeneric("g_weight", function(object) standardGeneric("g_weight"))
setGeneric("g_suppPattern", function(object) standardGeneric("g_suppPattern"))
# --- Replacement ("set") generics ------------------------------------------
setGeneric("s_sdcStatus<-", function(object, value) {
  standardGeneric("s_sdcStatus<-")
})
setGeneric("s_lb<-", function(object, value) {
  standardGeneric("s_lb<-")
})
setGeneric("s_ub<-", function(object, value) {
  standardGeneric("s_ub<-")
})
setGeneric("s_LPL<-", function(object, value) {
  standardGeneric("s_LPL<-")
})
setGeneric("s_UPL<-", function(object, value) {
  standardGeneric("s_UPL<-")
})
setGeneric("s_SPL<-", function(object, value) {
  standardGeneric("s_SPL<-")
})
# --- Computation ("calc") generics -----------------------------------------
setGeneric("c_make_masterproblem", function(object, input) standardGeneric("c_make_masterproblem"))
setGeneric("c_is_protected_solution", function(object, input) standardGeneric("c_is_protected_solution"))
|
/R/generics_problemInstance.r
|
no_license
|
sdcTools/sdcTable
|
R
| false
| false
| 7,930
|
r
|
#' query \code{problemInstance}-objects depending on argument \code{type}
#'
#' @param object an object of class \code{problemInstance}
#' @param type a character vector of length 1 defining what to calculate|return|modify. Allowed types are:
#' \itemize{
#' \item strID: vector of unique IDs for each table cell
#' \item nrVars: total number of table cells
#' \item freq: vector of frequencies
#' \item w: a vector of weights used in the linear problem (or NULL)
#' \item numVars: a list containing numeric vectors containing values for numerical variables for each table cell (or NULL)
#' \item sdcStatus: a vector containing the suppression state for each cell (possible values are 'u': primary suppression, 'x': secondary suppression, 'z': forced for publication, 's': publishable cell, 'w': dummy cells that are considered only when applying the simple greedy heuristic to protect the table)
#' \item lb: lower bound assumed to be known by attackers for each table cell
#' \item ub: upper bound assumed to be known by attackers for each table cell
#' \item LPL: lower protection level required to protect table cells
#' \item UPL: upper protection level required to protect table cells
#' \item SPL: sliding protection level required to protect table cells
#' \item primSupps: vector of indices of primary sensitive cells
#' \item secondSupps: vector of indices of secondary suppressed cells
#' \item forcedCells: vector of indices of cells that must not be suppressed
#' \item hasPrimSupps: shows if \code{object} has primary suppressions or not
#' \item hasSecondSupps: shows if \code{object} has secondary suppressions or not
#' \item hasForcedCells: shows if \code{object} has cells that must not be suppressed
#' \item weight: gives weight that is used the suppression procedures
#' \item suppPattern: gives the current suppression pattern
#' }
#'
#' @return information from objects of class \code{problemInstance} depending on argument \code{type}
#' \itemize{
#' \item a list (or NULL) if argument \code{type} matches 'numVars'
#' \item numeric vector if argument \code{type} matches 'freq', 'lb', 'ub', 'LPL', 'UPL', 'SPL', 'weight', 'suppPattern'
#' \item numeric vector (or NULL) if argument \code{type} matches 'w', 'primSupps', 'secondSupps', 'forcedCells'
#' \item character vector if argument \code{type} matches 'strID', 'sdcStatus', ''
#' \item logical vector of length 1 if argument \code{type} matches 'hasPrimSupps', 'hasSecondSupps', 'hasForcedCells'
#' \item numerical vector of length 1 if argument \code{type} matches 'nrVars'
#' }
#'
#' @export
#' @docType methods
#' @rdname get.problemInstance-method
#'
#' @note internal function
#' @author Bernhard Meindl \email{bernhard.meindl@@statistik.gv.at}
setGeneric("get.problemInstance", function(object, type) {
  standardGeneric("get.problemInstance")
})
#' modify \code{problemInstance}-objects depending on argument \code{type}
#'
#' @param object an object of class \code{problemInstance}
#' @param type a character vector of length 1 defining what to calculate|return|modify. Allowed types are:
#' \itemize{
#' \item lb: set assumed to be known lower bounds
#' \item ub: set assumed to be known upper bounds
#' \item LPL: set lower protection levels
#' \item UPL: set upper protection levels
#' \item SPL: set sliding protection levels
#' \item sdcStatus: change anonymization status
#' }
#' @param input a list with elements 'indices' and 'values'.
#'
#' \itemize{
#' \item element 'indices': numeric vector defining the indices of the cells that should be modified
#' \item element 'values': numeric vector whose values are going to replace current values for cells defined by 'indices' depending on argument \code{type}
#' }
#'
#' @return an object of class \code{problemInstance}
#'
#' @export
#' @docType methods
#' @rdname set.problemInstance-method
#'
#' @note internal function
#' @author Bernhard Meindl \email{bernhard.meindl@@statistik.gv.at}
setGeneric("set.problemInstance", function(object, type, input) {
  standardGeneric("set.problemInstance")
})
#' perform calculations on \code{problemInstance}-objects depending on argument \code{type}
#'
#' @param object an object of class \code{problemInstance}
#' @param type a character vector of length 1 defining what to calculate|return|modify. Allowed types are:
#' \itemize{
#' \item makeMasterProblem: create the master problem that is the core of the secondary cell suppression problem
#' \item isProtectedSolution: check if a solution violates any required (upper|lower|sliding) protection levels
#' }
#' @param input a list depending on argument \code{type}.
#'
#' \itemize{
#' \item type==makeMasterProblem: input is not used (empty list)
#' \item type==isProtectedSolution: input is a list of length 2 with elements 'input1' and 'input2'
#' \itemize{
#' \item element 'input1': numeric vector of calculated known lower cell bounds (from attacker's problem)
#' \item element 'input2': numeric vector of known upper cell bounds (from attacker's problem) }
#' }
#'
#' @return information from objects of class \code{problemInstance} depending on argument \code{type}
#' \itemize{
#' \item an object of class \code{linProb} if argument \code{type} matches 'makeMasterProblem'
#' \item logical vector of length 1 if argument \code{type} matches 'isProtectedSolution' with TRUE if all primary suppressed cells are adequately protected, FALSE otherwise }
#'
#' @keywords internal
#' @docType methods
#' @rdname calc.problemInstance-method
#'
#' @note internal function
#' @author Bernhard Meindl \email{bernhard.meindl@@statistik.gv.at}
setGeneric("calc.problemInstance", function(object, type, input) {
  standardGeneric("calc.problemInstance")
})
# --- Accessor ("get") generics for problemInstance-objects -----------------
setGeneric("g_sdcStatus", function(object) standardGeneric("g_sdcStatus"))
setGeneric("g_primSupps", function(object) standardGeneric("g_primSupps"))
setGeneric("g_secondSupps", function(object) standardGeneric("g_secondSupps"))
setGeneric("g_forcedCells", function(object) standardGeneric("g_forcedCells"))
setGeneric("g_type", function(object) standardGeneric("g_type"))
setGeneric("g_freq", function(object) standardGeneric("g_freq"))
setGeneric("g_strID", function(object) standardGeneric("g_strID"))
setGeneric("g_UPL", function(object) standardGeneric("g_UPL"))
setGeneric("g_LPL", function(object) standardGeneric("g_LPL"))
setGeneric("g_SPL", function(object) standardGeneric("g_SPL"))
setGeneric("g_nrVars", function(object) standardGeneric("g_nrVars"))
setGeneric("g_lb", function(object) standardGeneric("g_lb"))
setGeneric("g_ub", function(object) standardGeneric("g_ub"))
setGeneric("g_w", function(object) standardGeneric("g_w"))
setGeneric("g_numVars", function(object) standardGeneric("g_numVars"))
setGeneric("g_hasPrimSupps", function(object) standardGeneric("g_hasPrimSupps"))
setGeneric("g_hasSecondSupps", function(object) standardGeneric("g_hasSecondSupps"))
setGeneric("g_hasForcedCells", function(object) standardGeneric("g_hasForcedCells"))
setGeneric("g_weight", function(object) standardGeneric("g_weight"))
setGeneric("g_suppPattern", function(object) standardGeneric("g_suppPattern"))
# --- Replacement ("set") generics ------------------------------------------
setGeneric("s_sdcStatus<-", function(object, value) {
  standardGeneric("s_sdcStatus<-")
})
setGeneric("s_lb<-", function(object, value) {
  standardGeneric("s_lb<-")
})
setGeneric("s_ub<-", function(object, value) {
  standardGeneric("s_ub<-")
})
setGeneric("s_LPL<-", function(object, value) {
  standardGeneric("s_LPL<-")
})
setGeneric("s_UPL<-", function(object, value) {
  standardGeneric("s_UPL<-")
})
setGeneric("s_SPL<-", function(object, value) {
  standardGeneric("s_SPL<-")
})
# --- Computation ("calc") generics -----------------------------------------
setGeneric("c_make_masterproblem", function(object, input) standardGeneric("c_make_masterproblem"))
setGeneric("c_is_protected_solution", function(object, input) standardGeneric("c_is_protected_solution"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{dna_convert}
\alias{dna_convert}
\title{Convert image DNA to PNG format}
\usage{
dna_convert(dna, maxXY, tempf, pngWH, bg = "white")
}
\arguments{
\item{dna}{matrix or character, untangled or tangled image DNA of any size.}
\item{tempf}{temporary file generated by default or given as a file path.}
\item{pngWH}{vector, width and height of reconstructed image. If missing, width and height of original image are used.}
\item{bg}{character, color or RGB code indicating the background color of PNG.}
}
\description{
Function converts image DNA to array object including RGB or gray scale for each pixel.
}
\details{
See example...
}
\examples{
dna <- dna_untangle(dna_in(rgb = FALSE))
for(i in 1:20){
dna <- dna_mutate(dna)
}
test <- dna_convert(dna)
grid::grid.raster(test)
test[1,1,]
}
|
/man/dna_convert.Rd
|
permissive
|
herrmannrobert/GenArt
|
R
| false
| true
| 884
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{dna_convert}
\alias{dna_convert}
\title{Convert image DNA to PNG format}
\usage{
dna_convert(dna, maxXY, tempf, pngWH, bg = "white")
}
\arguments{
\item{dna}{matrix or character, untangled or tangled image DNA of any size.}
\item{tempf}{temporary file generated by default or given as a file path.}
\item{pngWH}{vector, width and height of reconstructed image. If missing, width and height of original image are used.}
\item{bg}{character, color or RGB code indicating the background color of PNG.}
}
\description{
Function converts image DNA to array object including RGB or gray scale for each pixel.
}
\details{
See example...
}
\examples{
dna <- dna_untangle(dna_in(rgb = FALSE))
for(i in 1:20){
dna <- dna_mutate(dna)
}
test <- dna_convert(dna)
grid::grid.raster(test)
test[1,1,]
}
|
## ui.R ##
# shinydashboard UI template: branded header, sidebar with filter controls,
# and a body holding an interactive plot plus a scrollable data table.
# Header ####
#https://stackoverflow.com/questions/31440564/adding-a-company-logo-to-shinydashboard-header
header <-
  dashboardHeader(
    title = "TBDS Shiny Template",
    # Company logo rendered as a right-hand "dropdown" list item linking home.
    tags$li(a(href = 'https://www.tbdsolutions.com/',
              img(src = 'tbdSolutions-logo.png',
                  height = "20px"),
              style = "padding-top:10px; padding-bottom:10px;"), class = "dropdown"
    )
  )
# Sidebar ####
sidebar <-
  dashboardSidebar(
    # Menu items are rendered server-side (see renderMenu for "ui_main_sidebar").
    sidebarMenuOutput("ui_main_sidebar"),
    br(),
    actionButton(inputId="update", label = "Update View"),
    br(),
    br(),
    # Weight range slider; Theoph ships with base R's datasets package.
    sliderInput(inputId = "wt", label = "Weight:", min = min(Theoph$Wt), max = max(Theoph$Wt), value = c(min(Theoph$Wt),mean(Theoph$Wt))),
    br(),
    br(),
    # Footer notes pinned to the bottom of the sidebar via fixed positioning.
    tags$small(
      tags$i(
        p("Data updated 2019",style="position: fixed; bottom: 25px; left:15px;")
      )
    ),
    tags$sub(
      a(href = "https://www.tbdsolutions.com/", "© TBDSolutions LLC - 2019", style="position: fixed; bottom: 15px; left:15px;")
    )
  )
# Body ####
body <-
  dashboardBody(
    plotlyOutput(outputId = "plot1", height = 400),
    br(),
    br(),
    # Box wrapped in a scrollable div so long tables don't stretch the page.
    tags$div(box(width = 12,height = 400, DT::dataTableOutput(outputId = "table1")), style = "overflow-y:scroll;")
    #DT::dataTableOutput(outputId = "table1")
  )
# NOTE(review): bookmarkButton() is called at top level and its return value
# is discarded, so it likely has no effect — confirm whether it should be
# placed inside the dashboard body instead.
bookmarkButton()
# Generate UI as a function to enable bookmarked state
function(req) { dashboardPage(skin = "black", header,sidebar,body) } #skin not working :/
|
/ui.R
|
no_license
|
bowmasar/TBDS_DataProjectTemplates
|
R
| false
| false
| 1,504
|
r
|
## ui.R ##
# shinydashboard UI template: branded header, sidebar with filter controls,
# and a body holding an interactive plot plus a scrollable data table.
# Header ####
#https://stackoverflow.com/questions/31440564/adding-a-company-logo-to-shinydashboard-header
header <-
  dashboardHeader(
    title = "TBDS Shiny Template",
    # Company logo rendered as a right-hand "dropdown" list item linking home.
    tags$li(a(href = 'https://www.tbdsolutions.com/',
              img(src = 'tbdSolutions-logo.png',
                  height = "20px"),
              style = "padding-top:10px; padding-bottom:10px;"), class = "dropdown"
    )
  )
# Sidebar ####
sidebar <-
  dashboardSidebar(
    # Menu items are rendered server-side (see renderMenu for "ui_main_sidebar").
    sidebarMenuOutput("ui_main_sidebar"),
    br(),
    actionButton(inputId="update", label = "Update View"),
    br(),
    br(),
    # Weight range slider; Theoph ships with base R's datasets package.
    sliderInput(inputId = "wt", label = "Weight:", min = min(Theoph$Wt), max = max(Theoph$Wt), value = c(min(Theoph$Wt),mean(Theoph$Wt))),
    br(),
    br(),
    # Footer notes pinned to the bottom of the sidebar via fixed positioning.
    tags$small(
      tags$i(
        p("Data updated 2019",style="position: fixed; bottom: 25px; left:15px;")
      )
    ),
    tags$sub(
      a(href = "https://www.tbdsolutions.com/", "© TBDSolutions LLC - 2019", style="position: fixed; bottom: 15px; left:15px;")
    )
  )
# Body ####
body <-
  dashboardBody(
    plotlyOutput(outputId = "plot1", height = 400),
    br(),
    br(),
    # Box wrapped in a scrollable div so long tables don't stretch the page.
    tags$div(box(width = 12,height = 400, DT::dataTableOutput(outputId = "table1")), style = "overflow-y:scroll;")
    #DT::dataTableOutput(outputId = "table1")
  )
# NOTE(review): bookmarkButton() is called at top level and its return value
# is discarded, so it likely has no effect — confirm whether it should be
# placed inside the dashboard body instead.
bookmarkButton()
# Generate UI as a function to enable bookmarked state
function(req) { dashboardPage(skin = "black", header,sidebar,body) } #skin not working :/
|
# Parallel bootstrap timing example (pattern from the doMC vignette): fit a
# logistic regression on resampled rows and cbind the coefficient vectors.
# NOTE(review): `trials` and `x` are not defined in this chunk — presumably
# set earlier in the file; confirm before running stand-alone.
ptime <- system.time({
  r <- foreach(icount(trials), .combine=cbind) %dopar% {
    ind <- sample(100, 100, replace=TRUE)
    result1 <- glm(x[ind,2]~x[ind,1], family=binomial(logit))
    coefficients(result1)
  }
})
library(data.table)
library(stringr)
library(foreach)
library(doMC)
registerDoMC(2) #change the 2 to your number of CPU cores
# Aggregate speed features over every trip file of driver 12.
dir = 'Documents/Kaggle/Driver Telematics/Data/drivers/12/'
#dir.create(paste0(dir,'/png/'))
data <- NULL
for( file in list.files(dir) ){
  print(file)
  path <- fread(paste0(dir,file))
  pngfile <- str_replace(file,".csv",".png")
  #path <- rotation0(path)
  # NOTE(review): speedDistribution() (and rotation0 above) are defined
  # elsewhere — confirm they are sourced before this loop runs.
  data <- rbind(data,speedDistribution(path))
  #qplot(data=path, x = x, y=y)
  #ggsave(paste0(dir,'/png/',pngfile))
}
data <- as.data.table(data)
plot(data[['s_']]/data[['v_']] )
# Per-trip speed (v: distance between consecutive GPS fixes) and
# acceleration (a: first difference of v) profiles for driver 1.
# NOTE(review): the hard-coded 862/561/931 are presumably nrow() of each CSV,
# so ex[-1] - ex[-nrow] yields consecutive differences — confirm, and
# consider replacing the literals with nrow().
ex_1_1 <- fread('Documents/Kaggle/Driver Telematics/Data/drivers/1/1.csv')
v <- sqrt(rowSums((ex_1_1[-1] - ex_1_1[-862])^2))
a <- v[-1] - v[-862]
plot(v)
plot(a)
ex_1_2 <- fread('Documents/Kaggle/Driver Telematics/Data/drivers/1/2.csv')
v <- sqrt(rowSums((ex_1_2[-1] - ex_1_2[-561])^2))
a <- v[-1] - v[-561]
plot(v)
plot(a)
ex_1_3 <- fread('Documents/Kaggle/Driver Telematics/Data/drivers/1/3.csv')
v <- sqrt(rowSums((ex_1_3[-1] - ex_1_3[-931])^2))
a <- v[-1] - v[-931]
plot(v)
plot(a)
|
/script.R
|
no_license
|
rrozas/Kaggle_Driver_Telematics
|
R
| false
| false
| 1,307
|
r
|
# Parallel bootstrap timing example (pattern from the doMC vignette): fit a
# logistic regression on resampled rows and cbind the coefficient vectors.
# NOTE(review): `trials` and `x` are not defined in this chunk — presumably
# set earlier in the file; confirm before running stand-alone.
ptime <- system.time({
  r <- foreach(icount(trials), .combine=cbind) %dopar% {
    ind <- sample(100, 100, replace=TRUE)
    result1 <- glm(x[ind,2]~x[ind,1], family=binomial(logit))
    coefficients(result1)
  }
})
library(data.table)
library(stringr)
library(foreach)
library(doMC)
registerDoMC(2) #change the 2 to your number of CPU cores
# Aggregate speed features over every trip file of driver 12.
dir = 'Documents/Kaggle/Driver Telematics/Data/drivers/12/'
#dir.create(paste0(dir,'/png/'))
data <- NULL
for( file in list.files(dir) ){
  print(file)
  path <- fread(paste0(dir,file))
  pngfile <- str_replace(file,".csv",".png")
  #path <- rotation0(path)
  # NOTE(review): speedDistribution() (and rotation0 above) are defined
  # elsewhere — confirm they are sourced before this loop runs.
  data <- rbind(data,speedDistribution(path))
  #qplot(data=path, x = x, y=y)
  #ggsave(paste0(dir,'/png/',pngfile))
}
data <- as.data.table(data)
plot(data[['s_']]/data[['v_']] )
# Per-trip speed (v: distance between consecutive GPS fixes) and
# acceleration (a: first difference of v) profiles for driver 1.
# NOTE(review): the hard-coded 862/561/931 are presumably nrow() of each CSV,
# so ex[-1] - ex[-nrow] yields consecutive differences — confirm, and
# consider replacing the literals with nrow().
ex_1_1 <- fread('Documents/Kaggle/Driver Telematics/Data/drivers/1/1.csv')
v <- sqrt(rowSums((ex_1_1[-1] - ex_1_1[-862])^2))
a <- v[-1] - v[-862]
plot(v)
plot(a)
ex_1_2 <- fread('Documents/Kaggle/Driver Telematics/Data/drivers/1/2.csv')
v <- sqrt(rowSums((ex_1_2[-1] - ex_1_2[-561])^2))
a <- v[-1] - v[-561]
plot(v)
plot(a)
ex_1_3 <- fread('Documents/Kaggle/Driver Telematics/Data/drivers/1/3.csv')
v <- sqrt(rowSums((ex_1_3[-1] - ex_1_3[-931])^2))
a <- v[-1] - v[-931]
plot(v)
plot(a)
|
# Developed for Forust.io
# A very general script to preprocess data before common Machine Learning procedures.
# This script imports data from an excel file, replaces missing values with "0",
# drops unneeded columns, and provides normalization functions.
# Author: Visakh Madathil
# (The original header used Python-style ''' quotes, which R only parses by
# accident as stray top-level string literals; # comments are the R idiom.)
#importing Excel File
library(readxl)
# NOTE(review): "file path" is a placeholder — replace with the actual .xlsx
# path before running.
mydata <- read_excel("file path")
View(mydata)
# is.nan() method for data frames: returns a logical matrix (one column per
# variable) flagging NaN entries, so that `mydata[is.nan(mydata)]` works.
is.nan.data.frame <- function(x){
  nan_by_column <- lapply(x, is.nan)
  do.call(cbind, nan_by_column)
}
#replacing N/A and NANvalues with 0
mydata[is.na(mydata)] <- 0
# NOTE(review): is.na() already returns TRUE for NaN, so the line above has
# normally handled NaN too; this second pass relies on the
# is.nan.data.frame method defined above.
mydata[is.nan(mydata)] <- 0
#Dropping unneeded columns (if needed)
# `Col Name` entries are placeholders for real column names; assigning NULL
# removes a column from a data frame.
mydata$`Col Name` <- NULL
mydata$`Col Name` <- NULL
mydata$`Col Name` <- NULL
mydata$`Col Name` <- NULL
mydata$`Col Name` <- NULL
#normalizing data (if needed)
# Min-max normalization: linearly rescale a numeric vector onto [0, 1].
# (A constant vector divides by zero and yields NaN, as in the original.)
normalize <- function(x){
  bounds <- range(x)
  (x - bounds[1]) / (bounds[2] - bounds[1])
}
# Apply min-max normalization column by column.
dataMMNorm <- as.data.frame(lapply(mydata, normalize))
#Z-Score normalization
# scale() centers each column and divides by its standard deviation.
dataZNorm <- as.data.frame(scale(mydata))
|
/DataPreProcess.R
|
no_license
|
vmmadathil/Data-Cleaning
|
R
| false
| false
| 990
|
r
|
# Developed for Forust.io
# A very general script to preprocess data before common Machine Learning procedures.
# This script imports data from an excel file, replaces missing values with "0",
# drops unneeded columns, and provides normalization functions.
# Author: Visakh Madathil
# (The original header used Python-style ''' quotes, which R only parses by
# accident as stray top-level string literals; # comments are the R idiom.)
#importing Excel File
library(readxl)
# NOTE(review): "file path" is a placeholder — replace with the actual .xlsx
# path before running.
mydata <- read_excel("file path")
View(mydata)
# is.nan() method for data frames: returns a logical matrix (one column per
# variable) flagging NaN entries, so that `mydata[is.nan(mydata)]` works.
is.nan.data.frame <- function(x){
  nan_by_column <- lapply(x, is.nan)
  do.call(cbind, nan_by_column)
}
#replacing N/A and NANvalues with 0
mydata[is.na(mydata)] <- 0
# NOTE(review): is.na() already returns TRUE for NaN, so the line above has
# normally handled NaN too; this second pass relies on the
# is.nan.data.frame method defined above.
mydata[is.nan(mydata)] <- 0
#Dropping unneeded columns (if needed)
# `Col Name` entries are placeholders for real column names; assigning NULL
# removes a column from a data frame.
mydata$`Col Name` <- NULL
mydata$`Col Name` <- NULL
mydata$`Col Name` <- NULL
mydata$`Col Name` <- NULL
mydata$`Col Name` <- NULL
#normalizing data (if needed)
# Min-max normalization: linearly rescale a numeric vector onto [0, 1].
# (A constant vector divides by zero and yields NaN, as in the original.)
normalize <- function(x){
  bounds <- range(x)
  (x - bounds[1]) / (bounds[2] - bounds[1])
}
# Apply min-max normalization column by column.
dataMMNorm <- as.data.frame(lapply(mydata, normalize))
#Z-Score normalization
# scale() centers each column and divides by its standard deviation.
dataZNorm <- as.data.frame(scale(mydata))
|
# remove F.het.MDPP and F.het.MDPL and F.grandis from PCA
# what are % PCA
# t-SNE embeddings of an expression matrix at the sample level (coloured by
# clade) and at the gene level.
# NOTE(review): `design`, `log_x` and `clade` are not created in this script —
# presumably defined by an earlier step in the pipeline; Rtsne() is also used
# without a library(Rtsne) call here. Confirm before running stand-alone.
library(ggplot2)
design
# Unpack design rows into per-sample vectors, dropping the first two
# (non-sample) columns. NOTE(review): meanings inferred from variable names
# (sp = species, ph = phenotype?, cl = clade, de = ?) — confirm.
sp<-as.character(unlist(design[1,]))
sp<-sp[-c(1,2)]
ph<-as.character(unlist(design[2,]))
ph<-ph[-c(1,2)]
cl<-as.character(unlist(design[3,]))
cl<-cl[-c(1,2)]
de<-as.character(unlist(design[4,]))
de<-de[-c(1,2)]
# clade
names<-colnames(log_x)
# Sample-level embedding: t() puts samples in rows; perplexity 10.
tsne<-Rtsne(t(log_x),dims=2,perplexity=10,verbose=T,max_iter=1000)
# NOTE(review): `clade` is bound here although the vector extracted above is
# `cl` — confirm `clade` exists in the calling environment.
tplot<-cbind(v1=tsne$Y[,1],v2=tsne$Y[,2],clade)
a<-as.data.frame(tplot)
ggplot(a,
       aes(x=v1,y=v2,color=cl,label=names))+
  geom_point(cex=3) +
  geom_text(aes(label=names),hjust=0,vjust=2)+
  theme_classic() +
  labs(x="tsne1",y="tsne2")+
  theme(axis.line=element_line(size=1.5),
        axis.title = element_text(size=20),
        legend.text = element_text(size=20),
        legend.title = element_text(size=20),
        axis.text=element_text(size=15))
# genes
# Gene-level embedding: rows of log_x directly, higher perplexity; the fixed
# seed keeps the layout reproducible.
set.seed(5)
tsne<-Rtsne(log_x,dims=2,perplexity=50,verbose=T,max_iter=1000,check_duplicates = FALSE)
tplot<-cbind(v1=tsne$Y[,1],v2=tsne$Y[,2])
a<-as.data.frame(tplot)
ggplot(a,
       aes(x=v1,y=v2))+
  geom_point(cex=1) +
  theme_classic() +
  labs(x="tsne1",y="tsne2")+
  theme(axis.line=element_line(size=1.5),
        axis.title = element_text(size=20),
        legend.text = element_text(size=20),
        legend.title = element_text(size=20),
        axis.text=element_text(size=15))
|
/scripts/tSNE.R
|
no_license
|
WhiteheadLab/RNAseq_17killifish
|
R
| false
| false
| 1,371
|
r
|
# remove F.het.MDPP and F.het.MDPL and F.grandis from PCA
# what are % PCA
# t-SNE embeddings of an expression matrix at the sample level (coloured by
# clade) and at the gene level.
# NOTE(review): `design`, `log_x` and `clade` are not created in this script —
# presumably defined by an earlier step in the pipeline; Rtsne() is also used
# without a library(Rtsne) call here. Confirm before running stand-alone.
library(ggplot2)
design
# Unpack design rows into per-sample vectors, dropping the first two
# (non-sample) columns. NOTE(review): meanings inferred from variable names
# (sp = species, ph = phenotype?, cl = clade, de = ?) — confirm.
sp<-as.character(unlist(design[1,]))
sp<-sp[-c(1,2)]
ph<-as.character(unlist(design[2,]))
ph<-ph[-c(1,2)]
cl<-as.character(unlist(design[3,]))
cl<-cl[-c(1,2)]
de<-as.character(unlist(design[4,]))
de<-de[-c(1,2)]
# clade
names<-colnames(log_x)
# Sample-level embedding: t() puts samples in rows; perplexity 10.
tsne<-Rtsne(t(log_x),dims=2,perplexity=10,verbose=T,max_iter=1000)
# NOTE(review): `clade` is bound here although the vector extracted above is
# `cl` — confirm `clade` exists in the calling environment.
tplot<-cbind(v1=tsne$Y[,1],v2=tsne$Y[,2],clade)
a<-as.data.frame(tplot)
ggplot(a,
       aes(x=v1,y=v2,color=cl,label=names))+
  geom_point(cex=3) +
  geom_text(aes(label=names),hjust=0,vjust=2)+
  theme_classic() +
  labs(x="tsne1",y="tsne2")+
  theme(axis.line=element_line(size=1.5),
        axis.title = element_text(size=20),
        legend.text = element_text(size=20),
        legend.title = element_text(size=20),
        axis.text=element_text(size=15))
# genes
# Gene-level embedding: rows of log_x directly, higher perplexity; the fixed
# seed keeps the layout reproducible.
set.seed(5)
tsne<-Rtsne(log_x,dims=2,perplexity=50,verbose=T,max_iter=1000,check_duplicates = FALSE)
tplot<-cbind(v1=tsne$Y[,1],v2=tsne$Y[,2])
a<-as.data.frame(tplot)
ggplot(a,
       aes(x=v1,y=v2))+
  geom_point(cex=1) +
  theme_classic() +
  labs(x="tsne1",y="tsne2")+
  theme(axis.line=element_line(size=1.5),
        axis.title = element_text(size=20),
        legend.text = element_text(size=20),
        legend.title = element_text(size=20),
        axis.text=element_text(size=15))
|
# Load a CSV and enumerate the unique (build, r, c, f) groups.
setwd("path")  # placeholder — point at the project directory before running
packages <- c("odbc","dplyr","readr","shinyjs","shiny","shinyWidgets")
lapply(packages, require, character.only = TRUE)
# Read every column as character; the original `co_character()` was a typo
# for readr's `col_character()` and errored at runtime.
M <- read_csv("path", progress = show_progress(),
              trim_ws = TRUE, na = c("","NA"),
              col_types = cols(.default = col_character()))
# Example data. The original `build` vector had only 7 elements while every
# other column had 8, which made data.frame() error out.
m <- data.frame(build=c('a','a','a','a','a','a','a','a'),
                r=c(1,1,2,2,1,1,2,2),
                c=c(1,2,1,2,1,2,1,2),
                f=c(1,2,1,2,1,2,1,2),
                m=c(50,50,50,50,50,50,50,50))
# One row per distinct (build, r, c, f) combination; the original grouped by
# `b`, a column that does not exist in `m`.
boxdf <- m %>%
  group_by(build, r, c, f) %>%
  summarize()
for (i in 1:nrow(boxdf)){
  # Column names fixed: boxdf has build/r/c/f, not room/class/freq.
  # NOTE(review): these assignments overwrite each other every iteration —
  # confirm whether per-group work was meant to happen inside the loop.
  b <- as.character(boxdf[i,"build"])
  r <- as.character(boxdf[i,"r"])
  c <- as.character(boxdf[i,"c"])
  f <- as.character(boxdf[i,"f"])
}
|
/groupBy.r
|
no_license
|
KateLam401/r
|
R
| false
| false
| 685
|
r
|
# Load a CSV and enumerate the unique (build, r, c, f) groups.
setwd("path")  # placeholder — point at the project directory before running
packages <- c("odbc","dplyr","readr","shinyjs","shiny","shinyWidgets")
lapply(packages, require, character.only = TRUE)
# Read every column as character; the original `co_character()` was a typo
# for readr's `col_character()` and errored at runtime.
M <- read_csv("path", progress = show_progress(),
              trim_ws = TRUE, na = c("","NA"),
              col_types = cols(.default = col_character()))
# Example data. The original `build` vector had only 7 elements while every
# other column had 8, which made data.frame() error out.
m <- data.frame(build=c('a','a','a','a','a','a','a','a'),
                r=c(1,1,2,2,1,1,2,2),
                c=c(1,2,1,2,1,2,1,2),
                f=c(1,2,1,2,1,2,1,2),
                m=c(50,50,50,50,50,50,50,50))
# One row per distinct (build, r, c, f) combination; the original grouped by
# `b`, a column that does not exist in `m`.
boxdf <- m %>%
  group_by(build, r, c, f) %>%
  summarize()
for (i in 1:nrow(boxdf)){
  # Column names fixed: boxdf has build/r/c/f, not room/class/freq.
  # NOTE(review): these assignments overwrite each other every iteration —
  # confirm whether per-group work was meant to happen inside the loop.
  b <- as.character(boxdf[i,"build"])
  r <- as.character(boxdf[i,"r"])
  c <- as.character(boxdf[i,"c"])
  f <- as.character(boxdf[i,"f"])
}
|
# Choropleth of coronavirus case totals drawn over a base map.
#
# Args:
#   mapdata:         sf object rendered as the light-grey background layer.
#   coronavirusdata: sf object with columns `date`, `type`, `cases` plus the
#                    grouping column; cases are summed per group before plotting.
#   type:            case type to keep (e.g. "confirmed"); compared against the
#                    `type` column via tidy-eval embracing.
#   grouping:        grouping variable, unquoted with `!!`. NOTE(review): this
#                    implies callers pass a pre-quoted symbol/quosure (e.g.
#                    rlang::sym(...)) — confirm against call sites.
#   trans:           fill-scale transformation (default "log10").
#
# Returns: a ggplot object; the subtitle reports the latest data date.
map_plot <- function(mapdata, coronavirusdata, type, grouping, trans = "log10") {
  # Most recent observation date, shown in the subtitle.
  current_date <- max(coronavirusdata$date, na.rm=TRUE)
  coronavirusdata <-
    coronavirusdata %>%
    # Data masking keeps the two `type`s distinct: bare `type` is the column,
    # {{type}} is the function argument.
    filter(type == {{type}})%>%
    group_by(!!(grouping)) %>%
    summarize(cases = sum(cases, na.rm=TRUE))
  out <- ggplot() +
    geom_sf(data = mapdata, fill = "lightgrey") +
    geom_sf(data = coronavirusdata, mapping = aes(fill = cases)) +
    # log10 by default so a few large outbreaks don't flatten the palette;
    # regions with missing counts render white.
    scale_fill_viridis_c(trans = trans, na.value = "white") +
    theme_minimal(base_size = 14) +
    labs(fill = paste0(stringr::str_to_title(type), "\nCases")) +
    coord_sf() +
    ggtitle("",
            subtitle = paste0("Totals current to ", current_date))
  return(out)
}
|
/scripts/mapplot.R
|
no_license
|
jebyrnes/covid19_shiny
|
R
| false
| false
| 737
|
r
|
# Choropleth of coronavirus case totals drawn over a base map.
#
# Args:
#   mapdata:         sf object rendered as the light-grey background layer.
#   coronavirusdata: sf object with columns `date`, `type`, `cases` plus the
#                    grouping column; cases are summed per group before plotting.
#   type:            case type to keep (e.g. "confirmed"); compared against the
#                    `type` column via tidy-eval embracing.
#   grouping:        grouping variable, unquoted with `!!`. NOTE(review): this
#                    implies callers pass a pre-quoted symbol/quosure (e.g.
#                    rlang::sym(...)) — confirm against call sites.
#   trans:           fill-scale transformation (default "log10").
#
# Returns: a ggplot object; the subtitle reports the latest data date.
map_plot <- function(mapdata, coronavirusdata, type, grouping, trans = "log10") {
  # Most recent observation date, shown in the subtitle.
  current_date <- max(coronavirusdata$date, na.rm=TRUE)
  coronavirusdata <-
    coronavirusdata %>%
    # Data masking keeps the two `type`s distinct: bare `type` is the column,
    # {{type}} is the function argument.
    filter(type == {{type}})%>%
    group_by(!!(grouping)) %>%
    summarize(cases = sum(cases, na.rm=TRUE))
  out <- ggplot() +
    geom_sf(data = mapdata, fill = "lightgrey") +
    geom_sf(data = coronavirusdata, mapping = aes(fill = cases)) +
    # log10 by default so a few large outbreaks don't flatten the palette;
    # regions with missing counts render white.
    scale_fill_viridis_c(trans = trans, na.value = "white") +
    theme_minimal(base_size = 14) +
    labs(fill = paste0(stringr::str_to_title(type), "\nCases")) +
    coord_sf() +
    ggtitle("",
            subtitle = paste0("Totals current to ", current_date))
  return(out)
}
|
# Jake Yeung
# Date of Creation: 2021-06-29
# File: ~/projects/scChIX/analysis_scripts/2-check_LDA_outputs.R
#
rm(list=ls())
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(topicmodels)
library(scchicFuncs)
library(hash)
library(igraph)
library(umap)
library(ggrepel)
source("/home/jyeung/projects/gastru_scchic/scripts/Rfunctions/QCFunctionsGastru.R")
# UMAP hyperparameters shared by all embeddings below.
jsettings <- umap.defaults
jsettings$n_neighbors <- 30
jsettings$min_dist <- 0.1
jsettings$random_state <- 123
# Load -------------------------------------------------------------------
hubprefix <- "/home/jyeung/hub_oudenaarden"
jsuffix <- "50000"
# Histone marks analysed: two singles plus the double-incubated sample.
jmarks <- c("K36", "K9m3", "K36-K9m3")
names(jmarks) <- jmarks
jmark <- jmarks[[1]]
# Resolve one LDA-output .Robj path per mark; assert each file exists up front.
infs <- lapply(jmarks, function(jmark){
print(jmark)
inf <- file.path(hubprefix, paste0("jyeung/data/dblchic/gastrulation/LDA_outputs/ldaAnalysis_", jsuffix, "/lda_outputs.count_tables.", jsuffix, ".", jmark, ".2021-06-28.K-30.binarize.FALSE/ldaOut.count_tables.", jsuffix, ".", jmark, ".2021-06-28.K-30.Robj"))
assertthat::assert_that(file.exists(inf))
return(inf)
})
# Load each fitted LDA and extract its posterior (topics x cells, terms x topics).
# AddTopicToTmResult comes from a loaded helper package (presumably
# scchicFuncs) — confirm; it appears to label the topic dimension.
tm.result.lst <- lapply(infs, function(inf){
load(inf, v=T) # out.lda
tm.result <- posterior(out.lda)
tm.result <- AddTopicToTmResult(tm.result)
return(tm.result)
})
# UMAP + Louvain clustering on the cell-topic matrix, one per mark.
dat.umap.lst <- lapply(tm.result.lst, function(tm.result){
dat.umap <- DoUmapAndLouvain(tm.result$topics, jsettings = jsettings)
return(dat.umap)
})
# Plot -------------------------------------------------------------------
cbPalette <- c("#696969", "#32CD32", "#56B4E9", "#FFB6C1", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf")
# One Louvain-colored UMAP per mark, shown side by side.
m.lst <- lapply(jmarks, function(jmark){
m <- ggplot(dat.umap.lst[[jmark]], aes(x = umap1, y = umap2, color = louvain)) +
geom_point() +
theme_bw() +
ggtitle(paste(jmark, "from 50kb bins")) +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
scale_color_manual(values = cbPalette)
return(m)
})
JFuncs::multiplot(m.lst[[1]], m.lst[[2]], m.lst[[3]], cols = 3)
# Check TES for K36 -------------------------------------------------------
jsuffix2 <- "TES"
jmark2 <- "K36"
# inf.tes <- file.path(hubprefix, paste0("jyeung/data/dblchic/gastrulation/LDA_outputs/ldaAnalysis_", jsuffix, "/lda_outputs.count_tables.", jsuffix, ".", jmark, ".2021-06-28.K-30.binarize.FALSE/ldaOut.count_tables.", jsuffix, ".", jmark, ".2021-06-28.K-30.Robj"))
inf.tes <- file.path(hubprefix, paste0("jyeung/data/dblchic/gastrulation/LDA_outputs/ldaAnalysis_", jsuffix2, "/lda_outputs.TES_counts.K36.2021-06-30.K-30.binarize.FALSE/ldaOut.", jsuffix2, "_counts.", jmark2, ".2021-06-30.K-30.Robj"))
assertthat::assert_that(file.exists(inf.tes))
# Same pipeline on the TSS-TES count table for K36; compare with the 50kb-bin UMAP.
load(inf.tes, v=T)
tm.result2 <- posterior(out.lda)
tm.result2 <- AddTopicToTmResult(tm.result2)
dat.umap2 <- DoUmapAndLouvain(tm.result2$topics, jsettings = jsettings)
m.k36 <- ggplot(dat.umap2, aes(x = umap1, y = umap2, color = louvain)) +
geom_point() +
theme_bw() +
scale_color_manual(values = cbPalette) +
ggtitle("From TSS-TES") +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
multiplot(m.lst$K36, m.k36, cols = 2)
# Load reference data to get cell types ----------------------------------
# Public scRNA-seq reference (Cao/Pijuana merged, batch-corrected) used to
# annotate topics with cell types.
inf.ref <- file.path(hubprefix, "jyeung/data/public_data/CaoPijuana_merged_batch_cor.2019-12-03.RData")
load(inf.ref, v=T)
# outdir <- "/home/jyeung/hub_oudenaarden/jyeung/data/dblchic/gastrulation/from_analysis/celltyping_MergedDataNoQuantNorm"
outdir <- "/home/jyeung/hub_oudenaarden/jyeung/data/dblchic/gastrulation/from_analysis/celltyping_MergedDataNoQuantNorm_ShendureOnly_RenormScale_StricterAnnots"
dir.create(outdir)
# dat.mat.filt.batchcor <- t(dat.mat.filt.batchcor)
# NOTE: despite the name, this is the *uncorrected* matrix (dat.mat.filt from
# the .RData); the batch-corrected version is deliberately not used here.
dat.mat.filt.batchcor <- dat.mat.filt
# keep only celltypes that start with number (shendure more late stage?)
cnames.keep <- grepl("^[[:digit:]]+", colnames(dat.mat.filt.batchcor))
dat.mat.filt.batchcor <- dat.mat.filt.batchcor[, cnames.keep]
# mutate(is.late = grepl("^[[:digit:]]+", celltype))
# renormalize?
# Row-wise (per-gene) z-scoring via double transpose.
dat.mat.filt.batchcor <- t(scale(t(dat.mat.filt.batchcor), center = TRUE, scale = TRUE))
# check batch
pca.public <- prcomp(dat.mat.filt.batchcor, center = TRUE, scale. = TRUE)
dat.pca.public <- data.frame(celltype = rownames(pca.public$x), pca.public$x, stringsAsFactors = FALSE) %>%
rowwise() %>%
mutate(is.late = grepl("^[[:digit:]]+", celltype))
# Visual check that late-stage (digit-prefixed) celltypes do not separate as a batch.
ggplot(dat.pca.public, aes(x = PC1, y = PC2, color = is.late)) +
geom_point() +
theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# # do quant norm again??? No
#
# boxplot(dat.mat.filt.batchcor)
# cnames.before <- colnames(dat.mat.filt.batchcor)
# rnames.before <- rownames(dat.mat.filt.batchcor)
# dat.mat.filt.batchcor <- preprocessCore::normalize.quantiles(dat.mat.filt.batchcor, copy=TRUE)
# colnames(dat.mat.filt.batchcor) <- cnames.before
# rownames(dat.mat.filt.batchcor) <- rnames.before
# dat.mat.filt.batchcor <- preprocessCore::normalize.quantiles(dat.mat.filt.batchcor, copy = TRUE)
# colnames(dat.mat.filt.batchcor)
# Map Ensembl IDs (text before the first ".") to gene symbols; make.names
# keeps rownames syntactic and unique.
genes.orig <- sapply(rownames(dat.mat.filt.batchcor), function(x) strsplit(x, "\\.")[[1]][[1]])
genes.annot <- JFuncs::EnsemblGene2Gene(gene.list = genes.orig, return.original = TRUE)
names(genes.annot) <- genes.orig
rownames(dat.mat.filt.batchcor) <- make.names(genes.annot, unique = TRUE)
# boxplot(dat.mat.filt.batchcor)
# Long-format expression table with a per-gene z-score across celltypes.
dat.norm.df <- tidyr::gather(data.frame(gene = rownames(dat.mat.filt.batchcor), dat.mat.filt.batchcor), key = "celltype", value = "counts", -gene) %>%
group_by(gene) %>%
mutate(zscore = scale(counts, center = TRUE, scale = TRUE))
# Get celltypes by looking at topics -------------------------------------
# plot topics and merge with reference data
# H3K36me3 only: look at topics 50kb and assign each gene to nearest bin
# let's do TSS-TES maybe it's easier??
# Number of top-ranked terms per topic used for celltype scoring below.
keeptop <- 150
# outdir <- "/home/jyeung/hub_oudenaarden/jyeung/data/dblchic/gastrulation/from_analysis/celltyping"
# dir.create(outdir)
jchromos <- paste("chr", c(seq(19), "X", "Y"), sep = "")
jinf.tss <- "/home/jyeung/hub_oudenaarden/jyeung/data/databases/gene_tss/gene_tss_winsize.50000.bed"
# Union of all bin coordinates (term names) across the three LDA fits.
coords <- lapply(tm.result.lst, function(x){
colnames(x$terms)
# sapply(rownames(x$dat.raw.pbulk), function(x) strsplit(x, ";")[[1]][[2]], USE.NAMES = FALSE)
}) %>%
unlist() %>%
unique()
library(TxDb.Mmusculus.UCSC.mm10.knownGene)
library(org.Mm.eg.db)
library(ChIPseeker)
library(GenomicRanges)
# coords.makenames <- make.names(coords)
# coords.makenames <- gsub(pattern = "\\:", "\\.", coords)
# coords.makenames <- gsub(pattern = "\\-", "\\.", coords.makenames)
# Annotate each bin with its nearest gene (mm10). regions_coord2 holds the
# make.names()-sanitized coordinate so it matches LDA term names, which were
# sanitized the same way when read into R.
coords.annot <- AnnotateCoordsFromList.GeneWise(coords.vec = coords, inf.tss = jinf.tss, txdb = TxDb.Mmusculus.UCSC.mm10.knownGene, annodb = "org.Mm.eg.db", chromos.keep = jchromos)
coords.annot$regions.annotated$regions_coord2 <- make.names(coords.annot$regions.annotated$region_coord)
coords.annot$out2.df$regions_coord2 <- make.names(coords.annot$out2.df$region_coord)
# head(coords.annot$regions.annotated)
# coords.annot.lst <- lapply(coords.lst, function(coords){
# })
# For each mark: rank topics by entropy, overlay topic loadings on the UMAP,
# and write one PDF per mark with QC plots plus, per topic, the top genes'
# reference-expression z-scores and term-weight rankings.
for (jmark in jmarks){
print(jmark)
topics.ordered.tmp <- OrderTopicsByEntropy(tm.result = tm.result.lst[[jmark]])
# plot topic loadings to each UMAP
dat.topics.tmp <- data.frame(cell = rownames(tm.result.lst[[jmark]]$topics), tm.result.lst[[jmark]]$topics, stringsAsFactors = FALSE)
dat.umap.withtopics.tmp <- left_join(dat.umap.lst[[jmark]], dat.topics.tmp)
# add stages
dat.umap.withtopics.tmp$stage <- sapply(dat.umap.withtopics.tmp$cell, function(cell) StageToNumeric(GetStage(PreprocessSamp(cell))))
# get plates
dat.umap.withtopics.tmp$plate <- sapply(dat.umap.withtopics.tmp$cell, function(x) ClipLast(x, jsep = "_"))
cbPalette <- c("#696969", "#32CD32", "#56B4E9", "#FFB6C1", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf")
# QC UMAPs: colored by plate (m1), stage (m2), faceted by plate (m3) and
# stage (m4), and colored by Louvain cluster (m5).
m1 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = plate)) +
scale_color_manual(values = cbPalette) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
m2 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = as.character(stage))) +
scale_color_manual(values = cbPalette) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
m3 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = plate)) +
scale_color_manual(values = cbPalette) +
facet_wrap(~plate) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
m4 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = as.character(stage))) +
scale_color_manual(values = cbPalette) +
facet_wrap(~stage) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
m5 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = as.character(louvain))) +
scale_color_manual(values = cbPalette) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
# Long-format topic-term weights; joined with gene annotations and ranked
# (rnk = 1 is the heaviest term) within each topic.
terms.filt.tmp <- data.frame(topic = rownames(tm.result.lst[[jmark]]$terms), as.data.frame(tm.result.lst[[jmark]]$terms)) %>%
tidyr::gather(key = "term", value = "weight", -topic) %>%
rowwise()
# terms.filt.tmp.merge <- left_join(terms.filt.tmp, coords.annot$out2.df, by = c("term" = "regions_coord2"))
terms.filt.tmp.merge <- left_join(terms.filt.tmp, coords.annot$out2.df, by = c("term" = "regions_coord2")) %>%
# mutate(gene = ) %>%
group_by(topic) %>%
arrange(desc(weight)) %>%
mutate(rnk = rank(-weight))
print(head(terms.filt.tmp.merge))
outpdf <- file.path(outdir, paste0("bins_50kb_", jmark, "_celltyping_topics.", Sys.Date(), ".pdf"))
pdf(outpdf, useDingbats = FALSE)
print(m1)
print(m2)
print(m3)
print(m4)
print(m5)
# Per topic (entropy order): UMAP colored by topic loading, boxplot of the
# top genes' reference z-scores per celltype, and a labeled weight-rank plot.
for (jtop in topics.ordered.tmp$topic){
print(jtop)
# i <- strsplit(jtop, "_")[[1]][[2]]
m.umap <- PlotXYWithColor(dat.umap.withtopics.tmp, xvar = "umap1", yvar = "umap2", cname = jtop) + scale_color_viridis_c()
top.genes <- subset(terms.filt.tmp.merge, topic == jtop & rnk <= keeptop)$gene
assertthat::assert_that(length(top.genes) > 0)
jsub <- subset(dat.norm.df, gene %in% top.genes)
# Order celltypes by descending median z-score so the boxplot reads left-to-right.
jsub.sorted.summarised <- jsub %>% group_by(celltype) %>% summarise(zscore = median(zscore)) %>% arrange(desc(zscore)) %>% dplyr::select(celltype)
jlevels <- as.character(jsub.sorted.summarised$celltype)
jsub$celltype <- factor(jsub$celltype, levels = jlevels)
m.exprs <- ggplot(jsub,
aes(x = celltype , y = zscore)) +
geom_boxplot(outlier.shape = NA) +
# geom_violin() +
geom_jitter(width = 0.1, size = 0.5) +
# geom_line() +
theme_classic() +
theme(axis.text.x = element_text(angle = 45, hjust = 1, size = 4)) +
ggtitle(paste(jtop, "Top:", keeptop, "N Unique Genes", length(top.genes)))
print(m.umap)
print(m.exprs)
# plot top 150 genes?
jsub.terms <- subset(terms.filt.tmp.merge, topic == jtop & rnk < keeptop) %>%
ungroup() %>%
mutate(term = forcats::fct_reorder(term, dplyr::desc(weight)))
m.top <- jsub.terms %>%
# mutate(term = forcats::fct_reorder(term, dplyr::desc(weight))) %>%
ggplot(aes(x = term, y = log10(weight), label = gene)) +
geom_point(size = 0.25) +
theme_bw(8) +
# geom_text_repel(size = keeptop / 150, segment.size = 0.1, segment.alpha = 0.25) +
# theme(aspect.ratio=0.2, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_text(angle = 45, hjust = 1, size = keeptop / 200)) +
geom_text_repel(size = 2, segment.size = 0.1, segment.alpha = 0.25) +
theme(aspect.ratio=0.2, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_text(angle = 45, hjust = 1, size = 3.5)) +
xlab("") + ylab("Log10 Bin Weight") +
ggtitle(paste("Top peak weights for:", jtop))
print(m.top)
}
dev.off()
}
# Do both marks show that K9me3 is not useful ----------------------------
#
#
# for (jtop in topics.ordered.tmp$topic){
# print(jtop)
# m.tmp <- ggplot(dat.umap.withtopics.tmp, aes_string(x = "umap1", y = "umap2", color = jtop)) +
# geom_point() +
# scale_color_viridis_c() +
# theme_bw() +
# theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# print(m.tmp)
#
# # get gene loadings
#
# break
# }
# plot gene loadings for each topic
# merge with reference data
# Same topic-by-topic celltyping as above, but for the K36 TSS-TES LDA
# (tm.result2 / dat.umap2). Gene names here come from the term string itself
# (7th "."-separated field) rather than from the bin annotation table.
topics.ordered.tmp <- OrderTopicsByEntropy(tm.result = tm.result2)
# plot topic loadings to each UMAP
dat.topics <- data.frame(cell = rownames(tm.result2$topics), tm.result2$topics, stringsAsFactors = FALSE)
dat.umap.withtopics.tmp <- left_join(dat.umap2, dat.topics)
# add stages
dat.umap.withtopics.tmp$stage <- sapply(dat.umap.withtopics.tmp$cell, function(cell) StageToNumeric(GetStage(PreprocessSamp(cell))))
# get plates
dat.umap.withtopics.tmp$plate <- sapply(dat.umap.withtopics.tmp$cell, function(x) ClipLast(x, jsep = "_"))
cbPalette <- c("#696969", "#32CD32", "#56B4E9", "#FFB6C1", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf")
# QC UMAPs: plate (m1), stage (m2), faceted by plate (m3) / stage (m4), Louvain (m5).
m1 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = plate)) +
scale_color_manual(values = cbPalette) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
m2 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = as.character(stage))) +
scale_color_manual(values = cbPalette) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
m3 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = plate)) +
scale_color_manual(values = cbPalette) +
facet_wrap(~plate) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
m4 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = as.character(stage))) +
scale_color_manual(values = cbPalette) +
facet_wrap(~stage) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
m5 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = as.character(louvain))) +
scale_color_manual(values = cbPalette) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
# Topic-term weights in long format; gene symbol parsed out of the term name,
# then terms ranked by weight within each topic (rnk = 1 is heaviest).
terms.filt.tmp <- data.frame(topic = rownames(tm.result2$terms), as.data.frame(tm.result2$terms)) %>%
tidyr::gather(key = "term", value = "weight", -topic) %>%
rowwise() %>%
mutate(gene = strsplit(term, "\\.")[[1]][[7]]) %>%
mutate(gene = gsub("_", "", gene)) %>%
group_by(topic) %>%
arrange(desc(weight)) %>%
mutate(rnk = rank(-weight))
outpdf <- file.path(outdir, paste0("TSSTES50kbmax_K36_celltyping_topics.", Sys.Date(), ".pdf"))
pdf(outpdf, useDingbats = FALSE)
print(m1)
print(m2)
print(m3)
print(m4)
print(m5)
# Per topic: loading-colored UMAP, reference z-score boxplot for top genes,
# and labeled term-weight rank plot.
for (jtop in topics.ordered.tmp$topic){
print(jtop)
# i <- strsplit(jtop, "_")[[1]][[2]]
m.umap <- PlotXYWithColor(dat.umap.withtopics.tmp, xvar = "umap1", yvar = "umap2", cname = jtop) + scale_color_viridis_c()
top.genes <- subset(terms.filt.tmp, topic == jtop & rnk <= keeptop)$gene
assertthat::assert_that(length(top.genes) > 0)
jsub <- subset(dat.norm.df, gene %in% top.genes)
jsub.sorted.summarised <- jsub %>% group_by(celltype) %>% summarise(zscore = median(zscore)) %>% arrange(desc(zscore)) %>% dplyr::select(celltype)
jlevels <- as.character(jsub.sorted.summarised$celltype)
jsub$celltype <- factor(jsub$celltype, levels = jlevels)
m.exprs <- ggplot(jsub,
aes(x = celltype , y = zscore)) +
geom_boxplot(outlier.shape = NA) +
# geom_violin() +
geom_jitter(width = 0.1, size = 0.5) +
# geom_line() +
theme_classic() +
theme(axis.text.x = element_text(angle = 45, hjust = 1, size = 4)) +
ggtitle(paste(jtop, "Top:", keeptop, "N Unique Genes", length(top.genes)))
print(m.umap)
print(m.exprs)
# plot top 150 genes?
jsub.terms <- subset(terms.filt.tmp, topic == jtop & rnk < keeptop) %>%
ungroup() %>%
mutate(term = forcats::fct_reorder(term, dplyr::desc(weight)))
m.top <- jsub.terms %>%
# mutate(term = forcats::fct_reorder(term, dplyr::desc(weight))) %>%
ggplot(aes(x = term, y = log10(weight), label = gene)) +
geom_point(size = 0.25) +
theme_bw(8) +
# geom_text_repel(size = keeptop / 150, segment.size = 0.1, segment.alpha = 0.25) +
# theme(aspect.ratio=0.2, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_text(angle = 45, hjust = 1, size = keeptop / 200)) +
geom_text_repel(size = 2, segment.size = 0.1, segment.alpha = 0.25) +
theme(aspect.ratio=0.2, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_text(angle = 45, hjust = 1, size = 3.5)) +
xlab("") + ylab("Log10 Bin Weight") +
ggtitle(paste("Top peak weights for:", jtop))
print(m.top)
}
dev.off()
|
/analysis_scripts/2-check_LDA_outputs.R
|
no_license
|
jakeyeung/scChIX
|
R
| false
| false
| 18,029
|
r
|
# Jake Yeung
# Date of Creation: 2021-06-29
# File: ~/projects/scChIX/analysis_scripts/2-check_LDA_outputs.R
#
rm(list=ls())
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(topicmodels)
library(scchicFuncs)
library(hash)
library(igraph)
library(umap)
library(ggrepel)
source("/home/jyeung/projects/gastru_scchic/scripts/Rfunctions/QCFunctionsGastru.R")
# (Second copy of the same script stored in this dump; documented identically.)
# UMAP hyperparameters shared by all embeddings below.
jsettings <- umap.defaults
jsettings$n_neighbors <- 30
jsettings$min_dist <- 0.1
jsettings$random_state <- 123
# Load -------------------------------------------------------------------
hubprefix <- "/home/jyeung/hub_oudenaarden"
jsuffix <- "50000"
# Histone marks analysed: two singles plus the double-incubated sample.
jmarks <- c("K36", "K9m3", "K36-K9m3")
names(jmarks) <- jmarks
jmark <- jmarks[[1]]
# Resolve one LDA-output .Robj path per mark; assert each file exists up front.
infs <- lapply(jmarks, function(jmark){
print(jmark)
inf <- file.path(hubprefix, paste0("jyeung/data/dblchic/gastrulation/LDA_outputs/ldaAnalysis_", jsuffix, "/lda_outputs.count_tables.", jsuffix, ".", jmark, ".2021-06-28.K-30.binarize.FALSE/ldaOut.count_tables.", jsuffix, ".", jmark, ".2021-06-28.K-30.Robj"))
assertthat::assert_that(file.exists(inf))
return(inf)
})
# Load each fitted LDA and extract its posterior (topics x cells, terms x topics).
tm.result.lst <- lapply(infs, function(inf){
load(inf, v=T) # out.lda
tm.result <- posterior(out.lda)
tm.result <- AddTopicToTmResult(tm.result)
return(tm.result)
})
# UMAP + Louvain clustering on the cell-topic matrix, one per mark.
dat.umap.lst <- lapply(tm.result.lst, function(tm.result){
dat.umap <- DoUmapAndLouvain(tm.result$topics, jsettings = jsettings)
return(dat.umap)
})
# Plot -------------------------------------------------------------------
cbPalette <- c("#696969", "#32CD32", "#56B4E9", "#FFB6C1", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf")
# One Louvain-colored UMAP per mark, shown side by side.
m.lst <- lapply(jmarks, function(jmark){
m <- ggplot(dat.umap.lst[[jmark]], aes(x = umap1, y = umap2, color = louvain)) +
geom_point() +
theme_bw() +
ggtitle(paste(jmark, "from 50kb bins")) +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
scale_color_manual(values = cbPalette)
return(m)
})
JFuncs::multiplot(m.lst[[1]], m.lst[[2]], m.lst[[3]], cols = 3)
# Check TES for K36 -------------------------------------------------------
jsuffix2 <- "TES"
jmark2 <- "K36"
# inf.tes <- file.path(hubprefix, paste0("jyeung/data/dblchic/gastrulation/LDA_outputs/ldaAnalysis_", jsuffix, "/lda_outputs.count_tables.", jsuffix, ".", jmark, ".2021-06-28.K-30.binarize.FALSE/ldaOut.count_tables.", jsuffix, ".", jmark, ".2021-06-28.K-30.Robj"))
inf.tes <- file.path(hubprefix, paste0("jyeung/data/dblchic/gastrulation/LDA_outputs/ldaAnalysis_", jsuffix2, "/lda_outputs.TES_counts.K36.2021-06-30.K-30.binarize.FALSE/ldaOut.", jsuffix2, "_counts.", jmark2, ".2021-06-30.K-30.Robj"))
assertthat::assert_that(file.exists(inf.tes))
# Same pipeline on the TSS-TES count table for K36; compare with the 50kb-bin UMAP.
load(inf.tes, v=T)
tm.result2 <- posterior(out.lda)
tm.result2 <- AddTopicToTmResult(tm.result2)
dat.umap2 <- DoUmapAndLouvain(tm.result2$topics, jsettings = jsettings)
m.k36 <- ggplot(dat.umap2, aes(x = umap1, y = umap2, color = louvain)) +
geom_point() +
theme_bw() +
scale_color_manual(values = cbPalette) +
ggtitle("From TSS-TES") +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
multiplot(m.lst$K36, m.k36, cols = 2)
# Load reference data to get cell types ----------------------------------
# Public scRNA-seq reference (Cao/Pijuana merged, batch-corrected) used to
# annotate topics with cell types.
inf.ref <- file.path(hubprefix, "jyeung/data/public_data/CaoPijuana_merged_batch_cor.2019-12-03.RData")
load(inf.ref, v=T)
# outdir <- "/home/jyeung/hub_oudenaarden/jyeung/data/dblchic/gastrulation/from_analysis/celltyping_MergedDataNoQuantNorm"
outdir <- "/home/jyeung/hub_oudenaarden/jyeung/data/dblchic/gastrulation/from_analysis/celltyping_MergedDataNoQuantNorm_ShendureOnly_RenormScale_StricterAnnots"
dir.create(outdir)
# dat.mat.filt.batchcor <- t(dat.mat.filt.batchcor)
# NOTE: despite the name, this is the *uncorrected* matrix (dat.mat.filt from
# the .RData); the batch-corrected version is deliberately not used here.
dat.mat.filt.batchcor <- dat.mat.filt
# keep only celltypes that start with number (shendure more late stage?)
cnames.keep <- grepl("^[[:digit:]]+", colnames(dat.mat.filt.batchcor))
dat.mat.filt.batchcor <- dat.mat.filt.batchcor[, cnames.keep]
# mutate(is.late = grepl("^[[:digit:]]+", celltype))
# renormalize?
# Row-wise (per-gene) z-scoring via double transpose.
dat.mat.filt.batchcor <- t(scale(t(dat.mat.filt.batchcor), center = TRUE, scale = TRUE))
# check batch
pca.public <- prcomp(dat.mat.filt.batchcor, center = TRUE, scale. = TRUE)
dat.pca.public <- data.frame(celltype = rownames(pca.public$x), pca.public$x, stringsAsFactors = FALSE) %>%
rowwise() %>%
mutate(is.late = grepl("^[[:digit:]]+", celltype))
# Visual check that late-stage (digit-prefixed) celltypes do not separate as a batch.
ggplot(dat.pca.public, aes(x = PC1, y = PC2, color = is.late)) +
geom_point() +
theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# # do quant norm again??? No
#
# boxplot(dat.mat.filt.batchcor)
# cnames.before <- colnames(dat.mat.filt.batchcor)
# rnames.before <- rownames(dat.mat.filt.batchcor)
# dat.mat.filt.batchcor <- preprocessCore::normalize.quantiles(dat.mat.filt.batchcor, copy=TRUE)
# colnames(dat.mat.filt.batchcor) <- cnames.before
# rownames(dat.mat.filt.batchcor) <- rnames.before
# dat.mat.filt.batchcor <- preprocessCore::normalize.quantiles(dat.mat.filt.batchcor, copy = TRUE)
# colnames(dat.mat.filt.batchcor)
# Map Ensembl IDs (text before the first ".") to gene symbols; make.names
# keeps rownames syntactic and unique.
genes.orig <- sapply(rownames(dat.mat.filt.batchcor), function(x) strsplit(x, "\\.")[[1]][[1]])
genes.annot <- JFuncs::EnsemblGene2Gene(gene.list = genes.orig, return.original = TRUE)
names(genes.annot) <- genes.orig
rownames(dat.mat.filt.batchcor) <- make.names(genes.annot, unique = TRUE)
# boxplot(dat.mat.filt.batchcor)
# Long-format expression table with a per-gene z-score across celltypes.
dat.norm.df <- tidyr::gather(data.frame(gene = rownames(dat.mat.filt.batchcor), dat.mat.filt.batchcor), key = "celltype", value = "counts", -gene) %>%
group_by(gene) %>%
mutate(zscore = scale(counts, center = TRUE, scale = TRUE))
# Get celltypes by looking at topics -------------------------------------
# plot topics and merge with reference data
# H3K36me3 only: look at topics 50kb and assign each gene to nearest bin
# let's do TSS-TES maybe it's easier??
# Number of top-ranked terms per topic used for celltype scoring below.
keeptop <- 150
# outdir <- "/home/jyeung/hub_oudenaarden/jyeung/data/dblchic/gastrulation/from_analysis/celltyping"
# dir.create(outdir)
jchromos <- paste("chr", c(seq(19), "X", "Y"), sep = "")
jinf.tss <- "/home/jyeung/hub_oudenaarden/jyeung/data/databases/gene_tss/gene_tss_winsize.50000.bed"
# Union of all bin coordinates (term names) across the three LDA fits.
coords <- lapply(tm.result.lst, function(x){
colnames(x$terms)
# sapply(rownames(x$dat.raw.pbulk), function(x) strsplit(x, ";")[[1]][[2]], USE.NAMES = FALSE)
}) %>%
unlist() %>%
unique()
library(TxDb.Mmusculus.UCSC.mm10.knownGene)
library(org.Mm.eg.db)
library(ChIPseeker)
library(GenomicRanges)
# coords.makenames <- make.names(coords)
# coords.makenames <- gsub(pattern = "\\:", "\\.", coords)
# coords.makenames <- gsub(pattern = "\\-", "\\.", coords.makenames)
# Annotate each bin with its nearest gene (mm10). regions_coord2 holds the
# make.names()-sanitized coordinate so it matches the sanitized LDA term names.
coords.annot <- AnnotateCoordsFromList.GeneWise(coords.vec = coords, inf.tss = jinf.tss, txdb = TxDb.Mmusculus.UCSC.mm10.knownGene, annodb = "org.Mm.eg.db", chromos.keep = jchromos)
coords.annot$regions.annotated$regions_coord2 <- make.names(coords.annot$regions.annotated$region_coord)
coords.annot$out2.df$regions_coord2 <- make.names(coords.annot$out2.df$region_coord)
# head(coords.annot$regions.annotated)
# coords.annot.lst <- lapply(coords.lst, function(coords){
# })
# For each mark: rank topics by entropy, overlay topic loadings on the UMAP,
# and write one PDF per mark with QC plots plus, per topic, the top genes'
# reference-expression z-scores and term-weight rankings.
for (jmark in jmarks){
print(jmark)
topics.ordered.tmp <- OrderTopicsByEntropy(tm.result = tm.result.lst[[jmark]])
# plot topic loadings to each UMAP
dat.topics.tmp <- data.frame(cell = rownames(tm.result.lst[[jmark]]$topics), tm.result.lst[[jmark]]$topics, stringsAsFactors = FALSE)
dat.umap.withtopics.tmp <- left_join(dat.umap.lst[[jmark]], dat.topics.tmp)
# add stages
dat.umap.withtopics.tmp$stage <- sapply(dat.umap.withtopics.tmp$cell, function(cell) StageToNumeric(GetStage(PreprocessSamp(cell))))
# get plates
dat.umap.withtopics.tmp$plate <- sapply(dat.umap.withtopics.tmp$cell, function(x) ClipLast(x, jsep = "_"))
cbPalette <- c("#696969", "#32CD32", "#56B4E9", "#FFB6C1", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf")
# QC UMAPs: colored by plate (m1), stage (m2), faceted by plate (m3) and
# stage (m4), and colored by Louvain cluster (m5).
m1 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = plate)) +
scale_color_manual(values = cbPalette) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
m2 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = as.character(stage))) +
scale_color_manual(values = cbPalette) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
m3 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = plate)) +
scale_color_manual(values = cbPalette) +
facet_wrap(~plate) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
m4 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = as.character(stage))) +
scale_color_manual(values = cbPalette) +
facet_wrap(~stage) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
m5 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = as.character(louvain))) +
scale_color_manual(values = cbPalette) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
# Long-format topic-term weights; joined with gene annotations and ranked
# (rnk = 1 is the heaviest term) within each topic.
terms.filt.tmp <- data.frame(topic = rownames(tm.result.lst[[jmark]]$terms), as.data.frame(tm.result.lst[[jmark]]$terms)) %>%
tidyr::gather(key = "term", value = "weight", -topic) %>%
rowwise()
# terms.filt.tmp.merge <- left_join(terms.filt.tmp, coords.annot$out2.df, by = c("term" = "regions_coord2"))
terms.filt.tmp.merge <- left_join(terms.filt.tmp, coords.annot$out2.df, by = c("term" = "regions_coord2")) %>%
# mutate(gene = ) %>%
group_by(topic) %>%
arrange(desc(weight)) %>%
mutate(rnk = rank(-weight))
print(head(terms.filt.tmp.merge))
outpdf <- file.path(outdir, paste0("bins_50kb_", jmark, "_celltyping_topics.", Sys.Date(), ".pdf"))
pdf(outpdf, useDingbats = FALSE)
print(m1)
print(m2)
print(m3)
print(m4)
print(m5)
# Per topic (entropy order): UMAP colored by topic loading, boxplot of the
# top genes' reference z-scores per celltype, and a labeled weight-rank plot.
for (jtop in topics.ordered.tmp$topic){
print(jtop)
# i <- strsplit(jtop, "_")[[1]][[2]]
m.umap <- PlotXYWithColor(dat.umap.withtopics.tmp, xvar = "umap1", yvar = "umap2", cname = jtop) + scale_color_viridis_c()
top.genes <- subset(terms.filt.tmp.merge, topic == jtop & rnk <= keeptop)$gene
assertthat::assert_that(length(top.genes) > 0)
jsub <- subset(dat.norm.df, gene %in% top.genes)
# Order celltypes by descending median z-score so the boxplot reads left-to-right.
jsub.sorted.summarised <- jsub %>% group_by(celltype) %>% summarise(zscore = median(zscore)) %>% arrange(desc(zscore)) %>% dplyr::select(celltype)
jlevels <- as.character(jsub.sorted.summarised$celltype)
jsub$celltype <- factor(jsub$celltype, levels = jlevels)
m.exprs <- ggplot(jsub,
aes(x = celltype , y = zscore)) +
geom_boxplot(outlier.shape = NA) +
# geom_violin() +
geom_jitter(width = 0.1, size = 0.5) +
# geom_line() +
theme_classic() +
theme(axis.text.x = element_text(angle = 45, hjust = 1, size = 4)) +
ggtitle(paste(jtop, "Top:", keeptop, "N Unique Genes", length(top.genes)))
print(m.umap)
print(m.exprs)
# plot top 150 genes?
jsub.terms <- subset(terms.filt.tmp.merge, topic == jtop & rnk < keeptop) %>%
ungroup() %>%
mutate(term = forcats::fct_reorder(term, dplyr::desc(weight)))
m.top <- jsub.terms %>%
# mutate(term = forcats::fct_reorder(term, dplyr::desc(weight))) %>%
ggplot(aes(x = term, y = log10(weight), label = gene)) +
geom_point(size = 0.25) +
theme_bw(8) +
# geom_text_repel(size = keeptop / 150, segment.size = 0.1, segment.alpha = 0.25) +
# theme(aspect.ratio=0.2, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_text(angle = 45, hjust = 1, size = keeptop / 200)) +
geom_text_repel(size = 2, segment.size = 0.1, segment.alpha = 0.25) +
theme(aspect.ratio=0.2, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_text(angle = 45, hjust = 1, size = 3.5)) +
xlab("") + ylab("Log10 Bin Weight") +
ggtitle(paste("Top peak weights for:", jtop))
print(m.top)
}
dev.off()
}
# Do both marks show that K9me3 is not useful ----------------------------
#
#
# for (jtop in topics.ordered.tmp$topic){
# print(jtop)
# m.tmp <- ggplot(dat.umap.withtopics.tmp, aes_string(x = "umap1", y = "umap2", color = jtop)) +
# geom_point() +
# scale_color_viridis_c() +
# theme_bw() +
# theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# print(m.tmp)
#
# # get gene loadings
#
# break
# }
# plot gene loadings for each topic
# merge with reference data
# Same topic-by-topic celltyping prep as above, but for the K36 TSS-TES LDA
# (tm.result2 / dat.umap2). Gene names here come from the term string itself
# (7th "."-separated field) rather than from the bin annotation table.
topics.ordered.tmp <- OrderTopicsByEntropy(tm.result = tm.result2)
# plot topic loadings to each UMAP
dat.topics <- data.frame(cell = rownames(tm.result2$topics), tm.result2$topics, stringsAsFactors = FALSE)
dat.umap.withtopics.tmp <- left_join(dat.umap2, dat.topics)
# add stages
dat.umap.withtopics.tmp$stage <- sapply(dat.umap.withtopics.tmp$cell, function(cell) StageToNumeric(GetStage(PreprocessSamp(cell))))
# get plates
dat.umap.withtopics.tmp$plate <- sapply(dat.umap.withtopics.tmp$cell, function(x) ClipLast(x, jsep = "_"))
cbPalette <- c("#696969", "#32CD32", "#56B4E9", "#FFB6C1", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf")
# QC UMAPs: plate (m1), stage (m2), faceted by plate (m3) / stage (m4), Louvain (m5).
m1 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = plate)) +
scale_color_manual(values = cbPalette) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
m2 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = as.character(stage))) +
scale_color_manual(values = cbPalette) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
m3 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = plate)) +
scale_color_manual(values = cbPalette) +
facet_wrap(~plate) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
m4 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = as.character(stage))) +
scale_color_manual(values = cbPalette) +
facet_wrap(~stage) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
m5 <- ggplot(dat.umap.withtopics.tmp, aes(x = umap1, y = umap2, color = as.character(louvain))) +
scale_color_manual(values = cbPalette) +
geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom")
# Topic-term weights in long format; gene symbol parsed out of the term name,
# then terms ranked by weight within each topic (rnk = 1 is heaviest).
terms.filt.tmp <- data.frame(topic = rownames(tm.result2$terms), as.data.frame(tm.result2$terms)) %>%
tidyr::gather(key = "term", value = "weight", -topic) %>%
rowwise() %>%
mutate(gene = strsplit(term, "\\.")[[1]][[7]]) %>%
mutate(gene = gsub("_", "", gene)) %>%
group_by(topic) %>%
arrange(desc(weight)) %>%
mutate(rnk = rank(-weight))
outpdf <- file.path(outdir, paste0("TSSTES50kbmax_K36_celltyping_topics.", Sys.Date(), ".pdf"))
pdf(outpdf, useDingbats = FALSE)
print(m1)
print(m2)
print(m3)
print(m4)
print(m5)
for (jtop in topics.ordered.tmp$topic){
print(jtop)
# i <- strsplit(jtop, "_")[[1]][[2]]
m.umap <- PlotXYWithColor(dat.umap.withtopics.tmp, xvar = "umap1", yvar = "umap2", cname = jtop) + scale_color_viridis_c()
top.genes <- subset(terms.filt.tmp, topic == jtop & rnk <= keeptop)$gene
assertthat::assert_that(length(top.genes) > 0)
jsub <- subset(dat.norm.df, gene %in% top.genes)
jsub.sorted.summarised <- jsub %>% group_by(celltype) %>% summarise(zscore = median(zscore)) %>% arrange(desc(zscore)) %>% dplyr::select(celltype)
jlevels <- as.character(jsub.sorted.summarised$celltype)
jsub$celltype <- factor(jsub$celltype, levels = jlevels)
m.exprs <- ggplot(jsub,
aes(x = celltype , y = zscore)) +
geom_boxplot(outlier.shape = NA) +
# geom_violin() +
geom_jitter(width = 0.1, size = 0.5) +
# geom_line() +
theme_classic() +
theme(axis.text.x = element_text(angle = 45, hjust = 1, size = 4)) +
ggtitle(paste(jtop, "Top:", keeptop, "N Unique Genes", length(top.genes)))
print(m.umap)
print(m.exprs)
# plot top 150 genes?
jsub.terms <- subset(terms.filt.tmp, topic == jtop & rnk < keeptop) %>%
ungroup() %>%
mutate(term = forcats::fct_reorder(term, dplyr::desc(weight)))
m.top <- jsub.terms %>%
# mutate(term = forcats::fct_reorder(term, dplyr::desc(weight))) %>%
ggplot(aes(x = term, y = log10(weight), label = gene)) +
geom_point(size = 0.25) +
theme_bw(8) +
# geom_text_repel(size = keeptop / 150, segment.size = 0.1, segment.alpha = 0.25) +
# theme(aspect.ratio=0.2, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_text(angle = 45, hjust = 1, size = keeptop / 200)) +
geom_text_repel(size = 2, segment.size = 0.1, segment.alpha = 0.25) +
theme(aspect.ratio=0.2, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_text(angle = 45, hjust = 1, size = 3.5)) +
xlab("") + ylab("Log10 Bin Weight") +
ggtitle(paste("Top peak weights for:", jtop))
print(m.top)
}
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/covid19api.R
\name{GetDayOne}
\alias{GetDayOne}
\title{Get DayOne cases}
\usage{
GetDayOne(country.requested, status.requested, live = FALSE, total = FALSE)
}
\arguments{
\item{country.requested}{Country slug name chosen}
\item{status.requested}{Status requested; it can be one of confirmed, recovered or deaths}
\item{live}{If TRUE gets the latest cases for the country and status requested}
\item{total}{If TRUE returns all cases by type for a country from the first recorded case}
}
\value{
Data frame columns country, Province, latitude, longitude, date, number of cases and status
}
\description{
Get all cases by type and country from the first recorded case.
Country must be the slug from GetAvalaibleCountries() or GetCountrySummary(). Cases
must be one of: confirmed, recovered, deaths. When the total parameter is TRUE the live
parameter is not necessary.
}
\examples{
GetDayOne(country.requested = 'mexico', status.requested = 'confirmed')
GetDayOne(country.requested = 'mexico', status.requested = 'confirmed', live = TRUE)
GetDayOne(country.requested = 'mexico', status.requested = 'confirmed', total = TRUE)
}
|
/man/GetDayOne.Rd
|
permissive
|
nekrum/covid19api
|
R
| false
| true
| 1,198
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/covid19api.R
\name{GetDayOne}
\alias{GetDayOne}
\title{Get DayOne cases}
\usage{
GetDayOne(country.requested, status.requested, live = FALSE, total = FALSE)
}
\arguments{
\item{country.requested}{Country slug name chosen}
\item{status.requested}{Status requested; it can be one of confirmed, recovered or deaths}
\item{live}{If TRUE gets the latest cases for the country and status requested}
\item{total}{If TRUE returns all cases by type for a country from the first recorded case}
}
\value{
Data frame columns country, Province, latitude, longitude, date, number of cases and status
}
\description{
Get all cases by type and country from the first recorded case.
Country must be the slug from GetAvalaibleCountries() or GetCountrySummary(). Cases
must be one of: confirmed, recovered, deaths. When the total parameter is TRUE the live
parameter is not necessary.
}
\examples{
GetDayOne(country.requested = 'mexico', status.requested = 'confirmed')
GetDayOne(country.requested = 'mexico', status.requested = 'confirmed', live = TRUE)
GetDayOne(country.requested = 'mexico', status.requested = 'confirmed', total = TRUE)
}
|
# Fuzzer-generated regression fixture: replays a stored libFuzzer input
# against ggforce's internal enclose_points() under valgrind.
# Note: x carries 7 values while id and y are zero-length -- an intentionally
# malformed input exercising length-mismatch handling.
testlist <- list(id = integer(0), x = c(2.41785163922926e+24, 0, 0, 0, 0, 0, 0), y = numeric(0))
result <- do.call(ggforce:::enclose_points,testlist)
str(result)
|
/ggforce/inst/testfiles/enclose_points/libFuzzer_enclose_points/enclose_points_valgrind_files/1609955440-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 162
|
r
|
testlist <- list(id = integer(0), x = c(2.41785163922926e+24, 0, 0, 0, 0, 0, 0), y = numeric(0))
result <- do.call(ggforce:::enclose_points,testlist)
str(result)
|
####Plotting results for Modules Overlap####
# Draws a WGCNA labeledHeatmap of module-overlap adjusted p-values (colour
# scale) with the corresponding odds ratios pasted as cell text, then adds a
# rotated legend, writing everything to a single-page PDF.
library(dplyr)
library(WGCNA)
library(magrittr)
library(readr)
library(gplots)
library(tidyr)
library(vcd)
setwd ("C:/Users/karin/Dropbox/Arquivos_genomica_autistas/artigo_expressao/ASDiPSCTranscriptome")
#pvalues table
modpv=read.delim("DATA/module_overlap_final_matrix_pvalue.txt")
#odds-ratio table
modor=read.delim("DATA/module_overlap_final_matrix_OR.txt")
#getting only the numerical values from the tables
# Columns 3-5 hold the per-module numeric values -- TODO confirm against the
# input file layout.
modpvnum=modpv[,c(3:5)]
modornum=modor[,c(3:5)]
#color pallete
pallete=colorRampPalette(c("pink", "red"))
#vector of colors for x and y axes
colorx=c("MEblue","MEturquoise","MEpurple")
colory=as.vector(modor[,7])
###ploting table without grids####
sizeGrWindow(10,6)
pdf("RESULTS/module_overlap_final_figure_nogrids2.pdf")
#par(mar = c(bottom, left, up, right))
par(mar = c(4, 8.5, 2, 3));
# Display the correlation values within a heatmap plot
labeledHeatmap(Matrix = modpvnum,#pvalues
               xLabels = colorx,#colors for x-axis labeling
               xSymbols = c("MNPC10-blue", "MNeur1-turquoise", "MNeur18-purple"), #names for x-axis labeling
               yLabels = colory, ##colors for y-axis labeling
               ySymbols= modor[,2], #names for y-axis labeling
               colorLabels = TRUE,
               xLabelsAngle = 0,#set the x-axis to the horizontal position
               xLabelsAdj = 0.5, #center the text label of x-axis
               colors = pallete(150),
               naColor = "white", #NA characters should be white
               textMatrix = modornum, #paste the odds-ratio values in the table
               setStdMargins = FALSE,
               cex.text = 1, #size of pasted text in the matrix
               cex.lab.x = 0.8,
               cex.lab.y = 0.8,
               x.adj.lab.y = 0.5,#center the text label of y axis
               zlim = c(0,80), #set the color-coded range
               main = paste("Module Overlap"))
# Legend keying line colours to cell source categories.
legend(x = as.numeric(0.8),y = as.numeric(1),
       bty = "n",
       legend = unique(modpv$Cell.source),
       col = c("green", "greenyellow", "yellow", "red"),
       lty= 1,
       lwd = 5,
       cex=.7)
# Rotated (90 degree) grid legend for the colour scale, drawn via grid.
legend2 = grid_legend(0.9, 0.9,labels = "-log(padj-value)", draw = FALSE, frame = FALSE)
grid.draw(grobTree(legend2, vp = viewport(x = 0.93, angle = 90)))
dev.off()
|
/ASDiPSCTranscriptome/SCRIPTS/Plot_module_overlap.R
|
no_license
|
griesik/ASDiPSCTranscriptome
|
R
| false
| false
| 2,416
|
r
|
####Plotting results for Modules Overlap####
library(dplyr)
library(WGCNA)
library(magrittr)
library(readr)
library(gplots)
library(tidyr)
library(vcd)
setwd ("C:/Users/karin/Dropbox/Arquivos_genomica_autistas/artigo_expressao/ASDiPSCTranscriptome")
#pvalues table
modpv=read.delim("DATA/module_overlap_final_matrix_pvalue.txt")
#odds-ratio table
modor=read.delim("DATA/module_overlap_final_matrix_OR.txt")
#getting only the numerical values from the tables
modpvnum=modpv[,c(3:5)]
modornum=modor[,c(3:5)]
#color pallete
pallete=colorRampPalette(c("pink", "red"))
#vector of colors for x and y axes
colorx=c("MEblue","MEturquoise","MEpurple")
colory=as.vector(modor[,7])
###ploting table without grids####
sizeGrWindow(10,6)
pdf("RESULTS/module_overlap_final_figure_nogrids2.pdf")
#par(mar = c(bottom, left, up, right))
par(mar = c(4, 8.5, 2, 3));
# Display the correlation values within a heatmap plot
labeledHeatmap(Matrix = modpvnum,#pvalues
xLabels = colorx,#colors for x-axis labeling
xSymbols = c("MNPC10-blue", "MNeur1-turquoise", "MNeur18-purple"), #names for x-axis labeling
yLabels = colory, ##colors for y-axis labeling
ySymbols= modor[,2], #names for y-axis labeling
colorLabels = TRUE,
xLabelsAngle = 0,#set the x-axis to the horizontal position
xLabelsAdj = 0.5, #center the text label of x-axis
colors = pallete(150),
naColor = "white", #NA characters should be white
textMatrix = modornum, #paste the odds-ratio values in the table
setStdMargins = FALSE,
cex.text = 1, #size of pasted text in the matrix
cex.lab.x = 0.8,
cex.lab.y = 0.8,
x.adj.lab.y = 0.5,#center the text label of y axis
zlim = c(0,80), #set the color-coded range
main = paste("Module Overlap"))
legend(x = as.numeric(0.8),y = as.numeric(1),
bty = "n",
legend = unique(modpv$Cell.source),
col = c("green", "greenyellow", "yellow", "red"),
lty= 1,
lwd = 5,
cex=.7)
legend2 = grid_legend(0.9, 0.9,labels = "-log(padj-value)", draw = FALSE, frame = FALSE)
grid.draw(grobTree(legend2, vp = viewport(x = 0.93, angle = 90)))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_flow_data.R
\name{get_flow_data}
\alias{get_flow_data}
\title{Get flow data for a given location}
\usage{
get_flow_data(x, code, direction = "both")
}
\arguments{
\item{x}{An \code{epiflows} object.}
\item{code}{A character string denoting location code.}
\item{direction}{If "to" or "from", the function returns a vector
of flows to or from the location, respectively.
If set to "both" - a two-element list with flows both to and from
the location.}
}
\description{
Returns a vector of flows (if direction is "to" or "from") or a list of 2 elements
(if direction is "both") with flows to and/or from the specified location.
}
\examples{
flows <- make_epiflows(Mex_travel_2009)
get_flow_data(flows, "MEX", direction = "both")
}
\author{
Pawel Piatkowski
}
|
/man/get_flow_data.Rd
|
no_license
|
Paula-Moraga/epiflows
|
R
| false
| true
| 821
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_flow_data.R
\name{get_flow_data}
\alias{get_flow_data}
\title{Get flow data for a given location}
\usage{
get_flow_data(x, code, direction = "both")
}
\arguments{
\item{x}{An \code{epiflows} object.}
\item{code}{A character string denoting location code.}
\item{direction}{If "to" or "from", the function returns a vector
of flows to or from the location, respectively.
If set to "both" - a two-element list with flows both to and from
the location.}
}
\description{
Returns a vector of flows (if direction is "to" or "from") or a list of 2 elements
(if direction is "both") with flows to and/or from the specified location.
}
\examples{
flows <- make_epiflows(Mex_travel_2009)
get_flow_data(flows, "MEX", direction = "both")
}
\author{
Pawel Piatkowski
}
|
## Nextflow template script: `$...` tokens are substituted by Nextflow before
## execution, so they are intentionally not valid R as written here.
## Prepares a sleuth object from kallisto abundances plus sample metadata,
## attaches a transcript->gene mapping, and saves it to $sleuthObj.
source("http://bioconductor.org/biocLite.R")
biocLite("devtools")
biocLite("pachterlab/sleuth")
libraries_file <- "$libraries"
abundances_file <- "$abundances"
full_model <- $fullModel
tx2gene_file <- "$tx2gene"
# Sample metadata: one row per library, keyed by the UniqueID column.
metadata <- read.table(libraries_file, sep="\t", header=T, stringsAsFactors=F)
# Two-column table: sample ID -> kallisto abundance path.
abundance_files <- read.table(abundances_file, sep="\t", header=F, stringsAsFactors=F)
m <- match(metadata[,"UniqueID"], abundance_files[,1])
if (any(is.na(m))) {
  stop("One or more samples missing from list of abundance files")
}
metadata[,"path"] <- abundance_files[m, 2]
sleuth_data <- sleuth::sleuth_prep(metadata, as.formula(full_model))
if (file.exists(tx2gene_file)) {
  # Bug fix: read the file path variable (tx2gene_file), not the
  # not-yet-defined data frame `tx2gene`.
  tx2gene <- read.table(tx2gene_file, sep="\t", header=T, stringsAsFactors=F)
  m <- match(rownames(sleuth_data[["obs_raw"]]), tx2gene[,"target_id"])
  if (any(is.na(m))) {
    stop(paste(tx2gene_file, "missing one or more transcript IDs"))
  }
} else {
  # Bug fix: `} else {` must be on one line -- at the top level of a sourced
  # script a dangling `else` after a closed `if` block is a parse error.
  # Bug fix: install.packages() takes a quoted package name.
  install.packages("stringr")
  # Derive the transcript->gene map from Ensembl-style IDs embedded in the
  # transcript names. Bug fix: the original referenced an undefined
  # `abundance_matrix`; the transcript IDs live in
  # rownames(sleuth_data[["obs_raw"]]) (same source as the branch above).
  tx_ids <- rownames(sleuth_data[["obs_raw"]])
  tx2gene <- data.frame(
    tx_id=stringr::str_extract(tx_ids, 'ENSTR?[\\d\\.]+'),
    gene_id=stringr::str_extract(tx_ids, 'ENSGR?[\\d\\.]+')
  )
}
sleuth_data[["target_mapping"]] <- tx2gene
save(sleuth_data, file="$sleuthObj")
|
/nextflow/rna-quick/templates/export_sleuth.R
|
no_license
|
hmkim/workflow
|
R
| false
| false
| 1,248
|
r
|
source("http://bioconductor.org/biocLite.R")
biocLite("devtools")
biocLite("pachterlab/sleuth")
libraries_file <- "$libraries"
abundances_file <- "$abundances"
full_model <- $fullModel
tx2gene_file <- "$tx2gene"
metadata <- read.table(libraries_file, sep="\t", header=T, stringsAsFactors=F)
abundance_files <- read.table(abundances_file, sep="\t", header=F, stringsAsFactors=F)
m <- match(metadata[,"UniqueID"], abundance_files[,1])
if (any(is.na(m))) {
stop("One or more samples missing from list of abundance files")
}
metadata[,"path"] <- abundance_files[m, 2]
sleuth_data <- sleuth::sleuth_prep(metadata, as.formula(full_model))
if (file.exists(tx2gene_file)) {
tx2gene <- read.table(tx2gene, sep="\t", header=T, stringsAsFactors=F)
m <- match(rownames(sleuth_data[["obs_raw"]]), tx2gene[,"target_id"])
if (any(is.na(m))) {
stop(paste(tx2gene_file, "missing one or more transcript IDs"))
}
}
else {
install.packages(stringr)
tx2gene <- data.frame(
tx_id=stringr::str_extract(rownames(abundance_matrix), 'ENSTR?[\\d\\.]+'),
gene_id=stringr::str_extract(rownames(abundance_matrix), 'ENSGR?[\\d\\.]+')
)
}
sleuth_data[["target_mapping"]] <- tx2gene
save(sleuth_data, file="$sleuthObj")
|
## Risk-analysis simulation for shark demography.
## For each of five harvest-rate (U) scenarios, runs Ns Monte Carlo population
## projections -- sampling maximum age, selectivity and projection matrices --
## then records the probability that the final/initial population-size ratio
## reaches Biom.ref.point.
## NOTE(review): depends on many objects defined elsewhere in the project
## (Sel.fn, ASim, LinfSim, kSim, toSim, add.missing.age, Proyec.matrix,
## Proyec.matrix.1, A, n.Yr.prol, Life.hist.scenarios, Sel.scenarios,
## U.scenarios, N1998.scenarios, Biom.ref.point, stable.stage, Risk.fn) --
## source those scripts first. TODO confirm intended load order.
Ns=1000
iterations=1:Ns
Prob.Pop.doubling=rep(NA,length = 5)
Pop.project=vector("list",length = 5)
for(aa in 1:5)
{
  #2.1. Set selectivity scenarios
  Selectivity.SIM=Selectivity.SIM.1=vector("list",length = Ns)
  scenario.sel=1
  for (s in iterations) Selectivity.SIM.1[[s]]=Sel.fn(ASim[s],LinfSim[s],kSim[s],toSim[s])
  scenario.sel=2
  for (s in iterations) Selectivity.SIM[[s]]=Sel.fn(ASim[s],LinfSim[s],kSim[s],toSim[s])
  SelSim=add.missing.age(Selectivity.SIM)
  SelSim.1=add.missing.age(Selectivity.SIM.1)
  #1. Set scenarios
  #biological scenario
  scenario=Life.hist.scenarios[[2]]
  #Selectivity scenario
  scenario.sel=Sel.scenarios[[2]]
  #Harvest rate scenario
  scenario.U=U.scenarios[[aa]]
  #N1998 scenario
  scenario.N1998=N1998.scenarios[[1]]
  #2. Create elements to fill in
  Pop.size.ratio=rep(NA,length = Ns)
  store.pop.proy=NULL
  #3. Monte Carlo loop
  for (s in iterations)
  {
    #2. Vary vital rates for projection matrices
    #draw max age sample
    A.sim=ASim[s] #use same A.sim for all projections to keep same size matrix
    #take matrix sample of same dimension
    Proj.Mat=r.numb=NULL
    #
    # if(scenario==1)
    # {
    #   condition=lapply(Proyec.matrix.1, function(x) nrow(x) == A.sim)
    #   DATA=Proyec.matrix.1[unlist(condition)]
    #   Proj.Mat=DATA[[1]]
    # }
    # if(scenario==2)
    # {
    #   condition=lapply(Proyec.matrix, function(x) nrow(x) == A.sim)
    #   DATA=Proyec.matrix[unlist(condition)]
    #   Proj.Mat=DATA[[1]]
    # }
    if(scenario==1)
    {
      #select matrices of A.sim dimensions
      condition=lapply(Proyec.matrix.1, function(x) nrow(x) == A.sim)
      DATA=Proyec.matrix.1[unlist(condition)]
      if(A.sim==A)r.numb=sample(1:length(DATA),n.Yr.prol,replace=T) #resample for A.sim 60 (there are <15)
      if(A.sim<A)r.numb=sample(1:length(DATA),n.Yr.prol,replace=F)
      #keep only 15
      Proj.Mat=DATA[r.numb]
    }
    if(scenario==2)
    {
      #select matrices of A.sim dimensions
      condition=lapply(Proyec.matrix, function(x) nrow(x) == A.sim)
      DATA=Proyec.matrix[unlist(condition)]
      if(A.sim==A)r.numb=sample(1:length(DATA),n.Yr.prol,replace=T)
      if(A.sim<A)r.numb=sample(1:length(DATA),n.Yr.prol,replace=F)
      #keep only 15
      Proj.Mat=DATA[r.numb]
    }
    #3. Calculate selectivity
    Selectivity.sim=NULL
    if(scenario.sel==1) Selectivity.sim=SelSim.1[s,]
    if(scenario.sel==2) Selectivity.sim=SelSim[s,]
    Selectivity.sim=subset(Selectivity.sim,!is.na(Selectivity.sim)) #remove NAs
    #4. Add harvesting
    # Post-multiply the projection matrix by a diagonal survival-from-harvest
    # matrix; note the function reads Selectivity.sim from the enclosing scope.
    harvest.matrix=function(matrix,U)
    {
      H=diag(nrow(matrix))
      diag(H)=1-(U*Selectivity.sim) #apply U and selectivity
      MH=matrix%*%H
      return(MH)
    }
    Harvest.Proyec.mat=vector("list",length = n.Yr.prol)
    for (h in 1:n.Yr.prol) Harvest.Proyec.mat[[h]]=harvest.matrix(Proj.Mat[[h]],scenario.U[h])
    # Harvest.Proyec.mat=harvest.matrix(Proj.Mat,scenario.U[[1]][1])
    #5. Project population into future
    # nn=matrix(stable.stage(Harvest.Proyec.mat)*scenario.N1998[[1]])
    # p<-pop.projection(Harvest.Proyec.mat,nn, 15)   #project population
    # # plot(p$pop.sizes)  #plot pop size
    # lines(p$pop.sizes,col=4)
    # Iterate the stage-structured vector forward one year at a time,
    # starting from the stable stage distribution scaled to scenario.N1998.
    n.vec=vector("list",length = n.Yr.prol)
    n.vec[[1]]=matrix(stable.stage(Harvest.Proyec.mat[[1]])*scenario.N1998)
    for (p in 2:n.Yr.prol)
    {
      n.vec[[p]]=Harvest.Proyec.mat[[p]]%*%matrix(n.vec[[p-1]])
    }
    Pop.size=rep(0,n.Yr.prol)
    for(y in 1:n.Yr.prol) Pop.size[y]=sum(n.vec[[y]])
    # if(aa==1)plot(Pop.size,col=aa,ylim=c(0.7,max(Pop.size)))
    # if(aa>1)points(Pop.size,col=aa)
    #6. Calculate population size ratio
    Pop.size.ratio[s]=Pop.size[length(Pop.size)]/Pop.size[1]
    store.pop.proy=rbind(store.pop.proy,Pop.size)
  }
  Pop.project[[aa]]=store.pop.proy
  #
  # #Calculate reference points
  Prop.Pop.double=subset(Pop.size.ratio,Pop.size.ratio>=Biom.ref.point)
  Pop.size.ratio=subset(Pop.size.ratio,!is.na(Pop.size.ratio))
  Prob.Pop.doubling[aa]=length(Prop.Pop.double)/length(Pop.size.ratio)
}
plot(Prob.Pop.doubling)
# par(mfcol=c(3,2),omi=c(.6,.9,.4,.1),mai=c(.15,.15,.15,.15))
# for (i in 1:5){
#   plot(Pop.project[[i]][1,],type='l',ylim=c(0,max(Pop.project[[i]])))
#   for(j in 2:10) lines(Pop.project[[i]][j,],type='l')
#   legend('topleft',paste("u", i))
# Cross-check against the project's Risk.fn wrapper for each U scenario.
test=NULL
for(aaa in 1:5){
  test=rbind(test,Risk.fn(Life.hist.scenarios[[2]],N1998.scenarios[[1]],
                          U.scenarios[[aaa]],Sel.scenarios[[2]]))}
|
/White_shark_test.scenarios.R
|
no_license
|
JuanMatiasBraccini/Git_Demography
|
R
| false
| false
| 4,495
|
r
|
Ns=1000
iterations=1:Ns
Prob.Pop.doubling=rep(NA,length = 5)
Pop.project=vector("list",length = 5)
for(aa in 1:5)
{
#2.1. Set selectivity scenarios
Selectivity.SIM=Selectivity.SIM.1=vector("list",length = Ns)
scenario.sel=1
for (s in iterations) Selectivity.SIM.1[[s]]=Sel.fn(ASim[s],LinfSim[s],kSim[s],toSim[s])
scenario.sel=2
for (s in iterations) Selectivity.SIM[[s]]=Sel.fn(ASim[s],LinfSim[s],kSim[s],toSim[s])
SelSim=add.missing.age(Selectivity.SIM)
SelSim.1=add.missing.age(Selectivity.SIM.1)
#1. Set scenarios
#biological scenario
scenario=Life.hist.scenarios[[2]]
#Selectivity scenario
scenario.sel=Sel.scenarios[[2]]
#Harvest rate scenario
scenario.U=U.scenarios[[aa]]
#N1998 scenario
scenario.N1998=N1998.scenarios[[1]]
#2. Create elements to fill in
Pop.size.ratio=rep(NA,length = Ns)
store.pop.proy=NULL
#3. Monte Carlo loop
for (s in iterations)
{
#2. Vary vital rates for projection matrices
#draw max age sample
A.sim=ASim[s] #use same A.sim for all projections to keep same size matrix
#take matrix sample of same dimension
Proj.Mat=r.numb=NULL
#
# if(scenario==1)
# {
# condition=lapply(Proyec.matrix.1, function(x) nrow(x) == A.sim)
# DATA=Proyec.matrix.1[unlist(condition)]
# Proj.Mat=DATA[[1]]
# }
# if(scenario==2)
# {
# condition=lapply(Proyec.matrix, function(x) nrow(x) == A.sim)
# DATA=Proyec.matrix[unlist(condition)]
# Proj.Mat=DATA[[1]]
# }
if(scenario==1)
{
#select matrices of A.sim dimensions
condition=lapply(Proyec.matrix.1, function(x) nrow(x) == A.sim)
DATA=Proyec.matrix.1[unlist(condition)]
if(A.sim==A)r.numb=sample(1:length(DATA),n.Yr.prol,replace=T) #resample for A.sim 60 (there are <15)
if(A.sim<A)r.numb=sample(1:length(DATA),n.Yr.prol,replace=F)
#keep only 15
Proj.Mat=DATA[r.numb]
}
if(scenario==2)
{
#select matrices of A.sim dimensions
condition=lapply(Proyec.matrix, function(x) nrow(x) == A.sim)
DATA=Proyec.matrix[unlist(condition)]
if(A.sim==A)r.numb=sample(1:length(DATA),n.Yr.prol,replace=T)
if(A.sim<A)r.numb=sample(1:length(DATA),n.Yr.prol,replace=F)
#keep only 15
Proj.Mat=DATA[r.numb]
}
#3. Calculate selectivity
Selectivity.sim=NULL
if(scenario.sel==1) Selectivity.sim=SelSim.1[s,]
if(scenario.sel==2) Selectivity.sim=SelSim[s,]
Selectivity.sim=subset(Selectivity.sim,!is.na(Selectivity.sim)) #remove NAs
#4. Add harvesting
harvest.matrix=function(matrix,U)
{
H=diag(nrow(matrix))
diag(H)=1-(U*Selectivity.sim) #apply U and selectivity
MH=matrix%*%H
return(MH)
}
Harvest.Proyec.mat=vector("list",length = n.Yr.prol)
for (h in 1:n.Yr.prol) Harvest.Proyec.mat[[h]]=harvest.matrix(Proj.Mat[[h]],scenario.U[h])
# Harvest.Proyec.mat=harvest.matrix(Proj.Mat,scenario.U[[1]][1])
#5. Project population into future
# nn=matrix(stable.stage(Harvest.Proyec.mat)*scenario.N1998[[1]])
# p<-pop.projection(Harvest.Proyec.mat,nn, 15) #project population
# # plot(p$pop.sizes) #plot pop size
# lines(p$pop.sizes,col=4)
n.vec=vector("list",length = n.Yr.prol)
n.vec[[1]]=matrix(stable.stage(Harvest.Proyec.mat[[1]])*scenario.N1998)
for (p in 2:n.Yr.prol)
{
n.vec[[p]]=Harvest.Proyec.mat[[p]]%*%matrix(n.vec[[p-1]])
}
Pop.size=rep(0,n.Yr.prol)
for(y in 1:n.Yr.prol) Pop.size[y]=sum(n.vec[[y]])
# if(aa==1)plot(Pop.size,col=aa,ylim=c(0.7,max(Pop.size)))
# if(aa>1)points(Pop.size,col=aa)
#6. Calculate population size ratio
Pop.size.ratio[s]=Pop.size[length(Pop.size)]/Pop.size[1]
store.pop.proy=rbind(store.pop.proy,Pop.size)
}
Pop.project[[aa]]=store.pop.proy
#
# #Calculate reference points
Prop.Pop.double=subset(Pop.size.ratio,Pop.size.ratio>=Biom.ref.point)
Pop.size.ratio=subset(Pop.size.ratio,!is.na(Pop.size.ratio))
Prob.Pop.doubling[aa]=length(Prop.Pop.double)/length(Pop.size.ratio)
}
plot(Prob.Pop.doubling)
# par(mfcol=c(3,2),omi=c(.6,.9,.4,.1),mai=c(.15,.15,.15,.15))
# for (i in 1:5){
# plot(Pop.project[[i]][1,],type='l',ylim=c(0,max(Pop.project[[i]])))
# for(j in 2:10) lines(Pop.project[[i]][j,],type='l')
# legend('topleft',paste("u", i))
test=NULL
for(aaa in 1:5){
test=rbind(test,Risk.fn(Life.hist.scenarios[[2]],N1998.scenarios[[1]],
U.scenarios[[aaa]],Sel.scenarios[[2]]))}
|
## select CNVR using cutoff_freq
# Split CNVRs (copy-number variable regions) into a "refine" set (carrier
# frequency >= cutoff_freq) destined for boundary refinement, and a "keep"
# set left unchanged; both the ID vectors and the matching rows of the CNVR
# annotation table are written out as RDS files.
path_ensembleCNV <- ""        # directory holding the ensembleCNV inputs
fileName_ensembleCNV <- ""    # RDS: CNVR x sample copy-number matrix
fileName_CNVR <- ""           # RDS: CNVR annotation table (has a CNVR_ID column)
cutoff_freq <- 0.01           # minimum carrier frequency for refinement
path_output <- ""             # output directory
mat_ensembleCNV <- readRDS( file = file.path(path_ensembleCNV, fileName_ensembleCNV) )
n.sample <- ncol( mat_ensembleCNV )   # samples are columns
n.CNVR <- nrow( mat_ensembleCNV )     # CNVRs are rows
cnvrIDs <- rownames( mat_ensembleCNV )
## calculate freq of CNVR
# Per CNVR, count samples whose integer copy number is 0, 1 or 3
# (CNV carriers; copy number 2 = diploid is excluded).
freq_CNVR <- unlist( lapply(1:nrow(mat_ensembleCNV), FUN = function(i) {
  v1 <- as.integer( mat_ensembleCNV[i, ] )
  n1 <- sum( v1 %in% c(0, 1, 3) )
}))
idxs.refine <- which( freq_CNVR >= n.sample*cutoff_freq )
length(idxs.refine)
cnvrs_refine <- cnvrIDs[ idxs.refine ]
# NOTE(review): if idxs.refine is empty, cnvrIDs[-integer(0)] selects NOTHING,
# so cnvrs_keep would be empty rather than all CNVRs -- confirm upstream
# guarantees at least one CNVR passes the cutoff.
cnvrs_keep <- cnvrIDs[ -idxs.refine ]
saveRDS( cnvrs_refine, file = file.path( path_output, "cnvrs_refine.rds") )
saveRDS( cnvrs_keep, file = file.path( path_output, "cnvrs_keep.rds"))
# Split the CNVR annotation table the same way.
dat_cnvr <- readRDS(file = file.path(path_ensembleCNV, fileName_CNVR))
dat_cnvr_keep <- subset( dat_cnvr, CNVR_ID %in% cnvrs_keep )
dat_cnvr_refine <- subset( dat_cnvr, CNVR_ID %in% cnvrs_refine )
saveRDS( dat_cnvr_keep, file = file.path(path_output, "dat_cnvrs_keep.rds") )
saveRDS( dat_cnvr_refine, file = file.path(path_output, "dat_cnvrs_refine.rds"))
|
/05_boundary_refinement/step.1.subset.refinement.CNVR.R
|
no_license
|
jeffverboon/ensembleCNV
|
R
| false
| false
| 1,203
|
r
|
## select CNVR using cutoff_freq
path_ensembleCNV <- ""
fileName_ensembleCNV <- ""
fileName_CNVR <- ""
cutoff_freq <- 0.01
path_output <- ""
mat_ensembleCNV <- readRDS( file = file.path(path_ensembleCNV, fileName_ensembleCNV) )
n.sample <- ncol( mat_ensembleCNV )
n.CNVR <- nrow( mat_ensembleCNV )
cnvrIDs <- rownames( mat_ensembleCNV )
## calculate freq of CNVR
freq_CNVR <- unlist( lapply(1:nrow(mat_ensembleCNV), FUN = function(i) {
v1 <- as.integer( mat_ensembleCNV[i, ] )
n1 <- sum( v1 %in% c(0, 1, 3) )
}))
idxs.refine <- which( freq_CNVR >= n.sample*cutoff_freq )
length(idxs.refine)
cnvrs_refine <- cnvrIDs[ idxs.refine ]
cnvrs_keep <- cnvrIDs[ -idxs.refine ]
saveRDS( cnvrs_refine, file = file.path( path_output, "cnvrs_refine.rds") )
saveRDS( cnvrs_keep, file = file.path( path_output, "cnvrs_keep.rds"))
dat_cnvr <- readRDS(file = file.path(path_ensembleCNV, fileName_CNVR))
dat_cnvr_keep <- subset( dat_cnvr, CNVR_ID %in% cnvrs_keep )
dat_cnvr_refine <- subset( dat_cnvr, CNVR_ID %in% cnvrs_refine )
saveRDS( dat_cnvr_keep, file = file.path(path_output, "dat_cnvrs_keep.rds") )
saveRDS( dat_cnvr_refine, file = file.path(path_output, "dat_cnvrs_refine.rds"))
|
.spaMM_lm.wfit <- function(x, y, offset=NULL,w=NULL) {
  ## Weighted least-squares fit via the normal equations, using a sparse
  ## Cholesky factorization (Matrix::Cholesky) instead of QR.
  ## x: design matrix, assumed full column rank (see df.residual note below)
  ## y: response vector
  ## offset: optional offset subtracted from y before solving
  ## w: optional prior weights; NULL means an unweighted fit
  ## Returns a list with coefficients, fitted.values (offset re-added),
  ## residuals, and df.residual.
  if (!is.null(w)) {
    XtWX <- .ZtWZwrapper(x,w)   ## X'WX via project helper (defined elsewhere)
    rhs <- crossprod(x,w*y)     ## X'Wy
  } else {
    XtWX <- crossprod(x)        ## X'X
    rhs <- crossprod(x,y)
  }
  chmfactor <- Cholesky(XtWX)   ## sparse Cholesky factor of the Gram matrix
  if (!is.null(offset)) y <- y-offset
  ## system="A" solves the full system XtWX %*% beta = rhs from the factor.
  beta <- solve(chmfactor,rhs,system="A")
  fitted <- x %*% beta
  residuals <- y-fitted ## offset removed in each term
  if (!is.null(offset)) fitted <- fitted+offset
  return(list(coefficients=beta[,1], fitted.values=fitted, residuals=residuals,
              df.residual=nrow(x)-ncol(x) ##assuming rank has been 'preprocessed'
  ))
}
|
/CRAN/contrib/spaMM/R/sparseX.R
|
no_license
|
PRL-PRG/dyntrace-instrumented-packages
|
R
| false
| false
| 630
|
r
|
.spaMM_lm.wfit <- function(x, y, offset=NULL,w=NULL) {
if (!is.null(w)) {
XtWX <- .ZtWZwrapper(x,w)
rhs <- crossprod(x,w*y)
} else {
XtWX <- crossprod(x)
rhs <- crossprod(x,y)
}
chmfactor <- Cholesky(XtWX)
if (!is.null(offset)) y <- y-offset
beta <- solve(chmfactor,rhs,system="A")
fitted <- x %*% beta
residuals <- y-fitted ## offset removed in each term
if (!is.null(offset)) fitted <- fitted+offset
return(list(coefficients=beta[,1], fitted.values=fitted, residuals=residuals,
df.residual=nrow(x)-ncol(x) ##assuming rank has been 'preprocessed'
))
}
|
library(ggplot2)
library(ggpubr)
library(RColorBrewer)
library(reshape2)
# 1. Get color vectors
getColors <- function(n) {
  # Return n colours drawn from the pooled qualitative ColorBrewer palettes
  # (up to 74 distinct colours in total); sampling is with replacement only
  # when n exceeds the pool size.
  col <- brewer.pal.info[brewer.pal.info$category=='qual', ] # get max. 74 colours
  col_vector <- unlist(mapply(brewer.pal, col$maxcolors, rownames(col)))
  # Bug fix / idiom: the original used ifelse() for scalar control flow with
  # side-effect assignments in its branches. ifelse() is a vectorised
  # selector, not a conditional statement; plain if/else states the intent.
  if (n > length(col_vector)) {
    vec <- sample(col_vector, n, replace = TRUE)
  } else {
    vec <- sample(col_vector, n, replace = FALSE)
  }
  vec
}
# 2. Draw the heatmaps
draw_heatmap <- function(voomObj, topTable, phenoDF, list) {
  # Heatmap of voom-transformed expression for the genes named in `list`,
  # with columns annotated by tumour status and tumour stage.
  # NOTE: the parameter name `list` shadows base::list (function lookup still
  # resolves list(...) to the base function); `topTable` is accepted for
  # interface compatibility but not used here.
  col_anno <- phenoDF %>% select(Sample.Type, tumor_stage)
  rownames(col_anno) <- colnames(voomObj)
  colnames(col_anno) <- c('tumor', 'stage')
  # Fixed colour assignments; the names must match the annotation values.
  tumor <- c("#99a599", "#37637f")
  names(tumor) <- unique(col_anno$tumor)
  stage <- c("#d7191c","#fdae61","#a1d99b","#2b83ba","#bababa")
  names(stage) <- c("I","II","III","IV","NA")
  anno_colors <- list(tumor = tumor, stage = stage)
  pheatmap(voomObj$E[list, ],
           annotation_col = col_anno,
           annotation_colors = anno_colors,
           labels_row = list,
           show_colnames = F)
}
# 3. dens.plot
dens.plot <- function(table, colVec, yrange) {
  # Overlay per-column kernel-density curves of `table` on one base plot,
  # one colour per column, with a dotted vertical reference line at 0.
  #
  # table  - numeric matrix/data frame; one density curve per column
  # colVec - vector of colours, indexed positionally per column
  # yrange - y-axis limits passed to plot()
  #
  # Bug fix: the original chained plot()/abline()/title()/for() with `+`.
  # Base-graphics functions draw by side effect and return NULL, so the
  # arithmetic chain only "worked" by accident; sequential calls are correct.
  plot(density(table[, 1]), col = colVec[1],
       lwd = 2, las = 2, ylim = yrange, main = "", xlab = "")
  abline(v = 0, lty = 3)
  title(xlab = "expr values")
  # Bug fix: 2:ncol(table) counts down (2, 1) for a single-column input;
  # seq_len(...)[-1] is empty in that case, correctly drawing nothing extra.
  for (i in seq_len(ncol(table))[-1]) {
    den <- density(table[, i])
    lines(den$x, den$y, col = colVec[i], lwd = 2)
  }
  invisible(NULL)
}
# 4. Function to draw the boxplot for a single gene
single.box <- function(v, phenoDF, id, tt){
  # Boxplot (with jittered points) of logCPM values for one gene `id`,
  # grouped by tissue type, titled with the adjusted p-value taken from
  # the limma top table `tt`.
  sample_types <- phenoDF %>% select(Sample.ID, Sample.Type)
  expr <- as.data.frame(v$E[rownames(v$E)==id, ])
  expr$Sample.ID <- rownames(expr)
  colnames(expr) <- c("counts", "Sample.ID")
  plot_df <- merge(expr, sample_types, by="Sample.ID")
  plot_df$Sample.Type <- factor(plot_df$Sample.Type)
  q_val <- tt[id, ]$adj.P.Val
  plot_title <- paste0(id, " q-val = ", formatC(q_val, format="e", digits=2))
  ggboxplot(plot_df, x="Sample.Type", y="counts",
            color="Sample.Type", palette="jco", main=plot_title,
            xlab="Tissue", ylab="logCPM", add="jitter", ggtheme = theme_bw())
}
|
/functions.R
|
no_license
|
chilampoon/Meta-HCC
|
R
| false
| false
| 2,134
|
r
|
library(ggplot2)
library(ggpubr)
library(RColorBrewer)
library(reshape2)
# 1. Get color vectors
getColors <- function(n) {
col <- brewer.pal.info[brewer.pal.info$category=='qual', ] # get max. 74 colours
col_vector <- unlist(mapply(brewer.pal, col$maxcolors, rownames(col)))
ifelse (n > length(col_vector),
vec <- sample(col_vector, n, replace=T),
vec <- sample(col_vector, n, replace=F)
)
vec
}
# 2. Draw the heatmaps
draw_heatmap <- function(voomObj, topTable, phenoDF, list) {
hm_cdr <- phenoDF %>% select(Sample.Type, tumor_stage)
rownames(hm_cdr) <- colnames(voomObj)
colnames(hm_cdr) <- c('tumor', 'stage')
tumor <- c("#99a599", "#37637f")
names(tumor) <- unique(hm_cdr$tumor)
stage <- c("#d7191c","#fdae61","#a1d99b","#2b83ba","#bababa")
names(stage) <- c("I","II","III","IV","NA")
anno_colors <- list(tumor = tumor,
stage = stage) # the name must be consistent
h <- pheatmap(voomObj$E[list, ], annotation_col=hm_cdr, annotation_colors=anno_colors,
labels_row = list, show_colnames = F)
h
}
# 3. dens.plot
dens.plot <- function(table, colVec, yrange) {
d <- plot(density(table[, 1]), col=colVec[1],
lwd=2, las=2, ylim=yrange, main="", xlab="") +
abline(v=0, lty=3) + title(xlab="expr values") +
for (i in 2:ncol(table)) {
den <- density(table[, i])
lines(den$x, den$y, col=colVec[i], lwd=2)
}
d
}
# 4. Function to draw the boxplot for a single gene
single.box <- function(v, phenoDF, id, tt){
t_pdata <- phenoDF %>% select(Sample.ID, Sample.Type)
exp_list <- as.data.frame(v$E[rownames(v$E)==id, ])
exp_list$Sample.ID <- rownames(exp_list)
colnames(exp_list) <- c("counts", "Sample.ID")
mdf <- merge(exp_list, t_pdata, by="Sample.ID")
mdf$Sample.Type <- factor(mdf$Sample.Type)
symbol <- id
q_val <- tt[id, ]$adj.P.Val
ggboxplot(mdf, x="Sample.Type", y="counts",
color="Sample.Type", palette="jco", main=paste0(symbol, " q-val = ", formatC(q_val, format="e", digits=2)),
xlab="Tissue", ylab="logCPM", add="jitter", ggtheme = theme_bw())
}
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
dvvsc <- function(tvvsc, vvvsc) {
  # Ratio of tvvsc to vvvsc (semantics not documented in the source --
  # presumably a time/speed/distance relation for empty travel on road;
  # confirm with the author).
  return(tvvsc / vvvsc)
}
dvvsc(80, 20)
# Empty travel on road ("viaje vacio sobre camino"): distance divided by the
# constant 88.3 (units not stated in source -- TODO confirm).
tvvsc <- function(dvvsc) {
  dvvsc / 88.3
}
t2<-input$DE/88.2, #Viaje vacio sobre lote
# Empty travel on lot ("viaje vacio sobre lote"): distance divided by the
# constant 88.2 (units not stated in source -- TODO confirm).
tvvsl <- function(dvvsl) {
  dvvsl / 88.2
}
t3<- exp(1.96+0.62*log(input$VolT/(input$VolT/input$DE*100))), #Viaje mientras carga
t4<- exp(0.11+0.73*log(input$VolT)-0.29*log(input$VolI)), #Carga
t5<- input$DE/75.4, # viaje cargado sobre lote
t6<- input$DSC/109.4, # Viaje cargado sobre camino
t7<- exp(0.11+(0.78*log(input$VolT))-(0.32*log(input$VolI)))
)
# Shiny UI for the skidding-cycle time calculator: sliders/selects for the
# model inputs (distances and volumes) plus placeholders for the tables and
# plots the server renders.
# NOTE(review): the label "Distancia de extracci?n:" contains a mojibake "?"
# (presumably an encoding-damaged "ó") -- confirm and re-save the file as UTF-8.
ui <- fluidPage(
# Title ----
titlePanel("Tiempos de actividades"),
# Layout: input sidebar + main output panel ----
sidebarLayout(
# Sidebar holding all model inputs ----
sidebarPanel(
# Input: skidding distance on the road (DSC), metres ----
sliderInput("DSC", "Distancia de carreteo sobre camino:",
min = 50, max = 500,
value = 150),
# Input: extraction distance (DE), metres ----
sliderInput("DE", "Distancia de extracci?n:",
min = 50, max = 500,
value = 250, step = 50),
# Input: distance between skid trails (currently disabled) ----
# sliderInput("DVS", "Distancia entre v?as de saca:",
# min = 50, max = 400,
# value = 200, step = 25),
# Input: stand volume to harvest ----
# NOTE(review): "VolumenRodal" is never read by the server function.
selectInput(inputId = "VolumenRodal",
label = "Volumen a cosechar",
choices = sort(unique(c(50,100,150,200,250,300,350,400))),
multiple = F),
# Input: total load volume per cycle (VolT) ----
sliderInput("VolT", "Volumen total de carga:",
min = 5, max = 20,
value = 10),
# Input: individual piece volume (VolI) ----
sliderInput("VolI", "Volumen del producto:",
min = 0.05, max = 0.5,
value = 0.3, step = 0.1)
# Input: tractor power (disabled; depends on an external `datos` table) ----
#selectInput(inputId = "Pot",
# label = "Potencia del tractor",
# choices = sort(unique(datos$potencia)),
# multiple = FALSE,
# selected = 90),
#selectInput(inputId = "department",
# label = "Operaci?n",
# choices = sort(unique(datos$tipo_practica)),
# multiple = TRUE)
),
# Main panel with the outputs ----
mainPanel(
# Output: per-activity time table and total-time table ----
tableOutput("values"),
tableOutput("values2"),
#plotOutput("plot1"),
# Output: productivity-vs-distance curve ----
plotOutput("plot2"),
#tableOutput2("values2")
# Output: pie chart of time share per activity ----
plotOutput("plot3"),
# NOTE(review): "values3" has no matching renderTable in the server,
# so this output stays empty -- confirm whether it should be removed.
tableOutput("values3")
)
)
)
# Define server logic for slider examples ----
# Shiny server: builds reactive data frames of per-activity cycle times and
# effective productivity, and renders the tables and plots declared in `ui`.
# The constants 88.3, 88.2, 75.4 and 109.4 divide distances to give times,
# so they appear to be travel speeds (m/min) from a time study -- TODO
# confirm units against the original source.
# NOTE(review): this script only loads shiny; ggplot2 (used in plot2/plot3)
# and scales::percent (used in plot3) are never attached -- the app relies
# on them being loaded elsewhere. Verify before deploying.
server <- function(input, output) {
# Effective productivity (m3/h) over extraction distances 50..500 m,
# plus +10% / -10% bands (PEF2 / PEF3). Note `d<-seq(...)` is assigned
# inside the first data.frame argument and reused by the later columns,
# so the argument order is load-bearing.
sliderValues3<- reactive({
data.frame(DE1 = as.double(c(d<-seq(50,500,by=50))),
PEF = as.double(c(pef <-(input$VolT/((input$DSC/88.3+
d/88.2+
exp(1.96+0.62*log(input$VolT/(input$VolT/input$DE*100)))+
exp(0.11+0.73*log(input$VolT)-0.29*log(input$VolI))+
d/75.4+ # loaded travel on the strip
input$DSC/109.4+ # loaded travel on the road
exp(0.11+(0.78*log(input$VolT))-(0.32*log(input$VolI))))/60))
)),
PEF2 = as.double(c(pef <-1.1*(input$VolT/((input$DSC/88.3+
d/88.2+
exp(1.96+0.62*log(input$VolT/(input$VolT/input$DE*100)))+
exp(0.11+0.73*log(input$VolT)-0.29*log(input$VolI))+
d/75.4+ # loaded travel on the strip
input$DSC/109.4+ # loaded travel on the road
exp(0.11+(0.78*log(input$VolT))-(0.32*log(input$VolI))))/60))
)),
PEF3 = as.double(c(pef <-0.9*(input$VolT/((input$DSC/88.3+
d/88.2+
exp(1.96+0.62*log(input$VolT/(input$VolT/input$DE*100)))+
exp(0.11+0.73*log(input$VolT)-0.29*log(input$VolI))+
d/75.4+ # loaded travel on the strip
input$DSC/109.4+ # loaded travel on the road
exp(0.11+(0.78*log(input$VolT))-(0.32*log(input$VolI))))/60))
)),
stringsAsFactors = FALSE
)
})
# Total cycle time (minutes): sum of the seven activity times below.
sliderValues2<- reactive({
data.frame(Actividad= c("Tiempo total"),
Tiempo = as.double(c(tt <-input$DSC/88.3+
input$DE/88.2+
exp(1.96+0.62*log(input$VolT/(input$VolT/input$DE*100)))+
exp(0.11+0.73*log(input$VolT)-0.29*log(input$VolI))+
input$DE/75.4+ # loaded travel on the strip
input$DSC/109.4+ # loaded travel on the road
exp(0.11+(0.78*log(input$VolT))-(0.32*log(input$VolI)))
)),
stringsAsFactors = FALSE
)
})
# Reactive expression building the per-activity time table ----
sliderValues <- reactive({
data.frame(
Actividad = c("Viaje vacio sobre camino:",
"Viaje vacio sobre lote:",
"Movimiento en la carga:",
"Carga:",
"Viaje cargado sobre lote:",
"Viaje cargado sobre camino:",
"Descarga:"),
Tiempo = as.double(c(t1<-input$DSC/88.3, #empty travel on the road
t2<-input$DE/88.2, #empty travel on the strip
t3<- exp(1.96+0.62*log(input$VolT/(input$VolT/input$DE*100))), #travel while loading
t4<- exp(0.11+0.73*log(input$VolT)-0.29*log(input$VolI)), #loading
t5<- input$DE/75.4, # loaded travel on the strip
t6<- input$DSC/109.4, # loaded travel on the road
t7<- exp(0.11+(0.78*log(input$VolT))-(0.32*log(input$VolI))) #unloading
)),
Unidad =as.character( c("Min",
"Min",
"Min",
"Min",
"Min",
"Min",
"Min")),
# Percentage column disabled: `tt` lives in a different reactive scope.
#Porcentajes =as.character( c(p1<-round(t1/tt*100),
# p2<-round(t2/tt*100),
# p3<-round(t3/tt*100),
# p4<-round(t4/tt*100),
# p5<-round(t5/tt*100),
# p6<-round(t6/tt*100),
# p7<-round(t7/tt*100)
# )),
stringsAsFactors = FALSE
)
})
# Render the two summary tables ----
output$values <- renderTable({
sliderValues()
})
output$values2 <- renderTable({
sliderValues2()
})
# Earlier version plotted against an external `datos` table:
# output$plot2<- renderPlot({
# ggplot(datos, aes(DE, PEF))+geom_point()+
# geom_point(aes(x=input$DSC,y=25), col="red", size= 5)+
# geom_smooth(method="lm", formula=y~x, col="black")
# })
# Productivity curve with +/-10% bands and a marker at the chosen DE ----
output$plot2<- renderPlot({
ggplot(sliderValues3(), aes(x=DE1, y=PEF))+geom_point()+
geom_smooth(method="loess", formula=y~x, col="black")+
geom_line(aes(x=DE1, y=PEF2), col="red")+
geom_line(aes(x=DE1, y=PEF3), col="red")+
geom_point(aes(x=input$DE,y=mean(PEF)), col="red", size= 5)+
xlab("Distancia de Extraccion") + ylab("Productividad Efectiva") + # axis labels
ggtitle("Productividad de los tractorcitos") + # title
theme_bw()
})
# Pie chart of the time share per activity ----
# NOTE(review): labels use percent(Tiempo/100), i.e. raw minutes shown as
# percentages -- confirm this is the intended display.
output$plot3 <- renderPlot({
bp<- ggplot(sliderValues(), aes(x="", y=Tiempo, fill=Actividad))+
geom_bar(width = 1, stat = "identity")+ coord_polar("y", start=0)+
scale_fill_brewer() + theme_minimal()+
theme(axis.text.x=element_blank()) +
geom_text(aes(y = Tiempo/7 + c(0, cumsum(Tiempo)[-length(Tiempo)]),
label = percent(Tiempo/100)), size=5)
bp
})
}
# Create Shiny app ----
shinyApp(ui, server)
|
/Calculadora_Tractores/app.R
|
no_license
|
aleszczuk/CostosCosecha
|
R
| false
| false
| 9,159
|
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Empty-travel distance on the road: elapsed time divided by speed.
dvvsc <- function(tvvsc, vvvsc) {
  tvvsc / vvvsc
}
dvvsc(80, 20)
# Empty travel on the road: time for a given distance at 88.3 (units as in
# the time study this constant comes from).
tvvsc <- function(dvvsc) {
  dvvsc / 88.3
}
# NOTE(review): the lines below are a stray, syntactically invalid fragment
# (top-level assignments ending in commas plus an unmatched closing
# parenthesis). They look like a leftover paste of the data.frame(...) body
# from the server's sliderValues reactive, and will stop this file from
# sourcing. They should be deleted once confirmed unused.
t2<-input$DE/88.2, #empty travel on the strip
tvvsl <- function(dvvsl){tvvsl=dvvsl/88.2
return(tvvsl)}
t3<- exp(1.96+0.62*log(input$VolT/(input$VolT/input$DE*100))), #travel while loading
t4<- exp(0.11+0.73*log(input$VolT)-0.29*log(input$VolI)), #loading
t5<- input$DE/75.4, # loaded travel on the strip
t6<- input$DSC/109.4, # loaded travel on the road
t7<- exp(0.11+(0.78*log(input$VolT))-(0.32*log(input$VolI)))
)
ui <- fluidPage(
# Titulo ----
titlePanel("Tiempos de actividades"),
# Definicion del menu de entrada y salida ----
sidebarLayout(
# Sidebar para demostrar varias opciones de menu ----
sidebarPanel(
# Input: Distancia de carreteo sobre camino ----
sliderInput("DSC", "Distancia de carreteo sobre camino:",
min = 50, max = 500,
value = 150),
# Input: Distancia de Extraccion ----
sliderInput("DE", "Distancia de extracci?n:",
min = 50, max = 500,
value = 250, step = 50),
# Input: Distancia entre vias de saca ----
# sliderInput("DVS", "Distancia entre v?as de saca:",
# min = 50, max = 400,
# value = 200, step = 25),
# Input: Volumen del rodal ----
selectInput(inputId = "VolumenRodal",
label = "Volumen a cosechar",
choices = sort(unique(c(50,100,150,200,250,300,350,400))),
multiple = F),
# Input: Volumen total de carga ----
sliderInput("VolT", "Volumen total de carga:",
min = 5, max = 20,
value = 10),
# Input: Volumen individual ----
sliderInput("VolI", "Volumen del producto:",
min = 0.05, max = 0.5,
value = 0.3, step = 0.1)
# Input: Potencia ----
#selectInput(inputId = "Pot",
# label = "Potencia del tractor",
# choices = sort(unique(datos$potencia)),
# multiple = FALSE,
# selected = 90),
#selectInput(inputId = "department",
# label = "Operaci?n",
# choices = sort(unique(datos$tipo_practica)),
# multiple = TRUE)
),
# Panel principal de salidas ----
mainPanel(
# Output: Tabla resumen de los valores de entrada ----
tableOutput("values"),
tableOutput("values2"),
# Output: grafico de las variables seleccionadas ----
#plotOutput("plot1"),
# Output: grafico con ggplot ----
plotOutput("plot2"),
#tableOutput2("values2")
plotOutput("plot3"),
tableOutput("values3")
)
)
)
# Define server logic for slider examples ----
server <- function(input, output) {
sliderValues3<- reactive({
data.frame(DE1 = as.double(c(d<-seq(50,500,by=50))),
PEF = as.double(c(pef <-(input$VolT/((input$DSC/88.3+
d/88.2+
exp(1.96+0.62*log(input$VolT/(input$VolT/input$DE*100)))+
exp(0.11+0.73*log(input$VolT)-0.29*log(input$VolI))+
d/75.4+ # viaje cargado sobre lote
input$DSC/109.4+ # Viaje cargado sobre camino
exp(0.11+(0.78*log(input$VolT))-(0.32*log(input$VolI))))/60))
)),
PEF2 = as.double(c(pef <-1.1*(input$VolT/((input$DSC/88.3+
d/88.2+
exp(1.96+0.62*log(input$VolT/(input$VolT/input$DE*100)))+
exp(0.11+0.73*log(input$VolT)-0.29*log(input$VolI))+
d/75.4+ # viaje cargado sobre lote
input$DSC/109.4+ # Viaje cargado sobre camino
exp(0.11+(0.78*log(input$VolT))-(0.32*log(input$VolI))))/60))
)),
PEF3 = as.double(c(pef <-0.9*(input$VolT/((input$DSC/88.3+
d/88.2+
exp(1.96+0.62*log(input$VolT/(input$VolT/input$DE*100)))+
exp(0.11+0.73*log(input$VolT)-0.29*log(input$VolI))+
d/75.4+ # viaje cargado sobre lote
input$DSC/109.4+ # Viaje cargado sobre camino
exp(0.11+(0.78*log(input$VolT))-(0.32*log(input$VolI))))/60))
)),
stringsAsFactors = FALSE
)
})
sliderValues2<- reactive({
data.frame(Actividad= c("Tiempo total"),
Tiempo = as.double(c(tt <-input$DSC/88.3+
input$DE/88.2+
exp(1.96+0.62*log(input$VolT/(input$VolT/input$DE*100)))+
exp(0.11+0.73*log(input$VolT)-0.29*log(input$VolI))+
input$DE/75.4+ # viaje cargado sobre lote
input$DSC/109.4+ # Viaje cargado sobre camino
exp(0.11+(0.78*log(input$VolT))-(0.32*log(input$VolI)))
)),
stringsAsFactors = FALSE
)
})
# Exprecion reactiva para crear los datos que van en la tabla ----
sliderValues <- reactive({
data.frame(
Actividad = c("Viaje vacio sobre camino:",
"Viaje vacio sobre lote:",
"Movimiento en la carga:",
"Carga:",
"Viaje cargado sobre lote:",
"Viaje cargado sobre camino:",
"Descarga:"),
Tiempo = as.double(c(t1<-input$DSC/88.3, #Viaje vacio sobre camino
t2<-input$DE/88.2, #Viaje vacio sobre lote
t3<- exp(1.96+0.62*log(input$VolT/(input$VolT/input$DE*100))), #Viaje mientras carga
t4<- exp(0.11+0.73*log(input$VolT)-0.29*log(input$VolI)), #Carga
t5<- input$DE/75.4, # viaje cargado sobre lote
t6<- input$DSC/109.4, # Viaje cargado sobre camino
t7<- exp(0.11+(0.78*log(input$VolT))-(0.32*log(input$VolI)))
)),
Unidad =as.character( c("Min",
"Min",
"Min",
"Min",
"Min",
"Min",
"Min")),
#Porcentajes =as.character( c(p1<-round(t1/tt*100),
# p2<-round(t2/tt*100),
# p3<-round(t3/tt*100),
# p4<-round(t4/tt*100),
# p5<-round(t5/tt*100),
# p6<-round(t6/tt*100),
# p7<-round(t7/tt*100)
# )),
stringsAsFactors = FALSE
# Generate a summary of the dataset
)
})
# Show the values in an HTML table ----
output$values <- renderTable({
sliderValues()
})
output$values2 <- renderTable({
sliderValues2()
})
# output$plot2<- renderPlot({
# ggplot(datos, aes(DE, PEF))+geom_point()+
# geom_point(aes(x=input$DSC,y=25), col="red", size= 5)+
# geom_smooth(method="lm", formula=y~x, col="black")
# })
output$plot2<- renderPlot({
ggplot(sliderValues3(), aes(x=DE1, y=PEF))+geom_point()+
geom_smooth(method="loess", formula=y~x, col="black")+
geom_line(aes(x=DE1, y=PEF2), col="red")+
geom_line(aes(x=DE1, y=PEF3), col="red")+
geom_point(aes(x=input$DE,y=mean(PEF)), col="red", size= 5)+
xlab("Distancia de Extraccion") + ylab("Productividad Efectiva") + # Set axis labels
ggtitle("Productividad de los tractorcitos") + # Set title
theme_bw()
})
output$plot3 <- renderPlot({
bp<- ggplot(sliderValues(), aes(x="", y=Tiempo, fill=Actividad))+
geom_bar(width = 1, stat = "identity")+ coord_polar("y", start=0)+
scale_fill_brewer() + theme_minimal()+
theme(axis.text.x=element_blank()) +
geom_text(aes(y = Tiempo/7 + c(0, cumsum(Tiempo)[-length(Tiempo)]),
label = percent(Tiempo/100)), size=5)
bp
})
}
# Create Shiny app ----
shinyApp(ui, server)
|
# plot2.R -- line chart of Global Active Power for 1-2 Feb 2007, written
# directly to plot2.png (Exploratory Data Analysis course project 1).
setwd("C:/Users/sbhowmi/Desktop/Self Learning/Exploratory Data Analyis/Course_Directory/Week 1/Git_Project/ExData_Plotting1")
par(mfrow = c(1,1)) # single plot panel
png(file = "plot2.png") # set output device
# "?" encodes missing values in this data set
hhpc <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", dec = ".", na.strings = "?") # read the data set
febData <- subset(hhpc, Date == "1/2/2007" | Date == "2/2/2007") # slicing data for days, 1/2/2007 & 2/2/2007
# plot data; the x axis is suppressed and drawn manually below
plot(febData$Global_active_power, ylab = "Global Active Power (Kilowatts)", type="l", xaxt="n", xlab = "")
axsLabels <- c('Thu', 'Fri', 'Sat') # custom x-axis labels
# ticks at the start, midpoint and end of the two-day window
axis(1, at = c(0,length(febData$Global_active_power)/2,length(febData$Global_active_power)), labels = axsLabels)
dev.off()
|
/plot2.R
|
no_license
|
saurish/ExData_Plotting1
|
R
| false
| false
| 739
|
r
|
setwd("C:/Users/sbhowmi/Desktop/Self Learning/Exploratory Data Analyis/Course_Directory/Week 1/Git_Project/ExData_Plotting1")
par(mfrow = c(1,1))
png(file = "plot2.png") # set output device
hhpc <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", dec = ".", na.strings = "?") # read the data set
febData <- subset(hhpc, Date == "1/2/2007" | Date == "2/2/2007") # slicing data for days, 1/2/2007 & 2/2/2007
# plot data
plot(febData$Global_active_power, ylab = "Global Active Power (Kilowatts)", type="l", xaxt="n", xlab = "")
axsLabels <- c('Thu', 'Fri', 'Sat') # custom x-axis labels
axis(1, at = c(0,length(febData$Global_active_power)/2,length(febData$Global_active_power)), labels = axsLabels)
dev.off()
|
# Plot CG solve time vs. matrix size for dense and sparse matrices, with
# O(n) and O(n^2) reference curves overlaid on a log10 y axis.
library(ggplot2)
require(scales)
require(dplyr)
data=read.csv("/Users/shirnschall/Desktop/Numerik2/plots/cg-dense-vs-sparse",header = TRUE ,sep = "\t")
# reference functions (quadratic and linear growth)
n <- seq(from=0.1,to=1250,by=0.1)
f <- function(a){
a*a
}
g <- function(a){
a
}
t<-c(f(n),g(n))
type<-c(rep("x*x",times=length(n)),
rep("x",times=length(n)))
density<-c(rep("n",times=length(n)),
rep("1",times=length(n)))
n<-c(n,n)
d = data.frame(n,t,type,density)
p <- ggplot(data,aes(x=n,y=t,color=type,group=type))+
geom_point(aes(shape = type)) +
#geom_path(aes(group = type))+
geom_smooth()+ # se=F would switch off the confidence band
theme_bw() +
labs(color = "Art der Matrix",group="Art der Matrix",linetype="Art der Matrix",shape="Art der Matrix")+
theme(
legend.position = c(.97, .03),
legend.justification = c("right", "bottom"),
legend.box.just = "right",
legend.margin = margin(6, 6, 6, 6)
)+
scale_y_log10()+
ylab("Zeit [\u03bcs]") +
xlab("Matrix (n\u00d7n)")+
# reference curves; show.legend replaces show_guide, which was deprecated
# in ggplot2 2.0.0 and later removed
geom_line(data = d, aes(x=n, y=t, group=density, colour=density),
show.legend = FALSE)
p
#ggsave("cg-dense-vs-sparse.png", units="in", width=5, height=4, dpi=300)
|
/plots/eigen-rafael.R
|
no_license
|
shirnschall/Numerik2
|
R
| false
| false
| 1,225
|
r
|
library(ggplot2)
require(scales)
require(dplyr)
data=read.csv("/Users/shirnschall/Desktop/Numerik2/plots/cg-dense-vs-sparse",header = TRUE ,sep = "\t")
#vergleichsfunktionen
n <- seq(from=0.1,to=1250,by=0.1)
f <- function(a){
a*a
}
g <- function(a){
a
}
t<-c(f(n),g(n))
type<-c(rep("x*x",times=length(n)),
rep("x",times=length(n)))
density<-c(rep("n",times=length(n)),
rep("1",times=length(n)))
n<-c(n,n)
d = data.frame(n,t,type,density)
p <- ggplot(data,aes(x=n,y=t,color=type,group=type))+
geom_point(aes(shape = type)) +
#geom_path(aes(group = type))+
geom_smooth()+ # argument se=F schaltet konvidenzintervall aus
theme_bw() +
labs(color = "Art der Matrix",group="Art der Matrix",linetype="Art der Matrix",shape="Art der Matrix")+
theme(
legend.position = c(.97, .03),
legend.justification = c("right", "bottom"),
legend.box.just = "right",
legend.margin = margin(6, 6, 6, 6)
)+
scale_y_log10()+
ylab("Zeit [\u03bcs]") +
xlab("Matrix (n\u00d7n)")+
#vergleichsfunktionen
geom_line(data = d, aes(x=n, y=t, group=density, colour=density),
show_guide = FALSE)
p
#ggsave("cg-dense-vs-sparse.png", units="in", width=5, height=4, dpi=300)
|
# Packages loaded for every chapter of the book.
# Keep this list minimal: chapter-specific packages (e.g. Matching for a
# section on matching) are loaded inside the chapters that need them, via
# library(<pkg>) in that chapter's code -- do not add them here.
bookwide_packages <- c(
  # bookdown / knitr tooling
  "bookdown", "knitr", "kableExtra", "gridExtra",
  # DeclareDesign family
  "estimatr", "fabricatr", "randomizr", "DeclareDesign", "DesignLibrary",
  # tidyverse core
  "ggplot2", "dplyr", "tidyr", "readr",
  "purrr", "tibble", "stringr", "forcats"
)
|
/scripts/package_list.R
|
no_license
|
snowdj/book-6
|
R
| false
| false
| 684
|
r
|
# this is a list of packages we will load for every chapter
# let's try to keep this to a minimum
# for many chapters, you will load special packages for them -- like if there's a section on matching it will do library(Matching) in the code
# don't add those chapter-specific packages here
bookwide_packages <-
c(
# bookdown and knitr related packages
"bookdown",
"knitr",
"kableExtra",
"gridExtra",
# DeclareDesign packages
"estimatr",
"fabricatr",
"randomizr",
"DeclareDesign",
"DesignLibrary",
# tidyverse packages
"ggplot2",
"dplyr",
"tidyr",
"readr",
"purrr",
"tibble",
"stringr",
"forcats"
)
|
# Code for running Hidden Markov Model
# install.packages("depmixS4")
# install.packages("HiddenMarkov")
# install.packages("WriteXLS")
# install.packages("writexl")
# install.packages("lubridate")
# install.packages("R.matlab")
# install.packages("raster")
# install.packages("tidyverse")
# install.packages("ggpubr")
rm(list = ls()) # start from a clean workspace
library(R.matlab)
library(raster)
library(tidyverse)
library(dplyr)
library(ggplot2)
library(ggpubr)
library(depmixS4) # hmm library
library(WriteXLS)
library(writexl)
library(lubridate)
# set the working directory holding the MATLAB inputs
setwd("D:/Academic Backups/PostDoc-Usask/PB_files/Analysis/Data_paper_revision/Upper_Lower/HMM")
# Load the annual EFE state matrices exported from MATLAB.
# Rows are sites; columns are the 30 annual values -- TODO confirm layout
# against the MATLAB export.
EFE_full <- readMat("EFE_state_annual_50.mat")
EFE<-EFE_full[["EFE.median.annual"]]
basin<-EFE_full[["basin.hist"]]
basin<-basin[,1]
EFE_viol=EFE_full[["EFE.vioYear.median"]]
# creating date range: one date per year, 1976 onward, 30 years
startDate<-as.Date("1976/01/01")
YearMonth<-seq(startDate, by="year", length.out = 30)
#
# # assigning lower boundary violation as 1 and no violation/upper violation as 0
# EFE_viol=ifelse(EFE<0, 1, 0)
# initialization: per-site accumulators grown with rbind inside the loop
violation_stay_prob <- NULL
violation_shift_prob <- NULL
violation_shift_prob1 <- NULL
noviolation_shift_prob<-NULL
noviolation_shift_prob1<-NULL
# viol_to_viol <- NULL
# viol_to_noviol <- NULL
# noviol_to_viol <- NULL
basin_sele<-NULL
rowidx<-NULL
# Loop over sites: for each row of EFE, compute empirical stay/shift
# probabilities between violation (1) and non-violation (0) states, fit a
# 2-state HMM (depmixS4), and append the probabilities to the accumulators.
for (row in 1:nrow(EFE)){
# extracting EFE values for one site
# EFE_run <- EFE_h08_viol[row,]
data1<-data.frame("EFE_state"=EFE_viol[row,],"Date"=YearMonth,"EFE_vio"= EFE[row,])
# diff==0: same state next year; diff==1: 0->1 shift; diff==-1: 1->0 shift
data1$diff<-c(diff(data1$EFE_state),NA)
# probability of staying in lower bound violation
# (if there is a violation in t-1 time step)
p_stay_violate<- length(which(data1$diff==0 & data1$EFE_state==1))/
length(which(data1$EFE_state==1))
# # new formula
# p_stay_violate<- length(which(data1$diff==0 & data1$EFE_state==1))/
# (nrow(EFE_viol)-1)
#
# sites with no violation years give 0/0 = NaN; treat as 0
p_stay_violate[is.nan(p_stay_violate)] <- 0
violation_stay_prob <- rbind(violation_stay_prob, p_stay_violate)
# probability of switching to a low flow state from no violation state
p_shift_vio<- length(which(data1$diff==1 & data1$EFE_state==0))/
length(which(data1$EFE_state==0))
# p_shift_vio<- length(which(data1$diff==1 & data1$EFE_state==0))/
# (nrow(EFE_viol)-1)
#
# sentinel -1 marks sites that are almost always in violation
p_shift_vio=ifelse(p_stay_violate>0.95, -1, p_shift_vio)
p_shift_vio[is.nan(p_shift_vio)] <- 0
violation_shift_prob <- rbind(violation_shift_prob, p_shift_vio)
# probability to switch from a violated state to a non violated state
# out of all non violated
p_shift_novio<- length(which(data1$diff==-1 & data1$EFE_state==1))/
length(which(data1$EFE_state==0))
# probability to switch from a violated state to a non violated state
# out of all shift
# NOTE(review): the hardcoded 29 assumes a 30-year series (30-1 transitions)
# -- keep in sync with the length.out of YearMonth above.
p_shift_novio1<- length(which(data1$diff==-1 & data1$EFE_state==1))/
(29-length(which(data1$diff==0)))
p_shift_novio1=ifelse(p_stay_violate==0, -1, p_shift_novio1)
p_shift_novio1[is.nan(p_shift_novio1)] <- 0
p_shift_novio=ifelse(p_stay_violate==0, -1, p_shift_novio)
p_shift_novio[is.nan(p_shift_novio)] <- 0
noviolation_shift_prob <- rbind(noviolation_shift_prob, p_shift_novio)
# noviolation_shift_prob1 <- rbind(noviolation_shift_prob1, p_shift_novio1)
# plt=ggplot(data1,aes(YearMonth,EFE_state)) + geom_line()
# plt
# <OPEN forrunning HMM>
# 2-state HMM on the binary violation series
mod<-depmix(EFE_state ~1,
nstates = 2,
transition = ~1,
family = binomial(),
data=data1)
# refit several times from random start values and keep the lowest-AIC fit
best <-1.0e10
best_model=NA
# loop for n number of iterations
iter<-25 # number of iterations <change as per need>
for(i in 1:iter){
# fitting
fitmod<-fit(mod)
# # summary(fitmod)
# check for best solution (lower AIC is better)
if(AIC(fitmod)< best){
best_model<- fitmod
best<- AIC(fitmod)
}
}
# NOTE(review): best_model/best are computed but never used below -- the
# exported table only contains the empirical probabilities. Confirm whether
# the HMM transition probabilities (commented block) should be restored.
# # most probable state
# prstates<- apply(posterior(fitmod)[,c("S1","S2")],1,which.max)
# plot(prstates,type="b")
# transition prob
# s1 is violated state and s2 is non violated state
# s1_to_s1<-best_model@trDens[1]
# s2_to_s1<-best_model@trDens[3]
# s1_to_s2<-best_model@trDens[2]
#
# viol_to_viol[row] <- s1_to_s1
# noviol_to_viol[row] <- s2_to_s1
# viol_to_noviol[row] <- s1_to_s2
#
# viol_to_viol <- rbind(viol_to_viol, s1_to_s1)
# noviol_to_viol <- rbind(noviol_to_viol, s2_to_s1)
# viol_to_noviol <- rbind(viol_to_noviol, s1_to_s2)
x<-basin[row]
basin_sele<-rbind(basin_sele, x)
rowidx<-rbind(rowidx, row)
a<-"RUN COMPLETE FOR SITE "
b <- print(paste(a,row))
# output_final<-data.frame("row"=rowidx,"basin_id"=basin_sele,"noVio_to_Vio"=noviol_to_viol,"Vio_to_noVio"=viol_to_noviol,
# "Vio_to_Vio"= viol_to_viol,"viol_stay_prob"=violation__stay_prob)
#
# write_xlsx(output_final,"C:\\Dropbox\\PB_files\\Analysis\\Data_MattisGroup\\EFE data\\HMM_RStudio\\Output_final_annual.xlsx")
# NOTE(review): the workbook is rewritten on every iteration; this works as
# a checkpoint but is slow -- consider moving the write after the loop.
output_final<-data.frame("row"=rowidx,"basin_id"=basin_sele,
"viol_stay_prob"=violation_stay_prob,
"viol_shift_prob"=violation_shift_prob,
"noViol_shift_prob"=noviolation_shift_prob)
write_xlsx(output_final,"D:\\Academic Backups\\PostDoc-Usask\\PB_files\\Analysis\\Data_paper_revision\\Upper_Lower\\HMM\\Prob_final_annual_new.xlsx")
}
# violation_prob=violation_prob[1:96]
#
# output_final<-data.frame("row"=rowidx,"basin_id"=basin_sele,"noVio_to_Vio"=noviol_to_viol,"Vio_to_noVio"=viol_to_noviol,
# "Vio_to_Vio"= viol_to_viol,"viol_prob"=violation_prob)
#
# write_xlsx(output_final,"C:\\Dropbox\\PB_files\\Analysis\\Data_MattisGroup\\EFE data\\HMM_RStudio\\Output_final362_457.xlsx")
# data1$viol_prob<-violation_prob
|
/Code/EF_violation_estimation/R_Script_HMMAnnual.R
|
no_license
|
ChinchuMohan/Eflows-Biodiversity-Project
|
R
| false
| false
| 6,010
|
r
|
# Code for running Hidden Markov Model
# install.packages("depmixS4")
# install.packages("HiddenMarkov")
# install.packages("WriteXLS")
# install.packages("writexl")
# install.packages("lubridate")
# install.packages("R.matlab")
# install.packages("raster")
# install.packages("tidyverse")
# install.packages("ggpubr")
rm(list = ls())
library(R.matlab)
library(raster)
library(tidyverse)
library(dplyr)
library(ggplot2)
library(ggpubr)
library(depmixS4) # hmm library
library(WriteXLS)
library(writexl)
library(lubridate)
# settig WD
setwd("D:/Academic Backups/PostDoc-Usask/PB_files/Analysis/Data_paper_revision/Upper_Lower/HMM")
EFE_full <- readMat("EFE_state_annual_50.mat")
EFE<-EFE_full[["EFE.median.annual"]]
basin<-EFE_full[["basin.hist"]]
basin<-basin[,1]
EFE_viol=EFE_full[["EFE.vioYear.median"]]
# creating date range
startDate<-as.Date("1976/01/01")
YearMonth<-seq(startDate, by="year", length.out = 30)
#
# # assigning lower boundary violation as 1 and no violation/upper violation as 0
# EFE_viol=ifelse(EFE<0, 1, 0)
#initialization
violation_stay_prob <- NULL
violation_shift_prob <- NULL
violation_shift_prob1 <- NULL
noviolation_shift_prob<-NULL
noviolation_shift_prob1<-NULL
# viol_to_viol <- NULL
# viol_to_noviol <- NULL
# noviol_to_viol <- NULL
basin_sele<-NULL
rowidx<-NULL
# loop for sites
for (row in 1:nrow(EFE)){
# extracting EFE values for one site
# EFE_run <- EFE_h08_viol[row,]
data1<-data.frame("EFE_state"=EFE_viol[row,],"Date"=YearMonth,"EFE_vio"= EFE[row,])
data1$diff<-c(diff(data1$EFE_state),NA)
# probability of staying in lower bound violation
# (if there is a violation in t-1 time step)
p_stay_violate<- length(which(data1$diff==0 & data1$EFE_state==1))/
length(which(data1$EFE_state==1))
# # new formula
# p_stay_violate<- length(which(data1$diff==0 & data1$EFE_state==1))/
# (nrow(EFE_viol)-1)
#
#changing NAN to 0
p_stay_violate[is.nan(p_stay_violate)] <- 0
violation_stay_prob <- rbind(violation_stay_prob, p_stay_violate)
# probability of switching to a low flow state from no violation state
p_shift_vio<- length(which(data1$diff==1 & data1$EFE_state==0))/
length(which(data1$EFE_state==0))
# p_shift_vio<- length(which(data1$diff==1 & data1$EFE_state==0))/
# (nrow(EFE_viol)-1)
#
p_shift_vio=ifelse(p_stay_violate>0.95, -1, p_shift_vio)
p_shift_vio[is.nan(p_shift_vio)] <- 0
violation_shift_prob <- rbind(violation_shift_prob, p_shift_vio)
# probability to switch from a violated state to a non violated state
# out of all non violated
p_shift_novio<- length(which(data1$diff==-1 & data1$EFE_state==1))/
length(which(data1$EFE_state==0))
# probability to switch from a violated state to a non violated state
# out of all shift
p_shift_novio1<- length(which(data1$diff==-1 & data1$EFE_state==1))/
(29-length(which(data1$diff==0)))
p_shift_novio1=ifelse(p_stay_violate==0, -1, p_shift_novio1)
p_shift_novio1[is.nan(p_shift_novio1)] <- 0
p_shift_novio=ifelse(p_stay_violate==0, -1, p_shift_novio)
p_shift_novio[is.nan(p_shift_novio)] <- 0
noviolation_shift_prob <- rbind(noviolation_shift_prob, p_shift_novio)
# noviolation_shift_prob1 <- rbind(noviolation_shift_prob1, p_shift_novio1)
# plt=ggplot(data1,aes(YearMonth,EFE_state)) + geom_line()
# plt
# <OPEN forrunning HMM>
# hmm
mod<-depmix(EFE_state ~1,
nstates = 2,
transition = ~1,
family = binomial(),
data=data1)
# iterations for random start values
best <-1.0e10
best_model=NA
# loop for n number of iterations
iter<-25 # number of iterations <change as per need>
for(i in 1:iter){
# fitting
fitmod<-fit(mod)
# # summary(fitmod)
# check for best solution
if(AIC(fitmod)< best){
best_model<- fitmod
best<- AIC(fitmod)
}
}
# # most probable state
# prstates<- apply(posterior(fitmod)[,c("S1","S2")],1,which.max)
# plot(prstates,type="b")
# transition prob
# s1 is violated state and s2 is non violated state
# s1_to_s1<-best_model@trDens[1]
# s2_to_s1<-best_model@trDens[3]
# s1_to_s2<-best_model@trDens[2]
#
# viol_to_viol[row] <- s1_to_s1
# noviol_to_viol[row] <- s2_to_s1
# viol_to_noviol[row] <- s1_to_s2
#
# viol_to_viol <- rbind(viol_to_viol, s1_to_s1)
# noviol_to_viol <- rbind(noviol_to_viol, s2_to_s1)
# viol_to_noviol <- rbind(viol_to_noviol, s1_to_s2)
x<-basin[row]
basin_sele<-rbind(basin_sele, x)
rowidx<-rbind(rowidx, row)
a<-"RUN COMPLETE FOR SITE "
b <- print(paste(a,row))
# output_final<-data.frame("row"=rowidx,"basin_id"=basin_sele,"noVio_to_Vio"=noviol_to_viol,"Vio_to_noVio"=viol_to_noviol,
# "Vio_to_Vio"= viol_to_viol,"viol_stay_prob"=violation__stay_prob)
#
# write_xlsx(output_final,"C:\\Dropbox\\PB_files\\Analysis\\Data_MattisGroup\\EFE data\\HMM_RStudio\\Output_final_annual.xlsx")
output_final<-data.frame("row"=rowidx,"basin_id"=basin_sele,
"viol_stay_prob"=violation_stay_prob,
"viol_shift_prob"=violation_shift_prob,
"noViol_shift_prob"=noviolation_shift_prob)
write_xlsx(output_final,"D:\\Academic Backups\\PostDoc-Usask\\PB_files\\Analysis\\Data_paper_revision\\Upper_Lower\\HMM\\Prob_final_annual_new.xlsx")
}
# violation_prob=violation_prob[1:96]
#
# output_final<-data.frame("row"=rowidx,"basin_id"=basin_sele,"noVio_to_Vio"=noviol_to_viol,"Vio_to_noVio"=viol_to_noviol,
# "Vio_to_Vio"= viol_to_viol,"viol_prob"=violation_prob)
#
# write_xlsx(output_final,"C:\\Dropbox\\PB_files\\Analysis\\Data_MattisGroup\\EFE data\\HMM_RStudio\\Output_final362_457.xlsx")
# data1$viol_prob<-violation_prob
|
# NOT RUN {
# Minimal ECharts2Shiny demo: radar chart of five features across three types.
library(shiny)
# install.packages('ECharts2Shiny')
library(ECharts2Shiny)
# Demo scores; each row becomes one axis of the radar chart.
dat <- data.frame(Type.A = c(4300, 10000, 25000, 35000, 50000),
                  Type.B = c(5000, 14000, 28000, 31000, 42000),
                  Type.C = c(4000, 2000, 9000, 29000, 35000))
# Fix: the first axis label was misspelled "Feture 1".
row.names(dat) <- c("Feature 1", "Feature 2", "Feature 3", "Feature 4", "Feature 5")
# Server function -------------------------------------------
server <- function(input, output) {
  renderRadarChart(div_id = "test",
                   data = dat)
}
# UI layout -------------------------------------------------
ui <- fluidPage(
  # We MUST load the ECharts javascript library in advance
  loadEChartsLibrary(),
  tags$div(id="test", style="width:50%;height:400px;"),
  deliverChart(div_id = "test")
)
# Run the application --------------------------------------
shinyApp(ui = ui, server = server)
|
/echarts_integration.r
|
permissive
|
ShounakRay/Stanford-COVIDVax
|
R
| false
| false
| 882
|
r
|
# NOT RUN {
library(shiny)
# install.packages('ECharts2Shiny')
library(ECharts2Shiny)
dat <- data.frame(Type.A = c(4300, 10000, 25000, 35000, 50000),
Type.B = c(5000, 14000, 28000, 31000, 42000),
Type.C = c(4000, 2000, 9000, 29000, 35000))
row.names(dat) <- c("Feture 1", "Feature 2", "Feature 3", "Feature 4", "Feature 5")
# Server function -------------------------------------------
server <- function(input, output) {
renderRadarChart(div_id = "test",
data = dat)
}
# UI layout -------------------------------------------------
ui <- fluidPage(
# We MUST load the ECharts javascript library in advance
loadEChartsLibrary(),
tags$div(id="test", style="width:50%;height:400px;"),
deliverChart(div_id = "test")
)
# Run the application --------------------------------------
shinyApp(ui = ui, server = server)
|
# 1.Read dataset
data_full <- read.csv("./household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
# 2.Subsetting the data based on dates
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
# 3.Converting date and time
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
# 4.Generate Plot 2
plot(data$Global_active_power~data$Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
# 5.Saving to png file
dev.copy(png, file="plot2.png", height=480, width=480)
# 6.Close Dev
dev.off()
|
/plot2.R
|
no_license
|
marklcl/ExData_Plotting1
|
R
| false
| false
| 757
|
r
|
# 1.Read dataset
data_full <- read.csv("./household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
# 2.Subsetting the data based on dates
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
# 3.Converting date and time
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
# 4.Generate Plot 2
plot(data$Global_active_power~data$Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
# 5.Saving to png file
dev.copy(png, file="plot2.png", height=480, width=480)
# 6.Close Dev
dev.off()
|
# Read the Newick tree, unroot it, and write it back out so downstream
# codeml runs receive an unrooted topology.
library(ape)
testtree <- read.tree("13324_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="13324_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/13324_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 137
|
r
|
library(ape)
testtree <- read.tree("13324_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="13324_0_unrooted.txt")
|
# NOTE(review): these install.packages calls run on every execution; they
# should normally be commented out (or guarded) once the packages are installed.
install.packages("readxl")
install.packages("tidyverse")
library(readxl)
library(tidyverse)
# import the callback information (some cells empty)
callbacks <- read.csv(file="~/Desktop/REACH/Data/callback_merged_empty cells.csv", head=T, dec=".", sep=",")
# import the current cleaned survey data, keeping a pristine copy in som_old
som<-read.csv(file="~/Desktop/REACH/Data/SOM_MSNA2020_Merged_2020-08-30_v4_clean_data.csv", head=T, dec=".", sep=",")
som_old<-som
###########REFORMAT################################################################################################################################
# carry over the single-answer columns unchanged
# NOTE(review): the column ranges (1:4, 16:19, 6:30) are positional and
# depend on the callback CSV layout -- verify if that file changes.
call<-callbacks[,1:4]
call[,5:8]<-callbacks[,16:19]
# collapse each multi-select question's columns into one space-separated string
call$lack_enclosure <- paste0(callbacks[,6]," ", callbacks[,7])
call$shelter_damage <- paste0(callbacks[,8]," ", callbacks[,9]," ", callbacks[,10])
call$unable_repair <- paste0(callbacks[,11]," ", callbacks[,12])
call$shelter_issues <- paste0(callbacks[,13]," ", callbacks[,14]," ", callbacks[,15])
call$shetler_support <- paste0(callbacks[,28]," ", callbacks[,29]," ", callbacks[,30])
call$hlp_problems <- paste0(callbacks[,20]," ", callbacks[,21])
call$nfi_access <- paste0(callbacks[,22]," ", callbacks[,23]," ", callbacks[,24])
call$nfi_market <- paste0(callbacks[,25]," ", callbacks[,26]," ", callbacks[,27])
###########CREATE DUMMIES##############################################################################################################################################
# Expand a space-separated multi-select column into 0/1 indicator columns.
#
# x:  name of the multi-select column in `df` (first matching column name).
# df: data frame holding that column; other columns are ignored.
#
# Returns a data frame with one column per unique option, named "<x>.<option>",
# in order of first appearance. Rows whose source value is blank are set to ""
# across all indicator columns (coercing them to character), matching the
# original behaviour.
#
# Fix: the original tested grepl(option, value, fixed = TRUE), so an option
# that is a substring of another (e.g. "none" inside "none_of") was counted
# as present whenever the longer option appeared. Options are now matched
# as whole whitespace-separated tokens.
make_dummies <- function(x, df) {
  colnum <- grep(x, colnames(df), fixed = TRUE)[1]
  # unique options, in order of first appearance across all rows
  uni <- unique(scan(text = df[, colnum], what = ""))
  # per-row token lists for exact membership tests
  tokens <- strsplit(as.character(df[, colnum]), "[[:space:]]+")
  l_2 <- length(df)
  for (i in seq_along(uni)) {
    df[, l_2 + i] <- vapply(tokens, function(tok) as.integer(uni[i] %in% tok), integer(1))
    names(df)[l_2 + i] <- paste0(x, ".", uni[i])
  }
  sub_df <- df[, c(x, paste0(x, ".", uni))]
  # blank source values: clear the whole indicator row
  empty_value <- sub_df[, x] %in% c(" ", "", "  ")
  sub_df[empty_value, ] <- ""
  sub_df <- sub_df[, -1]
  return(sub_df)
}
#create data frames with binaries
# One indicator data frame per multi-select question.
dum1<-make_dummies("lack_enclosure", call)
dum2<-make_dummies("shelter_damage", call)
dum3<-make_dummies("unable_repair", call)
dum4<-make_dummies("shelter_issues", call)
dum5<-make_dummies("shetler_support", call)
dum6<-make_dummies("hlp_problems", call)
dum7<-make_dummies("nfi_access", call)
dum8<-make_dummies("nfi_market", call)
#merge all binaries with the other variables
# NOTE(review): dum8 (nfi_market) is built above but NOT included in this
# cbind, so its indicators never reach `call_all` — confirm whether that is
# intentional. Adding it would shift the hardcoded column indices used below.
call_all <- cbind(call, dum1,dum2, dum3,dum4, dum5, dum6, dum7)
#######SPELLING MISTAKES#########################################################################################################
#find spelling mistakes
# The lines below are interactive checks (they just print to the console):
# each compares a column name generated from the callback tokens against the
# columns present in `som`, to spot misspelled option labels.
col<-colnames(call_all)
som[c(col[1:11])]
som[c(col[116])]
colnames(call_all[116])
som[c(col[117:118])]
som[c(col[1:70])]
som[c(col[71])]
colnames(call_all[71])
#shetler_support.solating_panel
som[c(col[72])]
colnames(call_all[73])
#shetler_support.mosqutig-_Net
som[c(col[74:80])]
colnames(call_all[81])
#shetler_support.none
som[c(col[82:96])]
colnames(call_all[97])
#hlp_problems.cookting_utensils
colnames(call_all[98])
#hlp_problems.beddting_items
colnames(call_all[99])
#hlp_problems.wateting_containers
som[c(col[100:109])]
colnames(call_all[110])
#nfi_access.beddting_items
som[c(col[111])]
colnames(call_all[112])
#nfi_access.shoes
som[c(col[113:122])]
colnames(call_all[123])
#nfi_access.bedding
colnames(call_all[124])
#nfi_access.items
som[c(col[125])]
colnames(call_all[126])
#nfi_access.none_of
som[c(col[127:128])]
# Positions of the misspelled columns, used by the optional drop below.
which(colnames(call_all)=="shetler_support.none")
which(colnames(call_all)=="nfi_access.beddting_items")
which(colnames(call_all)=="nfi_access.none_of")
#if needed: delete spelling mistakes to make the replacement run
#call_all<-call_all[,-c(81,110,126)]
#######REPLACE values in data set with callbacks values#####################################################################################################################
# Overwrite the relevant columns of `som` with the cleaned callback values,
# matching rows by their uuid.
col<-colnames(call_all)
l=length(call_all)
nro<-nrow(call_all)
# Locate every callback row in `som` in one vectorised pass instead of one
# which() scan per row; `w` holds the same row indices as before and is
# reused by the debugging checks further down.
w <- match(call_all$X_uuid, som$X_uuid)
# Fail with a clear message if a callback uuid is absent from `som` (the old
# per-row which() version stopped here with a cryptic replacement error).
if (anyNA(w)) stop("some callback X_uuid values were not found in `som`")
for (i in 1:nro){
  for (j in 1:l){
    som[w[i],col[j]]<-call_all[i,j]
  }
}
######debugging######################################################################################################################
#check = columns to compare
# Spot-check: compare the first updated row of `som` against the untouched
# copy `som_old` and against the callback source, over a slice of columns.
check<-c(110:116)
som[w[1],col[check]]
som_old[w[1],col[check]]
call_all[1,check]
#######EXPORT CLEANED DATA###################################################################################################################
# write_xlsx() is provided by the writexl package (readxl only *reads* xlsx);
# without loading it the export below fails with "could not find function".
library(writexl)
# Timestamp suffix, e.g. "_2020_Sep_01", appended to both output file names.
today <- Sys.Date()
today<-format(today, format="_%Y_%b_%d")
write_xlsx(som, paste0("~/Desktop/REACH/Data/SOM_MSNA2020_Merged_2020-08-30_v4_clean_data_incl_callbacks",today,".xlsx"))
write.csv(som, file= paste0("~/Desktop/REACH/Data/SOM_MSNA2020_Merged_2020-08-30_v4_clean_data_incl_callbacks",today,".csv"), row.names=FALSE)
|
/FeedingInCallbacks.R
|
no_license
|
causeri3/dataCleaningSOM20
|
R
| false
| false
| 4,879
|
r
|
# --- Setup: load packages and import raw data -------------------------------
# NOTE(review): install.packages() runs on every execution; consider guarding
# with require() or removing once the packages are installed.
install.packages("readxl")
install.packages("tidyverse")
library(readxl)
library(tidyverse)
#import the callback information
# Hardcoded local path — TODO confirm this runs only on the analyst's machine.
callbacks <- read.csv(file="~/Desktop/REACH/Data/callback_merged_empty cells.csv", head=T, dec=".", sep=",")
#import the actual version of the cleaned data twice
som<-read.csv(file="~/Desktop/REACH/Data/SOM_MSNA2020_Merged_2020-08-30_v4_clean_data.csv", head=T, dec=".", sep=",")
# Keep an untouched copy for the before/after comparison in the debugging section.
som_old<-som
###########REFORMAT################################################################################################################################
#take over columns which are not multiple
# Columns 1-4 and 16-19 of `callbacks` are single-valued; copied as-is.
call<-callbacks[,1:4]
call[,5:8]<-callbacks[,16:19]
#merge multiple columns to one, values separated by " " and add to new data frame
# Each multi-select question was split over several spreadsheet columns;
# re-join them into one space-separated string per question.
# NOTE(review): "shetler_support" is presumably a typo for "shelter_support",
# but it must match the column names in `som` — confirm before renaming.
call$lack_enclosure <- paste0(callbacks[,6]," ", callbacks[,7])
call$shelter_damage <- paste0(callbacks[,8]," ", callbacks[,9]," ", callbacks[,10])
call$unable_repair <- paste0(callbacks[,11]," ", callbacks[,12])
call$shelter_issues <- paste0(callbacks[,13]," ", callbacks[,14]," ", callbacks[,15])
call$shetler_support <- paste0(callbacks[,28]," ", callbacks[,29]," ", callbacks[,30])
call$hlp_problems <- paste0(callbacks[,20]," ", callbacks[,21])
call$nfi_access <- paste0(callbacks[,22]," ", callbacks[,23]," ", callbacks[,24])
call$nfi_market <- paste0(callbacks[,25]," ", callbacks[,26]," ", callbacks[,27])
###########CREATE DUMMIES##############################################################################################################################################
#create binary columns from multiple
make_dummies <- function(x, df) {
  # Expand the space-separated multi-select column `x` of `df` into one
  # 0/1 indicator column per distinct token, each named "<x>.<token>".
  # Rows whose original value is blank are reset to "" across all returned
  # columns, and the original column itself is dropped from the result.
  target <- grep(x, colnames(df))[1]            # first column whose name matches `x`
  tokens <- unique(scan(text = df[, target], what = ""))  # distinct answer tokens
  n_cols <- length(df)
  for (k in 1:length(tokens)) {
    # fixed = T: match the token literally, not as a regular expression
    indicator <- (grepl(tokens[k], df[, target], fixed = T) * 1)
    df[, n_cols + k] <- indicator
    names(df)[n_cols + k] <- paste0(x, ".", tokens[k])
  }
  # Keep only the source column plus its indicators, blank out rows that
  # had no answer, then drop the source column.
  result <- df[, c(x, paste0(x, ".", tokens))]
  blank <- result[, x] %in% c(" ", "", " ")
  result[blank, ] <- ""
  result[, -1]
}
#create data frames with binaries
# One indicator data frame per multi-select question.
dum1<-make_dummies("lack_enclosure", call)
dum2<-make_dummies("shelter_damage", call)
dum3<-make_dummies("unable_repair", call)
dum4<-make_dummies("shelter_issues", call)
dum5<-make_dummies("shetler_support", call)
dum6<-make_dummies("hlp_problems", call)
dum7<-make_dummies("nfi_access", call)
dum8<-make_dummies("nfi_market", call)
#merge all binaries with the other variables
# NOTE(review): dum8 (nfi_market) is built above but NOT included in this
# cbind, so its indicators never reach `call_all` — confirm whether that is
# intentional. Adding it would shift the hardcoded column indices used below.
call_all <- cbind(call, dum1,dum2, dum3,dum4, dum5, dum6, dum7)
#######SPELLING MISTAKES#########################################################################################################
#find spelling mistakes
# The lines below are interactive checks (they just print to the console):
# each compares a column name generated from the callback tokens against the
# columns present in `som`, to spot misspelled option labels.
col<-colnames(call_all)
som[c(col[1:11])]
som[c(col[116])]
colnames(call_all[116])
som[c(col[117:118])]
som[c(col[1:70])]
som[c(col[71])]
colnames(call_all[71])
#shetler_support.solating_panel
som[c(col[72])]
colnames(call_all[73])
#shetler_support.mosqutig-_Net
som[c(col[74:80])]
colnames(call_all[81])
#shetler_support.none
som[c(col[82:96])]
colnames(call_all[97])
#hlp_problems.cookting_utensils
colnames(call_all[98])
#hlp_problems.beddting_items
colnames(call_all[99])
#hlp_problems.wateting_containers
som[c(col[100:109])]
colnames(call_all[110])
#nfi_access.beddting_items
som[c(col[111])]
colnames(call_all[112])
#nfi_access.shoes
som[c(col[113:122])]
colnames(call_all[123])
#nfi_access.bedding
colnames(call_all[124])
#nfi_access.items
som[c(col[125])]
colnames(call_all[126])
#nfi_access.none_of
som[c(col[127:128])]
# Positions of the misspelled columns, used by the optional drop below.
which(colnames(call_all)=="shetler_support.none")
which(colnames(call_all)=="nfi_access.beddting_items")
which(colnames(call_all)=="nfi_access.none_of")
#if needed: delete spelling mistakes to make the replacement run
#call_all<-call_all[,-c(81,110,126)]
#######REPLACE values in data set with callbacks values#####################################################################################################################
# Overwrite the relevant columns of `som` with the cleaned callback values,
# matching rows by their uuid.
col<-colnames(call_all)
l=length(call_all)
nro<-nrow(call_all)
# Locate every callback row in `som` in one vectorised pass instead of one
# which() scan per row; `w` holds the same row indices as before and is
# reused by the debugging checks further down.
w <- match(call_all$X_uuid, som$X_uuid)
# Fail with a clear message if a callback uuid is absent from `som` (the old
# per-row which() version stopped here with a cryptic replacement error).
if (anyNA(w)) stop("some callback X_uuid values were not found in `som`")
for (i in 1:nro){
  for (j in 1:l){
    som[w[i],col[j]]<-call_all[i,j]
  }
}
######debugging######################################################################################################################
#check = columns to compare
# Spot-check: compare the first updated row of `som` against the untouched
# copy `som_old` and against the callback source, over a slice of columns.
check<-c(110:116)
som[w[1],col[check]]
som_old[w[1],col[check]]
call_all[1,check]
#######EXPORT CLEANED DATA###################################################################################################################
# write_xlsx() is provided by the writexl package (readxl only *reads* xlsx);
# without loading it the export below fails with "could not find function".
library(writexl)
# Timestamp suffix, e.g. "_2020_Sep_01", appended to both output file names.
today <- Sys.Date()
today<-format(today, format="_%Y_%b_%d")
write_xlsx(som, paste0("~/Desktop/REACH/Data/SOM_MSNA2020_Merged_2020-08-30_v4_clean_data_incl_callbacks",today,".xlsx"))
write.csv(som, file= paste0("~/Desktop/REACH/Data/SOM_MSNA2020_Merged_2020-08-30_v4_clean_data_incl_callbacks",today,".csv"), row.names=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vivekn-sentiment-detector.R
\name{nlp_vivekn_sentiment_detector}
\alias{nlp_vivekn_sentiment_detector}
\title{Spark NLP ViveknSentimentApproach}
\usage{
nlp_vivekn_sentiment_detector(x, input_cols, output_col, sentiment_col,
prune_corpus = NULL, feature_limit = NULL,
unimportant_feature_step = NULL, important_feature_ratio = NULL,
uid = random_string("vivekn_sentiment_detector_"))
}
\arguments{
\item{x}{A \code{spark_connection}, \code{ml_pipeline}, or a \code{tbl_spark}.}
\item{input_cols}{Input columns. String array.}
\item{output_col}{Output column. String.}
\item{sentiment_col}{Column with sentiment analysis row’s result for training.}
\item{prune_corpus}{when training on small data you may want to disable this to not cut off infrequent words}
\item{feature_limit}{content feature limit, to boost performance in very dirty text (default: disabled with -1)}

\item{unimportant_feature_step}{proportion to lookahead in unimportant features (default: 0.1)}

\item{important_feature_ratio}{proportion of feature content to be considered relevant (default: 0.5)}
\item{uid}{A character string used to uniquely identify the ML estimator.}
\item{...}{Optional arguments, see Details.}
}
\value{
The object returned depends on the class of \code{x}.
\itemize{
\item \code{spark_connection}: When \code{x} is a \code{spark_connection}, the function returns an instance of a \code{ml_estimator} object. The object contains a pointer to
a Spark \code{Estimator} object and can be used to compose
\code{Pipeline} objects.
\item \code{ml_pipeline}: When \code{x} is a \code{ml_pipeline}, the function returns a \code{ml_pipeline} with
the NLP estimator appended to the pipeline.
\item \code{tbl_spark}: When \code{x} is a \code{tbl_spark}, an estimator is constructed then
immediately fit with the input \code{tbl_spark}, returning an NLP model.
}
}
\description{
Spark ML estimator that scores a sentence for a sentiment
See \url{https://nlp.johnsnowlabs.com/docs/en/annotators#viveknsentimentdetector}
}
|
/man/nlp_vivekn_sentiment_detector.Rd
|
permissive
|
mstei4176/sparknlp
|
R
| false
| true
| 1,895
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vivekn-sentiment-detector.R
\name{nlp_vivekn_sentiment_detector}
\alias{nlp_vivekn_sentiment_detector}
\title{Spark NLP ViveknSentimentApproach}
\usage{
nlp_vivekn_sentiment_detector(x, input_cols, output_col, sentiment_col,
prune_corpus = NULL, feature_limit = NULL,
unimportant_feature_step = NULL, important_feature_ratio = NULL,
uid = random_string("vivekn_sentiment_detector_"))
}
\arguments{
\item{x}{A \code{spark_connection}, \code{ml_pipeline}, or a \code{tbl_spark}.}
\item{input_cols}{Input columns. String array.}
\item{output_col}{Output column. String.}
\item{sentiment_col}{Column with sentiment analysis row’s result for training.}
\item{prune_corpus}{when training on small data you may want to disable this to not cut off infrequent words}
\item{feature_limit}{content feature limit, to boost performance in very dirty text (default: disabled with -1)}

\item{unimportant_feature_step}{proportion to lookahead in unimportant features (default: 0.1)}

\item{important_feature_ratio}{proportion of feature content to be considered relevant (default: 0.5)}
\item{uid}{A character string used to uniquely identify the ML estimator.}
\item{...}{Optional arguments, see Details.}
}
\value{
The object returned depends on the class of \code{x}.
\itemize{
\item \code{spark_connection}: When \code{x} is a \code{spark_connection}, the function returns an instance of a \code{ml_estimator} object. The object contains a pointer to
a Spark \code{Estimator} object and can be used to compose
\code{Pipeline} objects.
\item \code{ml_pipeline}: When \code{x} is a \code{ml_pipeline}, the function returns a \code{ml_pipeline} with
the NLP estimator appended to the pipeline.
\item \code{tbl_spark}: When \code{x} is a \code{tbl_spark}, an estimator is constructed then
immediately fit with the input \code{tbl_spark}, returning an NLP model.
}
}
\description{
Spark ML estimator that scores a sentence for a sentiment
See \url{https://nlp.johnsnowlabs.com/docs/en/annotators#viveknsentimentdetector}
}
|
# server for JAEG Tweet
# Shiny server function. Relies on objects created outside this file
# (presumably in global.R): user_profile, user_df, the *_day_df / *_shadow_df
# calendar frames, *_text_corpus corpora, cdn_party_user_profile, and the
# helpers get_user_tweet(), get_cdnpoli_tweet(), create_cal_df(),
# create_cal2_df(), treemapify()/ggtify(), twitterMap() — TODO confirm.
function(input, output) {
  # ---- Get User Tweet ----
  # Grab Tweet
  user_info <- reactive({
    # NOTE(review): withProgress() closes as soon as this block returns, so
    # the progress message may vanish before the fetch finishes — confirm.
    withProgress({
      setProgress(message = "Grabbing Tweets!")})
    input$go
    isolate({num_set <- input$num_t
    # clean up the @ sign if is there
    tweethandle <- gsub("@", "", input$handle)
    get_user_tweet(tweethandle, num_t = num_set, rt = input$rt_yn)
    })
  })
  # ---- Get Picture! ----
  # This Work! Use renderUI to insert linked photo
  # Swap Twitter's low-res "normal" avatar URL for the 400x400 variant.
  output$image_link <- renderUI({
    input$go
    HTML(paste0('<img src = "',
                gsub("normal.jpeg", "400x400.jpeg", user_profile$profileImageUrl),
                '" align="middle">'))
  })
  # ---- Get Twitter Profile Stats ----
  output$user_fav <- renderInfoBox({
    input$go
    infoBox("Favorites", comma(user_profile$favoritesCount), icon = icon("heart"),
            color = "purple"
    )
  })
  output$user_follower <- renderInfoBox({
    input$go
    infoBox("Follower", comma(user_profile$followersCount), icon = icon("twitter-square"),
            color = "purple"
    )
  })
  output$user_friend <- renderInfoBox({
    input$go
    infoBox("Friends", comma(user_profile$friendsCount), icon = icon("group"),
            color = "purple"
    )
  })
  # ---- Make Calender plot ----
  output$user_calender <- renderPlot({
    # Create the dataframe with the custom fn
    # (presumably populates user_day_df / user_shadow_df as side effects —
    # TODO confirm against create_cal2_df's definition)
    create_cal2_df(user_info()$tweet_df)
    ggplot(user_day_df, aes(wday, m_week)) +
      geom_tile(data = user_shadow_df, aes(wday, m_week), fill = "grey92", color = "white") +
      geom_tile(data = user_day_df, aes_string(fill = input$var1_fill), color = "white") +
      facet_grid(.~month, drop=TRUE) +
      labs(x = "", y = "Week of the Month") +
      scale_x_discrete(drop=FALSE, limits = rev(levels(wday))) +
      scale_y_discrete(drop=FALSE, limits = rev(levels(user_day_df$m_week))) +
      theme_bw() +
      scale_fill_gradient(high = "#FF75FF", low = "#EBF0F5") +
      theme(panel.grid.major = element_blank(),
            panel.background = element_rect(fill = "#FAF0E6"),
            strip.background = element_rect(fill = "#6600CC"),
            strip.text = element_text(color = "white", face = "bold", size = rel(1.3)),
            legend.position="right")
  })
  # ---- Make Tweet by hour plot ----
  # Hour by weekday density plot
  output$user_hour_wday <- renderPlot({
    input$go
    ggplot(user_df, aes(hour, fill=wday)) +
      geom_density(alpha = 1/4, adjust=.2, color=NA) + theme_fivethirtyeight() +
      scale_x_continuous(breaks=seq(0, 24, by = 4)) +
      scale_y_continuous(name = "", breaks = NULL)
  })
  # Hour bar plot
  output$user_hour <- renderPlot({
    input$go
    ggplot(user_df, aes(hour)) +
      geom_bar(position="stack", alpha=2/3) + theme_fivethirtyeight() +
      scale_x_continuous(breaks=seq(0, 24, by = 4)) +
      theme(panel.background = element_rect(fill = "#FAF0E6"))
    # ggtitle("Number of Tweet by Hour of the Day")
  })
  # ---- User Word Cloud ----
  output$plot_wordcloud <- renderPlot({
    input$go
    withProgress({
      setProgress(message = "Drawing Word Cloud")})
    # Term frequencies from the user's tweet corpus, most frequent first.
    tdm <- TermDocumentMatrix(user_info()$tweet_text)
    matrix <- as.matrix(tdm)
    v <- sort(rowSums(matrix),decreasing = TRUE)
    d <- data.frame(word = names(v),freq = v)
    wordcloud(d$word, d$freq, colors = brewer.pal(8, "Set1"))
  })
  # ---- Make Tweet Table ----
  output$tweet_tbl <- renderDataTable({
    input$go
    data <- subset(user_info()$tweet_df,
                   select=c("text", "created", "favoriteCount", "retweetCount", "isRetweet"))
    # Make prettier label
    names(data) <- c("Tweets","Time","Favorite Count","Retweet Count","Retweet?")
    # Customize the DT
    # NOTE(review): `dom` appears twice in this options list; DT/jQuery will
    # only honour one of them — confirm which layout string is intended.
    DT::datatable(data, filter = 'top', extensions = c('ColReorder','Scroller'), options = list(
      dom = 'Rlfrtip', colReorder = list(realtime = TRUE),
      deferRender = TRUE,
      dom = "frtiS",
      scrollY = 200,
      scrollCollapse = TRUE,
      paging = FALSE
    )) %>%
      formatStyle("Tweets", Color = "#666699")
  })
  # ---- Get Canadian Political Party Leader Tweet ----
  cdnpoli_tweet <- reactive({
    withProgress({
      setProgress(message = "Grabbing Tweets!")})
    get_cdnpoli_tweet(num_twt = input$num_cdn_t, r_twt = input$r_twt_yn)
  })
  # calculate all the calender at once with the custom fn
  observe({
    create_cal_df(cdnpoli_tweet()$con_df)
    create_cal_df(cdnpoli_tweet()$lib_df)
    create_cal_df(cdnpoli_tweet()$green_df)
    create_cal_df(cdnpoli_tweet()$bloc_df)
    create_cal_df(cdnpoli_tweet()$ndp_df)
  })
  # ---- Make Calender Plot! ----
  # Convservative
  output$con_calender <- renderPlot({
    # create_cal_df(cdnpoli_tweet()$con_df)
    ggplot(pmharper_day_df, aes(wday, m_week)) +
      geom_tile(data = pmharper_shadow_df, aes(wday, m_week), fill = "grey92", color = "white") +
      geom_tile(data = pmharper_day_df, aes_string(fill = input$var_fill), color="white") +
      facet_grid(.~month, drop=TRUE) +
      labs(x = "", y = "Week of the Month") +
      scale_x_discrete(drop=FALSE, limits = rev(levels(wday))) +
      scale_y_discrete(drop=FALSE, limits = rev(levels(pmharper_day_df$m_week))) +
      theme_bw() +
      scale_fill_gradient(high = "#24476B", low = "#EBF0F5") +
      theme(panel.grid.major = element_blank(),
            panel.background = element_rect(fill = "#FAF0E6"),
            strip.background = element_rect(fill = "#24476B"),
            strip.text = element_text(color="white", face = "bold", size = rel(1.3)),
            legend.position = "bottom")
  })
  # NDP
  output$ndp_calender <- renderPlot({
    # create_cal_df(cdnpoli_tweet()$ndp_df)
    ggplot(ThomasMulcair_day_df, aes(wday, m_week)) +
      geom_tile(data = ThomasMulcair_shadow_df, aes(wday, m_week),
                fill = "grey92", color = "white") +
      geom_tile(data = ThomasMulcair_day_df, aes_string(fill = input$var_fill), color = "white") +
      facet_grid(.~month, drop=TRUE) +
      labs(x = "", y = "Week of the Month") +
      scale_x_discrete(drop=FALSE, limits = rev(levels(wday))) +
      scale_y_discrete(drop=FALSE, limits = rev(levels(ThomasMulcair_day_df$m_week))) +
      theme_bw() +
      scale_fill_gradient(high="#FF9900",low="#FFF5E6") +
      theme(panel.grid.major = element_blank(),
            panel.background = element_rect(fill = "#FAF0E6"),
            strip.background = element_rect(fill = "#FF9900"),
            strip.text = element_text(color="white", face = "bold", size = rel(1.3)),
            legend.position = "bottom")
  })
  # Liberal
  output$lib_calender <- renderPlot({
    # create_cal_df(cdnpoli_tweet()$lib_df)
    ggplot(JustinTrudeau_day_df, aes(wday, m_week)) +
      geom_tile(data = JustinTrudeau_shadow_df, aes(wday, m_week),
                fill = "grey92", color = "white") +
      geom_tile(data = JustinTrudeau_day_df, aes_string(fill = input$var_fill), color = "white") +
      facet_grid(.~month, drop=TRUE) +
      labs(x = "", y = "Week of the Month") +
      scale_x_discrete(drop=FALSE, limits = rev(levels(wday))) +
      scale_y_discrete(drop=FALSE, limits = rev(levels(JustinTrudeau_day_df$m_week))) +
      theme_bw() +
      scale_fill_gradient(high = "#FF6347",low = "#FFE4E1") +
      theme(panel.grid.major = element_blank(),
            panel.background = element_rect(fill = "#FAF0E6"),
            strip.background = element_rect(fill = "#FF6347"),
            strip.text = element_text(color="white", face = "bold", size = rel(1.3)),
            legend.position ="bottom")
  })
  # Green
  output$green_calender <- renderPlot({
    # create_cal_df(cdnpoli_tweet()$green_df)
    ggplot(ElizabethMay_day_df, aes(wday, m_week)) +
      geom_tile(data = ElizabethMay_shadow_df, aes(wday, m_week),
                fill = "grey92", color = "white") +
      geom_tile(data = ElizabethMay_day_df, aes_string(fill = input$var_fill), color = "white") +
      facet_grid(.~month, drop=TRUE) +
      labs(x = "", y = "Week of the Month") +
      scale_x_discrete(drop=FALSE, limits = rev(levels(wday))) +
      scale_y_discrete(drop=FALSE, limits = rev(levels(ElizabethMay_day_df$m_week))) +
      theme_bw() +
      scale_fill_gradient(high = "#1F5C1F",low = "#EBF5EB") +
      theme(panel.grid.major = element_blank(),
            panel.background = element_rect(fill = "#FAF0E6"),
            strip.background = element_rect(fill = "#1F5C1F"),
            strip.text = element_text(color="white", face = "bold", size = rel(1.3)),
            legend.position = "bottom")
  })
  # Bloc
  output$bloc_calender <- renderPlot({
    # create_cal_df(cdnpoli_tweet()$bloc_df)
    ggplot(GillesDuceppe_day_df, aes(wday, m_week)) +
      geom_tile(data = GillesDuceppe_shadow_df, aes(wday, m_week),
                fill = "grey92", color = "white") +
      # FIX: was `GillesDuceppe_df` — every other party's calendar layers its
      # `<name>_day_df` here, so the bloc plot was drawn from the wrong frame.
      geom_tile(data = GillesDuceppe_day_df, aes_string(fill = input$var_fill), color = "white") +
      facet_grid(.~month, drop=TRUE) +
      labs(x = "", y = "Week of the Month") +
      scale_x_discrete(drop=FALSE, limits = rev(levels(wday))) +
      scale_y_discrete(drop=FALSE, limits = rev(levels(GillesDuceppe_day_df$m_week))) +
      theme_bw() +
      scale_fill_gradient(high = "#003366",low = "#00CCFF") +
      theme(panel.grid.major = element_blank(),
            panel.background = element_rect(fill = "#FAF0E6"),
            strip.background = element_rect(fill = "#003366"),
            strip.text = element_text(color="white", face = "bold", size = rel(1.3)),
            legend.position = "bottom")
  })
  # ---- Comparison and Common Word Cloud ----
  # Comparision Cloud
  output$plot_comparecloud <- renderPlot({
    withProgress({
      setProgress(message = "Drawing Comparison Word Cloud")})
    input$go2
    comparison.cloud(cdnpoli_tweet()$corpous_for_cloud, random.order = FALSE,
                     colors=c("blue","red", "orange","green","darkblue"),
                     title.size = 1.5, max.words = 100)
  })
  # Common Cloud
  output$plot_commoncloud <- renderPlot({
    withProgress({
      setProgress(message = "Drawing Common Word Cloud")})
    input$go2
    commonality.cloud(cdnpoli_tweet()$corpous_for_cloud, random.order = FALSE,
                      colors = brewer.pal(8, "Dark2"),
                      title.size = 1.5, max.words = 100)
  })
  # ---- Individual Word Cloud ----
  # Convservative Cloud
  output$plot_wordcloud_con <- renderPlot({
    input$go
    input$r_twt_yn
    withProgress({
      setProgress(message = "Drawing Word Cloud")})
    tdm <- TermDocumentMatrix(pmharper_text_corpus)
    matrix <- as.matrix(tdm)
    v <- sort(rowSums(matrix),decreasing = TRUE)
    d <- data.frame(word = names(v),freq = v)
    wordcloud(d$word, d$freq, colors = brewer.pal(8, "Set1"))
  })
  # Liberal Cloud
  output$plot_wordcloud_lib <- renderPlot({
    input$go
    input$r_twt_yn
    withProgress({
      setProgress(message = "Drawing Word Cloud")})
    tdm <- TermDocumentMatrix(JustinTrudeau_text_corpus)
    matrix <- as.matrix(tdm)
    v <- sort(rowSums(matrix),decreasing = TRUE)
    d <- data.frame(word = names(v),freq = v)
    wordcloud(d$word, d$freq, colors = brewer.pal(8, "Set1"))
  })
  # NDP Cloud
  output$plot_wordcloud_ndp <- renderPlot({
    input$go
    input$r_twt_yn
    withProgress({
      setProgress(message = "Drawing Word Cloud")})
    tdm <- TermDocumentMatrix(ThomasMulcair_text_corpus)
    matrix <- as.matrix(tdm)
    v <- sort(rowSums(matrix),decreasing = TRUE)
    d <- data.frame(word = names(v),freq = v)
    wordcloud(d$word, d$freq, colors = brewer.pal(8, "Set1"))
  })
  # Green Cloud
  output$plot_wordcloud_green <- renderPlot({
    input$go
    input$r_twt_yn
    withProgress({
      setProgress(message = "Drawing Word Cloud")})
    tdm <- TermDocumentMatrix(ElizabethMay_text_corpus)
    matrix <- as.matrix(tdm)
    v <- sort(rowSums(matrix),decreasing = TRUE)
    d <- data.frame(word = names(v),freq = v)
    wordcloud(d$word, d$freq, colors = brewer.pal(8, "Set1"))
  })
  # Bloc Cloud
  output$plot_wordcloud_bloc <- renderPlot({
    input$go
    input$r_twt_yn
    withProgress({
      setProgress(message = "Drawing Word Cloud")})
    tdm <- TermDocumentMatrix(GillesDuceppe_text_corpus)
    matrix <- as.matrix(tdm)
    v <- sort(rowSums(matrix),decreasing = TRUE)
    d <- data.frame(word = names(v),freq = v)
    wordcloud(d$word, d$freq, colors = brewer.pal(8, "Set1"))
  })
  # ----- Profile Comparision -----
  # Treemap via modified ggtify fn
  output$treemap <- renderPlot({
    # Grouping and label needs to be factor for treemap to work
    cdn_party_user_profile$name <- factor(cdn_party_user_profile$name)
    cdn_party_user_profile$screenName <- factor(cdn_party_user_profile$screenName)
    cdn_party_user_profile$location <- factor(cdn_party_user_profile$location)
    cdn_party_user_profile$lang <- factor(cdn_party_user_profile$lang)
    # http://www.kevjohnson.org/making-maps-in-r/
    treemapify(cdn_party_user_profile, area = "followersCount",
               fill = "friendsCount", label = "name") %>% ggtify() +
      scale_fill_distiller(name = "Friends Count", palette = "Blues", breaks = pretty_breaks(5)) +
      guides(fill = guide_legend(reverse = TRUE))
  })
  # Bar graph
  output$party_metric <- renderPlot({
    # Grouping and label needs to be factor for treemap to work
    cdn_party_user_profile$name <- factor(cdn_party_user_profile$name)
    cdn_party_user_profile$screenName <- factor(cdn_party_user_profile$screenName)
    cdn_party_user_profile$location <- factor(cdn_party_user_profile$location)
    cdn_party_user_profile$lang <- factor(cdn_party_user_profile$lang)
    # Melt (gather) the data frame for bar graph
    data <- gather(cdn_party_user_profile, metrics, values, statusesCount:friendsCount)
    ggplot(filter(data, metrics != "followersCount" & metrics != "statusesCount"),
           aes(x = reorder(name, values), y = values, colour = metrics, fill = metrics)) +
      geom_bar(position = "dodge", stat = "identity") + coord_flip() +
      geom_text(aes(label = values), position = position_dodge(.9), hjust=-.2) +
      theme_fivethirtyeight() + ylim(0, 8000) +
      scale_fill_discrete(name = "Metric", label = c("Favorites", "Friends")) +
      scale_color_discrete(name = "Metric", label = c("Favorites", "Friends"))
  })
  # output$cdn_party_hour_wday <- renderPlot({
  #   input$go
  #   ggplot(user_df, aes(hour, fill=wday)) + geom_density(alpha = 1/4, adjust=.2, color=NA) + theme_fivethirtyeight() +
  #     scale_x_continuous(breaks=seq(0, 24, by = 4)) +
  #     scale_y_continuous(name = "", breaks = NULL)
  # })
  # ---- Make Tweet by Hour Plot ----
  # Hour by weekday density plot
  output$party_by_hour <- renderPlot({
    input$go
    # Combine the dataframe of all party
    pmharper_df$party <- "Convservative"
    JustinTrudeau_df$party <- "Liberal"
    ThomasMulcair_df$party <- "NDP"
    ElizabethMay_df$party <- "Green"
    GillesDuceppe_df$party <- "Bloc Québécois"
    party_day_df <- rbind(pmharper_df, JustinTrudeau_df, ThomasMulcair_df,
                          ElizabethMay_df, GillesDuceppe_df)
    ggplot(party_day_df, aes(hour, fill = party)) +
      geom_density(alpha = 1/4, adjust = .2, color = NA) + theme_fivethirtyeight() +
      scale_x_continuous(breaks = seq(0, 24, by = 4)) + # Make prettier break
      scale_y_continuous(name = "", breaks = NULL)
  })
  # ----- Lets Map Followers -----
  # Geocode up to 1000 followers of the entered handle when "map" is pressed.
  ff_coded <- eventReactive(input$map, {
    withProgress({
      setProgress(message = "Mapping Followers!")})
    handle <- paste0(gsub("@", "", input$handle))
    ff_df <- twitterMap(handle, nMax = 1000) # Lets try 1000!
    ff_df
  })
  output$check_ff <- renderDataTable({
    ff_coded()
  })
  output$user_follower_map <- renderLeaflet({
    user_f_coded_df <- ff_coded()
    leaflet() %>%
      addTiles() %>% # Add default OpenStreetMap map tiles
      addMarkers(user_f_coded_df$long, user_f_coded_df$lat,
                 popup = user_f_coded_df$location)
  })
}
|
/Tweet/Server.R
|
permissive
|
NeaterReport/portfolio
|
R
| false
| false
| 16,150
|
r
|
# server for JAEG Tweet
function(input, output) {
# ---- Get User Tweet ----
# Grab Tweet
user_info <- reactive({
withProgress({
setProgress(message = "Grabbing Tweets!")})
input$go
isolate({num_set <- input$num_t
# clean up the @ sign if is there
tweethandle <- gsub("@", "", input$handle)
get_user_tweet(tweethandle, num_t = num_set, rt = input$rt_yn)
})
})
# ---- Get Picture! ----
# This Work! Use renderUI to insert linked photo
output$image_link <- renderUI({
input$go
HTML(paste0('<img src = "',
gsub("normal.jpeg", "400x400.jpeg", user_profile$profileImageUrl),
'" align="middle">'))
})
# ---- Get Twitter Profile Stats ----
output$user_fav <- renderInfoBox({
input$go
infoBox("Favorites", comma(user_profile$favoritesCount), icon = icon("heart"),
color = "purple"
)
})
output$user_follower <- renderInfoBox({
input$go
infoBox("Follower", comma(user_profile$followersCount), icon = icon("twitter-square"),
color = "purple"
)
})
output$user_friend <- renderInfoBox({
input$go
infoBox("Friends", comma(user_profile$friendsCount), icon = icon("group"),
color = "purple"
)
})
# ---- Make Calender plot ----
output$user_calender <- renderPlot({
# Create the dataframe with the custom fn
create_cal2_df(user_info()$tweet_df)
ggplot(user_day_df, aes(wday, m_week)) +
geom_tile(data = user_shadow_df, aes(wday, m_week), fill = "grey92", color = "white") +
geom_tile(data = user_day_df, aes_string(fill = input$var1_fill), color = "white") +
facet_grid(.~month, drop=TRUE) +
labs(x = "", y = "Week of the Month") +
scale_x_discrete(drop=FALSE, limits = rev(levels(wday))) +
scale_y_discrete(drop=FALSE, limits = rev(levels(user_day_df$m_week))) +
theme_bw() +
scale_fill_gradient(high = "#FF75FF", low = "#EBF0F5") +
theme(panel.grid.major = element_blank(),
panel.background = element_rect(fill = "#FAF0E6"),
strip.background = element_rect(fill = "#6600CC"),
strip.text = element_text(color = "white", face = "bold", size = rel(1.3)),
legend.position="right")
})
# ---- Make Tweet by hour plot ----
# Hour by weekday density plot
output$user_hour_wday <- renderPlot({
input$go
ggplot(user_df, aes(hour, fill=wday)) +
geom_density(alpha = 1/4, adjust=.2, color=NA) + theme_fivethirtyeight() +
scale_x_continuous(breaks=seq(0, 24, by = 4)) +
scale_y_continuous(name = "", breaks = NULL)
})
# Hour bar plot
output$user_hour <- renderPlot({
input$go
ggplot(user_df, aes(hour)) +
geom_bar(position="stack", alpha=2/3) + theme_fivethirtyeight() +
scale_x_continuous(breaks=seq(0, 24, by = 4)) +
theme(panel.background = element_rect(fill = "#FAF0E6"))
# ggtitle("Number of Tweet by Hour of the Day")
})
# ---- User Word Cloud ----
output$plot_wordcloud <- renderPlot({
input$go
withProgress({
setProgress(message = "Drawing Word Cloud")})
tdm <- TermDocumentMatrix(user_info()$tweet_text)
matrix <- as.matrix(tdm)
v <- sort(rowSums(matrix),decreasing = TRUE)
d <- data.frame(word = names(v),freq = v)
wordcloud(d$word, d$freq, colors = brewer.pal(8, "Set1"))
})
# ---- Make Tweet Table ----
output$tweet_tbl <- renderDataTable({
input$go
data <- subset(user_info()$tweet_df,
select=c("text", "created", "favoriteCount", "retweetCount", "isRetweet"))
# Make prettier label
names(data) <- c("Tweets","Time","Favorite Count","Retweet Count","Retweet?")
# Customize the DT
DT::datatable(data, filter = 'top', extensions = c('ColReorder','Scroller'), options = list(
dom = 'Rlfrtip', colReorder = list(realtime = TRUE),
deferRender = TRUE,
dom = "frtiS",
scrollY = 200,
scrollCollapse = TRUE,
paging = FALSE
)) %>%
formatStyle("Tweets", Color = "#666699")
})
# ---- Get Canadian Political Party Leader Tweet ----
cdnpoli_tweet <- reactive({
withProgress({
setProgress(message = "Grabbing Tweets!")})
get_cdnpoli_tweet(num_twt = input$num_cdn_t, r_twt = input$r_twt_yn)
})
# calculate all the calender at once with the custom fn
observe({
create_cal_df(cdnpoli_tweet()$con_df)
create_cal_df(cdnpoli_tweet()$lib_df)
create_cal_df(cdnpoli_tweet()$green_df)
create_cal_df(cdnpoli_tweet()$bloc_df)
create_cal_df(cdnpoli_tweet()$ndp_df)
})
# ---- Make Calender Plot! ----
# Convservative
output$con_calender <- renderPlot({
# create_cal_df(cdnpoli_tweet()$con_df)
ggplot(pmharper_day_df, aes(wday, m_week)) +
geom_tile(data = pmharper_shadow_df, aes(wday, m_week), fill = "grey92", color = "white") +
geom_tile(data = pmharper_day_df, aes_string(fill = input$var_fill), color="white") +
facet_grid(.~month, drop=TRUE) +
labs(x = "", y = "Week of the Month") +
scale_x_discrete(drop=FALSE, limits = rev(levels(wday))) +
scale_y_discrete(drop=FALSE, limits = rev(levels(pmharper_day_df$m_week))) +
theme_bw() +
scale_fill_gradient(high = "#24476B", low = "#EBF0F5") +
theme(panel.grid.major = element_blank(),
panel.background = element_rect(fill = "#FAF0E6"),
strip.background = element_rect(fill = "#24476B"),
strip.text = element_text(color="white", face = "bold", size = rel(1.3)),
legend.position = "bottom")
})
# NDP
output$ndp_calender <- renderPlot({
# create_cal_df(cdnpoli_tweet()$ndp_df)
ggplot(ThomasMulcair_day_df, aes(wday, m_week)) +
geom_tile(data = ThomasMulcair_shadow_df, aes(wday, m_week),
fill = "grey92", color = "white") +
geom_tile(data = ThomasMulcair_day_df, aes_string(fill = input$var_fill), color = "white") +
facet_grid(.~month, drop=TRUE) +
labs(x = "", y = "Week of the Month") +
scale_x_discrete(drop=FALSE, limits = rev(levels(wday))) +
scale_y_discrete(drop=FALSE, limits = rev(levels(ThomasMulcair_day_df$m_week))) +
theme_bw() +
scale_fill_gradient(high="#FF9900",low="#FFF5E6") +
theme(panel.grid.major = element_blank(),
panel.background = element_rect(fill = "#FAF0E6"),
strip.background = element_rect(fill = "#FF9900"),
strip.text = element_text(color="white", face = "bold", size = rel(1.3)),
legend.position = "bottom")
})
# Liberal
output$lib_calender <- renderPlot({
# create_cal_df(cdnpoli_tweet()$lib_df)
ggplot(JustinTrudeau_day_df, aes(wday, m_week)) +
geom_tile(data = JustinTrudeau_shadow_df, aes(wday, m_week),
fill = "grey92", color = "white") +
geom_tile(data = JustinTrudeau_day_df, aes_string(fill = input$var_fill), color = "white") +
facet_grid(.~month, drop=TRUE) +
labs(x = "", y = "Week of the Month") +
scale_x_discrete(drop=FALSE, limits = rev(levels(wday))) +
scale_y_discrete(drop=FALSE, limits = rev(levels(JustinTrudeau_day_df$m_week))) +
theme_bw() +
scale_fill_gradient(high = "#FF6347",low = "#FFE4E1") +
theme(panel.grid.major = element_blank(),
panel.background = element_rect(fill = "#FAF0E6"),
strip.background = element_rect(fill = "#FF6347"),
strip.text = element_text(color="white", face = "bold", size = rel(1.3)),
legend.position ="bottom")
})
# Green
output$green_calender <- renderPlot({
# create_cal_df(cdnpoli_tweet()$green_df)
ggplot(ElizabethMay_day_df, aes(wday, m_week)) +
geom_tile(data = ElizabethMay_shadow_df, aes(wday, m_week),
fill = "grey92", color = "white") +
geom_tile(data = ElizabethMay_day_df, aes_string(fill = input$var_fill), color = "white") +
facet_grid(.~month, drop=TRUE) +
labs(x = "", y = "Week of the Month") +
scale_x_discrete(drop=FALSE, limits = rev(levels(wday))) +
scale_y_discrete(drop=FALSE, limits = rev(levels(ElizabethMay_day_df$m_week))) +
theme_bw() +
scale_fill_gradient(high = "#1F5C1F",low = "#EBF5EB") +
theme(panel.grid.major = element_blank(),
panel.background = element_rect(fill = "#FAF0E6"),
strip.background = element_rect(fill = "#1F5C1F"),
strip.text = element_text(color="white", face = "bold", size = rel(1.3)),
legend.position = "bottom")
})
# Bloc
output$bloc_calender <- renderPlot({
# create_cal_df(cdnpoli_tweet()$bloc_df)
ggplot(GillesDuceppe_day_df, aes(wday, m_week)) +
geom_tile(data = GillesDuceppe_shadow_df, aes(wday, m_week),
fill = "grey92", color = "white") +
geom_tile(data = GillesDuceppe_df, aes_string(fill = input$var_fill), color = "white") +
facet_grid(.~month, drop=TRUE) +
labs(x = "", y = "Week of the Month") +
scale_x_discrete(drop=FALSE, limits = rev(levels(wday))) +
scale_y_discrete(drop=FALSE, limits = rev(levels(GillesDuceppe_day_df$m_week))) +
theme_bw() +
scale_fill_gradient(high = "#003366",low = "#00CCFF") +
theme(panel.grid.major = element_blank(),
panel.background = element_rect(fill = "#FAF0E6"),
strip.background = element_rect(fill = "#003366"),
strip.text = element_text(color="white", face = "bold", size = rel(1.3)),
legend.position = "bottom")
})
# ---- Comparison and Common Word Cloud ----
  # Comparison Cloud
output$plot_comparecloud <- renderPlot({
withProgress({
setProgress(message = "Drawing Comparison Word Cloud")})
input$go2
comparison.cloud(cdnpoli_tweet()$corpous_for_cloud, random.order = FALSE,
colors=c("blue","red", "orange","green","darkblue"),
title.size = 1.5, max.words = 100)
})
# Common Cloud
output$plot_commoncloud <- renderPlot({
withProgress({
setProgress(message = "Drawing Common Word Cloud")})
input$go2
commonality.cloud(cdnpoli_tweet()$corpous_for_cloud, random.order = FALSE,
colors = brewer.pal(8, "Dark2"),
title.size = 1.5, max.words = 100)
})
# ---- Individual Word Cloud ----
  # Conservative Cloud
output$plot_wordcloud_con <- renderPlot({
input$go
input$r_twt_yn
withProgress({
setProgress(message = "Drawing Word Cloud")})
tdm <- TermDocumentMatrix(pmharper_text_corpus)
matrix <- as.matrix(tdm)
v <- sort(rowSums(matrix),decreasing = TRUE)
d <- data.frame(word = names(v),freq = v)
wordcloud(d$word, d$freq, colors = brewer.pal(8, "Set1"))
})
# Liberal Cloud
output$plot_wordcloud_lib <- renderPlot({
input$go
input$r_twt_yn
withProgress({
setProgress(message = "Drawing Word Cloud")})
tdm <- TermDocumentMatrix(JustinTrudeau_text_corpus)
matrix <- as.matrix(tdm)
v <- sort(rowSums(matrix),decreasing = TRUE)
d <- data.frame(word = names(v),freq = v)
wordcloud(d$word, d$freq, colors = brewer.pal(8, "Set1"))
})
# NDP Cloud
output$plot_wordcloud_ndp <- renderPlot({
input$go
input$r_twt_yn
withProgress({
setProgress(message = "Drawing Word Cloud")})
tdm <- TermDocumentMatrix(ThomasMulcair_text_corpus)
matrix <- as.matrix(tdm)
v <- sort(rowSums(matrix),decreasing = TRUE)
d <- data.frame(word = names(v),freq = v)
wordcloud(d$word, d$freq, colors = brewer.pal(8, "Set1"))
})
# Green Cloud
output$plot_wordcloud_green <- renderPlot({
input$go
input$r_twt_yn
withProgress({
setProgress(message = "Drawing Word Cloud")})
tdm <- TermDocumentMatrix(ElizabethMay_text_corpus)
matrix <- as.matrix(tdm)
v <- sort(rowSums(matrix),decreasing = TRUE)
d <- data.frame(word = names(v),freq = v)
wordcloud(d$word, d$freq, colors = brewer.pal(8, "Set1"))
})
# Bloc Cloud
output$plot_wordcloud_bloc <- renderPlot({
input$go
input$r_twt_yn
withProgress({
setProgress(message = "Drawing Word Cloud")})
tdm <- TermDocumentMatrix(GillesDuceppe_text_corpus)
matrix <- as.matrix(tdm)
v <- sort(rowSums(matrix),decreasing = TRUE)
d <- data.frame(word = names(v),freq = v)
wordcloud(d$word, d$freq, colors = brewer.pal(8, "Set1"))
})
  # ----- Profile Comparison -----
# Treemap via modified ggtify fn
output$treemap <- renderPlot({
# Grouping and label needs to be factor for treemap to work
cdn_party_user_profile$name <- factor(cdn_party_user_profile$name)
cdn_party_user_profile$screenName <- factor(cdn_party_user_profile$screenName)
cdn_party_user_profile$location <- factor(cdn_party_user_profile$location)
cdn_party_user_profile$lang <- factor(cdn_party_user_profile$lang)
# http://www.kevjohnson.org/making-maps-in-r/
treemapify(cdn_party_user_profile, area = "followersCount",
fill = "friendsCount", label = "name") %>% ggtify() +
scale_fill_distiller(name = "Friends Count", palette = "Blues", breaks = pretty_breaks(5)) +
guides(fill = guide_legend(reverse = TRUE))
})
# Bar graph
output$party_metric <- renderPlot({
# Grouping and label needs to be factor for treemap to work
cdn_party_user_profile$name <- factor(cdn_party_user_profile$name)
cdn_party_user_profile$screenName <- factor(cdn_party_user_profile$screenName)
cdn_party_user_profile$location <- factor(cdn_party_user_profile$location)
cdn_party_user_profile$lang <- factor(cdn_party_user_profile$lang)
# Melt (gather) the data frame for bar graph
data <- gather(cdn_party_user_profile, metrics, values, statusesCount:friendsCount)
ggplot(filter(data, metrics != "followersCount" & metrics != "statusesCount"),
aes(x = reorder(name, values), y = values, colour = metrics, fill = metrics)) +
geom_bar(position = "dodge", stat = "identity") + coord_flip() +
geom_text(aes(label = values), position = position_dodge(.9), hjust=-.2) +
theme_fivethirtyeight() + ylim(0, 8000) +
scale_fill_discrete(name = "Metric", label = c("Favorites", "Friends")) +
scale_color_discrete(name = "Metric", label = c("Favorites", "Friends"))
})
# output$cdn_party_hour_wday <- renderPlot({
# input$go
# ggplot(user_df, aes(hour, fill=wday)) + geom_density(alpha = 1/4, adjust=.2, color=NA) + theme_fivethirtyeight() +
# scale_x_continuous(breaks=seq(0, 24, by = 4)) +
# scale_y_continuous(name = "", breaks = NULL)
# })
# ---- Make Tweet by Hour Plot ----
# Hour by weekday density plot
output$party_by_hour <- renderPlot({
input$go
# Combine the dataframe of all party
pmharper_df$party <- "Convservative"
JustinTrudeau_df$party <- "Liberal"
ThomasMulcair_df$party <- "NDP"
ElizabethMay_df$party <- "Green"
GillesDuceppe_df$party <- "Bloc Québécois"
party_day_df <- rbind(pmharper_df, JustinTrudeau_df, ThomasMulcair_df,
ElizabethMay_df, GillesDuceppe_df)
ggplot(party_day_df, aes(hour, fill = party)) +
geom_density(alpha = 1/4, adjust = .2, color = NA) + theme_fivethirtyeight() +
scale_x_continuous(breaks = seq(0, 24, by = 4)) + # Make prettier break
scale_y_continuous(name = "", breaks = NULL)
})
# ----- Lets Map Followers -----
ff_coded <- eventReactive(input$map, {
withProgress({
setProgress(message = "Mapping Followers!")})
handle <- paste0(gsub("@", "", input$handle))
ff_df <- twitterMap(handle, nMax = 1000) # Lets try 1000!
ff_df
})
output$check_ff <- renderDataTable({
ff_coded()
})
output$user_follower_map <- renderLeaflet({
user_f_coded_df <- ff_coded()
leaflet() %>%
addTiles() %>% # Add default OpenStreetMap map tiles
addMarkers(user_f_coded_df$long, user_f_coded_df$lat,
popup = user_f_coded_df$location)
})
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{bin_mean}
\alias{bin_mean}
\title{Binomial Mean}
\usage{
bin_mean(trials, prob)
}
\arguments{
\item{trials}{input number of trials}
\item{prob}{input probability}
}
\value{
computed mean of the binomial distribution
}
\description{
calculate mean of the binomial distribution
}
\examples{
bin_mean(5,0.5)
}
|
/binomial/man/bin_mean.Rd
|
no_license
|
stat133-sp19/hw-stat133-hoangkhanhnghi
|
R
| false
| true
| 399
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{bin_mean}
\alias{bin_mean}
\title{Binomial Mean}
\usage{
bin_mean(trials, prob)
}
\arguments{
\item{trials}{input number of trials}
\item{prob}{input probability}
}
\value{
computed mean of the binomial distribution
}
\description{
calculate mean of the binomial distribution
}
\examples{
bin_mean(5,0.5)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assertions.R
\name{mlr_assertions}
\alias{mlr_assertions}
\alias{assert_backend}
\alias{assert_experiment}
\alias{assert_task}
\alias{assert_tasks}
\alias{assert_learner}
\alias{assert_learners}
\alias{assert_measure}
\alias{assert_measures}
\alias{assert_resampling}
\alias{assert_resamplings}
\alias{assert_resample_result}
\alias{assert_benchmark_result}
\alias{assert_param_set}
\alias{assert_id}
\alias{assert_ids}
\title{Assertion for mlr3 Objects}
\usage{
assert_backend(b, .var.name = vname(b))
assert_experiment(e, .var.name = vname(e))
assert_task(task, feature_types = NULL, task_properties = NULL,
clone = FALSE)
assert_tasks(tasks, feature_types = NULL, task_properties = NULL,
clone = FALSE)
assert_learner(learner, task = NULL, properties = character(0L),
clone = FALSE)
assert_learners(learners, task = NULL, properties = character(0L),
clone = FALSE)
assert_measure(measure, task = NULL, predict_types = NULL,
clone = FALSE)
assert_measures(measures, task = NULL, predict_types = NULL,
clone = FALSE)
assert_resampling(resampling, instantiated = NULL, clone = FALSE)
assert_resamplings(resamplings, instantiated = NULL, clone = FALSE)
assert_resample_result(resample_result,
.var.name = vname(resample_result))
assert_benchmark_result(bmr, .var.name = vname(bmr))
assert_param_set(param_set, .var.name = vname(param_set))
assert_id(id, .var.name = vname(id))
assert_ids(ids, .var.name = vname(ids))
}
\arguments{
\item{b}{:: \link{DataBackend}.}
\item{e}{:: \link{Experiment}.}
\item{task}{:: \link{Task}.}
\item{feature_types}{:: \code{character()}\cr
Set of allowed feature types.}
\item{task_properties}{:: \code{character()}\cr
Set of required task properties.}
\item{tasks}{:: list of \link{Task}.}
\item{learner}{:: \link{Learner}.}
\item{learners}{:: list of \link{Learner}.}
\item{measure}{:: \link{Measure}.}
\item{predict_types}{:: \code{character()}\cr
Vector of predict types provided by the \link{Experiment} or \link{Learner}.}
\item{measures}{:: list of \link{Measure}.}
\item{resampling}{:: \link{Resampling}.}
\item{resamplings}{:: list of \link{Resampling}.}
\item{resample_result}{:: \link{ResampleResult}.}
\item{bmr}{:: \link{BenchmarkResult}.}
\item{param_set}{:: \link[paradox:ParamSet]{paradox::ParamSet}.}
\item{id}{:: \code{character(1)}.}
\item{ids}{:: \code{character()}.}
}
\description{
Functions intended to be used in packages extending \pkg{mlr3}.
}
\keyword{internal}
|
/man/mlr_assertions.Rd
|
permissive
|
be-marc/mlr3
|
R
| false
| true
| 2,542
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assertions.R
\name{mlr_assertions}
\alias{mlr_assertions}
\alias{assert_backend}
\alias{assert_experiment}
\alias{assert_task}
\alias{assert_tasks}
\alias{assert_learner}
\alias{assert_learners}
\alias{assert_measure}
\alias{assert_measures}
\alias{assert_resampling}
\alias{assert_resamplings}
\alias{assert_resample_result}
\alias{assert_benchmark_result}
\alias{assert_param_set}
\alias{assert_id}
\alias{assert_ids}
\title{Assertion for mlr3 Objects}
\usage{
assert_backend(b, .var.name = vname(b))
assert_experiment(e, .var.name = vname(e))
assert_task(task, feature_types = NULL, task_properties = NULL,
clone = FALSE)
assert_tasks(tasks, feature_types = NULL, task_properties = NULL,
clone = FALSE)
assert_learner(learner, task = NULL, properties = character(0L),
clone = FALSE)
assert_learners(learners, task = NULL, properties = character(0L),
clone = FALSE)
assert_measure(measure, task = NULL, predict_types = NULL,
clone = FALSE)
assert_measures(measures, task = NULL, predict_types = NULL,
clone = FALSE)
assert_resampling(resampling, instantiated = NULL, clone = FALSE)
assert_resamplings(resamplings, instantiated = NULL, clone = FALSE)
assert_resample_result(resample_result,
.var.name = vname(resample_result))
assert_benchmark_result(bmr, .var.name = vname(bmr))
assert_param_set(param_set, .var.name = vname(param_set))
assert_id(id, .var.name = vname(id))
assert_ids(ids, .var.name = vname(ids))
}
\arguments{
\item{b}{:: \link{DataBackend}.}
\item{e}{:: \link{Experiment}.}
\item{task}{:: \link{Task}.}
\item{feature_types}{:: \code{character()}\cr
Set of allowed feature types.}
\item{task_properties}{:: \code{character()}\cr
Set of required task properties.}
\item{tasks}{:: list of \link{Task}.}
\item{learner}{:: \link{Learner}.}
\item{learners}{:: list of \link{Learner}.}
\item{measure}{:: \link{Measure}.}
\item{predict_types}{:: \code{character()}\cr
Vector of predict types provided by the \link{Experiment} or \link{Learner}.}
\item{measures}{:: list of \link{Measure}.}
\item{resampling}{:: \link{Resampling}.}
\item{resamplings}{:: list of \link{Resampling}.}
\item{resample_result}{:: \link{ResampleResult}.}
\item{bmr}{:: \link{BenchmarkResult}.}
\item{param_set}{:: \link[paradox:ParamSet]{paradox::ParamSet}.}
\item{id}{:: \code{character(1)}.}
\item{ids}{:: \code{character()}.}
}
\description{
Functions intended to be used in packages extending \pkg{mlr3}.
}
\keyword{internal}
|
\name{photoperiod}
\alias{photoperiod}
\alias{photoperiod,numeric-method}
\alias{photoperiod,Date-method}
\alias{photoperiod,data.frame-method}
\alias{photoperiod,SpatRaster-method}
\title{ photoperiod}
\description{
Compute photoperiod (daylength, sunshine duration) at a given latitude and day of the year.
}
\usage{
\S4method{photoperiod}{Date}(x, latitude)
\S4method{photoperiod}{data.frame}(x)
\S4method{photoperiod}{SpatRaster}(x, filename="", overwrite=FALSE, ...)
}
\arguments{
\item{x}{Date, integer (day of the year), or data.frame (with variables "date" and "latitude", or SpatRaster}
\item{latitude}{numeric. Latitude}
\item{filename}{character. Output filename}
\item{overwrite}{logical. If \code{TRUE}, \code{filename} is overwritten}
\item{...}{additional arguments for writing files as in \code{\link[terra]{writeRaster}}}
}
\value{
double. Photoperiod in hours
}
\references{
Forsythe, W.C., E.J. Rykiel Jr., R.S. Stahl, H. Wu, R.M. Schoolfield, 1995. A model comparison for photoperiod as a function of latitude and day of the year. Ecological Modeling 80: 87-95.
}
\examples{
photoperiod(50, 52)
photoperiod(50, 5)
photoperiod(180, 55)
p <- photoperiod(1:365, 52)
d <- dateFromDoy(1:365, 2001)
plot(d, p)
}
|
/man/daylength.Rd
|
no_license
|
cran/meteor
|
R
| false
| false
| 1,303
|
rd
|
\name{photoperiod}
\alias{photoperiod}
\alias{photoperiod,numeric-method}
\alias{photoperiod,Date-method}
\alias{photoperiod,data.frame-method}
\alias{photoperiod,SpatRaster-method}
\title{ photoperiod}
\description{
Compute photoperiod (daylength, sunshine duration) at a given latitude and day of the year.
}
\usage{
\S4method{photoperiod}{Date}(x, latitude)
\S4method{photoperiod}{data.frame}(x)
\S4method{photoperiod}{SpatRaster}(x, filename="", overwrite=FALSE, ...)
}
\arguments{
\item{x}{Date, integer (day of the year), or data.frame (with variables "date" and "latitude", or SpatRaster}
\item{latitude}{numeric. Latitude}
\item{filename}{character. Output filename}
\item{overwrite}{logical. If \code{TRUE}, \code{filename} is overwritten}
\item{...}{additional arguments for writing files as in \code{\link[terra]{writeRaster}}}
}
\value{
double. Photoperiod in hours
}
\references{
Forsythe, W.C., E.J. Rykiel Jr., R.S. Stahl, H. Wu, R.M. Schoolfield, 1995. A model comparison for photoperiod as a function of latitude and day of the year. Ecological Modeling 80: 87-95.
}
\examples{
photoperiod(50, 52)
photoperiod(50, 5)
photoperiod(180, 55)
p <- photoperiod(1:365, 52)
d <- dateFromDoy(1:365, 2001)
plot(d, p)
}
|
library(dplyr)
library(maps)
library(ggplot2)
library(grid)
source('code/edgeMaker.R')
mapExt <<- data.frame('x' = c(-125,-100), 'y' = c(30,50))
# get the data and combine it
# Read the flight log and attach departure/arrival coordinates.
#
# xx: path to a CSV with Departure and Arrival city columns.
# Returns the flight data with D.Lat/D.Lon and A.Lat/A.Lon columns joined
# on from data/LatLon.csv.
getFlightData <- function(xx = 'data/TestFlights.csv')
{
  flights <- read.csv(xx)
  cityCoords <- read.csv('data/LatLon.csv', comment.char = '#')
  # Join the coordinate table once per endpoint: rename its city column so
  # full_join matches on Departure / Arrival, then move the joined Lat/Lon
  # into prefixed columns and drop the originals.
  attachCoords <- function(df, endpoint, prefix) {
    names(cityCoords)[1] <- endpoint
    df <- dplyr::full_join(df, cityCoords)
    df[[paste0(prefix, '.Lat')]] <- df$Lat
    df[[paste0(prefix, '.Lon')]] <- df$Lon
    df$Lat <- df$Lon <- NULL
    df
  }
  flights <- attachCoords(flights, 'Departure', 'D')
  attachCoords(flights, 'Arrival', 'A')
}
# first go will be using ggplot
# Plot all flights on a US state map: straight arrows coloured by how often
# each leg was flown, curved paths from edgeMaker, arrival cities in red.
ggMap <- function()
{
  flights <- getFlightData()
  stateMap <- ggplot2::map_data("state")
  # aggregate to get number of times flying each leg
  flights$count <- 1
  legs <- flights %>%
    group_by(Departure, Arrival, Purpose, D.Lat, D.Lon, A.Lat, A.Lon) %>%
    summarise(count = sum(count))
  # one curved path per leg, built against the global map extent
  legPaths <- do.call(rbind, lapply(1:nrow(legs),
                                    function(i) edgeMaker(legs[i, ], mapExt)))
  # can use size or color to depict the number of flights flown between two
  # cities; swap the commented color/size lines under geom_segment to switch.
  # if using color, should use a different color gradient than the default
  ggplot() +
    geom_polygon(data = stateMap, aes(x = long, y = lat, group = group)) +
    geom_path(data = stateMap, aes(x = long, y = lat, group = group), color = 'grey50') +
    geom_segment(data = legs, aes(x = D.Lon, xend = A.Lon, y = D.Lat, yend = A.Lat,
                                  color = count), size = 1.5,
                 # size = count), color = 'blue',
                 arrow = grid::arrow(length = unit(.5, 'cm'))) +
    geom_point(data = flights, aes(x = A.Lon, y = A.Lat), color = 'red', size = 4) +
    coord_cartesian(xlim = mapExt$x, ylim = mapExt$y) +
    geom_path(data = legPaths, aes(x = x, y = y), color = 'blue')
}
|
/code/mapFlights.R
|
no_license
|
rabutler/myFlightMap
|
R
| false
| false
| 2,202
|
r
|
library(dplyr)
library(maps)
library(ggplot2)
library(grid)
source('code/edgeMaker.R')
mapExt <<- data.frame('x' = c(-125,-100), 'y' = c(30,50))
# get the data and combine it
# Read the flight log and attach departure/arrival coordinates.
#
# xx: path to a CSV with Departure and Arrival city columns.
# Returns the flight data with D.Lat/D.Lon and A.Lat/A.Lon columns joined
# on from data/LatLon.csv.
getFlightData <- function(xx = 'data/TestFlights.csv')
{
  flights <- read.csv(xx)
  cityCoords <- read.csv('data/LatLon.csv', comment.char = '#')
  # Join the coordinate table once per endpoint: rename its city column so
  # full_join matches on Departure / Arrival, then move the joined Lat/Lon
  # into prefixed columns and drop the originals.
  attachCoords <- function(df, endpoint, prefix) {
    names(cityCoords)[1] <- endpoint
    df <- dplyr::full_join(df, cityCoords)
    df[[paste0(prefix, '.Lat')]] <- df$Lat
    df[[paste0(prefix, '.Lon')]] <- df$Lon
    df$Lat <- df$Lon <- NULL
    df
  }
  flights <- attachCoords(flights, 'Departure', 'D')
  attachCoords(flights, 'Arrival', 'A')
}
# first go will be using ggplot
# Plot all flights on a US state map: straight arrows coloured by how often
# each leg was flown, curved paths from edgeMaker, arrival cities in red.
ggMap <- function()
{
  flights <- getFlightData()
  stateMap <- ggplot2::map_data("state")
  # aggregate to get number of times flying each leg
  flights$count <- 1
  legs <- flights %>%
    group_by(Departure, Arrival, Purpose, D.Lat, D.Lon, A.Lat, A.Lon) %>%
    summarise(count = sum(count))
  # one curved path per leg, built against the global map extent
  legPaths <- do.call(rbind, lapply(1:nrow(legs),
                                    function(i) edgeMaker(legs[i, ], mapExt)))
  # can use size or color to depict the number of flights flown between two
  # cities; swap the commented color/size lines under geom_segment to switch.
  # if using color, should use a different color gradient than the default
  ggplot() +
    geom_polygon(data = stateMap, aes(x = long, y = lat, group = group)) +
    geom_path(data = stateMap, aes(x = long, y = lat, group = group), color = 'grey50') +
    geom_segment(data = legs, aes(x = D.Lon, xend = A.Lon, y = D.Lat, yend = A.Lat,
                                  color = count), size = 1.5,
                 # size = count), color = 'blue',
                 arrow = grid::arrow(length = unit(.5, 'cm'))) +
    geom_point(data = flights, aes(x = A.Lon, y = A.Lat), color = 'red', size = 4) +
    coord_cartesian(xlim = mapExt$x, ylim = mapExt$y) +
    geom_path(data = legPaths, aes(x = x, y = y), color = 'blue')
}
|
#'@title Example Dataset
#'
#'@description A fictitious dataset showcasing the functionality of the WhatsApp Parser
#'@name Example
#'@docType data
#'@usage data(Example)
#'@format A WhatsApp \code{.txt} chat export
#'@keywords datasets, WhatsApp Textfile
NULL
|
/R/Example.R
|
no_license
|
davidm6433/WhatsAppParser
|
R
| false
| false
| 290
|
r
|
#'@title Example Dataset
#'
#'@description A fictitious dataset showcasing the functionality of the WhatsApp Parser
#'@name Example
#'@docType data
#'@usage data(Example)
#'@format A WhatsApp \code{.txt} chat export
#'@keywords datasets, WhatsApp Textfile
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appstream_operations.R
\name{appstream_delete_user}
\alias{appstream_delete_user}
\title{Deletes a user from the user pool}
\usage{
appstream_delete_user(UserName, AuthenticationType)
}
\arguments{
\item{UserName}{[required] The email address of the user.
Users' email addresses are case-sensitive.}
\item{AuthenticationType}{[required] The authentication type for the user. You must specify USERPOOL.}
}
\description{
Deletes a user from the user pool.
}
\section{Request syntax}{
\preformatted{svc$delete_user(
UserName = "string",
AuthenticationType = "API"|"SAML"|"USERPOOL"
)
}
}
\keyword{internal}
|
/cran/paws.end.user.computing/man/appstream_delete_user.Rd
|
permissive
|
sanchezvivi/paws
|
R
| false
| true
| 689
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appstream_operations.R
\name{appstream_delete_user}
\alias{appstream_delete_user}
\title{Deletes a user from the user pool}
\usage{
appstream_delete_user(UserName, AuthenticationType)
}
\arguments{
\item{UserName}{[required] The email address of the user.
Users' email addresses are case-sensitive.}
\item{AuthenticationType}{[required] The authentication type for the user. You must specify USERPOOL.}
}
\description{
Deletes a user from the user pool.
}
\section{Request syntax}{
\preformatted{svc$delete_user(
UserName = "string",
AuthenticationType = "API"|"SAML"|"USERPOOL"
)
}
}
\keyword{internal}
|
#' Landmark Multidimensional Scaling
#'
#' Landmark MDS is a variant of Classical Multidimensional Scaling in that
#' it first finds a low-dimensional embedding using a small portion of given dataset
#' and graft the others in a manner to preserve as much pairwise distance from
#' all the other data points to landmark points as possible.
#'
#' @param X an \eqn{(n\times p)} matrix or data frame whose rows are observations
#' and columns represent independent variables.
#' @param ndim an integer-valued target dimension.
#' @param ltype on how to select landmark points, either \code{"random"} or \code{"MaxMin"}.
#' @param npoints the number of landmark points to be drawn.
#' @param preprocess an option for preprocessing the data. Default is "center".
#' See also \code{\link{aux.preprocess}} for more details.
#'
#' @return a named list containing
#' \describe{
#' \item{Y}{an \eqn{(n\times ndim)} matrix whose rows are embedded observations.}
#' \item{trfinfo}{a list containing information for out-of-sample prediction.}
#' \item{projection}{a \eqn{(p\times ndim)} whose columns are basis for projection.}
#' }
#'
#' @examples
#' \dontrun{
#' # generate data
#' X <- aux.gensamples(dname="crown")
#'
#' ## 1. use 10% of random points
#' output1 <- do.lmds(X,ndim=2,npoints=round(nrow(X)/10))
#'
#' ## 2. using MaxMin scheme
#' output2 <- do.lmds(X,ndim=2,npoints=round(nrow(X)/10),ltype="MaxMin")
#'
#' ## 3. original mds case
#' output3 <- do.mds(X,ndim=2)
#'
#' ## Visualization
#' par(mfrow=c(1,3))
#' plot(output1$Y[,1],output2$Y[,2],main="10% random points")
#' plot(output2$Y[,1],output2$Y[,2],main="10% MaxMin points")
#' plot(output3$Y[,1],output3$Y[,2],main="original MDS")
#' }
#'
#' @seealso \code{\link{do.mds}}
#' @references
#' \insertRef{silva_global_2002}{Rdimtools}
#'
#' \insertRef{lee_landmark_2009}{Rdimtools}
#'
#' @author Kisung You
#' @rdname linear_LMDS
#' @export
do.lmds <- function(X,ndim=2,ltype="random",npoints=max(nrow(X)/5,ndim+1),
                    preprocess=c("center","cscale","decorrelate","whiten")){
  # 1. typecheck is always first step to perform.
  aux.typecheck(X)
  if ((!is.numeric(ndim))||(ndim<1)||(ndim>ncol(X))||is.infinite(ndim)||is.na(ndim)){
    stop("* do.lmds : 'ndim' is a positive integer in [1,#(covariates)].")
  }
  ndim = as.integer(ndim)
  # 2. ... parameters
  # 2-1. landmark selection
  #   ltype : "random" (default) or "MaxMin"
  #   npoints : number of landmarks, validated below to lie in (ndim+1 ~ nrow(X)/2)
  # 2-2. lmds itself
  #   preprocess : 'center','decorrelate', or 'whiten'
  if (!is.element(ltype,c("random","MaxMin"))){
    stop("* do.lmds : 'ltype' is either 'random' or 'MaxMin'.")
  }
  npoints = as.integer(round(npoints))
  if (!is.numeric(npoints)||(npoints<=ndim)||(npoints>nrow(X)/2)||is.na(npoints)||is.infinite(npoints)){
    stop("* do.lmds : the number of landmark points should be [ndim+1,#(total data points)/2].")
  }
  if (missing(preprocess)){
    algpreprocess = "center"
  } else {
    algpreprocess = match.arg(preprocess)
  }
  # 3. Preprocess the data; 'trfinfo' records the transform so out-of-sample
  #    points can later be mapped into the same space.
  tmplist = aux.preprocess.hidden(X,type=algpreprocess,algtype="linear")
  trfinfo = tmplist$info
  pX = tmplist$pX
  # 4. select landmark points ("MaxMin" greedily picks points far from those
  #    already chosen; "random" samples uniformly without replacement)
  if (ltype=="random"){
    landmarkidx = sample(1:nrow(pX),npoints)
  } else if (ltype=="MaxMin"){
    landmarkidx = aux.MaxMinLandmark(pX,npoints)
  }
  if (length(landmarkidx)!=npoints){
    stop("* do.lmds : landmark selection process is incomplete.")
  }
  # 5. MDS on landmark points, computed via PCA. do.pca's default may return
  #    fewer than 'ndim' components; if so, re-run it asking for 'ndim'.
  pcarun = do.pca(pX[landmarkidx,])
  Lk = t(pcarun$Y)
  if (nrow(Lk)<=ndim){
    pcarun = do.pca(pX[landmarkidx,],ndim=ndim)
    Lk = t(pcarun$Y)
  }
  # 6. Distance-Based Triangulation
  pD = as.matrix(dist(pX))
  # 6-1. pseudoinverse for mapping of (k-by-n) matrix Lk#
  #      each row of Lk is rescaled by its squared norm; presumably this is
  #      the row-wise pseudoinverse used by the Landmark-MDS triangulation
  #      formula when the rows (PCA scores) are orthogonal — TODO confirm
  Lksharp = array(0,c(nrow(Lk),ncol(Lk)))
  for (i in 1:nrow(Lk)){
    tgtvec = Lk[i,]
    lambda = sqrt(sum(tgtvec^2))
    Lksharp[i,] = tgtvec/(lambda^2)
  }
  # 6-2. squared pairwise distances among landmarks, and their row means
  Deltan = (pD[landmarkidx,landmarkidx])^2
  deltamu = rowMeans(Deltan)
  # 6-3. place each observation from its squared distances to the landmarks
  Ydbt = array(0,c(nrow(Lk),nrow(pX)))
  for (i in 1:nrow(pX)){
    deltax = (pD[i,landmarkidx])^2
    Ydbt[,i] = (Lksharp %*% (deltax-deltamu))/(-2)
  }
  # 7. PCA align: rotate the embedded coordinates onto their principal axes
  tYdbt = t(Ydbt)
  pcaoutput = do.pca(tYdbt,ndim=ndim,preprocess = "center")
  # 8. return output
  result = list()
  result$Y = pcaoutput$Y # embedded coordinates (n x ndim)
  result$trfinfo = trfinfo # preprocessing info for out-of-sample prediction
  # least-squares fit of a linear map pX -> Y, reported as projection basis
  LHS = t(pX)%*%pX
  RHS = t(pX)%*%(result$Y)
  result$projection = aux.adjprojection(solve(LHS,RHS)) # projection
  return(result)
}
|
/R/linear_LMDS.R
|
no_license
|
rcannood/Rdimtools
|
R
| false
| false
| 4,542
|
r
|
#' Landmark Multidimensional Scaling
#'
#' Landmark MDS is a variant of Classical Multidimensional Scaling in that
#' it first finds a low-dimensional embedding using a small portion of given dataset
#' and graft the others in a manner to preserve as much pairwise distance from
#' all the other data points to landmark points as possible.
#'
#' @param X an \eqn{(n\times p)} matrix or data frame whose rows are observations
#' and columns represent independent variables.
#' @param ndim an integer-valued target dimension.
#' @param ltype on how to select landmark points, either \code{"random"} or \code{"MaxMin"}.
#' @param npoints the number of landmark points to be drawn.
#' @param preprocess an option for preprocessing the data. Default is "center".
#' See also \code{\link{aux.preprocess}} for more details.
#'
#' @return a named list containing
#' \describe{
#' \item{Y}{an \eqn{(n\times ndim)} matrix whose rows are embedded observations.}
#' \item{trfinfo}{a list containing information for out-of-sample prediction.}
#' \item{projection}{a \eqn{(p\times ndim)} whose columns are basis for projection.}
#' }
#'
#' @examples
#' \dontrun{
#' # generate data
#' X <- aux.gensamples(dname="crown")
#'
#' ## 1. use 10% of random points
#' output1 <- do.lmds(X,ndim=2,npoints=round(nrow(X)/10))
#'
#' ## 2. using MaxMin scheme
#' output2 <- do.lmds(X,ndim=2,npoints=round(nrow(X)/10),ltype="MaxMin")
#'
#' ## 3. original mds case
#' output3 <- do.mds(X,ndim=2)
#'
#' ## Visualization
#' par(mfrow=c(1,3))
#' plot(output1$Y[,1],output2$Y[,2],main="10% random points")
#' plot(output2$Y[,1],output2$Y[,2],main="10% MaxMin points")
#' plot(output3$Y[,1],output3$Y[,2],main="original MDS")
#' }
#'
#' @seealso \code{\link{do.mds}}
#' @references
#' \insertRef{silva_global_2002}{Rdimtools}
#'
#' \insertRef{lee_landmark_2009}{Rdimtools}
#'
#' @author Kisung You
#' @rdname linear_LMDS
#' @export
do.lmds <- function(X,ndim=2,ltype="random",npoints=max(nrow(X)/5,ndim+1),
                    preprocess=c("center","cscale","decorrelate","whiten")){
  # 1. typecheck is always first step to perform.
  aux.typecheck(X)
  if ((!is.numeric(ndim))||(ndim<1)||(ndim>ncol(X))||is.infinite(ndim)||is.na(ndim)){
    stop("* do.lmds : 'ndim' is a positive integer in [1,#(covariates)].")
  }
  ndim = as.integer(ndim)
  # 2. ... parameters
  # 2-1. landmark selection
  #   ltype : "random" (default) or "MaxMin"
  #   npoints : number of landmarks, validated below to lie in (ndim+1 ~ nrow(X)/2)
  # 2-2. lmds itself
  #   preprocess : 'center','decorrelate', or 'whiten'
  if (!is.element(ltype,c("random","MaxMin"))){
    stop("* do.lmds : 'ltype' is either 'random' or 'MaxMin'.")
  }
  npoints = as.integer(round(npoints))
  if (!is.numeric(npoints)||(npoints<=ndim)||(npoints>nrow(X)/2)||is.na(npoints)||is.infinite(npoints)){
    stop("* do.lmds : the number of landmark points should be [ndim+1,#(total data points)/2].")
  }
  if (missing(preprocess)){
    algpreprocess = "center"
  } else {
    algpreprocess = match.arg(preprocess)
  }
  # 3. Preprocess the data; 'trfinfo' records the transform so out-of-sample
  #    points can later be mapped into the same space.
  tmplist = aux.preprocess.hidden(X,type=algpreprocess,algtype="linear")
  trfinfo = tmplist$info
  pX = tmplist$pX
  # 4. select landmark points ("MaxMin" greedily picks points far from those
  #    already chosen; "random" samples uniformly without replacement)
  if (ltype=="random"){
    landmarkidx = sample(1:nrow(pX),npoints)
  } else if (ltype=="MaxMin"){
    landmarkidx = aux.MaxMinLandmark(pX,npoints)
  }
  if (length(landmarkidx)!=npoints){
    stop("* do.lmds : landmark selection process is incomplete.")
  }
  # 5. MDS on landmark points, computed via PCA. do.pca's default may return
  #    fewer than 'ndim' components; if so, re-run it asking for 'ndim'.
  pcarun = do.pca(pX[landmarkidx,])
  Lk = t(pcarun$Y)
  if (nrow(Lk)<=ndim){
    pcarun = do.pca(pX[landmarkidx,],ndim=ndim)
    Lk = t(pcarun$Y)
  }
  # 6. Distance-Based Triangulation
  pD = as.matrix(dist(pX))
  # 6-1. pseudoinverse for mapping of (k-by-n) matrix Lk#
  #      each row of Lk is rescaled by its squared norm; presumably this is
  #      the row-wise pseudoinverse used by the Landmark-MDS triangulation
  #      formula when the rows (PCA scores) are orthogonal — TODO confirm
  Lksharp = array(0,c(nrow(Lk),ncol(Lk)))
  for (i in 1:nrow(Lk)){
    tgtvec = Lk[i,]
    lambda = sqrt(sum(tgtvec^2))
    Lksharp[i,] = tgtvec/(lambda^2)
  }
  # 6-2. squared pairwise distances among landmarks, and their row means
  Deltan = (pD[landmarkidx,landmarkidx])^2
  deltamu = rowMeans(Deltan)
  # 6-3. place each observation from its squared distances to the landmarks
  Ydbt = array(0,c(nrow(Lk),nrow(pX)))
  for (i in 1:nrow(pX)){
    deltax = (pD[i,landmarkidx])^2
    Ydbt[,i] = (Lksharp %*% (deltax-deltamu))/(-2)
  }
  # 7. PCA align: rotate the embedded coordinates onto their principal axes
  tYdbt = t(Ydbt)
  pcaoutput = do.pca(tYdbt,ndim=ndim,preprocess = "center")
  # 8. return output
  result = list()
  result$Y = pcaoutput$Y # embedded coordinates (n x ndim)
  result$trfinfo = trfinfo # preprocessing info for out-of-sample prediction
  # least-squares fit of a linear map pX -> Y, reported as projection basis
  LHS = t(pX)%*%pX
  RHS = t(pX)%*%(result$Y)
  result$projection = aux.adjprojection(solve(LHS,RHS)) # projection
  return(result)
}
|
MultCapability <- function(data, lsls, usls, targets,
                           ncomps = NULL, Target = FALSE) {
  # Multivariate process capability indices computed on the principal
  # components of cov(data): the Wang/Chen geometric-mean family, the
  # Xekalaki eigenvalue-weighted-sum family and the eigenvalue-weighted
  # geometric ("Wang2") family, plus a classic univariate Ppk per variable.
  #
  # Args:
  #   data:    numeric matrix or data frame; rows = observations,
  #            columns = quality characteristics.
  #   lsls:    lower specification limits, one entry per column of 'data'.
  #   usls:    upper specification limits, one entry per column of 'data'.
  #   targets: target values, one entry per column of 'data'.
  #   ncomps:  number of principal components to retain.  NULL (the default)
  #            now means "use all ncol(data) components"; previously NULL
  #            crashed in svd() and in the eigenvalue subset below.
  #   Target:  if TRUE, the individual Ppks are centred on 'targets'
  #            instead of on the column means.
  #
  # Returns:
  #   A list of class "mcpk" holding three data frames of multivariate
  #   capability indices (Wang, Xekalaki, Wang2) and one data frame of
  #   per-variable Ppk values.
  X <- as.matrix(data)
  m <- nrow(X)
  ColMeans <- colMeans(X)
  # Sample standard deviation of every column (denominator m - 1).
  ColSD <- sqrt(colSums((X - rep(colMeans(X), each = m))^2)/(m - 1))
  # Robustness fix: make the documented default usable -- fall back to the
  # full number of variables when ncomps is not supplied.
  if (is.null(ncomps)) ncomps <- ncol(X)
  # Eigen-decomposition of the covariance matrix via SVD; cov(X) is
  # symmetric PSD, so d are the eigenvalues and v the eigenvectors.
  SVD <- svd(cov(X), nu = ncomps, nv = ncomps)
  eigenValues <- SVD$d[1:ncomps]
  eigenValuesSum <- sum(eigenValues)
  rightsvs <- SVD$v
  ncomp.inv <- 1 / ncomps
  mult.3 <- 3
  mult.6 <- 6
  # Project spec limits, targets and column means onto the retained PCs.
  projectedSpecs <- t(rightsvs) %*% cbind(lsls, usls, targets, ColMeans)
  colnames(projectedSpecs) <- c("lslspc", "uslspc", "targetspc", "colmeanspc")
  # Per-component Cp, plus the two one-sided Cpk terms with a plain
  # denominator (cpkIs1) and a target-penalised denominator (cpkIs2).
  cpI <- abs((projectedSpecs[, "uslspc"] - projectedSpecs[, "lslspc"])) / (mult.6 * sqrt(eigenValues))
  cpkIa <- abs((projectedSpecs[, "lslspc"] - projectedSpecs[, "colmeanspc"])) / (mult.3 * sqrt(eigenValues))
  cpkIb <- abs((projectedSpecs[, "colmeanspc"] - projectedSpecs[, "uslspc"])) / (mult.3 * sqrt(eigenValues))
  cpkIs1 <- cbind(cpkIa, cpkIb)
  cpkIa2 <- abs((projectedSpecs[, "lslspc"] - projectedSpecs[, "colmeanspc"])) / (mult.3 * sqrt(eigenValues + (projectedSpecs[, "colmeanspc"] - projectedSpecs[, "targetspc"])^2))
  cpkIb2 <- abs((projectedSpecs[, "colmeanspc"] - projectedSpecs[, "uslspc"])) / (mult.3 * sqrt(eigenValues + (projectedSpecs[, "colmeanspc"] - projectedSpecs[, "targetspc"])^2))
  cpkIs2 <- cbind(cpkIa2, cpkIb2)
  # Wang/Chen family: geometric means across the components.
  # (Consistency: use ncomp.inv for the exponent like the lines below.)
  mcp_wang <- prod(cpI)^ncomp.inv
  mcpk_wang <- prod(apply(cpkIs1, 1, min))^ncomp.inv
  mcpm_wang <- prod((abs((projectedSpecs[, "uslspc"] - projectedSpecs[, "lslspc"])) / (mult.6 * sqrt(eigenValues + (projectedSpecs[, "colmeanspc"] - projectedSpecs[, "targetspc"])^2))))^ncomp.inv
  mcpmk_wang <- prod(apply(cpkIs2, 1, min))^ncomp.inv
  Wang1 <- c(ncomps = ncomps, mcp_wang = mcp_wang, mcpk_wang = mcpk_wang,
             mcpm_wang = mcpm_wang, mcpmk_wang = mcpmk_wang)
  Wang <- data.frame(Index = names(Wang1), Metrix = Wang1)
  row.names(Wang) <- NULL
  # Xekalaki family: eigenvalue-weighted arithmetic means.
  spaceDiff_xe <- as.vector(cpI * eigenValues)
  mcp_xe <- (sum(spaceDiff_xe)) / eigenValuesSum
  spaceDiff_xe.min <- apply(cpkIs1, 1, min) * eigenValues
  mcpk_xe <- (sum(spaceDiff_xe.min)) / eigenValuesSum
  spaceDiff_xe.normed <- (abs(projectedSpecs[, "uslspc"] - projectedSpecs[, "lslspc"]) / (mult.6 * sqrt(eigenValues + (projectedSpecs[, "colmeanspc"] - projectedSpecs[, "targetspc"])^2))) * eigenValues
  mcpm_xe <- (sum(spaceDiff_xe.normed)) / eigenValuesSum
  spaceDiff_xe.normed.min <- apply(cpkIs2, 1, min) * eigenValues
  mcpmk_xe <- (sum(spaceDiff_xe.normed.min)) / eigenValuesSum
  Xekalaki1 <- c(ncomps = ncomps, mcp_xe = mcp_xe, mcpk_xe = mcpk_xe,
                 mcpm_xe = mcpm_xe, mcpmk_xe = mcpmk_xe)
  Xekalaki <- data.frame(Index = names(Xekalaki1), Metrix = Xekalaki1)
  row.names(Xekalaki) <- NULL
  # Wang2 family: eigenvalue-weighted geometric means.
  spaceDiff_wang2 <- as.vector(cpI^eigenValues)
  mcp_wang_2 <- (prod(spaceDiff_wang2))^(1 / eigenValuesSum)
  spaceDiff_wang2.min <- apply(cpkIs1, 1, min)^eigenValues
  mcpk_wang_2 <- (prod(spaceDiff_wang2.min))^(1 / eigenValuesSum)
  spaceDiff_wang2.normed <- (abs(projectedSpecs[, "uslspc"] - projectedSpecs[, "lslspc"]) / (mult.6 * sqrt(eigenValues + (projectedSpecs[, "colmeanspc"] - projectedSpecs[, "targetspc"])^2)))^ eigenValues
  mcpm_wang_2 <- (prod(spaceDiff_wang2.normed))^(1 / eigenValuesSum)
  spaceDiff_wang2.normed.min <- apply(cpkIs2, 1, min)^eigenValues
  mcpmk_wang_2 <- (prod(spaceDiff_wang2.normed.min))^(1 / eigenValuesSum)
  Wang21 <- c(ncomps = ncomps, mcp_wang_2 = mcp_wang_2, mcpk_wang_2 = mcpk_wang_2,
              mcpm_wang_2 = mcpm_wang_2, mcpmk_wang_2 = mcpmk_wang_2)
  Wang2 <- data.frame(Index = names(Wang21), Metrix = Wang21)
  row.names(Wang2) <- NULL
  # Univariate Ppk per original variable, centred on targets or means.
  if (Target) {
    Pre.Ppk1 <- cbind((targets - lsls) / (mult.3 * ColSD),
                      (usls - targets) / (mult.3 * ColSD))
    Ppk <- data.frame(Index = "Ppk", `Individual Ppks` = apply(Pre.Ppk1, 1, min))
    row.names(Ppk) <- colnames(X)
  } else {
    Pre.Ppk2 <- cbind((ColMeans - lsls) / (mult.3 * ColSD),
                      (usls - ColMeans) / (mult.3 * ColSD))
    Ppk <- data.frame(Index = "Ppk", `Individual Ppks` = apply(Pre.Ppk2, 1, min))
    row.names(Ppk) <- colnames(X)
  }
  Results <- list("multivariate capability indices - Wang CP" = Wang,
                  "multivariate capability indices - Xekalaki CP" = Xekalaki,
                  "multivariate capability indices - Wang2 CP" = Wang2,
                  "Individual Parameter Ppks" = Ppk)
  class(Results) <- "mcpk"
  Results
}
|
/R/MultCapability.R
|
no_license
|
cran/mvdalab
|
R
| false
| false
| 4,465
|
r
|
# Multivariate process capability indices on the principal components of
# cov(data): the Wang/Chen geometric-mean family, the Xekalaki
# eigenvalue-weighted-sum family and the eigenvalue-weighted geometric
# ("Wang2") family, plus a classic univariate Ppk per variable.
#
# Args:
#   data:    numeric matrix / data frame; rows = observations,
#            columns = quality characteristics.
#   lsls:    lower specification limits, one entry per column.
#   usls:    upper specification limits, one entry per column.
#   targets: target values, one entry per column.
#   ncomps:  number of principal components to use.  NOTE(review): the NULL
#            default is never handled below -- svd(..., nu = NULL) and
#            SVD$d[1:NULL] both fail, so callers must always pass ncomps;
#            confirm intended default behaviour.
#   Target:  if TRUE the per-variable Ppks are centred on 'targets',
#            otherwise on the column means.
#
# Returns: a list of class "mcpk" with three data frames of multivariate
#   indices (Wang, Xekalaki, Wang2) and one data frame of individual Ppks.
MultCapability <- function(data, lsls, usls, targets,
                           ncomps = NULL, Target = FALSE) {
  X <- as.matrix(data)
  m <- nrow(X)
  ColMeans <- colMeans(X)
  # Sample standard deviation of every column (denominator m - 1).
  ColSD <- sqrt(colSums((X - rep(colMeans(X), each = m))^2)/(m - 1))
  # SVD of the covariance matrix; cov(X) is symmetric PSD, so d are the
  # eigenvalues and v the eigenvectors (principal directions).
  SVD <- svd(cov(X), nu = ncomps, nv = ncomps)
  eigenValues <- SVD$d[1:ncomps]
  eigenValuesSum <- sum(eigenValues)
  rightsvs <- SVD$v
  ncomp.inv <- 1 / ncomps
  mult.3 <- 3
  mult.6 <- 6
  # Project spec limits, targets and column means onto the retained PCs.
  projectedSpecs <- t(rightsvs) %*% cbind(lsls, usls, targets, ColMeans)
  colnames(projectedSpecs) <- c("lslspc", "uslspc", "targetspc", "colmeanspc")
  # Per-component Cp and the two one-sided Cpk terms: plain denominator
  # (cpkIs1) and target-penalised denominator (cpkIs2).
  cpI <- abs((projectedSpecs[, "uslspc"] - projectedSpecs[, "lslspc"])) / (mult.6 * sqrt(eigenValues))
  cpkIa <- abs((projectedSpecs[, "lslspc"] - projectedSpecs[, "colmeanspc"])) / (mult.3 * sqrt(eigenValues))
  cpkIb <- abs((projectedSpecs[, "colmeanspc"] - projectedSpecs[, "uslspc"])) / (mult.3 * sqrt(eigenValues))
  cpkIs1 <- cbind(cpkIa, cpkIb)
  cpkIa2 <- abs((projectedSpecs[, "lslspc"] - projectedSpecs[, "colmeanspc"])) / (mult.3 * sqrt(eigenValues + (projectedSpecs[, "colmeanspc"] - projectedSpecs[, "targetspc"])^2))
  cpkIb2 <- abs((projectedSpecs[, "colmeanspc"] - projectedSpecs[, "uslspc"])) / (mult.3 * sqrt(eigenValues + (projectedSpecs[, "colmeanspc"] - projectedSpecs[, "targetspc"])^2))
  cpkIs2 <- cbind(cpkIa2, cpkIb2)
  # Wang/Chen family: geometric means across the components.
  mcp_wang <- prod(cpI)^(1/ncomps)
  mcpk_wang <- prod(apply(cpkIs1, 1, min))^ncomp.inv
  mcpm_wang <- prod((abs((projectedSpecs[, "uslspc"] - projectedSpecs[, "lslspc"])) / (mult.6 * sqrt(eigenValues + (projectedSpecs[, "colmeanspc"] - projectedSpecs[, "targetspc"])^2))))^ncomp.inv
  mcpmk_wang <- prod(apply(cpkIs2, 1, min))^ncomp.inv
  Wang1 <- c(ncomps = ncomps, mcp_wang = mcp_wang, mcpk_wang = mcpk_wang,
             mcpm_wang = mcpm_wang, mcpmk_wang = mcpmk_wang)
  Wang <- data.frame(Index = names(Wang1), Metrix = Wang1)
  row.names(Wang) <- NULL
  # Xekalaki family: eigenvalue-weighted arithmetic means.
  spaceDiff_xe <- as.vector(cpI * eigenValues)
  mcp_xe <- (sum(spaceDiff_xe)) / eigenValuesSum
  spaceDiff_xe.min <- apply(cpkIs1, 1, min) * eigenValues
  mcpk_xe <- (sum(spaceDiff_xe.min)) / eigenValuesSum
  spaceDiff_xe.normed <- (abs(projectedSpecs[, "uslspc"] - projectedSpecs[, "lslspc"]) / (mult.6 * sqrt(eigenValues + (projectedSpecs[, "colmeanspc"] - projectedSpecs[, "targetspc"])^2))) * eigenValues
  mcpm_xe <- (sum(spaceDiff_xe.normed)) / eigenValuesSum
  spaceDiff_xe.normed.min <- apply(cpkIs2, 1, min) * eigenValues
  mcpmk_xe <- (sum(spaceDiff_xe.normed.min)) / eigenValuesSum
  Xekalaki1 <- c(ncomps = ncomps, mcp_xe = mcp_xe, mcpk_xe = mcpk_xe,
                 mcpm_xe = mcpm_xe, mcpmk_xe = mcpmk_xe)
  Xekalaki <- data.frame(Index = names(Xekalaki1), Metrix = Xekalaki1)
  row.names(Xekalaki) <- NULL
  # Wang2 family: eigenvalue-weighted geometric means.
  spaceDiff_wang2 <- as.vector(cpI^eigenValues)
  mcp_wang_2 <- (prod(spaceDiff_wang2))^(1 / eigenValuesSum)
  spaceDiff_wang2.min <- apply(cpkIs1, 1, min)^eigenValues
  mcpk_wang_2 <- (prod(spaceDiff_wang2.min))^(1 / eigenValuesSum)
  spaceDiff_wang2.normed <- (abs(projectedSpecs[, "uslspc"] - projectedSpecs[, "lslspc"]) / (mult.6 * sqrt(eigenValues + (projectedSpecs[, "colmeanspc"] - projectedSpecs[, "targetspc"])^2)))^ eigenValues
  mcpm_wang_2 <- (prod(spaceDiff_wang2.normed))^(1 / eigenValuesSum)
  spaceDiff_wang2.normed.min <- apply(cpkIs2, 1, min)^eigenValues
  mcpmk_wang_2 <- (prod(spaceDiff_wang2.normed.min))^(1 / eigenValuesSum)
  Wang21 <- c(ncomps = ncomps, mcp_wang_2 = mcp_wang_2, mcpk_wang_2 = mcpk_wang_2,
              mcpm_wang_2 = mcpm_wang_2, mcpmk_wang_2 = mcpmk_wang_2)
  Wang2 <- data.frame(Index = names(Wang21), Metrix = Wang21)
  row.names(Wang2) <- NULL
  # Univariate Ppk per original variable, centred on targets or means.
  if(Target == TRUE) {
    Pre.Ppk1 <- cbind((targets - lsls) / (mult.3 * ColSD),
                      (usls - targets) / (mult.3 * ColSD))
    Ppk <- data.frame(Index = "Ppk", `Individual Ppks` = apply(Pre.Ppk1, 1, min))
    row.names(Ppk) <- colnames(X)
  } else {
    Pre.Ppk2 <- cbind((ColMeans - lsls) / (mult.3 * ColSD),
                      (usls - ColMeans) / (mult.3 * ColSD))
    Ppk <- data.frame(Index = "Ppk", `Individual Ppks` = apply(Pre.Ppk2, 1, min))
    row.names(Ppk) <- colnames(X)
  }
  Results <- list("multivariate capability indices - Wang CP" = Wang,
                  "multivariate capability indices - Xekalaki CP" = Xekalaki,
                  "multivariate capability indices - Wang2 CP" = Wang2,
                  "Individual Parameter Ppks" = Ppk)
  class(Results) <- "mcpk"
  Results
}
|
# ---------------------------------------------------------------------------
# Biomarker heat maps for medulloblastoma subgroups G3 vs G4 in three GEO
# expression datasets (GSE37418, GSE21140, GSE37382).
# NOTE(review): assumes pheatmap is already attached (no library() call here)
# and that the ../data/*.csv files exist -- confirm before running.
# ---------------------------------------------------------------------------
MR <- read.csv("../data/GSE37418.csv", header = T,check.names = F,row.names = 1)
########HEATMAP of G3 vs G4 for GSE37418 #######################
# Hard-coded sample (column) indices per subgroup: 39 G4 and 16 G3 samples.
G4 <- c(1,2,4,6,7,8,13,14,15,18,19,20,21,22,25,27,28,33,36, 38,39,40,45,46,49,50, 53,54,55,56,57,58,68, 69, 70, 71, 73, 75, 76)
G3 <- c(9, 10, 11, 12, 17, 23, 24,34, 35, 37, 47, 52, 59, 60, 62, 63)
G4_m <-MR[, G4]
G3_m <- MR[, G3]
#WNT <- c(3, 16, 26, 48, 61, 64, 65, 66)
#SHH <- c(5, 29, 30, 31, 32, 42, 43, 44, 51, 72, 74)
# G4 columns first (1:39), then G3 (40:55) -- the splits below rely on this.
MR_clean <- cbind( G4_m, G3_m)
# The six marker genes shown in every heat map of this figure.
Biomarkers <- c("HLX", "TRIM58", "MFAP4",
                "EOMES", "RBM24", "EN2")
biomarker_cpm <- MR_clean[Biomarkers,]
biomarker_G4 <- biomarker_cpm[,1:39]
biomarker_G3 <- biomarker_cpm[,40:55]
# Re-order so G3 samples are displayed first.
biomarker_label <- cbind( biomarker_G3, biomarker_G4)
annot <- data.frame(condition=c(rep("G3",16), rep("G4",39)))
rownames(annot) <- colnames(biomarker_label)
# Unclustered heat map with a subgroup annotation strip (raw values).
pheatmap(as.matrix(biomarker_label),cluster_rows = F,show_colnames=F,cluster_cols = F,annotation_col = annot,cellheight = 25)
########HEATMAP of G3 vs G4 for GSE21140 #######################
MR_test_clean <- read.csv("../data/GSE21140.csv", header = T,check.names = F,row.names = 1)
# Hard-coded subgroup column indices: 35 G4, 27 G3, 8 WNT, 33 SHH samples.
G4 <- c(5,10,12,13,15,18,22,24,25,26,27,30,32,33,41,44,49,55,57,59,61,63,65,71,73,78,80,86,87,92,93,94,98,99,103)
G3 <- c(2,14,17,20,35,38,39,40,42,43,51,54,58,60,66,67,81,82,83,84,85,90,91,95,96,97,102)
WNT <- c(8,19,34,36,37,62,64,74)
SHH <- c(1,3,4,6,7,9,11,16,21,23,28,29,31,45,46,47,48,50,52,53,56,68,69,70,72,75,76,77,79,88,89,100,101)
G4_m <-MR_test_clean[, G4]
G3_m <- MR_test_clean[, G3]
WNT_m <- MR_test_clean[, WNT]
SHH_m <- MR_test_clean[, SHH]
Biomarkers <- c("HLX", "TRIM58", "MFAP4",
                "EOMES", "RBM24", "EN2")
# Column order after cbind: WNT 1:8, SHH 9:41, G3 42:68, G4 69:103 --
# the numeric splits below depend on these offsets.
MR_clean <- cbind(WNT_m, SHH_m, G3_m, G4_m)
marker_counts <- MR_clean[Biomarkers,]
G3_marker_counts <- marker_counts[, 42:68]
G4_marker_counts <- marker_counts[, 69:103]
marker_counts_clean <- cbind(G3_marker_counts,G4_marker_counts)
annot <- data.frame(condition=c(rep("G3",27), rep("G4",35)))
rownames(annot) <- colnames(marker_counts_clean)
# This dataset is plotted on a log2 scale (unlike GSE37418 above).
pheatmap(log2(as.matrix(marker_counts_clean)),cluster_rows = F,show_colnames=F,cluster_cols = F,annotation_col = annot,cellheight = 20)
########HEATMAP of G3 vs G4 for GSE37382 #######################
MR_test2_clean <- read.csv("../data/GSE37382.csv", header = T,check.names = F,row.names = 1)
# GSE37382 columns are already grouped: SHH 1:51, G3 52:97, G4 98:285.
SHH_m <- MR_test2_clean[, 1:51]
G3_m <- MR_test2_clean[, 52:97]
G4_m <-MR_test2_clean[, 98:285]
Biomarkers <- c("HLX", "TRIM58", "MFAP4",
                "EOMES", "RBM24", "EN2")
MR_clean <- cbind(SHH_m, G3_m, G4_m)
marker_counts <- MR_clean[Biomarkers,]
#SHH_marker_counts <- marker_counts[, 1:51]
G3_marker_counts <- marker_counts[, 52:97]
G4_marker_counts <- marker_counts[, 98:285]
marker_counts_clean <- cbind(G3_marker_counts,G4_marker_counts)
annot <- data.frame(condition=c(rep("G3",46), rep("G4",188)))
rownames(annot) <- colnames(marker_counts_clean)
# log2 scale again; larger cellheight for the bigger cohort figure.
pheatmap(log2(as.matrix(marker_counts_clean)),cluster_rows = F,show_colnames=F,cluster_cols = F,annotation_col = annot,cellheight = 40)
|
/figures/suppl/FigureS7.R
|
no_license
|
idellyzhang/DDR
|
R
| false
| false
| 3,025
|
r
|
# ===========================================================================
# Supplementary figure: expression heat maps of six biomarker genes across
# G3 and G4 medulloblastoma samples in GSE37418, GSE21140 and GSE37382.
# NOTE(review): pheatmap must be loaded in the session and the relative
# paths ../data/*.csv must resolve -- verify before sourcing this script.
# ===========================================================================
MR <- read.csv("../data/GSE37418.csv", header = T,check.names = F,row.names = 1)
########HEATMAP of G3 vs G4 for GSE37418 #######################
# Manually curated column positions of the 39 G4 and 16 G3 samples.
G4 <- c(1,2,4,6,7,8,13,14,15,18,19,20,21,22,25,27,28,33,36, 38,39,40,45,46,49,50, 53,54,55,56,57,58,68, 69, 70, 71, 73, 75, 76)
G3 <- c(9, 10, 11, 12, 17, 23, 24,34, 35, 37, 47, 52, 59, 60, 62, 63)
G4_m <-MR[, G4]
G3_m <- MR[, G3]
#WNT <- c(3, 16, 26, 48, 61, 64, 65, 66)
#SHH <- c(5, 29, 30, 31, 32, 42, 43, 44, 51, 72, 74)
# After this cbind, columns 1:39 are G4 and 40:55 are G3 (used below).
MR_clean <- cbind( G4_m, G3_m)
# Marker gene panel shared by all three datasets.
Biomarkers <- c("HLX", "TRIM58", "MFAP4",
                "EOMES", "RBM24", "EN2")
biomarker_cpm <- MR_clean[Biomarkers,]
biomarker_G4 <- biomarker_cpm[,1:39]
biomarker_G3 <- biomarker_cpm[,40:55]
# Put G3 on the left of the figure.
biomarker_label <- cbind( biomarker_G3, biomarker_G4)
annot <- data.frame(condition=c(rep("G3",16), rep("G4",39)))
rownames(annot) <- colnames(biomarker_label)
# No clustering; columns annotated by subgroup; values plotted as-is.
pheatmap(as.matrix(biomarker_label),cluster_rows = F,show_colnames=F,cluster_cols = F,annotation_col = annot,cellheight = 25)
########HEATMAP of G3 vs G4 for GSE21140 #######################
MR_test_clean <- read.csv("../data/GSE21140.csv", header = T,check.names = F,row.names = 1)
# Manually curated subgroup columns: 35 G4, 27 G3, 8 WNT, 33 SHH.
G4 <- c(5,10,12,13,15,18,22,24,25,26,27,30,32,33,41,44,49,55,57,59,61,63,65,71,73,78,80,86,87,92,93,94,98,99,103)
G3 <- c(2,14,17,20,35,38,39,40,42,43,51,54,58,60,66,67,81,82,83,84,85,90,91,95,96,97,102)
WNT <- c(8,19,34,36,37,62,64,74)
SHH <- c(1,3,4,6,7,9,11,16,21,23,28,29,31,45,46,47,48,50,52,53,56,68,69,70,72,75,76,77,79,88,89,100,101)
G4_m <-MR_test_clean[, G4]
G3_m <- MR_test_clean[, G3]
WNT_m <- MR_test_clean[, WNT]
SHH_m <- MR_test_clean[, SHH]
Biomarkers <- c("HLX", "TRIM58", "MFAP4",
                "EOMES", "RBM24", "EN2")
# cbind order fixes the offsets used below: WNT 1:8, SHH 9:41,
# G3 42:68, G4 69:103.
MR_clean <- cbind(WNT_m, SHH_m, G3_m, G4_m)
marker_counts <- MR_clean[Biomarkers,]
G3_marker_counts <- marker_counts[, 42:68]
G4_marker_counts <- marker_counts[, 69:103]
marker_counts_clean <- cbind(G3_marker_counts,G4_marker_counts)
annot <- data.frame(condition=c(rep("G3",27), rep("G4",35)))
rownames(annot) <- colnames(marker_counts_clean)
# Plotted on log2 scale for this dataset.
pheatmap(log2(as.matrix(marker_counts_clean)),cluster_rows = F,show_colnames=F,cluster_cols = F,annotation_col = annot,cellheight = 20)
########HEATMAP of G3 vs G4 for GSE37382 #######################
MR_test2_clean <- read.csv("../data/GSE37382.csv", header = T,check.names = F,row.names = 1)
# Columns of GSE37382 come pre-grouped: SHH 1:51, G3 52:97, G4 98:285.
SHH_m <- MR_test2_clean[, 1:51]
G3_m <- MR_test2_clean[, 52:97]
G4_m <-MR_test2_clean[, 98:285]
Biomarkers <- c("HLX", "TRIM58", "MFAP4",
                "EOMES", "RBM24", "EN2")
MR_clean <- cbind(SHH_m, G3_m, G4_m)
marker_counts <- MR_clean[Biomarkers,]
#SHH_marker_counts <- marker_counts[, 1:51]
G3_marker_counts <- marker_counts[, 52:97]
G4_marker_counts <- marker_counts[, 98:285]
marker_counts_clean <- cbind(G3_marker_counts,G4_marker_counts)
annot <- data.frame(condition=c(rep("G3",46), rep("G4",188)))
rownames(annot) <- colnames(marker_counts_clean)
# log2 scale; taller cells for the large-cohort panel.
pheatmap(log2(as.matrix(marker_counts_clean)),cluster_rows = F,show_colnames=F,cluster_cols = F,annotation_col = annot,cellheight = 40)
|
#
# Project: DescTools
#
# Purpose: Tools for descriptive statistics, the missing link...
# Univariat, pairwise bivariate, groupwise und multivariate
#
# Author: Andri Signorell
# Version: 0.99.19 (under construction)
#
# Depends: tcltk
# Imports: boot
# Suggests: RDCOMClient
#
# Datum:
# 31.07.2013 version 0.99.4 almost releaseable
# 06.05.2011 created
#
# ****************************************************************************
# ********** DescTools' design goals, Dos and Donts
# Some thoughts about coding:
# 1. Use recycling rules as often and wherever possible.
# 2. Handle NAs by adding an na.rm option (default FALSE) where it makes sense.
# 3. Use Google Naming StyleGuide
# 4. no data.frame or matrix interfaces for functions, the user is supposed to use
# sapply and apply.
# Interfaces for data.frames are widely deprecated nowadays and so we abstained to implement one.
# Use do.call (do.call), rbind and lapply for getting a matrix with estimates and confidence
# intervals for more than 1 column.
# 5. A pairwise apply construction is implemented PwApply
# 6. Use formula interfaces wherever possible.
# 7. use test results format class "htest"
# 8. deliver confidence intervals wherever possible, rather than tests (use ci for that)
# 9. always define appropriate default values for function arguments
# 10. provide an inverse function whenever possible (ex.: BoxCox - BoxCoxInv)
# 11. auxiliary functions, which don't have to be defined globally are put in the function's body
# (and not made invisible to the user by using .funname)
# 12. restrict the use of other libraries to the minimum (possibly only core),
# avoid hierarchical dependencies of packages over more than say 2 steps
# 13. do not create wrappers, which basically only define specific arguments and
# call an existing function (we would run into a forest of functions, loosing overview)
# 14. make functions as flexible as possible but do not define more than say
# a maximum of 12 arguments for a function (can hardly be controlled by the user)
# 15. define reasonable default values for possibly all used arguments
# (besides x), the user should get some result when typing fun(x)!
# 16. do not reinvent the wheel
# 17. do not write a function for a problem already solved(!), unless you think
# it is NOT (from your point of view) and you are pretty sure you can do better..
# 18. take the most flexible function on the market, if there are several
# take the most efficient function on the market, if there are differences in speed
# 19. make it work - make it safe - make it fast (in this very order...)
# 20. possibly publish all functions, if internal functions are used, define it within
# the functions body, this will ensure a quick source lookup.
# ********** Similar packages:
# - descr, UsingR
# - prettyR
# - reporttools
# - lessR (full)
# - Hmisc (describe)
# - psych
# check:
# library(pwr) # Power-Analyse
# http://www.ats.ucla.edu/stat/r/dae/t_test_power2.htm
# Data in packages
# http://www.hep.by/gnu/r-patched/r-exts/R-exts_8.html
# library(gtools): odd zu IsOdd, vgl: stars.pval
# library(e1071): hamming.distance, hamming.window, hsv_palette, matchControls (SampleTwins)
# library(plotrix): color.id (RgbToCol), color.scale (FindColor)
# vgl: PlotCI (plotCI), plot_bg
# ********** Know issues:
# bug: Desc( driver + temperature ~ operator + interaction(city, driver, sep=":") , data=d.pizza)
# works: Desc( driver + temperature ~ operator + interaction(city, driver, sep=".") , data=d.pizza)
# works: Desc( driver + temperature ~ operator + city:driver, data=d.pizza)
# - bei der Anwendung von tapply wird die Bezeichnung des Levels nicht verwendet
# Beispiel:
# tapply( d.pizza$delivery_min, d.pizza$driver, Desc )
# Problem: Titel und level kommt nicht mit
# - DescWrd.factor.factor gibt die Argumente an WrdText nicht weiter? fontsize, etc. (17.4.2012)
# - ein langer label fuehrt dazu, dass die Tabellenausgabe umgebrochen wird und die Grafik unter dem Text plaziert wird.
# this error arises when no plot windows exists, but is the same for boxplot, so we leave it here
# PlotViolin(temperature ~ driver, d.pizza, col="steelblue", panel.first=grid())
# Error in int_abline(a = a, b = b, h = h, v = v, untf = untf, ...) :
# plot.new has not been called yet
# ********** Open implementations:
# functions:
# polychor, tetrachor
# Cohen's effect fct
# Cohen's effect hlp
# eta fct lines
# eta hlp
# eta2 <- function(x,y) {
# return(summary(lm(as.formula(x~y)))$r.squared)
# }
# open multiple comparisons:
# ScottKnott test (scottknott),
# Waller-Duncan test (agricolae), Gabriel test (not found)
# flag ~ flag mit mosaicplot und allgemein bivariate darstellung
# ConDisPairs als O(n log(n)) AVL-Tree implementation
# PlotMultiDens stack and 100% (cdplot)
#
# PlotCirc for symmetric tables
# Konsequente ueberpruefung der uebergabe und weiterreichung der parameter
# z.B. was ist mit Boxplot las?
# uebersicht, was wird wo vewendet, z.b. kommt rfrq ueberhaupt an bei Desc(data.frame)
# Was ist die maximale Menge an parameter?
# - Tabellen factor ~ factor nebeneinander wenn Platz
# PercTable tasks:
# Sum, perc, usw. Texte parametrisieren
# 0 values als '-' optional anzeigen
# Format perc stimmt im ersten Fall nicht, parametrisieren?
# Reihenfolge Zuerich, perc vs. perc , Zuerich wechselbar machen. Ist das schon?
# faqNC <- function() browseURL("http://www.ncfaculty.net/dogle/R/FAQ/FAQ_R_NC.html")
# Formula-Interface fuer PlotBag
# - replace .fmt by Format
# - DescDlg
# - Object Browser a la RevoR
# - Fixierung Nachkommastellen pro Variable - geloest, aber unbefriedigend
# sollte unterscheiden zwischen kleinen (1.22e-22), mittleren (100.33) und
# grossen Zahlen (1.334e5)
# grosse Zahlen mit Tausendertrennzeichen ausgegeben: 13'899
# - Alle PlotDesc sollten so funktionieren wie Desc, also mit data, ohne data etc.
# wenn mal viel Zeit: test routinen mit htest result fuer
# SomersDelta, GoodmanKruskal etc.
# separate Data ========
# Creation of the Page distribution function for the Page TrendTest
#
# .PageDF <- list(
# NA, NA
# , k3 = c(1, 3, 3, 5, 6)
# , k4 = c(1, 4, 5, 9, 11, 13, 15, 19, 20, 23, 24)
# , k5 = c(1, 5, 8, 14, 21, 27, 31, 41, 47, 57, 63, 73, 79, 89, 93, 99, 106, 112, 115, 119, 120)
# , k6 = c(1, 6, 12, 21, 37, 49, 63, 87, 107, 128, 151, 179, 203, 237,
# 257, 289, 331, 360, 389, 431, 463, 483, 517, 541, 569, 592, 613,
# 633, 657, 671, 683, 699, 708, 714, 719, 720)
# , k7 = c(1, 7, 17, 31, 60, 86, 121, 167, 222, 276, 350, 420, 504, 594,
# 672, 762, 891, 997, 1120, 1254, 1401, 1499, 1667, 1797, 1972,
# 2116, 2284, 2428, 2612, 2756, 2924, 3068, 3243, 3373, 3541, 3639,
# 3786, 3920, 4043, 4149, 4278, 4368, 4446, 4536, 4620, 4690, 4764,
# 4818, 4873, 4919, 4954, 4980, 5009, 5023, 5033, 5039, 5040)
# , k8 = c(1, 8, 23, 45, 92, 146, 216, 310, 439, 563, 741, 924, 1161,
# 1399, 1675, 1939, 2318, 2667, 3047, 3447, 3964, 4358, 4900, 5392,
# 6032, 6589, 7255, 7850, 8626, 9310, 10096, 10814, 11736, 12481,
# 13398, 14179, 15161, 15987, 16937, 17781, 18847, 19692, 20628,
# 21473, 22539, 23383, 24333, 25159, 26141, 26922, 27839, 28584,
# 29506, 30224, 31010, 31694, 32470, 33065, 33731, 34288, 34928,
# 35420, 35962, 36356, 36873, 37273, 37653, 38002, 38381, 38645,
# 38921, 39159, 39396, 39579, 39757, 39881, 40010, 40104, 40174,
# 40228, 40275, 40297, 40312, 40319, 40320)
# , k9 = c(1, 9, 30, 64, 136, 238, 368, 558, 818, 1102, 1500, 1954, 2509,
# 3125, 3881, 4625, 5647, 6689, 7848, 9130, 10685, 12077, 13796,
# 15554, 17563, 19595, 21877, 24091, 26767, 29357, 32235, 35163,
# 38560, 41698, 45345, 48913, 52834, 56700, 61011, 65061, 69913,
# 74405, 79221, 84005, 89510, 94464, 100102, 105406, 111296, 116782,
# 122970, 128472, 134908, 140730, 146963, 152987, 159684, 165404,
# 172076, 178096, 184784, 190804, 197476, 203196, 209893, 215917,
# 222150, 227972, 234408, 239910, 246098, 251584, 257474, 262778,
# 268416, 273370, 278875, 283659, 288475, 292967, 297819, 301869,
# 306180, 310046, 313967, 317535, 321182, 324320, 327717, 330645,
# 333523, 336113, 338789, 341003, 343285, 345317, 347326, 349084,
# 350803, 352195, 353750, 355032, 356191, 357233, 358255, 358999,
# 359755, 360371, 360926, 361380, 361778, 362062, 362322, 362512,
# 362642, 362744, 362816, 362850, 362871, 362879, 362880)
# , k10 = c(1, 10, 38, 89, 196, 373, 607, 967, 1465, 2084, 2903, 3943, 5195, 6723, 8547, 10557, 13090, 15927, 19107, 22783, 27088, 31581, 36711, 42383, 48539, 55448, 62872, 70702, 79475, 88867, 98759, 109437, 121084, 133225, 146251, 160169, 174688, 190299, 206577, 223357, 242043, 261323, 280909, 301704, 324089, 346985, 370933, 395903, 421915, 449011, 477478, 505905, 536445, 567717, 599491, 632755, 667503, 702002, 738301, 774897, 813353, 852279, 892263, 931649, 973717, 1016565, 1058989, 1101914, 1146958, 1191542, 1237582, 1283078, 1329968, 1377004, 1424345, 1471991, 1520878, 1569718, 1617762, 1666302, 1716368, 1765338, 1814400, 1863462, 1912432, 1962498, 2011038, 2059082, 2107922, 2156809, 2204455, 2251796, 2298832, 2345722, 2391218, 2437258, 2481842, 2526886, 2569811, 2612235, 2655083, 2697151, 2736537, 2776521, 2815447, 2853903, 2890499, 2926798, 2961297, 2996045, 3029309, 3061083, 3092355, 3122895, 3151322, 3179789, 3206885, 3232897, 3257867, 3281815, 3304711, 3327096, 3347891, 3367477, 3386757, 3405443, 3422223, 3438501, 3454112, 3468631, 3482549, 3495575, 3507716, 3519363, 3530041, 3539933, 3549325, 3558098, 3565928, 3573352, 3580261, 3586417, 3592089, 3597219, 3601712, 3606017, 3609693, 3612873, 3615710, 3618243, 3620253, 3622077, 3623605, 3624857, 3625897, 3626716, 3627335, 3627833, 3628193, 3628427, 3628604, 3628711, 3628762, 3628790, 3628799, 3628800)
#
# , k11 = c(1, 11, 47, 121, 277, 565, 974, 1618, 2548, 3794, 5430, 7668, 10382, 13858, 18056, 23108, 29135, 36441, 44648, 54464, 65848, 78652, 92845, 109597, 127676, 148544, 171124, 196510, 223843, 254955, 287403, 323995, 363135, 406241, 451019, 501547, 553511, 610953, 670301, 735429, 803299, 877897, 953161, 1036105, 1122228, 1215286, 1309506, 1413368, 1518681, 1632877, 1749090, 1874422, 2002045, 2140515, 2278832, 2429566, 2581919, 2744859, 2908190, 3085090, 3263110, 3453608, 3643760, 3847514, 4052381, 4272633, 4489678, 4722594, 4956028, 5204156, 5449644, 5712530, 5973493, 6250695, 6523539, 6816137, 7104526, 7411262, 7710668, 8030252, 8345178, 8678412, 9002769, 9348585, 9686880, 10046970, 10393880, 10763840, 11125055, 11506717, 11876164, 12267556, 12646883, 13049009, 13434313, 13845399, 14241951, 14660041, 15058960, 15484804, 15894731, 16324563, 16734970, 17170868, 17587363, 18027449, 18444344, 18884724, 19305912, 19748160, 20168640, 20610888, 21032076, 21472456, 21889351, 22329437, 22745932, 23181830, 23592237, 24022069, 24431996, 24857840, 25256759, 25674849, 26071401, 26482487, 26867791, 27269917, 27649244, 28040636, 28410083, 28791745, 29152960, 29522920, 29869830, 30229920, 30568215, 30914031, 31238388, 31571622, 31886548, 32206132, 32505538, 32812274, 33100663, 33393261, 33666105, 33943307, 34204270, 34467156, 34712644, 34960772, 35194206, 35427122, 35644167, 35864419, 36069286, 36273040, 36463192, 36653690, 36831710, 37008610, 37171941, 37334881, 37487234, 37637968, 37776285, 37914755, 38042378, 38167710, 38283923, 38398119, 38503432, 38607294, 38701514, 38794572, 38880695, 38963639, 39038903, 39113501, 39181371, 39246499, 39305847, 39363289, 39415253, 39465781, 39510559, 39553665, 39592805, 39629397, 39661845, 39692957, 39720290, 39745676, 39768256, 39789124, 39807203, 39823955, 39838148, 39850952, 39862336, 39872152, 39880359, 39887665, 39893692, 39898744, 39902942, 39906418, 39909132, 39911370, 39913006, 39914252, 39915182, 39915826, 39916235, 39916523, 
39916679, 39916753, 39916789, 39916799, 39916800)
#
# , k12 = c(1, 12, 57, 161, 385, 832, 1523, 2629, 4314, 6678, 9882, 14397, 20093, 27582, 36931, 48605, 62595, 80232, 100456, 125210, 154227, 188169, 226295, 272179, 322514, 381283, 446640, 521578, 602955, 697449, 798012, 913234, 1037354, 1177139, 1325067, 1493942, 1670184, 1867627, 2075703, 2306597, 2547605, 2817918, 3095107, 3402876, 3723206, 4075092, 4436130, 4836594, 5245232, 5694249, 6155263, 6658390, 7171170, 7734985, 8304533, 8927791, 9562307, 10250749, 10946272, 11707175, 12472247, 13304674, 14143124, 15051520, 15964324, 16958207, 17951038, 19024576, 20103385, 21266520, 22428668, 23688490, 24941145, 26293113, 27640685, 29092979, 30538037, 32094364, 33635325, 35292663, 36939122, 38705429, 40450799, 42327667, 44179645, 46167953, 48128734, 50226064, 52293360, 54508939, 56686818, 59015668, 61303483, 63746140, 66141668, 68703444, 71211606, 73883239, 76497639, 79284492, 82008603, 84912335, 87739711, 90750133, 93683865, 96803338, 99840816, 103063901, 106199027, 109522404, 112757434, 116187490, 119511072, 123034744, 126446666, 130064197, 133565830, 137269085, 140848253, 144633119, 148294783, 152161902, 155889546, 159821171, 163617371, 167622510, 171480066, 175541648, 179449088, 183562195, 187525039, 191692873, 195691020, 199891634, 203924412, 208164174, 212229695, 216488881, 220574078, 224852631, 228953203, 233247651, 237351468, 241650132, 245753949, 250048397, 254148969, 258427522, 262512719, 266771905, 270837426, 275077188, 279109966, 283310580, 287308727, 291476561, 295439405, 299552512, 303459952, 307521534, 311379090, 315384229, 319180429, 323112054, 326839698, 330706817, 334368481, 338153347, 341732515, 345435770, 348937403, 352554934, 355966856, 359490528, 362814110, 366244166, 369479196, 372802573, 375937699, 379160784, 382198262, 385317735, 388251467, 391261889, 394089265, 396992997, 399717108, 402503961, 405118361, 407789994, 410298156, 412859932, 415255460, 417698117, 419985932, 422314782, 424492661, 426708240, 428775536, 430872866, 432833647, 434821955, 
436673933, 438550801, 440296171, 442062478, 443708937,
# 445366275, 446907236, 448463563, 449908621, 451360915, 452708487, 454060455, 455313110, 456572932, 457735080, 458898215, 459977024, 461050562, 462043393, 463037276, 463950080, 464858476, 465696926, 466529353, 467294425, 468055328, 468750851, 469439293, 470073809, 470697067, 471266615, 471830430, 472343210, 472846337, 473307351, 473756368, 474165006, 474565470, 474926508, 475278394, 475598724, 475906493, 476183682, 476453995, 476695003, 476925897, 477133973, 477331416, 477507658, 477676533, 477824461, 477964246, 478088366, 478203588, 478304151, 478398645, 478480022, 478554960, 478620317, 478679086, 478729421, 478775305, 478813431, 478847373, 478876390, 478901144, 478921368, 478939005, 478952995, 478964669, 478974018, 478981507, 478987203, 478991718, 478994922, 478997286, 478998971, 479000077, 479000768, 479001215, 479001439, 479001543, 479001588, 479001599, 479001600 )
#
# , k13 = c(1, 13, 68, 210, 527, 1197, 2324, 4168, 7119, 11429, 17517, 26225, 37812, 53230, 73246, 98816, 130483, 170725, 218750, 278034, 349136, 434162, 532482, 651024, 785982, 944022, 1124332, 1332640, 1565876, 1835792, 2132840, 2472812, 2848749, 3273357, 3735585, 4260527, 4827506, 5461252, 6147299, 6908609, 7725716, 8635460, 9600260, 10666252, 11804773, 13050503, 14365677, 15812701, 17335403, 18994955, 20742001, 22638493, 24624900, 26787112, 29032733, 31464927, 34008755, 36743621, 39579021, 42647201, 45817786, 49226378, 52752239, 56535435, 60435209, 64628147, 68927405, 73528499, 78274283, 83329815, 88504447, 94050417, 99720505, 105759011, 111937321, 118508917, 125224959, 132372517, 139644194, 147366078, 155251313, 163598355, 172068955, 181074075, 190212385, 199875487, 209687980, 220053214, 230566521, 241680167, 252905559, 264763303, 276775771, 289421809, 302176267, 315640063, 329231261, 343509837, 357915454, 373057790, 388317114, 404365328, 420470916, 437394874, 454438992, 472280042, 490183678, 508970736, 527836540, 547557794, 567333404, 588036304, 608771329, 630463117, 652127890, 674778950, 697468748, 721126694, 744732766, 769392312, 794014392, 819670692, 845236737, 871892593, 898464180, 926132356, 953650676, 982290898, 1010834369, 1040477655, 1069921254, 1100563830, 1131007339, 1162609975, 1193943276, 1226507722, 1258827639, 1292328257, 1325502938, 1359918362, 1394027869, 1429370035, 1464279071, 1500517059, 1536339992, 1573396522, 1609980791, 1647854021, 1685286706, 1723967698, 1762082365, 1801533261, 1840420643, 1880601675, 1920106583, 1960960701, 2001224218, 2042719638, 2083488859, 2125600829, 2167005742, 2209678334, 2251531986, 2294726538, 2337123023, 2380790291, 2423568572, 2467632034, 2510865295, 2555331665, 2598793469, 2643582407, 2687416596, 2732465154, 2776464125, 2821723625, 2865981806, 2911394478, 2955721182, 3001237104, 3045709215, 3091307829, 3135712971, 3181311585, 3225783696, 3271299618, 3315626322, 3361038994, 3405297175, 3450556675, 3494555646, 
3539604204, 3583438393, 3628227331, 3671689135, 3716155505,
# 3759388766, 3803452228, 3846230509, 3889897777, 3932294262, 3975488814, 4017342466, 4060015058, 4101419971, 4143531941, 4184301162, 4225796582, 4266060099, 4306914217, 4346419125, 4386600157, 4425487539, 4464938435, 4503053102, 4541734094, 4579166779, 4617040009, 4653624278, 4690680808, 4726503741, 4762741729, 4797650765, 4832992931, 4867102438, 4901517862, 4934692543, 4968193161, 5000513078, 5033077524, 5064410825, 5096013461, 5126456970, 5157099546, 5186543145, 5216186431, 5244729902, 5273370124, 5300888444, 5328556620, 5355128207, 5381784063, 5407350108, 5433006408, 5457628488, 5482288034, 5505894106, 5529552052, 5552241850, 5574892910, 5596557683, 5618249471, 5638984496, 5659687396, 5679463006, 5699184260, 5718050064, 5736837122, 5754740758, 5772581808, 5789625926, 5806549884, 5822655472, 5838703686, 5853963010, 5869105346, 5883510963, 5897789539, 5911380737, 5924844533, 5937598991, 5950245029, 5962257497, 5974115241, 5985340633, 5996454279, 6006967586, 6017332820, 6027145313, 6036808415, 6045946725, 6054951845, 6063422445, 6071769487, 6079654722, 6087376606, 6094648283, 6101795841, 6108511883, 6115083479, 6121261789, 6127300295, 6132970383, 6138516353, 6143690985, 6148746517, 6153492301, 6158093395, 6162392653, 6166585591, 6170485365, 6174268561, 6177794422, 6181203014, 6184373599, 6187441779, 6190277179, 6193012045, 6195555873, 6197988067, 6200233688, 6202395900, 6204382307, 6206278799, 6208025845, 6209685397, 6211208099, 6212655123, 6213970297, 6215216027, 6216354548, 6217420540, 6218385340, 6219295084, 6220112191, 6220873501, 6221559548, 6222193294, 6222760273, 6223285215, 6223747443, 6224172051, 6224547988, 6224887960, 6225185008, 6225454924, 6225688160, 6225896468, 6226076778, 6226234818, 6226369776, 6226488318, 6226586638, 6226671664, 6226742766, 6226802050, 6226850075, 6226890317, 6226921984, 6226947554, 6226967570, 6226982988, 6226994575, 6227003283, 6227009371, 6227013681, 6227016632, 6227018476, 6227019603, 6227020273, 6227020590, 6227020732, 
6227020787, 6227020799, 6227020800)
#
# , k14 = c(1, 14, 80, 269, 711, 1689, 3467, 6468, 11472, 19093, 30278, 46574, 69288, 99975, 141304, 195194, 264194, 352506, 462442, 598724, 766789, 970781, 1213870, 1507510, 1853680, 2260125, 2736501, 3291591, 3930026, 4668007, 5508108, 6466862, 7556159, 8787659, 10165645, 11724144, 13460539, 15392221, 17539134, 19922717, 22546063, 25447736, 28627069, 32116076, 35937108, 40106433, 44631074, 49573596, 54926631, 60716114, 66974508, 73740246, 81009240, 88845749, 97239223, 106246902, 115900686, 126216169, 137197091, 148953202, 161446731, 174730758, 188835459, 203837905, 219695178, 236524328, 254283795, 273083666, 292923813, 313860397, 335854799, 359112526, 383528656, 409202706, 436135896, 464473466, 494134210, 525276498, 557815202, 591946436, 627603800, 664907029, 703773267, 744486823, 786877234, 831103465, 877129675, 925182097, 975110533, 1027121161, 1081080881, 1137323422, 1195661689, 1256271970, 1319049120, 1384348268, 1451952010, 1522055063, 1594541080, 1669783989, 1747541228, 1828055758, 1911151548, 1997286462, 2086139682, 2177925841, 2272580839, 2370486063, 2471328513, 2575410222, 2682471831, 2793082385, 2906881741, 3024092956, 3144510886, 3268758800, 3396339981, 3527578003, 3662304885, 3800998837, 3943227695, 4089440734, 4239185132, 4393196954, 4551031331, 4712856765, 4878478438, 5048720892, 5222754969, 5401045094, 5583410846, 5770395123, 5961416258, 6157027619, 6356554732, 6561015163, 6769843465, 6983093805, 7200534248, 7423263710, 7650023569, 7881592853, 8117625307, 8358760439, 8604199870, 8854704639, 9109316970, 9369314835, 9633980748, 9903337745, 10177004917, 10456529218, 10740122230, 11028754748, 11321981370, 11620526571, 11923494567, 12231834199, 12544092637, 12862071155, 13184668352, 13511964024, 13843525611, 14181198310, 14522618329, 14869105782, 15220174133, 15576509168, 15936926462, 16302784406, 16672089744, 17047134658, 17426587171, 17810429228, 18198087372, 18591770156, 18988751460, 19390461912, 19796344325, 20207120401, 20621426516, 21040873172, 
21463087253, 21890649743, 22322106033, 22757217771, 23195600046,
# 23639594170, 24086026475, 24536477172, 24990465186, 25448639418, 25909641657, 26374985116, 26842266606, 27314012018, 27788960817, 28266602799, 28746609271, 29231436410, 29717689954, 30206932003, 30698971843, 31193949888, 31690902354, 32191012868, 32692174745, 33196629733, 33703478249, 34211544046, 34720969890, 35234031737, 35747617060, 36262719119, 36779697578, 37298186864, 37817722298, 38338904825, 38860175016, 39383211341, 39907644570, 40431821887, 40956454566, 41483109694, 42009225414, 42535209127, 43062242912, 43589145600, 44116048288, 44643082073, 45169065786, 45695181506, 46221836634, 46746469313, 47270646630, 47795079859, 48318116184, 48839386375, 49360568902, 49880104336, 50398593622, 50915572081, 51430674140, 51944259463, 52457321310, 52966747154, 53474812951, 53981661467, 54486116455, 54987278332, 55487388846, 55984341312, 56479319357, 56971359197, 57460601246, 57946854790, 58431681929, 58911688401, 59389330383, 59864279182, 60336024594, 60803306084, 61268649543, 61729651782, 62187826014, 62641814028, 63092264725, 63538697030, 63982691154, 64421073429, 64856185167, 65287641457, 65715203947, 66137418028, 66556864684, 66971170799, 67381946875, 67787829288, 68189539740, 68586521044, 68980203828, 69367861972, 69751704029, 70131156542, 70506201456, 70875506794, 71241364738, 71601782032, 71958117067, 72309185418, 72655672871, 72997092890, 73334765589, 73666327176, 73993622848, 74316220045, 74634198563, 74946457001, 75254796633, 75557764629, 75856309830, 76149536452, 76438168970, 76721761982, 77001286283, 77274953455, 77544310452, 77808976365, 78068974230, 78323586561, 78574091330, 78819530761, 79060665893, 79296698347, 79528267631, 79755027490, 79977756952, 80195197395, 80408447735, 80617276037, 80821736468, 81021263581, 81216874942, 81407896077, 81594880354, 81777246106, 81955536231, 82129570308, 82299812762, 82465434435, 82627259869, 82785094246, 82939106068, 83088850466, 83235063505, 83377292363, 83515986315, 83650713197, 83781951219, 83909532400, 
84033780314, 84154198244, 84271409459, 84385208815, 84495819369,
# 84602880978, 84706962687, 84807805137, 84905710361, 85000365359, 85092151518, 85181004738, 85267139652, 85350235442, 85430749972, 85508507211, 85583750120, 85656236137, 85726339190, 85793942932, 85859242080, 85922019230, 85982629511, 86040967778, 86097210319, 86151170039, 86203180667, 86253109103, 86301161525, 86347187735, 86391413966, 86433804377, 86474517933, 86513384171, 86550687400, 86586344764, 86620475998, 86653014702, 86684156990, 86713817734, 86742155304, 86769088494, 86794762544, 86819178674, 86842436401, 86864430803, 86885367387, 86905207534, 86924007405, 86941766872, 86958596022, 86974453295, 86989455741, 87003560442, 87016844469, 87029337998, 87041094109, 87052075031, 87062390514, 87072044298, 87081051977, 87089445451, 87097281960, 87104550954, 87111316692, 87117575086, 87123364569, 87128717604, 87133660126, 87138184767, 87142354092, 87146175124, 87149664131, 87152843464, 87155745137, 87158368483, 87160752066, 87162898979, 87164830661, 87166567056, 87168125555, 87169503541, 87170735041, 87171824338, 87172783092, 87173623193, 87174361174, 87174999609, 87175554699, 87176031075, 87176437520, 87176783690, 87177077330, 87177320419, 87177524411, 87177692476, 87177828758, 87177938694, 87178027006, 87178096006, 87178149896, 87178191225, 87178221912, 87178244626, 87178260922, 87178272107, 87178279728, 87178284732, 87178287733, 87178289511, 87178290489, 87178290931, 87178291120, 87178291186, 87178291199, 87178291200 )
#
# , k15 = c(1, 15, 93, 339, 946, 2344, 5067, 9845, 18094, 31210, 51135, 80879, 123856, 183350, 265744, 375782, 520770, 709108, 950935, 1254359, 1637783, 2110255, 2688261, 3392105, 4243753, 5253985, 6463435, 7887051, 9559689, 11508657, 13779635, 16385319, 19406949, 22847453, 26778757, 31237429, 36312890, 41988174, 48415169, 55581133, 63617482, 72531890, 82493993, 93449491, 105663309, 119038213, 133821033, 149981059, 167810258, 187138620, 208394580, 231407260, 256572630, 283728734, 313349422, 345140612, 379784963, 416871267, 457037763, 499992359, 546463298, 595886554, 649243982, 705940396, 766920856, 831552862, 900947933, 974276983, 1052930913, 1135866291, 1224452526, 1317816142, 1417501545, 1522137313, 1633652530, 1750626806, 1875052020, 2005336686, 2143665106, 2288248572, 2441639216, 2601691186, 2771087853, 2947714613, 3134569070, 3328885582, 3534148307, 3747528715, 3972688056, 4206327920, 4452435789, 4707707507, 4976502908, 5254730366, 5547265512, 5849894908, 6167966973, 6496524245, 6841251954, 7197208516, 7570606695, 7955492307, 8358702869, 8774325693, 9209487348, 9657140024, 10125565750, 10607269130, 11110947428, 11628498256, 12168723926, 12723609294, 13303228032, 13897378066, 14517038181, 15152582797, 15815095216, 16493452984, 17200382721, 17923779849, 18677052770, 19447720986, 20249039825, 21068309835, 21920989644, 22790961184, 23695090223, 24618800757, 25577947305, 26555930925, 27571664648, 28606831690, 29681188983, 30776084989, 31910591023, 33065874467, 34264718158, 35483254398, 36745418556, 38030320602, 39360005810, 40711195500, 42110524356, 43531199878, 45001319765, 46494257553, 48036654343, 49602075643, 51221875032, 52862604614, 54557065970, 56276716608, 58051331346, 59848489468, 61704800734, 63582981112, 65521450173, 67484389131, 69506528883, 71552497079, 73663855894, 75795896650, 77992481274, 80214974822, 82502403057, 84811883255, 87191972089, 89593082611, 92064881373, 94560883919, 97125402107, 99713005329, 102377610307, 105060302611, 107817686686, 
110599694856, 113456740182, 116333639168, 119291579167, 122267356121,
# 125323501236, 128401997238, 131558157109, 134734085833, 137997611218, 141274089126, 144635051739, 148017803651, 151483637626, 154964665476, 158536414603, 162120609581, 165794608949, 169485898871, 173262539499, 177052751993, 180940334728, 184834047000, 188819766650, 192821736664, 196913537154, 201013587060, 205213037672, 209416246916, 213716661616, 218026615728, 222428224181, 226835589231, 231347734832, 235855804736, 240461451056, 245075672864, 249785350011, 254493014069, 259306386598, 264111876662, 269020469253, 273929072733, 278932752466, 283931152738, 289039128373, 294131477475, 299325743006, 304517112400, 309806619906, 315081186550, 320465864608, 325829963244, 331299254515, 336756611895, 342309552544, 347844707934, 353492785526, 359109888388, 364830049809, 370533853771, 376336452468, 382110605480, 387994926455, 393843943991, 399797486177, 405725583879, 411748092537, 417737799943, 423839699258, 429894358406, 436050852136, 442177460900, 448399401827, 454577618889, 460862851875, 467097523711, 473433714049, 479729592211, 486115143213, 492451898587, 498897897209, 505281471971, 511760849379, 518195355931, 524718405991, 531183425467, 537750411835, 544250726707, 550846203604, 557385785810, 564007939322, 570567450178, 577227764133, 583810787025, 590480506935, 597092270467, 603784200787, 610403013525, 617114828578, 623745063632, 630461354816, 637109043600, 643828046362, 650470873262, 657203494738, 663846321638, 670565324400, 677213013184, 683929304368, 690559539422, 697271354475, 703890167213, 710582097533, 717193861065, 723863580975, 730446603867, 737106917822, 743666428678, 750288582190, 756828164396, 763423641293, 769923956165, 776490942533, 782955962009, 789479012069, 795913518621, 802392896029, 808776470791, 815222469413, 821559224787, 827944775789, 834240653951, 840576844289, 846811516125, 853096749111, 859274966173, 865496907100, 871623515864, 877780009594, 883834668742, 889936568057, 895926275463, 901948784121, 907876881823, 913830424009, 919679441545, 
925563762520, 931337915532, 937140514229, 942844318191, 948564479612,
# 954181582474, 959829660066, 965364815456, 970917756105, 976375113485, 981844404756, 987208503392, 992593181450, 997867748094, 1003157255600, 1008348624994, 1013542890525, 1018635239627, 1023743215262, 1028741615534, 1033745295267, 1038653898747, 1043562491338, 1048367981402, 1053181353931, 1057889017989, 1062598695136, 1067212916944, 1071818563264, 1076326633168, 1080838778769, 1085246143819, 1089647752272, 1093957706384, 1098258121084, 1102461330328, 1106660780940, 1110760830846, 1114852631336, 1118854601350, 1122840321000, 1126734033272, 1130621616007, 1134411828501, 1138188469129, 1141879759051, 1145553758419, 1149137953397, 1152709702524, 1156190730374, 1159656564349, 1163039316261, 1166400278874, 1169676756782, 1172940282167, 1176116210891, 1179272370762, 1182350866764, 1185407011879, 1188382788833, 1191340728832, 1194217627818, 1197074673144, 1199856681314, 1202614065389, 1205296757693, 1207961362671, 1210548965893, 1213113484081, 1215609486627, 1218081285389, 1220482395911, 1222862484745, 1225171964943, 1227459393178, 1229681886726, 1231878471350, 1234010512106, 1236121870921, 1238167839117, 1240189978869, 1242152917827, 1244091386888, 1245969567266, 1247825878532, 1249623036654, 1251397651392, 1253117302030, 1254811763386, 1256452492968, 1258072292357, 1259637713657, 1261180110447, 1262673048235, 1264143168122, 1265563843644, 1266963172500, 1268314362190, 1269644047398, 1270928949444, 1272191113602, 1273409649842, 1274608493533, 1275763776977, 1276898283011, 1277993179017, 1279067536310, 1280102703352, 1281118437075, 1282096420695, 1283055567243, 1283979277777, 1284883406816, 1285753378356, 1286606058165, 1287425328175, 1288226647014, 1288997315230, 1289750588151, 1290473985279, 1291180915016, 1291859272784, 1292521785203, 1293157329819, 1293776989934, 1294371139968, 1294950758706, 1295505644074, 1296045869744, 1296563420572, 1297067098870, 1297548802250, 1298017227976, 1298464880652, 1298900042307, 1299315665131, 1299718875693, 1300103761305, 
1300477159484, 1300833116046, 1301177843755, 1301506401027, 1301824473092,
# 1302127102488, 1302419637634, 1302697865092, 1302966660493, 1303221932211, 1303468040080, 1303701679944, 1303926839285, 1304140219693, 1304345482418, 1304539798930, 1304726653387, 1304903280147, 1305072676814, 1305232728784, 1305386119428, 1305530702894, 1305669031314, 1305799315980, 1305923741194, 1306040715470, 1306152230687, 1306256866455, 1306356551858, 1306449915474, 1306538501709, 1306621437087, 1306700091017, 1306773420067, 1306842815138, 1306907447144, 1306968427604, 1307025124018, 1307078481446, 1307127904702, 1307174375641, 1307217330237, 1307257496733, 1307294583037, 1307329227388, 1307361018578, 1307390639266, 1307417795370, 1307442960740, 1307465973420, 1307487229380, 1307506557742, 1307524386941, 1307540546967, 1307555329787, 1307568704691, 1307580918509, 1307591874007, 1307601836110, 1307610750518, 1307618786867, 1307625952831, 1307632379826, 1307638055110, 1307643130571, 1307647589243, 1307651520547, 1307654961051, 1307657982681, 1307660588365, 1307662859343, 1307664808311, 1307666480949, 1307667904565, 1307669114015, 1307670124247, 1307670975895, 1307671679739, 1307672257745, 1307672730217, 1307673113641, 1307673417065, 1307673658892, 1307673847230, 1307673992218, 1307674102256, 1307674184650, 1307674244144, 1307674287121, 1307674316865, 1307674336790, 1307674349906, 1307674358155, 1307674362933, 1307674365656, 1307674367054, 1307674367661, 1307674367907, 1307674367985, 1307674367999, 1307674368000 )
# )
#
# .PageDF <- lapply(.PageDF, function(x) c(x[1], diff(x)) / tail(x,1))
# save(.PageDF, file="C:/Users/Andri/Documents/R/sources/DescTools/MakeDescToolsBase/PageDF.rda")
# load(file="C:/Users/Andri/Documents/R/Projects/load/PageDF.rda")
# load(file="C:/Users/Andri/Documents/R/Projects/DescTools/load/wdConst.rda")
# load(file="C:/Users/Andri/Documents/R/sources/DescTools/periodic.rda")
# just for check not to bark!
# Declare names that are only resolved at run time (lazily loaded data sets,
# colour constants, non-standard-evaluation variables) as known globals so
# that 'R CMD check' does not flag them as undefined.
utils::globalVariables(c("d.units","d.periodic","d.prefix",
"day.name","day.abb","wdConst",
"fmt", "pal",
"hred","hblue","horange","hyellow","hecru","hgreen",
"tarot","cards","roulette"))
# hred <- unname(Pal("Helsana")[1])
# horange <- unname(Pal("Helsana")[2])
# hyellow <- unname(Pal("Helsana")[3])
# hecru <- unname(Pal("Helsana")[4])
# hblue <- unname(Pal("Helsana")[6])
# hgreen <- unname(Pal("Helsana")[7])
#
# save(x=hred, file='C:/Users/andri/Documents/R/Projects/DescTools/data/hred.rda')
# save(x=horange, file='C:/Users/andri/Documents/R/Projects/DescTools/data/horange.rda')
# save(x=hyellow, file='C:/Users/andri/Documents/R/Projects/DescTools/data/hyellow.rda')
# save(x=hecru, file='C:/Users/andri/Documents/R/Projects/DescTools/data/hecru.rda')
# save(x=hblue, file='C:/Users/andri/Documents/R/Projects/DescTools/data/hblue.rda')
# save(x=hgreen, file='C:/Users/andri/Documents/R/Projects/DescTools/data/hgreen.rda')
# source( "C:/Users/Andri/Documents/R/sources/DescTools/wdConst.r" )
# Base functions ====
## base: calculus
# we have month.name and month.abb in base R, but nothing similar for day names
# in english (use format(ISOdate(2000, 1:12, 1), "%B") for months in current locale)
# day.name <- c("Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday")
# day.abb <- c("Mon","Tue","Wed","Thu","Fri","Sat","Sun")
# internal: golden section constant
# phi = (1 + sqrt(5)) / 2 ~ 1.6180339887 (not exported; presumably used for
# default proportions elsewhere in the package -- no use visible in this chunk)
gold_sec_c <- (1+sqrt(5)) / 2
# tarot <- structure(list(rank = c("1", "2", "3", "4", "5", "6", "7", "8",
# "9", "10", "page", "knight", "queen", "king", "1", "2", "3",
# "4", "5", "6", "7", "8", "9", "10", "page", "knight", "queen",
# "king", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "page",
# "knight", "queen", "king", "1", "2", "3", "4", "5", "6", "7",
# "8", "9", "10", "page", "knight", "queen", "king", "0", "1",
# "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13",
# "14", "15", "16", "17", "18", "19", "20", "21"), suit = c("wands",
# "wands", "wands", "wands", "wands", "wands", "wands", "wands",
# "wands", "wands", "wands", "wands", "wands", "wands", "coins",
# "coins", "coins", "coins", "coins", "coins", "coins", "coins",
# "coins", "coins", "coins", "coins", "coins", "coins", "cups",
# "cups", "cups", "cups", "cups", "cups", "cups", "cups", "cups",
# "cups", "cups", "cups", "cups", "cups", "swords", "swords", "swords",
# "swords", "swords", "swords", "swords", "swords", "swords", "swords",
# "swords", "swords", "swords", "swords", "trumps", "trumps", "trumps",
# "trumps", "trumps", "trumps", "trumps", "trumps", "trumps", "trumps",
# "trumps", "trumps", "trumps", "trumps", "trumps", "trumps", "trumps",
# "trumps", "trumps", "trumps", "trumps", "trumps"), desc = c(NA,
# NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
# NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
# NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
# NA, NA, NA, NA, NA, NA, NA, "The Fool", "The Magician", "The High Priestess",
# "The Empress", "The Emperor", "The Hierophant", "The Lovers",
# "The Chariot", "Strength", "The Hermit", "Wheel of Fortune",
# "Justice", "The Hanged Man", "Death", "Temperance", "The Devil",
# "The Tower", "The Star", "The Moon", "The Sun", "Judgment", "The World"
# )), .Names = c("rank", "suit", "desc"), out.attrs = structure(list(
# dim = structure(c(14L, 4L), .Names = c("rank", "suit")),
# dimnames = structure(list(rank = c("rank=1", "rank=2", "rank=3",
# "rank=4", "rank=5", "rank=6", "rank=7", "rank=8", "rank=9",
# "rank=10", "rank=page", "rank=knight", "rank=queen", "rank=king"
# ), suit = c("suit=wands", "suit=coins", "suit=cups", "suit=swords"
# )), .Names = c("rank", "suit"))), .Names = c("dim", "dimnames"
# )), row.names = c(NA, 78L), class = "data.frame")
#
#
# cards <- structure(list(rank = structure(c(1L, 2L, 3L, 4L, 5L, 6L, 7L,
# 8L, 9L, 10L, 11L, 12L, 13L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L,
# 10L, 11L, 12L, 13L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L,
# 11L, 12L, 13L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L, 11L,
# 12L, 13L), .Label = c("2", "3", "4", "5", "6", "7", "8", "9",
# "10", "J", "Q", "K", "A"), class = "factor"), suit = structure(c(1L,
# 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L,
# 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 3L, 3L, 3L,
# 3L, 3L, 3L, 3L, 3L, 3L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L,
# 4L, 4L, 4L), .Label = c("club", "diamond", "heart", "spade"), class = "factor")), .Names = c("rank",
# "suit"), out.attrs = structure(list(dim = structure(c(13L, 4L
# ), .Names = c("rank", "suit")), dimnames = structure(list(rank = c("rank=2",
# "rank=3", "rank=4", "rank=5", "rank=6", "rank=7", "rank=8", "rank=9",
# "rank=10", "rank=J", "rank=Q", "rank=K", "rank=A"), suit = c("suit=club",
# "suit=diamond", "suit=heart", "suit=spade")), .Names = c("rank",
# "suit"))), .Names = c("dim", "dimnames")), class = "data.frame", row.names = c(NA, -52L))
#
#
# roulette <- structure(list(num = structure(c(1L, 20L, 24L, 30L, 5L, 22L,
# 35L, 23L, 11L, 16L, 37L, 26L, 7L, 14L, 2L, 28L, 9L, 18L, 33L,
# 3L, 17L, 36L, 25L, 4L, 31L, 6L, 21L, 34L, 29L, 10L, 19L, 13L,
# 15L, 32L, 12L, 8L, 27L), .Label = c("0", "1", "10", "11", "12",
# "13", "14", "15", "16", "17", "18", "19", "2", "20", "21", "22",
# "23", "24", "25", "26", "27", "28", "29", "3", "30", "31", "32",
# "33", "34", "35", "36", "4", "5", "6", "7", "8", "9"), class = "factor"),
# col = structure(c(2L,
# 1L, 3L, 1L, 3L, 1L, 3L, 1L, 3L, 1L, 3L, 1L, 3L, 1L, 3L, 1L,
# 3L, 1L, 3L, 1L, 3L, 1L, 3L, 1L, 3L, 1L, 3L, 1L, 3L, 1L, 3L,
# 1L, 3L, 1L, 3L, 1L, 3L), .Label = c("black", "white", "red"
# ), class = "factor")), .Names = c("num", "col"
# ), row.names = c(NA, -37L), class = "data.frame")
#
# save(tarot, file="tarot.rda")
# save(cards, file="cards.rda")
# save(roulette, file="roulette.rda")
# Define some alias(es)
# N(): terse alias for as.numeric()
N <- as.numeric
## This is not exported, as it would mask the base function,
# but it would be very, very handy if the base function were changed accordingly
as.Date.numeric <- function (x, origin, ...) {
  # S3 method: interpret a numeric vector 'x' as day offsets from 'origin'.
  # Unlike base::as.Date.numeric(), 'origin' is optional and defaults to
  # the Unix epoch "1970-01-01". Further arguments are passed to as.Date().
  if (missing(origin))
    origin <- "1970-01-01"
  as.Date(origin, ...) + x
}
Primes <- function (n) {
  # Sieve of Eratosthenes: all primes up to max(n).
  # Source: sfsmisc
  # Bill Venables (<= 2001); Martin Maechler gained another 40% speed,
  # working with logicals and integers.
  #
  # Returns an integer vector of the primes in 2..max(n); integer(0) if
  # max(n) <= 1.
  limit <- max(n)
  if (limit <= 1)
    return(integer(0))
  limit <- as.integer(limit)
  is_prime <- rep.int(TRUE, limit)
  is_prime[1L] <- FALSE
  # sieve only up to floor(sqrt(limit)); the small epsilon protects against
  # floating-point sqrt() landing just below an exact integer root, which
  # would make as.integer() truncate one too low and skip the last sieve prime
  M <- as.integer(sqrt(limit) + 1e-9)
  for (p in seq_len(M)) {
    if (p * p > limit) break   # defensive: seq.int() errors when from > to
    if (is_prime[p])
      is_prime[seq.int(p * p, limit, p)] <- FALSE
  }
  seq_len(limit)[is_prime]
}
Factorize <- function (n) {
# Factorize <- function (n, verbose = FALSE) {
# Source sfsmisc: Martin Maechler, Jan. 1996.
# Prime factorization of each element of 'n'. Returns a named list with one
# element per input value; each element is a two-column matrix with the
# prime factors ('p') and their multiplicities ('m').
if (all(n < .Machine$integer.max))
n <- as.integer(n)
else {
warning("factorizing large int ( > maximal integer )")
n <- round(n)
}
N <- length(n)
# candidate primes: any factor pair of nn has one member <= sqrt(max(n));
# a single larger prime cofactor is handled separately below
M <- as.integer(sqrt(max(n)))
k <- length(pr <- Primes(M))
# nDp[j, i] is TRUE iff prime pr[j] divides n[i]
nDp <- outer(pr, n, FUN = function(p, n) n%%p == 0)
res <- vector("list", length = N)
names(res) <- n
for (i in 1:N) {
nn <- n[i]
if (any(Dp <- nDp[, i])) {
# pfac: distinct small primes dividing nn
nP <- length(pfac <- pr[Dp])
# if (verbose) cat(nn, " ")
}
else {
# no prime <= sqrt(max(n)) divides nn, so nn itself is prime (or 1)
res[[i]] <- cbind(p = nn, m = 1)
# if (verbose) cat("direct prime", nn, "\n")
next
}
# m.pr accumulates the multiplicity of each prime in pfac
m.pr <- rep(1, nP)
Ppf <- prod(pfac)
# peel off the product of still-dividing primes until the quotient is 1;
# if a quotient is divisible by none of them, it is a prime > sqrt(max(n))
while (1 < (nn <- nn%/%Ppf)) {
Dp <- nn%%pfac == 0
if (any(Dp)) {
m.pr[Dp] <- m.pr[Dp] + 1
Ppf <- prod(pfac[Dp])
}
else {
# remaining cofactor is a large prime: record with multiplicity 1
pfac <- c(pfac, nn)
m.pr <- c(m.pr, 1)
break
}
}
res[[i]] <- cbind(p = pfac, m = m.pr)
}
res
}
GCD <- function(..., na.rm = FALSE) {
  # Greatest common divisor of all numbers passed in '...' (flattened).
  # Returns NA if any value is NA (after optional na.rm removal); zeros are
  # dropped before the computation. The pairwise GCD is delegated to the
  # package's C routine and folded over the vector, stopping early at 1.
  x <- unlist(list(...), recursive=TRUE)
  if(na.rm) x <- x[!is.na(x)]
  if(anyNA(x)) return(NA)
  stopifnot(is.numeric(x))
  # BUGFIX: the elementwise comparison must be collapsed with any();
  # a vector condition in if()/|| is an error in R >= 4.2 / 4.3
  if (any(floor(x) != ceiling(x)) || length(x) < 2)
    stop("Argument 'x' must be an integer vector of length >= 2.")
  x <- x[x != 0]
  n <- length(x)
  if (n == 0) {
    g <- 0
  } else if (n == 1) {
    g <- x
  } else {
    g <- .Call("_DescTools_compute_GCD", PACKAGE = "DescTools", x[1], x[2])
    if (n > 2) {
      for (i in 3:n) {
        g <- .Call("_DescTools_compute_GCD", PACKAGE = "DescTools", g, x[i])
        if (g == 1) break   # gcd can never recover from 1
      }
    }
  }
  return(g)
}
LCM <- function(..., na.rm = FALSE) {
  # Least common multiple of all numbers passed in '...' (flattened).
  # Returns NA if any value is NA (after optional na.rm removal); zeros are
  # dropped before the computation. The pairwise LCM is delegated to the
  # package's C routine and folded over the whole vector.
  x <- unlist(list(...), recursive=TRUE)
  if(na.rm) x <- x[!is.na(x)]
  if(anyNA(x)) return(NA)
  stopifnot(is.numeric(x))
  # BUGFIX: collapse the elementwise comparison with any();
  # a vector condition in if()/|| is an error in R >= 4.2 / 4.3
  if (any(floor(x) != ceiling(x)) || length(x) < 2)
    stop("Argument 'x' must be an integer vector of length >= 2.")
  x <- x[x != 0]
  n <- length(x)
  if (n == 0) {
    l <- 0
  } else if (n == 1) {
    l <- x
  } else {
    l <- .Call("_DescTools_compute_LCM", PACKAGE = "DescTools", x[1], x[2])
    if (n > 2) {
      for (i in 3:n)
        l <- .Call("_DescTools_compute_LCM", PACKAGE = "DescTools", l, x[i])
    }
  }
  return(l)
}
DigitSum <- function(x) {
  # Digit sum of every element of 'x', e.g. DigitSum(124) = 1 + 2 + 4 = 7.
  # Works arithmetically: divide by successive powers of ten and take mod 10.
  sapply(x, function(z) {
    pow <- 10^(seq_len(nchar(z)) - 1)
    sum(floor(z / pow) %% 10)
  })
}
CombN <- function(x, m, repl=FALSE, ord=FALSE){
  # Number of possible samples of size m drawn from x, for the four
  # combinatoric cases (with/without replacement x ordered/unordered).
  n <- length(x)
  if (repl) {
    if (ord) {
      n^m                        # variations with repetition
    } else {
      choose(n + m - 1, m)       # combinations with repetition
    }
  } else {
    if (ord) {
      # permutations without repetition: n! / (n-m)!
      # computed on the log scale to avoid numeric overflow
      exp(lgamma(n + 1) - lgamma(n - m + 1))
    } else {
      choose(n, m)               # plain combinations
    }
  }
}
Permn <- function(x, sort = FALSE) {
# by F. Leisch
# All permutations of the elements of 'x', one per row of the returned
# matrix. Duplicate rows are removed when 'x' contains ties; with
# sort = TRUE the rows are sorted (via DescTools::Sort).
n <- length(x)
if (n == 1)
return(matrix(x))
# Andri: why should we need that??? ...
# else if (n < 2)
# stop("n must be a positive integer")
# build an index-permutation matrix iteratively: starting from the single
# permutation of {1}, extend the permutations of {1..i-1} by inserting i
# at every possible position (via the cyclic index vector 'a')
z <- matrix(1)
for (i in 2:n) {
y <- cbind(z, i)
a <- c(1:i, 1:(i - 1))
z <- matrix(0, ncol = ncol(y), nrow = i * nrow(y))
z[1:nrow(y), ] <- y
for (j in 2:i - 1) {
z[j * nrow(y) + 1:nrow(y), ] <- y[, a[1:i + j]]
}
}
dimnames(z) <- NULL
# map index permutations onto the actual values of x
m <- apply(z, 2, function(i) x[i])
if(any(duplicated(x)))
m <- unique(m)
if(sort) m <- Sort(m)
return(m)
}
CombSet <- function(x, m, repl=FALSE, ord=FALSE, as.list=FALSE) {
# Enumerate all samples of size 'm' from 'x' for the four combinatoric
# cases (with/without replacement x ordered/unordered). A vector 'm'
# yields one result per sample size. Results come as a matrix (one sample
# per row) or, with as.list = TRUE, as a flat list of samples.
if(length(m)>1){
# recurse per sample size; flattening to a list happens below if requested
res <- lapply(m, function(i) CombSet(x=x, m=i, repl=repl, ord=ord))
} else {
# generate the samples for the 4 combinatoric cases
if(repl){
# ordered with replacement: full cartesian product of m copies of x
res <- as.matrix(do.call(expand.grid, as.list(as.data.frame(replicate(m, x)))))
dimnames(res) <- NULL
if(!ord){
# unordered: sort each row and drop duplicates
res <- unique(t(apply(res, 1, sort)))
}
} else {
if(ord){
# ordered without replacement: permute every m-subset
res <- do.call(rbind, combn(x, m=m, FUN=Permn, simplify = FALSE))
} else {
res <- t(combn(x, m))
}
}
}
if(as.list){
# Alternative: we could flatten the whole list
# and now flatten the list of lists into one list
# lst <- split(unlist(lst), rep(1:length(idx <- rapply(lst, length)), idx))
if(is.list(res)){
res <- do.call(c, lapply(res,
function(x){ as.list(as.data.frame(t(x), stringsAsFactors = FALSE))}))
} else {
res <- as.list(as.data.frame(t(res), stringsAsFactors = FALSE))
}
names(res) <- NULL
}
return(res)
}
# CombSet(x, m, repl=TRUE, ord=FALSE)
# CombSet(x, m, repl=TRUE, ord=TRUE)
# CombSet(x, m, repl=FALSE, ord=TRUE)
# CombSet(x, m, repl=FALSE, ord=FALSE)
CombPairs <- function(x, y = NULL) {
  # Pairwise combinations as a data.frame:
  # * y missing  -> all unordered pairs from within x (choose(n, 2) rows)
  # * y supplied -> the full cross of x and y (expand.grid)
  if (missing(y)) {
    data.frame(t(combn(x, 2)), stringsAsFactors = FALSE)
  } else {
    expand.grid(x, y, stringsAsFactors = FALSE)
  }
}
Fibonacci <- function(n) {
  # Fibonacci numbers F(n) with F(0) = 0, F(1) = 1; 'n' may be a vector of
  # non-negative integer indices (validated via DescTools::IsWhole).
  # BUGFIX: collapse the vectorized checks with all()/any(); the original
  # used bare vector comparisons inside ||, which errors for vector input
  # in R >= 4.3 although the function is otherwise vectorized over n.
  if (!is.numeric(n) || !all(IsWhole(n)) || any(n < 0))
    stop("Argument 'n' must be integer >= 0.")
  maxn <- max(n)
  # shortcuts for small maxima (NOTE: when maxn == 0 a scalar 0 is returned
  # regardless of length(n) -- kept as-is for backward compatibility)
  if (maxn == 0) return(0)
  if (maxn == 1) return(c(0, 1)[n+1])
  if (maxn == 2) return(c(0, 1, 1)[n+1])
  # build the sequence up to F(maxn) iteratively, then pick requested terms
  z <- c(0, 1, 1, rep(NA, maxn-3))
  for (i in 4:(maxn+1)) {
    z[i] <- z[i-1] + z[i-2]
  }
  z[n+1]
}
### M^k for a matrix M and non-negative integer 'k'
## Matrixpower
# re-exported matrix power operator from the 'expm' package
"%^%" <- expm::"%^%"
Vigenere <- function(x, key = NULL, decrypt = FALSE) {
  # Vigenere cipher over the fixed alphabet A-Z, a-z, 0-9.
  # Characters of 'x' outside the alphabet are silently dropped; the key
  # is recycled over the message. decrypt = TRUE reverses the shift.
  # hold that constant, as it makes the function too flexible else
  # in cases you maybe remind your password, but lost the charlist definition....
  charlist <- c(LETTERS, letters, 0:9)
  if (is.null(key)) key <- PasswordDlg()
  # map a string onto 1-based alphabet positions, skipping unknown chars
  to_idx <- function(s) {
    pos <- match(strsplit(s, "")[[1]], charlist)
    pos[!is.na(pos)]
  }
  msg <- to_idx(x)
  # 0-based key shifts, recycled to the message length
  shift <- rep(to_idx(key), length.out = length(msg)) - 1
  if (decrypt) shift <- -shift
  # 1-based modular arithmetic: ((v - 1) mod n) + 1
  out <- ((msg + shift - 1) %% length(charlist)) + 1
  paste(charlist[out], collapse = "")
}
Winsorize <- function(x, minval = NULL, maxval = NULL,
                      probs=c(0.05, 0.95), na.rm = FALSE) {
  # Winsorize: clamp the values of x into [minval, maxval]. Bounds that are
  # not supplied are taken from the quantiles given by 'probs'.
  # Following an idea from Gabor Grothendieck
  # http://r.789695.n4.nabble.com/how-to-winsorize-data-td930227.html
  # NAs in x are left untouched; they are only excluded from the quantile
  # computation when na.rm = TRUE. (Subassignment was measured faster than
  # the pmax(pmin(...)) formulation.)
  if (is.null(minval) || is.null(maxval)) {
    bounds <- quantile(x = x, probs = probs, na.rm = na.rm)
    if (is.null(minval)) minval <- bounds[1]
    if (is.null(maxval)) maxval <- bounds[2]
  }
  # which() drops NA positions, so missing values pass through unchanged
  x[which(x < minval)] <- minval
  x[which(x > maxval)] <- maxval
  return(x)
}
Trim <- function(x, trim = 0.1, na.rm = FALSE){
# Trim the 'trim' fraction (or, if trim >= 1, that many observations) from
# each end of x, following the conventions of mean(..., trim=). The removed
# positions (indices into the original x) are attached as attribute "trim".
# Returns NA_real_ for untrimmable cases (NAs present, trim in [0.5, 1),
# or trim >= n/2 observations requested per side).
if (na.rm) x <- x[!is.na(x)]
if (!is.numeric(trim) || length(trim) != 1L)
stop("'trim' must be numeric of length one")
n <- length(x)
if (trim > 0 && n) {
if (is.complex(x))
stop("trim is not defined for complex data")
if (anyNA(x))
return(NA_real_)
if (trim >= 0.5 && trim < 1)
return(NA_real_)
# lo/hi: positions (in sort order) of the first/last kept observation
if(trim < 1)
lo <- floor(n * trim) + 1
else{
lo <- trim + 1
if (trim >= (n/2))
return(NA_real_)
}
hi <- n + 1 - lo
# x <- sort.int(x, partial = unique(c(lo, hi)))[lo:hi]
# index.return = TRUE keeps the original positions so the kept values can
# be restored to their original order and the trimmed indices reported
res <- sort.int(x, index.return = TRUE)
trimi <- res[["ix"]][c(1:(lo-1), (hi+1):length(x))]
# x <- res[["x"]][order(res[["ix"]])[lo:hi]]
x <- res[["x"]][lo:hi][order(res[["ix"]][lo:hi])]
attr(x, "trim") <- trimi
}
return(x)
}
RobScale <- function(x, center = TRUE, scale = TRUE){
  # Robust counterpart of scale(): center columns at their median and/or
  # divide them by their MAD (instead of mean and standard deviation).
  # Returns the matrix produced by scale(), including its attributes.
  x <- as.matrix(x)
  if (center)
    x <- scale(x, center = apply(x, 2, median, na.rm = TRUE),
               scale = FALSE)
  if (scale)
    x <- scale(x, center = FALSE,
               scale = apply(x, 2, mad, na.rm = TRUE))
  x
}
MoveAvg <- function(x, order, align = c("center","left","right"),
endrule = c("NA", "keep", "constant")){
# Moving average of x with window length 'order'.
# align:   "center" (two-sided; even orders use half-weights at the ends),
#          "right" (trailing average), "left" (leading average).
# endrule: how to fill the positions where the window does not fit:
#          "NA" leaves them NA, "keep" copies the original values,
#          "constant" repeats the nearest fully-averaged value.
n <- length(x)
align = match.arg(align)
# idx: positions that cannot be averaged with a full window;
# idx_const: the positions whose values replace them under endrule="constant"
switch(align,
"center" = {
idx <- c(1:(order %/% 2), (n-order %/% 2+1):n)
idx_const <- c(rep((order %/% 2)+1, order %/% 2),
rep(n-(order %/% 2), order %/% 2))
if(order %% 2 == 1){ # order is odd
z <- filter(x, rep(1/order, order), sides=2)
} else { # order is even
# symmetric window with half-weights on the two outermost points
z <- filter(x, c(1/(2*order), rep(1/order, order-1), 1/(2*order)), sides=2)
} }
, "right" = {
idx <- 1:(order-1)
idx_const <- order
z <- filter(x, rep(1/order, order), sides=1)
}
, "left" = {
idx <- (n-order+2):n
idx_const <- n-order+1
# leading average = trailing average of the reversed series, reversed back
z <- rev(filter(rev(x), rep(1/order, order), sides=1))
}
)
endrule <- match.arg(endrule)
switch(endrule,
"NA" = {},
keep = {z[idx] <- x[idx]},
constant = {z[idx] <- z[idx_const]})
# filter() returns a ts object; strip that unless the input was one
if(!is.ts(x)) attr(z, "tsp") <- NULL
class(z) <- class(x)
return(z)
}
LinScale <- function (x, low = NULL, high = NULL, newlow = 0, newhigh = 1) {
  # Linearly rescale the columns of x so that [low, high] maps onto
  # [newlow, newhigh]. Missing 'low'/'high' default to the column minima
  # and maxima; all bounds are recycled across columns.
  x <- as.matrix(x)
  p <- ncol(x)
  lo <- if (is.null(low))  apply(x, 2, min, na.rm = TRUE) else rep(low, length.out = p)
  hi <- if (is.null(high)) apply(x, 2, max, na.rm = TRUE) else rep(high, length.out = p)
  nlo <- rep(newlow, length.out = p)
  nhi <- rep(newhigh, length.out = p)
  # express the affine map as scale()'s (x - center) / scale form
  ctr <- (lo * nhi - hi * nlo) / (nhi - nlo)
  scl <- (hi - lo) / (nhi - nlo)
  scale(x, center = ctr, scale = scl)
}
Large <- function (x, k = 5, unique = FALSE, na.last = NA) {
# The k largest values of x (C implementation; avoids a full sort).
# unique = TRUE returns a list with the k largest distinct values and their
# frequencies; otherwise the k largest observations are returned (ties kept).
n <- length(x)
x <- x[!is.na(x)]
na_n <- n - length(x)
# na.last
# for controlling the treatment of NAs. If TRUE, missing values in the data are put last;
# if FALSE, they are put first;
# if NA, they are removed.
if (unique==TRUE) {
# C helper returns list(value=..., frequency=...) of the k top distinct values
res <- .Call("_DescTools_top_n", PACKAGE = "DescTools", x, k)
if(na_n > 0){
if(!is.na(na.last)){
if(na.last==FALSE) {
# NA counts as the smallest entry: prepend, then keep the last k
res$value <- tail(c(NA, res$value), k)
res$frequency <- tail(c(na_n, res$frequency), k)
}
if(na.last==TRUE){
# NA counts as the largest entry: append, then keep the last k
res$value <- tail(c(res$value, NA), k)
res$frequency <- tail(c(res$frequency, na_n), k)
}
}
}
# restore factor levels / original class lost through the C interface
if(is.factor(x))
res$value <- levels(x)[res$value]
else
class(res$value) <- class(x)
} else {
# do not allow k be bigger than n
k <- min(k, n)
res <- x[.Call("_DescTools_top_i", PACKAGE = "DescTools", x, k)]
if(!is.na(na.last)){
if(na.last==FALSE)
res <- tail(c(rep(NA, na_n), res), k)
if(na.last==TRUE)
res <- tail(c(res, rep(NA, na_n)), k)
}
}
return(res)
}
# old version, replaced 0.99.17/13.5.2016
#
# Large <- function (x, k = 5, unique = FALSE, na.rm = FALSE) {
#
# if (na.rm)
# x <- x[!is.na(x)]
#
# if (unique==TRUE) {
# ux <- unique(x)
# # un <- length(ux)
# un <- sum(!is.na(ux))
# minval <- sort(ux, partial=max((un-k+1), 1):un, na.last = TRUE)[max((un-k+1),1)]
#
# # we are using the rationale of rle here, as it turned out to be the fastest approach
# x <- sort(x[x>=minval])
# n <- length(x)
# if (n == 0L)
# res <- list(lengths = integer(), values = x)
#
# y <- x[-1L] != x[-n]
# i <- c(which(y | is.na(y)), n)
# res <- list(lengths = diff(c(0L, i)), values = x[i])
#
# # res <- unclass(rle(sort(x[x>=minval])))
# }
# else {
# # n <- length(x)
# n <- sum(!is.na(x))
# res <- sort(x, partial=max((n-k+1),1):n, na.last = TRUE)[max((n-k+1),1):n]
# # lst <- as.vector(unlist(lapply(lst, "[", "val")))
# # http://stackoverflow.com/questions/15659783/why-does-unlist-kill-dates-in-r
#
# # faster alternative (but check NA-handling first):
# # res <- x[.Call("_DescTools_top_index", PACKAGE = "DescTools", x, k)]
#
# }
# return(res)
# }
# Find the k smallest values of 'x'; counterpart of Large().
# With unique = TRUE the k smallest *distinct* values are returned together
# with their frequencies (list with $value and $frequency), otherwise the k
# smallest elements themselves. Uses compiled DescTools routines
# (bottom_n / bottom_i).
Small <- function (x, k = 5, unique = FALSE, na.last = NA) {
  # remember the original length so the number of NAs can be derived
  n <- length(x)
  x <- x[!is.na(x)]
  na_n <- n - length(x)
  # na.last
  # for controlling the treatment of NAs. If TRUE, missing values in the data are put last;
  # if FALSE, they are put first;
  # if NA, they are removed.
  if (unique==TRUE) {
    # C helper: k smallest unique values plus their frequencies
    res <- .Call("_DescTools_bottom_n", PACKAGE = "DescTools", x, k)
    if(na_n > 0){
      if(!is.na(na.last)){
        if(na.last==FALSE) {
          # NAs first: prepend and keep the first k entries
          k <- min(length(res$value) + 1, k)
          res$value <- c(NA, res$value)[1:k]
          res$frequency <- c(na_n, res$frequency)[1:k]
        }
        if(na.last==TRUE){
          # NAs last: append and keep the first k entries
          k <- min(length(res$value) + 1, k)
          res$value <- c(res$value, NA)[1:k]
          res$frequency <- c(res$frequency, na_n)[1:k]
        }
      }
    }
    if(is.factor(x))
      # translate integer codes back to the factor's labels
      res$value <- levels(x)[res$value]
    else
      # restore the class (e.g. Date) that got lost in the C call
      class(res$value) <- class(x)
  } else {
    # do not allow k be bigger than n
    k <- min(k, n)
    # C helper returns indices of the k smallest elements; rev() yields
    # ascending order
    res <- rev(x[.Call("_DescTools_bottom_i", PACKAGE = "DescTools", x, k)])
    if(!is.na(na.last)){
      if(na.last==FALSE)
        res <- c(rep(NA, na_n), res)[1:k]
      if(na.last==TRUE)
        res <- c(res, rep(NA, na_n))[1:k]
    }
  }
  return(res)
}
# Small <- function (x, k = 5, unique = FALSE, na.rm = FALSE) {
#
# if (na.rm)
# x <- x[!is.na(x)]
#
# if (unique==TRUE) {
# ux <- unique(x)
# un <- length(ux)
# maxval <- sort(ux, partial = min(k, un))[min(k, un)]
#
# # we are using the rationale of rle here, as it turned out to be the fastest approach
# x <- sort(x[x<=maxval])
# n <- length(x)
# if (n == 0L)
# res <- list(lengths = integer(), values = x)
#
# y <- x[-1L] != x[-n]
# i <- c(which(y | is.na(y)), n)
# res <- list(lengths = diff(c(0L, i)), values = x[i])
#
# # res <- unclass(rle(sort(x[x<=maxval])))
# }
# else {
# n <- length(x)
# res <- sort(x, partial = 1:min(k, n))[1:min(k, n)]
# # lst <- as.vector(unlist(lapply(lst, "[", "val")))
# # http://stackoverflow.com/questions/15659783/why-does-unlist-kill-dates-in-r
# }
# return(res)
# }
# Build a two-line text report of the nlow smallest and nhigh largest unique
# values of 'x', each followed by its frequency in parentheses (frequencies
# of 1 are suppressed). Used by Desc() style output.
HighLow <- function (x, nlow = 5, nhigh = nlow, na.last = NA) {
  # updated 1.2.2014 / Andri
  # using table() was unbearable slow and inefficient for big vectors!!
  # sort(partial) is the way to go..
  # http://r.789695.n4.nabble.com/Fast-way-of-finding-top-n-values-of-a-long-vector-td892565.html
  # updated 1.5.2016 / Andri
  # ... seemed the way to go so far, but now outperformed by nathan russell's C++ solution
  if ((nlow + nhigh) != 0) {
    # extremes with their frequencies (lists with $value / $frequency)
    frqs <- Small(x, k=nlow, unique=TRUE, na.last=na.last)
    frql <- Large(x, k=nhigh, unique=TRUE, na.last=na.last)
    frq <- c(frqs$frequency, frql$frequency)
    vals <- c(frqs$value, frql$value)
    if (is.numeric(x)) {
      # group digits for readability, e.g. 12'345
      vals <- prettyNum(vals, big.mark = "'")
    }
    else {
      vals <- vals
    }
    # append " (freq)" and drop it for values occurring only once
    frqtxt <- paste(" (", frq, ")", sep = "")
    frqtxt[frq < 2] <- ""
    txt <- StrTrim(paste(vals, frqtxt, sep = ""))
    lowtxt <- paste(head(txt, min(length(frqs$frequency), nlow)), collapse = ", ")
    hightxt <- paste(tail(txt, min(length(frql$frequency), nhigh)), collapse = ", ")
  }
  else {
    lowtxt <- ""
    hightxt <- ""
  }
  return(paste("lowest : ", lowtxt, "\n",
               "highest: ", hightxt, "\n", sep = ""))
}
# Return the value(s) in x lying closest to a, or their position(s) if
# which = TRUE. Ties are all returned. If NAs remain in x (na.rm = FALSE)
# the minimal distance is NA and so is the result.
Closest <- function(x, a, which = FALSE, na.rm = FALSE){
  if (na.rm)
    x <- x[!is.na(x)]

  dist <- abs(x - a)
  nearest <- min(dist)

  if (is.na(nearest)) {
    res <- NA
  } else {
    # compare distances with a tolerance; exact == would miss ties that
    # differ only by floating point noise
    hit <- DescTools::IsZero(dist - nearest)
    res <- if (which) which(hit) else x[hit]
  }
  return(res)
}
# Dense ranking: tied values share one rank and no rank numbers are skipped
# (1, 2, 2, 3 instead of 1, 2.5, 2.5, 4).
DenseRank <- function(x, na.last = TRUE) {
  r <- rank(x, na.last)
  as.numeric(factor(r))
}
# Percent rank: for each element, the fraction of non-missing values whose
# rank does not exceed its own; NAs keep their position as NA.
PercentRank <- function(x) {
  valid <- sum(!is.na(x))
  trunc(rank(x, na.last = "keep")) / valid
}
# Inverse of which(): turn a vector of positive indices back into a logical
# vector of length n, TRUE at the given positions. Names of 'idx' are
# carried over to the corresponding positions when useNames = TRUE.
# Author: Nick Sabbe
# http://stackoverflow.com/questions/7659833/inverse-of-which
Unwhich <- function(idx, n, useNames=TRUE){
  out <- rep(FALSE, n)
  if (length(idx) > 0) {
    out[idx] <- TRUE
    if (useNames)
      names(out)[idx] <- names(idx)
  }
  out
}
# Pool the levels of several factors (or character vectors, which are
# factorized first) into one vector of unique level labels, in order of
# first appearance.
CombLevels <- function(...){
  args <- list(...)
  lvls <- lapply(args, function(f) {
    if (!inherits(f, "factor"))
      f <- factor(f)
    levels(f)
  })
  unique(unlist(lvls))
}
###
## base: string functions ====
# Missing string functions for newbies, but not only..
# Strip the characters listed in 'pattern' (a character class, default
# whitespace) from both ends, only the left, or only the right of each
# string.
StrTrim <- function(x, pattern=" \t\n", method="both") {
  method <- match.arg(method, c("both", "left", "right"))
  rx <- switch(method,
               both  = gettextf("^[%s]+|[%s]+$", pattern, pattern),
               left  = gettextf("^[%s]+", pattern),
               right = gettextf("[%s]+$", pattern))
  gsub(pattern = rx, replacement = "", x = x)
}
# Rightmost n characters of each string; a negative n instead drops |n|
# characters from the left. n is recycled to the length of x.
StrRight <- function(x, n) {
  n <- rep(n, length.out = length(x))
  sapply(seq_along(x), function(i) {
    len <- nchar(x[i])
    if (n[i] >= 0)
      substr(x[i], len - n[i] + 1, len)
    else
      substr(x[i], -n[i] + 1, len)
  })
}

# Leftmost n characters of each string; a negative n instead drops |n|
# characters from the right. n is recycled to the length of x.
StrLeft <- function(x, n) {
  n <- rep(n, length.out = length(x))
  sapply(seq_along(x), function(i) {
    if (n[i] >= 0)
      substr(x[i], 0, n[i])
    else
      substr(x[i], 0, nchar(x[i]) + n[i])
  })
}
# Extract the first match of 'pattern' from each element of x.
# Elements without a match yield NA (unlike plain regmatches(), which drops
# them and so changes the result length).
StrExtract <- function(x, pattern){
  m <- regexpr(pattern, x)
  # removed a stray 'regmatches(x, m)' whose result was discarded
  # (dead code that scanned all strings a second time for nothing)
  res <- rep(NA_character_, length(m))
  res[m > 0] <- regmatches(x, m)
  res
}
# Truncate strings to at most 'maxlen' characters, appending "..." to those
# that were actually cut (so truncated results are maxlen + 3 long).
StrTrunc <- function(x, maxlen = 20) {
  marker <- ifelse(nchar(x) > maxlen, "...", "")
  paste(substr(x, 0, maxlen), marker, sep = "")
}
# Abbreviate strings to the shortest prefixes that keep them distinguishable.
# method "left": each string is cut individually as soon as its prefix is
# unique among all strings; method "fix": one common prefix length is used
# for all strings. 'minchar' is the minimal prefix length.
# Note: AllDuplicated() is a DescTools helper (TRUE for every element that
# occurs more than once, including the first occurrence).
StrAbbr <- function(x, minchar=1, method=c("left","fix")){
  switch(match.arg(arg = method, choices = c("left", "fix")),
    "left"={
      # idx[i] ends up as the last prefix length at which x[i] was still
      # ambiguous; the final cut uses idx + 1 characters
      idx <- rep(minchar, length(x))-1
      for(i in minchar:max(nchar(x))){
        adup <- AllDuplicated(substr(x, 1, i))
        idx[adup] <- i
      }
      res <- substr(x, 1, idx+1)
    },
    "fix"={
      # grow a common prefix length until no duplicates remain
      i <- 1
      while(sum(duplicated(substr(x, 1, i))) > 0) { i <- i+1 }
      res <- substr(x, 1, pmax(minchar, i))
    }
  )
  return(res)
}
# replaced by 0.99.19 with method by word and title
# StrCap <- function(x) {
# # Source: Hmisc
# # Author: Charles Dupont
# capped <- grep('^[^A-Z]*', x, perl=TRUE)
#
# substr(x[capped], 1,1) <- toupper(substr(x[capped], 1,1))
# return(x)
#
# }
# Capitalize strings. method "first": capitalize only the first character,
# "word": capitalize every word, "title": title case (lowercase articles,
# prepositions etc. from a stop list stay lowercase).
# Note: %nin% and VecRot() are DescTools helpers.
StrCap <- function(x, method=c("first", "word", "title")) {

  .cap <- function(x){
    # capitalize the first character of each element
    # Source: Hmisc
    # Author: Charles Dupont
    capped <- grep('^[^A-Z]*', x, perl=TRUE)
    substr(x[capped], 1,1) <- toupper(substr(x[capped], 1,1))
    return(x)
  }

  # remember NAs so they can be restored after the string surgery below
  na <- is.na(x)

  switch(match.arg(method),
         first = {
           res <- .cap(x)
         },
         word = {
           # split into words, capitalize each, glue back with single blanks
           res <- unlist(lapply(lapply(strsplit(x, split="\\b\\W+\\b"), .cap), paste, collapse=" "))
         },
         title={
           # words from the stop list stay lowercase, all others are capitalized
           z <- strsplit(tolower(x), split="\\b\\W+\\b")
           low <- c("a","an","the","at","by","for","in","of","on","to","up","and","as","but","or","nor","s")

           z <- lapply(z, function(y) {
             y[y %nin% low] <- StrCap(y[y %nin% low])
             y[y %in% low] <- tolower(y[y %in% low])
             y}
           )

           # nn holds the non-word separators; re-interleave them with the
           # recased words, padding/rotating when the split lengths differ
           nn <- strsplit(x, split="\\w+")
           res <- unlist(lapply(1:length(z), function(i) {
             if(length(nn[[i]]) != length(z[[i]])){
               if(z[[i]][1] == "" ){
                 z[[i]] <- z[[i]][-1]
               } else {
                 z[[i]] <- c(z[[i]], "")
               }
             } else {
               if(z[[i]][1] == "" & length(z[[i]])>1)
                 z[[i]] <- VecRot(z[[i]], -1)
             }
             do.call(paste, list(nn[[i]], z[[i]], sep="", collapse=""))
           }
           ))
         }
  )

  res[na] <- NA
  return(res)

}
# String distance between x and y: "levenshtein" (edit distance),
# "normlevenshtein" (1 - d/max length, a similarity in [0, 1]) or "hamming"
# (equal-length strings only). Returns a "stringDist"/"dist" object with the
# scoring and traceback matrices attached as attributes.
# 'mismatch' and 'gap' are the substitution and insertion/deletion costs.
StrDist <- function (x, y, method = "levenshtein", mismatch = 1, gap = 1, ignore.case = FALSE){

  # source MKmisc, Author: Matthias Kohl

  if(ignore.case){
    x <- tolower(x)
    y <- tolower(y)
  }

  if (!is.na(pmatch(method, "levenshtein")))
    method <- "levenshtein"

  METHODS <- c("levenshtein", "normlevenshtein", "hamming")
  method <- pmatch(method, METHODS)

  if (is.na(method))
    stop("invalid distance method")
  if (method == -1)
    stop("ambiguous distance method")

  stopifnot(is.character(x), is.character(y))

  # a single multi-character string is split into its characters;
  # otherwise the vector is taken as the sequence itself
  if (length(x) == 1 & nchar(x[1]) > 1)
    x1 <- strsplit(x, split = "")[[1]]
  else
    x1 <- x
  if (length(y) == 1 & nchar(y[1]) > 1)
    y1 <- strsplit(y, split = "")[[1]]
  else
    y1 <- y

  if (method %in% c(1,2)){ ## Levenshtein
    # classic dynamic programming: D is the cost matrix, M records the
    # traceback moves (d = deletion, i = insertion, m/mm = (mis)match)
    m <- length(x1)
    n <- length(y1)
    D <- matrix(NA, nrow = m+1, ncol = n+1)
    M <- matrix("", nrow = m+1, ncol = n+1)
    # first row/column: pure gap costs
    D[,1] <- seq_len(m+1)*gap-1
    D[1,] <- seq_len(n+1)*gap-1
    D[1,1] <- 0
    M[,1] <- "d"
    M[1,] <- "i"
    M[1,1] <- "start"
    text <- c("d", "m", "i")
    for(i in c(2:(m+1))){
      for(j in c(2:(n+1))){
        m1 <- D[i-1,j] + gap
        m2 <- D[i-1,j-1] + (x1[i-1] != y1[j-1])*mismatch
        m3 <- D[i,j-1] + gap
        D[i,j] <- min(m1, m2, m3)
        # record every optimal move; "m" becomes "mm" on a mismatch
        wmin <- text[which(c(m1, m2, m3) == D[i,j])]
        if("m" %in% wmin & x1[i-1] != y1[j-1])
          wmin[wmin == "m"] <- "mm"
        M[i,j] <- paste(wmin, collapse = "/")
      }
    }
    rownames(M) <- rownames(D) <- c("gap", x1)
    colnames(M) <- colnames(D) <- c("gap", y1)
    d <- D[m+1, n+1]
    if(method == 2){ ## normalized levenshtein
      d <- 1-d / (max(m, n))
    }
  }
  if(method == 3){ ## Hamming
    if(length(x1) != length(y1))
      stop("Hamming distance is only defined for equal length strings")
    d <- sum(x1 != y1)
    D <- NULL
    M <- NULL
  }

  # dress the scalar up as a dist-compatible object
  attr(d, "Size") <- 2
  attr(d, "Diag") <- FALSE
  if(length(x) > 1) x <- paste0("", x, collapse = "")
  if(length(y) > 1) y <- paste0("", y, collapse = "")
  attr(d, "Labels") <- c(x,y)
  attr(d, "Upper") <- FALSE
  attr(d, "method") <- METHODS[method]
  attr(d, "call") <- match.call()
  attr(d, "ScoringMatrix") <- D
  attr(d, "TraceBackMatrix") <- M
  class(d) <- c("stringDist", "dist")

  return(d)
}
# Reverse each string character by character.
StrRev <- function(x) {
  chars <- strsplit(x, NULL)
  sapply(chars, function(ch) paste(rev(ch), collapse = ""))
}
# defunct by 0.99.21
# StrRep <- function(x, times, sep=""){
# # same as strrep which seems to be new in 3.4.0
# z <- Recycle(x=x, times=times, sep=sep)
# sapply(1:attr(z, "maxdim"), function(i) paste(rep(z$x[i], times=z$times[i]), collapse=z$sep[i]))
# }
# useless because we have base::strwrap but interesting as regexp example
#
# StrWordWrap <- function(x, n, sep = "\n") {
#
# res <- gsub(gettextf("(.{1,%s})(\\s|$)", n), gettextf("\\1%s", sep), x)
# res <- gsub(gettextf("[%s]$", sep), "", res)
#
# return(res)
#
# }
#
# Pad strings with 'pad' up to 'width' characters; adj determines where the
# filling goes ("left" pads on the right, "right" on the left, "center" on
# both sides). Arguments are recycled element-wise via DescTools::Recycle().
# Strings already longer than width are returned unchanged (not chopped).
StrPad <- function(x, width = NULL, pad = " ", adj = "left") {

  # pad a single string
  .pad <- function(x, width, pad=" ", adj="left"){

    if(is.na(x)) return(NA)

    mto <- match.arg(adj, c("left", "right", "center"))
    free <- max(0, width - nchar(x))
    # repeat the pad string often enough, then cut to the free space
    fill <- substring(paste(rep(pad, ceiling(free / nchar(pad))), collapse = ""), 1, free)

    #### cat(" free=",free,", fill=",fill,", mto=",mto,"\n")
    # old, but chop is not a good idea:  if(free <= 0) substr(x, 1, len)
    if(free <= 0) x
    else if (mto == "left") paste(x, fill, sep = "")
    else if (mto == "right") paste(fill, x, sep = "")
    else paste(substring(fill, 1, free %/% 2), x, substring(fill, 1 + free %/% 2, free), sep = "")

  }

  # adj <- sapply(adj, match.arg, choices=c("left", "right", "center"))
  # default width: longest string in x
  if(is.null(width)) width <- max(nchar(x), na.rm=TRUE)

  lgp <- DescTools::Recycle(x=x, width=width, pad=pad, adj=adj)
  sapply( 1:attr(lgp, "maxdim"), function(i) .pad(lgp$x[i], lgp$width[i], lgp$pad[i], lgp$adj[i]) )

}
# Align a character vector for printing. sep = "\\l" left-aligns,
# "\\r" right-aligns, "\\c" centers; any other sep (e.g. ".") aligns the
# strings on that character (decimal point alignment). Strings not
# containing sep are right-aligned to the separator position.
StrAlign <- function(x, sep = "\\r"){

  # replace \l by \\^, \r by \\$ and \c means centered
  # check for NA only and combined
  # return x if sep is not found in x

  # remember NAs so they can be restored at the end
  id.na <- is.na(x)

  # what should be done, if x does not contain sep??
  # we could return unchanged, but this is often not adaquate
  # we align right to the separator
  if(length(grep("\\", sep, fixed=TRUE)) == 0) {
    # append a dummy separator to strings lacking one; it is blanked out again below
    idx <- !grepl(x=x, pattern=sep, fixed = TRUE)
    x[idx] <- paste(x[idx], sep, sep="")
  }

  # center alignment
  # keep this here, as we may NOT pad x for centered text!!
  # example?? don't see why anymore... check!
  if (sep == "\\c")
    return(StrPad(x, width = max(nchar(x), na.rm=TRUE), pad = " ", adj = "center"))

  # Pad to same maximal length, for right alignment this is mandatory
  # for left alignment not, but again for any character
  x <- StrPad(x, max(nchar(x), na.rm=TRUE))

  # left alignment
  if(sep == "\\l")
    return( sub("(^ +)(.+)", "\\2\\1", x) )

  # right alignment
  if(sep == "\\r")
    return( sub("(.+?)( +$)", "\\2\\1", x) )

  # alignment by a special character: split at the separator, pad the parts
  # before (right-adjusted) and after (left-adjusted) separately
  bef <- substr(x, 1, StrPos(x, sep, fix=TRUE))  # use fix = TRUE as otherwise the decimal would be to have entered as \\.
  aft <- substr(x, StrPos(x, sep, fix=TRUE) + 1, nchar(x))
  # chop white space on the right
  aft <- substr(aft, 1, max(nchar(StrTrim(aft, method="right"))))

  res <- paste(replace(StrPad(bef, max(nchar(bef), na.rm=TRUE),
                              " ", adj = "right"), is.na(bef), ""),
               replace(StrPad(aft, max(nchar(aft), na.rm=TRUE), " ", adj = "left"), is.na(aft),
                       ""), sep = "")
  # restore orignal NAs
  res[id.na] <- NA
  # overwrite the separator
  if(length(grep("\\", sep, fixed=TRUE)) == 0)
    res[idx] <- gsub(sep, " ", res[idx], fixed = TRUE)

  # return unchanged values not containing sep
  return(res)
}
# replaced by 0.99.19: new argument pos for cutting positions and vector support
# StrChop <- function(x, len) {
# # Splits a string into a number of pieces of fixed length
# # example: StrChop(x=paste(letters, collapse=""), len = c(3,5,0))
# xsplit <- character(0)
# for(i in 1:length(len)){
# xsplit <- append(xsplit, substr(x, 1, len[i]))
# x <- substr(x, len[i]+1, nchar(x))
# }
# return(xsplit)
# }
# Split each string into pieces, either of the given lengths 'len' or at the
# cut positions 'pos' (exactly one of the two may be supplied).
# For a single input string a character vector is returned, otherwise a list.
StrChop <- function(x, len, pos) {

  .chop <- function(x, len, pos) {
    # Splits a string into a number of pieces of fixed length
    # example: StrChop(x=paste(letters, collapse=""), len = c(3,5,0))
    # NOTE: missing() still works here because lapply() below passes the
    # outer (possibly missing) arguments straight through as promises
    if(!missing(len)){
      if(!missing(pos))
        stop("too many arguments")
    } else {
      # translate cut positions into piece lengths; the trailing nchar(x)
      # collects whatever remains after the last position
      len <- c(pos[1], diff(pos), nchar(x))
    }
    xsplit <- character(0)
    # repeatedly peel the next len[i] characters off the front
    for(i in 1:length(len)){
      xsplit <- append(xsplit, substr(x, 1, len[i]))
      x <- substr(x, len[i]+1, nchar(x))
    }
    return(xsplit)
  }

  res <- lapply(x, .chop, len=len, pos=pos)
  # single input: unwrap the list for convenience
  if(length(x)==1)
    res <- res[[1]]

  return(res)

}
# Count the words in each string: count the non-word runs flanked by word
# characters (the gaps between words) and add one. Counting matches via
# sum(g > 0) instead of length() makes single-word strings work, where
# gregexpr() returns -1.
StrCountW <- function(x){
  gaps <- gregexpr("\\b\\W+\\b", x, perl = TRUE)
  sapply(gaps, function(g) sum(g > 0)) + 1
}
# Extract all numeric-looking substrings (digits with optional sign, decimal
# point and exponent characters) from each string.
# paste = TRUE glues the pieces per string together; as.numeric = TRUE
# converts the result(s) to numbers.
# NOTE(review): a sign separated by a blank from its digits ("- 2.5") is not
# captured as one number -- observe whether this matters in practice.
StrVal <- function(x, paste = FALSE, as.numeric = FALSE){

  pat <- "[-+.e0-9]*\\d"
  hits <- gregexpr(pattern = pat, text = x)

  # manual substr extraction (instead of regmatches) so that strings without
  # any match yield "" rather than being dropped
  vals <- lapply(seq_along(x), function(i){
    pos <- hits[[i]]
    len <- attr(pos, which = "match.length")
    sapply(seq_along(pos), function(j)
      substr(x[i], start = pos[j], stop = pos[j] + len[j] - 1))
  })

  # the arguments shadow base::paste / base::as.numeric, hence the explicit
  # base:: qualification below
  if (paste == TRUE) {
    vals <- sapply(vals, base::paste, collapse = "")
    if (as.numeric == TRUE)
      vals <- base::as.numeric(vals)
  } else {
    if (as.numeric == TRUE)
      vals <- sapply(vals, base::as.numeric)
    else
      vals <- sapply(vals, as.character)
  }

  return(vals)
}
# Position of the first match of 'pattern' in each string, searching from
# character 'pos' onwards (the returned position is relative to the
# truncated string). No match yields NA. '...' is passed on to regexpr().
StrPos <- function(x, pattern, pos=1, ... ){
  pos <- rep(pos, length.out = length(x))
  tails <- substr(x, start = pos, stop = nchar(x))
  hit <- as.vector(regexpr(pattern = pattern, text = tails, ...))
  hit[hit < 0] <- NA
  return(hit)
}
# Decompose a file path into a list with normpath, drive (Windows only),
# dirname, fullfilename, filename and extension. If last.is.file is NULL it
# is guessed: a trailing / or \ marks the path as a pure directory.
SplitPath <- function(path, last.is.file=NULL) {

  if(is.null(last.is.file)){
    # if last sign is delimiter / or \ read path as dirname
    last.is.file <- (length(grep(pattern="[/\\]$", path)) == 0)
  }

  path <- normalizePath(path, mustWork = FALSE)

  lst <- list()
  lst$normpath <- path
  if (.Platform$OS.type == "windows") {
    # drive letter ("C:") or UNC server prefix ("\\server")
    lst$drive <- regmatches(path, regexpr("^([[:alpha:]]:)|(\\\\[[:alnum:]]+)", path))
    lst$dirname <- gsub(pattern=lst$drive, x=dirname(path), replacement="")
  } else {
    # no drive concept outside windows
    lst$drive <- NA
    lst$dirname <- dirname(path)
  }
  lst$dirname <- paste(lst$dirname, "/", sep="")
  lst$fullfilename <- basename(path)
  # split "name.ext" at the first dot
  lst$filename <- strsplit(lst$fullfilename, "\\.")[[1]][1]
  lst$extension <- strsplit(lst$fullfilename, "\\.")[[1]][2]

  if(!last.is.file){
    # the last component is a directory, fold it into dirname
    lst$dirname <- paste(lst$dirname, lst$fullfilename, "/",
                         sep="")
    lst$extension <- lst$filename <- lst$fullfilename <- NA
  }
  return(lst)
}
###
## base: conversion functions ====
# Byte codes (ASCII values) of the characters of x. A single string gives
# one integer vector; for a character vector the conversion is applied
# elementwise (returning a named list/matrix as sapply dictates).
CharToAsc <- function(x) {
  toAsc <- function(s) strtoi(charToRaw(s), 16L)
  if (length(x) == 1)
    toAsc(x)
  else
    sapply(x, toAsc)
}
# Convert a vector of byte codes back into one character string
# (inverse of CharToAsc).
# ref: http://datadebrief.blogspot.ch/search/label/R
AscToChar <- function(i) {
  bytes <- as.raw(i)
  rawToChar(bytes)
}
# Number base conversions (hex/octal/binary <-> decimal).
# HexToDec("FF") -> 255; DecToHex(255) -> hexmode "ff"
HexToDec <- function(x) strtoi(x, base = 16L)
DecToHex <- function(x) as.hexmode(as.numeric(x))

# OctToDec("12") -> 10; DecToOct(10) -> 12 (as plain numeric)
OctToDec <- function(x) strtoi(x, base = 8L)
DecToOct <- function(x) as.numeric(as.character(as.octmode(as.numeric(x))))

# BinToDec("101") -> 5
BinToDec <- function(x) strtoi(x, base = 2L)
# example: strtoi(c("100001", "101"), 2L)
# DecToBin <- function (x) {
# # This would be nice, but does not work: (intToBin from R.utils)
# # y <- as.integer(x)
# # class(y) <- "binmode"
# # y <- as.character(y)
# # dim(y) <- dim(x)
# # y
# as.vector(sapply(x, function(x) as.integer(paste(rev(as.integer(intToBits(x))), collapse=""))))
# }
# Convert decimal numbers to binary strings via a compiled DescTools routine.
# Inputs above 536870911 (2^29 - 1) are outside the supported range and give NA.
DecToBin <- function (x) {
  z <- .Call("_DescTools_conv_DecToBin", PACKAGE = "DescTools", x)
  z[x > 536870911] <- NA
  # strip leading zeros from the fixed-width C result
  # NOTE(review): this appears to turn an input of 0 into the empty string --
  # confirm against the C routine's output format
  return(sub("^0+", "", z))
}
# void dec_to_bin(int number) {
# int remainder;
#
# if(number <= 1) {
# cout << number;
# return;
# }
#
# remainder = number%2;
# dec_to_bin(number >> 1);
# cout << remainder;
# }
# DecToBinC <- function(x){
# z <- .C("dec_to_bin", x = as.integer(x))
# return(z)
# }
# Convert roman numerals (as character) to integers; inverse of as.roman().
# Invalid numerals yield NA instead of an error.
RomanToInt <- function (x) {
  # thin wrapper around the compiled parser
  roman2int.inner <- function (roman) {
    results <- .C("roman2int", roman = as.character(roman), nchar = as.integer(nchar(roman)),
                  value = integer(1), PACKAGE = "DescTools")
    return(results$value)
  }

  # normalize: trim whitespace, uppercase
  roman <- trimws(toupper(as.character(x)))

  # swallow parse errors and map them to NA
  tryIt <- function(x) {
    retval <- try(roman2int.inner(x), silent = TRUE)
    if (is.numeric(retval))
      retval
    else NA
  }
  retval <- sapply(roman, tryIt)
  retval
}
# Convert angles between degrees and radians.
DegToRad <- function(deg) {
  deg * pi / 180
}

RadToDeg <- function(rad) {
  rad * 180 / pi
}
# Convert measurement units using the conversion factors in the DescTools
# dataset 'd.units'. Temperature (C <-> F) is special-cased because it is an
# affine, not a multiplicative, conversion. Unknown unit pairs yield NA.
UnitConv <- function(x, from_unit, to_unit){
  if(from_unit == "C") {
    if(to_unit=="F") return(x *1.8+32)
  }
  if(from_unit == "F") {
    if(to_unit=="C") return((x -32) *5/9)
  }
  # look up the multiplicative factor for the requested pair
  fact <- d.units[d.units$from == from_unit & d.units$to==to_unit, "fact"]
  # pair not found -> NA result
  if(length(fact)==0) fact <- NA
  return(x * fact)
}
# A faster alternative to do.call(): named arguments are passed into the
# constructed call as symbols and resolved from 'args' at evaluation time,
# which avoids deparsing potentially huge argument values.
# 'what' may be a function, a function name (optionally "pkg::name"), or a
# name object; 'args' a (possibly named) list of arguments.
# source: Gmisc
# author: Max Gordon <max@gforge.se>
DoCall <- function (what, args, quote = FALSE, envir = parent.frame()) {
  if (quote)
    args <- lapply(args, enquote)

  if (is.null(names(args)) ||
      is.data.frame(args)){
    argn <- args
    args <- list()
  }else{
    # Add all the named arguments: refer to them by symbol, keep the values
    # in 'args' which later serves as the evaluation environment
    argn <- lapply(names(args)[names(args) != ""], as.name)
    names(argn) <- names(args)[names(args) != ""]
    # Add the unnamed arguments
    argn <- c(argn, args[names(args) == ""])
    args <- args[names(args) != ""]
  }

  # bug fix: use is.character()/is.function()/is.name() instead of
  # class(what) == "..."; the latter fails for objects with more than one
  # class and errors under R >= 4.2 (condition of length > 1)
  if (is.character(what)){
    # support "pkg::fun" / "pkg:::fun" notation
    fn <- strsplit(what, "[:]{2,3}")[[1]]
    what <- if(length(fn)==1) {
      get(fn[[1]], envir=envir, mode="function")
    } else {
      get(fn[[2]], envir=asNamespace(fn[[1]]), mode="function")
    }
    call <- as.call(c(list(what), argn))
  }else if (is.function(what)){
    # call the function under its deparsed name and supply the function
    # itself through the evaluation environment
    f_name <- deparse(substitute(what))
    call <- as.call(c(list(as.name(f_name)), argn))
    args[[f_name]] <- what
  }else if (is.name(what)){
    call <- as.call(c(list(what, argn)))
  }else{
    stop("'what' must be a character string, a function or a name")
  }

  eval(call,
       envir = args,
       enclos = envir)
}
###
## base: transformation functions ====
# as.matrix() method for xtabs objects: strip the "xtabs" class and the
# stored call so the object behaves like a plain table/matrix
# (as.matrix.default would not convert it otherwise).
as.matrix.xtabs <- function(x, ...){
  attr(x, "call") <- NULL
  attr(x, "class") <- NULL
  x
}
# Parse a character string as tabular data (via read.table) and return it as
# a table object; 'dimnames' optionally names the table's dimensions, '...'
# is passed on to read.table().
TextToTable <- function(x, dimnames = NULL, ...){
  d <- read.table(text = x, ...)
  tab <- as.table(as.matrix(d))
  if (!is.null(dimnames))
    names(dimnames(tab)) <- dimnames
  tab
}
# Recode the levels of a factor. Each named argument in ... maps a new level
# name to the old level(s) it absorbs. Levels not mentioned are kept as they
# are (elselevel = NULL), collected under 'elselevel', or turned into NA
# (elselevel = NA, the default). Empty levels are dropped unless
# use.empty = TRUE.
Recode <- function(x, ..., elselevel=NA, use.empty=FALSE){

  new <- list(...)
  if (any(duplicated(unlist(new))))
    stop ("newlevels contain non unique values!")

  # the old levels not covered by any mapping
  rest <- setdiff(levels(x), unlist(new))

  if (is.null(elselevel)) {
    # leave unmentioned levels as they are (map each onto itself)
    names(rest) <- rest
    new <- c(new, rest)
  } else if (!is.na(elselevel)) {
    # collect all unmentioned levels under the single level 'elselevel';
    # with elselevel = NA they simply become missing
    new[[length(new) + 1]] <- rest
    names(new)[[length(new)]] <- elselevel
  }

  # list-assignment to levels<- performs the actual aggregation
  levels(x) <- new

  if (!use.empty)
    x <- factor(x)   # delete potentially empty levels
  x
}
# Replace missing values by 0 (SQL's zeroifnull).
ZeroIfNA <- function(x) {
  x[is.na(x)] <- 0
  x
}

# Replace (numerically) zero values by NA; counterpart of ZeroIfNA().
# IsZero() compares against zero with a floating point tolerance.
NAIfZero <- function(x) {
  x[IsZero(x)] <- NA
  x
}
# Replace missing values in x by an imputation value.
# FUN may be a function (applied to x, default: median of the non-missing
# values) or a character string containing an expression in 'x'
# (e.g. "mean(x, na.rm=TRUE)").
Impute <- function(x, FUN = function(x) median(x, na.rm=TRUE)) {
  if(is.function(FUN)) {
    # if FUN is a function, then save it under new name and
    # overwrite function name in FUN, which has to be character
    fct <- FUN
    FUN <- "fct"
    FUN <- gettextf("%s(x)", FUN)
  }
  # build and evaluate "replace(x, is.na(x), <FUN expression>)" so that both
  # the function and the character form end up on the same code path
  return(eval(parse(text = gettextf("replace(x, is.na(x), %s)", FUN))))
}
# reorder() method for factors (taken verbatim from gdata, Greg Warnes).
# Levels can be reordered by summary statistics of X within the groups of x
# (arguments X/FUN, like stats::reorder.default), by an explicit 'new.order'
# (numeric positions or level names), or - by default - by a mixed
# alphanumeric sort of the level names. 'order' decides whether an ordered
# factor is produced.
reorder.factor <- function(x, X, FUN, ..., order=is.ordered(x), new.order,
                           sort=SortMixed) {
  # 25.11.2017 verbatim from gdata, Greg Warnes
  constructor <- if (order) ordered else factor

  if(!missing(X) || !missing(FUN)){
    if(missing(FUN)) FUN <- 'mean'

    ## I would prefer to call stats::reorder.default directly,
    ## but it exported from stats, so the relevant code is
    ## replicated here:
    ## -->
    # order levels by FUN applied to X within each level of x
    scores <- tapply(X = X, INDEX = x, FUN = FUN, ...)
    levels <- names(base::sort(scores, na.last = TRUE))
    if(order)
      ans <- ordered(x, levels=levels)
    else
      ans <- factor(x, levels=levels)
    attr(ans, "scores") <- scores
    ## <--
    return(ans)
  } else if (!missing(new.order)) {
    # numeric new.order selects levels by position, otherwise by name
    if (is.numeric(new.order))
      new.order <- levels(x)[new.order]
    else
      new.order <- new.order
  } else
    # default: mixed-sort the level names (respects embedded numbers)
    new.order <- sort(levels(x))

  constructor(x, levels=new.order)
}
# "Mixed" sort of a character vector: embedded numbers are compared
# numerically, the rest alphabetically ("a2" < "a10"). This is a thin
# wrapper that applies the permutation computed by OrderMixed(); see there
# for the meaning of the arguments.
SortMixed <- function(x,
                      decreasing=FALSE,
                      na.last=TRUE,
                      blank.last=FALSE,
                      numeric.type=c("decimal", "roman"),
                      roman.case=c("upper","lower","both") ) {
  perm <- OrderMixed(x,
                     decreasing = decreasing,
                     na.last = na.last,
                     blank.last = blank.last,
                     numeric.type = numeric.type,
                     roman.case = roman.case)
  x[perm]
}
# Ordering permutation for a "mixed" sort of a character vector: substrings
# that look like numbers (decimal or roman, per numeric.type) are compared
# numerically, the remaining text case-insensitively, with strings following
# numbers. NAs and blanks are placed according to na.last / blank.last
# (TRUE = last, FALSE = first, NA = undefined position).
# Taken verbatim from gtools (Greg Warnes).
OrderMixed <- function(x,
                       decreasing=FALSE,
                       na.last=TRUE,
                       blank.last=FALSE,
                       numeric.type=c("decimal", "roman"),
                       roman.case=c("upper","lower","both") ) {

  # 25.11.2017 verbatim from gtools, Greg Warnes

  # - Split each each character string into an vector of strings and
  #   numbers
  # - Separately rank numbers and strings
  # - Combine orders so that strings follow numbers

  numeric.type <- match.arg(numeric.type)
  roman.case   <- match.arg(roman.case)

  if(length(x)<1)
    return(NULL)
  else if(length(x)==1)
    return(1)

  # non-character input: plain order() is already correct
  if( !is.character(x) )
    return( order(x, decreasing=decreasing, na.last=na.last) )

  # marker used to fence off numeric substrings before splitting
  delim="\\$\\@\\$"

  if(numeric.type=="decimal")
  {
    regex <- "((?:(?i)(?:[-+]?)(?:(?=[.]?[0123456789])(?:[0123456789]*)(?:(?:[.])(?:[0123456789]{0,}))?)(?:(?:[eE])(?:(?:[-+]?)(?:[0123456789]+))|)))"  # uses PERL syntax
    numeric <- function(x) as.numeric(x)
  }
  else if (numeric.type=="roman")
  {
    regex <- switch(roman.case,
                    "both"  = "([IVXCLDMivxcldm]+)",
                    "upper" = "([IVXCLDM]+)",
                    "lower" = "([ivxcldm]+)"
    )
    numeric <- function(x) RomanToInt(x)
  }
  else
    stop("Unknown value for numeric.type: ", numeric.type)

  # the non-numeric counterpart: uppercase text where numeric() failed
  nonnumeric <- function(x)
  {
    ifelse(is.na(numeric(x)), toupper(x), NA)
  }

  x <- as.character(x)

  which.nas <- which(is.na(x))
  which.blanks <- which(x=="")

  ####
  # - Convert each character string into an vector containing single
  #   character and numeric values.
  ####

  # find and mark numbers in the form of +1.23e+45.67
  delimited <- gsub(regex,
                    paste(delim,"\\1",delim,sep=""),
                    x,
                    perl=TRUE)

  # separate out numbers
  step1 <- strsplit(delimited, delim)

  # remove empty elements
  step1 <- lapply( step1, function(x) x[x>""] )

  # create numeric version of data
  suppressWarnings( step1.numeric <- lapply( step1, numeric ) )

  # create non-numeric version of data
  suppressWarnings( step1.character <- lapply( step1, nonnumeric ) )

  # now transpose so that 1st vector contains 1st element from each
  # original string
  maxelem <- max(sapply(step1, length))

  step1.numeric.t <- lapply(1:maxelem,
                            function(i)
                              sapply(step1.numeric,
                                     function(x)x[i])
  )

  step1.character.t <- lapply(1:maxelem,
                              function(i)
                                sapply(step1.character,
                                       function(x)x[i])
  )

  # now order them
  rank.numeric <- sapply(step1.numeric.t, rank)
  rank.character <- sapply(step1.character.t,
                           function(x) as.numeric(factor(x)))

  # and merge: character ranks are shifted above all numeric ranks so that
  # strings sort after numbers at every position
  rank.numeric[!is.na(rank.character)] <- 0  # mask off string values

  rank.character <- t(
    t(rank.character) +
      apply(matrix(rank.numeric),2,max,na.rm=TRUE)
  )

  rank.overall <- ifelse(is.na(rank.character),rank.numeric,rank.character)

  order.frame <- as.data.frame(rank.overall)

  # push NAs / blanks to the requested end by giving them extreme ranks
  if(length(which.nas) > 0)
    if(is.na(na.last))
      order.frame[which.nas,] <- NA
  else if(na.last)
    order.frame[which.nas,] <- Inf
  else
    order.frame[which.nas,] <- -Inf

  if(length(which.blanks) > 0)
    if(is.na(blank.last))
      order.frame[which.blanks,] <- NA
  else if(blank.last)
    order.frame[which.blanks,] <- 1e99
  else
    order.frame[which.blanks,] <- -1e99

  order.frame <- as.list(order.frame)
  order.frame$decreasing <- decreasing
  order.frame$na.last <- NA

  retval <- do.call("order", order.frame)

  return(retval)
}
# Dictionary-style lookup: for each element of x find its position in 'ref'
# and return the corresponding element of 'val' (NA where x is not found).
Lookup <- function(x, ref, val){
  idx <- match(x, ref)
  val[idx]
}
# StahelLogC <- function(x, na.rm=FALSE) {
# if(na.rm) x <- na.omit(x)
# ### muessen die 0-Werte hier weggelassen werden??
# x <- x[x>0]
# ### additive Konstante fuer die Logarithmierung nach Stahel "...es hat sich gezeigt, dass..."
# return(as.vector(median(x) / (median(x)/quantile(x, 0.25))^2.9))
# }
# http://support.sas.com/documentation/cdl/en/statugfreq/63124/PDF/default/statugfreq.pdf
# Started log transformation (Stahel): behaves like log(x, base) above a
# threshold and continues linearly (with matching value and slope) below it,
# so zeros and small values are handled gracefully. The threshold defaults
# to q25^(1+mult)/q75^mult of the positive values of 'calib' (by default x
# itself). The threshold and base are attached as attributes so LogStInv()
# can invert the transformation.
LogSt <- function(x, base = 10, calib = x, threshold = NULL, mult = 1) {

  #   original function logst in source regr
  #
  #   # Purpose:   logs of x, zeros and small values treated well
  #   # *********************************************************************
  #   # Author: Werner Stahel, Date:  3 Nov 2001, 08:22
  #
  #   x <- cbind(x)
  #   calib <- cbind(calib)
  #   lncol <- ncol(calib)
  #   ljthr <- length(threshold) > 0
  #
  #   if (ljthr) {
  #     if (!length(threshold) %in% c(1, lncol))
  #       stop("!LogSt! length of argument 'threshold' is inadequate")
  #
  #     lthr <- rep(threshold, length=lncol)
  #     ljdt <- !is.na(lthr)
  #
  #   } else {
  #     ljdt <- rep(TRUE, lncol)
  #     lthr <- rep(NA, lncol)
  #
  #     for (lj in 1:lncol) {
  #       lcal <- calib[, lj]
  #       ldp <- lcal[lcal > 0 & !is.na(lcal)]
  #
  #       if(length(ldp) == 0) ljdt[lj] <- FALSE else {
  #         lq <- quantile(ldp,probs = c(0.25,0.75), na.rm = TRUE)
  #         if(lq[1] == lq[2]) lq[1] <- lq[2]/2
  #         lthr[lj] <- lc <- lq[1]^(1 + mult) / lq[2]^mult
  #       }
  #     }
  #   }
  #
  #   # transform x
  #   for (lj in 1:lncol) {
  #     ldt <- x[,lj]
  #     lc <- lthr[lj]
  #     li <- which(ldt < lc)
  #     if (length(li))
  #       ldt[li] <- lc * 10^((ldt[li] - lc) / (lc * log(10)))
  #     x[,lj] <- log10(ldt)
  #   }
  #   if (length(colnames(x)))
  #     lnmpd <- names(ljdt) <- names(lthr) <- colnames(x)  else
  #       lnmpd <- as.character(1:lncol)
  #
  #   attr(x,"threshold") <- c(lthr)
  #
  #   if (any(!ljdt)) {
  #     warning(':LogSt: no positive x for variables',lnmpd[!ljdt],
  #             '. These are not transformed')
  #     attr(x,"untransformed") <- c(ljdt)
  #   }
  #   x

  if(is.null(threshold)){
    # data-driven threshold from the quartiles of the positive values
    lq <- quantile(calib[calib > 0], probs = c(0.25, 0.75), na.rm = TRUE)
    if (lq[1] == lq[2]) lq[1] <- lq[2]/2
    threshold <- lq[1]^(1 + mult)/lq[2]^mult
  }

  res <- rep(NA, length(x))
  # below the threshold: linear continuation matching value and slope of
  # the log curve at the threshold; above: ordinary log
  idx <- (x < threshold)
  idx.na <- is.na(idx)
  res[idx & !idx.na] <- log(x = threshold, base=base) + ((x[idx & !idx.na] - threshold)/(threshold * log(base)))
  res[!idx & !idx.na] <- log(x = x[!idx & !idx.na], base=base)

  # keep the parameters so LogStInv() can undo the transformation
  attr(res, "threshold") <- threshold
  attr(res, "base") <- base
  return(res)
}
# Inverse of LogSt(): values above log(threshold, base) are exponentiated,
# values below are mapped back through the linear segment. 'base' and
# 'threshold' default to the attributes that LogSt() attached to x.
LogStInv <- function (x, base=NULL, threshold = NULL) {
  if(is.null(threshold)) threshold <- attr(x, "threshold")
  if(is.null(base)) base <- attr(x, "base")

  res <- rep(NA, length(x))
  # bug fix: the branch point must be log(threshold, base) to mirror LogSt();
  # the old code used log10(threshold) and so returned wrong values for any
  # base other than 10
  idx <- (x < log(threshold, base = base))
  idx.na <- is.na(idx)

  # linear segment (inverse of the continuation below the threshold)
  res[idx & !idx.na] <- threshold - threshold * log(base) *( log(x = threshold, base=base) - x[idx & !idx.na])
  # logarithmic segment
  res[!idx & !idx.na] <- base^(x[!idx & !idx.na])

  return(res)
}
# Variance stabilizing functions
# log(x+a)
# log(x+a, base=10)
# sqrt(x+a)
# 1/x
# arcsinh(x)
# LogGen <- function(x, a) { return( log((x + sqrt(x^2 + a^2)) / 2)) }
#
#
# LogLin <- function(x, a) {
# # log-linear hybrid transformation
# # introduced by Rocke and Durbin (2003)
# x[x<=a] <- x[x<=a] / a + log(a) - 1
# x[x>a] <- log(x[x>a])
#
# return(x)
# }
# Generalized logit: rescale x from [min, max] to a proportion and return
# its log-odds.
Logit <- function(x, min=0, max=1) {
  prop <- (x - min) / (max - min)
  odds <- prop / (1 - prop)
  log(odds)
}

# Inverse of Logit(): map log-odds back onto [min, max].
LogitInv <- function(x, min=0, max=1) {
  ex <- exp(x)
  prop <- ex / (1 + ex)
  # exp(Inf)/(1 + exp(Inf)) evaluates to NaN; map it back to the upper bound
  prop <- ifelse(is.na(prop) & !is.na(x), 1, prop)
  prop * (max - min) + min
}
# from library(forecast)
# Box-Cox transformation: log(x) for lambda == 0, otherwise
# (sign(x)*|x|^lambda - 1)/lambda (signed power handles negatives for
# positive lambda; for negative lambda, negative x is undefined -> NA).
# Author: Rob J Hyndman; origin: forecast package.
BoxCox <- function (x, lambda) {
  if (lambda < 0)
    x[x < 0] <- NA    # transformation undefined there

  if (lambda == 0) {
    out <- log(x)
  } else {
    out <- (sign(x) * abs(x)^lambda - 1)/lambda
  }

  # keep column names for matrix input
  if (!is.null(colnames(x)))
    colnames(out) <- colnames(x)
  out
}
# Inverse Box-Cox transformation: exp(x) for lambda == 0, otherwise the
# signed (1/lambda)-th power of (x*lambda + 1). For negative lambda, values
# beyond -1/lambda have no preimage and become NA.
BoxCoxInv <- function(x, lambda){
  if (lambda < 0)
    x[x > -1/lambda] <- NA

  if (lambda == 0) {
    out <- exp(x)
  } else {
    shifted <- x * lambda + 1
    out <- sign(shifted) * abs(shifted)^(1/lambda)
  }

  # keep column names for matrix input
  if (!is.null(colnames(x)))
    colnames(out) <- colnames(x)
  out
}
# This R script contains code for extracting the Box-Cox
# parameter, lambda, using Guerrero's method (1993).
# Written by Leanne Chhay
# Estimate the Box-Cox parameter lambda in [lower, upper], either by
# Guerrero's (1993) coefficient-of-variation method or by profile
# log-likelihood. Written by Leanne Chhay.
BoxCoxLambda <- function(x, method=c("guerrero","loglik"), lower=-1, upper=2) {

  # Guerrero extracts the required lambda
  # Input: x = original time series as a time series object
  # Output: lambda that minimises the coefficient of variation
  Guerrero <- function(x, lower=-1, upper=2, nonseasonal.length=2) {

    # guer.cv computes the coefficient of variation
    # Input:
    #   lam = lambda
    #   x = original time series as a time series object
    # Output: coefficient of variation
    guer.cv <- function(lam, x, nonseasonal.length=2) {
      period <- max(nonseasonal.length, frequency(x))
      nobsf <- length(x)
      nyr <- floor(nobsf / period)
      nobst <- nyr * period
      # fold the (trimmed) series into one column per sub-period
      x.mat <- matrix(x[(nobsf-nobst+1):nobsf], period, nyr)
      x.mean <- apply(x.mat, 2, mean, na.rm=TRUE)
      x.sd <- apply(x.mat, 2, sd, na.rm=TRUE)
      # Guerrero's criterion: variation of sd/mean^(1-lambda) across periods
      x.rat <- x.sd / x.mean^(1-lam)
      return(sd(x.rat, na.rm=TRUE)/mean(x.rat, na.rm=TRUE))
    }

    return(optimize(guer.cv, c(lower,upper), x=x,
                    nonseasonal.length=nonseasonal.length)$minimum)
  }

  # Modified version of boxcox from MASS package: grid search over lambda
  # maximizing the profile log-likelihood of an intercept-only model
  BCLogLik <- function(x, lower=-1, upper=2) {
    n <- length(x)
    if (any(x <= 0))
      stop("x must be positive")
    logx <- log(x)
    # geometric mean, used to scale the transformed values
    xdot <- exp(mean(logx))
    #    if(all(class(x)!="ts"))
    fit <- lm(x ~ 1, data=data.frame(x=x))
    #     else if(frequency(x)>1)
    #       fit <- tslm(x ~ trend + season, data=data.frame(x=x))
    #     else
    #       fit <- tslm(x ~ trend, data=data.frame(x=x))
    xqr <- fit$qr
    lambda <- seq(lower,upper,by=.05)
    xl <- loglik <- as.vector(lambda)
    m <- length(xl)
    for (i in 1L:m)
    {
      # near lambda = 0 use a series expansion of (x^la - 1)/la to avoid
      # numerical cancellation
      if (abs(la <- xl[i]) > 0.02)
        xt <- (x^la - 1)/la
      else
        xt <- logx * (1 + (la*logx)/2 * (1+(la*logx)/3*(1+(la*logx)/4)))
      loglik[i] <- -n/2 * log(sum(qr.resid(xqr, xt/xdot^(la-1))^2))
    }
    return(xl[which.max(loglik)])
  }

  if(any(x <= 0))
    # non-positive values: restrict lambda to be non-negative
    lower <- 0
  #     stop("All values must be positive")

  method <- match.arg(method)

  if(method=="loglik")
    return(BCLogLik(x,lower,upper))
  else
    return(Guerrero(x,lower,upper))
}
# Last observation carried forward: replace each NA by the most recent
# non-missing value before it; leading NAs stay NA.
LOCF <- function(x) UseMethod("LOCF")

LOCF.default <- function(x) {
  # positions of the observed values
  obs <- !is.na(x)
  # prepend NA so any leading missings are filled with NA, then stretch each
  # observed value over the gap up to the next observation
  fill <- c(NA, x[obs])
  times <- diff(c(1, which(obs), length(x) + 1))
  rep(fill, times)
}

LOCF.data.frame <- function(x){
  # fill each column separately, keep the data.frame shape
  as.data.frame(lapply(x, LOCF))
}

LOCF.matrix <- function(x){
  # fill column-wise
  apply(x, 2, LOCF)
}
# Alternative names: PairApply, PwApply, pwapply, papply, ...
# Apply a two-argument function to all pairs of columns (or list elements)
# of x and return the results as a square matrix. With symmetric = TRUE the
# lower triangle is mirrored; otherwise the upper triangle is computed with
# the arguments swapped. '...' is passed on to FUN.
PairApply <- function(x, FUN = NULL, ..., symmetric = FALSE){

  if(is.function(FUN)) {
    # if FUN is a function, then save it under new name and
    # overwrite function name in FUN, which has to be character
    fct <- FUN
    FUN <- "fct"
  }

  if(is.matrix(x)) x <- as.data.frame(x)
  x <- as.list(x)

  ix <- 1:length(x)
  # pairwise logic from pairwise.table; the call is assembled as text so
  # that FUN can be given either as a function or as a character name
  pp <- outer(ix, ix, function(ivec, jvec) sapply(seq_along(ivec),
                                                  function(k) {
                                                    i <- ivec[[k]]
                                                    j <- jvec[[k]]
                                                    if (i >= j)
                                                      eval(parse(text = gettextf("%s(x[[i]], x[[j]], ...)", FUN)))
                                                    else NA
                                                  }))

  # why did we need that? in any case it's wrong, if no symmetric calcs are done
  # diag(pp) <- 1

  if(symmetric){
    # mirror the lower triangle
    pp[upper.tri(pp)] <- t(pp)[upper.tri(t(pp))]
  } else {
    # compute the upper triangle explicitly with swapped arguments
    pp.upr <- outer(ix, ix, function(ivec, jvec) sapply(seq_along(ivec),
                                                        function(k) {
                                                          i <- ivec[[k]]
                                                          j <- jvec[[k]]
                                                          if (i >= j)
                                                            eval(parse(text = gettextf("%s(x[[j]], x[[i]], ...)", FUN)))
                                                          else NA
                                                        }))
    pp[upper.tri(pp)] <- t(pp.upr)[upper.tri(pp.upr)]
  }
  dimnames(pp) <- list(names(x),names(x))

  return(pp)
}
###
## base: date functions ====
# fastPOSIXct <- function(x, tz=NULL, required.components = 3L)
# .POSIXct(if (is.character(x)) .Call("parse_ts", x, required.components) else .Call("parse_ts", as.character(x), required.components), tz)
HmsToSec <- function(x) {
  # Convert "hh:mm:ss" character representations (fractional seconds allowed)
  # to the number of seconds.
  # Fixed: the former sapply/data.frame construction collapsed to a plain
  # vector for length-1 input, so z[, 1] failed; building the numeric matrix
  # row-wise keeps the dimensions for any input length.
  z <- do.call(rbind, lapply(strsplit(as.character(x), ":", fixed = TRUE),
                             as.numeric))
  z[, 1] * 3600 + z[, 2] * 60 + z[, 3]
}
SecToHms <- function(x, digits = NULL) {
  # Format a number of seconds as "hh:mm:ss" (with optional fractional part).
  # If digits is NULL, fractional digits are shown (2) only when any value
  # actually has a fractional part.
  secs <- as.numeric(x)
  hh <- floor(secs / 3600)
  mm <- floor((secs - hh * 3600) / 60)
  ss <- floor(secs - (mm * 60 + hh * 3600))
  frac <- secs - (ss + mm * 60 + hh * 3600)
  if (is.null(digits))
    digits <- ifelse(all(frac < sqrt(.Machine$double.eps)), 0, 2)
  if (digits == 0) {
    tail <- ""
  } else {
    tail <- gettextf(paste(".%0", digits, "d", sep = ""),
                     round(frac * 10^digits, 0))
  }
  gettextf("%02d:%02d:%02d%s", hh, mm, ss, tail)
}
# IsDate: test whether x carries a date and/or time class.
#   what = "either"     : any date or time class
#   what = "both"       : classes that hold both date and time
#   what = "timeVaries" : TRUE if x contains more than one distinct time of day
IsDate <- function(x, what=c('either','both','timeVaries')) {
  what <- match.arg(what)
  cl <- class(x)    # was oldClass 22jun03
  if(!length(cl)) return(FALSE)
  # date-ish classes and date+time classes
  dc <- c('POSIXt','POSIXct','dates','times','chron','Date')
  dtc <- c('POSIXt','POSIXct','chron')
  switch(what,
         either = any(cl %in% dc),
         both = any(cl %in% dtc),
         timeVaries = {
           # original: if('chron' %in% cl || !.R.) { ### chron or S+ timeDate
           if('chron' %in% cl) { # chron ok, but who cares about S+?
             # chron stores time as the fractional part of the numeric value
             y <- as.numeric(x)
             length(unique(round(y - floor(y),13))) > 1
           } else {
             length(unique(format(x, '%H%M%S'))) > 1
           }
         }
  )
}
IsWeekend <- function(x) {
  # TRUE for Saturdays and Sundays.
  # POSIXlt$wday codes the weekday as 0 (Sunday) .. 6 (Saturday).
  wd <- as.POSIXlt(x)$wday
  wd < 1 | wd > 5
}
# This is not useful anymore. Use: as.Date(ISODate())
# Date <- function(year, month = NA, day = NA) {
# if(is.na(month) && is.na(day)) {
# # try to interpret year as yearmonthday yyyymmdd
# res <- as.Date(ISOdate(year %/% 10000, (year %% 10000) %/% 100, (year %% 100)))
# } else {
# res <- as.Date(ISOdate(year, month, day))
# }
# return(res)
# }
# Year <- function(x){ as.integer( format(as.Date(x), "%Y") ) }
Year <- function(x){ as.POSIXlt(x)$year + 1900 }
IsLeapYear <- function(x){
  # TRUE for leap years; x may be integer years or date-like objects.
  # Gregorian rule: divisible by 4, except centuries not divisible by 400.
  # Fixed: IsWhole() is vectorized, so the condition must be collapsed with
  # all() -- if() on a length > 1 condition is an error in R >= 4.2.
  if(!all(IsWhole(x)))
    x <- Year(as.Date(x))
  ifelse(x %% 100 == 0, x %% 400 == 0, x %% 4 == 0)
}
# Month: extract the month of a date-like object.
#   fmt = "m"  : integer 1..12
#   fmt = "mm" : abbreviated month name (factor; character if stringsAsFactors = FALSE)
#   fmt = "mmm": full month name
#   lang       : "local" uses the current locale's names, "engl" the English ones
Month <- function (x, fmt = c("m", "mm", "mmm"), lang = DescToolsOptions("lang"), stringsAsFactors = TRUE) {
  # POSIXlt$mon is 0-based
  res <- as.POSIXlt(x)$mon + 1
  switch(match.arg(arg = fmt, choices = c("m", "mm", "mmm")),
    m = { res },
    mm = {
      # res <- as.integer(format(x, "%m"))
      switch(match.arg(arg = lang, choices = c("local", "engl")),
        local = {
          # months in current locale:  format(ISOdate(2000, 1:12, 1), "%b")
          res <- factor(res, levels=1:12, labels=format(ISOdate(2000, 1:12, 1), "%b"))
        },
        engl = {
          res <- factor(res, levels=1:12, labels=month.abb)
        })
      if(!stringsAsFactors) res <- as.character(res)
    },
    mmm = {
      # res <- as.integer(format(x, "%m"))
      switch(match.arg(arg = lang, choices = c("local", "engl")),
        local = {
          # months in current locale:  format(ISOdate(2000, 1:12, 1), "%b")
          res <- factor(res, levels=1:12, labels=format(ISOdate(2000, 1:12, 1), "%B"))
        },
        engl = {
          res <- factor(res, levels=1:12, labels=month.name)
        })
      if(!stringsAsFactors) res <- as.character(res)
    })
  return(res)
}
# Week: week number of a date.
#   method = "iso": ISO 8601 week (week containing the first Thursday of the
#                   year is week 1; weeks start on Monday)
#   method = "us" : strftime "%W" convention (weeks start on Monday, week 0
#                   possible at the start of the year)
Week <- function(x, method = c("iso", "us")){
  # cast x to date, such as being able to handle POSIX-Dates automatically
  x <- as.Date(x)
  method <- match.arg(method, c("iso", "us"))
  switch(method,
         "iso" = {
           #??? fast implementation in lubridate:
           #     xday <- ISOdate(year(x), month(x), day(x), tz = tz(x))
           #     dn <- 1 + (wday(x) + 5)%%7
           #     nth <- xday + ddays(4 - dn)
           #     jan1 <- ISOdate(year(nth), 1, 1, tz = tz(x))
           #     1 + (nth - jan1)%/%ddays(7)
           # The weeknumber is the number of weeks between the
           # first thursday of the year and the thursday in the target week
           # der Donnerstag in der Zielwoche
           #       x.y <- Year(x)
           #       x.weekday <- Weekday(x)
           #
           #       x.thursday <- (x - x.weekday + 4)
           #       # der erste Donnerstag des Jahres
           #       jan1.weekday <- Weekday(as.Date(paste(x.y, "01-01", sep="-")))
           #       first.thursday <- as.Date(paste(x.y, "01", (5 + 7*(jan1.weekday > 4) - jan1.weekday), sep="-"))
           #
           #       wn <- (as.integer(x.thursday - first.thursday) %/% 7) + 1 - ((x.weekday < 4) & (Year(x.thursday) != Year(first.thursday)))*52
           #       wn <- ifelse(wn == 0, Week(as.Date(paste(x.y-1, "12-31", sep="-"))), wn)
           # z is the Thursday of x's ISO week; its year determines the week's year
           z <- x + (3 - (as.POSIXlt(x)$wday + 6) %% 7)
           jan1 <- as.Date(paste(Year(z), "-01-01", sep=""))
           wn <- 1 + as.integer(z - jan1) %/% 7
         },
         "us"={
           wn <- as.numeric(strftime(as.POSIXlt(x), format="%W"))
         }
  )
  return(wn)
}
# Day <- function(x){ as.integer(format(as.Date(x), "%d") ) }
# Day: day of the month (1..31) of a date-like object
Day <- function(x){ as.POSIXlt(x)$mday }
# Accessor for Day, as defined by library(lubridate)
# Replacement form: shifts x by whole days so that Day(x) == value;
# a value larger than the days in the month rolls over into the next month.
"Day<-" <- function(x, value) { x <- x + (value - Day(x)) }
# Weekday: extract the day of the week of a date-like object.
#   fmt = "d"  : integer 1 (Monday) .. 7 (Sunday), ISO convention
#   fmt = "dd" : abbreviated day name (factor; character if stringsAsFactors = FALSE)
#   fmt = "ddd": full day name
#   lang       : "local" uses the current locale's names, "engl" the English ones
Weekday <- function (x, fmt = c("d", "dd", "ddd"), lang = DescToolsOptions("lang"), stringsAsFactors = TRUE) {
  #   x <- as.Date(x)
  # POSIXlt$wday is 0 (Sunday) .. 6 (Saturday); remap Sunday to 7 (ISO)
  res <- as.POSIXlt(x)$wday
  res <- replace(res, res==0, 7)
  switch(match.arg(arg = fmt, choices = c("d", "dd", "ddd")),
    d = { res },
    dd = {
      # weekdays in current locale, Sunday : Saturday,  format(ISOdate(2000, 1, 2:8), "%A")
      switch(match.arg(arg = lang, choices = c("local", "engl")),
        local = {
          # months in current locale:  format(ISOdate(2000, 1:12, 1), "%b")
          # 2000-01-03 was a Monday, so 3:9 spans Monday..Sunday
          res <- factor(res, levels=1:7, labels=format(ISOdate(2000, 1, 3:9), "%a"))
        },
        engl = {
          res <- factor(res, levels=1:7, labels=day.abb)
        })
      if(!stringsAsFactors) res <- as.character(res)
    },
    ddd = {
      # weekdays in current locale, Sunday : Saturday,  format(ISOdate(2000, 1, 2:8), "%A")
      switch(match.arg(arg = lang, choices = c("local", "engl")),
        local = {
          # months in current locale:  format(ISOdate(2000, 1:12, 1), "%b")
          res <- factor(res, levels=1:7, labels=format(ISOdate(2000, 1, 3:9), "%A"))
        },
        engl = {
          res <- factor(res, levels=1:7, labels=day.name)
        })
      if(!stringsAsFactors) res <- as.character(res)
    })
  return(res)
}
Quarter <- function (x) {
  # Quarter (1..4) of a date-like object.
  # POSIXlt$mon is 0-based, so integer division by 3 bins the months
  # into 0..3, shifted to 1..4.
  # (An older version returned "YYYYQn" strings, which was counterintuitive.)
  m0 <- as.POSIXlt(x)$mon
  m0 %/% 3 + 1
}
YearDay <- function(x) {
  # Day of the year (1..366), equivalent to format(x, "%j") as in the
  # original implementation shown below.
  #   return(as.integer(format(as.Date(x), "%j")))
  # Fixed: POSIXlt$yday is 0-based (Jan 1 == 0), so the plain refactor to
  # POSIXlt introduced an off-by-one against "%j"; add 1 to restore it.
  return(as.POSIXlt(x)$yday + 1)
}
YearMonth <- function(x){
  # Numeric yearmonth representation (YYYYMM) of a date-like object,
  # e.g. 2012-03-05 -> 201203.
  lt <- as.POSIXlt(x)
  100 * (lt$year + 1900) + (lt$mon + 1)
}
# Convenience wrappers for the current date and date-time
Today <- function() Sys.Date()
Now <- function() Sys.time()
# Hour / Minute / Second: clock components of a date-time object
Hour <- function(x) {
  # strptime(x, "%H")
  as.POSIXlt(x)$hour
}
Minute <- function(x) {
  # strptime(x, "%M")
  as.POSIXlt(x)$min
}
Second <- function(x) {
  # strptime(x, "%S")
  # may contain fractional seconds
  as.POSIXlt(x)$sec
}
# Timezone: the timezone abbreviation stored in the POSIXlt representation
Timezone <- function(x) {
  as.POSIXlt(x)$zone
}
DiffDays360 <- function(start_d, end_d, method=c("eu","us")){
  # Day count between two dates on a 360-day calendar (12 months of 30 days),
  # as used in interest/bond day-count conventions.
  # source: http://en.wikipedia.org/wiki/360-day_calendar
  # NOTE: scalar semantics (the adjustments use if()); not vectorized.
  start_d <- as.Date(start_d)
  end_d <- as.Date(end_d)
  slt <- as.POSIXlt(start_d)
  elt <- as.POSIXlt(end_d)
  d1 <- slt$mday; m1 <- slt$mon + 1; y1 <- slt$year + 1900
  d2 <- elt$mday; m2 <- elt$mon + 1; y2 <- elt$year + 1900
  # TRUE if d is the last day of February (the next day is March 1st)
  isLastFeb <- function(d) {
    nxt <- as.POSIXlt(d + 1)
    nxt$mday == 1 & nxt$mon + 1 == 3
  }
  method = match.arg(method)
  switch(method,
         "eu" = {
           # European 30E/360: a 31st on either end is treated as the 30th.
           # Fixed: the original shifted start_d/end_d AFTER d1/d2 had been
           # extracted, so the adjustment never had any effect.
           if(d1 == 31) d1 <- 30
           if(d2 == 31) d2 <- 30
         }
         , "us" = {
           # US 30/360 (NASD): end-of-February and 31st adjustments
           if(isLastFeb(start_d) & isLastFeb(end_d)) d2 <- 30
           if(d1 == 31 || isLastFeb(start_d)) {
             d1 <- 30
             if(d2 == 31) d2 <- 30
           }
         }
  )
  return( (y2-y1)*360 + (m2-m1)*30 + d2-d1)
}
# LastDayOfMonth: the last day of the month x falls in.
# Moves one month ahead, resets to the 1st, and steps back one day.
LastDayOfMonth <- function(x){
  z <- AddMonths(x, 1)
  Day(z) <- 1
  return(z-1)
}
# AddMonths: add n months to a date, clipping to the last day of the target
# month when the day would overflow (e.g. Jan 31 + 1 month -> Feb 28/29).
AddMonths <- function (x, n, ...) {
  .addMonths <- function (x, n) {
    # ref: http://stackoverflow.com/questions/14169620/add-a-month-to-a-date
    # Author: Antonio
    # no ceiling
    # candidate result without overflow handling
    res <- sapply(x, seq, by = paste(n, "months"), length = 2)[2,]
    # sapply kills the Date class, so recreate down the road
    # ceiling
    # candidate upper bound: last day of the target month
    DescTools::Day(x) <- 1
    res_c <- sapply(x, seq, by = paste(n + 1, "months"), length = 2)[2,] - 1
    # use ceiling in case of overlapping
    res <- pmin(res, res_c)
    return(res)
  }
  x <- as.Date(x, ...)
  res <- mapply(.addMonths, x, n)
  # mapply (as sapply above) kills the Date class, so recreate here
  # and return res in the same class as x
  class(res) <- "Date"
  return(res)
}
# AddMonthsYM: add n months to numeric yearmonth (YYYYMM) or yearmonthday
# (YYYYMMDD) representations, returning the same representation.
AddMonthsYM <- function (x, n) {
  .addMonths <- function (x, n) {
    if (x %[]% c(100001, 999912)) {
      # Author: Roland Rapold
      # YYYYMM
      y <- x %/% 100
      m <- x - y * 100
      # +120/-10 keeps the intermediate arithmetic positive for negative n
      res <- (y - 10 + ((m + n + 120 - 1) %/% 12)) * 100 +
        ((m + n + 120 - 1) %% 12) + 1
    } else if (x %[]% c(10000101, 99991231)) {
      # YYYYMMDD: delegate to the Date-based AddMonths and re-encode
      res <- DescTools::AddMonths(x = as.Date(as.character(x), "%Y%m%d"), n = n)
      res <- DescTools::Year(res)*10000 + DescTools::Month(res)*100 + Day(res)
    }
    return(res)
  }
  res <- mapply(.addMonths, x, n)
  return(res)
}
# Zodiac: the zodiac (star) sign for a date, in English or German.
# Cut points are month*100 + day boundaries of the signs.
Zodiac <- function(x, lang = c("engl","deu"), stringsAsFactors = TRUE) {
  switch(match.arg(lang, choices=c("engl","deu"))
         , engl = {z <- c("Capricorn","Aquarius","Pisces","Aries","Taurus","Gemini","Cancer","Leo","Virgo","Libra","Scorpio","Sagittarius","Capricorn") }
         , deu = {z <- c("Steinbock","Wassermann","Fische","Widder","Stier","Zwillinge","Krebs","Loewe","Jungfrau","Waage","Skorpion","Schuetze","Steinbock") }
  )
  # encode the date as mmdd and bin it into the sign intervals;
  # Capricorn appears twice (year start and year end)
  i <- cut(DescTools::Month(x)*100 + DescTools::Day(x),
           breaks=c(0,120,218,320,420,520,621,722,822,923,1023,1122,1221,1231))
  if(stringsAsFactors){
    res <- i
    levels(res) <- z
  } else {
    res <- z[i]
  }
  return(res)
}
# axTicks.POSIXct: compute (but do not draw) pretty tick locations for a
# POSIXct axis. Adapted from base R's axis.POSIXct; instead of drawing an
# axis the tick positions z are returned.
axTicks.POSIXct <- function (side, x, at, format, labels = TRUE, ...) {
  # This is completely original R-code with one exception:
  # Not an axis is drawn but z are returned.
  mat <- missing(at) || is.null(at)
  if (!mat)
    x <- as.POSIXct(at)
  else x <- as.POSIXct(x)
  # usr coordinates of the relevant axis (x for sides 1/3, y for 2/4)
  range <- par("usr")[if (side%%2)
    1L:2L
    else 3L:4L]
  d <- range[2L] - range[1L]
  z <- c(range, x[is.finite(x)])
  attr(z, "tzone") <- attr(x, "tzone")
  # choose the tick unit (sc, in seconds) and default format from the span d
  if (d < 1.1 * 60) {
    sc <- 1
    if (missing(format))
      format <- "%S"
  }
  else if (d < 1.1 * 60 * 60) {
    sc <- 60
    if (missing(format))
      format <- "%M:%S"
  }
  else if (d < 1.1 * 60 * 60 * 24) {
    sc <- 60 * 60
    if (missing(format))
      format <- "%H:%M"
  }
  else if (d < 2 * 60 * 60 * 24) {
    sc <- 60 * 60
    if (missing(format))
      format <- "%a %H:%M"
  }
  else if (d < 7 * 60 * 60 * 24) {
    sc <- 60 * 60 * 24
    if (missing(format))
      format <- "%a"
  }
  else {
    sc <- 60 * 60 * 24
  }
  if (d < 60 * 60 * 24 * 50) {
    # up to ~50 days: pretty() on the scaled axis, rounded to days if daily
    zz <- pretty(z/sc)
    z <- zz * sc
    z <- .POSIXct(z, attr(x, "tzone"))
    if (sc == 60 * 60 * 24)
      z <- as.POSIXct(round(z, "days"))
    if (missing(format))
      format <- "%b %d"
  }
  else if (d < 1.1 * 60 * 60 * 24 * 365) {
    # up to ~1 year: ticks at pretty month starts
    z <- .POSIXct(z, attr(x, "tzone"))
    zz <- as.POSIXlt(z)
    zz$mday <- zz$wday <- zz$yday <- 1
    zz$isdst <- -1
    zz$hour <- zz$min <- zz$sec <- 0
    zz$mon <- pretty(zz$mon)
    m <- length(zz$mon)
    M <- 2 * m
    # duplicate across this year and the next to cover the range
    m <- rep.int(zz$year[1L], m)
    zz$year <- c(m, m + 1)
    zz <- lapply(zz, function(x) rep(x, length.out = M))
    zz <- .POSIXlt(zz, attr(x, "tzone"))
    z <- as.POSIXct(zz)
    if (missing(format))
      format <- "%b"
  }
  else {
    # longer spans: ticks at pretty year starts
    z <- .POSIXct(z, attr(x, "tzone"))
    zz <- as.POSIXlt(z)
    zz$mday <- zz$wday <- zz$yday <- 1
    zz$isdst <- -1
    zz$mon <- zz$hour <- zz$min <- zz$sec <- 0
    zz$year <- pretty(zz$year)
    M <- length(zz$year)
    zz <- lapply(zz, function(x) rep(x, length.out = M))
    z <- as.POSIXct(.POSIXlt(zz))
    if (missing(format))
      format <- "%Y"
  }
  if (!mat)
    z <- x[is.finite(x)]
  # keep only ticks inside the plotting region
  keep <- z >= range[1L] & z <= range[2L]
  z <- z[keep]
  if (!is.logical(labels))
    labels <- labels[keep]
  else if (identical(labels, TRUE))
    labels <- format(z, format = format)
  else if (identical(labels, FALSE))
    labels <- rep("", length(z))
  #   axis(side, at = z, labels = labels, ...)
  #  return(list(at=z, labels=labels))
  return(z)
}
# axTicks.Date: compute (but do not draw) pretty tick locations for a Date
# axis. Adapted from base R's axis.Date; returns the tick positions.
axTicks.Date <- function(side = 1, x, ...) {
  ## This functions is almost a copy of axis.Date
  x <- as.Date(x)
  range <- par("usr")[if (side%%2)
    1L:2L
    else 3:4L]
  range[1L] <- ceiling(range[1L])
  range[2L] <- floor(range[2L])
  d <- range[2L] - range[1L]
  z <- c(range, x[is.finite(x)])
  class(z) <- "Date"
  # NOTE(review): this d < 7 branch is immediately overwritten by the next
  # if (no else); format is a leftover from axis.Date and unused in the
  # returned value anyway.
  if (d < 7)
    format <- "%a"
  if (d < 100) {
    # short spans: pretty() directly on the numeric dates
    z <- structure(pretty(z), class = "Date")
    format <- "%b %d"
  }
  else if (d < 1.1 * 365) {
    # up to ~1 year: ticks at pretty month starts across two years
    zz <- as.POSIXlt(z)
    zz$mday <- 1
    zz$mon <- pretty(zz$mon)
    m <- length(zz$mon)
    m <- rep.int(zz$year[1L], m)
    zz$year <- c(m, m + 1)
    z <- as.Date(zz)
    format <- "%b"
  }
  else {
    # longer spans: ticks at pretty year starts
    zz <- as.POSIXlt(z)
    zz$mday <- 1
    zz$mon <- 0
    zz$year <- pretty(zz$year)
    z <- as.Date(zz)
    format <- "%Y"
  }
  # keep only ticks inside the plotting region, deduplicated and sorted
  keep <- z >= range[1L] & z <= range[2L]
  z <- z[keep]
  z <- sort(unique(z))
  class(z) <- "Date"
  z
}
###
## base: information functions ====
# Between operators
# Between operators: test whether x lies within the range rng.
#   %[]% both borders included    %(]% left border excluded
#   %[)% right border excluded    %()% both borders excluded
# rng may be a length-2 vector or a 2-column matrix of ranges (recycled
# against x). Numeric, date, ordered-factor and character x are supported.
# Fixed in all four: the character test used class(x) == "character", which
# fails when class(x) has length > 1 (if() on a vector condition is an error
# in R >= 4.2); is.character(x) is the correct test.
`%[]%` <- function(x, rng) {
  if(is.matrix(rng)){
    # recycle things
    # which parameter has the highest dimension
    maxdim <- max(length(x), nrow(rng))
    # recycle all params to maxdim
    x <- rep(x, length.out = maxdim)
    # the rows of the matrix rng
    rng <- rng[rep(1:nrow(rng), length.out = maxdim),]
    res <- .Call("between_num_lrm", as.numeric(x), as.numeric(rng[,1]), as.numeric(rng[,2]), PACKAGE="DescTools")
    res[is.na(x)] <- NA
    return( res)
  }
  if(is.numeric(x) || IsDate(x)) {
    # as.numeric still needed for casting integer to numeric!!
    res <- .Call("between_num_lr", as.numeric(x), as.numeric(rng[1]), as.numeric(rng[2]), PACKAGE="DescTools")
    res[is.na(x)] <- NA
  } else if(is.ordered(x)) {
    # compare on the level positions
    res <- .Call("between_num_lr", as.numeric(x), as.numeric(match(rng[1], levels(x))), as.numeric(match(rng[2], levels(x))), PACKAGE="DescTools")
    res[is.na(x)] <- NA
  } else if(is.character(x)) {
    res <- ifelse ( x >= rng[1] & x <= rng[2], TRUE, FALSE )
  } else {
    res <- rep(NA, length(x))
  }
  return(res)
}
`%(]%` <- function(x, rng) {
  if(is.matrix(rng)){
    # recycle things
    # which parameter has the highest dimension
    maxdim <- max(length(x), nrow(rng))
    # recycle all params to maxdim
    x <- rep(x, length.out = maxdim)
    # the rows of the matrix rng
    rng <- rng[rep(1:nrow(rng), length.out = maxdim),]
    res <- .Call("between_num_rm", as.numeric(x), as.numeric(rng[,1]), as.numeric(rng[,2]), PACKAGE="DescTools")
    res[is.na(x)] <- NA
    return( res)
  }
  if(is.numeric(x) || IsDate(x)) {
    # as.numeric still needed for casting integer to numeric!!
    res <- .Call("between_num_r", as.numeric(x), as.numeric(rng[1]), as.numeric(rng[2]), PACKAGE="DescTools")
    res[is.na(x)] <- NA
  } else if(is.ordered(x)) {
    res <- .Call("between_num_r", as.numeric(x), as.numeric(match(rng[1], levels(x))), as.numeric(match(rng[2], levels(x))), PACKAGE="DescTools")
    res[is.na(x)] <- NA
  } else if(is.character(x)) {
    res <- ifelse ( x > rng[1] & x <= rng[2], TRUE, FALSE )
  } else {
    res <- rep(NA, length(x))
  }
  return(res)
}
`%[)%` <- function(x, rng) {
  if(is.matrix(rng)){
    # recycle things
    # which parameter has the highest dimension
    maxdim <- max(length(x), nrow(rng))
    # recycle all params to maxdim
    x <- rep(x, length.out = maxdim)
    # the rows of the matrix rng
    rng <- rng[rep(1:nrow(rng), length.out = maxdim),]
    res <- .Call("between_num_lm", as.numeric(x), as.numeric(rng[,1]), as.numeric(rng[,2]), PACKAGE="DescTools")
    res[is.na(x)] <- NA
    return( res)
  }
  if(is.numeric(x) || IsDate(x)) {
    # as.numeric still needed for casting integer to numeric!!
    res <- .Call("between_num_l", as.numeric(x), as.numeric(rng[1]), as.numeric(rng[2]), PACKAGE="DescTools")
    res[is.na(x)] <- NA
  } else if(is.ordered(x)) {
    res <- .Call("between_num_l", as.numeric(x), as.numeric(match(rng[1], levels(x))), as.numeric(match(rng[2], levels(x))), PACKAGE="DescTools")
    res[is.na(x)] <- NA
  } else if(is.character(x)) {
    res <- ifelse ( x >= rng[1] & x < rng[2], TRUE, FALSE )
  } else {
    res <- rep(NA, length(x))
  }
  return(res)
}
`%()%` <- function(x, rng) {
  if(is.matrix(rng)){
    # recycle things
    # which parameter has the highest dimension
    maxdim <- max(length(x), nrow(rng))
    # recycle all params to maxdim
    x <- rep(x, length.out = maxdim)
    # the rows of the matrix rng
    rng <- rng[rep(1:nrow(rng), length.out = maxdim),]
    res <- .Call("between_num_m", as.numeric(x), as.numeric(rng[,1]), as.numeric(rng[,2]), PACKAGE="DescTools")
    res[is.na(x)] <- NA
    return( res)
  }
  if(is.numeric(x) || IsDate(x)) {
    # as.numeric still needed for casting integer to numeric!!
    res <- .Call("between_num_", as.numeric(x), as.numeric(rng[1]), as.numeric(rng[2]), PACKAGE="DescTools")
    res[is.na(x)] <- NA
  } else if(is.ordered(x)) {
    res <- .Call("between_num_", as.numeric(x), as.numeric(match(rng[1], levels(x))), as.numeric(match(rng[2], levels(x))), PACKAGE="DescTools")
    res[is.na(x)] <- NA
  } else if(is.character(x)) {
    res <- ifelse ( x > rng[1] & x < rng[2], TRUE, FALSE )
  } else {
    res <- rep(NA, length(x))
  }
  return(res)
}
# outside operators (not exactly the negations)
# x outside the open interval: closed complement of %()%
`%][%` <- function(x, rng) {
  return(!(x %()% rng))
}
# x outside (left-open, right-closed): complement of %(]%
`%](%` <- function(x, rng) {
  return(!(x %(]% rng))
}
# x outside (left-closed, right-open): complement of %[)%
`%)[%` <- function(x, rng) {
  return(!(x %[)% rng))
}
# x strictly outside the closed interval: complement of %[]%
`%)(%` <- function(x, rng) {
  return(!(x %[]% rng))
}
# Not %in% operator
`%nin%` <- function(x, table) match(x, table, nomatch = 0) == 0
# quick paste operator
# Core (Chambers) does not recommend + for non commutative operators, but still it's convenient and so we use c
# is it really? I doubt meanwhile...
# https://www.stat.math.ethz.ch/pipermail/r-devel/2006-August/039013.html
# http://stackoverflow.com/questions/1319698/why-doesnt-operate-on-characters-in-r?lq=1
`%c%` <- function(x, y) paste(x, y, sep="")
# SQL-like pattern match for a single pattern; delegates to %like any%
`%like%` <- function(x, pattern) {
  return(`%like any%`(x, pattern))
}
# SQL-like pattern match against several patterns: TRUE where x matches at
# least one pattern. A leading/trailing "%" acts as a wildcard, as in SQL's
# LIKE; without it the pattern is anchored at that end.
# NOTE(review): other regex metacharacters in pattern are NOT escaped --
# presumably intended to allow mixing in regex, but confirm.
`%like any%` <- function(x, pattern) {
  # translate each LIKE pattern into an anchored/unanchored regex
  pattern <- sapply(pattern, function(z){
    if (!substr(z, 1, 1) == "%") {
      z <- paste("^", z, sep="")
    } else {
      z <- substr(z, 2, nchar(z) )
    }
    if (!substr(z, nchar(z), nchar(z)) == "%") {
      z <- paste(z, "$", sep="")
    } else {
      z <- substr(z, 1, nchar(z)-1 )
    }
    return(z)
  })
  # any-of semantics via alternation
  grepl(pattern=paste(pattern, collapse = "|"), x=x)
  # since 0.99.17: better returning the values, than a logical vector:
  # grep(pattern=paste(pattern, collapse = "|"), x=x, value=TRUE)
  # rolled back 26.4.2016: did not really prove successful
}
# c(Date(2012,1,3), Date(2012,2,3)) %overlaps% c(Date(2012,3,1), Date(2012,3,3))
# c(Date(2012,1,3), Date(2012,2,3)) %overlaps% c(Date(2012,1,15), Date(2012,1,21))
# Date(2012,1,3) %overlaps% c(Date(2012,3,1), Date(2012,3,3))
# c(1, 18) %overlaps% c(10, 45)
# Interval <- function(xp, yp){
# # calculates the number of days of the overlapping part of two date periods
# length(intersect(xp[1]:xp[2], yp[1]:yp[2]))
# }
# Interval: signed gap between two ranges x and y (each a length-2 vector or
# a 2-column matrix of ranges, recycled row-wise). 0 when the ranges overlap,
# positive when y lies entirely right of x, negative when entirely left.
Interval <- function(x, y){
  # make sure that min is left and max right
  x <- cbind(apply(rbind(x), 1, min), apply(rbind(x), 1, max))
  y <- cbind(apply(rbind(y), 1, min), apply(rbind(y), 1, max))
  # replicate
  maxdim <- max(nrow(x), nrow(y))
  x <- x[rep(1:nrow(x), length.out=maxdim), , drop=FALSE]
  y <- y[rep(1:nrow(y), length.out=maxdim), , drop=FALSE]
  # d stays 0 where the ranges overlap
  d <- numeric(maxdim)
  idx <- y[,1] > x[,2]
  d[idx] <- (y[idx,1] - x[idx,2])
  idx <- y[,2] < x[,1]
  d[idx] <- (y[idx,2] - x[idx,1])
  unname(d)
}
# TRUE if the ranges spanned by x and y intersect (shared borders count).
# Scalars are treated as degenerate ranges.
`%overlaps%` <- function(x, y) {
  if (length(x) < 2) x <- c(x, x)
  if (length(y) < 2) y <- c(y, y)
  # De Morgan of: not (x entirely left of y, or x entirely right of y)
  max(x) >= min(y) & min(x) <= max(y)
}
# Overlap: length of the overlapping part of two ranges x and y (each a
# length-2 vector or a 2-column matrix of ranges, recycled row-wise);
# 0 where the ranges do not overlap.
Overlap <- function(x, y){
  # make sure that min is left and max right
  x <- cbind(apply(rbind(x), 1, min), apply(rbind(x), 1, max))
  y <- cbind(apply(rbind(y), 1, min), apply(rbind(y), 1, max))
  # replicate
  maxdim <- max(nrow(x), nrow(y))
  x <- x[rep(1:nrow(x), length.out=maxdim), , drop=FALSE]
  y <- y[rep(1:nrow(y), length.out=maxdim), , drop=FALSE]
  # old: replaced in 0.99.17 as it did not what it was expected to
  #
  #   d <- (apply(x, 1, diff) + apply(y, 1, diff)) - pmin(x[,2] - y[,1], y[,2]- x[,1])
  #   d[x[,1] > y[,2] | y[,1] > x[,2]] <- 0
  # overlap = min(upper ends) - max(lower ends), clamped at 0
  d1 <- x[, 2]
  idx <- x[, 2] > y[, 2]
  d1[idx] <- y[idx, 2]
  d2 <- y[, 1]
  idx <- x[, 1] > y[, 1]
  d2[idx] <- x[idx, 1]
  d <- d1 - d2
  d[d <=0 ] <- 0
  unname(d)
}
AllDuplicated <- function(x){
  # Index vector flagging every value involved in a tie: an element is TRUE
  # when it has a duplicate anywhere in x (scanning from the front catches
  # later copies, scanning from the back catches the first occurrence).
  # Hence !AllDuplicated(x) marks the values appearing exactly once.
  duplicated(x) | duplicated(x, fromLast = TRUE)
}
# dummy codierung als Funktion aus: library(nnet)
# see also model.frame(...)
# ClassInd <- function(cl) {
# n <- length(cl)
# cl <- as.factor(cl)
# x <- matrix(0, n, length(levels(cl)))
# x[(1L:n) + n * (unclass(cl) - 1L)] <- 1
# dimnames(x) <- list(names(cl), levels(cl))
# x
# }
# Dummy: dummy-code a factor (or factor-coercible vector) using one of the
# standard contrast schemes, or "full" for one indicator column per level.
# base selects the reference level (index or level name) for "treatment".
# Returns a matrix with one row per observation; attribute "base" records
# the reference level (NA for "full").
Dummy <- function (x, method = c("treatment", "sum", "helmert", "poly", "full"), base = 1, levels=NULL) {
  # Alternatives:
  #   options(contrasts = c("contr.sum", "contr.poly"))
  #   model.matrix(~x.)[, -1]   ###  und die dummy-codes
  # or Ripley's brilliant shorty-function:
  #   diag(nlevels(x))[x,]
  if(is.null(levels))
    x <- factor(x)
  else
    x <- factor(x, levels=levels)
  # base may be given as a level name; translate to its index
  if(!is.numeric(base)) base <- match(base, levels(x))
  method <- match.arg( arg = method, choices = c("treatment", "sum", "helmert", "poly", "full") )
  # index the contrast matrix by the factor to expand to observation rows
  switch( method
          , "treatment" = { res <- contr.treatment(n = nlevels(x), base = base)[x,] }
          , "sum" = { res <- contr.sum(n = nlevels(x))[x,] }
          , "helmert" = { res <- contr.helmert(n = nlevels(x))[x,] }
          , "poly" = { res <- contr.poly(n = nlevels(x))[x,] }
          , "full" = { res <- diag(nlevels(x))[x,] }
  )
  res <- as.matrix(res) # force res to be matrix, avoiding res being a vector if nlevels(x) = 2
  if(method=="full") {
    dimnames(res) <- list(if(is.null(names(x))) 1:length(x) else names(x), levels(x))
    attr(res, "base") <- NA
  } else {
    dimnames(res) <- list(if(is.null(names(x))) 1:length(x) else names(x), levels(x)[-base])
    attr(res, "base") <- levels(x)[base]
  }
  return(res)
}
# would not return characters correctly
#
# Coalesce: element-wise first non-missing value across several vectors (or
# the columns of a data.frame/matrix), like SQL's COALESCE.
# method = "is.na" keeps the first non-NA, "is.finite" the first finite value.
Coalesce <- function(..., method = c("is.na", "is.finite")) {
  # Returns the first element in x which is not NA
  if(length(list(...)) > 1) {
    # several arguments: vectors of equal length become data.frame columns,
    # scalars are flattened into a single vector
    if(all(lapply(list(...), length) > 1)){
      x <- data.frame(..., stringsAsFactors = FALSE)
    } else {
      x <- unlist(list(...))
    }
  } else {
    # single argument: a matrix is treated column-wise via data.frame
    if(is.matrix(...)) {
      x <- data.frame(..., stringsAsFactors = FALSE)
    } else {
      x <- (...)
    }
  }
  # fold pairwise, keeping the earlier value where it is valid
  switch(match.arg(method, choices=c("is.na", "is.finite")),
         "is.na" = res <- Reduce(function (x,y) ifelse(!is.na(x), x, y), x),
         "is.finite" = res <- Reduce(function (x,y) ifelse(is.finite(x), x, y), x)
  )
  return(res)
}
# PartitionBy: apply FUN within groups and return a vector aligned with x,
# like the SQL-OLAP windowing "FUN(x) OVER (PARTITION BY g)".
# Multiple grouping variables are passed as by = list(g1, g2, ...), as in
# tapply. Cf. stats::ave, which only supports a fixed argument signature.
PartitionBy <- function(x, by, FUN, ...){
  # SQL-OLAP:  sum() over (partition by g)
  # (more than 1 grouping variables are enumerated like by=list(g1,g2,g3),
  # as it is defined in tapply
  # see also ave, which only handles arguments otherwise..
  if (missing(by))
    x[] <- FUN(x, ...)
  else {
    # interaction() combines the grouping variables into one factor;
    # split<- writes the group-wise results back into the original order
    g <- interaction(by)
    split(x, g) <- lapply(split(x, g), FUN, ...)
  }
  x
}
IsWhole <- function (x, all=FALSE, tol = sqrt(.Machine$double.eps), na.rm=FALSE) {
  # Test whether values are whole numbers (up to tolerance tol).
  # all = FALSE returns an element-wise logical vector, all = TRUE a single
  # TRUE/FALSE for the whole vector. Non-numeric input yields FALSE.
  if (na.rm)
    x <- x[!is.na(x)]
  if(all){
    if (is.integer(x)) {
      TRUE
    } else if (is.numeric(x)) {
      isTRUE(all.equal(x, round(x), tol))
    } else if (is.complex(x)) {
      isTRUE(all.equal(Re(x), round(Re(x)), tol)) && isTRUE(all.equal(Im(x), round(Im(x)), tol))
    } else FALSE
  } else {
    if (is.integer(x)) {
      rep(TRUE, length(x))
    } else if (is.numeric(x)) {
      abs(x - round(x)) < tol
    } else if (is.complex(x)) {
      # Fixed: element-wise branch must use & (vectorized); && only looks at
      # the first element and is an error for length > 1 in R >= 4.2
      abs(Re(x) - round(Re(x))) < tol & abs(Im(x) - round(Im(x))) < tol
    } else rep(FALSE, length(x))
  }
}
IsZero <- function(x, tol = sqrt(.Machine$double.eps), na.rm = FALSE) {
  # TRUE where a numeric value equals zero up to tolerance tol;
  # FALSE for non-numeric input.
  if (na.rm)
    x <- x[!is.na(x)]
  if (!is.numeric(x))
    return(FALSE)
  abs(x) < tol
}
IsNumeric <- function (x, length.arg = Inf, integer.valued = FALSE, positive = FALSE, na.rm = FALSE){
  # TRUE if x is a finite numeric vector, optionally of exact length
  # length.arg, integer-valued, and/or strictly positive.
  if (na.rm)
    x <- x[!is.na(x)]
  if (!all(is.numeric(x)) || !all(is.finite(x)))
    return(FALSE)
  if (is.finite(length.arg) && length(x) != length.arg)
    return(FALSE)
  if (integer.valued && !all(x == round(x)))
    return(FALSE)
  if (positive && !all(x > 0))
    return(FALSE)
  TRUE
}
IsOdd <- function(x) x %% 2 == 1
IsDichotomous <- function(x, strict=FALSE, na.rm=FALSE) {
  # TRUE if x takes at most two distinct values (exactly two when
  # strict = TRUE). NAs count as a value unless na.rm = TRUE.
  if (na.rm)
    x <- x[!is.na(x)]
  k <- length(unique(x))
  if (strict) k == 2 else k <= 2
}
StrIsNumeric <- function(x){
  # TRUE where a character value can be parsed as a number, e.g.
  #   StrIsNumeric(c("123", "-3.141", "foobar123"))  ->  TRUE TRUE FALSE
  parsed <- suppressWarnings(as.numeric(x))
  !is.na(parsed)
}
# IsPrime: element-wise primality test for nonnegative integers by trial
# division against all primes up to sqrt(max(x)); preserves dim(x).
IsPrime <- function(x) {
  if (is.null(x) || length(x) == 0)
    stop("Argument 'x' must be a nonempty vector or matrix.")
  if (!is.numeric(x) || any(x < 0) || any(x != round(x)))
    stop("All entries of 'x' must be nonnegative integers.")
  n <- length(x)
  X <- x[1:n]
  L <- logical(n)
  # candidate divisors: all primes up to sqrt of the largest value
  p <- DescTools::Primes(ceiling(sqrt(max(x))))
  for (i in 1:n) {
    # prime iff no smaller prime divides it (vacuously TRUE for X[i] <= 2)
    L[i] <- all(X[i] %% p[p < X[i]] != 0)
  }
  # 0 and 1 are not prime
  L[X == 1 | X == 0] <- FALSE
  dim(L) <- dim(x)
  return(L)
}
VecRot <- function(x, k = 1) {
  # Rotate the elements of x circularly by k positions to the right
  # (negative k rotates left). Non-integer k is rounded with a warning.
  if (k != round(k)) {
    k <- round(k)
    warning("'k' is not an integer")
  }
  # just one shift: (1:x %% x) + 1
  n <- length(x)
  k <- k %% n
  # take a window of length n out of the doubled vector
  c(x, x)[(n - k + 1):(2 * n - k)]
}
VecShift <- function(x, k = 1){
  # Shift the elements of x by k positions, padding with NA: k > 0 shifts
  # towards the end, k < 0 towards the front. Length is preserved.
  # Non-integer k is rounded with a warning.
  if (k != round(k)) {
    k <- round(k)
    warning("'k' is not an integer")
  }
  n <- length(x)
  if (k < 0) {
    # Fixed: the original 'x[-k:length(x)]' parsed as x[-(k:length(x))]
    # (':' binds tighter than unary minus), mixing positive and negative
    # subscripts -- an error for any k < 0.
    k <- min(-k, n)
    c(x[-seq_len(k)], rep(NA, k))
  } else {
    # clamp k to n so that k == n yields all NA instead of x[1:0] artifacts
    k <- min(k, n)
    c(rep(NA, k), x[seq_len(n - k)])
  }
}
RoundTo <- function(x, multiple = 1, FUN = round) {
  # Round x to the nearest 'multiple', using FUN as the rounding rule
  # (typically round, ceiling or floor). FUN may be passed either as a
  # function or as a function name (character), as before.
  # match.fun() resolves both cases and replaces the former
  # eval(parse(...)) indirection.
  FUN <- match.fun(FUN)
  # round() defaults to digits = 0, which is exactly what we need here
  FUN(x / multiple) * multiple
}
# Alternative Idee mit up and down:
# Round <- function(x, digits = 0, direction=c("both", "down", "up"), multiple = NA) {
#
# direction <- match.arg(direction)
#
# switch(direction
# , both={
# if(is.na(multiple)){
# res <- round(x, digits = digits)
# } else {
# res <- round(x/multiple) * multiple
# }
# }
# , down={
# if(is.na(multiple)){
# res <- floor(x, digits = digits)
# } else {
# res <- floor(x/multiple) * multiple
# }
# }
# , up={
# if(is.na(multiple)){
# res <- ceiling(x, digits = digits)
# } else {
# res <- ceiling(x/multiple) * multiple
# }
# }
# )
# return(res)
# }
Str <- function(x, ...){
  # str() wrapper: for data.frames the variables are numbered in the output
  # and long lines are cut (strict.width = "cut" by default).
  if(identical(class(x), "data.frame")) {
    args <- list(...)
    # Fixed: the default check used args["strict.width"] -- single-bracket
    # subsetting of a list never returns NULL, so the default was never
    # applied; [[ returns NULL for a missing element.
    if(is.null(args[["strict.width"]])) args[["strict.width"]] <- "cut"
    out <- .CaptOut(do.call(str, c(list(object=x), args)))
    # right-aligned running numbers for the "$ var" lines
    idx <- format(1:length(grep(pattern="^ \\$", out)))
    i <- 1
    j <- 1
    while(i <= length(out)) {
      if( length(grep(pattern="^ \\$", out[i])) > 0 ) {
        out[i] <- gsub(pattern="^ \\$", replacement= paste(" ", idx[j], " \\$", sep=""), out[i])
        j <- j + 1
      }
      i <- i + 1
    }
    res <- out
  } else {
    res <- str(x, ...)
  }
  cat(res, sep="\n")
  invisible(res)
}
# Generic: a random sample of n elements/rows of x, kept in original order
# (analogous to head()/tail(), but sampled from the whole object)
Some <- function(x, n = 6L, ...){
  UseMethod("Some")
}
Some.data.frame <- function (x, n = 6L, ...) {
  # Random sample of n rows of a data.frame, in their original order.
  # Negative n means "all but the last |n| rows" (as in head()).
  stopifnot(length(n) == 1L)
  nr <- nrow(x)
  size <- if (n < 0L) max(nr + n, 0L) else min(n, nr)
  x[sort(sample(nr, size)), , drop = FALSE]
}
Some.matrix <- function (x, n = 6L, addrownums = TRUE, ...) {
  # Random sample of n rows of a matrix, in their original order.
  # addrownums labels the result rows with their original indices when the
  # matrix has no rownames.
  stopifnot(length(n) == 1L)
  nrx <- nrow(x)
  n <- if (n < 0L)
    max(nrx + n, 0L)
  else min(n, nrx)
  # Fixed: the original called sample(nrow(x)) -- a full permutation --
  # so every row was returned regardless of n; sample n row indices only.
  sel <- sort(sample(nrx, n))
  ans <- x[sel, , drop = FALSE]
  if (addrownums && is.null(rownames(x)))
    rownames(ans) <- format(sprintf("[%d,]", sel), justify = "right")
  ans
}
Some.default <- function (x, n = 6L, ...) {
  # Random sample of n elements of a vector, in their original order.
  # Negative n means "all but the last |n| elements" (as in head()).
  stopifnot(length(n) == 1L)
  len <- length(x)
  size <- if (n < 0L) max(len + n, 0L) else min(n, len)
  x[sort(sample(len, size))]
}
# LsFct: names of all functions exported by an attached package,
# e.g. LsFct("DescTools")
LsFct <- function(package){
  as.vector(unclass(lsf.str(pos = gettextf("package:%s", package) )))
}
# LsData <- function(package){
# # example lsf("DescTools")
# ls(pos = gettextf("package:%s", package))
# as.vector(unclass(ls.str(gettextf("package:%s", package), mode="list")))
#
# }
# LsObj: names of all objects exported by an attached package
LsObj <- function(package){
  # example lsf("DescTools")
  ls(pos = gettextf("package:%s", package))
}
What <- function(x){
  # Summarize the type-related properties of an object in one named list:
  # mode, typeof, storage.mode, dim, length and class.
  props <- list(mode = mode, typeof = typeof, storage.mode = storage.mode,
                dim = dim, length = length, class = class)
  lapply(props, function(f) f(x))
}
# PDFManual: open the CRAN PDF reference manual of a package in the browser.
# The package name may be given unquoted (non-standard evaluation).
PDFManual <- function(package){
  package <- as.character(substitute(package))
  browseURL(paste("http://cran.r-project.org/web/packages/", package,"/", package, ".pdf", sep = ""))
}
# showPDFmanual <- function(package, lib.loc=NULL)
# {
# path <- find.package(package, lib.loc)
# system(paste(shQuote(file.path(R.home("bin"), "R")),
# "CMD", "Rd2pdf",
# shQuote(path)))
# }
###
## base: organisation, format, report and printing routines ====
# Mbind <- function(...){
# # matrix bind
# # function um n nxm-matrizen zu einem 3d-array zusammenzufassen
#
# arg.list <- list(...)
# # check dimensions, by compare the dimension of each matrix to the first
# if( !all( unlist(lapply(arg.list, function(m) all(unlist(dim(arg.list[[1]])) == unlist(dim(m)))) )))
# stop("Not all matrices have the same dimension!")
#
# ma <- array(unlist(arg.list), dim=c(nrow(arg.list[[1]]), ncol(arg.list[[2]]), length(arg.list)) )
# dimnames(ma) <- dimnames(arg.list[[1]])
# dimnames(ma)[[3]] <- if(is.null(names(arg.list))){1:length(arg.list)} else {names(arg.list)}
#
# return(ma)
# }
# Abind: bind an arbitrary number of vectors, matrices, data.frames or
# arrays together along one dimension (a generalisation of cbind/rbind to
# N dimensions). Arguments may be passed individually or as one list.
#   along   dimension to bind along (default: last); a fractional value
#           interpolates a NEW dimension between two existing ones
#   rev.along  count the binding dimension from the end instead
#   new.names  replacement dimnames for the result (list, or character
#           vector of argument names)
#   force.array  if FALSE and inputs are 2-d, fall back to cbind/rbind
#   make.names   derive argument names from the calling expressions
#   use.first.dimnames  prefer the first argument's dimnames over the last
#   hier.names   prefix ('before'/'after') along-dimnames with arg names
#   use.dnns     propagate names(dimnames()) too
# Returns the combined array (or NULL if all inputs are NULL).
Abind <- function(..., along=N, rev.along=NULL, new.names=NULL,
                  force.array=TRUE, make.names=FALSE,
                  use.first.dimnames=FALSE, hier.names=FALSE, use.dnns=FALSE) {
  if (is.character(hier.names))
    hier.names <- match.arg(hier.names, c('before', 'after', 'none'))
  else
    hier.names <- if (hier.names) 'before' else 'no'
  arg.list <- list(...)
  # a single (non-data.frame) list argument supplies all arrays at once
  if (is.list(arg.list[[1]]) && !is.data.frame(arg.list[[1]])) {
    if (length(arg.list)!=1)
      stop("can only supply one list-valued argument for ...")
    if (make.names)
      stop("cannot have make.names=TRUE with a list argument")
    arg.list <- arg.list[[1]]
    have.list.arg <- TRUE
  } else {
    N <- max(1, sapply(list(...), function(x) length(dim(x))))
    have.list.arg <- FALSE
  }
  if (any(discard <- sapply(arg.list, is.null)))
    arg.list <- arg.list[!discard]
  if (length(arg.list)==0)
    return(NULL)
  N <- max(1, sapply(arg.list, function(x) length(dim(x))))
  ## N will eventually be length(dim(return.value))
  if (!is.null(rev.along))
    along <- N + 1 - rev.along
  # a fractional 'along' creates a new dimension between two existing ones
  if (along < 1 || along > N || (along > floor(along) && along < ceiling(along))) {
    N <- N + 1
    along <- max(1, min(N+1, ceiling(along)))
  }
  ## this next check should be redundant, but keep it here for safety...
  if (length(along) > 1 || along < 1 || along > N + 1)
    stop(paste("\"along\" must specify one dimension of the array,",
               "or interpolate between two dimensions of the array",
               sep="\n"))
  if (!force.array && N==2) {
    if (!have.list.arg) {
      if (along==2)
        return(cbind(...))
      if (along==1)
        return(rbind(...))
    } else {
      if (along==2)
        return(do.call("cbind", arg.list))
      if (along==1)
        return(do.call("rbind", arg.list))
    }
  }
  if (along>N || along<0)
    stop("along must be between 0 and ", N)
  pre <- seq(from=1, len=along-1)
  post <- seq(to=N-1, len=N-along)
  ## "perm" specifies permutation to put join dimension (along) last
  perm <- c(seq(len=N)[-along], along)
  arg.names <- names(arg.list)
  if (is.null(arg.names)) arg.names <- rep("", length(arg.list))
  ## if new.names is a character vector, treat it as argument names
  if (is.character(new.names)) {
    arg.names[seq(along=new.names)[nchar(new.names)>0]] <-
      new.names[nchar(new.names)>0]
    new.names <- NULL
  }
  ## Be careful with dot.args, because if Abind was called
  ## using do.call(), and had anonymous arguments, the expressions
  ## returned by match.call() are for the entire structure.
  ## This can be a problem in S-PLUS, not sure about R.
  ## E.g., in this one match.call() returns compact results:
  ## > (function(...)browser())(1:10,letters)
  ## Called from: (function(...) browser())....
  ## b()> match.call(expand.dots=FALSE)$...
  ## list(1:10, letters)
  ## But in this one, match.call() returns evaluated results:
  ## > test <- function(...) browser()
  ## > do.call("test", list(1:3,letters[1:4]))
  ## Called from: test(c(1, 2, 3), c("a", "b....
  ## b(test)> match.call(expand.dots=FALSE)$...
  ## list(c(1, 2, 3), c("a", "b", "c", "d")
  ## The problem here was largely mitigated by making Abind()
  ## accept a single list argument, which removes most of the
  ## need for the use of do.call("Abind", ...)
  ## Create deparsed versions of actual arguments in arg.alt.names
  ## These are used for error messages
  if (any(arg.names=="")) {
    if (make.names) {
      ## Create dot.args to be a list of calling expressions for the objects to be bound.
      ## Be careful here with translation to R --
      ## dot.args does not have the "list" functor with R
      ## (and dot.args is not a call object), whereas with S-PLUS, dot.args
      ## must have the list functor removed
      dot.args <- match.call(expand.dots=FALSE)$... ## [[2]]
      if (is.call(dot.args) && identical(dot.args[[1]], as.name("list")))
        dot.args <- dot.args[-1]
      arg.alt.names <- arg.names
      for (i in seq(along=arg.names)) {
        if (arg.alt.names[i]=="") {
          if (object.size(dot.args[[i]])<1000) {
            arg.alt.names[i] <- paste(deparse(dot.args[[i]], 40), collapse=";")
          } else {
            arg.alt.names[i] <- paste("X", i, sep="")
          }
          arg.names[i] <- arg.alt.names[i]
        }
      }
      ## unset(dot.args) don't need dot.args any more, but R doesn't have unset()
    } else {
      arg.alt.names <- arg.names
      arg.alt.names[arg.names==""] <- paste("X", seq(along=arg.names), sep="")[arg.names==""]
    }
  } else {
    arg.alt.names <- arg.names
  }
  use.along.names <- any(arg.names!="")
  ## need to have here: arg.names, arg.alt.names, don't need dot.args
  names(arg.list) <- arg.names
  ## arg.dimnames is a matrix of dimension names, each element of the
  ## the matrix is a character vector, e.g., arg.dimnames[j,i] is
  ## the vector of names for dimension j of arg i
  arg.dimnames <- matrix(vector("list", N*length(arg.names)), nrow=N, ncol=length(arg.names))
  dimnames(arg.dimnames) <- list(NULL, arg.names)
  ## arg.dnns is a matrix of names of dimensions, each element is a
  ## character vector len 1, or NULL
  arg.dnns <- matrix(vector("list", N*length(arg.names)), nrow=N, ncol=length(arg.names))
  dimnames(arg.dnns) <- list(NULL, arg.names)
  dimnames.new <- vector("list", N)
  ## Coerce all arguments to have the same number of dimensions
  ## (by adding one, if necessary) and permute them to put the
  ## join dimension last.
  ## Create arg.dim as a matrix with length(dim) rows and
  ## length(arg.list) columns: arg.dim[j,i]==dim(arg.list[[i]])[j],
  ## The dimension order of arg.dim is original
  arg.dim <- matrix(integer(1), nrow=N, ncol=length(arg.names))
  for (i in seq(len=length(arg.list))) {
    m <- arg.list[[i]]
    m.changed <- FALSE
    ## be careful with conversion to array: as.array converts data frames badly
    if (is.data.frame(m)) {
      ## use as.matrix() in preference to data.matrix() because
      ## data.matrix() uses the unintuitive codes() function on factors
      m <- as.matrix(m)
      m.changed <- TRUE
    } else if (!is.array(m) && !is.null(m)) {
      if (!is.atomic(m))
        stop("arg '", arg.alt.names[i], "' is non-atomic")
      ## make sure to get the names of a vector and attach them to the array
      dn <- names(m)
      m <- as.array(m)
      if (length(dim(m))==1 && !is.null(dn))
        dimnames(m) <- list(dn)
      m.changed <- TRUE
    }
    new.dim <- dim(m)
    if (length(new.dim)==N) {
      ## Assign the dimnames of this argument to the i'th column of arg.dimnames.
      ## If dimnames(m) is NULL, would need to do arg.dimnames[,i] <- list(NULL)
      ## to set all elts to NULL, as arg.dimnames[,i] <- NULL does not actually
      ## change anything in S-PLUS (leaves whatever is there) and illegal in R.
      ## Since arg.dimnames has NULL entries to begin with, don't need to do
      ## anything when dimnames(m) is NULL
      if (!is.null(dimnames(m))) {
        arg.dimnames[,i] <- dimnames(m)
        if (use.dnns && !is.null(names(dimnames(m))))
          arg.dnns[,i] <- as.list(names(dimnames(m)))
      }
      arg.dim[,i] <- new.dim
    } else if (length(new.dim)==N-1) {
      ## add another dimension (first set dimnames to NULL to prevent errors)
      if (!is.null(dimnames(m))) {
        ## arg.dimnames[,i] <- c(dimnames(m)[pre], list(NULL), dimnames(m))[post]
        ## is equivalent to arg.dimnames[-N,i] <- dimnames(m)
        arg.dimnames[-along,i] <- dimnames(m)
        if (use.dnns && !is.null(names(dimnames(m))))
          arg.dnns[-along,i] <- as.list(names(dimnames(m)))
        ## remove the dimnames so that we can assign a dim of an extra length
        dimnames(m) <- NULL
      }
      arg.dim[,i] <- c(new.dim[pre], 1, new.dim[post])
      if (any(perm!=seq(along=perm))) {
        dim(m) <- c(new.dim[pre], 1, new.dim[post])
        m.changed <- TRUE
      }
    } else {
      stop("'", arg.alt.names[i], "' does not fit: should have `length(dim())'=",
           N, " or ", N-1)
    }
    if (any(perm!=seq(along=perm)))
      arg.list[[i]] <- aperm(m, perm)
    else if (m.changed)
      arg.list[[i]] <- m
  }
  ## Make sure all arguments conform
  conform.dim <- arg.dim[,1]
  for (i in seq(len=ncol(arg.dim))) {
    if (any((conform.dim!=arg.dim[,i])[-along])) {
      stop("arg '", arg.alt.names[i], "' has dims=", paste(arg.dim[,i], collapse=", "),
           "; but need dims=", paste(replace(conform.dim, along, "X"), collapse=", "))
    }
  }
  ## find the last (or first) names for each dimensions except the join dimension
  if (N>1)
    for (dd in seq(len=N)[-along]) {
      for (i in (if (use.first.dimnames) seq(along=arg.names) else rev(seq(along=arg.names)))) {
        if (length(arg.dimnames[[dd,i]]) > 0) {
          dimnames.new[[dd]] <- arg.dimnames[[dd,i]]
          if (use.dnns && !is.null(arg.dnns[[dd,i]]))
            names(dimnames.new)[dd] <- arg.dnns[[dd,i]]
          break
        }
      }
    }
  ## find or create names for the join dimension
  for (i in seq(len=length(arg.names))) {
    ## only use names if arg i contributes some elements
    if (arg.dim[along,i] > 0) {
      dnm.along <- arg.dimnames[[along,i]]
      if (length(dnm.along)==arg.dim[along,i]) {
        use.along.names <- TRUE
        if (hier.names=='before' && arg.names[i]!="")
          dnm.along <- paste(arg.names[i], dnm.along, sep=".")
        else if (hier.names=='after' && arg.names[i]!="")
          dnm.along <- paste(dnm.along, arg.names[i], sep=".")
      } else {
        ## make up names for the along dimension
        if (arg.dim[along,i]==1)
          dnm.along <- arg.names[i]
        else if (arg.names[i]=="")
          dnm.along <- rep("", arg.dim[along,i])
        else
          dnm.along <- paste(arg.names[i], seq(length=arg.dim[along,i]), sep="")
      }
      dimnames.new[[along]] <- c(dimnames.new[[along]], dnm.along)
    }
    if (use.dnns) {
      dnn <- unlist(arg.dnns[along,])
      if (length(dnn)) {
        if (!use.first.dimnames)
          dnn <- rev(dnn)
        names(dimnames.new)[along] <- dnn[1]
      }
    }
  }
  ## if no names at all were given for the along dimension, use none
  if (!use.along.names)
    dimnames.new[along] <- list(NULL)
  ## Construct the output array from the pieces.
  ## Could experiment here with more efficient ways of constructing the
  ## result than using unlist(), e.g.
  ##   out <- numeric(prod(c( arg.dim[-along,1], sum(arg.dim[along,]))))
  ## Don't use names in unlist because this can quickly exhaust memory when
  ## Abind is called with "do.call" (which creates horrendous names in S-PLUS).
  out <- array(unlist(arg.list, use.names=FALSE),
               dim=c( arg.dim[-along,1], sum(arg.dim[along,])),
               dimnames=dimnames.new[perm])
  ## permute the output array to put the join dimension back in the right place
  if (any(order(perm)!=seq(along=perm)))
    out <- aperm(out, order(perm))
  ## if new.names is list of character vectors, use whichever are non-null
  ## for dimension names, checking that they are the right length
  if (!is.null(new.names) && is.list(new.names)) {
    for (dd in seq(len=N)) {
      if (!is.null(new.names[[dd]])) {
        if (length(new.names[[dd]])==dim(out)[dd])
          dimnames(out)[[dd]] <- new.names[[dd]]
        else if (length(new.names[[dd]]))
          warning(paste("Component ", dd,
                        " of new.names ignored: has length ",
                        length(new.names[[dd]]), ", should be ",
                        dim(out)[dd], sep=""))
      }
      if (use.dnns && !is.null(names(new.names)) && names(new.names)[dd]!='')
        names(dimnames(out))[dd] <- names(new.names)[dd]
    }
  }
  if (use.dnns && !is.null(names(dimnames(out))) && any(i <- is.na(names(dimnames(out)))))
    names(dimnames(out))[i] <- ''
  out
}
# *********************************** 12.12.2014
# stack/unstack does exactly that
# ToLong <- function(x, varnames=NULL){
# lst <- as.list(x)
# res <- data.frame(rep(names(lst), lapply(lst, length)), unlist(lst))
# rownames(res) <- NULL
# if(is.null(varnames)) varnames <- c("grp","x")
# colnames(res) <- varnames
# return(res)
# }
ToLong <- function (x, varnames = NULL) {
  # Stack a data.frame, matrix, table or list into "long" format: one row
  # per value, with the originating column/element name in the first
  # variable ("grp") and the value in the second ("x").
  #   varnames  optional character vector of length 2 with the result's
  #             column names (defaults to c("grp", "x"))
  if(!is.list(x)) {
    if(is.matrix(x) || is.table(x))
      x <- as.data.frame(x)
    lst <- as.list(x)
  } else {
    lst <- x
  }
  grpnames <- names(lst)
  if(is.null(grpnames)) grpnames <- paste("X", 1:length(lst), sep="")
  # repeat each group name once per element of its component
  res <- data.frame(rep(grpnames, lengths(lst)), unlist(lst))
  rownames(res) <- NULL
  if (is.null(varnames))
    varnames <- c("grp", "x")
  colnames(res) <- varnames
  # BUG FIX: only build "<rowname>.<group>" rownames when x actually has
  # rownames whose crossing with the groups matches the result size.
  # The original assigned them unconditionally, which errored for plain
  # lists and vectors (rownames(x) is NULL there, yielding a zero-length
  # rownames assignment).
  rn <- rownames(x)
  if (!is.null(rn) && length(rn) * length(grpnames) == nrow(res))
    rownames(res) <- do.call(paste, c(expand.grid(rn, grpnames), sep="."))
  return(res)
}
ToWide <- function(x, g, by=NULL, varnames=NULL){
  # Unstack x into "wide" format: one column per level of the grouping
  # variable g. Rows are matched via 'by' (a key present in every group)
  # or, if by is NULL, via row names. Missing combinations become NA.
  #   varnames  optional column names for the result; defaults to the
  #             levels of g (plus a leading "by" column when by is given)
  # BUG FIX: coerce g to a factor *before* deriving the default variable
  # names; levels() on a plain character vector is NULL, which previously
  # produced too-short 'varnames' and a dimnames-length error.
  g <- factor(g)
  if(is.null(varnames))
    varnames <- levels(g)
  if(is.null(by)){
    by <- "row.names"
  } else {
    # carry the key along as an extra column so merge() can use it
    x <- data.frame(x, idx=by)
    by <- "idx"
    varnames <- c("by", varnames)
  }
  s <- split(x, g)
  res <- Reduce(function(x, y) {
    z <- merge(x, y, by=by, all.x=TRUE, all.y=TRUE)
    # drop the "Row.names" helper column that merge() adds
    if(by=="row.names") z <- z[, -grep("Row.names", names(z))]
    return(z)
  }, s)
  colnames(res) <- varnames
  return(res)
}
# ToWide <- function(x, g, varnames=NULL){
# g <- factor(g)
# res <- do.call("cbind", split(x, g))
# if(is.null(varnames)) varnames <- levels(g)
# colnames(res) <- varnames
# return(res)
# }
CatTable <- function( tab, wcol, nrepchars, width=getOption("width") ) {
  # Print a wide fixed-width "table" (a character vector of equally long
  # lines, one element per table row) in several horizontal chunks so it
  # fits the console width. The first nrepchars characters of every row
  # (the row labels) are repeated in front of each chunk.
  #   tab        character vector, one element per table row
  #   wcol       width in characters of one data column
  #   nrepchars  number of leading label characters repeated per chunk
  #   width      target output width, defaults to getOption("width")
  # How many data columns fit completely on one output line?
  ncols <- ( width - nrepchars ) %/% wcol
  # How many chunks (blocks of output lines) result?
  nrows <- ((nchar(tab[1]) - nrepchars) %/% wcol) / ncols +
    (((nchar(tab[1]) - nrepchars) %% wcol ) > 0) *1 # remainder line
  for( i in 1:nrows ) {
    for( j in 1:length(tab) ){
      # cat( i, nrepchars + 1 + (i-1)*(ncols*wcol-4), nrepchars + i*ncols*wcol-5, "\n")
      # row label followed by the i-th slice of the data portion
      cat( substr(tab[j],1,nrepchars)
           , substr(tab[j], nrepchars + 1 + (i-1)*(ncols*wcol), nrepchars + 1 + i*ncols*wcol-1 )
           , "\n", sep="" )
    }
    cat( "\n" )
  }
}
.CaptOut <- function(..., file = NULL, append = FALSE, width=150) {
  # Internal clone of utils::capture.output() with a controllable console
  # width: evaluates the expressions in ..., diverts everything they print
  # via sink() and returns the output as a character vector -- or writes
  # it to 'file' (path or connection), appending if requested.
  # Returns invisible(NULL) when nothing was captured into 'rval'.
  opt <- options(width=width)
  args <- substitute(list(...))[-1L]
  rval <- NULL
  closeit <- TRUE
  if (is.null(file))
    file <- textConnection("rval", "w", local = TRUE)
  else if (is.character(file))
    file <- file(file, if (append)
      "a"
    else "w")
  else if (inherits(file, "connection")) {
    if (!isOpen(file))
      open(file, if (append)
        "a"
      else "w")
    else closeit <- FALSE
  }
  else stop("'file' must be NULL, a character string or a connection")
  sink(file)
  # safety net: undo the sink and restore options should evaluation fail
  on.exit({
    sink()
    if (closeit) close(file)
    options(opt)
  })
  pf <- parent.frame()
  # evaluate in the caller's frame; withVisible tells us whether the
  # result would have auto-printed at top level
  evalVis <- function(expr) withVisible(eval(expr, pf))
  for (i in seq_along(args)) {
    expr <- args[[i]]
    tmp <- switch(mode(expr), expression = lapply(expr, evalVis),
                  call = , name = list(evalVis(expr)), stop("bad argument"))
    for (item in tmp) if (item$visible)
      print(item$value)
  }
  # normal exit: downgrade the handler, then tear down explicitly in order
  on.exit(options(opt))
  sink()
  if (closeit)
    close(file)
  if (is.null(rval))
    invisible(NULL)
  else rval
}
Ndec <- function(x) {
  # Return the number of decimal places of each element of a character
  # vector of numbers, ignoring any exponent part,
  # e.g. Ndec(c("1.25", "3", "2.5e3")) -> c(2, 0, 1).
  # (Alternative: format.info() -- [1] width, [2] digits, [3] exponential.)
  # FIX: is.character() instead of class(x)=="character" -- the class
  # comparison breaks for objects with more than one class attribute.
  stopifnot(is.character(x))
  res <- rep(0, length(x))
  # strip any exponent part ("e"/"E" and everything after it)
  x <- gsub(pattern="[eE].+$", replacement="", x=x)
  # count characters after the decimal point, where one exists
  has.dec <- grep("\\.", x)
  res[has.dec] <- nchar( sub("^.+[.]","",x) )[has.dec]
  return(res)
}
Prec <- function (x) {
  # Function to return the most precise
  # digit from a vector of real numbers
  # Keep dividing by powers of 10 (pos and neg from trunc(log(max(x)) down)
  # until the fractional portion is zero, then we have the highest precision
  # digit in terms of a integer power of 10.
  # Thanks to Thomas Lumley for help with machine precision
  # Note: Turn this into a standalone function for "regularizing" a
  # time-activity object with irregular time breaks.
  # Returns 10^k, the place value of the least significant digit over the
  # whole vector, e.g. Prec(1.235) -> 0.001, Prec(1250) -> 10.
  # NOTE(review): log10(max(x)) assumes max(x) > 0 -- confirm for callers.
  init <- trunc(log10(max(x))) + 1
  zero <- 0
  y <- 1
  while (any(y > zero)) {
    init <- init - 1
    x1 <- x*10^(-init)
    y <- x1 - trunc(x1)
    # tolerance scales with magnitude to absorb floating point error
    zero <- max(x1)*.Machine$double.eps
  }
  10^init
  # sapply(c(1.235, 125.3, 1245), prec)
}
# other idea:
# precision <- function(x) {
# rng <- range(x, na.rm = TRUE)
#
# span <- if (zero_range(rng)) rng[1] else diff(rng)
# 10 ^ floor(log10(span))
# }
# References:
# http://stackoverflow.com/questions/3443687/formatting-decimal-places-in-r
# http://my.ilstu.edu/~jhkahn/apastats.html
# https://en.wikipedia.org/wiki/Significant_figures
# http://www.originlab.com/doc/Origin-Help/Options-Dialog-NumFormat-Tab
# Generic number/date formatter. Dispatches on the class of x; the actual
# formatting rules (digits, scientific bounds, thousands separator,
# leading zeros, special formats via 'fmt' such as "%", "p", "*", "e",
# "eng", date templates or a "fmt" template object) are documented on
# Format.default. All defaults are NULL so that a supplied "fmt" template
# can be distinguished from user-set values.
Format <- function(x, digits = NULL, sci = NULL
                   , big.mark=NULL, leading = NULL
                   , zero.form = NULL, na.form = NULL
                   , fmt = NULL, align = NULL, width = NULL
                   , lang = NULL, ...){
  UseMethod("Format")
}
Format.data.frame <- function(x, digits = NULL, sci = NULL
                              , big.mark=NULL, leading = NULL
                              , zero.form = NULL, na.form = NULL
                              , fmt = NULL, align = NULL, width = NULL, lang = NULL, ...){
  # Format a data.frame column by column with identical settings,
  # preserving the data.frame structure, then tag the result with the
  # "Format" class so it prints unquoted and right-aligned.
  for (j in seq_along(x))
    x[[j]] <- Format(x[[j]], digits = digits, sci = sci,
                     big.mark = big.mark, leading = leading,
                     zero.form = zero.form, na.form = na.form,
                     fmt = fmt, align = align, width = width,
                     lang = lang, ...)
  class(x) <- c("Format", class(x))
  return(x)
}
Format.matrix <- function(x, digits = NULL, sci = NULL
                          , big.mark=NULL, leading = NULL
                          , zero.form = NULL, na.form = NULL
                          , fmt = NULL, align = NULL, width = NULL, lang = NULL, ...){
  # Format all cells at once with Format.default; writing the result back
  # via x[,] keeps the dim and dimnames of the original matrix.
  fx <- Format.default(x=x, digits=digits, sci=sci, big.mark=big.mark,
                       leading=leading, zero.form=zero.form, na.form=na.form,
                       fmt=fmt, align=align, width=width, lang=lang, ...)
  x[,] <- fx
  class(x) <- c("Format", class(x))
  return(x)
}
Format.table <- function(x, digits = NULL, sci = NULL
                         , big.mark = NULL, leading = NULL
                         , zero.form = NULL, na.form = NULL
                         , fmt = NULL, align = NULL, width = NULL, lang = NULL, ...){
  # Format all cells of a table at once; assigning via x[] keeps the
  # table's dim and dimnames intact.
  fx <- Format.default(x=x, digits=digits, sci=sci, big.mark=big.mark,
                       leading=leading, zero.form=zero.form, na.form=na.form,
                       fmt=fmt, align=align, width=width, lang=lang, ...)
  x[] <- fx
  class(x) <- c("Format", class(x))
  return(x)
}
as.CDateFmt <- function(fmt) {
  # Translate an Excel-style date format (e.g. "yyyy-mm-dd") into a
  # C/strptime format string (e.g. "%Y-%m-%d") for use with format().
  # Fine format codes: http://www.autohotkey.com/docs/commands/FormatTime.htm
  #
  # Strategy (same as the original hand-unrolled version): each recognised
  # token is replaced in fmt by a literal backreference "\i"; in parallel a
  # capture pattern (pat) and the C format fields (fpat) are accumulated.
  # The final sub() then rearranges the C fields into the user's layout,
  # separators included.
  #
  # Token table, in replacement order (longest tokens first, so e.g.
  # "dddd" is consumed before "dd"). Note the detection patterns are
  # d{4} etc., not \bd{4}\b, so compact formats like "yyyymmdd" work too.
  # Single-letter tokens accept an optional leading zero ("0?").
  spec <- list(
    list(code = "d{4}", token = "dddd", grp = "(.+)-",   fld = "%A-"),
    list(code = "d{3}", token = "ddd",  grp = "(.+)-",   fld = "%a-"),
    list(code = "d{2}", token = "dd",   grp = "(.+)-",   fld = "%d-"),
    list(code = "d{1}", token = "d",    grp = "0?(.+)-", fld = "%e-"),
    list(code = "m{4}", token = "mmmm", grp = "(.+)-",   fld = "%B-"),
    list(code = "m{3}", token = "mmm",  grp = "(.+)-",   fld = "%b-"),
    list(code = "m{2}", token = "mm",   grp = "(.+)-",   fld = "%m-"),
    list(code = "m{1}", token = "m",    grp = "0?(.+)-", fld = "%m-"),
    list(code = "y{4}", token = "yyyy", grp = "(.+)-",   fld = "%Y-"),
    list(code = "y{2}", token = "yy",   grp = "(.+)-",   fld = "%y-"),
    list(code = "y{1}", token = "y",    grp = "0?(.+)-", fld = "%y-")
  )
  pat <- ""
  fpat <- ""
  i <- 1
  for (s in spec) {
    if (length(grep(s$code, fmt)) > 0) {
      # put the literal two-character string "\i" into fmt; it acts as a
      # backreference when fmt is used as the replacement in sub() below
      fmt <- gsub(pattern = s$token, replacement = paste("\\\\", i, sep = ""), x = fmt)
      pat <- paste(pat, s$grp, sep = "")
      fpat <- paste(fpat, s$fld, sep = "")
      i <- i + 1
    }
  }
  sub(pat, fmt, fpat)
}
# Workhorse of Format(): formats numeric, Date, character and factor
# vectors. Special 'fmt' values: "*" significance stars, "p" p-values,
# "e" scientific, "%" percentages, "eng"/"engabb" engineering notation,
# "frac" fractions, a date template (e.g. "yyyy-mm-dd"), or a "fmt"
# template object whose entries may be overridden by the other arguments.
# 'sci' gives the exponent bounds (recycled to length 2: upper/lower)
# beyond which scientific notation is used; NA suppresses it entirely.
Format.default <- function(x, digits = NULL, sci = NULL
                           , big.mark = NULL, leading = NULL
                           , zero.form = NULL, na.form = NULL
                           , fmt = NULL, align = NULL, width = NULL, lang = NULL, ...){
  .format.pval <- function(x){
    # format p-values *********************************************************
    # this is based on original code from format.pval
    r <- character(length(is0 <- x < eps))
    if (any(!is0)) {
      rr <- x <- x[!is0]
      expo <- floor(log10(ifelse(x > 0, x, 1e-50)))
      fixp <- (expo >= -3)
      if (any(fixp))
        rr[fixp] <- format(x[fixp], digits = 4)
      if (any(!fixp))
        rr[!fixp] <- format(x[!fixp], digits=3, scientific=TRUE)
      r[!is0] <- rr
    }
    if (any(is0)) {
      # values below machine precision are reported as "< eps"
      r[is0] <- gettextf("< %s", format(eps, digits = 2))
    }
    return(r)
  }
  .format.stars <- function(x){
    # format significance stars ***************************************************
    # example: Format(c(0.3, 0.08, 0.042, 0.001), fmt="*")
    breaks <- c(0,0.001,0.01,0.05,0.1,1)
    labels <- c("***","** ","*  ",".  ","   ")
    res <- as.character(sapply(x, cut, breaks=breaks, labels=labels, include.lowest=TRUE))
    return(res)
  }
  .leading.zero <- function(x, n){
    # just add a given number of leading zeros
    # split at the .
    z <- strsplit(as.character(x), split=".", fixed = TRUE)
    # left side
    zl <- lapply(z, "[", 1)
    # NOTE(review): (x<0) here compares a character string, relying on
    # lexical ordering to detect a leading "-" -- confirm intended.
    zl <- sapply(zl, function(x) sprintf(paste0("%0", n + (x<0)*1, "i"), as.numeric(x)))
    # right side
    zr <- sapply(z, "[", 2)
    zr <- ifelse(is.na(zr), "", paste(".", zr, sep=""))
    paste(zl, zr, sep="")
  }
  .format.eng <- function(x, digits = NULL, leading = NULL
                          , zero.form = NULL, na.form = NULL){
    # engineering notation: exponents restricted to multiples of 3
    s <- lapply(strsplit(format(x, scientific=TRUE), "e"), as.numeric)
    y <- unlist(lapply(s, "[[", 1))
    pwr <- unlist(lapply(s, "[", 2))
    return(paste(Format(y * 10^(pwr %% 3), digits=digits, leading=leading,
                        zero.form = zero.form, na.form=na.form)
                 , "e"
                 , c("-","+")[(pwr >= 0) + 1]
                 , Format(abs((pwr - (pwr %% 3))), leading = "00", digits=0)
                 , sep="")
    )
  }
  .format.engabb <- function(x, digits = NULL, leading = NULL
                             , zero.form = NULL, na.form = NULL){
    # engineering notation with SI prefix abbreviations (k, M, ...),
    # looked up in the d.prefix table
    s <- lapply(strsplit(format(x, scientific=TRUE), "e"), as.numeric)
    y <- unlist(lapply(s, "[[", 1))
    pwr <- unlist(lapply(s, "[", 2))
    a <- paste("1e"
               , c("-","+")[(pwr >= 0) + 1]
               , Format(abs((pwr - (pwr %% 3))), leading = "00", digits=0)
               , sep="")
    am <- Lookup(as.numeric(a), d.prefix$mult, d.prefix$abbr)
    a[!is.na(am)] <- am[!is.na(am)]
    a[a == "1e+00"] <- ""
    return(paste(Format(y * 10^(pwr %% 3), digits=digits, leading=leading,
                        zero.form = zero.form, na.form=na.form)
                 , " " , a
                 , sep="")
    )
  }
  # We accept here a fmt class to be used as user templates
  # example:
  #
  # fmt.int <- structure(list(
  #   digits = 5, sci = getOption("scipen"), big.mark = "",
  #   leading = NULL, zero.form = NULL, na.form = NULL,
  #   align = "left", width = NULL, txt="(%s), %s - CHF"), class="fmt"
  # )
  #
  # Format(7845, fmt=fmt.int)
  if(is.null(fmt)) fmt <- ""
  # NOTE(review): class(fmt)=="fmt" warns/errs for multi-class objects in
  # newer R; inherits() would be safer -- confirm before changing.
  if(class(fmt) == "fmt") {
    # we want to offer the user the option to overrun format definitions
    # consequence is, that all defaults of the function must be set to NULL
    # as we cannot distinguish between defaults and user sets else
    if(!is.null(digits)) fmt$digits <- digits
    if(!is.null(sci)) fmt$sci <- sci
    if(!is.null(big.mark)) fmt$big.mark <- big.mark
    if(!is.null(leading)) fmt$leading <- leading
    if(!is.null(zero.form)) fmt$zero.form <- zero.form
    if(!is.null(na.form)) fmt$na.form <- na.form
    if(!is.null(align)) fmt$align <- align
    # NOTE(review): 'width' is stored into fmt$sci here -- looks like a
    # copy-paste slip (should presumably be fmt$width); confirm upstream.
    if(!is.null(width)) fmt$sci <- width
    if(!is.null(lang)) fmt$lang <- lang
    return(do.call(Format, c(fmt, x=list(x))))
  }
  # The defined decimal character:
  # getOption("OutDec")
  # set the defaults, if user says nothing
  if(is.null(sci))
    if(is.null(digits)){
      # if given digits and sci NULL set sci to Inf
      sci <- getOption("scipen", default = 7)
    } else {
      sci <- Inf
    }
  if(is.null(big.mark)) big.mark <- ""
  if(is.null(na.form)) na.form <- "NA"
  # strip NAs now, re-insert na.form at the end
  if ((has.na <- any(ina <- is.na(x))))
    x <- x[!ina]
  eps <- .Machine$double.eps
  sci <- rep(sci, length.out=2)
  if(all(class(x) == "Date")) {
    # the language is only needed for date formats, so avoid looking up the option
    # for other types
    if(is.null(lang)) lang <- DescToolsOptions("lang")
    if(lang=="engl"){
      # temporarily switch to the C locale for English month/day names
      loc <- Sys.getlocale("LC_TIME")
      Sys.setlocale("LC_TIME", "C")
      on.exit(Sys.setlocale("LC_TIME", loc))
    }
    r <- format(x, as.CDateFmt(fmt=fmt))
  } else if(all(class(x) %in% c("character","factor","ordered"))) {
    r <- format(x)
  } else if(fmt=="*"){
    r <- .format.stars(x)
  } else if(fmt=="p"){
    r <- .format.pval(x)
  } else if(fmt=="eng"){
    r <- .format.eng(x, digits=digits, leading=leading, zero.form=zero.form, na.form=na.form)
  } else if(fmt=="engabb"){
    r <- .format.engabb(x, digits=digits, leading=leading, zero.form=zero.form, na.form=na.form)
  } else if(fmt=="e"){
    r <- formatC(x, digits = digits, width = width, format = "e",
                 big.mark=big.mark, zero.print = zero.form)
  } else if(fmt=="%"){
    r <- paste(suppressWarnings(formatC(x * 100, digits = digits, width = width, format = "f",
                                        big.mark=big.mark, drop0trailing = FALSE)),
               "%", sep="")
  } else if(fmt=="frac"){
    r <- as.character(MASS::fractions(x))
  } else {   # format else ********************************************
    if(all(is.na(sci))) {
      # use is.na(sci) to inhibit scientific notation
      r <- formatC(x, digits = digits, width = width, format = "f",
                   big.mark=big.mark)
    } else {
      # idx marks the values to be rendered in scientific notation
      idx <- (((abs(x) > .Machine$double.eps) & (abs(x) <= 10^-sci[2])) | (abs(x) >= 10^sci[1]))
      r <- as.character(rep(NA, length(x)))
      # use which here instead of res[idx], because of NAs
      # formatC is barking, classes are of no interest here, so suppress warning...
      # what's that exactly??
      r[which(idx)] <- suppressWarnings(formatC(x[which(idx)], digits = digits, width = width, format = "e",
                                                big.mark=big.mark, drop0trailing = FALSE))
      # Warning messages:
      #   1: In formatC(x[which(!idx)], digits = digits, width = width, format = "f",  :
      #                   class of 'x' was discarded
      # formatC is barking, classes are of no interest here, so suppress warning...
      r[which(!idx)] <- suppressWarnings(formatC(x[which(!idx)], digits = digits, width = width, format = "f",
                                                 big.mark=big.mark, drop0trailing = FALSE))
    }
    if(!is.null(leading)){
      # handle leading zeros ------------------------------
      if(leading %in% c("","drop")) {
        # drop leading zeros
        r <- gsub("(?<![0-9])0+\\.", "\\.", r, perl = TRUE)
        # alternative:
        # res <- gsub("(-?)[^[:digit:]]0+\\.", "\\.", res)
        # old: mind the minus
        # res <- gsub("[^[:digit:]]0+\\.","\\.", res)
      } else if(grepl("^[0]*$", leading)){
        # leading contains only zeros, so let's use them as leading zeros
        # old:
        # n <- nchar(leading) - unlist(lapply(lapply(strsplit(res, "\\."), "[", 1), nchar))
        # old: did not handle - correctly
        # res <- StrPad(res, pad = "0", width=nchar(res) + pmax(n, 0), adj="right")
        r <- .leading.zero(r, nchar(leading))
      }
    }
  }
  if(!is.null(zero.form))
    r[abs(x) < eps] <- zero.form
  if (has.na) {
    # re-insert the NA placeholders at their original positions
    rok <- r
    r <- character(length(ina))
    r[!ina] <- rok
    r[ina] <- na.form
  }
  if(!is.null(align)){
    r <- StrAlign(r, sep = align)
  }
  class(r) <- c("Format", class(r))
  return(r)
}
print.Format <- function (x, ...) {
  # Print a Format object like its underlying vector: strip the "Format"
  # class tag and let the next print method render it, unquoted and
  # right-aligned.
  cls <- class(x)
  class(x) <- cls[cls != "Format"]
  NextMethod("print", quote = FALSE, right = TRUE, ...)
}
Fmt <- function(...){
  # get format templates and modify on the fly, e.g. other digits
  # x is the name of the template
  #
  # Usage patterns (see functionality comment below):
  #   Fmt()                       all defined templates
  #   Fmt("abs")                  one template by name
  #   Fmt("abs", digits=3)        template with overridden entries
  #   Fmt(c("abs","per"))         several templates
  #   Fmt(nob=as.Fmt(...))        register a new template in the options
  # Built-in fall-back templates; "abs", "per" and "num" are always
  # available even when not set via DescToolsOptions("fmt").
  def <- structure(
    list(
      abs=structure(list(digits = 0, big.mark = "'"),
                    label = "Number format for counts",
                    name="abs",
                    default=TRUE, class = "fmt"),
      per=structure(list(digits = 1, fmt = "%"),
                    label = "Percentage number format",
                    name="per",
                    default=TRUE, class = "fmt"),
      num=structure(list(digits = 0, big.mark = "'"),
                    label = "Number format for floating points",
                    name="num",
                    default=TRUE, class = "fmt")
    ), name="fmt")
  # get a format from the fmt templates options
  res <- DescToolsOptions("fmt")
  # find other defined fmt in .GlobalEnv and append to list
  #   found <- ls(parent.frame())[ lapply(lapply(ls(parent.frame()), function(x) gettextf("class(%s)", x)),
  #                  function(x) eval(parse(text=x))) == "fmt" ]
  #   if(length(found)>0){
  #     udf <- lapply(found, function(x) eval(parse(text=x)))
  #     names(udf) <- found
  #   }
  # collect all found formats, defaults included if not set as option
  # abs, per and num must always be available, even if not explicitly defined
  res <- c(res, def[names(def) %nin% names(res)]) #, udf)
  # get additional arguments
  dots <- list(...)
  # leave away all NULL values, these should not overwrite the defaults below
  #dots <- dots[!is.null(dots)]
  # functionality:
  #   Fmt()                 return all from options
  #   Fmt("abs")            return abs
  #   Fmt("abs", digits=3)  return abs with updated digits
  #   Fmt(c("abs","per"))   return abs and per
  #   Fmt(nob=as.Fmt(digits=10, na.form="nodat"))   set nob
  if(length(dots)==0){
    # no arguments supplied
    # return list of defined formats
    # just return(res)
  } else {
    # some dots supplied
    # if first unnamed and the rest named, take as format name and overwrite other
    if(is.null(names(dots))){
      # if not names at all
      # select the requested ones by name, the unnamed ones
      fnames <- unlist(dots[is.null(names(dots))])
      res <- res[fnames]
      # return(res)
    } else {
      if(all(names(dots)!="")){
        # if only names (no unnamed), take name as format name and define format
        # (stored persistently in the DescTools options; old value returned
        # invisibly, mirroring the options() contract)
        old <- options("DescTools")[[1]]
        opt <- old
        for(i in seq_along(dots))
          attr(dots[[i]], "name") <- names(dots)[[i]]
        opt$fmt[[names(dots)]] <- dots[[names(dots)]]
        options(DescTools=opt)
        # same behaviour as options
        invisible(old)
      } else {
        # select the requested ones by name, the unnamed ones
        fnames <- unlist(dots[names(dots)==""])
        res <- res[fnames]
        # modify additional arguments in the template definition
        for(z in names(res)){
          if(!is.null(res[[z]])){
            # use named dots, but only those which are not NULL
            idx <- names(dots) != "" & !sapply(dots[names(dots)], is.null)
            # res[[z]][names(dots[names(dots)!=""])] <- dots[names(dots)!=""]
            res[[z]][names(dots[idx])] <- dots[idx]
          }
        }
        # return(res)
      }
    }
  }
  # simplify list
  if(length(res)==1) res <- res[[1]]
  return(res)
}
#
#
# # define some format templates
# .fmt_abs <- function()
# getOption("fmt.abs", structure(list(digits=0,
# big.mark="'"), class="fmt"))
# # there is an option Sys.localeconv()["thousands_sep"], but we can't change it
#
# .fmt_per <- function(digits=NULL){
#
# # we could use getOption("digits") as default here, but this is normally not a good choice
# # as numeric digits and percentage digits usually differ
# res <- getOption("fmt.per", structure(list(digits=1,
# fmt="%"), class="fmt"))
# # overwrite digits if given
# if(!is.null(digits))
# res["digits"] <- digits
# return(res)
# }
#
# .fmt_num <- function(digits = NULL){
# # check if fmt is defined
# res <- getOption("fmt.num")
#
# # if not: use a default, based on digfix
# if(is.null(res))
# res <- structure(list(digits=Coalesce(digits, DescToolsOptions("digits"), 3),
# big.mark=Sys.localeconv()["thousands_sep"]),
# class="fmt")
# else
# # if exists overwrite digits
# if(!is.null(digits)) res$digits <- digits
# # what should we do, when digits are neither defined in fmt.num nor given
# # in case the fmt.num exists?
#
# return(res)
# }
# .fmt <- function()
# getOption("fmt", default = list(
# per=structure(list(digits=1, fmt="%"), name="per", label="Percentage number format", class="fmt")
# , num=structure(list(digits=getOption("digfix", default=3), big.mark=Sys.localeconv()["thousands_sep"]), name="num", label="Number format for floating points", class="fmt")
# , abs=structure(list(digits=0, big.mark=Sys.localeconv()["thousands_sep"]), name="abs", label="Number format for counts", class="fmt")
# ) )
#
print.fmt <- function(x, ...){
  # Print method for a single "fmt" template: shows its name (flagging
  # built-in defaults), its description label, its definition and a
  # formatted example value.
  .deparse_def <- function(tpl){
    vals <- tpl
    chr <- unlist(lapply(vals, inherits, "character"))
    # re-quote character entries so the definition prints as typed
    vals[chr] <- shQuote(vals[chr])
    paste(names(vals), "=", vals, sep = "", collapse = ", ")
  }
  cat(gettextf("Format name: %s%s\n", attr(x, "name"),
               ifelse(identical(attr(x, "default"), TRUE), " (default)", "")),
      gettextf("Description: %s\n", Label(x)),
      gettextf("Definition: %s\n", .deparse_def(x)),
      gettextf("Example: %s\n", Format(pi * 1e5, fmt = x))
  )
}
Frac <- function(x, dpwr = NA) {
  # Fractional part of 'x' with the sign discarded, e.g. Frac(3.75) -> 0.75.
  # If 'dpwr' is a number, the fraction is scaled by 10^dpwr and rounded,
  # e.g. Frac(12.3456, dpwr = 2) -> 35.
  #
  # Fix: scaling used to be triggered by missing(dpwr), so explicitly
  # passing the documented default NA returned NA instead of the plain
  # fraction; NA now consistently means "no scaling".
  res <- abs(x) %% 1
  # Alternative: res <- abs(x-trunc(x))
  if (!is.na(dpwr)) res <- round(10^dpwr * res)
  res
}
MaxDigits <- function(x){
  # Maximum number of decimal digits used by the elements of 'x', based
  # on their default character representation and the current OutDec
  # decimal separator. Integers (no decimal part) yield 0.
  parts <- strsplit(as.character(x),
                    split = getOption("OutDec"), fixed = TRUE)
  dec <- na.omit(unlist(lapply(parts, function(p) p[2])))
  if (length(dec) == 0)
    0
  else
    max(nchar(dec))
  # Alternative separator source: Sys.localeconv()["decimal_point"]
}
Recycle <- function(...){
  # Recycle all arguments to the length of the longest one and record
  # that common length in the attribute "maxdim".
  args <- list(...)
  n <- max(unlist(lapply(args, length)))
  out <- lapply(args, rep_len, length.out = n)
  attr(out, "maxdim") <- n
  out
}
###
## stats: strata sampling ----------------
Strata <- function (x, stratanames = NULL, size = 1,
                    method = c("srswor", "srswr", "poisson", "systematic"),
                    pik, description = FALSE) {
  # Draw a stratified sample from the data frame 'x'.
  #
  # x           data frame to sample from
  # stratanames names of the columns defining the strata
  # size        sample size per stratum, recycled over the strata grid
  # method      sampling scheme; only "srswor"/"srswr" are implemented,
  #             "poisson" and "systematic" currently fall back to simple
  #             random sampling without replacement (as in the original
  #             placeholder branches)
  # pik, description  accepted for interface compatibility; unused here
  #
  # Returns the sampled rows of 'x' with the added columns 'stratum'
  # (stratum id), 'size' (requested size) and 'id' (row id after merge).
  #
  # Fix/cleanup: the three method branches were byte-identical copies of
  # the same sampling code; they are collapsed into one helper without
  # changing behaviour. The unused 'n <- table(x$stratum)' was removed.

  method <- match.arg(method, c("srswor", "srswr", "poisson", "systematic"))

  # find non factors in stratanames
  factor_fg <- unlist(lapply(x[, stratanames, drop=FALSE], is.factor))
  # factorize nonfactors, get their levels and combine with levels of existing factors
  lvl <- c(lapply(lapply(x[, names(which(!factor_fg)), drop=FALSE], factor), levels)
           , lapply(x[, names(which(factor_fg)), drop=FALSE], levels))

  # get the stratanames in the given order and enumerate the strata
  strat <- expand.grid(lvl[stratanames])
  strat$stratum <- factor(1:nrow(strat))
  # set the size for the strata to sample (recycled over all strata)
  strat$size <- rep(size, length.out=nrow(strat))

  # attach stratum id and requested size to every observation
  x <- merge(x, strat)
  x$id <- 1:nrow(x)

  # sample one stratum: size[1] rows, with replacement only for "srswr";
  # empty strata are passed through unchanged
  .SampleStratum <- function(z) {
    if (nrow(z) > 0) {
      idx <- sample(x = nrow(z), size = z$size[1], replace = (method == "srswr"))
      z[idx, ]
    } else {
      z
    }
  }

  # TODO: implement genuine "poisson" and "systematic" schemes (would use
  # 'pik'); until then all methods run the simple random sampler above.
  res <- do.call(rbind, lapply(split(x, x$stratum), .SampleStratum))

  return(res)
}
# Strata <- function (data, stratanames = NULL, size,
# method = c("srswor", "srswr", "poisson", "systematic"),
# pik, description = FALSE)
# {
#
# # Author: Yves Tille <yves.tille@unine.ch>, Alina Matei <alina.matei@unine.ch>
# # source: library(sampling)
#
# inclusionprobabilities <- function (a, n)
# {
# nnull = length(a[a == 0])
# nneg = length(a[a < 0])
# if (nnull > 0)
# warning("there are zero values in the initial vector a\n")
# if (nneg > 0) {
# warning("there are ", nneg, " negative value(s) shifted to zero\n")
# a[(a < 0)] = 0
# }
# if (identical(a, rep(0, length(a))))
# pik1 = a
# else {
# pik1 = n * a/sum(a)
# pik = pik1[pik1 > 0]
# list1 = pik1 > 0
# list = pik >= 1
# l = length(list[list == TRUE])
# if (l > 0) {
# l1 = 0
# while (l != l1) {
# x = pik[!list]
# x = x/sum(x)
# pik[!list] = (n - l) * x
# pik[list] = 1
# l1 = l
# list = (pik >= 1)
# l = length(list[list == TRUE])
# }
# pik1[list1] = pik
# }
# }
# pik1
# }
#
# srswor <- function (n, N)
# {
# s <- rep(0, times = N)
# s[sample(N, n)] <- 1
# s
# }
#
# srswr <- function (n, N)
# # as.vector(rmultinom(1, n, rep(n/N, times = N)))
# if(n==0) rep(0, N) else as.vector(rmultinom(1, n, rep(n/N, times = N)))
#
#
# UPsystematic <- function (pik, eps = 1e-06)
# {
# if (any(is.na(pik)))
# stop("there are missing values in the pik vector")
# list = pik > eps & pik < 1 - eps
# pik1 = pik[list]
# N = length(pik1)
# a = (c(0, cumsum(pik1)) - runif(1, 0, 1))%%1
# s1 = as.integer(a[1:N] > a[2:(N + 1)])
# s = pik
# s[list] = s1
# s
# }
#
# UPpoisson <- function (pik)
# {
# if (any(is.na(pik)))
# stop("there are missing values in the pik vector")
# as.numeric(runif(length(pik)) < pik)
# }
#
#
#
# if (missing(method)) {
# warning("the method is not specified; by default, the method is srswor")
# method = "srswor"
# }
# if (!(method %in% c("srswor", "srswr", "poisson", "systematic")))
# stop("the name of the method is wrong")
# if (method %in% c("poisson", "systematic") & missing(pik))
# stop("the vector of probabilities is missing")
# if (missing(stratanames) | is.null(stratanames)) {
# if (method == "srswor")
# result = data.frame((1:nrow(data))[srswor(size, nrow(data)) ==
# 1], rep(size/nrow(data), size))
# if (method == "srswr") {
# s = srswr(size, nrow(data))
# st = s[s != 0]
# l = length(st)
# result = data.frame((1:nrow(data))[s != 0])
# if (size <= nrow(data))
# result = cbind.data.frame(result, st, prob = rep(size/nrow(data),
# l))
# else {
# prob = rep(size/nrow(data), l)/sum(rep(size/nrow(data),
# l))
# result = cbind.data.frame(result, st, prob)
# }
# colnames(result) = c("id", "replicates", "prob")
# }
# if (method == "poisson") {
# pikk = inclusionprobabilities(pik, size)
# s = (UPpoisson(pikk) == 1)
# if (length(s) > 0)
# result = data.frame((1:nrow(data))[s], pikk[s])
# if (description)
# cat("\nPopulation total and number of selected units:",
# nrow(data), sum(s), "\n")
# }
# if (method == "systematic") {
# pikk = inclusionprobabilities(pik, size)
# s = (UPsystematic(pikk) == 1)
# result = data.frame((1:nrow(data))[s], pikk[s])
# }
# if (method != "srswr")
# colnames(result) = c("id", "prob")
# if (description & method != "poisson")
# cat("\nPopulation total and number of selected units:",
# nrow(data), sum(size), "\n")
# }
# else {
# data = data.frame(data)
# index = 1:nrow(data)
# m = match(stratanames, colnames(data))
# if (any(is.na(m)))
# stop("the names of the strata are wrong")
# data2 = cbind.data.frame(data[, m], index)
# colnames(data2) = c(stratanames, "index")
# x1 = data.frame(unique(data[, m]))
# colnames(x1) = stratanames
# result = NULL
# for (i in 1:nrow(x1)) {
# if (is.vector(x1[i, ]))
# data3 = data2[data2[, 1] == x1[i, ], ]
# else {
# as = data.frame(x1[i, ])
# names(as) = names(x1)
# data3 = merge(data2, as, by = intersect(names(data2),
# names(as)))
# }
# y = sort(data3$index)
# if (description & method != "poisson") {
# cat("Stratum", i, "\n")
# cat("\nPopulation total and number of selected units:",
# length(y), size[i], "\n")
# }
# if (method != "srswr" & length(y) < size[i]) {
# stop("not enough obervations in the stratum ",
# i, "\n")
# st = c(st, NULL)
# }
# else {
# if (method == "srswor") {
# st = y[srswor(size[i], length(y)) == 1]
# r = cbind.data.frame(data2[st, ], rep(size[i]/length(y),
# size[i]))
# }
# if (method == "systematic") {
# pikk = inclusionprobabilities(pik[y], size[i])
# s = (UPsystematic(pikk) == 1)
# st = y[s]
# r = cbind.data.frame(data2[st, ], pikk[s])
# }
# if (method == "srswr") {
# s = srswr(size[i], length(y))
# st = rep(y[s != 0], s[s != 0])
# l = length(st)
# if (size[i] <= length(y))
# r = cbind.data.frame(data2[st, ], prob = rep(size[i]/length(y),
# l))
# else {
# prob = rep(size[i]/length(y), l)/sum(rep(size[i]/length(y),
# l))
# r = cbind.data.frame(data2[st, ], prob)
# }
# }
# if (method == "poisson") {
# pikk = inclusionprobabilities(pik[y], size[i])
# s = (UPpoisson(pikk) == 1)
# if (any(s)) {
# st = y[s]
# r = cbind.data.frame(data2[st, ], pikk[s])
# if (description) {
# cat("Stratum", i, "\n")
# cat("\nPopulation total and number of selected units:",
# length(y), length(st), "\n")
# }
# }
# else {
# if (description) {
# cat("Stratum", i, "\n")
# cat("\nPopulation total and number of selected units:",
# length(y), 0, "\n")
# }
# r = NULL
# }
# }
# }
# # corrected 7.4.2014 for allowing size=0 for a stratum:
# # if (!is.null(r)) {
# if (!is.null(r) & nrow(r)>0) {
# r = cbind(r, i)
# result = rbind.data.frame(result, r)
# }
# }
#
# # original, seems a bit "over-ifed"
# # if (method == "srswr")
# # colnames(result) = c(stratanames, "ID_unit", "Prob", "Stratum")
# # else colnames(result) = c(stratanames, "ID_unit", "Prob", "Stratum")
#
# colnames(result) <- c(stratanames, "id", "prob", "stratum")
#
# if (description) {
# cat("Number of strata ", nrow(x1), "\n")
# if (method == "poisson")
# cat("Total number of selected units", nrow(result),
# "\n")
# else cat("Total number of selected units", sum(size),
# "\n")
# }
# }
# result
# }
SampleTwins <- function (x, stratanames = NULL, twins,
                         method = c("srswor", "srswr", "poisson", "systematic"),
                         pik, description = FALSE) {
  # Draw a sample from 'x' that mirrors the structure of the data frame
  # 'twins' with respect to the strata variables 'stratanames': for each
  # combination of strata levels, as many records are sampled from 'x'
  # as occur in 'twins'. Sampling itself is delegated to Strata().

  # sort data first
  # NOTE(review): order() is applied twice here (order of the order
  # permutation) - verify this produces the intended sort of 'x'.
  x <- x[do.call("order", lapply(x[,stratanames], order)),]

  # define the frequencies: how many twins exist per stratum combination
  twinsize <- as.data.frame.table(xtabs( as.formula(gettextf("~ %s", paste(stratanames, collapse="+"))), twins))
  # full grid of strata present in 'x', with twin counts (0 where no twin)
  size <- merge(x=expand.grid(lapply(x[stratanames], unique)),
                y=twinsize, all.x=TRUE, all.y=TRUE)
  size$Freq[is.na(size$Freq)] <- 0

  s <- Strata(x = x, stratanames = stratanames, size=size$Freq, method=method,
              pik=pik, description=description)

  # warn when some stratum of 'x' could not supply a matching twin count
  if(!identical(table(s[,stratanames]), table(twins[,stratanames]))) {
    warning("Could not find a twin for all records. Enlighten the restrictions!")
  }

  return(s)
}
## stats: distributions ---------------------------------
dBenf <- function(x, ndigits = 1, log = FALSE) {
  # Probability mass of the Benford distribution for the first
  # (ndigits == 1) or first two (ndigits == 2) significant digits.
  if (!IsNumeric(ndigits, length.arg = 1,
                 positive = TRUE, integer.valued = TRUE) ||
      ndigits > 2)
    stop("argument 'ndigits' must be 1 or 2")
  # support: 1..9 for single digits, 10..99 for digit pairs
  support.min <- if (ndigits == 1) 1 else 10
  support.max <- if (ndigits == 1) 9 else 99
  if (!is.logical(log.arg <- log) || length(log) != 1)
    stop("bad input for argument 'log'")
  rm(log)
  dens <- x * NA
  in.range <- is.finite(x) & (x >= support.min)
  dens[in.range] <- log10(1 + 1/x[in.range])
  # zero mass outside the support and for non-integer arguments
  dens[!is.na(x) & !is.nan(x) &
         ((x < support.min) |
            (x > support.max) |
            (x != round(x)))] <- 0.0
  if (log.arg) log(dens) else dens
}
rBenf <- function(n, ndigits = 1) {
  # Draw 'n' random first (or first-two) significant digits from the
  # Benford distribution by inverting the CDF pBenf() on uniform draws.
  if (!IsNumeric(ndigits, length.arg = 1,
                 positive = TRUE, integer.valued = TRUE) ||
      ndigits > 2)
    stop("argument 'ndigits' must be 1 or 2")
  # support is 1..9 for single digits, 10..99 for digit pairs
  lowerlimit <- ifelse(ndigits == 1, 1, 10)
  upperlimit <- ifelse(ndigits == 1, 9, 99)
  # if 'n' has length > 1 its length is taken as the sample size,
  # mirroring the convention of the standard r* functions
  use.n <- if ((length.n <- length(n)) > 1) length.n else
    if (!IsNumeric(n, integer.valued = TRUE,
                   length.arg = 1, positive = TRUE))
      stop("bad input for argument 'n'") else n
  myrunif <- runif(use.n)
  # start every draw at the smallest digit, then bump it to 'ii'
  # whenever the uniform value falls in the CDF interval of 'ii'
  ans <- rep(lowerlimit, length = use.n)
  for (ii in (lowerlimit+1):upperlimit) {
    indexTF <- (pBenf(ii-1, ndigits = ndigits) < myrunif) &
      (myrunif <= pBenf(ii, ndigits = ndigits))
    ans[indexTF] <- ii
  }
  ans
}
pBenf <- function(q, ndigits = 1, log.p = FALSE) {
  # Distribution function of the Benford distribution for the first
  # (ndigits == 1) or first two (ndigits == 2) significant digits.
  if (!IsNumeric(ndigits, length.arg = 1,
                 positive = TRUE, integer.valued = TRUE) ||
      ndigits > 2)
    stop("argument 'ndigits' must be 1 or 2")
  support.min <- if (ndigits == 1) 1 else 10
  support.max <- if (ndigits == 1) 9 else 99
  prob <- q * NA
  qq <- floor(q)
  ok <- is.finite(q) & (qq >= support.min)
  # P(D <= d) = log10(1 + d), shifted by 1 in the two-digit case
  prob[ok] <- log10(1 + qq[ok]) - (if (ndigits == 1) 0 else 1)
  prob[!is.na(q) & !is.nan(q) & (q >= support.max)] <- 1
  prob[!is.na(q) & !is.nan(q) & (q < support.min)] <- 0
  if (log.p) log(prob) else prob
}
qBenf <- function(p, ndigits = 1) {
  # Quantile function of the Benford distribution: the smallest digit
  # whose cumulative probability pBenf() reaches 'p'.
  if (!IsNumeric(ndigits, length.arg = 1,
                 positive = TRUE, integer.valued = TRUE) ||
      ndigits > 2)
    stop("argument 'ndigits' must be 1 or 2")
  # support is 1..9 for single digits, 10..99 for digit pairs
  lowerlimit <- ifelse(ndigits == 1, 1, 10)
  upperlimit <- ifelse(ndigits == 1, 9, 99)
  bad <- !is.na(p) & !is.nan(p) & ((p < 0) | (p > 1))
  if (any(bad))
    stop("bad input for argument 'p'")
  # walk up the support, raising each quantile to 'ii' while 'p' falls
  # into the CDF interval of 'ii'
  ans <- rep(lowerlimit, length = length(p))
  for (ii in (lowerlimit+1):upperlimit) {
    indexTF <- is.finite(p) &
      (pBenf(ii-1, ndigits = ndigits) < p) &
      (p <= pBenf(ii, ndigits = ndigits))
    ans[indexTF] <- ii
  }
  # propagate NAs and pin boundary probabilities to the support limits
  ans[ is.na(p) | is.nan(p)] <- NA
  ans[!is.na(p) & !is.nan(p) & (p == 0)] <- lowerlimit
  ans[!is.na(p) & !is.nan(p) & (p == 1)] <- upperlimit
  ans
}
dRevGumbel <- function (x, location = 0, scale = 1) {
  # Density of the "reverse" (mirrored) Gumbel distribution.
  # from VGAM --  if (is.null(x)) FALSE else ifelse(is.na(x), FALSE, x)
  if (!IsNumeric(scale, positive = TRUE))
    stop("\"scale\" must be positive")
  z <- exp((x - location) / scale)
  z * exp(-z) / scale
}
pRevGumbel <- function (q, location = 0, scale = 1) {
  # Distribution function of the "reverse" (mirrored) Gumbel distribution.
  if (!IsNumeric(scale, positive = TRUE))
    stop("\"scale\" must be positive")
  upper.tail <- exp(-exp((q - location) / scale))
  1 - upper.tail
}
qRevGumbel <- function (p, location = 0, scale = 1)
{
  # Quantile function of the "reverse" (mirrored) Gumbel distribution.
  if (!IsNumeric(scale, positive = TRUE))
    stop("\"scale\" must be positive")
  std.q <- log(-log(p))
  location + scale * std.q
}
# Quantile function of the reverse Gumbel distribution on the
# exponential (original data) scale: exp() of the reverse Gumbel quantile.
qRevGumbelExp <- function (p) exp(qRevGumbel(p))
rRevGumbel <- function (n, location = 0, scale = 1)
{
  # Random deviates of the "reverse" (mirrored) Gumbel distribution,
  # generated by inversion of the quantile function on runif(n).
  #
  # Fix: the first sanity check used to test 'scale' although its error
  # message (and the integer.valued requirement) clearly target 'n';
  # it now validates 'n' as a positive integer.
  if (!IsNumeric(n, positive=TRUE, integer.valued=TRUE))
    stop("bad input for argument \"n\"")
  if (!IsNumeric(scale, positive=TRUE))
    stop("\"scale\" must be positive")
  location + scale * log(-log(runif(n)))
}
RndPairs <- function(n, r, rdist1 = rnorm(n=n, mean = 0, sd = 1), rdist2 = rnorm(n=n, mean = 0, sd = 1)){
  # Generate n random pairs with correlation r by multiplying two
  # independent samples with the Cholesky factor of the 2x2 correlation
  # matrix; returns a two-column data.frame.
  z <- matrix(cbind(rdist1, rdist2), nrow = n, ncol = 2)
  cormat <- matrix(c(1, r, r, 1), nrow = 2, ncol = 2)
  data.frame(z %*% chol(cormat))
}
RndWord <- function(size, length, x = LETTERS, replace = TRUE, prob = NULL){
  # Generate 'size' random character strings of the given 'length',
  # drawing single characters from 'x' (optionally weighted by 'prob').
  .one <- function(i)
    paste(sample(x=x, size=length, replace=replace, prob=prob), collapse="")
  sapply(1:size, .one)
}
## basic finance functions ---------------
NPV <- function(i, cf, t=seq(along=cf)-1) {
  # Net present value: the cash flows 'cf' at times 't' discounted
  # at the rate 'i' and summed.
  disc <- (1 + i)^t
  sum(cf / disc)
}
IRR <- function(cf, t=seq(along=cf)-1) {
  # Internal rate of return: the discount rate at which the net present
  # value of the cash flows 'cf' at times 't' becomes zero.
  #
  # Robustness fix: uniroot() used to search only the fixed interval
  # [0, 1] and failed for rates outside it (e.g. > 100% or negative);
  # extendInt = "yes" lets uniroot extend the interval automatically
  # when NPV does not change sign on [0, 1].
  uniroot(NPV, c(0, 1), extendInt = "yes", cf = cf, t = t)$root
}
OPR <- function (K, D = NULL, log = FALSE) {
  # One-period returns (Einperiodenrenditen) from a price series K and
  # optional payouts D; simple returns by default, log returns if log=TRUE.
  if (is.null(D))
    D <- numeric(length(K))
  gain <- D[-1] + K[-1]
  base <- K[-length(K)]
  if (log)
    log(gain / base)
  else
    (gain - base) / base
}
NPVFixBond <- function(i, Co, RV, n){
  # Present value of a fixed-coupon bond: coupons Co over n periods plus
  # the redemption value RV at maturity, discounted at the rate i.
  disc <- (1 + i)^(1:n)
  sum(Co / disc) + RV / disc[n]
}
YTM <- function(Co, PP, RV, n){
  # Yield to maturity (internal rate of return of the bond): the rate at
  # which the present value of coupons Co and redemption RV after n
  # periods equals the purchase price PP.
  pv.gap <- function(i) sum(Co / (1+i)^(1:n), RV / (1+i)^n) - PP
  uniroot(pv.gap, c(0, 1))$root
}
## utils: manipulation, utilities ====
InDots <- function(..., arg, default){
  # Extract the value of the argument named 'arg' from the ... arguments;
  # return 'default' when it was not supplied.
  # was arg in the dots-args? parse dots.arguments
  # NOTE(review): this picks the *unevaluated* matched expression out of
  # the call and unlist()s it - presumably callers only pass atomic
  # constants in ...; confirm for language-object arguments.
  arg <- unlist(match.call(expand.dots=FALSE)$...[arg])
  # if arg was not in ... then return default
  if(is.null(arg)) arg <- default
  return(arg)
}
FctArgs <- function(name, sort=FALSE) {
  # Print (and invisibly return) a data.frame listing a function's formal
  # arguments and their default values; with sort=TRUE the arguments are
  # ordered alphabetically, keeping "..." last.
  # got that somewhere, but don't know from where...

  # accept the function object itself as well as its name
  if(is.function(name)) name <- as.character(substitute(name))
  a <- formals(get(name, pos=1))
  if(is.null(a))
    return(NULL)
  arg.labels <- names(a)
  arg.values <- as.character(a)
  # re-quote character defaults so they print as they would be typed
  char <- sapply(a, is.character)
  arg.values[char] <- paste("\"", arg.values[char], "\"", sep="")
  if(sort)
  {
    ord <- order(arg.labels)
    # move "..." to the end of the sorted order
    if(any(arg.labels == "..."))
      ord <- c(ord[-which(arg.labels[ord]=="...")],
               which(arg.labels=="..."))
    arg.labels <- arg.labels[ord]
    arg.values <- arg.values[ord]
  }
  output <- data.frame(value=I(arg.values), row.names=arg.labels)
  print(output, right=FALSE)
  invisible(output)
}
Keywords <- function( topic ) {
  # verbatim from library(gtools)
  # Without a 'topic': display R's KEYWORDS file. With a 'topic' (given
  # as a name or a character string): return the keywords under which
  # the topic's help pages are indexed.
  file <- file.path(R.home("doc"),"KEYWORDS")
  if(missing(topic))
  {
    file.show(file)
  } else {
    #     ## Local copy of trim.character to avoid cyclic dependency with gdata ##
    #     trim <- function(s) {
    #
    #       s <- sub(pattern="^[[:blank:]]+", replacement="", x=s)
    #       s <- sub(pattern="[[:blank:]]+$", replacement="", x=s)
    #       s
    #     }
    # read the KEYWORDS file and strip it down to the keyword identifiers
    kw <- scan(file=file, what=character(), sep="\n", quiet=TRUE)
    kw <- grep("&", kw, value=TRUE)
    kw <- gsub("&[^&]*$","", kw)
    kw <- gsub("&+"," ", kw)
    kw <- na.omit(StrTrim(kw))
    # accept both quoted and unquoted (symbol) topics
    ischar <- tryCatch(is.character(topic) && length(topic) ==
                         1L, error = identity)
    if (inherits(ischar, "error"))
      ischar <- FALSE
    if (!ischar)
      topic <- deparse(substitute(topic))
    item <- paste("^",topic,"$", sep="")
    # old, replaced by suggestion of K. Hornik 23.2.2015
    #    topics <- function(k) help.search(keyword=k)$matches[,"topic"]
    # all help topics registered under keyword 'k'
    topics <- function(k) {
      matches <- help.search(keyword=k)$matches
      matches[ , match("topic", tolower(colnames(matches)))]
    }
    matches <- lapply(kw, topics)
    names(matches) <- kw
    # keep only the keywords whose topic list contains 'topic'
    tmp <- unlist(lapply( matches, function(m) grep(item, m, value=TRUE) ))
    names(tmp)
  }
}
SysInfo <- function() {
  ## description <<  getSysinfo is a convenience function to compile some information about the
  ## computing system and environment used.
  # Prints a summary (system, user, memory, R version, attached packages,
  # DescTools options) to the console and returns the collected info
  # string invisibly.

  # names and versions of the attached non-base packages
  package.names <- sapply(sessionInfo()[['otherPkgs']],'[[','Package')
  package.versions <- sapply(sessionInfo()[['otherPkgs']],'[[','Version')
  packages.all <- paste(gettextf("%s (%s)", package.names, package.versions), collapse=", ")
  pars.sys <- c('user', 'nodename', 'sysname', 'release')
  R.system <- paste(sessionInfo()[[1]]$version.string)
  sys.info <- paste(pars.sys, Sys.info()[pars.sys], collapse=', ', sep=': ')
  all.info <- paste(c(sys.info,', ', R.system,', installed Packages: ', packages.all),
                    sep='', collapse='')
  # NOTE(review): the trailing "\n\n" is passed as an extra argument to
  # gettextf (beyond the three %s slots) rather than appended to the
  # output - verify the intended newlines.
  cat(gettextf("\nSystem: %s\nNodename: %s, User: %s",
               paste(Sys.info()[c("sysname","release","version")], collapse=" ")
               , Sys.info()["nodename"], Sys.info()["user"], "\n\n"))
  # NOTE(review): memory.limit() is Windows-only and defunct in recent R
  # (>= 4.2) - confirm the supported R/platform range.
  cat(gettextf("\nTotal Memory: %s MB\n\n", memory.limit()))
  cat(StrTrim(sessionInfo()$R.version$version.string), "\n")
  cat(sessionInfo()$platform, "\n")
  cat("\nLoaded Packages: \n", packages.all, "\n")
  DescToolsOptions()
  invisible(all.info)
}
FindRProfile <- function(){
  # Return the paths of all existing Rprofile files, in the order R
  # consults them: R_PROFILE, Rprofile.site, R_PROFILE_USER, ./.Rprofile.
  candidates <- c(Sys.getenv("R_PROFILE"),
                  file.path(Sys.getenv("R_HOME"), "etc", "Rprofile.site"),
                  Sys.getenv("R_PROFILE_USER"),
                  file.path(getwd(), ".Rprofile"))
  candidates[file.exists(candidates)]
}
DescToolsOptions <- function (..., default = NULL, reset = FALSE) {
  # Get and set the package-wide DescTools options (colours, digits,
  # number formats, language, stamp, last Word/Excel/PowerPoint handles),
  # following the semantics of base options():
  #   DescToolsOptions()            all options (defaults filled in)
  #   DescToolsOptions("digits")    a single option (or its default)
  #   DescToolsOptions("x", default=v)  option "x", or v when unset
  #   DescToolsOptions(digits=5)    set option(s), old values invisibly
  #   reset = TRUE                  restore the system defaults

  # unwrap a single-element list to its element
  .Simplify <- function(x)
    if(is.list(x) && length(x)==1L)
      x[[1L]]
  else
    x

  # all system defaults
  def <- list(
    col = c(hblue, hred, horange),
    digits = 3,
    fixedfont = structure(list(name = "Consolas", size = 7), class = "font"),
    fmt = structure(list(
      abs = structure(list(digits = 0, big.mark = "'"), .Names = c("digits", "big.mark"),
                      name = "abs", label = "Number format for counts",
                      default = TRUE, class = "fmt"),
      per = structure(list(digits = 1, fmt = "%"), .Names = c("digits", "fmt"),
                      name = "per", label = "Percentage number format",
                      default = TRUE, class = "fmt"),
      num = structure(list(digits = 3, big.mark = "'"), .Names = c("digits", "big.mark"),
                      name = "num", label = "Number format for floats",
                      default = TRUE, class = "fmt")), name = "fmt"),
    footnote = c("'", "\"", "\"\""),
    lang = "engl",
    plotit = TRUE,
    stamp = expression(gettextf("%s/%s", Sys.getenv("USERNAME"),
                                Format(Today(), fmt = "yyyy-mm-dd"))),
    lastWrd=NULL,
    lastXL=NULL,
    lastPP=NULL
  )

  # potentionally evaluate dots
  dots <- lapply(list(...), function(x) {
    if (is.symbol(x))
      eval(substitute(x, env = parent.frame()))
    else
      x
  })
  # reduce length[[1]] list to a list n (exclude single named argument)
  if(length(dots)==1L && is.list(dots) &&
     !(length(dots)==1 && !is.null(names(dots))))
    dots <- dots[[1]]
  # refuse to work with several options and defaults
  if (length(dots) > 1L && !is.null(default))
    stop("defaults can only be used with single options")
  # ignore anything else, set the defaults and return old values
  if (reset == TRUE)
    invisible(options(DescTools = def))
  # flag these values as defaults, not before they are potentially reset
  # do not set on lastXYZ options (can't set attribute on NULL values)
  # (positions 9:11 of 'def' are lastWrd/lastXL/lastPP defined above)
  for(i in seq_along(def)[-c(9:11)])
    attr(def[[i]], "default") <- TRUE

  opt <- getOption("DescTools")
  # store such as to return as result
  old <- opt
  # take defaults and overwrite found entries in options
  def[names(opt)] <- opt
  opt <- def
  # no names were given, so just return all options
  if (length(dots) == 0) {
    return(opt)
  } else {
    # entries were supplied, now check if there were named entries
    # dots is then a list with length 1
    if (is.null(names(dots))) {
      # if no names, check default and return either the value
      # or if this does not exist, the default
      if (!is.null(default))
        # a default is given, so get old option value and replace with user default
        # when it's NULL
        # note: in old are the original option values (no system defaults)
        # NOTE(review): ifelse() with a scalar condition returns only the
        # first element of a list-valued option - confirm whether a plain
        # if/else was intended for list options combined with a default.
        return(.Simplify(ifelse(is.null(old[[dots]]), default, old[[dots]])))
      else
        # no defaults given, so return options, evt. sys defaults
        # reduce list to value, if length 1
        return(.Simplify(opt[unlist(dots)]))
    } else {
      # there are named values, so these are to be stored
      # restore old options in opt (no defaults should be stored)
      opt <- old
      if (is.null(opt))
        opt <- list()
      opt[names(dots)] <- dots
      # store full option set
      options(DescTools = opt)
      # return only the new set variables
      old <- old[names(dots)]
    }
  }
  invisible(old)
}
# DescToolsOptions <- function(..., default=NULL, reset=FALSE){
#
# .Simplify <- function(x)
# # return first element of a list, if it's the only one
# if(is.list(x) && length(x)==1)
# x[[1]]
# else
# x
#
#
# def <- list(
# col=c(hred, hblue, hgreen),
# digits=3,
# fixedfont=structure(list(name="Consolas", size=7), class="font"),
# fmt=structure(
# list(
# abs=structure(list(digits = 0, big.mark = "'"),
# .Names = c("digits","big.mark"),
# name = "abs", label = "Number format for counts",
# default=TRUE, class = "fmt"),
# per=structure(list(digits = 1, fmt = "%"),
# .Names = c("digits","big.mark"), name = "per",
# label = "Percentage number format",
# default=TRUE, class = "fmt"),
# num=structure(list(digits = 3, big.mark = "'"),
# .Names = c("digits","big.mark"), name = "num",
# label = "Number format for floats",
# default=TRUE, class = "fmt")
# ), name="fmt"),
#
# footnote=c("'", '"', '""'),
# lang="engl",
# plotit=TRUE,
# stamp=expression(gettextf("%s/%s", Sys.getenv("USERNAME"), Format(Today(), fmt = "yyyy-mm-dd"))),
# lastWrd=NULL,
# lastXL=NULL,
# lastPP=NULL
# )
#
#
# # potentionally evaluate dots
# dots <- lapply(list(...), function(x){
# if(is.symbol(x))
# eval(substitute(x, env = parent.frame()))
# else
# x
# })
#
# # refuse to work with several options and defaults
# if(length(dots)>1 && !is.null(default))
# stop("defaults can only be used with single options")
#
# opt <- getOption("DescTools")
#
# old <- opt
#
# if(reset==TRUE)
# # reset the options and return old values invisible
# options(DescTools=def)
#
# if(length(dots)==0) {
# # no arguments, just return the options
# return(.Simplify(opt))
#
# } else {
# if(is.null(names(dots))){
# # get the option and return either value or the default
# if(!is.null(default))
# # just one allowed here, can we do better?? **********
# return(.Simplify(Coalesce(opt[dots[[1]]], default)))
#
# else
# # more values allowed
# return(.Simplify(opt[unlist(dots)]))
#
# } else {
# #set the options
# if(is.null(opt))
# opt <- list()
#
# opt[names(dots)[[1]]] <- dots[[1]]
#
# # let default options return the result
# .Simplify(options(DescTools=opt))
# }
# }
#
# invisible(old)
#
# }
fmt <- function(...){
  # Legacy accessor for number-format templates (largely superseded by
  # Fmt() above; kept for backward compatibility).
  # get format templates and modify on the fly, e.g. other digits
  # x is the name of the template

  # built-in fallback templates; "abs", "per" and "num" must always exist
  def <- structure(
    list(
      abs=structure(list(digits = 0, big.mark = "'"),
                    label = "Number format for counts",
                    default=TRUE, class = "fmt"),
      per=structure(list(digits = 1, fmt = "%"),
                    label = "Percentage number format",
                    default=TRUE, class = "fmt"),
      num=structure(list(digits = 0, big.mark = "'"),
                    label = "Number format for floating points",
                    default=TRUE, class = "fmt")
    ), name="fmt")
  # get a format from the fmt templates options
  # NOTE(review): the [[1]] picks only the *first* template out of the
  # option list - presumably the whole list was meant (cf. Fmt()); confirm.
  res <- DescToolsOptions("fmt")[[1]]
  # find other defined fmt in .GlobalEnv and append to list
  # found <- ls(parent.frame())[ lapply(lapply(ls(parent.frame()), function(x) gettextf("class(%s)", x)),
  #                  function(x) eval(parse(text=x))) == "fmt" ]
  # if(length(found)>0){
  #   udf <- lapply(found, function(x) eval(parse(text=x)))
  #   names(udf) <- found
  # }
  # collect all found formats, defaults included if not set as option
  # abs, per and num must always be available, even if not explicitly defined
  res <- c(res, def[names(def) %nin% names(res)]) #, udf)
  # get additional arguments
  dots <- match.call(expand.dots=FALSE)$...
  # leave away all NULL values, these should not overwrite the defaults below
  # NOTE(review): is.null(dots) is a single logical, so this subscript
  # always yields an *empty* list - Filter(Negate(is.null), dots) looks
  # like what was intended; as written, fmt() always falls through to
  # returning 'res' below. Confirm before relying on the set/select paths.
  dots <- dots[is.null(dots)]
  # functionality:
  #    Fmt()                 return all from options
  #    Fmt("abs")            return abs
  #    Fmt("abs", digits=3)  return abs with updated digits
  #    Fmt(c("abs","per"))   return abs and per
  #    Fmt(nob=as.Fmt(digits=10, na.form="nodat"))  set nob
  # NOTE(review): names(dots) is NULL or a whole vector, so all(!is.null(..))
  # tests the names attribute as a whole, not elementwise.
  if(all(!is.null(names(dots)))){
    # set value
    old <- options("DescTools")
    opt <- old
    opt$fmt[[names(dots)]] <- dots
    options(DescTools=opt)
    # same behaviour as options
    invisible(old)
  } else {
    if(!length(dots))
      return(res)
    # select the requested ones by name
    fnames <- unlist(dots[is.null(names(dots))])
    res <- res[fnames]
    # modify additional arguments in the template definition
    for(z in names(res)){
      if(!is.null(res[[z]]))
        # use named dots
        res[[z]][names(dots[!is.null(names(dots))])] <- dots[!is.null(names(dots))]
    }
    # set names as given, especially for returning the ones not found
    # ????    names(res) <- fnames
    # reduce list, this should not be necessary, but to make sure
    # if(length(res)==1)
    #   res <- res[[1]]
    return(res)
  }
}
as.fmt <- function(...){
  # Turn the named arguments into a "fmt" number-format template object
  # (as consumed by Format() / Fmt()).
  # dots <- match.call(expand.dots=FALSE)$...
  # new by 0.99.22
  tpl <- list(...)
  structure(tpl,
            .Names = names(tpl),
            label = "Number format",
            class = "fmt")
}
ParseSASDatalines <- function(x, env = .GlobalEnv, overwrite = FALSE) {
  # Parse a SAS DATA step supplied as a character string (DATA ... ;
  # INPUT ... ; DATALINES ... ;) and build the corresponding data.frame.
  # If a dataset name is found, the result is additionally assigned under
  # that name in 'env', asking interactively before overwriting an
  # existing object unless overwrite = TRUE. Returns the data.frame.
  # see: http://www.psychstatistics.com/2012/12/07/using-datalines-in-sas/
  # or: http://www.ats.ucla.edu/stat/sas/library/SASRead_os.htm

  # split command to list by means of ;
  lst <- StrTrim(strsplit(x, ";")[[1]])

  dsname <- lst[grep(pattern = "^[Dd][Aa][Tt][Aa] ", StrTrim(lst))]   # this would be the dataname
  dsname <- gsub(pattern = "^[Dd][Aa][Tt][Aa] +", "", dsname)

  # get the columnnames from the input line
  input <- lst[grep(pattern = "^[Ii][Nn][Pp][Uu][Tt]", StrTrim(lst))]
  # get rid of potential single @
  input <- gsub("[ \n\t]@+[ \n\t]*", "", input)
  input <- gsub(pattern=" +\\$", "$", input)
  input <- gsub(" +", " ", input)
  cnames <- strsplit(input, " ")[[1]][-1]

  # the default values for the variables
  # ($-suffixed SAS variables are character, all others numeric)
  def <- rep(0, length(cnames))
  def[grep("\\$$", cnames)] <- "''"
  vars <- paste(gsub("\\$$","",cnames), def, sep="=", collapse=",")

  # the data block follows directly after the datalines/cards keyword
  datalines <- lst[grep("datalines|cards|cards4", tolower(lst))+1]

  # scan() the values with the deduced what-list to get the right types
  res <- eval(parse(text=gettextf(
    "data.frame(scan(file=textConnection(datalines),
                what=list(%s), quiet=TRUE))", vars)))

  if(length(dsname) > 0){   # check if a dataname could be found
    if( overwrite | ! exists(dsname, envir=env) ) {
      assign(dsname, res, envir=env)
    } else {
      # ask the user interactively before overwriting an existing object
      cat(gettextf("The file %s already exists in %s. Should it be overwritten? (y/n)\n"
                   , dsname, deparse(substitute(env))))
      ans <- readline()
      if(ans == "y")
        assign(dsname, res, envir = env)
      # stop(gettextf("%s already exists in %s. Use overwrite = TRUE to overwrite it.", dsname, deparse(substitute(env))))
    }
  }
  return(res)
}
SetNames <- function (x, ...) {
  # Set names, rownames and/or colnames of an object in one expression
  # and return the modified object (a richer variant of stats::setNames();
  # see also setNames()).
  args <- list(...)
  # apply the relevant replacement function for each recognised keyword,
  # in the same order as before: colnames, rownames, names
  for (what in c("colnames", "rownames", "names")) {
    if (what %in% names(args))
      x <- do.call(paste0(what, "<-"), list(x, args[[what]]))
  }
  x
}
InsRow <- function(m, x, i, row.names = NULL) {
  # Insert x as new row(s) into matrix m before row i; an i larger than
  # nrow(m) appends at the bottom. row.names optionally names the
  # inserted rows. Column names of m are preserved.
  n <- nrow(m)
  ins <- matrix(x, ncol = ncol(m))
  if (!is.null(row.names))
    rownames(ins) <- row.names
  res <- if (i == 1) {
    rbind(ins, m)
  } else if (i > n) {
    rbind(m, ins)
  } else {
    rbind(m[1:(i - 1), ], ins, m[i:n, ])
  }
  colnames(res) <- colnames(m)
  res
}
InsCol <- function(m, x, i, col.names = NULL) {
  # Insert x as new column(s) into matrix m before column i; an i larger
  # than ncol(m) appends at the right. col.names optionally names the
  # inserted columns. Row names of m are preserved.
  n <- ncol(m)
  ins <- matrix(x, nrow = nrow(m))
  if (!is.null(col.names))
    colnames(ins) <- col.names
  res <- if (i == 1) {
    cbind(ins, m)
  } else if (i > n) {
    cbind(m, ins)
  } else {
    cbind(m[, 1:(i - 1)], ins, m[, i:n])
  }
  rownames(res) <- rownames(m)
  res
}
Rename <- function(x, ..., gsub = FALSE, fixed = TRUE, warn = TRUE) {
  # Rename elements/columns of x. The ... arguments are old = new pairs;
  # unnamed replacements are matched positionally against names(x).
  # With gsub = TRUE each pair is applied as pattern/replacement on ALL
  # names via base::gsub() (fixed controls literal vs. regex matching).
  # warn = TRUE issues a warning for names that do not occur in x.
  repl <- c(...)
  if (is.null(names(repl)))
    names(repl) <- names(x)[seq_along(repl)]
  if (gsub) {
    # pattern replacement on all names, one pair after the other
    nms <- names(x)
    for (k in seq_along(repl))
      nms <- base::gsub(names(repl)[k], repl[k], nms, fixed = fixed)
    names(x) <- nms
  } else {
    # exact matching: replace only the names found in x
    pos <- match(names(repl), names(x))
    if (anyNA(pos)) {
      if (warn) warning("unused name(s) selected")
      keep <- !is.na(pos)
      if (any(keep)) repl <- repl[keep]
      pos <- pos[keep]
    }
    if (length(pos))
      names(x)[pos] <- repl
  }
  x
}
# This does not work, because x does not come as a reference
# AddLabel <- function(x, text = ""){
# ### add an attribute named "label" to a variable in a data.frame
# attr(x, "label") <- text
# }
# attr(d.pizza$driver, "label") <- "The driver delivering the pizza"
# AddLabel(d.pizza$driver, "lkj?lkjlkjlk?lkj lkj lkj lkadflkj alskd lkas")
# simplified from Hmisc
Label <- function(x) {
  # Return the "label" attribute of x (NULL if none is set).
  # Simplified from Hmisc.
  attr(x, "label")
}
"Label<-" <- function(x, value) {
  # Replacement function: attach a "label" attribute to x
  # (or remove it by assigning NULL). Only length-1, non-list
  # values are accepted.
  if (is.list(value))
    stop("cannot assign a list to be an object label")
  if (!is.null(value) && length(value) != 1L)
    stop("value must be character vector of length 1")
  attr(x, "label") <- value
  x
}
# "Label<-.data.frame" <- function(x, self=(length(value)==1), ..., value) {
#
# if(!is.data.frame(x)) stop("x must be a data.frame")
#
# if(self){
# attr(x, "label") <- value
# } else {
# for (i in seq(along.with=x)) {
# Label(x[[i]]) <- value[[i]]
# }
# }
# return(x)
# }
# Label.data.frame <- function(x, ...) {
# labels <- mapply(FUN=Label, x=x)
# return(labels[unlist(lapply(labels, function(x) !is.null(x) ))])
# }
# SetLabel <- function (object = nm, nm) {
# Label(object) <- nm
# object
# }
`Unit<-` <- function (x, value) {
  # Replacement function: attach a "unit" attribute to x
  # (or remove it by assigning NULL). Only length-1, non-list
  # values are accepted.
  if (is.list(value))
    stop("cannot assign a list to be an object label")
  if (!is.null(value) && length(value) != 1L)
    stop("value must be character vector of length 1")
  attr(x, "unit") <- value
  x
}
Unit <- function (x) attributes(x)$unit
#
# To Sort(., mixed=TRUE) for vectors
#
#
# SortMixed: order or sort strings with embedded numbers so that the
# numbers are in the correct order.
# Description:
# These functions sort or order character strings containing numbers so
# that the numbers are sorted numerically rather than by character value,
# i.e. "Aspirin 50mg" will come before "Aspirin 100mg".
#
# Sort: generic sorting front-end; dispatches to the Sort.<class>
# methods defined below (default, data.frame, matrix, table).
Sort <- function(x, ...) {
UseMethod("Sort")
}
Sort.default <- function(x, ...) {
  # Default method: simply delegate to base::sort().
  sort(x, ...)
}
# Sort the rows of a data.frame by one or several columns.
#
# ord    sort columns, as column names or numeric indices; the keyword
#        "row.names" sorts by row names. Default: all columns, left to
#        right.
# decreasing  logical, recycled along ord: per-column sort direction.
# factorsAsCharacter  if TRUE, factor columns are ordered by the sorted
#        level labels rather than the stored level order.
# na.last  passed on to order().
Sort.data.frame <- function(x, ord = NULL, decreasing = FALSE, factorsAsCharacter = TRUE,
na.last = TRUE, ...) {
# why not using ord argument as in matrix and table instead of ord?
if(is.null(ord)) { ord <- 1:ncol(x) }
if(is.character(ord)) {
# position 1 below is reserved for the row-names helper column 'rn'
ord <- match(ord, c("row.names", names(x)))
} else if(is.numeric(ord)) {
ord <- as.integer(ord) + 1
}
# recycle decreasing and by
lgp <- list(decreasing = decreasing, ord = ord)
# recycle all params to maxdim = max(unlist(lapply(lgp, length)))
lgp <- lapply(lgp, rep, length.out = max(unlist(lapply(lgp, length))))
# decreasing is not recycled in order, so we use rev to change the sorting direction
# old: d.ord <- x[,lgp$ord, drop=FALSE] # preserve data.frame with drop = FALSE
d.ord <- data.frame(rn=rownames(x),x)[, lgp$ord, drop = FALSE] # preserve data.frame with drop = FALSE
if(factorsAsCharacter){
# re-level factors so their numeric codes follow the sorted labels
for( xn in which(sapply(d.ord, is.factor)) ){ d.ord[,xn] <- factor(d.ord[,xn], levels=sort(levels(d.ord[,xn]))) }
}
# character columns are sorted via the codes of an ad-hoc factor
d.ord[, which(sapply(d.ord, is.character))] <- lapply(d.ord[,which(sapply(d.ord, is.character)), drop=FALSE], factor)
d.ord <- data.frame(lapply(d.ord, as.numeric))
# negate the columns flagged decreasing, as order() does not recycle
# its decreasing argument over columns
d.ord[lgp$decreasing] <- lapply(d.ord[lgp$decreasing], "-")
x[ do.call("order", c(as.list(d.ord), na.last=na.last)), , drop = FALSE]
}
# Sort the rows of a matrix by one or several columns.
#
# ord    sort columns as numeric indices; the keyword "row_names"
#        (index 0) sorts by row names.
# decreasing  logical, recycled along ord: per-column direction.
# na.last  passed on to order().
Sort.matrix <- function (x, ord = NULL, decreasing = FALSE, na.last = TRUE, ...) {
if (length(dim(x)) == 1 ){
# do not specially handle 1-dimensional matrices
res <- sort(x=x, decreasing=decreasing)
} else {
if (is.null(ord)) {
# default order by sequence of columns
ord <- 1:ncol(x)
}
# replace keyword by code
ord[ord=="row_names"] <- 0
# we have to coerce, as ord will be character if row_names is used
ord <- as.numeric(ord)
lgp <- list(decreasing = decreasing, ord = ord)
lgp <- lapply(lgp, rep, length.out = max(unlist(lapply(lgp, length))))
# prepend a 'rownr' helper column so that ord value 0 (row names)
# maps onto column 1 after the +1 shift below
if( is.null(row.names(x))) {
d.x <- data.frame(cbind(rownr=1:nrow(x)), x)
} else {
d.x <- data.frame(cbind( rownr=as.numeric(factor(row.names(x))), x))
}
d.ord <- d.x[, lgp$ord + 1, drop = FALSE]
# negate the columns flagged decreasing (order() does not recycle it)
d.ord[lgp$decreasing] <- lapply(d.ord[lgp$decreasing], "-")
res <- x[do.call("order", c(as.list(d.ord), na.last=na.last)), , drop=FALSE]
# old version cannot be used for [n,1]-matrices, we switch to reset dim
# class(res) <- "matrix"
# 19.9.2013: dim kills rownames, so stick to drop = FALSE
# dim(res) <- dim(x)
}
return(res)
}
# Sort the rows of a (2-dimensional) table by one or several columns.
# Besides the data columns, the helper columns 'rownr' (row names) and
# 'mar' (row sums) can be addressed via ord.
Sort.table <- function (x, ord = NULL, decreasing = FALSE, na.last = TRUE, ...) {
if (length(dim(x)) == 1 ){
# do not specially handle 1-dimensional tables
res <- sort(x=x, decreasing=decreasing)
} else {
if (is.null(ord)) {
ord <- 1:ncol(x)
}
lgp <- list(decreasing = decreasing, ord = ord)
lgp <- lapply(lgp, rep, length.out = max(unlist(lapply(lgp, length))))
# helper frame: row-name codes, the table body, and the row margins
d.x <- data.frame(cbind( rownr=as.numeric(factor(row.names(x))), x, mar=apply(x, 1, sum)))
d.ord <- d.x[, lgp$ord + 1, drop = FALSE]
# negate the columns flagged decreasing (order() does not recycle it)
d.ord[lgp$decreasing] <- lapply(d.ord[lgp$decreasing], "-")
res <- x[do.call("order", c(as.list(d.ord), na.last=na.last)), , drop=FALSE]
class(res) <- "table"
}
return(res)
}
# Rev: generic version of rev() which can also reverse single margins
# of matrices, tables and data.frames (see the methods below).
Rev <- function(x, ...) {
# additional interface for rev...
UseMethod("Rev")
}
Rev.default <- function(x, ...) {
  # Plain reversal via rev(); a margin makes no sense here, so a
  # supplied one is discarded with a warning.
  # NOTE(review): the guard only fires for 1-d arrays
  # (length(dim(x)) == 1); for plain vectors (dim NULL) extra
  # arguments pass silently. Looks unintended, but kept as-is to
  # preserve behaviour.
  dots <- list(...)
  if (length(dots) > 0 && length(dim(x)) == 1 && !identical(dots, 1))
    warning("margin has been supplied and will be discarded.")
  rev(x)
}
Rev.table <- function(x, margin, ...) {
  # Reverse the order of the levels along the given margin(s) of a
  # table (e.g. margin = 1 flips the rows, margin = c(1, 2) flips
  # rows and columns). The class of x is preserved.
  if (!is.array(x))
    stop("'x' is not an array")
  # start with identity indices for every dimension, then reverse
  # the requested margins
  idx <- lapply(dim(x), seq_len)
  idx[margin] <- lapply(dim(x)[margin], function(d) rev(seq_len(d)))
  z <- do.call(`[`, c(list(x), idx, list(drop = FALSE)))
  class(z) <- oldClass(x)
  return(z)
}
Rev.matrix <- function(x, margin, ...) {
  # A matrix is reversed exactly like a table along the given margin(s).
  Rev.table(x = x, margin = margin, ...)
}
Rev.data.frame <- function(x, margin, ...) {
  # Reverse the row order (margin 1) and/or the column order (margin 2)
  # of a data.frame; other margin values are ignored.
  revrows <- 1 %in% margin
  revcols <- 2 %in% margin
  if (revrows)
    x <- x[nrow(x):1L, ]
  if (revcols)
    x <- x[, ncol(x):1L]
  return(x)
}
# Untable: generic front-end for recreating raw observations from a
# frequency table; dispatches to the methods below.
Untable <- function(x, ...){
UseMethod("Untable")
}
# Expand a frequency table stored as a data.frame: every row is
# replicated as often as its count in the frequency column, which is
# then dropped from the result.
#
# freq      name of the frequency column (default "Freq").
# rownames  optional row names for the result (NULL resets them).
Untable.data.frame <- function(x, freq = "Freq", rownames = NULL, ...){
if(all(is.na(match(freq, names(x)))))
stop(gettextf("Frequency column %s does not exist!", freq))
# replicate the row indices according to the counts and drop the
# frequency column itself
res <- x[Untable(x[,freq], type="as.numeric")[,], -grep(freq, names(x))]
rownames(res) <- rownames
return(res)
}
# Recreate the raw data.frame out of a contingency table (or a vector
# of counts): each cell is expanded to as many rows as its count.
#
# dimnames  optional replacement dimnames for x before expanding.
# type      target type per column, as the name of a coercion function
#           ("as.factor" default, "as.numeric", "as.ordered", ...);
#           recycled over the columns.
# rownames/colnames  optional dimnames for the resulting data.frame.
Untable.default <- function(x, dimnames=NULL, type = NULL, rownames = NULL, colnames = NULL, ...) {
# recreates the data.frame out of a contingency table
# coerce to table, such as also be able to handle vectors
x <- as.table(x)
if(!is.null(dimnames)) dimnames(x) <- dimnames
if(is.null(dimnames) && identical(type, "as.numeric")) dimnames(x) <- list(seq_along(x))
# set a title for the table if it does not have one
# if(is.null(names(dimnames(x)))) names(dimnames(x)) <- ""
# if(length(dim(x))==1 && names(dimnames(x))=="") names(dimnames(x)) <- "Var1"
# replaced 26.3.2013
for( i in 1:length(dimnames(x)) )
if (is.null(names(dimnames(x)[i])) || names(dimnames(x)[i]) == "")
if (length(dimnames(x)) == 1) names(dimnames(x)) <- gettextf("Var%s", i)
else names(dimnames(x)[i]) <- gettextf("Var%s", i)
# expand.grid() enumerates all cells; rep() blows the cell index up
# to the cell's count
res <- as.data.frame(expand.grid(dimnames(x))[rep(1:prod(dim(x)), as.vector(x)),])
rownames(res) <- NULL
if(!all(names(dimnames(x))=="")) colnames(res) <- names(dimnames(x))
# return ordered factors, if wanted...
if(is.null(type)) type <- "as.factor"
# recycle type:
if(length(type) < ncol(res)) type <- rep(type, length.out=ncol(res))
for(i in 1:ncol(res)){
if(type[i]=="as.numeric"){
res[,i] <- as.numeric(as.character(res[,i]))
} else {
# apply the requested coercion function by name
res[,i] <- eval(parse(text = gettextf("%s(res[,i])", type[i])))
}
}
# overwrite the dimnames, if requested
if(!is.null(rownames)) rownames(res) <- rownames
if(!is.null(colnames)) colnames(res) <- colnames
return(res)
}
# AddClass <- function(x, class, after=0) {
# class(x) <- append(class(x), class, after = after)
# x
# }
#
#
# RemoveClass <- function(x, class) {
# class(x) <- class(x)[class(x) %nin% class]
# x
# }
# Convert fixed-width text lines into a character matrix by detecting
# the column positions that consist of the separator character in
# every line and turning them into delimiters.
#
# txt    character vector of fixed-width lines.
# sep    the character that fills the gaps between columns.
# delim  delimiter to insert between the detected columns.
# trim   strip surrounding whitespace from the cells.
# header use the first line as column names.
FixToTable <- function(txt, sep = " ", delim = "\t", trim = TRUE, header = TRUE){
# converts a fixed text to a delim separated table
# make all lines same width first
txt <- StrPad(txt, width=max(nchar(txt)))
# one row per line, one column per character
m <- do.call("rbind", strsplit(txt, ""))
# columns that are separator in EVERY line split the table
idx <- apply( m, 2, function(x) all(x == sep))
# replace all multiple delims by just one
idx[-1][(apply(cbind(idx[-1], idx[-length(idx)]), 1, sum) == 2)] <- FALSE
m[,idx] <- delim
tab <- apply( m, 1, paste, collapse="")
# trim the columns
if(trim) {
tab <- do.call("rbind", lapply(strsplit(tab, delim), StrTrim))
} else {
tab <- do.call("rbind", strsplit(tab, delim))
}
if(header) {
colnames(tab) <- tab[1,]
tab <- tab[-1,]
}
return(tab)
}
## GUI-Elements: select variables by dialog, FileOpen, DescDlg, ObjectBrowse ====
SaveAsDlg <- function(x, filename) {
  # Save the object x to an .rda file; when no filename is supplied,
  # the target path is asked for interactively via file.choose().
  # x is saved under the name it has at the call site.
  if (missing(filename))
    filename <- file.choose()
  if (is.na(filename)) {
    warning("No filename supplied")
  } else {
    save(list = deparse(substitute(x)), file = filename)
  }
}
# SelectVarDlg: generic GUI helper to choose elements/levels/columns of
# an object from a list dialog; dispatches to the methods below.
SelectVarDlg <- function (x, ...) {
UseMethod("SelectVarDlg")
}
# Write the text x to the system clipboard; implemented for macOS
# (pbcopy pipe) and Windows ("clipboard" pseudo-file), errors on
# other systems. The ... arguments are passed to cat().
.ToClipboard <- function (x, ...) {
# This fails on Linux with
#
# * checking examples ... ERROR
# Running examples in 'DescTools-Ex.R' failed The error most likely occurred in:
#
# > base::assign(".ptime", proc.time(), pos = "CheckExEnv") ### Name:
# > ToClipboard ### Title: Write Text to Clipboard ### Aliases:
# > ToClipboard
sn <- Sys.info()["sysname"]
if (sn == "Darwin") {
# macOS: pipe through pbcopy
file <- pipe("pbcopy")
cat(x, file = file, ...)
close(file)
}
else if (sn == "Windows") {
cat(x, file = "clipboard", ...)
}
else {
stop("Writing to the clipboard is not implemented for your system (",
sn, ") in this package.")
}
}
# Let the user pick elements of the character vector x from a list
# dialog. The selection is turned into R code (a c(...) expression of
# either quoted values or, with useIndex = TRUE, their indices), copied
# to the clipboard and returned invisibly.
SelectVarDlg.default <- function(x, useIndex = FALSE, ...){
# example: Sel(d.pizza)
xsel <- select.list(x, multiple = TRUE, graphics = TRUE)
if(useIndex == TRUE) {
xsel <- which(x %in% xsel)
} else {
xsel <- shQuote(xsel)
}
# empty selection (cancel) yields the quoted empty string
if(!identical(xsel, "\"\""))
txt <- paste("c(", paste(xsel, collapse=","),")", sep="")
else
txt <- ""
.ToClipboard(txt)
invisible(txt)
}
# Numeric method: the selection list offers the names of x (or the
# values as character, if unnamed); the generated code indexes x with
# the chosen elements and is placed on the clipboard.
SelectVarDlg.numeric <- function(x, ...) {
if(!is.null(names(x)))
z <- names(x)
else
z <- as.character(x)
txt <- paste(deparse(substitute(x)), "[", SelectVarDlg.default( x = z, ...), "]",
sep="", collapse="")
.ToClipboard(txt)
invisible(txt)
}
SelectVarDlg.factor <- function(x, ...) { SelectVarDlg.default( x = levels(x), ...) }
# data.frame method: the selection list offers the column names; the
# generated code subsets the data.frame by the chosen columns and is
# placed on the clipboard.
SelectVarDlg.data.frame <- function(x, ...) {
sel <- SelectVarDlg.default( x = colnames(x), ...)
if(sel!="")
txt <- paste(deparse(substitute(x)), "[,",
sel, "]", sep="", collapse="")
else
txt <- ""
.ToClipboard(txt)
invisible(txt)
}
# Ask for a file via file.choose() and put a matching R command on the
# clipboard.
#
# fmt  1: just the quoted full path,
#      2: a read.table() call (default for .dat/.csv),
#      3: a load() call (default for .rda/.RData);
#      NULL derives the format from the file extension.
# Returns the generated command invisibly.
FileOpenCmd <- function(fmt=NULL) {
fn <- file.choose()
# fn <- tcltk::tclvalue(tcltk::tkgetOpenFile())
op <- options(useFancyQuotes = FALSE)
# switch from backslash to slash
fn <- gsub("\\\\", "/", fn)
# parse the filename into path, filename, filextension
fnamelong <- rev(unlist(strsplit(fn, "/")))[1]
ext <- rev(unlist(strsplit( fnamelong, "\\.")))[1]
fname <- substr(fnamelong, 1, nchar(fnamelong) - nchar(ext) - 1)
path <- substr(fn, 1, nchar(fn) - nchar(fname) - nchar(ext) - 1)
if(is.null(fmt)) {
if(ext %in% c("rda", "RData"))
fmt <- 3
else if(ext %in% c("dat", "csv"))
fmt <- 2
else
fmt <- 1
}
# read.table text:
if(fmt == 1) {
fmt <- "\"%path%%fname%.%ext%\""
} else if( fmt == 2) {
fmt="d.%fname% <- read.table(file = \"%path%%fname%.%ext%\", header = TRUE, sep = \";\", na.strings = c(\"NA\",\"NULL\"), strip.white = TRUE)"
} else if( fmt == 3) {
fmt="load(file = \"%path%%fname%.%ext%\")"
}
# fill the %path%/%fname%/%ext% placeholders in the template
rcmd <- gsub("%fname%", fname, gsub("%ext%", ext, gsub( "%path%", path, fmt)))
# utils::writeClipboard(rcmd)
.ToClipboard(rcmd)
options(op)
invisible(rcmd)
}
# Create and return a tcltk toplevel window of the given size, roughly
# centered on screen (unless x/y are given), with title, resizability
# flags and an icon from the package's extdata folder.
.InitDlg <- function(width, height, x=NULL, y=NULL, resizex=FALSE, resizey=FALSE, main="Dialog", ico="R"){
top <- tcltk::tktoplevel()
# default position: approximately the screen center
if(is.null(x)) x <- as.integer(tcltk::tkwinfo("screenwidth", top))/2 - 50
if(is.null(y)) y <- as.integer(tcltk::tkwinfo("screenheight", top))/2 - 25
geom <- gettextf("%sx%s+%s+%s", width, height, x, y)
tcltk::tkwm.geometry(top, geom)
tcltk::tkwm.title(top, main)
tcltk::tkwm.resizable(top, resizex, resizey)
# alternative:
# system.file("extdata", paste(ico, "ico", sep="."), package="DescTools")
tcltk::tkwm.iconbitmap(top, file.path(find.package("DescTools"), "extdata", paste(ico, "ico", sep=".")))
return(top)
}
# Modal tcltk dialog collecting the options for foreign::read.spss():
# dataset name, value-label handling, trimming, reencoding and missing
# handling. Returns a list of the chosen settings, or NULL when the
# dialog was cancelled.
# NOTE(review): the trim option is returned under the name
# 'trim.values' (with a dot) - callers must use exactly that name.
.ImportSPSS <- function(datasetname = "dataset") {
# read.spss
# function (file, use.value.labels = TRUE, to.data.frame = FALSE,
# max.value.labels = Inf, trim.factor.names = FALSE, trim_values = TRUE,
# reencode = NA, use.missings = to.data.frame)
e1 <- environment()
# result holders, filled by the OK handler via assign()
env.dsname <- character()
env.use.value.labels <- logical()
env.to.data.frame <- logical()
env.max.value.labels <- character()
env.trim.factor.names <- logical()
env.trim.values <- logical()
env.reencode <- character()
env.use.missings <- logical()
# lst stays NULL unless OK is pressed
lst <- NULL
OnOK <- function() {
# copy the tcl variables into the function environment and close
assign("lst", list(), envir = e1)
assign("env.dsname", tcltk::tclvalue(dsname), envir = e1)
assign("env.use.value.labels", tcltk::tclvalue(use.value.labels), envir = e1)
assign("env.to.data.frame", tcltk::tclvalue(to.data.frame), envir = e1)
assign("env.max.value.labels", tcltk::tclvalue(max.value.labels), envir = e1)
assign("env.trim.factor.names", tcltk::tclvalue(trim.factor.names), envir = e1)
assign("env.trim.values", tcltk::tclvalue(trim.values), envir = e1)
assign("env.reencode", tcltk::tclvalue(reencode), envir = e1)
assign("env.use.missings", tcltk::tclvalue(use.missings), envir = e1)
tcltk::tkdestroy(top)
}
# build the dialog widgets
top <- .InitDlg(350, 300, main="Import SPSS Dataset")
dsname <- tcltk::tclVar(datasetname)
dsnameFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
entryDsname <- tcltk::ttkentry(dsnameFrame, width=30, textvariable=dsname)
optionsFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
use.value.labels <- tcltk::tclVar("1")
use.value.labelsCheckBox <- tcltk::ttkcheckbutton(optionsFrame, text="Use value labels", variable=use.value.labels)
to.data.frame <- tcltk::tclVar("1")
to.data.frameCheckBox <- tcltk::ttkcheckbutton(optionsFrame,
text="Convert value labels to factor levels", variable=to.data.frame)
max.value.labels <- tcltk::tclVar("Inf")
entryMaxValueLabels <- tcltk::ttkentry(optionsFrame, width=30, textvariable=max.value.labels)
trim.values <- tcltk::tclVar("1")
trim.valuesCheckBox <- tcltk::ttkcheckbutton(optionsFrame, text="Ignore trailing spaces when matching"
, variable=trim.values)
trim.factor.names <- tcltk::tclVar("1")
trim.factor.namesCheckBox <- tcltk::ttkcheckbutton(optionsFrame, text="Trim trailing spaces from factor levels"
, variable=trim.factor.names)
reencode <- tcltk::tclVar("")
entryReencode <- tcltk::ttkentry(optionsFrame, width=30, textvariable=reencode)
use.missings <- tcltk::tclVar("1")
use.missingsCheckBox <- tcltk::ttkcheckbutton(optionsFrame, text="Use missings",
variable=use.missings)
# lay out the widgets
tcltk::tkgrid(tcltk::tklabel(dsnameFrame, text="Enter name for data set: "), entryDsname, sticky="w")
tcltk::tkgrid(dsnameFrame, columnspan=2, sticky="w")
tcltk::tkgrid(use.value.labelsCheckBox, sticky="w")
tcltk::tkgrid(to.data.frameCheckBox, sticky="nw")
tcltk::tkgrid(tcltk::ttklabel(optionsFrame, text="Maximal value label:"), sticky="nw")
tcltk::tkgrid(entryMaxValueLabels, padx=20, sticky="nw")
tcltk::tkgrid(trim.valuesCheckBox, sticky="w")
tcltk::tkgrid(trim.factor.namesCheckBox, sticky="w")
tcltk::tkgrid(tcltk::ttklabel(optionsFrame, text="Reencode character strings to the current locale:"), sticky="nw")
tcltk::tkgrid(entryReencode, padx=20, sticky="nw")
tcltk::tkgrid(use.missingsCheckBox, sticky="w")
tcltk::tkgrid(optionsFrame, sticky="w")
buttonsFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
tfButOK <- tcltk::tkbutton(buttonsFrame, text = "OK", command = OnOK, width=10)
tfButCanc <- tcltk::tkbutton(buttonsFrame, width=10, text = "Cancel", command = function() tcltk::tkdestroy(top))
tcltk::tkgrid(tfButOK, tfButCanc)
tcltk::tkgrid.configure(tfButCanc, padx=c(6,6))
tcltk::tkgrid.columnconfigure(buttonsFrame, 0, weight=2)
tcltk::tkgrid.columnconfigure(buttonsFrame, 1, weight=1)
tcltk::tkgrid(buttonsFrame, sticky="ew")
# block until the window is closed (OK or Cancel)
tcltk::tkwait.window(top)
if(!is.null(lst)){
lst <- list(dsname=env.dsname, use.value.labels=as.numeric(env.use.value.labels),
to.data.frame=as.numeric(env.to.data.frame),
max.value.labels=env.max.value.labels, trim.factor.names=as.numeric(env.trim.factor.names),
trim.values=as.numeric(env.trim.values), reencode=env.reencode, use.missings=as.numeric(env.use.missings) )
}
return(lst)
}
# Modal tcltk dialog collecting the options for foreign::read.systat():
# dataset name and whether to convert to a data.frame. Returns a list
# of the chosen settings, or NULL when the dialog was cancelled.
.ImportSYSTAT <- function(datasetname = "dataset") {
e1 <- environment()
# result holders, filled by the OK handler via assign()
env.dsname <- character()
env.to.data.frame <- logical()
# lst stays NULL unless OK is pressed
lst <- NULL
top <- .InitDlg(350, 140, main="Import SYSTAT Dataset")
OnOK <- function() {
# copy the tcl variables into the function environment and close
assign("lst", list(), envir = e1)
assign("env.dsname", tcltk::tclvalue(dsname), envir = e1)
assign("env.to.data.frame", tcltk::tclvalue(to.data.frame ), envir = e1)
tcltk::tkdestroy(top)
}
# build and lay out the dialog widgets
dsname <- tcltk::tclVar(datasetname)
dsnameFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
entryDsname <- tcltk::ttkentry(dsnameFrame, width=30, textvariable=dsname)
optionsFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
to.data.frame <- tcltk::tclVar("1")
to.data.frameCheckBox <- tcltk::ttkcheckbutton(optionsFrame,
text="Convert dataset to data.frame", variable=to.data.frame)
tcltk::tkgrid(tcltk::tklabel(dsnameFrame, text="Enter name for data set: "), entryDsname, sticky="w")
tcltk::tkgrid(dsnameFrame, columnspan=2, sticky="w")
tcltk::tkgrid(to.data.frameCheckBox, sticky="w")
tcltk::tkgrid(optionsFrame, sticky="w")
buttonsFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
tfButOK <- tcltk::tkbutton(buttonsFrame, text = "OK", command = OnOK, width=10)
tfButCanc <- tcltk::tkbutton(buttonsFrame, width=10, text = "Cancel", command = function() tcltk::tkdestroy(top))
tcltk::tkgrid(tfButOK, tfButCanc)
tcltk::tkgrid.configure(tfButCanc, padx=c(6,6))
tcltk::tkgrid.columnconfigure(buttonsFrame, 0, weight=2)
tcltk::tkgrid.columnconfigure(buttonsFrame, 1, weight=1)
tcltk::tkgrid(buttonsFrame, sticky="ew")
# block until the window is closed (OK or Cancel)
tcltk::tkwait.window(top)
if(!is.null(lst)){
lst <- list(dsname=env.dsname, to.data.frame=as.numeric(env.to.data.frame))
}
return(lst)
}
# Modal tcltk dialog collecting the options for foreign::read.dta():
# dataset name, date/factor/underscore conversion, missing handling.
# Returns a list of the chosen settings, or NULL when cancelled.
.ImportStataDlg <- function(datasetname = "dataset") {
# function (file, convert.dates = TRUE, convert.factors = TRUE,
# missing.type = FALSE, convert.underscore = FALSE, warn.missing.labels = TRUE)
e1 <- environment()
# result holders, filled by the OK handler via assign()
env.dsname <- character()
env.convert.dates <- logical()
env.convert.factors <- logical()
env.convert.underscore <- logical()
env.missing.type <- logical()
env.warn.missing.labels <- logical()
# lst stays NULL unless OK is pressed
lst <- NULL
OnOK <- function() {
# copy the tcl variables into the function environment and close
assign("lst", list(), envir = e1)
assign("env.dsname", tcltk::tclvalue(dsname), envir = e1)
assign("env.convert.dates", tcltk::tclvalue(convert.dates), envir = e1)
assign("env.convert.factors", tcltk::tclvalue(convert.factors), envir = e1)
assign("env.convert.underscore", tcltk::tclvalue(convert.underscore), envir = e1)
assign("env.missing.type", tcltk::tclvalue(missing.type), envir = e1)
assign("env.warn.missing.labels", tcltk::tclvalue(warn.missing.labels), envir = e1)
tcltk::tkdestroy(top)
}
# build and lay out the dialog widgets
top <- .InitDlg(350, 220, main="Import Stata Dataset")
dsname <- tcltk::tclVar(datasetname)
dsnameFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
entryDsname <- tcltk::ttkentry(dsnameFrame, width=30, textvariable=dsname)
optionsFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
convert.factors <- tcltk::tclVar("1")
convert.factorsCheckBox <- tcltk::ttkcheckbutton(optionsFrame,
text="Convert value labels to factor levels", variable=convert.factors)
convert.dates <- tcltk::tclVar("1")
convert.datesCheckBox <- tcltk::ttkcheckbutton(optionsFrame, text="Convert dates to R format", variable=convert.dates)
missing.type <- tcltk::tclVar("1")
missing.typeCheckBox <- tcltk::ttkcheckbutton(optionsFrame, text="Multiple missing types (>=Stata 8)"
, variable=missing.type)
convert.underscore <- tcltk::tclVar("1")
convert.underscoreCheckBox <- tcltk::ttkcheckbutton(optionsFrame, text="Convert underscore to period"
, variable=convert.underscore)
warn.missing.labels <- tcltk::tclVar("1")
warn.missing.labelsCheckBox <- tcltk::ttkcheckbutton(optionsFrame, text="Warn on missing labels",
variable=warn.missing.labels)
tcltk::tkgrid(tcltk::tklabel(dsnameFrame, text="Enter name for data set: "), entryDsname, sticky="w")
tcltk::tkgrid(dsnameFrame, columnspan=2, sticky="w")
tcltk::tkgrid(convert.datesCheckBox, sticky="w")
tcltk::tkgrid(convert.factorsCheckBox, sticky="nw")
tcltk::tkgrid(missing.typeCheckBox, sticky="w")
tcltk::tkgrid(convert.underscoreCheckBox, sticky="w")
tcltk::tkgrid(warn.missing.labelsCheckBox, sticky="w")
tcltk::tkgrid(optionsFrame, sticky="w")
buttonsFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
tfButOK <- tcltk::tkbutton(buttonsFrame, text = "OK", command = OnOK, width=10)
tfButCanc <- tcltk::tkbutton(buttonsFrame, width=10, text = "Cancel", command = function() tcltk::tkdestroy(top))
tcltk::tkgrid(tfButOK, tfButCanc)
tcltk::tkgrid.configure(tfButCanc, padx=c(6,6))
tcltk::tkgrid.columnconfigure(buttonsFrame, 0, weight=2)
tcltk::tkgrid.columnconfigure(buttonsFrame, 1, weight=1)
tcltk::tkgrid(buttonsFrame, sticky="ew")
# block until the window is closed (OK or Cancel)
tcltk::tkwait.window(top)
if(!is.null(lst)){
lst <- list(dsname=env.dsname, convert.factors=as.numeric(env.convert.factors),
convert.dates=as.numeric(env.convert.dates), convert.underscore=as.numeric(env.convert.underscore),
missing.type=as.numeric(env.missing.type), warn.missing.labels=as.numeric(env.warn.missing.labels) )
}
return(lst)
}
ImportFileDlg <- function(auto_type = TRUE, env = .GlobalEnv) {
  # Let the user pick a foreign data file (SPSS, SAS, SYSTAT, MiniTab,
  # Stata), query format-specific import options in a dialog and create
  # the imported data set in the given environment.
  #
  # auto_type: if TRUE, the import filter is derived from the file
  #            extension; otherwise the user chooses it from a list.
  # env:       environment in which the new data set is assigned.
  # Returns invisibly; the data set is created as a side effect.
  #
  # Fixes: (1) read.spss() was fed dlg$trim_value, which does not exist
  # in the list returned by .ImportSPSS (element name: trim.values) and
  # so was always NULL; (2) a cancelled dialog or the MiniTab/SAS
  # branches left 'dlg'/'zz' undefined, making the final assign() fail;
  # (3) the format list spelled "Minitab" while the switch branch is
  # "MiniTab", so a manual selection could never reach that branch.

  requireNamespace("tcltk", quietly = FALSE)

  filename <- tcltk::tclvalue(tcltk::tkgetOpenFile(filetypes= "{{All files} *}
{{SPSS Files} {.sav}} {{SAS xport files} {.xpt, .xport}}
{{SYSTAT} {*.sys, *.syd}} {{MiniTab} {.mtp}}
{{Stata Files} {.dta}}"))
  # nicht topmost, aber wie mach ich das dann??
  # tcl("wm", "attributes", root, topmost=TRUE)
  if (filename == "") return(invisible())

  path <- SplitPath(filename)

  # spelling must match the switch() branches below
  fformats <- c("SPSS", "SAS", "SYSTAT", "MiniTab", "Stata")

  if (auto_type) {
    xsel <- switch(toupper(path$extension),
                   "SAV"   = "SPSS",
                   "DTA"   = "Stata",
                   "SYD"   = "SYSTAT",
                   "SYS"   = "SYSTAT",
                   "MTP"   = "MiniTab",
                   "XPT"   = "SAS",
                   "XPORT" = "SAS",
                   "SAS"   = "SAS",
                   select.list(fformats, multiple = FALSE, graphics = TRUE))
  } else {
    xsel <- select.list(fformats, multiple = FALSE, graphics = TRUE)
  }

  # selection dialog cancelled
  if (length(xsel) == 0L || xsel == "") return(invisible())

  dlg <- NULL
  zz <- NULL

  switch(xsel,
         "MiniTab" = {
           # read.mtp() takes no options, so no options dialog is needed
           dlg <- list(dsname = paste("d.", path$filename, sep = ""))
           zz <- foreign::read.mtp(file = filename)
         },
         "SYSTAT" = {
           dlg <- .ImportSYSTAT(paste("d.", path$filename, sep = ""))
           if (is.null(dlg)) return(invisible())
           zz <- foreign::read.systat(file = filename,
                                      to.data.frame = dlg$to.data.frame)
         },
         "SPSS" = {
           dlg <- .ImportSPSS(paste("d.", path$filename, sep = ""))
           if (is.null(dlg)) return(invisible())
           zz <- foreign::read.spss(file = filename,
                                    use.value.labels = dlg$use.value.labels,
                                    to.data.frame = dlg$to.data.frame,
                                    max.value.labels = dlg$max.value.labels,
                                    trim.factor.names = dlg$trim.factor.names,
                                    trim_values = dlg$trim.values,
                                    reencode = ifelse(dlg$reencode == "", NA, dlg$reencode),
                                    use.missings = dlg$use.missings)
         },
         "SAS" = {
           print("not yet implemented.")
         },
         "Stata" = {
           dlg <- .ImportStataDlg(paste("d.", path$filename, sep = ""))
           if (is.null(dlg)) return(invisible())
           zz <- foreign::read.dta(file = filename,
                                   convert.dates = dlg[["convert.dates"]],
                                   convert.factors = dlg[["convert.factors"]],
                                   missing.type = dlg[["missing.type"]],
                                   convert.underscore = dlg[["convert.underscore"]],
                                   warn.missing.labels = dlg[["warn.missing.labels"]])
         })

  # nothing imported (e.g. unsupported format): do not try to assign
  if (is.null(dlg) || is.null(zz)) return(invisible())

  assign(dlg[["dsname"]], zz, envir = env)
  message(gettextf("Dataset %s has been successfully created!\n\n", dlg[["dsname"]]))
  # Exec(gettextf("print(str(%s, envir = %s))", dlg[["dsname"]], deparse(substitute(env))))
  invisible()
}
# Show a small modal tcltk login dialog with a masked entry field and
# return the entered password as a character string ("" if cancelled).
PasswordDlg <- function() {
requireNamespace("tcltk", quietly = FALSE)
e1 <- environment()
# result holder, filled by the OK handler via assign()
pw <- character()
tfpw <- tcltk::tclVar("")
OnOK <- function() {
assign("pw", tcltk::tclvalue(tfpw), envir = e1)
tcltk::tkdestroy(root)
}
# do not update screen
tcltk::tclServiceMode(on = FALSE)
# create window
root <- .InitDlg(205, 110, resizex=FALSE, resizey=FALSE, main="Login", ico="key")
# define widgets
content <- tcltk::tkframe(root, padx=10, pady=10)
tfEntrPW <- tcltk::tkentry(content, width="30", textvariable=tfpw, show="*" )
tfButOK <- tcltk::tkbutton(content,text="OK",command=OnOK, width=6)
tfButCanc <- tcltk::tkbutton(content, text="Cancel", width=7,
command=function() tcltk::tkdestroy(root))
# build GUI
tcltk::tkgrid(content, column=0, row=0)
tcltk::tkgrid(tcltk::tklabel(content, text="Enter Password"), column=0, row=0,
columnspan=3, sticky="w")
tcltk::tkgrid(tfEntrPW, column=0, row=1, columnspan=3, pady=10)
tcltk::tkgrid(tfButOK, column=0, row=2, ipadx=15, sticky="w")
tcltk::tkgrid(tfButCanc, column=2, row=2, ipadx=5, sticky="e")
# binding event-handler
tcltk::tkbind(tfEntrPW, "<Return>", OnOK)
tcltk::tkfocus(tfEntrPW)
tcltk::tclServiceMode(on = TRUE)
tcltk::tcl("wm", "attributes", root, topmost=TRUE)
# block until the window is closed (OK, Return or Cancel)
tcltk::tkwait.window(root)
return(pw)
}
ColorDlg <- function() {
  # Open the native tk colour chooser and return the selected colour
  # as a character string.
  requireNamespace("tcltk", quietly = FALSE)
  res <- tcltk::tcl("tk_chooseColor", title = "Choose a color")
  as.character(res)
}
# IdentifyA: generic for interactively identifying points in a plot
# that lie within a user-drawn area; dispatches to the methods below.
IdentifyA <- function(x, ...){
UseMethod("IdentifyA")
}
# Formula interface for IdentifyA(): builds the model frame from
# 'formula' (y ~ x) and 'data', then delegates to IdentifyA.default().
# NAs are kept (na.pass) so that point indices match the data rows.
IdentifyA.formula <- function(formula, data, subset, poly = FALSE, ...){
opt <- options(na.action=na.pass); on.exit(options(opt))
# identifies points in a plot, lying in a rectangle, spanned by upleft, botright
# standard non-standard-evaluation dance: rebuild the call as a
# model.frame() call and evaluate it in the caller's frame
mf <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "na.action", "subset"), names(mf), 0L)
mf <- mf[c(1L, m)]
mf$drop.unused.levels <- TRUE
mf[[1L]] <- as.name("model.frame")
mf <- eval(mf, parent.frame())
response <- attr(attr(mf, "terms"), "response")
vname <- attr(attr(attr(mf, "terms"), "dataClasses"), "names")
# carry the variable names along so the default method can label them
x <- setNames(mf[[-response]], vname[2])
y <- setNames(mf[[response]], vname[1])
IdentifyA(x=x, y=y, ...)
}
# Interactively identify points in the current plot: the user either
# clicks two corners of a rectangle (default) or draws a polygon
# (poly = TRUE) with locator(). Returns the indices of the points
# inside the area; the attribute "cond" carries an R expression
# describing the selection.
IdentifyA.default <- function(x, y=NULL, poly = FALSE, ...){
xlabel <- if (!missing(x))
deparse(substitute(x))
ylabel <- if (!missing(y))
deparse(substitute(y))
pxy <- xy.coords(x, y, xlabel, ylabel)
xlabel <- pxy$xlab
ylabel <- pxy$ylab
if(poly){
cat("Select polygon points and click on finish when done!\n")
xy <- locator(type="n")
# show the chosen polygon and test all points against it
polygon(xy, border="grey", lty="dotted")
idx <- PtInPoly(data.frame(pxy$x, pxy$y), do.call("data.frame", xy))$pip == 1
code <- paste("x %in% c(", paste(which(idx), collapse=","), ")", sep="")
} else {
cat("Select upper-left and bottom-right point!\n")
xy <- locator(n=2, type="n")[1:2]
# show the chosen rectangle and keep the points inside it
rect(xy$x[1], xy$y[1], xy$x[2], xy$y[2], border="grey", lty="dotted")
idx <- (pxy$x %[]% range(xy$x) & pxy$y %[]% range(xy$y))
# NOTE(review): the generated expression ends with one ")" too many
# (two "(" vs. three ")") - the attribute is informational only,
# but eval'ing it as-is would fail; confirm and fix upstream.
code <- paste(xlabel, " %[]% c(", xy$x[1], ", ", xy$x[2], ") & ", ylabel ," %[]% c(", xy$y[1], ", ", xy$y[2], "))", sep="")
}
res <- which(idx)
xy <- lapply(lapply(xy, range), signif, digits=4)
attr(x=res, which="cond") <- code
return(res)
}
# Point-in-polygon test: for every point in 'pnts' (2-column matrix or
# data.frame of x/y), determine whether it lies inside the polygon
# given by 'poly.pnts'. Returns a data.frame of the points plus a
# 'pip' column with the result of the compiled routine.
PtInPoly <- function(pnts, poly.pnts) {
#check if pnts & poly is 2 column matrix or dataframe
pnts = as.matrix(pnts); poly.pnts = as.matrix(poly.pnts)
if (!(is.matrix(pnts) & is.matrix(poly.pnts))) stop('pnts & poly.pnts must be a 2 column dataframe or matrix')
if (!(dim(pnts)[2] == 2 & dim(poly.pnts)[2] == 2)) stop('pnts & poly.pnts must be a 2 column dataframe or matrix')
#ensure first and last polygon points are NOT the same
if (poly.pnts[1,1] == poly.pnts[nrow(poly.pnts),1] & poly.pnts[1,2] == poly.pnts[nrow(poly.pnts),2]) poly.pnts = poly.pnts[-1,]
#run the point in polygon code
out = .Call('pip', PACKAGE="DescTools", pnts[,1], pnts[,2], nrow(pnts), poly.pnts[,1], poly.pnts[,2], nrow(poly.pnts))
#return the value
return(data.frame(pnts,pip=out))
}
# Identify points in a plot using a formula.
# http://www.rforge.net/NCStats/files/
# Author: Derek Ogle <dogle@northland.edu>
# Identify points in a plot using a formula.
# http://www.rforge.net/NCStats/files/
# Author: Derek Ogle <dogle@northland.edu>
identify.formula <- function(formula, data, subset, na.action, ...) {

  # Formula interface for identify(): identify(y ~ x, data = d, ...)
  # lets the user click on points in an existing scatterplot of y vs x.

  # require a two-sided formula with exactly one term on the right-hand side
  if (missing(formula) || (length(formula) != 3L) || (length(attr(terms(formula[-2L]),
                                                                  "term.labels")) != 1L))
    stop("'formula' missing or incorrect")

  # standard non-standard-evaluation idiom (cf. lm()): rebuild the call as a
  # model.frame() call and evaluate it in the caller's frame
  m <- match.call(expand.dots = FALSE)
  if (is.matrix(eval(m$data, parent.frame())))
    m$data <- as.data.frame(data)
  m[[1L]] <- quote(stats::model.frame)
  m$... <- NULL
  mf <- eval(m, parent.frame())

  # column index of the response within the model frame;
  # the remaining column is the predictor
  response <- attr(attr(mf, "terms"), "response")
  identify(x=mf[[-response]], y=mf[[response]], ...)
}
# experimental: formula interface for split
split.formula <- function(x, f, drop = FALSE, data = NULL, ...) {
  # Experimental formula interface for split(): 'x' is a two-sided formula
  # (values ~ grouping); 'f' is ignored and taken from the formula instead.
  mf <- model.frame(x, data)
  # column 1 holds the response (the values to split),
  # column 2 the grouping variable
  split(mf[, 1], mf[, 2], drop = drop, ...)
}
###
## helpers: PlotPar und PlotRCol
PlotPar <- function(){

  # Demo chart: displays the most frequently used plot parameters
  # (pch, lty, lwd, pt.cex, adj, col) together with their numeric codes.

  # restore the user's par settings when leaving
  usr <- par(no.readonly=TRUE); on.exit(par(usr))

  # if devices are open, also switch back to the active one on exit
  if( !is.null(dev.list()) ){
    curwin <- dev.cur()
    on.exit({
      dev.set(curwin)
      par(usr)
    })
  }

  # this does not work and CRAN does not allow windows()
  # dev.new(width=7.2, height=4)

  # no margins, no axes: use the whole device as a canvas
  par( mar=c(0,0,0,0), mex=0.001, xaxt="n", yaxt="n", ann=F, xpd=TRUE)

  # point characters 1..25 (colored/filled, plain, and their codes)
  plot( x=1:25, y=rep(11,25), pch=1:25, cex=2, xlab="", ylab=""
        , frame.plot=FALSE, ylim=c(-1,15), col=2, bg=3)
  # NOTE(review): pch=1:35 for 25 points — only the first 25 values are used;
  # the 1:35 looks unintended but is harmless
  points( x=1:25, y=rep(12.5,25), pch=1:35, cex=2, col=1)
  text( x=1:25, y=rep(9.5,25), labels=1:25, cex=0.8 )

  # line types 1..6 with their names
  segments( x0=1, x1=4, y0=0:5, lty=6:1, lwd=3 )
  text( x=5, y=6:0, adj=c(0,0.5), labels=c("0 = blank", "1 = solid (default)", "2 = dashed", "3 = dotted", "4 = dotdash", "5 = longdash", "6 = twodash") )

  # line widths 1..7
  segments( x0=10, x1=12, y0=0:6, lty=1, lwd=7:1 )
  text( x=13, y=0:6, adj=c(0,0.5), labels=7:1 )

  # point sizes (cex)
  points( x=rep(15,7), y=0:6, cex=rev(c(0.8,1,1.5,2,3,4,7)) )
  text( x=16, y=0:6, adj=c(0,0.5), labels=rev(c(0.8,1,1.5,2,3,4,7)) )

  # section titles
  text( x=c(1,1,10,15,18,18), y=c(14,7.5,7.5,7.5,7.5,2.5), labels=c("pch","lty","lwd","pt.cex","adj","col"), cex=1.3, col="grey40")

  # all 9 combinations of text adjustment (adj)
  adj <- expand.grid(c(0,0.5,1),c(0,0.5,1))
  for( i in 1:nrow(adj) ){
    text( x=18+adj[i,1]*7, y=3.5+adj[i,2]*3, label=paste("text", paste(adj[i,], collapse=",") ), adj=unlist(adj[i,]), cex=0.8 )
  }

  # the 8 base palette colors
  points( x=18:25, y=rep(1,8), col=1:8, pch=15, cex=2 )
  text( x=18:25, y=0, adj=c(0.5,0.5), labels=1:8, cex=0.8 )
}
PlotPch <- function (col = NULL, bg = NULL, newwin = FALSE) {

  # Demo chart: shows the 25 point characters (pch), once plain and once
  # with fill/border colors, alongside their numeric codes.
  # col/bg default to the package colors hred/hecru; newwin opens a
  # separate device.

  if (newwin == TRUE)
    dev.new(width=2, height=5, noRStudioGD=TRUE)
  # dev.new(width=3, height=2, xpos=100, ypos=600, noRStudioGD = TRUE)

  # restore par settings (and the previously active device) on exit
  usr <- par(no.readonly = TRUE)
  on.exit(par(usr))
  if (!is.null(dev.list())) {
    curwin <- dev.cur()
    on.exit({
      dev.set(curwin)
      par(usr)
    })
  }

  # default colors from the package palette
  if(is.null(col))
    col <- hred
  if(is.null(bg))
    bg <- hecru

  # margin-free canvas, no axes
  par(mar = c(0, 0, 0, 0), mex = 0.001, xaxt = "n", yaxt = "n",
      ann = F, xpd = TRUE)

  # left column: plain symbols; middle: colored/filled; right: the codes
  plot(y = 1:25, x = rep(3, 25), pch = 25:1, cex = 1.5, xlab = "",
       ylab = "", frame.plot = FALSE, xlim = c(-1, 15))
  points(y = 1:25, x = rep(6, 25), pch = 25:1, cex = 1.5,
         col = col, bg = bg)
  text(y = 25:1, x = rep(9, 25), labels = 1:25, cex = 0.8)
}
ColPicker <- function(locator=TRUE, ord=c("hsv","default"), label=c("text","hex","dec"),
                      mdim = c(38, 12), newwin = FALSE) {

  # Display the named R colors (without the grey/gray series) as a clickable
  # grid and return the names of the colors the user picks with the mouse.
  #   ord:   display order — by hue/saturation/value, or colors() order
  #   label: swatch labels — color name, hex code, or decimal RGB code
  #   mdim:  grid dimension c(rows, columns)

  # restore plot parameters and options on exit
  usr <- par(no.readonly=TRUE)
  opt <- options(locatorBell = FALSE)
  on.exit({
    par(usr)
    options(opt)
  })

  # this does not work and CRAN does not allow windows()
  # dev.new(width=13, height=7)
  if(newwin == TRUE)
    dev.new(width=13, height=7, noRStudioGD = TRUE)

  # if other devices are open, switch back to the active one on exit
  if( !is.null(dev.list()) ){
    curwin <- dev.cur()
    on.exit({
      dev.set(curwin)
      par(usr)
    })
  }

  # all named colors without the greys (and grays...), n = 453
  cols <- colors()[-grep( pattern="^gr[ea]y", colors())]

  # set the display order
  switch( match.arg( arg=ord, choices=c("hsv","default") )
          , "default" = { # do nothing
          }
          , "hsv" = {
            # order by hue, then saturation, then value
            rgbc <- col2rgb(cols)
            hsvc <- rgb2hsv(rgbc[1,],rgbc[2,],rgbc[3,])
            cols <- cols[ order(hsvc[1,],hsvc[2,],hsvc[3,]) ]
          }
  )

  zeilen <- mdim[1]; spalten <- mdim[2]  # rows and columns of the grid
  farben.zahlen <- matrix( 1:spalten, nrow=zeilen, ncol=spalten, byrow=TRUE)  # matrix for the point positions
  if(zeilen*spalten > length(cols))
    cols <- c(cols, rep(NA, zeilen*spalten - length(cols)) )  # pad with NA entries to fill the grid

  x_offset <- 0.5
  x <- farben.zahlen[, 1:spalten]  # x values (column numbers)
  y <- -rep(1:zeilen, spalten)     # y values (negative row numbers, i.e. top-down)

  # margin-free canvas
  par(mar=c(0,0,0,0), mex=0.001, xaxt="n", yaxt="n", ann=F)
  plot( x, y
        , pch=22               # square point type
        , cex=2                # point magnification
        , col=NA
        , bg=cols              # background (fill) colors
        , bty="n"              # no box
        , xlim=c(1, spalten+x_offset)   # x range
  )

  # swatch labels: name, hex code, or decimal RGB code
  switch( match.arg( arg=label, choices=c("text","hex","dec") )
          , "text" = {
            text( x+0.1, y, cols, adj=0, cex=0.6 )  # color names
          }
          , "hex" = { # HEX codes
            # NOTE(review): assumes exactly 3 padded NA entries at the end
            # (length(cols)-3); verify for mdim values other than c(38, 12)
            text( x+0.1, y, adj=0, cex=0.6,
                  c(apply(apply(col2rgb(cols[1:(length(cols)-3)]), 2, sprintf, fmt=" %02X"), 2, paste, collapse=""), rep("",3))
            )
          }
          , "dec" = { # decimal RGB codes
            text( x+0.1, y, adj=0, cex=0.6,
                  c(apply(apply(col2rgb(cols[1:(length(cols)-3)]), 2, sprintf, fmt=" %03d"), 2, paste, collapse=""), rep("",3))
            )
          }
  )

  # let the user click on swatches, then map the click coordinates
  # back to grid indices (column-major within the cols vector)
  z <- locator()
  idx <- with(lapply(z, round), (x-1) * zeilen + abs(y))

  return(cols[idx])
}
# not needed with gconvertX()
# FigUsr <- function() {
#
# usr <- par("usr")
# plt <- par("plt")
#
# res <- c(
# usr[1] - diff(usr[1:2])/diff(plt[1:2]) * (plt[1]) ,
# usr[2] + diff(usr[1:2])/diff(plt[1:2]) * (1-plt[2]),
# usr[3] - diff(usr[3:4])/diff(plt[3:4]) * (plt[3]) ,
# usr[4] + diff(usr[3:4])/diff(plt[3:4]) * (1-plt[4])
# )
#
# return(res)
#
# }
PlotMar <- function(){

  # Demo chart: visualizes the plot, figure, and outer-margin regions and
  # the corresponding par() settings (mar, oma), with every region labelled
  # in its own color.

  par(oma=c(3,3,3,3))  # all sides have 3 lines of space
  #par(omi=c(1,1,1,1)) # alternative, uncomment this and comment the previous line to try

  # - The mar command represents the figure margins. The vector is in the same ordering of
  #   the oma commands.
  # - The default size is c(5,4,4,2) + 0.1, (equivalent to c(5.1,4.1,4.1,2.1)).
  # - The axes tick marks will go in the first line of the left and bottom with the axis
  #   label going in the second line.
  # - The title will fit in the third line on the top of the graph.
  # - All of the alternatives are:
  #   - mar: Specify the margins of the figure in number of lines
  #   - mai: Specify the margins of the figure in number of inches
  par(mar=c(5,4,4,2) + 0.1)
  #par(mai=c(2,1.5,1.5,.5)) # alternative, uncomment this and comment the previous line

  # Plot
  plot(x=1:10, y=1:10, type="n", xlab="X", ylab="Y") # type="n" hides the points

  # Place text in the plot and color everything plot-related red
  text(5,5, "Plot", col=hred, cex=2)
  text(5,4, "text(5,5, \"Plot\", col=\"red\", cex=2)", col=hred, cex=1)
  box("plot", col=hred)

  # Place text in the margins and label the margins, all in green
  mtext("Figure", side=3, line=2, cex=2, col=hgreen)
  mtext("par(mar=c(5,4,4,2) + 0.1)", side=3, line=1, cex=1, col=hgreen)
  mtext("Line 0", side=3, line=0, adj=1.0, cex=1, col=hgreen)
  mtext("Line 1", side=3, line=1, adj=1.0, cex=1, col=hgreen)
  mtext("Line 2", side=3, line=2, adj=1.0, cex=1, col=hgreen)
  mtext("Line 3", side=3, line=3, adj=1.0, cex=1, col=hgreen)
  mtext("Line 0", side=2, line=0, adj=1.0, cex=1, col=hgreen)
  mtext("Line 1", side=2, line=1, adj=1.0, cex=1, col=hgreen)
  mtext("Line 2", side=2, line=2, adj=1.0, cex=1, col=hgreen)
  mtext("Line 3", side=2, line=3, adj=1.0, cex=1, col=hgreen)
  box("figure", col=hgreen)

  # Label the outer margin area and color it orange
  # Note the 'outer=TRUE' command moves us from the figure margins to the outer
  # margins.
  mtext("Outer Margin Area", side=1, line=1, cex=2, col=horange, outer=TRUE)
  mtext("par(oma=c(3,3,3,3))", side=1, line=2, cex=1, col=horange, outer=TRUE)
  mtext("Line 0", side=1, line=0, adj=0.0, cex=1, col=horange, outer=TRUE)
  mtext("Line 1", side=1, line=1, adj=0.0, cex=1, col=horange, outer=TRUE)
  mtext("Line 2", side=1, line=2, adj=0.0, cex=1, col=horange, outer=TRUE)
  box("outer", col=horange)

  usr <- par("usr")
  # inner <- par("inner")
  fig <- par("fig")
  plt <- par("plt")
  # text("Figure", x=fig, y=ycoord, adj = c(1, 0))

  # label the bottom-right corner of the figure region
  # (the string "Inner" binds to 'labels' since x and y are given by name)
  text("Inner", x=usr[2] + (usr[2] - usr[1])/(plt[2] - plt[1]) * (1 - plt[2]),
       y=usr[3] - diff(usr[3:4])/diff(plt[3:4]) * (plt[3]), adj = c(1, 0))
  #text("Plot", x=usr[1], y=usr[2], adj = c(0, 1))

  # mark the corners of the figure region (black) and plot region (red)
  figusrx <- grconvertX(usr[c(1,2)], to="nfc")
  figusry <- grconvertY(usr[c(3,4)], to="nfc")
  points(x=figusrx[c(1,1,2,2)], y=figusry[c(3,4,3,4)], pch=15, cex=3, xpd=NA)
  points(x=usr[c(1,1,2,2)], y=usr[c(3,4,3,4)], pch=15, col=hred, cex=2, xpd=NA)

  # show the horizontal extent of the plot region ("pin[1]")
  arrows(x0 = par("usr")[1], 8, par("usr")[2], 8, col="black", cex=2, code=3, angle = 15, length = .2)
  text(x = mean(par("usr")[1:2]), y=8.2, labels = "pin[1]", adj=c(0.5, 0))
}
Mar <- function(bottom = NULL, left = NULL, top = NULL, right = NULL, outer = FALSE) {

  # Set selected figure margins ('mar') or, with outer = TRUE, outer
  # margins ('oma'); any side left NULL keeps its current value.
  # Returns the previous settings invisibly, as par() does.

  pname <- if (outer) "oma" else "mar"
  cur <- par(pname)

  # merge the supplied sides with the current settings
  m <- c(if (is.null(bottom)) cur[1] else bottom,
         if (is.null(left))   cur[2] else left,
         if (is.null(top))    cur[3] else top,
         if (is.null(right))  cur[4] else right)

  res <- if (outer) par(oma = m) else par(mar = m)
  invisible(res)
}
Xplore <- function (x) {

  # Interactively explore the data.frame 'x' using the 'manipulate' package
  # (available in RStudio): pick variables and plot options from UI
  # controls; the matching plot/Desc command is assembled as a text string
  # and evaluated on every change.

  .PrepCmd <- function(xvar, yvar, data, dcol, col, dpch, pch, alpha, cex, grid, smooth, desc, show) {
    # assemble the command string for the current control settings
    if(desc){
      # "Describe" button pressed: build a Desc() call instead of a plot
      if(yvar == "none"){
        s <- gettextf("Desc(%s$%s, plotit=FALSE)", deparse(substitute(data)), xvar)
      } else {
        s <- gettextf("Desc(%s ~ %s, data=%s, plotit=FALSE)", yvar, xvar, deparse(substitute(data)))
      }
    } else {
      if(xvar=="none" & yvar == "none"){
        # nothing selected yet: just an empty canvas
        s <- "Canvas()"
      } else if (yvar == "none") {
        # only x selected: univariate description plot
        s <- gettextf("PlotDesc(%s$%s, na.rm=TRUE)",
                      deparse(substitute(data)), xvar)
      } else {
        # x and y selected: scatterplot, with optional col/pch/cex clauses
        s <- gettextf("plot(%s ~ %s, data=%s", yvar,
                      xvar, deparse(substitute(data)))
        if (!is.na(dcol)) {
          s <- paste(s, gettextf(", col=as.numeric(%s)", dcol))
        } else if (!is.na(col)) {
          s <- paste(s, gettextf(", col=SetAlpha('%s', %s)", col, alpha))
        }
        if (!is.na(dpch)) {
          s <- paste(s, gettextf(", pch=as.numeric(%s)", dpch))
        } else if (!is.na(pch)) {
          s <- paste(s, gettextf(", pch=as.numeric(%s)", pch))
        }
        if (!is.na(cex)) {
          s <- paste(s, gettextf(", cex=as.numeric(%s)", cex))
        }
        s <- paste(s, ")")
      }
      # "Print command" button: echo the assembled command to the console
      if (show)
        cat(s, "\n")
    }
    if(grid) s <- paste(s, ";grid()")
    if (!is.na(smooth)) {
      # append the chosen smoother as a second command
      scmd <- ""
      if(smooth == "linear"){
        scmd <- gettextf("lines(lm(%s ~ %s, data=%s))", yvar,
                         xvar, deparse(substitute(data)))
      } else if(smooth == "loess"){
        scmd <- gettextf("lines(loess(%s ~ %s, data=%s))", yvar,
                         xvar, deparse(substitute(data)))
      }
      s <- paste(s, ";", scmd)
    }
    return(s)
  }

  if (requireNamespace("manipulate", quietly = FALSE)){

    # define the variables here, as the Rcmd check at CRAN would otherwise
    # report: Explore: no visible binding for global variable 'xvar'
    xvar <- character()
    yvar <- character()
    dcol <- character()
    dpch <- character()
    col <- character()
    pch <- character()
    alpha <- character()
    cex <- character()
    desc <- logical()
    show <- logical()

    # control choices: all variables; only non-numeric ones for the
    # data-driven color/pch mappings
    variables <- c("none", as.list(names(x)))
    snames <- c(none = NA, as.list(names(x)[!sapply(x, IsNumeric)]))
    cols <- as.list(colors())
    smoothers <- as.list(c("none", "loess", "linear", "spline"))

    manipulate::manipulate({
      eval(parse(text = .PrepCmd(xvar, yvar, x, dcol, col, dpch, pch, alpha, cex, grid, smooth, desc, show)))
    },
    yvar = manipulate::picker(variables, initial = "none", label = "y-variable "),
    xvar = manipulate::picker(variables, initial = "none", label = "x-variable "),
    dcol = manipulate::picker(snames, initial = "none", label = "data color "),
    col = manipulate::picker(cols, initial = "black", label = "color "),
    dpch = manipulate::picker(snames, initial = "none", label = "data point character"),
    pch = manipulate::picker(as.list(as.character(1:25)), initial = "1", label = "point character"),
    alpha = manipulate::slider(min=0, max = 1, step = 0.1, ticks = TRUE, initial = 1, label = "transparency"),
    cex = manipulate::slider(min=0.1, max = 5, step = 0.1, ticks = TRUE, initial = 1, label = "point character extension"),
    grid = manipulate::checkbox(initial = FALSE, label = "grid"),
    smooth = manipulate::picker(smoothers, initial = "none", label = "smoother "),
    desc = manipulate::button("Describe"),
    show = manipulate::button("Print command")
    )
  }
}
###
# PlotTools *************************************
## graphics: base ====
lines.loess <- function(x, col = Pal()[1], lwd = 2, lty = "solid", type = "l", n = 100
                        , conf.level = 0.95, args.band = NULL, ...){

  # Add a fitted loess smoother to an existing plot, together with a
  # pointwise confidence band (suppress the band with conf.level = NA).
  # Band styling can be overridden via the list 'args.band'.

  # evaluation grid spanning the observed x range
  grid.x <- seq(from = min(x$x, na.rm=TRUE), to = max(x$x, na.rm=TRUE), length = n)
  pred <- predict(x, newdata = grid.x, se = !is.na(conf.level))

  if (!is.na(conf.level)) {
    # default band style, entries overridable by the user
    band.args <- list(col = SetAlpha(col, 0.30), border = NA)
    if (!is.null(args.band))
      band.args[names(args.band)] <- args.band

    # normal-approximation confidence limits around the fit
    z <- qnorm((1 - conf.level)/2)
    lwr.ci <- pred$fit + pred$se.fit * z
    upr.ci <- pred$fit - pred$se.fit * z

    # draw the band first so the smoother is plotted on top of it
    do.call("DrawBand", c(band.args,
                          list(x = c(grid.x, rev(grid.x))),
                          list(y = c(lwr.ci, rev(upr.ci)))))

    # keep only the fitted values for the line below
    pred <- pred$fit
  }

  lines(y = pred, x = grid.x, col = col, lwd = lwd, lty = lty, type = type)
}
lines.SmoothSpline <- function (x, col = Pal()[1], lwd = 2, lty = "solid",
                                type = "l", conf.level = 0.95, args.band = NULL,
                                ...) {
  # SmoothSpline objects are drawn exactly like smooth.spline objects:
  # delegate with all arguments passed through unchanged.
  lines.smooth.spline(x = x, col = col, lwd = lwd, lty = lty, type = type,
                      conf.level = conf.level, args.band = args.band, ...)
}
lines.smooth.spline <- function (x, col = Pal()[1], lwd = 2, lty = "solid",
                                 type = "l", conf.level = 0.95, args.band = NULL,
                                 ...) {

  # Add a fitted smoothing spline to an existing plot, together with a
  # pointwise confidence band (suppress the band with conf.level = NA).
  # Band styling can be overridden via the list 'args.band'.

  # predict at the original x values of the fit
  # newx <- seq(from = min(x$x, na.rm = TRUE), to = max(x$x, na.rm = TRUE), length = n)
  newx <- x$x
  fit <- predict(x, newdata = newx)

  if (!is.na(conf.level)) {
    # default band style, entries overridable by the user
    args.band1 <- list(col = SetAlpha(col, 0.3), border = NA)
    if (!is.null(args.band))
      args.band1[names(args.band)] <- args.band

    res <- (x$yin - x$y)/(1-x$lev)   # jackknife residuals
    sigma <- sqrt(var(res))          # estimate sd

    # NOTE(review): qnorm((1-conf.level)/2) is negative, so 'upr.ci' is in
    # fact the lower edge and 'lwr.ci' the upper one; harmless here, since
    # both edges are passed to the band symmetrically
    upr.ci <- fit$y + qnorm((1 - conf.level)/2) * sigma * sqrt(x$lev) # conf. band
    lwr.ci <- fit$y - qnorm((1 - conf.level)/2) * sigma * sqrt(x$lev) # conf. band

    # draw the band first so the spline is plotted on top of it
    do.call("DrawBand", c(args.band1, list(x = c(newx, rev(newx))),
                          list(y = c(lwr.ci, rev(upr.ci)))))
  }

  lines(y = fit$y, x = fit$x, col = col, lwd = lwd, lty = lty, type = type)
}
lines.lm <- function (x, col = Pal()[1], lwd = 2, lty = "solid",
                      type = "l", n = 100, conf.level = 0.95, args.cband = NULL,
                      pred.level = NA, args.pband = NULL, ...) {

  # Add the regression line of a simple linear model (exactly one predictor)
  # to an existing plot, optionally with a confidence band (conf.level) and
  # a prediction band (pred.level). A band is suppressed when its level is
  # NA or its args.* argument is NA; styling is overridable via the lists
  # args.cband / args.pband.

  mod <- x$model   # (kept from the original; currently unused)

  # we take simply the second column of the model data.frame to identify the x variable
  # this will crash, if there are several resps and yield nonsense if there is
  # more than one pred,
  # so check for a simple regression model y ~ x (just one resp, just one pred)
  # Note:
  # The following will not work, because predict does not correctly recognise the newdata data.frame:
  #   lines(lm(d.pizza$temperature ~ d.pizza$delivery_min), col=hred, lwd=3)
  # see what happens to the data.frame colnames in: predict(x, newdata=data.frame("d.pizza$delivery_min"=1:20))
  # this predict won't work.
  # always provide data: y ~ x, data
  # this is not a really new problem:
  # http://faustusnotes.wordpress.com/2012/02/16/problems-with-out-of-sample-prediction-using-r/

  # we would only plot lines if there's only one predictor
  pred <- all.vars(formula(x)[[3]])
  if(length(pred) > 1) {
    stop("Can't plot a linear model with more than 1 predictor.")
  }

  # the values of the predictor, and an n-point grid spanning their range
  xpred <- eval(x$call$data)[, pred]
  newx <- data.frame(seq(from = min(xpred, na.rm = TRUE),
                         to = max(xpred, na.rm = TRUE), length = n))
  colnames(newx) <- pred

  fit <- predict(x, newdata = newx)

  # prediction band (wider; for new observations)
  if (!(is.na(pred.level) || identical(args.pband, NA)) ) {
    args.pband1 <- list(col = SetAlpha(col, 0.12), border = NA)
    if (!is.null(args.pband))
      args.pband1[names(args.pband)] <- args.pband
    ci <- predict(x, interval="prediction", newdata=newx, level=pred.level) # prediction band
    do.call("DrawBand", c(args.pband1, list(x = c(unlist(newx), rev(unlist(newx)))),
                          list(y = c(ci[,2], rev(ci[,3])))))
  }

  # confidence band (for the mean response)
  if (!(is.na(conf.level) || identical(args.cband, NA)) ) {
    args.cband1 <- list(col = SetAlpha(col, 0.12), border = NA)
    if (!is.null(args.cband))
      args.cband1[names(args.cband)] <- args.cband
    ci <- predict(x, interval="confidence", newdata=newx, level=conf.level) # confidence band
    do.call("DrawBand", c(args.cband1, list(x = c(unlist(newx), rev(unlist(newx)))),
                          list(y = c(ci[,2], rev(ci[,3])))))
  }

  lines(y = fit, x = unlist(newx), col = col, lwd = lwd, lty = lty,
        type = type)
}
SmoothSpline <- function(x, ...){
  # S3 generic: dispatches to SmoothSpline.default (x/y vectors) or
  # SmoothSpline.formula (y ~ x interface)
  UseMethod("SmoothSpline")
}
SmoothSpline.default <- function (x, y = NULL, w = NULL, df, spar = NULL, cv = FALSE,
                                  all.knots = FALSE, nknots = .nknots.smspl, keep.data = TRUE,
                                  df.offset = 0, penalty = 1, control.spar = list(), tol = 0.000001 *
                                    IQR(x), ...){
  # Thin wrapper that forwards all arguments unchanged to
  # stats::smooth.spline() (a formula interface is provided separately by
  # SmoothSpline.formula).
  # Note: 'df' is forwarded as-is on purpose — when the caller omits it,
  # R's missing-argument propagation lets smooth.spline() see it as
  # missing too, so this forwarding must not be restructured (e.g. with
  # do.call()).
  smooth.spline(x=x, y=y, w=w, df=df, spar=spar, cv=cv,
                all.knots=all.knots, nknots=nknots, keep.data=keep.data,
                df.offset=df.offset, penalty=penalty, control.spar=control.spar, tol=tol)
}
SmoothSpline.formula <- function(formula, data, subset, na.action, ...) {

  # Formula interface for SmoothSpline(): SmoothSpline(y ~ x, data = d, ...).

  # require a two-sided formula with exactly one term on the right-hand side
  if (missing(formula) || (length(formula) != 3L) || (length(attr(terms(formula[-2L]),
                                                                  "term.labels")) != 1L))
    stop("'formula' missing or incorrect")

  # standard non-standard-evaluation idiom (cf. lm()): rebuild the call as a
  # model.frame() call and evaluate it in the caller's frame
  m <- match.call(expand.dots = FALSE)
  if (is.matrix(eval(m$data, parent.frame())))
    m$data <- as.data.frame(data)
  m[[1L]] <- quote(stats::model.frame)
  m$... <- NULL
  mf <- eval(m, parent.frame())

  # column index of the response within the model frame;
  # the remaining column is the predictor
  response <- attr(attr(mf, "terms"), "response")
  SmoothSpline(x=mf[[-response]], y=mf[[response]], ...)
}
ErrBars <- function(from, to = NULL, pos = NULL, mid = NULL, horiz = FALSE, col = par("fg"), lty = par("lty"),
                    lwd = par("lwd"), code = 3, length=0.05,
                    pch = NA, cex.pch = par("cex"), col.pch = par("fg"), bg.pch = par("bg"), ... ) {

  # Draw error bars from 'from' to 'to' at positions 'pos', optionally with
  # a point symbol at 'mid'. Alternatively 'from' can be a kx2 matrix
  # (from, to) or a kx3 matrix (mid, from, to) with 'to' left NULL.

  if (is.null(to)) {
    # matrix form: unpack the columns into mid/from/to
    k <- dim(from)[2]
    if (k %nin% c(2, 3))
      stop("'from' must be a kx2 or a kx3 matrix, when 'to' is not provided.")
    if (k == 2) {
      to   <- from[, 2]
      from <- from[, 1]
    } else {
      mid  <- from[, 1]
      to   <- from[, 3]
      from <- from[, 2]
    }
  }

  # default positions: 1, 2, ..., k
  if (is.null(pos))
    pos <- 1:length(from)

  # the bars themselves: arrows with flat heads (angle = 90)
  if (horiz) {
    arrows(x0 = from, x1 = to, y0 = pos, col = col, lty = lty, lwd = lwd,
           angle = 90, code = code, length = length, ...)
  } else {
    arrows(x0 = pos, y0 = from, y1 = to, col = col, lty = lty, lwd = lwd,
           angle = 90, code = code, length = length, ...)
  }

  if (!is.na(pch)) {
    # default midpoints: centred between the bar ends
    if (is.null(mid))
      mid <- (from + to) / 2
    if (horiz) {
      points(x = mid, y = pos, pch = pch, cex = cex.pch, col = col.pch, bg = bg.pch)
    } else {
      points(x = pos, y = mid, pch = pch, cex = cex.pch, col = col.pch, bg = bg.pch)
    }
  }
}
ColorLegend <- function( x, y=NULL, cols=rev(heat.colors(100)), labels=NULL
                         , width=NULL, height=NULL, horiz=FALSE
                         , xjust=0, yjust=1, inset=0, border=NA, frame=NA
                         , cntrlbl = FALSE
                         , adj=ifelse(horiz,c(0.5,1), c(1,0.5)), cex=1.0, ...){

  # Add a continuous color legend (a strip of color boxes with optional
  # labels) to a plot, either at coordinates (x, y) or at a keyword
  # position ("topright", ...) as accepted by legend().
  #   cntrlbl: centre the first/last label on the outermost color box
  #            instead of placing it at the very edge of the strip

  # positioning code adapted from legend()
  auto <- if (is.character(x))
    match.arg(x, c("bottomright", "bottom", "bottomleft",
                   "left", "topleft", "top", "topright", "right", "center"))
  else NA

  usr <- par("usr")
  # default strip size: 8% of the plot range across, 92% along
  if( is.null(width) ) width <- (usr[2L] - usr[1L]) * ifelse(horiz, 0.92, 0.08)
  if( is.null(height) ) height <- (usr[4L] - usr[3L]) * ifelse(horiz, 0.08, 0.92)

  if (is.na(auto)) {
    left <- x - xjust * width
    top <- y + (1 - yjust) * height
  } else {
    # translate the keyword into left/top coordinates, honoring 'inset'
    inset <- rep(inset, length.out = 2)
    insetx <- inset[1L] * (usr[2L] - usr[1L])
    left <- switch(auto, bottomright = , topright = ,
                   right = usr[2L] - width - insetx, bottomleft = ,
                   left = , topleft = usr[1L] + insetx, bottom = ,
                   top = , center = (usr[1L] + usr[2L] - width)/2)
    insety <- inset[2L] * (usr[4L] - usr[3L])
    top <- switch(auto, bottomright = , bottom = , bottomleft = usr[3L] +
                    height + insety, topleft = , top = , topright = usr[4L] -
                    insety, left = , right = , center = (usr[3L] +
                                                           usr[4L] + height)/2)
  }

  # allow drawing outside the plot region, restore on exit
  xpd <- par(xpd=TRUE); on.exit(par(xpd))

  ncols <- length(cols)
  nlbls <- length(labels)

  if(horiz) {
    # horizontal strip: one rectangle per color, labels below
    rect( xleft=left, xright=left+width/ncols*seq(ncols,0,-1), ytop=top, ybottom=top-height,
          col=rev(cols), border=border)
    if(!is.null(labels)){
      if(cntrlbl) xlbl <- left + width/(2*ncols)+(width-width/ncols)/(nlbls-1) * seq(0,nlbls-1,1)
      else xlbl <- left + width/(nlbls-1) * seq(0,nlbls-1,1)
      text(y=top - (height + max(strheight(labels, cex=cex)) * 1.2)
           # same correction as in the vertical case
           # , x=x+width/(2*ncols)+(width-width/ncols)/(nlbls-1) * seq(0,nlbls-1,1)
           , x=xlbl, labels=labels, adj=adj, cex=cex, ...)
    }
  } else {
    # vertical strip: rectangles stacked top-down, labels to the right
    rect( xleft=left, ybottom=top-height, xright=left+width, ytop=top-height/ncols*seq(0,ncols,1),
          col=rev(cols), border=border)
    if(!is.null(labels)){
      # correction 13.6.:
      # the largest and smallest labels should sit at the very edge of the
      # strip, not at the centre of the outermost color boxes
      # old: , y=y-height/(2*ncols)- (height- height/ncols)/(nlbls-1) * seq(0,nlbls-1,1)
      #, y=y-height/(2*ncols)- (height- height/ncols)/(nlbls-1) * seq(0,nlbls-1,1)
      # 18.4.2015: reverse labels, as the logic below would misplace...
      labels <- rev(labels)
      if(cntrlbl) ylbl <- top - height/(2*ncols) - (height- height/ncols)/(nlbls-1) * seq(0, nlbls-1,1)
      else ylbl <- top - height/(nlbls-1) * seq(0, nlbls-1, 1)
      text(x=left + width + strwidth("0", cex=cex) + max(strwidth(labels, cex=cex)) * adj[1]
           , y=ylbl, labels=labels, adj=adj, cex=cex, ... )
    }
  }

  # optional frame around the whole strip
  if(!is.na(frame)) rect( xleft=left, xright=left+width, ytop=top, ybottom=top-height, border=frame)
}
BubbleLegend <- function(x, y=NULL, area, cols
                         , labels=NULL, cols.lbl = "black"
                         , width = NULL, xjust = 0, yjust = 1, inset=0, border="black", frame=TRUE
                         , adj=c(0.5,0.5), cex=1.0, cex.names=1, bg = NULL, ...){

  # Add a legend of bubbles (circles with the given areas and colors) to a
  # plot, either at coordinates (x, y) or at a keyword position as used by
  # legend(); bubble sizes are derived from 'area' (scaled by cex).

  # positioning code adapted from legend()
  auto <- if(is.character(x))
    match.arg(x, c("bottomright", "bottom", "bottomleft",
                   "left", "topleft", "top", "topright", "right", "center"))
  else NA

  # bubble radii from the areas
  radius <- sqrt((area * cex)/pi)

  usr <- par("usr")
  # default box width: slightly wider than the largest bubble,
  # corrected by the device aspect ratio
  if(is.null(width))
    width <- 2*max(radius) * 1.1 / Asp()

  # if(is.null(asp)) # get aspect ratio from plot w/h
  #   asp <- par("pin")[1]/diff(par("usr")[1:2]) / par("pin")[2]/diff(par("usr")[3:4])

  height <- width * Asp()

  if (is.na(auto)) {
    left <- x - xjust * width
    top <- y + (1 - yjust) * height
  } else {
    # translate the keyword into left/top coordinates, honoring 'inset'
    inset <- rep(inset, length.out = 2)
    insetx <- inset[1L] * (usr[2L] - usr[1L])
    left <- switch(auto, bottomright = , topright = , right = usr[2L] -
                     width - insetx, bottomleft = , left = , topleft = usr[1L] +
                     insetx, bottom = , top = , center = (usr[1L] + usr[2L] -
                                                            width)/2)
    insety <- inset[2L] * (usr[4L] - usr[3L])
    top <- switch(auto, bottomright = , bottom = , bottomleft = usr[3L] +
                    height + insety, topleft = , top = , topright = usr[4L] -
                    insety, left = , right = , center = (usr[3L] + usr[4L] +
                                                           height)/2)
  }

  # allow drawing outside the plot region, restore on exit
  xpd <- par(xpd=TRUE); on.exit(par(xpd))

  # background box behind the bubbles
  if(!is.na(frame))
    rect( xleft=left, ybottom=top-height, xright=left+width, ytop=top,
          col=bg, border=frame)

  # the bubbles, nested so that all of them touch at the top; ellipses are
  # used to compensate the plot's aspect ratio so they appear circular
  # DrawCircle(x = left + width/2, y = (top - height/2) + max(radius) - radius,
  #           r.out = radius, col=cols, border=border)
  DrawEllipse(x = left + width/2, y = top-height/2 + max(radius) - radius,
              radius.x = radius / Asp(), radius.y = radius,
              col = cols, border=border)

  if(!is.null(labels)){
    d <- c(0, 2*radius)
    # place each label at the vertical centre of its bubble's visible band
    # ylbl <- (top - height/2) + max(radius) - diff(d) /2 + d[-length(d)]
    ylbl <- rev((top - height/2) + max(radius) - Midx(rev(2*radius), incl.zero = TRUE))
    text(x=left + width/2, y=ylbl, labels=labels, adj=adj, cex=cex.names, col=cols.lbl, ... )
  }
}
Canvas <- function(xlim=NULL, ylim=xlim, main=NULL, xpd=par("xpd"), mar=c(5.1,5.1,5.1,5.1),
                   asp=1, bg=par("bg"), usrbg="white", ...){

  # Open an empty, unannotated plot region ("canvas") with aspect ratio
  # 'asp', typically used as a base for drawing geometric shapes.
  # A single-number xlim is interpreted symmetrically as c(-xlim, xlim).
  # Returns the previous par settings invisibly so callers can restore them.

  SetPars <- function(...){
    # apply every argument in ... that names a valid par() parameter
    # expand dots
    arg <- unlist(match.call(expand.dots=FALSE)$...)
    # match par arguments
    par.args <- as.list(arg[names(par(no.readonly = TRUE)[names(arg)])])
    # store old values
    old <- par(no.readonly = TRUE)[names(par.args)]
    # set new values
    do.call(par, par.args)
    # return old ones
    invisible(old)
  }

  # default canvas range is [-1, 1] in both directions
  if(is.null(xlim)){
    xlim <- c(-1,1)
    ylim <- xlim
  }
  if(length(xlim)==1) {
    xlim <- c(-xlim,xlim)
    ylim <- xlim
  }

  oldpar <- par("xpd"=xpd, "mar"=mar, "bg"=bg) # ; on.exit(par(usr))
  SetPars(...)

  # empty plot: no points, no axes, no labels, no frame
  plot( NA, NA, xlim=xlim, ylim=ylim, main=main, asp=asp, type="n", xaxt="n", yaxt="n",
        xlab="", ylab="", frame.plot = FALSE, ...)

  # optionally paint the user (plot) region in a different background color
  if(usrbg != "white"){
    usr <- par("usr")
    rect(xleft=usr[1], ybottom=usr[3], xright=usr[2], ytop=usr[4], col=usrbg, border=NA)
  }

  # we might want to reset parameters afterwards
  invisible(oldpar)
}
Midx <- function(x, incl.zero = FALSE, cumulate = FALSE){
  # Midpoints between consecutive values of x (length(x) - 1 values).
  #   incl.zero: prepend a zero to x before computing the midpoints
  #   cumulate:  return the cumulative sum of the midpoints
  if(incl.zero)
    x <- c(0, x)
  # average each pair of neighbouring values directly
  res <- (head(x, -1) + x[-1]) / 2
  if(cumulate)
    res <- cumsum(res)
  return(res)
}
###
## graphics: colors ----
Pal <- function(pal, n=100, alpha=1) {

  # Return a color palette.
  #   pal:   the palette's name, or its index within the known names
  #          ('palnames' below); if missing, the palette set in option
  #          "palette" is returned (default: a reordered "Helsana")
  #   n:     number of colors for the continuous (color-ramp) palettes
  #   alpha: transparency applied to all returned colors (1 = opaque)
  # The result carries the palette name as attribute "name" and gains
  # class "palette" (so print/plot methods apply).

  if(missing(pal)) {
    res <- getOption("palette", default = structure(Pal("Helsana")[c(6,1:5,7:10)] ,
                                                    name = "Helsana", class = c("palette", "character")) )
  } else {

    # known palette names; a numeric 'pal' indexes into this vector
    palnames <- c("RedToBlack","RedBlackGreen","SteeblueWhite","RedWhiteGreen",
                  "RedWhiteBlue0","RedWhiteBlue1","RedWhiteBlue2","RedWhiteBlue3","Helsana","Tibco","RedGreen1",
                  "Spring","Soap","Maiden","Dark","Accent","Pastel","Fragile","Big","Long","Night","Dawn","Noon","Light",
                  # appended (backward compatible: indices 1..24 unchanged) so
                  # these palettes can be selected by index as well:
                  "GrandBudapest","Moonrise1","Royal1","Moonrise2","Cavalcanti","Royal2","GrandBudapest2",
                  "Moonrise3","Chevalier","Zissou","FantasticFox","Darjeeling","Rushmore","BottleRocket","Darjeeling2")
    if(is.numeric(pal)){
      pal <- palnames[pal]
    }

    # the 28 base colors used by "Big"/"Long"/"Night"/"Dawn"/"Noon"/"Light"
    big <- c("#800000", "#C00000", "#FF0000", "#FFC0C0",
             "#008000","#00C000","#00FF00","#C0FFC0",
             "#000080","#0000C0", "#0000FF","#C0C0FF",
             "#808000","#C0C000","#FFFF00","#FFFFC0",
             "#008080","#00C0C0","#00FFFF","#C0FFFF",
             "#800080","#C000C0","#FF00FF","#FFC0FF",
             "#C39004","#FF8000","#FFA858","#FFDCA8")

    switch(pal
           , RedToBlack = res <- colorRampPalette(c("red","yellow","green","blue","black"), space = "rgb")(n)
           , RedBlackGreen = res <- colorRampPalette(c("red", "black", "green"), space = "rgb")(n)
           , SteeblueWhite = res <- colorRampPalette(c("steelblue","white"), space = "rgb")(n)
           , RedWhiteGreen = res <- colorRampPalette(c("red", "white", "green"), space = "rgb")(n)
           , RedWhiteBlue0 = res <- colorRampPalette(c("red", "white", "blue"))(n)
           , RedWhiteBlue1 = res <- colorRampPalette(c("#67001F", "#B2182B", "#D6604D", "#F4A582", "#FDDBC7",
                                                      "#FFFFFF", "#D1E5F0", "#92C5DE", "#4393C3", "#2166AC", "#053061"))(n)
           , RedWhiteBlue2 = res <- colorRampPalette(c("#BB4444", "#EE9988", "#FFFFFF", "#77AADD", "#4477AA"))(n)
           , RedWhiteBlue3 = res <- colorRampPalette(c(hred, "white", hblue))(n)
           , Helsana = res <- c("rot"="#9A0941", "orange"="#F08100", "gelb"="#FED037"
                                , "ecru"="#CAB790", "hellrot"="#D35186", "hellblau"="#8296C4", "hellgruen"="#B3BA12"
                                , "hellgrau"="#CCCCCC", "dunkelgrau"="#666666", "weiss"="#FFFFFF")
           , Tibco = res <- apply( mcol <- matrix(c(
             0,91,0, 0,157,69, 253,1,97, 60,120,177,
             156,205,36, 244,198,7, 254,130,1,
             96,138,138, 178,113,60
           ), ncol=3, byrow=TRUE), 1, function(x) rgb(x[1], x[2], x[3], maxColorValue=255))
           , RedGreen1 = res <- c(rgb(227,0,11, maxColorValue=255), rgb(227,0,11, maxColorValue=255),
                                  rgb(230,56,8, maxColorValue=255), rgb(234,89,1, maxColorValue=255),
                                  rgb(236,103,0, maxColorValue=255), rgb(241,132,0, maxColorValue=255),
                                  rgb(245,158,0, maxColorValue=255), rgb(251,184,0, maxColorValue=255),
                                  rgb(253,195,0, maxColorValue=255), rgb(255,217,0, maxColorValue=255),
                                  rgb(203,198,57, maxColorValue=255), rgb(150,172,98, maxColorValue=255),
                                  rgb(118,147,108, maxColorValue=255))
           , Spring = res <- c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3","#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#999999")
           , Soap = res <- c("#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3","#A6D854", "#FFD92F", "#E5C494", "#B3B3B3")
           , Maiden = res <- c("#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072","#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5", "#D9D9D9","#BC80BD","#CCEBC5")
           , Dark = res <- c("#1B9E77", "#D95F02", "#7570B3", "#E7298A","#66A61E", "#E6AB02", "#A6761D", "#666666")
           , Accent = res <- c("#7FC97F", "#BEAED4", "#FDC086", "#FFFF99","#386CB0", "#F0027F", "#BF5B17", "#666666")
           , Pastel = res <- c("#FBB4AE", "#B3CDE3", "#CCEBC5", "#DECBE4","#FED9A6", "#FFFFCC", "#E5D8BD", "#FDDAEC", "#F2F2F2")
           , Fragile = res <- c("#B3E2CD", "#FDCDAC", "#CBD5E8", "#F4CAE4","#E6F5C9", "#FFF2AE", "#F1E2CC", "#CCCCCC")
           , Big = res <- big
           , Long = res <- big[c(12,16,25,24,
                                 2,11,6,15,18,26,23,
                                 3,10,7,14,19,27,22,
                                 4,8,20,28)]
           , Night = res <- big[seq(1, 28, by=4)]
           , Dawn = res <- big[seq(2, 28, by=4)]
           , Noon = res <- big[seq(3, 28, by=4)]
           , Light = res <- big[seq(4, 28, by=4)]
           # fix: this branch used '<' (comparison) instead of '<-', leaving
           # 'res' undefined, so Pal("GrandBudapest") raised an error
           , GrandBudapest = res <- c("#F1BB7B", "#FD6467", "#5B1A18", "#D67236")
           , Moonrise1 = res <- c("#F3DF6C", "#CEAB07", "#D5D5D3", "#24281A")
           , Royal1 = res <- c("#899DA4", "#C93312", "#FAEFD1", "#DC863B")
           , Moonrise2 = res <- c("#798E87","#C27D38", "#CCC591", "#29211F")
           , Cavalcanti = res <- c("#D8B70A", "#02401B","#A2A475", "#81A88D", "#972D15")
           , Royal2 = res <- c("#9A8822", "#F5CDB4", "#F8AFA8", "#FDDDA0", "#74A089")
           , GrandBudapest2 = res <- c("#E6A0C4", "#C6CDF7", "#D8A499", "#7294D4")
           , Moonrise3 = res <- c("#85D4E3", "#F4B5BD", "#9C964A", "#CDC08C", "#FAD77B")
           , Chevalier = res <- c("#446455", "#FDD262", "#D3DDDC", "#C7B19C")
           , Zissou = res <- c("#3B9AB2", "#78B7C5", "#EBCC2A", "#E1AF00", "#F21A00")
           , FantasticFox = res <- c("#DD8D29", "#E2D200", "#46ACC8", "#E58601", "#B40F20")
           , Darjeeling = res <- c("#FF0000", "#00A08A", "#F2AD00", "#F98400", "#5BBCD6")
           , Rushmore = res <- c("#E1BD6D", "#EABE94", "#0B775E", "#35274A", "#F2300F")
           , BottleRocket = res <- c("#A42820", "#5F5647", "#9B110E", "#3F5151", "#4E2A1E", "#550307", "#0C1707")
           , Darjeeling2 = res <- c("#ECCBAE", "#046C9A", "#D69C4E", "#ABDDDE", "#000000")
    )

    attr(res, "name") <- pal
    class(res) <- append(class(res), "palette")
  }

  if(alpha != 1)
    res <- SetAlpha(res, alpha = alpha)

  return(res)
}
print.palette <- function(x, ...){
  # print method for palette objects: the palette's name on the first
  # line, its colors on the second
  for (part in list(attr(x, "name"), x))
    cat(part, "\n")
}
plot.palette <- function(x, cex = 3, ...) {

  # Plot method for palette objects: one row per color, shown fully opaque
  # and additionally at alpha levels 0.8/0.6/0.4/0.2, with index/name labels.

  # # use new window, but store active device if already existing
  # if( ! is.null(dev.list()) ){
  #   curwin <- dev.cur()
  #   on.exit( {
  #     dev.set(curwin)
  #     par(oldpar)
  #   }
  #   )
  # }
  # windows(width=3, height=2.5, xpos=100, ypos=600)

  # margin-free canvas; restore par on exit
  oldpar <- par(mar=c(0,0,0,0), mex=0.001, xaxt="n", yaxt="n", ann=FALSE, xpd=NA)
  on.exit(par(oldpar))

  palname <- Coalesce(attr(x, "name"), "no name")

  n <- length(x)
  x <- rev(x)   # draw bottom-up so the first color ends on top

  # leftmost column: the fully opaque colors
  plot( x=rep(1, n), y=1:n, pch=22, cex=cex, col="grey60", bg=x, xlab="", ylab="", axes=FALSE,
        frame.plot=FALSE, ylim=c(0, n + 2), xlim=c(0.8, n))

  # header: title, "alpha" caption and the alpha column labels
  text( x=4.5, y=n + 1.2, labels="alpha", adj=c(0,0.5), cex=0.8)
  text( x=0.8, y=n + 2.0, labels=gettextf("\"%s\" Palette colors", palname), adj=c(0,0.5), cex=1.2)
  text( x=c(1,2.75,3.25,3.75,4.25), y= n +1.2, adj=c(0.5,0.5), labels=c("1.0", 0.8, 0.6, 0.4, 0.2), cex=0.8 )
  abline(h=n+0.9, col="grey")

  palnames <- paste(n:1, names(x))
  sapply(1:n, function(i){
    # semi-transparent variants of color i plus its index/name label
    xx <- c(2.75, 3.25, 3.75, 4.25)
    yy <- rep(i, 4)
    points(x=xx, y=yy, pch=22, cex=cex, col="grey60", bg=SetAlpha(x[i], alpha=c(0.8, 0.6, 0.4, 0.2)))
    text(x=1.25, y=i, adj=c(0,0.5), cex=0.8, labels=palnames[i])
  })

  invisible()

  # points( x=rep(2.75,7), y=1:7, pch=15, cex=2, col=hc(7:1, alpha=0.8) )
  # points( x=rep(3.25,7), y=1:7, pch=15, cex=2, col=hc(7:1, alpha=0.6) )
  # points( x=rep(3.75,7), y=1:7, pch=15, cex=2, col=hc(7:1, alpha=0.4) )
  # points( x=rep(4.25,7), y=1:7, pch=15, cex=2, col=hc(7:1, alpha=0.2) )
}
# example:
# barplot(1:7, col=SetAlpha(PalHelsana[c("ecru","hellgruen","hellblau")], 1) )
###
## geometric primitives ====
Stamp <- function(txt=NULL, las=par("las"), cex=0.6) {

  # Stamp the current plot in the bottom-right corner of the figure region
  # with a text, by default today's date. The default text can be
  # configured via the DescTools option "stamp", e.g.:
  #   options(stamp=expression("gettextf('%s/%s', Sys.getenv('USERNAME'),
  #                             Format(Today(), fmt='yyyy-mm-dd')))")
  # If the option is an expression, it is evaluated at plotting time.
  #   las = 3 prints the stamp vertically, any other value horizontally.

  stamp <- function(x) {
    # opar <- par(yaxt='s', xaxt='s', xpd=TRUE)
    opar <- par(yaxt='s', xaxt='s', xpd=NA)
    on.exit(par(opar))

    plt <- par('plt')
    usr <- par('usr')

    ## when a logarithmic scale is in use (i.e. par('xlog') is true),
    ## then the x-limits would be 10^par('usr')[1:2]. Similarly for
    ## the y axis

    # bottom-right corner of the figure region in user coordinates,
    # inset by roughly one character
    xcoord <- usr[2] + (usr[2] - usr[1])/(plt[2] - plt[1]) *
      (1-plt[2]) - cex*strwidth('m')
    ycoord <- usr[3] - diff(usr[3:4])/diff(plt[3:4])*(plt[3]) +
      cex*strheight('m')

    if(par('xlog')) xcoord <- 10^(xcoord)
    if(par('ylog')) ycoord <- 10^(ycoord)

    # las = 3: rotate the stamp by 90 degrees
    if(las==3){
      srt <- 90
      adj <- 0
    } else {
      srt <- 0
      adj <- 1
    }

    ## Print the text on the current plot
    text(xcoord, ycoord, x, adj=adj, srt=srt, cex=cex)

    invisible(x)
  }

  if(is.null(txt)) {
    # get the option
    txt <- DescToolsOptions("stamp")
    if(is.null(txt)){
      # fall back to today's date in ISO format
      txt <- format(Sys.time(), '%Y-%m-%d')
    } else {
      # an expression set as option is evaluated now
      if(is.expression(txt)){
        txt <- eval(parse(text = txt))
      }
    }
  }

  invisible(stamp(txt))
}
BoxedText <- function(x, y = NULL, labels = seq_along(x), adj = NULL,
                      pos = NULL, offset = 0.5, vfont = NULL,
                      cex = 1, txt.col = NULL, font = NULL, srt = 0, xpad = 0.2, ypad=0.2,
                      density = NULL, angle = 45,
                      col = "white", border = par("fg"), lty = par("lty"), lwd = par("lwd"), ...) {

  # Draw text with a background box behind each label; the box rotates with
  # the text (srt). xpad/ypad give the padding around the text in units of
  # the width/height of "M"; box appearance via col/border/lty/lwd and the
  # shading arguments density/angle. All arguments are recycled.

  .BoxedText <- function(x, y = NULL, labels = seq_along(x), adj = NULL,
                         pos = NA, offset = 0.5, vfont = NULL,
                         cex = 1, txt.col = NULL, font = NULL, srt = 0, xpad = 0.2, ypad=0.2,
                         density = NULL, angle = 45,
                         col = "white", border = NULL, lty = par("lty"), lwd = par("lwd"), ...) {

    # scalar worker: draws one box and its label
    if(is.na(pos)) pos <- NULL # we have to change default NULL to NA to be able to repeat it
    if(is.na(vfont)) vfont <- NULL

    # text extent plus padding defines the (unrotated) box corners
    w <- strwidth(labels, cex=cex, font=font, vfont=vfont)
    h <- strheight(labels, cex=cex, font=font, vfont=vfont)

    if(length(adj) == 1) adj <- c(adj, 0.5)

    xl <- x - adj[1] * w - strwidth("M", cex=cex, font=font, vfont=vfont) * xpad
    xr <- xl + w + 2*strwidth("M", cex=cex, font=font, vfont=vfont) * xpad
    yb <- y - adj[2] * h - strheight("M", cex=cex, font=font, vfont=vfont) * ypad
    yt <- yb + h + 2*strheight("M", cex=cex, font=font, vfont=vfont) * ypad

    # rotate the box around the text anchor by srt degrees, then draw
    # box first, text on top
    xy <- Rotate(x=c(xl,xl,xr,xr), y=c(yb,yt,yt,yb), mx=x, my=y, theta=DegToRad(srt))
    polygon(x=xy$x, y=xy$y, col=col, density=density, angle=angle, border=border, lty=lty, lwd=lwd, ...)
    text(x=x, y=y, labels=labels, adj=adj, pos=pos, offset=offset, vfont=vfont, cex=cex, col=txt.col, font=font, srt=srt)
  }

  # normalize adj to a length-2 vector
  if(is.null(adj))
    adj <- c(0.5, 0.5)
  else
    adj <- rep(adj, length.out=2)

  if (is.null(pos)) pos <- NA
  if (is.null(vfont)) vfont <- NA
  if (is.null(txt.col)) txt.col <- par("fg")
  if (is.null(font)) font <- 1
  if (is.null(density)) density <- NA

  # recycle arguments:
  # which parameter has the highest dimension
  # attention: we cannot repeat NULLs but we can repeat NAs, so we swap NULLs to NAs and
  # reset them to NULL above (inside .BoxedText)
  lst <- list(x=x, y=y, labels=labels, pos=pos, offset=offset, vfont=vfont,
              cex=cex, txt.col=txt.col, font=font, srt=srt, xpad=xpad, ypad=ypad,
              density=density, angle=angle, col=col, border=border, lty=lty, lwd=lwd)
  maxdim <- max(unlist(lapply(lst, length)))
  # recycle all params to maxdim
  lgp <- lapply(lst, rep, length.out=maxdim )
  lgp$adj <- as.list(data.frame(replicate(adj, n=maxdim)))

  # draw each label with its own set of (recycled) parameters
  for( i in 1:maxdim){
    .BoxedText(
      x=lgp$x[i], y=lgp$y[i], labels=lgp$labels[i], adj=lgp$adj[[i]], pos=lgp$pos[i], offset=lgp$offset[i]
      , vfont=lgp$vfont[i], cex=lgp$cex[i], txt.col=lgp$txt.col[i], font=lgp$font[i]
      , srt=lgp$srt[i], xpad=lgp$xpad[i], ypad=lgp$ypad[i], density=lgp$density[i]
      , angle=lgp$angle[i], col=lgp$col[i], border=lgp$border[i], lty=lgp$lty[i], lwd=lgp$lwd[i] )
  }
}
DrawBezier <- function (x = 0, y = x, nv = 100, col = par("col"), lty = par("lty")
                        , lwd = par("lwd"), plot = TRUE ) {

  # Evaluate (and, if plot = TRUE, draw) a Bezier curve defined by the
  # control points (x, y), sampled at nv points. The coordinates are
  # returned invisibly as a list with components x and y.

  if (missing(y)) {
    # a single list-like argument holds both coordinate vectors
    y <- x[[2]]
    x <- x[[1]]
  }

  n <- length(x)
  tseq <- seq(0, 1, length = nv)
  bx <- by <- single(nv)

  # the curve starts and ends exactly at the first/last control point
  bx[1] <- x[1]; bx[nv] <- x[n]
  by[1] <- y[1]; by[nv] <- y[n]

  for (i in 2:(nv - 1)) {
    tt <- tseq[i]
    # accumulate the Bernstein-weighted control points; 'w' runs through
    # the Bernstein basis values B(j, n-1, tt) via the recurrence
    w <- (1 - tt)^(n - 1)
    px <- py <- 0
    for (j in seq_len(n) - 1) {
      px <- px + w * x[j + 1]
      py <- py + w * y[j + 1]
      w <- w * (n - 1 - j)/(j + 1) * tt/(1 - tt)
    }
    bx[i] <- px
    by[i] <- py
  }

  if (plot)
    lines(x = as.single(bx), y = as.single(by), col = col, lty = lty, lwd = lwd)

  invisible(list(x = as.single(bx), y = as.single(by)))
}
# Draw one or several regular polygons (with large nv: circles/ellipses).
#
# x, y               : centre coordinate(s)
# radius.x, radius.y : horizontal and vertical radius (unequal -> ellipse)
# rot                : rotation angle in radians (may be negative)
# nv                 : number of vertices
# border, col, lty, lwd : usual polygon parameters
# plot               : if FALSE, only compute and return the coordinates
#
# All geometry parameters are recycled to the longest one, so vectors draw
# several polygons at once.
# Returns (invisibly) a list of xy.coords (or a single one when only one
# polygon was produced).
#
# example:
#   plot(c(0,1),c(0,1), asp=1, type="n")
#   DrawRegPolygon( x=0.5, y=0.5, radius.x=seq(0.5,0.1,-0.1), rot=0, nv=3:10, col=2)
#   DrawRegPolygon( x=0.5+1:5*0.05, y=0.5, radius.x=seq(0.5,0.1,-0.1), rot=0, nv=100, col=1:5)
DrawRegPolygon <- function( x = 0, y = x, radius.x = 1, radius.y = radius.x, rot = 0, nv = 3,
                            border = par("fg"), col = par("bg"), lty = par("lty"), lwd = par("lwd"), plot = TRUE ) {

  # which geom parameter has the highest dimension
  lgp <- list(x=x, y=y, radius.x=radius.x, radius.y=radius.y, rot=rot, nv=nv)
  maxdim <- max(unlist(lapply(lgp, length)))
  # recycle all params to maxdim
  lgp <- lapply( lgp, rep, length.out=maxdim )

  # recycle shape properties
  if (length(col) < maxdim) { col <- rep(col, length.out = maxdim) }
  if (length(border) < maxdim) { border <- rep(border, length.out = maxdim) }
  if (length(lwd) < maxdim) { lwd <- rep(lwd, length.out = maxdim) }
  if (length(lty) < maxdim) { lty <- rep(lty, length.out = maxdim) }

  lst <- list() # prepare result
  for (i in 1:maxdim) {
    # vertex angles of the regular polygon
    theta.inc <- 2 * pi / lgp$nv[i]
    theta <- seq(0, 2 * pi - theta.inc, by = theta.inc)
    ptx <- cos(theta) * lgp$radius.x[i] + lgp$x[i]
    pty <- sin(theta) * lgp$radius.y[i] + lgp$y[i]

    # BUG FIX: rotate for any non-zero angle; the former condition
    # `rot > 0` silently ignored negative rotation angles.
    if(lgp$rot[i] != 0){
      # rotate the structure around its centre (x, y)
      dx <- ptx - lgp$x[i]
      dy <- pty - lgp$y[i]
      ptx <- lgp$x[i] + cos(lgp$rot[i]) * dx - sin(lgp$rot[i]) * dy
      pty <- lgp$y[i] + sin(lgp$rot[i]) * dx + cos(lgp$rot[i]) * dy
    }

    if( plot )
      polygon(ptx, pty, border = border[i], col = col[i], lty = lty[i],
              lwd = lwd[i])

    lst[[i]] <- list(x = ptx, y = pty)
  }

  # normalise the result to xy.coords structures
  lst <- lapply(lst, xy.coords)
  if(length(lst)==1)
    lst <- lst[[1]]

  invisible(lst)
}
# Draw circles, annuli, sectors or annulus sectors.
#
# x, y        : centre coordinate(s)
# r.out, r.in : outer and inner radius (r.in > 0 yields an annulus)
# theta.1/.2  : start and end angle in radians (defaults: full circle)
# border, col, lty, lwd : usual polygon parameters
# nv          : number of vertices used to approximate each arc
# plot        : if FALSE, only compute and return the outline coordinates
#
# All arguments are recycled against each other via DescTools::Recycle.
# Returns (invisibly) a list with one x/y coordinate list per shape.
DrawCircle <- function (x = 0, y = x, r.out = 1, r.in = 0, theta.1 = 0,
                        theta.2 = 2 * pi, border = par("fg"), col = NA, lty = par("lty"),
                        lwd = par("lwd"), nv = 100, plot = TRUE) {

  # draw one single circle/annulus (sector); all arguments are scalars here
  DrawSector <- function(x, y, r.in, r.out, theta.1,
                         theta.2, nv, border, col, lty, lwd, plot) {

    # get arc coordinates (outer arc first, inner arc second)
    pts <- DrawArc(x = x, y = y, rx = c(r.out, r.in), ry = c(r.out, r.in),
                   theta.1 = theta.1, theta.2 = theta.2, nv = nv,
                   col = border, lty = lty, lwd = lwd, plot = FALSE)

    is.ring <- (r.in != 0)
    # a sector is present when the two angles do not describe a full turn
    is.sector <- any( ((theta.1-theta.2) %% (2*pi)) != 0)

    if(is.ring || is.sector) {
      # we have an inner and an outer circle: combine the outer arc
      # (forward) with the inner arc (backward) to a closed outline
      ptx <- c(pts[[1]]$x, rev(pts[[2]]$x))
      pty <- c(pts[[1]]$y, rev(pts[[2]]$y))
    } else {
      # no inner circle
      ptx <- pts[[1]]$x
      pty <- pts[[1]]$y
    }

    if (plot) {
      if (is.ring & !is.sector) {
        # closed ring: fill the area without border, then redraw the two
        # circles as lines, to avoid a visible closing segment
        polygon(x = ptx, y = pty, col = col, border = NA,
                lty = lty, lwd = lwd)
        lines(x = pts[[1]]$x, y = pts[[1]]$y, col = border, lty = lty, lwd = lwd)
        lines(x = pts[[2]]$x, y = pts[[2]]$y, col = border, lty = lty, lwd = lwd)
      }
      else {
        polygon(x = ptx, y = pty, col = col, border = border,
                lty = lty, lwd = lwd)
      }
    }
    invisible(list(x = ptx, y = pty))
  }

  # recycle all parameters to the longest one
  lgp <- DescTools::Recycle(x=x, y=y, r.in = r.in, r.out = r.out,
                            theta.1 = theta.1, theta.2 = theta.2, border = border,
                            col = col, lty = lty, lwd = lwd, nv = nv)

  lst <- list()
  for (i in 1L:attr(lgp, "maxdim")) {
    pts <- with(lgp, DrawSector(x=x[i], y=y[i], r.in=r.in[i],
                                r.out=r.out[i], theta.1=theta.1[i],
                                theta.2=theta.2[i], nv=nv[i], border=border[i],
                                col=col[i], lty=lty[i], lwd=lwd[i],
                                plot = plot))
    lst[[i]] <- pts
  }

  invisible(lst)
}
#
# DrawCircle <- function( x = 0, y = x, radius = 1, rot = 0, nv = 100, border = par("fg"), col = par("bg")
# , lty = par("lty"), lwd = par("lwd"), plot = TRUE ) {
# invisible( DrawRegPolygon( x = x, y = y, radius.x=radius, nv=nv, border=border, col=col, lty=lty, lwd=lwd, plot = plot ) )
# }
# Draw one or several ellipses; an ellipse is simply a regular polygon with
# many vertices (nv) and two different radii, so this delegates entirely to
# DrawRegPolygon. See there for the parameter semantics and return value.
DrawEllipse <- function(x = 0, y = x, radius.x = 1, radius.y = 0.5, rot = 0, nv = 100,
                        border = par("fg"), col = par("bg"),
                        lty = par("lty"), lwd = par("lwd"), plot = TRUE) {
  res <- DrawRegPolygon(x = x, y = y,
                        radius.x = radius.x, radius.y = radius.y,
                        nv = nv, rot = rot,
                        border = border, col = col,
                        lty = lty, lwd = lwd, plot = plot)
  invisible(res)
}
# Draw one or several elliptical arcs.
#
# x, y     : centre coordinate(s)
# rx, ry   : x- and y-radius of the arc (equal values give a circular arc)
# theta.1/.2 : start and end angle in radians; a negative span is wrapped
#              around the full circle
# nv       : number of vertices used to approximate each arc
# col, lty, lwd : usual line parameters
# plot     : if FALSE, only compute and return the arc coordinates
#
# All arguments are recycled against each other via DescTools::Recycle.
# Returns (invisibly) a list with one x/y coordinate list per arc.
DrawArc <- function (x = 0, y = x, rx = 1, ry = rx, theta.1 = 0,
                     theta.2 = 2*pi, nv = 100, col = par("col"), lty = par("lty"),
                     lwd = par("lwd"), plot = TRUE) {

  # recycle all params to maxdim
  lgp <- DescTools::Recycle(x=x, y=y, rx = rx, ry = ry,
                            theta.1 = theta.1, theta.2 = theta.2, nv = nv,
                            col=col, lty=lty, lwd=lwd)

  lst <- list()
  for (i in 1L:attr(lgp, "maxdim")) {
    # angular span; wrap negative spans around the full circle
    dthetha <- lgp$theta.2[i] - lgp$theta.1[i]
    theta <- seq(from = 0,
                 to = ifelse(dthetha < 0, dthetha + 2 * pi, dthetha),
                 length.out = lgp$nv[i]) + lgp$theta.1[i]
    # parametric form of the ellipse
    ptx <- (cos(theta) * lgp$rx[i] + lgp$x[i])
    pty <- (sin(theta) * lgp$ry[i] + lgp$y[i])
    if (plot) {
      lines(ptx, pty, col = lgp$col[i], lty = lgp$lty[i], lwd = lgp$lwd[i])
    }
    lst[[i]] <- list(x = ptx, y = pty)
  }
  invisible(lst)
}
# replaced by 0.99.18:
#
# DrawArc <- function (x = 0, y = x, radius.x = 1, radius.y = radius.x, angle.beg = 0,
# angle.end = pi, nv = 100, col = par("col"), lty = par("lty"), lwd = par("lwd"), plot = TRUE) {
#
# # which geom parameter has the highest dimension
# lgp <- list(x = x, y = y, radius.x = radius.x, radius.y = radius.y,
# angle.beg = angle.beg, angle.end = angle.end, nv = nv)
# maxdim <- max(unlist(lapply(lgp, length)))
# # recycle all params to maxdim
# lgp <- lapply(lgp, rep, length.out = maxdim)
#
# # recycle shape properties
# if (length(col) < maxdim) {
# col <- rep(col, length.out = maxdim)
# }
# if (length(lwd) < maxdim) {
# lwd <- rep(lwd, length.out = maxdim)
# }
# if (length(lty) < maxdim) {
# lty <- rep(lty, length.out = maxdim)
# }
#
# lst <- list()
# for (i in 1:maxdim) {
# angdif <- lgp$angle.end[i] - lgp$angle.beg[i]
# theta <- seq(from = 0, to = ifelse(angdif < 0, angdif + 2*pi, angdif),
# length.out = lgp$nv[i]) + lgp$angle.beg[i]
# ptx <- (cos(theta) * lgp$radius.x[i] + lgp$x[i])
# pty <- (sin(theta) * lgp$radius.y[i] + lgp$y[i])
# if (plot) {
# lines(ptx, pty, col = col[i], lty = lty[i], lwd = lwd[i])
# }
# lst[[i]] <- list(x = ptx, y = pty)
# }
# invisible(lst)
# }
#
# DrawAnnulusSector <- function (x = 0, y = x, radius.in = 1, radius.out = 2, angle.beg = 0, angle.end = pi
# , nv = 100, border = par("fg"), col = par("bg"), lty = par("lty"), lwd = par("lwd"), plot = TRUE) {
#
# DrawSector <- function(x, y, radius.in, radius.out, angle.beg, angle.end
# , nv, border, col, lty, lwd, plot) {
# # let DrawArc calculate the 2 arcs
# pts <- DrawArc( x=x, y=y, radius.x = c(radius.out, radius.in), radius.y = c(radius.out, radius.in)
# , angle.beg = angle.beg, angle.end = angle.end, nv = nv
# , col = border, lty = lty, lwd = lwd, plot = FALSE )
# # combine the arcs to a annulus sector
# ptx <- c(pts[[1]]$x, rev(pts[[2]]$x))
# pty <- c(pts[[1]]$y, rev(pts[[2]]$y))
# if( plot ) { polygon(x = ptx, y = pty, col = col, border = border, lty = lty, lwd = lwd) }
# invisible(list(x = ptx, y = pty))
# }
#
# # which geom parameter has the highest dimension
# lgp <- list(x = x, y = y, radius.in = radius.in, radius.out = radius.out,
# angle.beg = angle.beg, angle.end = angle.end, nv = nv)
# maxdim <- max(unlist(lapply(lgp, length)))
# # recycle all params to maxdim
# lgp <- lapply(lgp, rep, length.out = maxdim)
#
# # recycle shape properties
# if (length(col) < maxdim) { col <- rep(col, length.out = maxdim) }
# if (length(border) < maxdim) { border <- rep(border, length.out = maxdim) }
# if (length(lwd) < maxdim) { lwd <- rep(lwd, length.out = maxdim) }
# if (length(lty) < maxdim) { lty <- rep(lty, length.out = maxdim) }
#
# # Draw the single sectors
# lst <- list()
# for (i in 1:maxdim) {
# pts <- DrawSector( x = lgp$x[i], y = lgp$y[i], radius.in = lgp$radius.in[i], radius.out = lgp$radius.out[i]
# , angle.beg = lgp$angle.beg[i], angle.end = lgp$angle.end[i], nv = lgp$nv[i]
# , border = border[i], col = col[i], lty = lty[i], lwd = lwd[i], plot = plot )
# lst[[i]] <- pts
# }
# invisible(lst)
#
# }
#
#
# DrawAnnulus <- function (x = 0, y = x, radius.in = 1, radius.out = 2, nv = 100, border = par("fg")
# , col = par("bg"), lty = par("lty"), lwd = par("lwd"), plot = TRUE) {
#
# pts.out <- DrawCircle(x = x, y = y, radius = radius.out, plot = FALSE)
# pts.in <- DrawCircle(x = x, y = y, radius = radius.in, plot = FALSE)
#
# ptx <- c( unlist(lapply(pts.out, "[", "x")), rev(unlist(lapply(pts.in, "[", "x"))) )
# pty <- c( unlist(lapply(pts.out, "[", "y")), rev(unlist(lapply(pts.in, "[", "y"))) )
#
# # we have to use polygon here, because of the transparent hole in the middle..
# # but don't know how to ged rid of the closing line, so draw polygon without border and then redraw circles
# polygon(x = ptx, y = pty, col = col, border = NA, lty = lty, lwd = lwd)
# lapply( pts.out, lines, col=border, lty=lty, lwd=lwd )
# lapply( pts.in, lines, col=border, lty=lty, lwd=lwd )
#
# invisible(list(x = ptx, y = pty))
#
# }
#
# Add a (confidence) band to an existing plot.
#
# x, y : either vectors describing the closed outline of the band, or an
#        n x 2 matrix (lower/upper bound) for one of them combined with a
#        vector or n x 1 matrix for the other
# col, border : fill and border colour of the band polygon
DrawBand <- function(x, y, col = SetAlpha("grey", 0.5), border = NA) {

  # accept matrices but then only n x y
  if(!identical(dim(y), dim(x))){
    x <- as.matrix(x)
    y <- as.matrix(y)
    # duplicate the single column so both have a lower and an upper bound
    if(dim(x)[2] == 1 && dim(y)[2] == 2)
      x <- x[, c(1,1)]
    else if(dim(x)[2] == 2 && dim(y)[2] == 1)
      y <- y[, c(1,1)]
    else
      stop("incompatible dimensions for matrices x and y")
    # closed outline: first column forward, second column backward
    x <- c(x[,1], rev(x[,2]))
    y <- c(y[,1], rev(y[,2]))
  }
  # NOTE(review): when x and y are BOTH n x 2 matrices their dims are
  # identical, so the forward/backward combination above is skipped and the
  # resulting outline is likely wrong -- verify the intended usage.

  # adds a band to a plot, normally used for plotting confidence bands
  polygon(x=x, y=y, col = col, border = border)
}
# Turn a vector of sector angles x (radians) into clockwise from/to borders.
# The cumulative borders are mirrored at 2*pi and shifted by start, so the
# sectors run clockwise beginning at the start angle.
# Returns a data.frame with the columns from and to.
Clockwise <- function(x, start = 0) {
  # sector borders, counter-clockwise, always closed at 2*pi
  brk <- c(0, cumsum(x), 2 * pi)
  # mirror to clockwise orientation and apply the start offset
  cw <- 2 * pi - brk + start
  n <- length(cw)
  data.frame(from = cw[2:n], to = cw[1:(n - 1)])
}
# Rotate the points (x, y) by the angle theta (radians) around the centre
# (mx, my); asp corrects for a non-square aspect ratio. If no centre is
# given, the centroid of the points is used. Coordinates may also be given
# as anything xy.coords() understands (list, matrix, ...).
# Returns the rotated points as an xy.coords structure.
Rotate <- function( x, y=NULL, mx = NULL, my = NULL, theta=pi/3, asp=1 ) {

  pts <- xy.coords(x, y)

  # default rotation centre: the centroid of the points
  if (is.null(mx)) mx <- mean(pts$x)
  if (is.null(my)) my <- mean(pts$y)

  # shift to the origin, apply the (aspect-corrected) rotation, shift back
  dx <- pts$x - mx
  dy <- pts$y - my
  co <- cos(theta)
  si <- sin(theta)

  xy.coords(x = mx + co * dx - si * dy / asp,
            y = my + si * dx * asp + co * dy)
}
# Apply an affine transformation to the points (x, y): first scale, then
# translate, then rotate by theta around the centroid (via Rotate()).
# trans and scale may be scalars or length-2 vectors (x and y component).
# https://reference.wolfram.com/language/ref/ScalingTransform.html
#
# Returns the transformed points as an xy.coords structure.
GeomTrans <- function(x, y=NULL, trans=0, scale=1, theta=0) {

  xy <- xy.coords(x, y)

  trans <- rep_len(trans, length.out=2)
  # BUG FIX: this previously recycled `trans` again instead of `scale`,
  # so the scale argument was silently ignored.
  scale <- rep_len(scale, length.out=2)

  xy$x <- (xy$x * scale[1]) + trans[1]
  xy$y <- (xy$y * scale[2]) + trans[2]

  xy <- Rotate(xy, theta = theta)

  return(xy)
}
# Aspect ratio of the current plot: inches per user unit in x direction
# divided by inches per user unit in y direction. Requires an open device.
Asp <- function() {
  usr <- par("usr")
  pin <- par("pin")
  xscale <- pin[1] / (usr[2] - usr[1])
  yscale <- pin[2] / (usr[4] - usr[3])
  xscale / yscale
}
# Converts margin line positions (as used by mtext(..., line=)) into user
# coordinates on the given side of the plot.
# http://stackoverflow.com/questions/29125019/get-margin-line-locations-mgp-in-user-coordinates
# jbaums
#
# line : margin line number(s)
# side : 1=bottom, 2=left, 3=top, 4=right
LineToUser <- function(line, side) {
  # height of one margin line in inches
  lh <- par('cin')[2] * par('cex') * par('lheight')
  # length of one inch in user coordinates, per direction
  x_off <- diff(grconvertX(0:1, 'inches', 'user'))
  y_off <- diff(grconvertY(0:1, 'inches', 'user'))
  # NOTE: with a numeric `side`, switch() selects by position, so the
  # names `1`..`4` are documentation only
  switch(side,
         `1` = par('usr')[3] - line * y_off * lh,
         `2` = par('usr')[1] - line * x_off * lh,
         `3` = par('usr')[4] + line * y_off * lh,
         `4` = par('usr')[2] + line * x_off * lh,
         stop("side must be 1, 2, 3, or 4", call.=FALSE))
}
# Draw an arrow from (x0, y0) to (x1, y1).
#
# head : head type; 1 uses the plain base arrows() head, values > 1 draw a
#        filled polygon head (2 = triangle, 3 = concave/swallow-tail)
# cex  : size factor for the polygon head
# col, border : fill and border colour of the polygon head
Arrow <- function(x0, y0, x1, y1, col=par("bg"), border = par("fg"), head=1, cex=1, lwd=1, lty=1){

  # build the head polygon at (x, y), rotated by theta (radians)
  ArrowHead <- function(x=0, y=0, type=2, cex=1, theta=0){
    # choose a default size: 1% of the plot width, scaled by cex
    rx <- par("pin")[1] / 100 * cex
    # get aspect ratio for not allowing the arrowhead to lose form
    asp <- Asp()
    # base shape: a triangle, with the y-radius corrected by the aspect ratio
    head <- DrawRegPolygon(x, y, radius.x = rx, radius.y = rx * asp, plot=FALSE)
    if(type==3){
      # insert an extra point to get a concave (swallow-tail) head
      head$x <- append(head$x, head$x[1] - rx, 2)
      head$y <- append(head$y, y, 2)
    }
    # Rotate the head into the direction of the arrow
    head <- Rotate(head, theta=theta, mx=x, my=y, asp = asp)
    # shift the head back so that its tip ends exactly at (x, y)
    head$x <- head$x - rx * cos(theta)
    head$y <- head$y - rx * sin(theta)
    return(head)
  }

  if(head > 1){
    # polygon head: draw the shaft, then the rotated head at the end point
    segments(x0 = x0, y0 = y0, x1 = x1, y1 = y1, lty=lty, lwd=lwd)
    # head angle follows the (aspect-corrected) direction of the segment;
    # the (x0 > x1) term flips the angle for leftward arrows
    head <- ArrowHead(x=x1, y=y1, type=head, cex=cex,
                      theta= (atan((y0-y1) / Asp() /(x0-x1)) + (x0 > x1) * pi))
    polygon(head, col=col, border=border)
  } else {
    # default: plain base-graphics arrow
    arrows(x0 = x0, y0 = y0, x1 = x1, y1 = y1, lty=lty, lwd=lwd)
  }
  invisible()
}
# Spread out a vector of positions (e.g. label coordinates) so that adjacent
# values are at least mindist apart; clustered values are moved
# symmetrically around their mean. Typically used to de-overlap text labels.
#
# x       : numeric positions (NAs are left untouched)
# mindist : minimal distance between two values; defaults to 90% of the
#           tallest string height (requires an open graphics device)
# cex     : character expansion used for the default mindist
#
# Returns x with the non-NA values shifted apart where necessary; the
# original order of x is preserved.
SpreadOut <- function(x, mindist = NULL, cex = 1.0) {

  if(is.null(mindist))
    mindist <- 0.9 * max(strheight(x, "inch", cex = cex))

  # nothing to spread with less than two non-NA values
  if(sum(!is.na(x)) < 2) return(x)

  # work on the sorted non-NA values
  xorder <- order(x)
  goodx <- x[xorder][!is.na(x[xorder])]
  gxlen <- length(goodx)
  start <- end <- gxlen%/%2

  # nicely spread groups of short intervals apart from their mean
  # first pass: walk from the middle towards the beginning
  while(start > 0) {
    # grow the current cluster of too-close values in both directions
    while(end < gxlen && goodx[end+1] - goodx[end] < mindist) end <- end+1
    while(start > 1 && goodx[start] - goodx[start-1] < mindist) start <- start-1
    if(start < end) {
      # centre the cluster on its mean and space its members by mindist
      nsqueezed <- 1+end-start
      newx <- sum(goodx[start:end]) / nsqueezed - mindist * (nsqueezed %/% 2 - (nsqueezed / 2 == nsqueezed %/% 2) * 0.5)
      for(stretch in start:end) {
        goodx[stretch] <- newx
        newx <- newx+mindist
      }
    }
    start <- end <- start-1
  }

  # second pass: walk from the middle towards the end
  start <- end <- length(goodx) %/% 2 + 1
  while(start < gxlen) {
    while(start > 1 && goodx[start] - goodx[start-1] < mindist) start <- start-1
    while(end < gxlen && goodx[end+1] - goodx[end] < mindist) end <- end+1
    if(start < end) {
      nsqueezed <- 1 + end - start
      newx <- sum(goodx[start:end]) / nsqueezed - mindist * (nsqueezed %/% 2 - (nsqueezed / 2 == nsqueezed %/% 2) * 0.5)
      for(stretch in start:end) {
        goodx[stretch] <- newx
        newx <- newx+mindist
      }
    }
    start <- end <- end+1
  }

  # force any remaining short intervals apart
  if(any(diff(goodx) < mindist)) {
    start <- gxlen %/% 2
    while(start > 1) {
      if(goodx[start] - goodx[start-1] < mindist)
        goodx[start-1] <- goodx[start] - mindist
      start <- start-1
    }
    end <- gxlen %/% 2
    while(end < gxlen) {
      if(goodx[end+1] - goodx[end] < mindist)
        goodx[end+1] <- goodx[end]+mindist
      end <- end+1
    }
  }

  # write the spread values back into their original positions
  x[xorder][!is.na(x[xorder])] <- goodx
  return(x)
}
# Place value labels on a barplot.
#
# height : the bar heights (matrix for stacked/beside bars), as passed
#          to barplot()
# b      : the bar midpoints as returned by barplot()
# labels : the labels to place (defaults to the heights)
# beside, horiz : must match the corresponding barplot() arguments
# top    : grouped bars only; TRUE puts labels beyond the bar ends,
#          FALSE centers them within the bars
#
# NOTE(review): the stacked branch uses Midx(), presumably a DescTools
# helper returning cumulative segment midpoints -- verify it is in scope.
BarText <- function(height, b, labels=height, beside = FALSE, horiz = FALSE,
                    cex=par("cex"), adj=NULL, top=TRUE, ...) {

  if(beside){
    if(horiz){
      if(is.null(adj)) adj <- 0
      # label position: just beyond the bar end, or at half height
      if(top)
        x <- height + par("cxy")[1] * cex
      else
        x <- height/2
      text(y=b, x=x, labels=labels, cex=cex, xpd=TRUE, adj=adj, ...)
    } else {
      if(top)
        y <- height + par("cxy")[2] * cex
      else
        y <- height/2
      if(is.null(adj)) adj <- 0.5
      text(x=b, y=y, labels=labels, cex=cex, xpd=TRUE, adj=adj, ...)
    }
    # xpd=TRUE plots the text even if it falls outside the plot region,
    # and par("cxy") gives the size of a typical character in the current
    # user coordinate system.
  } else {
    # stacked bars: labels go to the middle of each bar segment
    if(horiz){
      if(is.null(adj)) adj <- 0.5
      x <- t(apply(height, 2, Midx, incl.zero=TRUE, cumulate=TRUE))
      text(labels=t(labels), x=x, y=b, cex = cex, adj=adj, ...)
    } else {
      if(is.null(adj)) adj <- 0.5
      x <- t(apply(height, 2, Midx, incl.zero=TRUE, cumulate=TRUE))
      text(labels=t(labels), x=b, y=x, cex=cex, adj=adj, ...)
    }
  }
  invisible()
}
# add connection lines to a barplot
# ... are the arguments, passed to barplot
#
# The barplot is expected to have been drawn already with exactly these
# arguments; ConnLines() recomputes its geometry via barplot(..., plot=FALSE)
# and connects the (stacked) bar segment borders of adjacent bars.
#
# xalign = "mar": lines run between the facing margins of adjacent bars,
# xalign = "mid": lines run through the bar midpoints.
ConnLines <- function(..., col = 1, lwd = 1, lty = "solid", xalign = c("mar","mid") ) {

  # recompute the bar midpoints without drawing anything
  b <- barplot(..., plot = FALSE)

  # capture the original barplot arguments; they get evaluated in the
  # caller's frame below
  arg <- unlist(match.call(expand.dots = FALSE)$...)

  if(is.null(arg$horiz)) horiz <- FALSE else horiz <- eval(arg$horiz, parent.frame())
  # debug: print(horiz)

  nr <- nrow(eval(arg[[1]], parent.frame()))   # nrow(height)
  nc <- length(b)
  if(!is.null(nr)) {
    # stacked bars: connect the cumulative segment borders
    tmpcum <- apply(eval(arg[[1]], parent.frame()), 2, cumsum)
    ypos1 <- tmpcum[, -nc]
    ypos2 <- tmpcum[, -1]
  } else {
    # simple bars: connect the bar tops
    tmpcum <- eval(arg[[1]], parent.frame())
    ypos1 <- tmpcum[-nc]
    ypos2 <- tmpcum[-1]
    nr <- 1
  }

  xalign <- match.arg(xalign)
  if(xalign=="mar"){
    # the midpoints of the bars
    mx <- (b[-1] + b[-length(b)]) / 2
    # bar spacing; barplot's default space is 0.2
    if(is.null(arg$space)) space <- 0.2
    else space <- eval(arg$space, parent.frame())
    # left and right ends of the connecting segments
    lx <- mx - space/2
    rx <- mx + space/2
    xpos1 <- rep(lx, rep(nr, length(lx)))
    xpos2 <- rep(rx, rep(nr, length(rx)))

    # for horizontal bars the coordinate roles are swapped
    if(horiz == FALSE)
      segments(xpos1, ypos1, xpos2, ypos2, col=col, lwd=lwd, lty=lty)
    else
      segments(ypos1, xpos1, ypos2, xpos2, col=col, lwd=lwd, lty=lty)

  } else if(xalign=="mid") {
    if(horiz == FALSE) {
      if(nr > 1)
        matlines(x=replicate(nr, b), y=t(tmpcum), lty=lty, lwd=lwd, col=col)
      else
        lines(x=b, y=tmpcum, lty=lty, lwd=lwd, col=col)
    } else {
      if(nr > 1)
        matlines(y=replicate(nr, b), x=t(tmpcum), lty=lty, lwd=lwd, col=col)
      else
        lines(y=b, x=tmpcum, lty=lty, lwd=lwd, col=col)
    }
  }
  invisible()
}
# Draw a break marker ("slash", "zigzag" or "gap") on a plot axis.
#
# axis     : which axis (1=bottom, 2=left, 3=top, 4=right)
# breakpos : position of the break in user coordinates (default: slightly
#            inside the lower/left end of the axis)
# pos      : optional coordinate of the axis line itself
# bgcol    : colour used to blank out the axis beneath the marker
# breakcol : colour of the break strokes
# style    : "slash", "zigzag" or "gap"
# brw      : break marker width as a fraction of the plot range
AxisBreak <- function (axis = 1, breakpos = NULL, pos = NA, bgcol = "white",
                       breakcol = "black", style = "slash", brw = 0.02) {

  figxy <- par("usr")
  xaxl <- par("xlog")
  yaxl <- par("ylog")

  # marker extent in user coordinates
  xw <- (figxy[2] - figxy[1]) * brw
  yw <- (figxy[4] - figxy[3]) * brw

  if (!is.na(pos))
    figxy <- rep(pos, 4)

  # default break position: slightly inside the axis start
  if (is.null(breakpos))
    breakpos <- ifelse(axis%%2, figxy[1] + xw * 2, figxy[3] +
                         yw * 2)

  # on log axes, work in log10 units internally
  if (xaxl && (axis == 1 || axis == 3))
    breakpos <- log10(breakpos)
  if (yaxl && (axis == 2 || axis == 4))
    breakpos <- log10(breakpos)

  # bounding box (x1, y1, x2, y2) of the marker; the numeric `axis`
  # makes switch() pick the expression by position
  switch(axis,
         br <- c(breakpos - xw/2, figxy[3] - yw/2, breakpos + xw/2, figxy[3] + yw/2),
         br <- c(figxy[1] - xw/2, breakpos - yw/2, figxy[1] + xw/2, breakpos + yw/2),
         br <- c(breakpos - xw/2, figxy[4] - yw/2, breakpos + xw/2, figxy[4] + yw/2),
         br <- c(figxy[2] - xw/2, breakpos - yw/2, figxy[2] + xw/2, breakpos + yw/2),
         stop("Improper axis specification."))

  # allow drawing outside the plot region
  old.xpd <- par("xpd")
  par(xpd = TRUE)

  # transform back from log10 units for the actual drawing
  if (xaxl)
    br[c(1, 3)] <- 10^br[c(1, 3)]
  if (yaxl)
    br[c(2, 4)] <- 10^br[c(2, 4)]

  if (style == "gap") {
    if (xaxl) {
      figxy[1] <- 10^figxy[1]
      figxy[2] <- 10^figxy[2]
    }
    if (yaxl) {
      figxy[3] <- 10^figxy[3]
      figxy[4] <- 10^figxy[4]
    }
    if (axis == 1 || axis == 3) {
      # blank a vertical band across the whole plot and mark its borders
      rect(breakpos, figxy[3], breakpos + xw, figxy[4],
           col = bgcol, border = bgcol)
      xbegin <- c(breakpos, breakpos + xw)
      ybegin <- c(figxy[3], figxy[3])
      xend <- c(breakpos, breakpos + xw)
      yend <- c(figxy[4], figxy[4])
      if (xaxl) {
        xbegin <- 10^xbegin
        xend <- 10^xend
      }
    }
    else {
      # blank a horizontal band across the whole plot
      # NOTE(review): for log y-axes breakpos stays in log10 units here
      # (no 10^ back-transform as in the x case) -- verify intent.
      rect(figxy[1], breakpos, figxy[2], breakpos + yw,
           col = bgcol, border = bgcol)
      xbegin <- c(figxy[1], figxy[1])
      ybegin <- c(breakpos, breakpos + yw)
      xend <- c(figxy[2], figxy[2])
      yend <- c(breakpos, breakpos + yw)
      if (xaxl) {
        xbegin <- 10^xbegin
        xend <- 10^xend
      }
    }
    par(xpd = TRUE)
  }
  else {
    # blank out the axis under the marker
    rect(br[1], br[2], br[3], br[4], col = bgcol, border = bgcol)
    if (style == "slash") {
      # two parallel slashes across the axis
      if (axis == 1 || axis == 3) {
        xbegin <- c(breakpos - xw, breakpos)
        xend <- c(breakpos, breakpos + xw)
        ybegin <- c(br[2], br[2])
        yend <- c(br[4], br[4])
        if (xaxl) {
          xbegin <- 10^xbegin
          xend <- 10^xend
        }
      }
      else {
        xbegin <- c(br[1], br[1])
        xend <- c(br[3], br[3])
        ybegin <- c(breakpos - yw, breakpos)
        yend <- c(breakpos, breakpos + yw)
        if (yaxl) {
          ybegin <- 10^ybegin
          yend <- 10^yend
        }
      }
    }
    else {
      # "zigzag" style: three strokes forming a Z across the axis
      if (axis == 1 || axis == 3) {
        xbegin <- c(breakpos - xw/2, breakpos - xw/4,
                    breakpos + xw/4)
        xend <- c(breakpos - xw/4, breakpos + xw/4, breakpos +
                    xw/2)
        ybegin <- c(ifelse(yaxl, 10^figxy[3 + (axis ==
                                                 3)], figxy[3 + (axis == 3)]), br[4], br[2])
        yend <- c(br[4], br[2], ifelse(yaxl, 10^figxy[3 +
                                                        (axis == 3)], figxy[3 + (axis == 3)]))
        if (xaxl) {
          xbegin <- 10^xbegin
          xend <- 10^xend
        }
      }
      else {
        xbegin <- c(ifelse(xaxl, 10^figxy[1 + (axis ==
                                                 4)], figxy[1 + (axis == 4)]), br[1], br[3])
        xend <- c(br[1], br[3], ifelse(xaxl, 10^figxy[1 +
                                                        (axis == 4)], figxy[1 + (axis == 4)]))
        ybegin <- c(breakpos - yw/2, breakpos - yw/4,
                    breakpos + yw/4)
        yend <- c(breakpos - yw/4, breakpos + yw/4, breakpos +
                    yw/2)
        if (yaxl) {
          ybegin <- 10^ybegin
          yend <- 10^yend
        }
      }
    }
  }

  # draw the break strokes and switch clipping back on
  segments(xbegin, ybegin, xend, yend, col = breakcol, lty = 1)
  par(xpd = FALSE)
  # NOTE(review): old.xpd is saved above but clipping is reset to FALSE
  # unconditionally instead of to old.xpd -- verify intent.
}
###
## graphics: conversions ====
PolToCart <- function(r, theta) list(x=r*cos(theta), y=r*sin(theta))
# Convert Cartesian coordinates (x, y) to polar (r, theta).
# atan() alone only covers the right half-plane, so points with x < 0 are
# shifted by pi into the correct quadrant.
CartToPol <- function(x, y) {
  ang <- atan(y / x)
  left <- x < 0
  ang[left] <- ang[left] + pi
  list(r = sqrt(x^2 + y^2), theta = ang)
}
# Convert Cartesian coordinates (x, y, z) to spherical (r, theta, phi).
# up = TRUE : theta is the elevation measured up from the xy-plane;
# up = FALSE: theta is the polar (zenith) angle measured from the z-axis.
# Returns a list with components r, theta and phi (c() over list elements
# keeps the result a list).
CartToSph <- function (x, y, z, up = TRUE ) {
  # first reduce (x, y) to the azimuth phi and the xy-plane radius w
  vphi <- CartToPol(x, y)       # x, y -> c( w, phi )
  # then combine w and z into the full radius r and the angle theta
  R <- if (up) {
    CartToPol(vphi$r, z)        # ( w, z, -> r, theta )
  } else {
    CartToPol(z, vphi$r)        # ( z, w, -> r, theta )
  }
  res <- c(R[1], R[2], vphi[2])
  names(res) <- c("r", "theta", "phi")
  return (res)
}
# Convert spherical coordinates (r, theta, phi) to Cartesian (x, y, z);
# the inverse of CartToSph.
# up = TRUE : theta is the elevation measured up from the xy-plane;
# up = FALSE: theta is the polar (zenith) angle measured from the z-axis.
SphToCart <- function (r, theta, phi, up = TRUE) {
  # with elevation angles, convert to the polar (zenith) angle first
  if (up) theta <- pi/2 - theta
  # split r into the z component and the projection w onto the xy-plane
  vz <- PolToCart(r, theta)     # vz$x = z, vz$y = w (radius in the xy-plane)
  # split w into x and y by the azimuth phi
  xy <- PolToCart(vz$y, phi)
  # BUG FIX: y was previously assigned xy$x (the x coordinate) instead of
  # xy$y, so every returned point collapsed onto the plane y = x.
  res <- list(x = xy$x, y = xy$y, z = vz$x)
  return (res)
}
# Convert colour names/numbers into "#RRGGBB" hex strings; when alpha
# differs from 1 an "AA" alpha byte (via DecToHex) is appended.
ColToHex <- function(col, alpha = 1) {
  m <- col2rgb(col)
  # one vectorised sprintf over the three channel rows
  hx <- sprintf("#%02X%02X%02X", m[1, ], m[2, ], m[3, ])
  # keep any names the colour table carried
  names(hx) <- colnames(m)
  # append an alpha channel only when requested (alpha = 1 keeps plain RGB)
  if (alpha != 1)
    hx <- paste(hx, DecToHex(round(alpha * 255, 0)), sep = "")
  return(hx)
}
# Convert "#RRGGBB" hex strings into a 3 x n integer matrix with the
# red/green/blue components in the rows.
# example: HexToRgb(c("#A52A2A","#A52A3B"))
HexToRgb <- function(hex) {
  # parse the three two-digit hex pairs of one colour string, base 16
  parse1 <- function(h)
    strtoi(c(substr(h, 2, 3), substr(h, 4, 5), substr(h, 6, 7)), 16L)
  vapply(hex, parse1, integer(3), USE.NAMES = FALSE)
}
# Find the named R colour closest to the given hex string(s); delegates to
# RgbToCol, which accepts hex input as well.
HexToCol <- function(hexstr, method="rgb", metric="euclidean")
  RgbToCol(hexstr, method=method, metric=metric)
# Find the named R colour (from colors()) closest to a given colour.
#
# col    : either a matrix with the RGB components in the rows, or a vector
#          of "#RRGGBB" hex strings
# method : "rgb" compares in RGB space, "hsv" in HSV space
# metric : distance measure, "euclidean" or "manhattan"
#
# Returns a character vector of colour names.
RgbToCol <- function(col, method="rgb", metric="euclidean") {

  switch( match.arg( arg=method, choices=c("rgb","hsv") )
          , "rgb" = {
            # accepts either a matrix with 3 columns RGB or a hexstr
            if(!is.matrix(col)) {
              # parse "#RRGGBB" strings into RGB triples
              col <- lapply(col, function(x) c(strtoi(substr(x,2,3), 16L), strtoi(substr(x,4,5), 16L), strtoi(substr(x,6,7), 16L)))
              col <- do.call("cbind", col)
            }
            # reference table: RGB values of all named colours
            coltab <- col2rgb(colors())
            # pick the name with minimal distance, per input colour
            switch( match.arg( arg=metric, choices=c("euclidean","manhattan") )
                    , "euclidean" = {
                      colors()[apply(col, 2, function(x) which.min(apply(apply(coltab, 2, "-", x)^2, 2, sum)))]
                    }
                    , "manhattan" = {
                      colors()[apply(col, 2, function(x) which.min(apply(abs(apply(coltab, 2, "-", x)), 2, sum)))]
                    }
            )
          }
          , "hsv" ={
            # accepts either a matrix with 3 columns RGB or a hexstr
            col <- ColToHsv(col)
            # NOTE(review): after ColToHsv() col is always a matrix, so the
            # hex-parsing branch below looks unreachable -- verify intent.
            if(!is.matrix(col)) {
              col <- lapply(col, function(x) c(strtoi(substr(x,2,3), 16L), strtoi(substr(x,4,5), 16L), strtoi(substr(x,6,7), 16L)))
              col <- do.call("cbind", col)
            }
            # reference table: HSV values of all named colours
            coltab <- ColToHsv(colors())
            switch( match.arg( arg=metric, choices=c("euclidean","manhattan") )
                    , "euclidean" = {
                      colors()[apply(col, 2, function(x) which.min(apply(apply(coltab, 2, "-", x)^2, 2, sum)))]
                    }
                    , "manhattan" = {
                      colors()[apply(col, 2, function(x) which.min(apply(abs(apply(coltab, 2, "-", x)), 2, sum)))]
                    }
            )
          }
  )

  # alternative?
  # Identify closest match to a color: plotrix::color.id

  # old:
  #   coltab <- col2rgb(colors())
  #   cdist <- apply(coltab, 2, function(z) sum((z - col)^2))
  #   colors()[which(cdist == min(cdist))]
}
# Pack the columns of a 3 x n RGB matrix into single "long" integers:
# value = R + 256 * G + 65536 * B.
RgbToLong <- function(col) {
  base <- 256
  (c(1, base, base^2) %*% col)[1, ]
}
# example: RgbToLong(ColToRgb(c("green", "limegreen")))
# Inverse of RgbToLong: unpack "long" colour integers into a matrix with
# the R, G and B components in the rows (one column per input value).
LongToRgb <- function(col) {
  unpack <- function(v) c(v %% 256, (v %/% 256) %% 256, (v %/% 256^2) %% 256)
  sapply(col, unpack)
}
# if ever needed...
# '~~> LONG To RGB
# R = Col Mod 256
# G = (Col \ 256) Mod 256
# B = (Col \ 256 \ 256) Mod 256
# ColToDec is col2rgb??
ColToRgb <- function(col, alpha = FALSE) col2rgb(col, alpha)
ColToHsv <- function(col, alpha = FALSE) rgb2hsv(ColToRgb(col, alpha))
# Convert colour(s) to grey scale, using the luminance weights
# 0.3 R + 0.59 G + 0.11 B. Returns "#RRGGBB" hex strings.
ColToGrey <- function(col){
  chan <- col2rgb(col)
  lum <- c(0.3, 0.59, 0.11) %*% chan
  rgb(lum, lum, lum, maxColorValue=255)
}
# Spelling alias for ColToGrey.
ColToGray <- function(col){
  ColToGrey(col)
}
# Add alpha channel to a HexCol
# paste("#00FF00", round(0.3 * 255,0), sep="" )
# Pick a text colour that contrasts well with the given background
# colour(s).
#
# method = "glynn" : Earl F. Glynn's rule (efg, Stowers Institute,
#   http://research.stowers-institute.org/efg/R/Color/Chart): backgrounds
#   whose mean RGB channel exceeds 127 are "light" and get black text,
#   all others get white text.
#     TextContrastColor("white")  -> "black"
#     TextContrastColor("red")    -> "white"
# method = "sonego" : Paolo Sonego's rule (OneRTipaDay): a luminance
#   estimate >= 0.2 selects dark blue text, otherwise light yellow.
TextContrastColor <- function(col, method=c("glynn","sonego")) {
  method <- match.arg(method)
  if (method == "glynn") {
    res <- unname(ifelse(colMeans(col2rgb(col)) > 127, "black", "white"))
  } else {
    # luminance estimate from the red and green channels only
    L <- c(0.2, 0.6, 0) %*% col2rgb(col) / 255
    res <- ifelse(L >= 0.2, "#000060", "#FFFFA0")
  }
  return(res)
}
# Mix two colours channel-wise (including alpha): amount1 is the weight of
# col1, (1 - amount1) the weight of col2. All three arguments are recycled
# against each other; returns "#RRGGBBAA" hex strings.
MixColor <- function (col1, col2, amount1=0.5) {
  # blend one pair of colours with weight w on the first one
  blend <- function(a, b, w) {
    chan <- col2rgb(c(a, b), alpha=TRUE)
    mixed <- w * chan[, 1] + (1 - w) * chan[, 2]
    do.call("rgb", c(as.list(mixed), maxColorValue=255))
  }
  # cbind recycles the arguments to a common length (warnings suppressed
  # for uneven lengths); one row per colour pair
  args <- suppressWarnings(cbind(col1, col2, amount1))
  apply(args, 1, function(z) blend(a=z[1], b=z[2], w=as.numeric(z[3])))
}
# Map numeric values x onto a colour scale: the range [min.x, max.x] is
# divided into length(cols) equal-width bins and each value gets the colour
# of its bin. Values left of the scale yield NA; with rightmost.closed the
# upper limit itself still maps to the last colour.
FindColor <- function(x, cols=rev(heat.colors(100)), min.x=NULL, max.x=NULL,
                      all.inside = FALSE){
  # default limits: the pretty range of the data
  if(is.null(min.x)) min.x <- min(pretty(x))
  if(is.null(max.x)) max.x <- max(pretty(x))
  lim <- range(c(min.x, max.x))
  # breakpoints of the colour bins
  brks <- seq(lim[1], lim[2], length.out = length(cols) + 1)
  idx <- findInterval(x, brks, rightmost.closed = TRUE, all.inside = all.inside)
  # index 0 (below the scale) has no colour
  idx[idx == 0] <- NA
  cols[idx]
}
# Set the alpha (opacity) channel of colour(s); returns "#RRGGBBAA" hex
# strings. col and alpha are recycled against each other. An NA alpha
# leaves the colour without an alpha channel; an NA colour yields NA.
SetAlpha <- function(col, alpha=0.5) {
  # recycle col and alpha to a common length
  if (length(alpha) < length(col)) alpha <- rep(alpha, length.out = length(col))
  if (length(col) < length(alpha)) col <- rep(col, length.out = length(alpha))
  # strip a possibly present alpha channel, keep the "#RRGGBB" part only
  acol <- substr(ColToHex(col), 1, 7)
  # append the new alpha channel as a hex byte where alpha is given
  acol[!is.na(alpha)] <- paste(acol[!is.na(alpha)], DecToHex(round(alpha[!is.na(alpha)]*255,0)), sep="")
  acol[is.na(col)] <- NA
  return(acol)
}
###
PlotDev <- function(fn, type=c("tif", "pdf", "eps", "bmp", "png", "jpg"),
width=NULL, height=NULL, units="cm", res=300, open=TRUE,
compression="lzw",
expr, ...) {
# PlotDev(fn="bar", type="tiff", expr=
# barplot(1:5, col=Pal("Helsana"))
# )
type <- match.arg(type)
# golden ratio
golden <- (1+sqrt(5))/2
if(is.null(width))
width <- 8
if(is.null(height))
height <- width/golden
# check if filename fn contains a path, if not appende getwd()
if(!grepl("/", fn))
fn <- paste(getwd(), fn, sep="/")
switch(type,
"tif" = { fn <- paste(fn, ".tif", sep="")
tiff(filename = fn, width = width, height = height, units=units, res=res,
compression=compression, ...)
}
, "pdf" = { fn <- paste(fn, ".pdf", sep="")
pdf(file=fn, width = width, height = height)
}
, "eps" = { fn <- paste(fn, ".eps", sep="")
postscript(file=fn, width = width, height = height)
}
, "bmp" = { fn <- paste(fn, ".bmp", sep="")
bitmap(file=fn, width = width, height = height, units=units, res=res, ...)
}
, "png" = { fn <- paste(fn, ".png", sep="")
png(filename=fn, width = width, height = height, units=units, res=res, ...)
}
, "jpg" = { fn <- paste(fn, ".jpg", sep="")
jpeg(filename=fn, width = width, height = height, units=units, res=res, ...)
}
)
# http://stackoverflow.com/questions/4692231/r-passing-expression-to-an-inner-function
expr <- deparse(substitute(expr))
eval(parse(text=expr))
dev.off()
cat(gettextf("plot produced:\n %s\n", fn))
if(open)
shell(gettextf("\"%s\"", fn))
}
## plots: PlotBubble ====
PlotBubble <- function(x, ...) {
  # S3 generic for bubble plots; dispatches on the class of x
  UseMethod("PlotBubble")
}
PlotBubble.default <- function(x, y, area, col=NA, cex=1, border=par("fg"), xlim = NULL, ylim=NULL,
                               na.rm = FALSE, ...) {
  # Draw a bubble chart: circles at (x, y) whose *area* is proportional to
  # the 'area' argument (scaled by cex). Arguments are recycled and the
  # bubbles sorted by decreasing area so smaller ones are drawn on top.
  # http://blog.revolutionanalytics.com/2010/11/how-to-make-beautiful-bubble-charts-with-r.html
  # ry is the circle radius in user y-units derived from the requested area
  d.frm <- Sort(as.data.frame(Recycle(x=x, y=y, area=area, col=col, border=border,
                                      ry = sqrt((area * cex)/pi)),
                              stringsAsFactors=FALSE), ord=3, decreasing=TRUE)
  if(na.rm) d.frm <- d.frm[complete.cases(d.frm),]
  # default limits are widened by the radius of the extreme bubbles so no
  # bubble is clipped at the plot border
  # NOTE(review): the radius here divides by pi twice (area*cex/pi, then /pi
  # again inside sqrt), unlike ry above -- possibly a bug; verify
  if(is.null(xlim))
    xlim <- range(pretty( sqrt((area * cex / pi)[c(which.min(d.frm$x), which.max(d.frm$x))] / pi) * c(-1,1) + c(min(d.frm$x),max(d.frm$x)) ))
  if(is.null(ylim))
    ylim <- range(pretty( sqrt((area * cex / pi)[c(which.min(d.frm$y), which.max(d.frm$y))] / pi) * c(-1,1) + c(min(d.frm$y),max(d.frm$y)) ))
  # make sure we see all the bubbles
  plot(x = x, y = y, xlim=xlim, ylim=ylim, type="n", ...)
  # symbols(x=x, y=y, circles=sqrt(area / pi), fg=border, bg=col, inches=inches, add=TRUE)
  # correct the x-radius by the plot's aspect ratio so circles look round
  rx <- d.frm$ry / Asp()
  DrawEllipse(x = d.frm$x, y = d.frm$y, radius.x = rx, radius.y = d.frm$ry,
              col = d.frm$col, border=d.frm$border)
  #   if(!identical(args.legend, NA)){
  #
  #     rx <- d.l$ry / Asp()
  #     DrawEllipse(x = d.l$x, y = d.l$y, radius.x = rx, radius.y = d.frm$ry,
  #                 col = d.l$col, border=d.l$border)
  #   }
}
PlotBubble.formula <- function (formula, data = parent.frame(), ..., subset, ylab = varnames[response]) {
  # Formula interface for PlotBubble: builds a model frame from the formula,
  # applies an optional subset, and calls PlotBubble (or a class-specific
  # plot method of the response) once per RHS variable.
  # Mirrors the structure of graphics::plot.formula.
  m <- match.call(expand.dots = FALSE)
  eframe <- parent.frame()
  md <- eval(m$data, eframe)
  if (is.matrix(md))
    m$data <- md <- as.data.frame(data)
  dots <- lapply(m$..., eval, md, eframe)
  nmdots <- names(dots)
  # quote titles/labels so they are not evaluated prematurely
  if ("main" %in% nmdots)
    dots[["main"]] <- enquote(dots[["main"]])
  if ("sub" %in% nmdots)
    dots[["sub"]] <- enquote(dots[["sub"]])
  if ("xlab" %in% nmdots)
    dots[["xlab"]] <- enquote(dots[["xlab"]])
  #   if ("panel.first" %in% nmdots)
  #     dots[["panel.first"]] <- match.fun(dots[["panel.first"]])
  # http://r.789695.n4.nabble.com/panel-first-problem-when-plotting-with-formula-td3546110.html
  m$ylab <- m$... <- NULL
  subset.expr <- m$subset
  m$subset <- NULL
  m <- as.list(m)
  m[[1L]] <- stats::model.frame.default
  m <- as.call(c(m, list(na.action = NULL)))
  mf <- eval(m, eframe)
  if (!missing(subset)) {
    # apply the subset both to the model frame and to any graphical
    # arguments that are parallel to the data
    s <- eval(subset.expr, data, eframe)
    l <- nrow(mf)
    dosub <- function(x) if (length(x) == l)
      x[s]
    else x
    dots <- lapply(dots, dosub)
    mf <- mf[s, ]
  }
  #   horizontal <- FALSE
  #   if ("horizontal" %in% names(dots))
  #     horizontal <- dots[["horizontal"]]
  response <- attr(attr(mf, "terms"), "response")
  if (response) {
    varnames <- names(mf)
    y <- mf[[response]]
    funname <- NULL
    xn <- varnames[-response]
    # if the response has a class with a dedicated plot method, prefer it
    if (is.object(y)) {
      found <- FALSE
      for (j in class(y)) {
        funname <- paste0("plot.", j)
        if (exists(funname)) {
          found <- TRUE
          break
        }
      }
      if (!found)
        funname <- NULL
    }
    if (is.null(funname))
      funname <- "PlotBubble"
    if (length(xn)) {
      if (!is.null(xlab <- dots[["xlab"]]))
        dots <- dots[-match("xlab", names(dots))]
      # one plot per explanatory variable on the RHS
      for (i in xn) {
        xl <- if (is.null(xlab))
          i
        else xlab
        yl <- ylab
        #       if (horizontal && is.factor(mf[[i]])) {
        #         yl <- xl
        #         xl <- ylab
        #       }
        do.call(funname, c(list(mf[[i]], y, ylab = yl,
                                xlab = xl), dots))
      }
    }
    else do.call(funname, c(list(y, ylab = ylab), dots))
  }
  # FIX: removed a stray debugging print() of the argument list that was
  # executed on every call and errored (y undefined) when the formula had
  # no response
  invisible()
}
###
## plots: PlotFdist ====
PlotFdist <- function (x, main = deparse(substitute(x)), xlab = ""
                       , xlim = NULL
                       # , do.hist =NULL  # !(all(IsWhole(x,na.rm=TRUE)) & length(unique(na.omit(x))) < 13)
                       # do.hist overrides args.hist, add.dens and rug
                       , args.hist = NULL       # list( breaks = "Sturges", ...)
                       , args.rug = NA          # list( ticksize = 0.03, side = 1, ...), pass NA if no rug
                       , args.dens = NULL       # list( bw = "nrd0", col="#9A0941FF", lwd=2, ...), NA for no dens
                       , args.curve = NA        # list( ...), NA for no dcurve
                       , args.boxplot = NULL    # list( pars=list(boxwex=0.5), ...), NA for no boxplot
                       , args.ecdf = NULL       # list( col="#8296C4FF", ...), NA for no ecdf
                       , args.curve.ecdf = NA   # list( ...), NA for no dcurve
                       , heights = NULL         # heights (hist, boxplot, ecdf) used by layout
                       , pdist = NULL           # distances of the plots, default = 0
                       , na.rm = FALSE, cex.axis = NULL, cex.main = NULL, mar = NULL, las=1) {
  # Helper: spike ("mass") plot used for discrete data instead of a histogram.
  # Note: the defaults of xaxt/xlim/cex.axis refer to variables of the
  # enclosing PlotFdist frame; they are resolved lazily when evaluated.
  .PlotMass <- function(x = x, xlab = "", ylab = "",
                        xaxt = ifelse(add.boxplot || add.ecdf, "n", "s"), xlim = xlim, ylim = NULL, main = NA, las = 1,
                        yaxt="n", col=1, lwd=3, pch=NA, col.pch=1, cex.pch=1, bg.pch=0, cex.axis=cex.axis, ...) {
    # relative frequencies drawn as vertical spikes
    pp <- prop.table(table(x))
    if(is.null(ylim))
      ylim <- c(0, max(pp))
    plot(pp, type = "h", lwd=lwd, col=col,
         xlab = "", ylab = "", cex.axis=cex.axis, xlim=xlim, ylim=ylim,
         xaxt = xaxt, main = NA, frame.plot = FALSE,
         las = las, panel.first = {
           abline(h = axTicks(2), col = "grey", lty = "dotted")
           abline(h = 0, col = "black")
         })
    if(!identical(pch, NA))
      points(pp, type="p", pch=pch, col=col.pch, bg=bg.pch, cex=cex.pch)
  }
  # Plot function to display the distribution of a cardinal variable
  # combines a histogram with a density curve, a boxplot and an ecdf
  # rug can be added by using add.rug = TRUE
  # default colors are Helsana CI-colors
  # dev question: should dots be passed somewhere??
  usr <- par(no.readonly=TRUE); on.exit(par(usr))
  opt <- DescToolsOptions(stamp=NULL)
  # an args.* argument set to NA (rather than NULL/list) switches the
  # corresponding component off
  add.boxplot <- !identical(args.boxplot, NA)
  add.rug <- !identical(args.rug, NA)
  add.dens <- !identical(args.dens, NA)
  add.ecdf <- !identical(args.ecdf, NA)
  add.dcurve <- !identical(args.curve, NA)
  add.pcurve <- !identical(args.curve.ecdf, NA)
  # preset heights of the stacked panels (hist, boxplot, ecdf)
  if(is.null(heights)){
    if(add.boxplot) {
      if(add.ecdf) heights <- c(2, 0.5, 1.4)
      else heights <- c(2, 1.4)
    } else {
      if(add.ecdf) heights <- c(2, 1.4)
    }
  }
  # vertical distances between the panels
  if(is.null(pdist)) {
    if(add.boxplot) pdist <- c(0, 0)
    else pdist <- c(0, 1)
  }
  # choose a 2- or 3-panel layout depending on the requested components
  if (add.ecdf && add.boxplot) {
    layout(matrix(c(1, 2, 3), nrow = 3, byrow = TRUE), heights = heights, TRUE)
    if(is.null(cex.axis)) cex.axis <- 1.3
    if(is.null(cex.main)) cex.main <- 1.7
  } else {
    if((add.ecdf || add.boxplot)) {
      layout(matrix(c(1, 2), nrow = 2, byrow = TRUE), heights = heights[1:2], TRUE)
      if(is.null(cex.axis)) cex.axis <- 0.9
    } else {
      if(is.null(cex.axis)) cex.axis <- 0.95
    }
  }
  # plot histogram, change margin if no main title
  par(mar = c(ifelse(add.boxplot || add.ecdf, 0, 5.1), 6.1, 2.1, 2.1))
  if(!is.null(mar)) {
    par(oma=mar)
  } else {
    if(!is.na(main)) { par(oma=c(0,0,3,0)) }
  }
  # wait for omitting NAs until all arguments are evaluated, e.g. main...
  if(na.rm) x <- x[!is.na(x)]
  # a panel.last expression in args.hist is extracted and evaluated after
  # the histogram has been drawn
  if(!is.null(args.hist[["panel.last"]])) {
    panel.last <- args.hist[["panel.last"]]
    args.hist[["panel.last"]] <- NULL
  } else {
    panel.last <- NULL
  }
  # heuristic: integer-valued x with < 13 distinct values is treated as
  # discrete (spike plot) unless args.hist$type overrides the decision
  if(is.null(args.hist$type)){
    do.hist <- !(isTRUE(all.equal(x, round(x), tol = sqrt(.Machine$double.eps))) && length(unique(x)) < 13)
  } else {
    do.hist <- (args.hist$type == "hist")
    args.hist$type <- NULL
  }
  # handle open list of arguments: args.legend in barplot is implemented this way...
  # we need histogram anyway to define xlim
  args.hist1 <- list(x = x, xlab = "", ylab = "", freq = FALSE,
                     xaxt = ifelse(add.boxplot || add.ecdf, "n", "s"), xlim = xlim, ylim = NULL, main = NA, las = 1,
                     col = "white", border = "grey70", yaxt="n")
  if (!is.null(args.hist)) {
    args.hist1[names(args.hist)] <- args.hist
  }
  # compute the histogram object first (without plotting) to get the breaks
  x.hist <- DoCall("hist", c(args.hist1[names(args.hist1) %in%
                                          c("x", "breaks", "include.lowest", "right", "nclass")], plot = FALSE))
  x.hist$xname <- deparse(substitute(x))
  if (is.null(xlim)) args.hist1$xlim <- range(pretty(x.hist$breaks))
  args.histplot <- args.hist1[!names(args.hist1) %in% c("x", "breaks", "include.lowest", "right", "nclass")]
  if (do.hist) {
    # calculate max ylim for density curve, provided there should be one...
    # what's the maximal value in density or in histogramm$densities?
    # plot density
    if (add.dens) {
      # preset default values; SJ bandwidth for small samples, nrd0 for large
      args.dens1 <- list(x = x, bw = (if(length(x) > 1000){"nrd0"} else {"SJ"}),
                         col = Pal()[2], lwd = 2, lty = "solid")
      if (!is.null(args.dens)) {
        args.dens1[names(args.dens)] <- args.dens
      }
      #     x.dens <- DoCall("density", args.dens1[-match(c("col",
      #                                                      "lwd", "lty"), names(args.dens1))])
      #
      #     # overwrite the ylim if there's a larger density-curve
      #     args.histplot[["ylim"]] <- range(pretty(c(0, max(c(x.dens$y, x.hist$density)))))
      # density estimation may fail (e.g. SJ bandwidth on degenerate data);
      # fall back to plotting without the curve in that case
      x.dens <- try( DoCall("density", args.dens1[-match(c("col", "lwd", "lty"), names(args.dens1))])
                     , silent=TRUE)
      if(inherits(x.dens, "try-error")) {
        warning(gettextf("density curve could not be added\n%s", x.dens))
        add.dens <- FALSE
      } else {
        # overwrite the ylim if there's a larger density-curve
        args.histplot[["ylim"]] <- range(pretty(c(0, max(c(x.dens$y, x.hist$density)))))
      }
    }
    # plot histogram
    DoCall("plot", append(list(x.hist), args.histplot))
    # draw y-axis; switch to scientific-style labels (x 10^n) when the tick
    # magnitudes are large or tiny
    ticks <- axTicks(2)
    n <- max(floor(log(ticks, base = 10)))   # highest power of ten
    if(abs(n)>2) {
      lab <- Format(ticks * 10^(-n), digits=max(Ndec(as.character(zapsmall(ticks*10^(-n))))))
      axis(side=2, at=ticks, labels=lab, las=las, cex.axis=cex.axis)
      text(x=par("usr")[1], y=par("usr")[4], bquote(~~~x~10^.(n)), xpd=NA, pos = 3, cex=cex.axis*0.9)
    } else {
      axis(side=2, cex.axis=cex.axis, las=las)
    }
    # evaluate a user-supplied panel.last expression now
    if(!is.null(panel.last)){
      eval(parse(text=panel.last))
    }
    if (add.dens) {
      lines(x.dens, col = args.dens1$col, lwd = args.dens1$lwd, lty = args.dens1$lty)
    }
    # plot special distribution curve
    if (add.dcurve) {
      # preset default values: a fitted normal density by default
      args.curve1 <- list(expr = parse(text = gettextf("dnorm(x, %s, %s)", mean(x), sd(x))),
                          add = TRUE,
                          n = 500, col = Pal()[3], lwd = 2, lty = "solid")
      if (!is.null(args.curve)) {
        args.curve1[names(args.curve)] <- args.curve
      }
      if (is.character(args.curve1$expr)) args.curve1$expr <- parse(text=args.curve1$expr)
      #     do.call("curve", args.curve1)
      # this throws an error heere:
      # Error in eval(expr, envir, enclos) : could not find function "expr"
      # so we roll back to do.call
      do.call("curve", args.curve1)
    }
    if (add.rug) {
      args.rug1 <- list(x = x, col = "grey")
      if (!is.null(args.rug)) {
        args.rug1[names(args.rug)] <- args.rug
      }
      DoCall("rug", args.rug1)
    }
  } else {
    # do not draw a histogram, but a line bar chart
    # PlotMass
    args.hist1 <- list(x = x, xlab = "", ylab = "", xlim = xlim,
                       xaxt = ifelse(add.boxplot || add.ecdf, "n", "s"), ylim = NULL, main = NA, las = 1,
                       yaxt="n", col=1, lwd=3, pch=NA, col.pch=1, cex.pch=2, bg.pch=0, cex.axis=cex.axis)
    if (is.null(xlim)) args.hist1$xlim <- range(pretty(x.hist$breaks))
    if (!is.null(args.hist)) {
      args.hist1[names(args.hist)] <- args.hist
      if(is.null(args.hist$col.pch))   # use the same color for pch as for the line, when not defined
        args.hist1$col.pch <- args.hist1$col
    }
    DoCall(.PlotMass, args.hist1)
    #     plot(prop.table(table(x)), type = "h", xlab = "", ylab = "",
    #          xaxt = "n", xlim = args.hist1$xlim, main = NA,
    #          frame.plot = FALSE, las = 1, cex.axis = cex.axis, panel.first = {
    #            abline(h = axTicks(2), col = "grey", lty = "dotted")
    #            abline(h = 0, col = "black")
    #          })
  }
  # boxplot panel (middle), with a shaded band for the mean confidence interval
  if(add.boxplot){
    par(mar = c(ifelse(add.ecdf, 0, 5.1), 6.1, pdist[1], 2.1))
    args.boxplot1 <- list(x = x, frame.plot = FALSE, main = NA, boxwex = 1,
                          horizontal = TRUE, ylim = args.hist1$xlim,
                          at = 1, xaxt = ifelse(add.ecdf, "n", "s"),
                          outcex = 1.3, outcol = rgb(0,0,0,0.5), cex.axis=cex.axis,
                          pch.mean=3, col.meanci="grey85")
    if (!is.null(args.boxplot)) {
      args.boxplot1[names(args.boxplot)] <- args.boxplot
    }
    plot(1, type="n", xlim=args.hist1$xlim, ylim=c(0,1)+.5, xlab="", ylab="", axes=FALSE)
    grid(ny=NA)
    # mean CI band drawn behind the box (needs at least 2 observations)
    if(length(x)>1){
      ci <- MeanCI(x, na.rm=TRUE)
      rect(xleft = ci[2], ybottom = 0.62, xright = ci[3], ytop = 1.35,
           col=args.boxplot1$col.meanci, border=NA)
    } else {
      ci <- mean(x)
    }
    args.boxplot1$add = TRUE
    DoCall("boxplot", args.boxplot1)
    # mark the mean
    points(x=ci[1], y=1, cex=2, col="grey65", pch=args.boxplot1$pch.mean, bg="white")
  }
  # plot ecdf (bottom panel)
  if (add.ecdf) {
    par(mar = c(5.1, 6.1, pdist[2], 2.1))
    #     args.ecdf1 <- list(x = x, frame.plot = FALSE, main = NA,
    #                        xlim = args.hist1$xlim, col = getOption("col1", hblue), lwd = 2,
    #                        xlab = xlab, yaxt = "n", ylab = "", verticals = TRUE,
    #                        do.points = FALSE, cex.axis = cex.axis)
    # for large samples the ecdf is approximated with 1000 histogram bins
    args.ecdf1 <- list(x = x, main = NA, breaks={if(length(x)>1000) 1000 else NULL}, ylim=c(0,1),
                       xlim = args.hist1$xlim, col = Pal()[1], lwd = 2,
                       xlab = "", yaxt = "n", ylab = "", cex.axis = cex.axis,
                       frame.plot = FALSE)
    if (!is.null(args.ecdf)) {
      args.ecdf1[names(args.ecdf)] <- args.ecdf
    }
    DoCall("PlotECDF", args.ecdf1)
    #     DoCall("plot.ecdf", args.ecdf1)
    #     axis(side = 2, at = seq(0, 1, 0.25), labels = gsub(pattern = "0\\.",
    #                                                        replacement = " \\.", format(seq(0, 1, 0.25), 2)),
    #          las = 1, xaxs = "e", cex.axis = cex.axis)
    #     abline(h = c(0.25, 0.5, 0.75), col = "grey", lty = "dotted")
    #     grid(ny = NA)
    #     points(x=range(x), y=c(0,1), col=args.ecdf1$col, pch=3, cex=2)
    # plot special distribution ecdf curve
    if (add.pcurve) {
      # preset default values: a fitted normal cdf by default
      args.curve.ecdf1 <- list(expr = parse(text = gettextf("pnorm(x, %s, %s)", mean(x), sd(x))),
                               add = TRUE,
                               n = 500, col = Pal()[3], lwd = 2, lty = "solid")
      if (!is.null(args.curve.ecdf)) {
        args.curve.ecdf1[names(args.curve.ecdf)] <- args.curve.ecdf
      }
      if (is.character(args.curve.ecdf1$expr))
        args.curve.ecdf1$expr <- parse(text=args.curve.ecdf1$expr)
      #     do.call("curve", args.curve1)
      # this throws an error here:
      # Error in eval(expr, envir, enclos) : could not find function "expr"
      # so we roll back to do.call
      do.call("curve", args.curve.ecdf1)
    }
  }
  # overall title across all panels
  if(!is.na(main)) {
    if(!is.null(cex.main)) par(cex.main=cex.main)
    title(main=main, outer = TRUE)
  }
  DescToolsOptions(opt)
  if(!is.null(DescToolsOptions("stamp")))
    if(add.ecdf)
      Stamp(cex=0.9)
  else
    Stamp()
  layout(matrix(1))   # reset layout on exit
}
PlotECDF <- function(x, breaks=NULL, col=Pal()[1],
                     ylab="", lwd = 2, xlab = NULL, cex.axis = NULL, ...){
  # Draw an empirical cumulative distribution function as a step curve,
  # with a 0..1 y-axis, reference lines and endpoint markers.
  # breaks = NULL uses the exact observed values; otherwise the ecdf is
  # approximated from histogram bins (faster for very large x).
  if (is.null(xlab))
    xlab <- deparse(substitute(x))
  if (is.null(breaks)) {
    # exact ecdf over the distinct observed values
    freq <- table(x)
    vals <- as.numeric(names(freq))
    xs <- c(vals[1], vals)
    ys <- c(0, cumsum(freq))
  } else {
    # binned approximation via histogram midpoints
    h <- hist(x, breaks=breaks, plot=FALSE)
    xs <- c(h$mids[1], h$mids)
    ys <- c(0, cumsum(h$density))
  }
  ys <- ys / tail(ys, 1)   # normalize cumulated values to [0, 1]
  plot(ys ~ xs, lwd=lwd, type = "s", col=col, xlab= xlab, yaxt="n",
       ylab = "", cex.axis=cex.axis, ...)
  # y-axis in quarters, leading zero stripped ("0.25" -> " .25")
  qs <- seq(0, 1, 0.25)
  axis(side = 2, at = qs,
       labels = gsub(pattern = "0\\.", replacement = " \\.", format(qs, 2)),
       las = 1, xaxs = "e", cex.axis = cex.axis)
  abline(h = c(0, 0.25, 0.5, 0.75, 1), col = "grey", lty = c("dashed","dotted","dotted","dotted","dashed"))
  grid(ny = NA)
  # mark the curve's endpoints at (min, 0) and (max, 1)
  points(x = range(x), y = c(0, 1), col = col, pch = 3, cex = 2)
  if (!is.null(DescToolsOptions("stamp")))
    Stamp()
}
###
## plots: PlotMultiDens ====
PlotMultiDens <- function (x, ...) {
  # S3 generic: plot several densities in one frame; dispatches on x
  UseMethod("PlotMultiDens")
}
PlotMultiDens.formula <- function (formula, data, subset, na.action, ...) {
  # Formula interface: lhs ~ grouping. Builds a model frame from the call
  # and forwards the response split by the grouping variable(s) to the
  # default method.
  if (missing(formula) || (length(formula) != 3))
    stop("formula missing or incorrect")
  # rebuild the call as a model.frame() call (standard S3 formula idiom)
  m <- match.call(expand.dots = FALSE)
  if (is.matrix(eval(m$data, parent.frame())))
    m$data <- as.data.frame(data)
  m$... <- NULL
  m[[1]] <- as.name("model.frame")
  mf <- eval(m, parent.frame())
  response <- attr(attr(mf, "terms"), "response")
  # split the response by all non-response columns -> list of vectors
  PlotMultiDens(split(mf[[response]], mf[-response]), ...)
}
PlotMultiDens.default <- function( x, xlim = NULL, ylim = NULL
                                   , col = Pal(), lty = "solid", lwd = 1
                                   , fill = NA
                                   , xlab = "x", ylab = "density"
                                   # , type = c("line", "stack", "cond")
                                   , args.dens = NULL
                                   , args.legend = NULL
                                   , na.rm = FALSE, flipxy=FALSE, ...) {
  # Overlay kernel density estimates of several samples in one plot.
  #
  # x must be a list of numeric vectors (use split() otherwise); col/lty/lwd
  # and the args.dens components are recycled over the list elements.
  # fill != NA draws filled polygons instead of lines; flipxy=TRUE swaps the
  # axes (used e.g. for vertical marginal densities in PlotMarDens).
  # Invisibly returns a data.frame with bw, n and kernel per density.
  #
  # the input MUST be a numeric list, use split if there's no list:
  # PlotMultiDens(list(x,y,z))
  # Alternative:
  #   library(lattice)
  #   densityplot( ~ vl| vjdeck + region_x, data=d.set )
  FlipDensXY <- function(x){
    # flips x and y values of a density-object
    tmp <- x$x
    x$x <- x$y
    x$y <- tmp
    return(x)
  }
  # na.omit if wished
  if(na.rm) x <- lapply(x, na.omit)
  args.dens1 <- list(n = 2^12, kernel="epanechnikov")    # default values
  if (!is.null(args.dens)) {
    args.dens1[names(args.dens)] <- args.dens
  }
  # recycle density arguments to the longest of (list length, any arg length)
  maxdim <- max(length(x), unlist(lapply(args.dens1, length)))
  args.dens1 <- lapply( args.dens1, rep, length.out=maxdim )
  # recycle x
  x <- rep(x, length.out=maxdim )
  # let's calculate the densities (skipped for samples with <= 2 values)
  l.dens <- list()
  for(i in 1:maxdim) {
    if(length(x[[i]]) > 2)
      l.dens[[i]] <- if(flipxy) {
        FlipDensXY(do.call("density", append(list(x[[i]]), lapply(args.dens1,"[", i)) ))
      } else {
        do.call("density", append(list(x[[i]]), lapply(args.dens1,"[", i)) )
      }
  }
  # recycle line attributes
  # which geom parameter has the highest dimension
  l.par <- list(lty=lty, lwd=lwd, col=col, fill=fill)
  l.par <- lapply( l.par, rep, length.out = maxdim )
  # default limits: enclose all computed density curves
  if( missing("xlim") ) xlim <- range(pretty( unlist(lapply(l.dens, "[", "x")) ) )
  if( missing("ylim") ) ylim <- range(pretty( unlist(lapply(l.dens, "[", "y")) ))
  dev.hold()
  on.exit(dev.flush())
  # empty frame, then draw the curves on top
  plot( x=1, y=1, xlim = xlim, ylim = ylim, type="n", xlab=xlab, ylab=ylab, ... )
  #   switch(match.arg(type,choices=c("line","stack","cond"))
  #     overlay = {
  if(identical(fill, NA)){
    for(i in 1:length(l.dens)) {
      lines( l.dens[[i]], col=l.par$col[i], lty=l.par$lty[i], lwd=l.par$lwd[i] )
    }
  } else {
    # filled polygons instead of plain lines
    for(i in 1:length(l.dens)) {
      polygon(x = l.dens[[i]]$x, y=l.dens[[i]]$y,
              col = l.par$fill[i], border=l.par$col[i], lty=l.par$lty[i], lwd=l.par$lwd[i])
    }
  }
  #     },
  #     stack = { },
  #     cond = {
  #     }
  #   )
  # legend: uses fill boxes, or line samples when lwd/lty vary between curves
  args.legend1 <- list( x="topright", inset=0, legend=if(is.null(names(x))){1:length(x)} else {names(x)}
                        , fill=col, bg="white", cex=0.8 )
  if( length(unique(lwd))>1 || length(unique(lty))>1 ) {
    args.legend1[["fill"]] <- NULL
    args.legend1[["col"]] <- col
    args.legend1[["lwd"]] <- lwd
    args.legend1[["lty"]] <- lty
  }
  if ( !is.null(args.legend) ) { args.legend1[names(args.legend)] <- args.legend }
  # args.legend = NA suppresses the legend entirely
  add.legend <- TRUE
  if(!is.null(args.legend)) if(all(is.na(args.legend))) {add.legend <- FALSE}
  if(add.legend) DoCall("legend", args.legend1)
  # collect bandwidth/n/kernel of the fitted densities for the return value
  res <- DoCall(rbind, lapply((lapply(l.dens, "[", c("bw","n"))), data.frame))
  res$kernel <- unlist(args.dens1["kernel"])
  if(!is.null(DescToolsOptions("stamp")))
    Stamp()
  invisible(res)
}
## plots: PlotMarDens ====
PlotMarDens <- function( x, y, grp=1, xlim = NULL, ylim = NULL
, col = rainbow(nlevels(factor(grp)))
, mardens = c("all","x","y"), pch=1, pch.cex=1.0, main=""
, na.rm = FALSE, args.legend = NULL
, args.dens = NULL, ...){
usr <- par("usr"); on.exit( par(usr) )
opt <- DescToolsOptions(stamp=NULL)
mardens <- match.arg(arg = mardens, choices = c("all", "x", "y"))
par(oma=c(0,0,3,0))
d.frm <- data.frame(x=x, y=y, grp=grp)
pch=rep(pch, length.out=nlevels(factor(grp))) # recycle pch
# this is plot.default defaults
xlim <- if (is.null(xlim)) range(x[is.finite(x)]) else xlim
ylim <- if (is.null(ylim)) range(y[is.finite(y)]) else ylim
switch( mardens
, "all" = { nf <- layout(matrix(c(2,0,1,3),2,2, byrow=TRUE), widths=c(9,1.5), heights=c(0.8,4), TRUE) }
, "x" = { nf <- layout(matrix(c(2,1), 2,1, byrow=TRUE), c(9), c(0.8,4), TRUE) }
, "y" = { nf <- layout(matrix(c(1,2),1,2, byrow=TRUE), c(9,1.5), c(4), TRUE) }
)
par(mar=c(5,5,1,1))
plot(x=d.frm$x, y=d.frm$y, xlim=xlim, ylim=ylim, type="n", ... )
s <- split(d.frm[,1:2], d.frm$grp)
for( i in seq_along(s) ){
points( x=s[[i]]$x, y=s[[i]]$y, col=col[i], pch=pch[i], cex=pch.cex)
}
args.legend1 <- list( x = "topright", inset = 0.02, legend = levels(factor(grp))
, col = col, pch = pch, bg = "white", cex = 0.8 )
if ( !is.null(args.legend) ) {
if(!all(is.na(args.legend))){
args.legend1[names(args.legend)] <- args.legend
} else {
args.legend1 <- NA
}
}
if(!all(is.na(args.legend1))) do.call("legend", args.legend1)
if(mardens %in% c("all","x")){
par(mar=c(0,5,0,1))
args.plotdens1 <- list(x = split(d.frm$x, d.frm$grp), na.rm = TRUE,
col = col, xlim = xlim, axes=FALSE,
args.legend = NA, xlab="", ylab="")
if (!is.null(args.dens)) {
args.plotdens1[names(args.dens)] <- args.dens
}
args.dens1 <- list(n = 4096, bw = "nrd0", kernel = "epanechnikov")
if (!is.null(args.dens)) {
ovr <- names(args.dens)[names(args.dens) %in% names(args.dens1)]
args.dens1[ovr] <- args.dens[ovr]
}
args.plotdens1$args.dens <- args.dens1
args.plotdens1 <- args.plotdens1[names(args.plotdens1) %nin% names(args.dens1)]
do.call("PlotMultiDens", args.plotdens1)
# PlotMultiDens( split(d.frm$x, d.frm$grp), col=col, na.rm=TRUE, xlim=xlim
# , axes=FALSE, args.legend = NA, xlab="", ylab="" )
}
if(mardens %in% c("all","y")){
par(mar=c(5,0,1,1))
args.plotdens1 <- list(x = split(d.frm$y, d.frm$grp), na.rm = TRUE,
col = col, ylim = ylim, axes=FALSE, flipxy=TRUE,
args.legend = NA, xlab="", ylab="")
if (!is.null(args.dens)) {
args.plotdens1[names(args.dens)] <- args.dens
}
args.dens1 <- list(n = 4096, bw = "nrd0", kernel = "epanechnikov")
if (!is.null(args.dens)) {
ovr <- names(args.dens)[names(args.dens) %in% names(args.dens1)]
args.dens1[ovr] <- args.dens[ovr]
}
args.plotdens1$args.dens <- args.dens1
args.plotdens1 <- args.plotdens1[names(args.plotdens1) %nin% names(args.dens1)]
do.call("PlotMultiDens", args.plotdens1)
# PlotMultiDens( split(d.frm$y, d.frm$grp), col=col, na.rm=TRUE, ylim=ylim
# , axes = FALSE, args.legend = NA, flipxy=TRUE, xlab="", ylab="" )
}
title(main=main, outer=TRUE)
options(opt)
if(!is.null(DescToolsOptions("stamp")))
Stamp()
}
###
## plots: PlotArea ====
PlotArea <- function(x, ...) {
  # S3 generic: stacked area plot (several areas drawn on top of each other)
  # source: http://r.789695.n4.nabble.com/PlotArea-td2255121.html
  # arni...
  UseMethod("PlotArea")
}
PlotArea.default <- function(x, y=NULL, prop=FALSE, add=FALSE, xlab=NULL, ylab=NULL,
                             col=NULL, frame.plot=FALSE, ...) {
  # Stacked area plot: the columns of y are cumulated and drawn as polygons
  # over x. Accepts ts/mts, table, matrix, data.frame/list or a plain vector.
  # prop=TRUE stacks row-wise proportions; add=TRUE skips the frame set-up.
  # Invisibly returns the matrix of cumulated (stacked) y values.
  if(is.ts(x)) {   # ts/mts
    if(is.null(ylab)) ylab <- deparse(substitute(x))
    x <- data.frame(Time=time(x), x)
  }
  if(is.table(x)) {   # table
    if(is.null(ylab)) ylab <- deparse(substitute(x))
    if(length(dim(x)) == 1)
      x <- t(t(unclass(x)))
    else
      x <- unclass(x)
  }
  if(is.matrix(x)) {   # matrix: numeric rownames become the x coordinates
    if(!is.null(rownames(x)) && !any(is.na(suppressWarnings(as.numeric(rownames(x)))))) {
      x <- data.frame(as.numeric(rownames(x)), x)
      names(x)[1] <- ""
    } else {
      x <- data.frame(Index=seq_len(nrow(x)), x)
    }
  }
  if(is.list(x)) {   # data.frame or list: first element is x, rest is y
    if(is.null(xlab)) xlab <- names(x)[1]
    if(is.null(ylab)) {
      if(length(x) == 2)
        ylab <- names(x)[2]
      else
        ylab <- ""
    }
    y <- x[-1]
    x <- x[[1]]
  }
  if(is.null(y)) {   # one numeric vector passed, plot it on 1:n
    if(is.null(xlab)) xlab <- "Index"
    if(is.null(ylab)) ylab <- deparse(substitute(x))
    y <- x
    x <- seq_along(x)
  }
  if(is.null(xlab)) xlab <- deparse(substitute(x))
  if(is.null(ylab)) ylab <- deparse(substitute(y))
  y <- as.matrix(y)
  if(is.null(col)) col <- gray.colors(ncol(y))
  col <- rep(col, length.out=ncol(y))
  if(prop) y <- prop.table(y, 1)
  # cumulate the series column-wise; prepend a zero baseline
  y <- t(rbind(0, apply(y, 1, cumsum)))
  na <- is.na(x) | apply(is.na(y), 1, any)
  # FIX: compute the ordering permutation once on the NA-filtered x and apply
  # it to both x and y. The old code re-used order(x[!na]) *after* x had
  # already been filtered and sorted, so y was never reordered to match x
  # (and the logical subset had the wrong length when NAs were present).
  # drop=FALSE keeps y a matrix even if a single row remains.
  ord <- order(x[!na])
  y <- y[!na, , drop=FALSE][ord, , drop=FALSE]
  x <- x[!na][ord]
  if(!add) suppressWarnings(matplot(x, y, type="n", xlab=xlab, ylab=ylab, frame.plot=frame.plot, ...))
  # draw one closed polygon per stacked band (between consecutive cumsums)
  xx <- c(x, rev(x))
  for(i in 1:(ncol(y)-1)) {
    yy <- c(y[,i+1], rev(y[,i]))
    suppressWarnings(polygon(xx, yy, col=col[i], ...))
  }
  if(!is.null(DescToolsOptions("stamp")))
    Stamp()
  invisible(y[,-1])
}
PlotArea.formula <- function (formula, data, subset, na.action, ...) {
  # Formula interface for PlotArea. Supports ". ~ x" on the left-hand side,
  # which expands to cbind() of all columns of data not used on the RHS.
  m <- match.call(expand.dots=FALSE)
  if(is.matrix(eval(m$data,parent.frame()))) m$data <- as.data.frame(data)
  m$... <- NULL
  m[[1]] <- as.name("model.frame")
  # FIX: parenthesis placement -- test whether the LHS *is* ".", instead of
  # taking as.character() of the comparison result. The old form
  # if(as.character(formula[[2]]==".")) only worked by accident, because
  # if("TRUE")/if("FALSE") are coerced to logical in R.
  if(as.character(formula[[2]]) == ".") {
    rhs <- unlist(strsplit(deparse(formula[[3]])," *[:+] *"))
    lhs <- sprintf("cbind(%s)", paste(setdiff(names(data), rhs),collapse=","))
    m[[2]][[2]] <- parse(text=lhs)[[1]]
  }
  mf <- eval(m, parent.frame())
  if(is.matrix(mf[[1]])) {
    # multi-column response (cbind on the LHS): rebuild a data.frame with
    # the original column names
    lhs <- as.data.frame(mf[[1]])
    names(lhs) <- as.character(m[[2]][[2]])[-1]
    PlotArea.default(cbind(mf[-1],lhs), ...)
  } else {
    PlotArea.default(mf[2:1], ...)
  }
}
###
## plots: PlotDotCI ====
PlotDot <- function (x, labels = NULL, groups = NULL, gdata = NULL, cex = par("cex"),
                     pch = 21, gpch = 21, bg = par("bg"), color = par("fg"), gcolor = par("fg"),
                     lcolor = "gray", xlim = NULL, main = NULL, xlab = NULL, ylab = NULL, xaxt=NULL, yaxt=NULL,
                     add = FALSE, args.errbars = NULL, ...) {
  # Cleveland dot plot (an extended dotchart): values x on horizontal dotted
  # guide lines, optional grouping with group labels/summary points (gdata),
  # and optional horizontal error bars via args.errbars.
  # Data and most per-point attributes are reversed first so the first
  # element appears on top. Invisibly returns the y positions used.
  # Helper: normalize the args.errbars list into from/to/mid plus graphical
  # parameters. 'from' may be a kx2 (from,to) or kx3 (mid,from,to) matrix.
  ErrBarArgs <- function(from, to = NULL, pos = NULL, mid = NULL,
                         horiz = FALSE, col = par("fg"), lty = par("lty"), lwd = par("lwd"),
                         code = 3, length = 0.05, pch = NA, cex.pch = par("cex"),
                         col.pch = par("fg"), bg.pch = par("bg"), ...) {
    if (is.null(to)) {
      # NOTE(review): `length(dim(x) != 1)` tests x from the enclosing frame
      # and has the parenthesis inside length() -- presumably
      # `length(dim(from)) != 1` was intended; verify before relying on it
      if (length(dim(x) != 1))
        stop("'to' must be be provided, if x is a matrix.")
      if (dim(from)[2] %nin% c(2, 3))
        stop("'from' must be a kx2 or a kx3 matrix, when 'to' is not provided.")
      if (dim(from)[2] == 2) {
        to <- from[, 2]
        from <- from[, 1]
      }
      else {
        mid <- from[, 1]
        to <- from[, 3]
        from <- from[, 2]
      }
    }
    # reverse column order for matrices to match the reversed plot order
    if (length(dim(from)) ==2 )
      from <- Rev(from, 2)
    if (length(dim(to)) ==2 )
      to <- Rev(to, 2)
    if (length(dim(mid)) ==2 )
      mid <- Rev(mid, 2)
    return(list(from = from, to = to, mid = mid, col = col,
                col.axis = 1, lty = lty, lwd = lwd, angle = 90, code = code,
                length = length, pch = pch, cex.pch = cex.pch, col.pch = col.pch,
                bg.pch = bg.pch))
  }
  # reverse everything so the first observation is drawn at the top
  x <- Rev(x, 1)
  labels <- rev(labels)
  groups <- rev(groups)
  # gdata <- rev(gdata)
  # gcolor <- Rev(gcolor)
  lcolor <- Rev(lcolor)
  color <- Rev(color)
  pch <- Rev(pch)
  bg <- Rev(bg)
  # cex: [1] points, [2] labels, [3] group labels
  cex <- rep(cex, length.out = 3)
  if (!is.null(args.errbars))
    errb <- do.call(ErrBarArgs, args.errbars)
  # default xlim: data range, widened to the error-bar extent if present
  if (!add && is.null(xlim)) {
    if (is.null(args.errbars)) {
      xlim <- range(x[is.finite(x)])
    }
    else {
      rng <- c(errb$from, errb$to)
      xlim <- range(pretty(rng[is.finite(rng)]))
    }
  }
  opar <- par("mai", "mar", "cex", "yaxs")
  on.exit(par(opar))
  par(cex = cex[1], yaxs = "i")
  if (!is.numeric(x))
    stop("'x' must be a numeric vector or matrix")
  n <- length(x)
  # derive labels/groups from matrix structure or names, as in dotchart()
  if (is.matrix(x)) {
    if (is.null(labels))
      labels <- rownames(x)
    if (is.null(labels))
      labels <- as.character(1L:nrow(x))
    labels <- rep_len(labels, n)
    if (is.null(groups))
      groups <- col(x, as.factor = TRUE)
    glabels <- levels(groups)
  }
  else {
    if (is.null(labels))
      labels <- names(x)
    glabels <- if (!is.null(groups))
      levels(groups)
    if (!is.vector(x)) {
      warning("'x' is neither a vector nor a matrix: using as.numeric(x)")
      x <- as.numeric(x)
    }
  }
  if (!add)
    plot.new()
  # widen the left margin to fit the longest label (and group label)
  linch <- if (!is.null(labels))
    max(strwidth(labels, "inch"), na.rm = TRUE)
  else 0
  if (is.null(glabels)) {
    ginch <- 0
    goffset <- 0
  }
  else {
    ginch <- max(strwidth(glabels, "inch"), na.rm = TRUE)
    goffset <- 0.4
  }
  if (!(is.null(labels) && is.null(glabels) || identical(yaxt, "n"))) {
    nmai <- par("mai")
    nmai[2L] <- nmai[4L] + max(linch + goffset, ginch) +
      0.1
    par(mai = nmai)
  }
  # compute y positions; grouped data gets 2 empty rows between groups
  if (is.null(groups)) {
    o <- 1L:n
    y <- o
    ylim <- c(0, n + 1)
  }
  else {
    o <- sort.list(as.numeric(groups), decreasing = TRUE)
    x <- x[o]
    groups <- groups[o]
    #    color <- rep_len(color, length(groups))[o]
    #    lcolor <- rep_len(lcolor, length(groups))[o]
    offset <- cumsum(c(0, diff(as.numeric(groups)) != 0))
    y <- 1L:n + 2 * offset
    ylim <- range(0, y + 2)
  }
  if (!add)
    plot.window(xlim = xlim, ylim = ylim, log = "")
  lheight <- par("csi")
  # point labels in the left margin
  if (!is.null(labels)) {
    linch <- max(strwidth(labels, "inch"), na.rm = TRUE)
    loffset <- (linch + 0.1)/lheight
    labs <- labels[o]
    if (!identical(yaxt, "n"))
      mtext(labs, side = 2, line = loffset, at = y, adj = 0,
            col = color, las = 2, cex = cex[2], ...)
  }
  # guide lines and the points themselves
  if (!add)
    abline(h = y, lty = "dotted", col = lcolor)
  points(x, y, pch = pch, col = color, bg = bg)
  # group labels and optional per-group summary points (gdata)
  if (!is.null(groups)) {
    gpos <- rev(cumsum(rev(tapply(groups, groups, length)) +
                         2) - 1)
    ginch <- max(strwidth(glabels, "inch"), na.rm = TRUE)
    goffset <- (max(linch + 0.2, ginch, na.rm = TRUE) + 0.1)/lheight
    if (!identical(yaxt, "n"))
      mtext(glabels, side = 2, line = goffset, at = gpos, adj = 0,
            col = gcolor, las = 2, cex = cex[3], ...)
    if (!is.null(gdata)) {
      abline(h = gpos, lty = "dotted")
      points(gdata, gpos, pch = gpch, col = gcolor, bg = bg, ...)
    }
  }
  if (!(add || identical(xaxt, "n") ))
    axis(1)
  if (!add)
    box()
  if (!add)
    title(main = main, xlab = xlab, ylab = ylab, ...)
  # horizontal error bars (arrows) plus optional midpoint markers
  if (!is.null(args.errbars)) {
    arrows(x0 = rev(errb$from)[o], x1 = rev(errb$to)[o],
           y0 = y, col = rev(errb$col), angle = 90, code = rev(errb$code),
           lty = rev(errb$lty), lwd = rev(errb$lwd), length = rev(errb$length))
    if (!is.null(errb$mid))
      points(rev(errb$mid)[o], y = y, pch = rev(errb$pch), col = rev(errb$col.pch),
             cex = rev(errb$cex.pch), bg = rev(errb$bg.pch))
  }
  if (!is.null(DescToolsOptions("stamp")))
    Stamp()
  #   invisible(y[order(o, decreasing = TRUE)])
  # replaced by 0.99.18:
  invisible(y[order(y, decreasing = TRUE)])
}
TitleRect <- function(label, bg = "grey", border=1, col="black", xjust=0.5, line=2, ...){
  # Draw a filled title bar spanning the plot width, 'line' margin lines
  # high above the plot region, with the label left/center/right justified
  # according to xjust (0 / 0.5 / 1).
  oldxpd <- par(xpd=TRUE)
  on.exit(par(oldxpd))
  usr <- par("usr")
  top <- LineToUser(line, 3)
  # paint a white underlay first, then the bg fill on top (presumably so a
  # semi-transparent bg does not blend with content underneath -- verify)
  for (fill in c("white", bg))
    rect(xleft = usr[1], ybottom = usr[4], xright = usr[2], ytop = top,
         col = fill, border = border)
  xpos <- if (xjust == 0) usr[1] else if (xjust == 0.5) mean(usr[1:2]) else usr[2]
  text(x = xpos, y = mean(c(usr[4], top)), labels = label,
       adj = c(xjust, 0.5), col = col, ...)
}
# not yet exported
PlotFacet <- function(x, FUN, mfrow, titles, main="", oma=NULL,
                      args.titles = NULL, ...){
  # Lay out one panel per element of the list x in an mfrow grid, calling
  # FUN(x[[i]], xaxt, yaxt) for each and drawing a TitleRect above it.
  # Axis annotation is suppressed except on the bottom row (every second
  # panel only) and the first column.
  par(mfrow=mfrow, xpd=TRUE)
  nr <- mfrow[1]
  nc <- mfrow[2]
  if (is.null(oma))
    oma <- c(5,5,5,2)
  par(mar=c(0,0,2.0,0), oma=oma, las=par("las"))
  # default title-bar appearance, overridable via args.titles
  args.titles1 <- list(col=1, bg="grey", border=1)
  if (!is.null(args.titles))
    args.titles1[names(args.titles)] <- args.titles
  for (i in seq_along(x)) {
    # x axis: only in the bottom row, and there only on every 2nd panel
    xaxt <- if (i <= (max(nr) - 1) * nc || IsOdd(i)) "n" else "s"
    # y axis: only in the first column
    yaxt <- if ((i %% nc) != 1) "n" else "s"
    # the plot function
    FUN(x[[i]], xaxt, yaxt)
    do.call(TitleRect, c(args.titles1, label=titles[i]))
  }
  title(main, outer=TRUE, xpd=NA)
}
PlotLinesA <- function(x, y, col=1:5, lty=1, lwd=1, lend = par("lend"), xlab = NULL,
                       ylab = NULL, xlim = NULL, ylim = NULL, xaxt=NULL, yaxt=NULL, cex = 1, args.legend = NULL,
                       main=NULL, grid=TRUE, mar=NULL, pch=NA, pch.col=par("fg"), pch.bg=par("bg"), pch.cex=1,  ...){
  # Line plot of the columns of a matrix (via matplot) with the series
  # labels written into the right margin at the height of each line's last
  # non-NA value (spread out so they don't overlap). args.legend = NA
  # suppresses the margin labels.
  #
  # example:
  #
  # m <- matrix(c(3,4,5,1,5,4,2,6,2), nrow = 3,
  #             dimnames = list(dose = c("A","B","C"),
  #                             age  = c("2000","2001","2002")))
  # PlotLinesA(m, col=rev(c(PalHelsana(), "grey")), main="Dosw ~ age", lwd=3, ylim=c(1,10))
  # Helper: write the series labels (and optionally a short line sample of
  # 'width' margin lines) into the right margin at heights y.
  .legend <- function(line, y, width, labels, lty, lwd, col, cex){
    line <- rep(line, length.out=2)
    mtext(side = 4, las=1, cex=cex, text = labels,
          line = line[1] + ZeroIfNA(width + (!is.na(width)) * line[2]),
          at = y
    )
    if(!is.na(width)){
      x0 <- LineToUser(line[1], 4)
      segments(x0 = x0, x1 = LineToUser(line[1] + width, 4), y0 = y,
               lwd = lwd, lty=lty, lend = 1, col = col)
    }
  }
  add.legend <- !identical(args.legend, NA)
  # last non-NA value of every column (LOCF), sorted -> label order/height
  last <- Sort(data.frame(t(tail(apply(as.matrix(x), 2, LOCF), 1))))
  last <- setNames(last[,], nm = rownames(last))
  if(is.null(mar)){
    if(!identical(args.legend, NA))
      # no convincing solution before plot.new is called
      # http://stackoverflow.com/questions/16452368/calculate-strwidth-without-calling-plot-new
      Mar(right = 10)   # this would be nice, but there's no plot so far...  max(strwidth(names(last))) * 1.2
  } else {
    do.call(Mar, as.list(mar))
  }
  # empty frame first, then grid, then the lines (so the grid lies below)
  matplot(x, y, type="n", las=1, xlim=xlim, ylim=ylim, xaxt="n", yaxt=yaxt, main=main, xlab=xlab, ylab=ylab, cex = cex, ...)
  if(!identical(xaxt, "n"))
    axis(side = 1, at=c(1:nrow(x)), rownames(x))
  if(grid) grid()
  matplot(x, type="l", lty=lty, col=col, lwd=lwd, lend=lend, xaxt="n", add=TRUE)
  if(!is.na(pch))
    matplot(x, type="p", pch=pch, col=pch.col, bg=pch.bg, cex=pch.cex, xaxt="n", add=TRUE)
  oldpar <- par(xpd=TRUE); on.exit(par(oldpar))
  if (add.legend) {
    if(is.null(colnames(x)))
      colnames(x) <- 1:ncol(x)
    # map the sorted label order back to the column attributes
    ord <- match(names(last), colnames(x))
    lwd <- rep(lwd, length.out=ncol(x))
    lty <- rep(lty, length.out=ncol(x))
    col <- rep(col, length.out=ncol(x))
    # default legend values; y positions are spread out to avoid overlap
    args.legend1 <- list(
      line = c(1, 1) ,  # par("usr")[2] + diff(par("usr")[1:2]) * 0.02,
      width = 1,  # (par("usr")[2] + diff(par("usr")[1:2]) * 0.02 * 2) - (par("usr")[2] + diff(par("usr")[1:2]) * 0.02),
      y = SpreadOut(unlist(last), mindist = 1.2 * strheight("M")),
      labels=names(last), cex=par("cex"),
      col = col[ord], lwd = lwd[ord], lty = lty[ord])
    if (!is.null(args.legend)) {
      args.legend1[names(args.legend)] <- args.legend
    }
    DoCall(".legend", args.legend1)
  }
  if(!is.null(DescToolsOptions("stamp")))
    Stamp()
}
# Scatterplot with logarithmic axes, powers-of-ten tick labels (10^n) and an
# optional log grid (minor lines within each decade, stronger decade lines).
# log: which axes are logarithmic, "x", "y" or "xy" (default).
# args.grid: list overriding the grid appearance (col/lty/lwd for the minor
#            lines, col.min/lty.min/lwd.min for the decade lines); NA
#            suppresses the grid entirely.
PlotLog <- function(x, ..., args.grid=NULL, log="xy"){

  add.grid <- !identical(args.grid, NA)
  # default grid arguments
  args.grid1 <- list(
    lwd = 1,
    lty = 3, #"dotted",
    col = "grey85",
    lwd.min = 1,
    lty.min = 3,
    col.min = "grey60"
  )
  # override defaults with user supplied values
  if (!is.null(args.grid)) {
    args.grid1[names(args.grid)] <- args.grid
  }

  # set up the plot region only; points are added last so they lie on top
  # of the grid
  plot(x, ..., type="n", log=log, xaxt="n", yaxt="n", xaxs="i", yaxs="i")

  if(grepl("x", log)){
    # decade exponents n with 10^n covering the x-range (par("usr") holds
    # log10 coordinates on a log axis)
    ticks <- do.call(seq, as.list(range(ceiling(log(10^par("usr")[1:2], 10)))))
    # powers-of-ten labels along the x-axis
    sapply(ticks,
           function(n) mtext(side=1, line=1, at = 10^n, text = bquote(~10^.(n))))
    if(add.grid){
      # minor gridlines at 1..10 within each decade, then decade lines on top
      abline(v=unique(as.vector(sapply(c(ticks, tail(ticks, 1)+1), function(n) seq(0, 0.1, 0.01)*10^n))),
             col=args.grid1$col, lty=args.grid1$lty, lwd=args.grid1$lwd)
      abline(v=10^(ticks), col=args.grid1$col.min, lty=args.grid1$lty.min, lwd=args.grid1$lwd.min)
    }
    axis(1, at=c(0, 10^(ticks)), labels=NA)
  }

  if(grepl("y", log)){
    # same construction for the y-axis
    ticks <- do.call(seq, as.list(range(ceiling(log(10^par("usr")[3:4], 10)))))
    sapply(ticks,
           function(n) mtext(side=2, line=1, at = 10^n, text = bquote(~10^.(n)), las=1))
    if(add.grid){
      abline(h=unique(as.vector(sapply(c(ticks, tail(ticks, 1)+1), function(n) seq(0, 0.1, 0.01)*10^n))),
             col=args.grid1$col, lty=args.grid1$lty, lwd=args.grid1$lwd)
      abline(h=10^(ticks), col=args.grid1$col.min, lty=args.grid1$lty.min, lwd=args.grid1$lwd.min)
    }
    axis(2, at=c(0, 10^(ticks)), labels=NA)
  }

  box()
  points(x, ...)
}
###
## plots: PlotFun ====
# Plot a mathematical function given as a formula, e.g.
#   PlotFun(x^2 ~ x, from=-2, to=2)
#   PlotFun(sin(a*x) ~ x, args=list(a=2), from=-pi, to=pi)
# The lhs is evaluated along a sequence of the single independent variable
# found on the rhs; any further variables in the formula must be supplied as
# scalar parameters via 'args'. If polar=TRUE the pair (r=lhs, theta=rhs) is
# converted to cartesian coordinates before plotting. Unless the user sets
# 'axes', a pair of origin-centered axes with minor ticks is drawn.
# Returns the evaluated (x, y) coordinates invisibly.
PlotFun <- function(FUN, args=NULL, from=NULL, to=NULL, by=NULL, xlim=NULL,
                    ylim = NULL, polar = FALSE, type="l",
                    col = par("col"), lwd= par("lwd"), lty=par("lty"), pch=NA, mar=NULL,
                    add = FALSE, ...){

  if(is.null(mar))
    Mar(1,1,1,1)
  else
    par(mar=mar)

  # the independent variable is the formula variable that is not a parameter
  vars <- all.vars(FUN)
  vars <- vars[vars %nin% names(args)]

  # default evaluation range: [-5, 5] in 500 steps
  if(is.null(from)) from <- -5
  if(is.null(to)) to <- 5
  if(is.null(by)) by <- (to - from) / 500

  # the independent variable
  assign(vars, seq(from = from, to = to, by=by))

  # define the parameters; only scalar parameters are supported
  for(i in seq_along(args)) {
    assign(names(args)[i], unlist(args[i]))
    if(length(get(names(args)[i])) > 1) {
      assign(names(args)[i], get(names(args)[i])[1])
      warning(gettextf("first element used of '%s' argument", names(args)[i]))
    }
  }

  # Inhibit model interpretation (wrap both sides in I()) for function plot
  FUN[[2]] <- as.formula("~" %c% gettextf("I(%s)", deparse(FUN[[2]])) )[[2]]
  FUN[[3]] <- as.formula("~" %c% gettextf("I(%s)", deparse(FUN[[3]])) )[[2]]

  # this will evaluate in parent.frame(), so in function's env
  p <- ParseFormula(FUN)
  y <- p$lhs$mf.eval[,1]
  x <- p$rhs$mf.eval[,1]

  if(polar){
    # interpret lhs as radius and rhs as angle
    cord <- PolToCart(r = y, theta = x)
    y <- cord$y
    x <- cord$x
  }

  if(is.null(xlim)){
    xlim <- range(pretty(range(x[is.finite(x)])))
  }
  if(is.null(ylim)){
    ylim <- range(pretty(range(y[is.finite(y)])))
  }

  # define plot parameters, each overridable through ...
  m <- match.call(expand.dots = FALSE)
  m$...$frame.plot <- InDots(..., arg="frame.plot", default = FALSE)
  m$...$axes <- InDots(..., arg="axes", default = NULL)
  m$...$asp <- InDots(..., arg="asp", default = 1)
  m$...$xlab <- InDots(..., arg="xlab", default = "")
  m$...$ylab <- InDots(..., arg="ylab", default = "")

  # draw our own origin-centered axes unless the user explicitly set 'axes'
  if(is.null(m$...$axes)) {
    add.axes <- TRUE
    m$...$axes <- FALSE
  } else {
    add.axes <- FALSE
  }

  if(!add){
    do.call(plot, c(list(y=1, x=1, xlim=xlim, ylim=ylim, type="n", mar=mar), m$...))
  }

  if(add.axes && !add) {
    # x-axis through the origin; drop the 0 tick when the axis crosses zero
    tck <- axTicks(side=1)
    if(sign(min(tck)) != sign(max(tck)))
      tck <- tck[tck!=0]
    axis(1, pos = 0, col="darkgrey", at=tck)

    # minor ticks: 5 subdivisions between two major x ticks
    axp <- par("xaxp")
    axp[3] <- 5 * axp[3]
    axis(1, pos = 0, TRUE, at=axTicks(side=1, axp=axp), labels = NA, tck=-0.01, col="darkgrey")

    # y-axis through the origin
    tck <- axTicks(side=2)
    if(sign(min(tck)) != sign(max(tck)))
      tck <- tck[tck!=0]
    axis(2, pos = 0, las=1, col="darkgrey", at=tck)

    # minor ticks for the y-axis
    # (fix: use side=2 here so the tick positions are derived from the
    #  y-axis settings; this previously read side=1)
    axp <- par("yaxp")
    axp[3] <- 5 * axp[3]
    axis(2, pos = 0, TRUE, at=axTicks(side=2, axp=axp), labels=NA, tck=-0.01, col="darkgrey")
  }

  lines(y=y, x=x, type=type, col=col, lty=lty, lwd=lwd, pch=pch)

  invisible(list(x=x, y=y))
}
# Shade <- function(FUN, col=par("fg"), xlim, density=10, step=0.01, ...) {
#
#
# # but works as well with function(x), but it doesn't
# # Shade(FUN=function(x) dt(x, df=5), xlim=c(qt(0.975, df=5), 6), col="red")
#
# if(is.function(FUN)) {
# # if FUN is a function, then save it under new name and
# # overwrite function name in FUN, which has to be character
# fct <- FUN
# FUN <- "fct"
# # FUN <- gettextf("%s(x)", FUN)
# FUN <- gettextf("function(x) %s", FUN)
# }
#
# from <- xlim[1]
# to <- xlim[2] # qt(0.025, df=degf)
#
# x <- seq(from, to, by = step)
# xval <- c(from, x, to)
#
# # Calculates the function for given xval
# yval <- c(0, eval(parse(text = FUN)), 0)
#
# polygon(xval, yval, col=col, density=density, ...)
#
# }
# Shade the area under a curve between consecutive break points, e.g. to
# mark the rejection region of a test distribution:
#   curve(dt(x, df=5), -6, 6)
#   Shade("dt(x, df=5)", breaks=c(qt(0.975, df=5), 6), col="red")
# FUN    : either a character string containing an expression in 'x'
#          (e.g. "dnorm(x)") or a function of one argument
#          (e.g. function(x) dt(x, df=5)).
# breaks : vector of x-positions; one polygon is drawn between each pair of
#          consecutive breaks. col and density are recycled segmentwise.
# step   : evaluation step width along the curve.
Shade <- function(FUN, col=par("fg"), breaks, density=10, step=0.01, ...) {

  if(is.function(FUN)) {
    # if FUN is a function, store it under a local name and rebuild FUN as
    # the character expression "fct(x)", so both input forms are evaluated
    # the same way below.
    # (fix: this previously produced the string "function(x) fct", whose
    #  eval(parse(...)) yields a closure instead of function values and
    #  therefore failed in c(0, ..., 0))
    fct <- FUN
    FUN <- "fct(x)"
  }

  # draw one shaded polygon under the curve between from and to
  .Shade <- function(FUN, col, from, to, density, step, ...) {
    x <- seq(from, to, by = step)
    xval <- c(from, x, to)
    # evaluate the curve expression; 'x' (and 'fct' via lexical scope)
    # are visible in this frame
    yval <- c(0, eval(parse(text = FUN)), 0)
    polygon(xval, yval, col=col, density=density, ...)
  }

  # recycle the segmentwise arguments to a common length
  pars <- Recycle(from=head(breaks, -1), to=tail(breaks, -1), col=col, density=density)

  for(i in 1:attr(pars, "maxdim"))
    .Shade(FUN, pars$col[i], pars$from[i], pars$to[i], density=pars$density[i], step=step, ...)
}
## plots: PlotPyramid ====
# Plot a (population) pyramid: two horizontal barplots back to back, lx
# growing to the left and rx to the right, with the category labels placed
# in a central gap between them.
# lx, rx  : bar lengths; alternatively a two-column matrix/data.frame in lx.
# ylab    : labels for the central gap, ylab.x their x-position.
# col     : colours alternating left/right; a single colour is used for
#           both sides.
# gapwidth: width of the central label gap (default: widest label + padding).
# rev     : if TRUE, reverse the category order.
# Returns the bar midpoints (as barplot does), invisibly.
PlotPyramid <- function(lx, rx = NA, ylab = "",
                        ylab.x = 0, col = c("red", "blue"), border = par("fg"),
                        main = "", lxlab = "", rxlab = "", xlim = NULL,
                        gapwidth = NULL, xaxt = TRUE,
                        args.grid = NULL,
                        cex.axis = par("cex.axis"), cex.lab = par("cex.axis"), cex.names = par("cex.axis"),
                        adj = 0.5, rev = FALSE, ...) {

  # accept a two-column structure in lx
  if (missing(rx) && length(dim(lx)) > 0) {
    rx <- lx[, 2]
    lx <- lx[, 1]
  }

  if(rev==TRUE){
    lx <- Rev(lx, margin=1)
    rx <- Rev(rx, margin=1)
    ylab <- Rev(ylab)
  }

  # get the bar midpoints without plotting
  b <- barplot(-lx, horiz=TRUE, plot=FALSE, ...)
  ylim <- c(0, max(b))
  if(is.null(xlim)) xlim <- c(-max(lx), max(rx))

  plot( 1, type="n", xlim=xlim, ylim=ylim, frame.plot=FALSE
        , xlab="", ylab="", axes=FALSE, main=main)

  # default gap: widest label plus some padding
  if(is.null(gapwidth)) gapwidth <- max(strwidth(ylab, cex=cex.names)) + 3*strwidth("M", cex=cex.names)

  # tick positions shifted outwards by half the gap
  at.left <- axTicks(1)[axTicks(1)<=0] - gapwidth/2
  at.right <- axTicks(1)[axTicks(1)>=0] + gapwidth/2

  # grid: define default arguments
  if(!identical(args.grid, NA)){ # add grid
    args.grid1 <- list(col="grey", lty="dotted")
    # override default arguments with user defined ones
    if (!is.null(args.grid)) {
      args.grid1[names(args.grid)] <- args.grid
    }
    abline(v=c(at.left, at.right), col=args.grid1$col, lty=args.grid1$lty )
  }

  # recycle a single colour for both sides
  # (fix: this previously assigned rep(col, 2) to 'border', clobbering the
  #  border argument and leaving the right-hand colour vector empty)
  if(length(col) == 1) col <- rep(col, 2)
  # odd entries colour the left side, even entries the right side
  lcol <- rep(col[seq_along(col) %% 2 == 1], times=length(lx))
  rcol <- rep(col[seq_along(col) %% 2 == 0], times=length(rx))

  if(length(border) == 1) border <- rep(border, 2)
  lborder <- rep(border[seq_along(border) %% 2 == 1], times=length(lx))
  rborder <- rep(border[seq_along(border) %% 2 == 0], times=length(rx))

  barplot(-lx, horiz=TRUE, col=lcol, add=T, axes=FALSE, names.arg="",
          offset=-gapwidth/2, border=lborder, ...)
  barplot(rx, horiz=TRUE, col=rcol, add=T, axes=FALSE, names.arg="",
          offset=gapwidth/2, border=rborder, ...)

  # category labels in the central gap (xpd so they may reach the margins)
  oldpar <- par(xpd=TRUE); on.exit(par(oldpar))
  ylab.x <- ylab.x + sign(ylab.x) * gapwidth/2
  text(ylab, x=ylab.x, y=b, cex=cex.names, adj = adj)

  if(!xaxt == "n"){
    # both half axes show positive values
    axis(side=1, at=at.right, labels=axTicks(1)[axTicks(1)>=0], cex.axis=cex.axis)
    axis(side=1, at=at.left, labels=-axTicks(1)[axTicks(1)<=0], cex.axis=cex.axis)
  }

  mtext(text=rxlab, side=1, at=mean(at.right), padj=0.5, line=2.5, cex=cex.lab)
  mtext(text=lxlab, side=1, at=mean(at.left), padj=0.5, line=2.5, cex=cex.lab)

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

  invisible(b) # return the same result as barplot
}
###
## plots: PlotCorr ====
# Plot a correlation matrix x as a coloured image with a colour legend.
# Rows run along the x-axis (labels on top), columns along the y-axis; the
# columns are reversed so the first column appears at the top.
# cols/breaks : colour scale mapping of the values in [-1, 1].
# border/lwd  : colour and width of the cell grid (NA suppresses it).
# mincor      : correlations with |value| < mincor are blanked out (NA).
# args.colorlegend : list of arguments for ColorLegend, NA to suppress it.
#
# example:
#   m <- cor(d.pizza[,WhichNumerics(d.pizza)][,1:5], use="pairwise.complete.obs")
#   PlotCorr(m)
PlotCorr <- function(x, cols = colorRampPalette(c(Pal()[2], "white", Pal()[1]), space = "rgb")(20)
                     , breaks = seq(-1, 1, length = length(cols)+1), border="grey", lwd=1
                     , args.colorlegend = NULL, xaxt = par("xaxt"), yaxt = par("yaxt"), cex.axis = 0.8, las = 2
                     , mar = c(3,8,8,8), mincor=0, ...){

  pars <- par(mar=mar); on.exit(par(pars))

  # if mincor is set delete all correlations with abs. val. < mincor
  if(mincor!=0)
    x[abs(x) < abs(mincor)] <- NA

  # reverse the columns so the first column is displayed on top
  x <- x[,ncol(x):1]

  image(x=1:nrow(x), y=1:ncol(x), xaxt="n", yaxt="n", z=x, frame.plot=FALSE, xlab="", ylab=""
        , col=cols, breaks=breaks, ... )

  # rows labelled on top (side 3), columns on the left (side 2)
  if(xaxt!="n") axis(side=3, at=1:nrow(x), labels=rownames(x), cex.axis=cex.axis, las=las, lwd=-1)
  if(yaxt!="n") axis(side=2, at=1:ncol(x), labels=colnames(x), cex.axis=cex.axis, las=las, lwd=-1)

  if((is.list(args.colorlegend) || is.null(args.colorlegend))){
    # default colour legend to the right of the image
    args.colorlegend1 <- list( labels=sprintf("%.1f", seq(-1,1, length=length(cols)/2+1))
                               , x=nrow(x)+0.5 + nrow(x)/20, y=ncol(x)+0.5
                               , width=nrow(x)/20, height=ncol(x), cols=cols, cex=0.8 )
    if ( !is.null(args.colorlegend) ) { args.colorlegend1[names(args.colorlegend)] <- args.colorlegend }
    do.call("ColorLegend", args.colorlegend1)
  }

  if(!is.na(border)) {
    # frame around the image and a grid between the cells
    # (fix: the vertical extent now uses ncol(x) instead of nrow(x), so
    #  non-square matrices are framed correctly; identical for square ones)
    rect(xleft=0.5, xright=nrow(x)+0.5, ybottom=0.5, ytop=ncol(x)+0.5,
         lwd=lwd, border=border)
    usr <- par("usr")
    clip(0.5, nrow(x)+0.5, 0.5, ncol(x)+0.5)
    abline(h=seq(-2, ncol(x)+1,1)-0.5, v=seq(1,nrow(x)+1,1)-0.5, col=border,lwd=lwd)
    # restore the clipping region
    do.call("clip", as.list(usr))
  }

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()
}
###
## plots: PlotViolin ====
# Generic violin plot; dispatches to PlotViolin.default (vectors / lists of
# vectors) or PlotViolin.formula (formula interface).
PlotViolin <- function(x, ...) {
  UseMethod("PlotViolin")
}
# Default method: draw violin plots (mirrored kernel density shapes) for one
# or several groups of values, given either as separate vectors in ..., as a
# list, or as a data.frame. A narrow boxplot is overlaid by default;
# suppress it with args.boxplot=NA, customize it with a list.
# bw    : bandwidth selector passed to density().
# na.rm : drop NAs per group before estimating the density.
# names : group labels for the axis (default: list names or 1:n).
PlotViolin.default <- function (x, ..., horizontal = FALSE, bw = "SJ", na.rm = FALSE
                                , names = NULL, args.boxplot = NULL) {

  # Make a simple violin plot call from violinplot. values are x,y to plot
  vlnplt <- function(x, y, center, horizontal = FALSE,
                     col = NA , border = par("fg"), lty = 1, lwd = 1,
                     density = NULL, angle = 45, fillOddEven = FALSE, ...) {
    # double up first: mirror the density about the group's center line
    x <- c(x, rev(x))
    y <- c(y, -rev(y))
    y <- y + center

    # swap x and y if horizontal
    if (horizontal == FALSE) { tmp=x; x=y; y=tmp }
    polygon(x=x, y=y, border=border, col=col, lty=lty, lwd=lwd,
            density=density, angle=angle, fillOddEven=fillOddEven, ...)
  }

  # main *****************

  # pick the general graphical parameters out of ... and set them globally
  # for the duration of the plot
  m <- match.call(expand.dots = FALSE)
  pars <- m$...[ names(m$...)[!is.na(match(names(m$...), c(
    "cex","cex.axis","cex.lab","cex.main","cex.sub","col.axis","col.lab","col.main","col.sub","family",
    "font","font.axis","font.lab","font.main","font.sub","las","tck","tcl","xaxt","xpd","yaxt"
  )))]]
  oldpar <- par(pars); on.exit(par(oldpar))

  # collect the groups: a list in x, or all unnamed arguments in ...
  args <- list(x, ...)
  namedargs <- if (!is.null(attributes(args)$names))
    attributes(args)$names != ""
  else
    rep(FALSE, length = length(args))
  groups <- if(is.list(x)) x else args[!namedargs]

  if (0 == (n <- length(groups)))
    stop("invalid first argument")
  if (length(class(groups)))
    groups <- unclass(groups)
  if (!missing(names))
    attr(groups, "names") <- names
  else {
    if (is.null(attr(groups, "names")))
      attr(groups, "names") <- 1:n
    names <- attr(groups, "names")
  }

  # estimate a density per group; density() returns 512 points by default
  xvals <- matrix(0, nrow = 512, ncol = n)
  yvals <- matrix(0, nrow = 512, ncol = n)
  center <- 1:n
  for (i in 1:n) {
    if(na.rm) xi <- na.omit(groups[[i]])
    else xi <- groups[[i]]
    tmp.dens <- density(xi, bw = bw)
    xvals[, i] <- tmp.dens$x
    # scale the widest point of every violin to 7/16 of the group spacing
    yvals.needtoscale <- tmp.dens$y
    yvals.scaled <- 7/16 * yvals.needtoscale / max(yvals.needtoscale)
    yvals[, i] <- yvals.scaled
  }

  if (horizontal == FALSE) {
    xrange <- c(1/2, n + 1/2)
    yrange <- range(xvals)
  }
  else {
    xrange <- range(xvals)
    # yrange <- c(min(yvals), max(yvals))
    yrange <- c(1/2, n + 1/2)
  }

  # pass only the plot-frame related dot arguments on to plot(), filling in
  # defaults for those the user did not supply
  plot.args <- m$...[names(m$...)[!is.na(match(names(m$...),
                                               c("xlim","ylim","main","xlab","ylab","panel.first","panel.last","frame.plot","add")))]]
  if(! "xlim" %in% names(plot.args)) plot.args <- c(plot.args, list(xlim=xrange))
  if(! "ylim" %in% names(plot.args)) plot.args <- c(plot.args, list(ylim=yrange))
  if(! "xlab" %in% names(plot.args)) plot.args <- c(plot.args, list(xlab=""))
  if(! "ylab" %in% names(plot.args)) plot.args <- c(plot.args, list(ylab=""))
  if(! "frame.plot" %in% names(plot.args)) plot.args <- c(plot.args, list(frame.plot=TRUE))
  # plot only if add is not TRUE
  if(! "add" %in% names(plot.args)) add <- FALSE else add <- plot.args$add
  if(!add) do.call(plot, c(plot.args, list(x=0, y=0, type="n", axes=FALSE)))

  # polygon-related dot arguments, recycled per group
  poly.args <- args[names(args)[!is.na(match(names(args), c("border","col","lty","lwd","density","angle","fillOddEven")))]]
  poly.args <- lapply( poly.args, rep, length.out=n )

  # draw one violin per group with its i-th set of polygon arguments
  for (i in 1:n)
    do.call(vlnplt, c(lapply(poly.args, "[", i), list(x=xvals[, i]), list(y=yvals[, i]),
                      list(center=center[i]), list(horizontal = horizontal)))

  # axes, honouring axes/xaxt/yaxt from ... (default: draw both)
  axes <- Coalesce(unlist(m$...[names(m$...)[!is.na(match(names(m$...), c("axes")))]]), TRUE)
  if(axes){
    xaxt <- Coalesce(unlist(m$...[names(m$...)[!is.na(match(names(m$...), c("xaxt")))]]), TRUE)
    if(xaxt!="n") if(horizontal == TRUE) axis(1) else axis(1, at = 1:n, labels = names)
    yaxt <- Coalesce(unlist(m$...[names(m$...)[!is.na(match(names(m$...), c("yaxt")))]]), TRUE)
    if(yaxt!="n") if(horizontal == TRUE) axis(2, at = 1:n, labels = names) else axis(2)
  }

  # overlay a narrow boxplot unless suppressed with args.boxplot=NA
  if(!identical(args.boxplot, NA)){
    args1.boxplot <- list(col="black", add=TRUE, boxwex=0.05, axes=FALSE,
                          outline=FALSE, whisklty=1, staplelty=0, medcol="white")
    args1.boxplot[names(args.boxplot)] <- args.boxplot
    do.call(boxplot, c(list(x, horizontal = horizontal), args1.boxplot))
  }

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()
}
# PlotViolin.formula <- function (formula, data = NULL, ..., subset) {
# Formula interface for PlotViolin: response ~ grouping(s). The model frame
# is built in the caller's environment (standard boxplot.formula idiom) and
# the response is split by the rhs variables before dispatching to the
# default method.
PlotViolin.formula <- function (formula, data, subset, na.action, ...) {
  if (missing(formula) || (length(formula) != 3))
    stop("formula missing or incorrect")
  m <- match.call(expand.dots = FALSE)
  if (is.matrix(eval(m$data, parent.frame())))
    m$data <- as.data.frame(data)
  # strip the graphics arguments and re-dispatch the call to model.frame
  m$... <- NULL
  m[[1]] <- as.name("model.frame")
  mf <- eval(m, parent.frame())
  # index of the response column in the model frame
  response <- attr(attr(mf, "terms"), "response")
  PlotViolin(split(mf[[response]], mf[-response]), ...)
}
###
## plots: PlotPolar ====
# Plot values in polar coordinates. r can be a vector or a matrix with one
# series per row; theta defaults to equally spaced angles over [0, 2*pi).
# type (per series): "p" points, "l" closed polygon (fill colour in 'fill'),
# "h" spokes from the origin. rlim sets the plot radius; the default is
# max(|r|) expanded by 12% as in plot.default.
PlotPolar <- function(r, theta = NULL, type="p"
                      , rlim = NULL, main="", lwd = par("lwd"), lty = par("lty"), col = par("col")
                      , pch = par("pch"), fill = NA, cex = par("cex")
                      , mar = c(2, 2, 5, 2), add = FALSE, ...) {

  # treat a single series as a one-row matrix
  if( ncol(r <- as.matrix(r)) == 1) r <- t(r)
  k <- nrow(r)   # number of series

  if(is.null(theta)) {
    # equally spaced angles, one per column, starting at 0
    theta <- seq(0, 2*pi, length=ncol(r)+1)[-(ncol(r)+1)]
    if( nrow(r) > 1 ){
      theta <- matrix( rep(theta, times=nrow(r)), ncol=ncol(r), byrow = TRUE )
    } else {
      theta <- t(as.matrix(theta))
    }
  } else {
    if( ncol(theta <- as.matrix(theta)) == 1) theta <- t(theta)
  }

  # recycle the per-series graphical parameters
  if (length(type) < k) type <- rep(type, length.out = k)
  if (length(lty) < k) lty <- rep(lty, length.out = k)
  if (length(lwd) < k) lwd <- rep(lwd, length.out = k)
  if (length(pch) < k) pch <- rep(pch, length.out = k)
  if (length(col) < k) col <- rep(col, length.out = k)
  if (length(fill) < k) fill <- rep(fill, length.out = k)
  if (length(cex) < k) cex <- rep(cex, length.out = k)

  dev.hold()
  on.exit(dev.flush())

  # default plot radius follows plot.default's axis expansion
  # (fix: the previous one-armed `rlim <- if (is.null(rlim)) ...` overwrote
  #  a user supplied rlim with NULL and broke the subsequent plot call)
  if (is.null(rlim))
    rlim <- max(abs(r[is.finite(r)])) * 1.12

  if(!add){
    par(mar = mar, pty = "s", xpd=TRUE)
    plot(x=c(-rlim, rlim), y=c(-rlim, rlim),
         type = "n", axes = FALSE, main = main, xlab = "", ylab = "", ...)
  }

  for (i in seq_len(k)) {
    # convert the i-th series to cartesian coordinates
    xy <- xy.coords( x=cos(theta[i,]) * r[i,], y=sin(theta[i,])*r[i,])
    if(type[i] == "p"){
      points( xy, pch = pch[i], col = col[i], cex = cex[i] )
    } else if( type[i]=="l") {
      polygon(xy, lwd = lwd[i], lty = lty[i], border = col[i], col = fill[i])
    } else if( type[i]=="h") {
      segments(x0=0, y0=0, x1=xy$x, y1=xy$y, lwd = lwd[i], lty = lty[i], col = col[i])
    }
  }

  if(!add && !is.null(DescToolsOptions("stamp")))
    Stamp()
}
# Add a polar coordinate grid to an existing plot (e.g. one set up by
# Canvas/PlotPolar).
# nr      : radial gridlines (circles); NULL for pretty defaults, NA to
#           suppress, a single count, or a vector of radii.
# ntheta  : angular gridlines (rays); NULL for 12 sectors, NA to suppress,
#           a single count, or a vector of angles in radians.
# rlabels / alabels : labels for the radii resp. angles; NA suppresses them.
# lblradians : if TRUE the angle labels are printed in radians, else degrees.
# las     : angle label orientation (1 horizontal, 2 radial, 3 vertical).
# adj/dist: label adjustment and radial distance of the angle labels.
PolarGrid <- function(nr = NULL, ntheta = NULL, col = "lightgray",
                      lty = "dotted", lwd = par("lwd"), rlabels = NULL, alabels = NULL,
                      lblradians = FALSE, cex.lab = 1, las = 1, adj = NULL, dist = NULL) {

  # radial gridlines (concentric circles)
  if (is.null(nr)) { # use standard values with pretty axis values
    at <- axTicks(1)[axTicks(1)>=0]
  } else if (!all(is.na(nr))) { # use NA for suppress radial gridlines
    if (length(nr) > 1) { # use nr as radius
      at <- nr
    } else {
      at <- seq.int(0, par("xaxp")[2L], length.out = nr + 1)
    }
  } else {at <- NULL}

  if(!is.null(at))
    DrawCircle(x = 0, y = 0, r.out = at, border = col, lty = lty, col = NA)

  # angular gridlines (rays from the origin)
  if (is.null(ntheta)) { # default: 12 sectors of 30 degrees
    at.ang <- seq(0, 2*pi, by=2*pi/12)
  } else if (!all(is.na(ntheta))) { # use NA for suppress angular gridlines
    if (length(ntheta) > 1) { # use ntheta as angles
      at.ang <- ntheta
    } else {
      at.ang <- seq(0, 2*pi, by=2*pi/ntheta)
    }
  } else {at.ang <- NULL}

  if(!is.null(at.ang)) segments(x0=0, y0=0, x1=max(par("usr"))*cos(at.ang)
                                , y1=max(par("usr"))*sin(at.ang), col = col, lty = lty, lwd = lwd)

  # radius labels along the horizontal axis
  if(!is.null(at)){
    if(is.null(rlabels)) rlabels <- signif(at[-1], 3) # standard values
    if(!all(is.na(rlabels)))
      BoxedText(x=at[-1], y=0, labels=rlabels, border=FALSE, col="white", cex=cex.lab)
  }

  # angle labels around the outside
  if(!is.null(at.ang)){
    if(is.null(alabels))
      if(lblradians == FALSE){
        alabels <- RadToDeg(at.ang[-length(at.ang)]) # standard values in degrees
      } else {
        alabels <- Format(at.ang[-length(at.ang)], digits=2) # standard values in radians
      }

    # draw labels only if not suppressed with NA
    # (fix: the NA check previously guarded only the adj default, so the
    #  labels were processed and drawn even when alabels was set to NA)
    if(!all(is.na(alabels))){
      if(is.null(dist))
        dist <- par("usr")[2]*1.07
      out <- DescTools::PolToCart(r = dist, theta=at.ang)

      if(is.null(adj)) {
        # right-align on the left half of the circle, left-align on the
        # right half, center at the top and bottom
        adj <- ifelse(at.ang %(]% c(pi/2, 3*pi/2), 1, 0)
        adj[at.ang %in% c(pi/2, 3*pi/2)] <- 0.5
      }
      # (fix: the rep() argument is length.out, was misspelled length_out)
      adj <- rep(adj, length.out=length(alabels))

      if(las == 2){
        # radially rotated plain text labels
        sapply(seq_along(alabels),
               function(i) text(out$x[i], out$y[i], labels=alabels[i], cex=cex.lab,
                                srt=DescTools::RadToDeg(atan(out$y[i]/out$x[i])), adj=adj[i]))
      } else {
        # horizontal (las=1) or vertical (las=3) boxed labels
        sapply(seq_along(alabels),
               function(i) BoxedText(x=out$x[i], y=out$y[i], labels=alabels[i], cex=cex.lab,
                                     srt=ifelse(las==3, 90, 0), adj=adj[i],
                                     border=NA, col="white"))
      }
    }
  }

  invisible()
}
###
## plots: PlotTernary =====
# clumsy *****************
# PlotTernary <- function(a, f, m, symb = 2, grid = FALSE, ...) {
# # source: cwhmisc:::triplot
# # author: Christian Hoffmann
# Ternary (triangle) plot for three-part compositional data. The parts can
# be given as a three-column matrix/data.frame in x, or separately as
# x, y, z. Rows are rescaled to sum to 1 if necessary; corner labels come
# from lbl or the column names.
PlotTernary <- function(x, y = NULL, z = NULL, args.grid=NULL, lbl = NULL, main = "", ...){

  # combine the three parts into one matrix and derive the corner labels
  if(!(is.null(y) && is.null(z))){
    if(is.null(lbl)) lbl <- c(names(x), names(y), names(z))
    x <- cbind(x, y, z)
  } else {
    if(is.null(lbl)) lbl <- colnames(x)
    x <- as.matrix(x)
  }

  if(any(x < 0)) stop("X must be non-negative")
  # row sums via matrix product
  s <- drop(x %*% rep(1, ncol(x)))
  if(any(s<=0)) stop("each row of X must have a positive sum")
  # rescale rows to proportions if they do not already sum to 1
  if(max(abs(s-1)) > 1e-6) {
    warning("row(s) of X will be rescaled")
    x <- x / s
  }

  oldpar <- par(xpd=TRUE)
  on.exit(par(oldpar))

  Canvas(mar=c(1,3,4,1) + .1, main=main)
  sq3 <- sqrt(3)/2   # half height of the unit equilateral triangle

  # grid: define default arguments
  if(!identical(args.grid, NA)){
    args.grid1 <- list(col="grey", lty="dotted", nx=5)
    # override default arguments with user defined ones
    if (!is.null(args.grid)) {
      args.grid1[names(args.grid)] <- args.grid
    }
    # nx gridlines parallel to each of the three triangle sides
    d <- seq(0, 2*sq3, sq3*2/(args.grid1$nx))
    x0 <- -sq3 + (1) * d
    segments(x0 = x0, y0 = -0.5, x1 = x0 + sq3 - d*.5, y1 = 1- d * sq3, col=args.grid1$col, lty=args.grid1$lty)
    segments(x0 = x0, y0 = -0.5, x1 = -rev(x0 + sq3 - d*.5), y1 = rev(1- d * sq3), col=args.grid1$col, lty=args.grid1$lty)
    segments(x0 = x0 + sq3 - d*.5, y0 = 1- d * sq3, x1 = rev(x0 -d*.5), y1 = 1- d * sq3, col=args.grid1$col, lty=args.grid1$lty)
  }

  # triangle outline and corner labels placed slightly outside the corners
  DrawRegPolygon(nv = 3, rot = pi/2, radius.x = 1, col=NA)
  eps <- 0.15
  pts <- DrawRegPolygon(nv = 3, rot = pi/2, radius.x = 1+eps, plot=FALSE)
  text(pts, labels = lbl[c(1,3,2)])

  # project the compositions into the triangle and plot them
  points((x[,2] - x[,3]) * sq3, x[,1] * 1.5 - 0.5, ...)

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()
}
## plots: PlotVenn ====
# Draw a Venn diagram for 2 to 5 sets given as a list of vectors in x.
# The element counts of every intersection region are computed from a
# membership table and written at hard-coded positions inside the circles /
# ellipses. With plotit=FALSE only the counts are computed.
# Returns a list with the intersection table and the label coordinates.
PlotVenn <- function (x, col = "transparent", plotit = TRUE, labels = NULL) {
  n <- length(x)
  if (n > 5)
    stop("Can't plot a Venn diagram with more than 5 sets...")
  xnames <- if(is.null(names(x))) LETTERS[1:n] else names(x)
  if(is.null(labels)) labels <- xnames
  # membership table: elements x sets (sets coded A..E), then a frequency
  # table of the membership patterns ("A", "AB", "ABC", ...)
  tab <- table(unlist(x), unlist(lapply(1:length(x), function(i) rep(LETTERS[i], length(x[[i]])))))
  venntab <- table(apply(tab, 1, function(x) paste(LETTERS[1:n][as.logical(x)], collapse = "")))
  if (plotit) {
    plot(x = c(-7, 7), y = c(-7, 7), asp = 1, type = "n",
         xaxt = "n", yaxt = "n", xlab = "", ylab = "", frame.plot = FALSE)
    # per set count: draw the outline shapes, then place the region counts
    # at fixed coordinates (xy) and the set labels outside (lbl)
    if (n == 2) {
      DrawCircle(x = c(2, -2), y = c(0, 0), r.out = 3, col = col)
      xy <- data.frame(x = c(-3, 3, 0), y = c(0, 0, 0),
                       set = c("A", "B", "AB")
                       , frq=NA)
      xy[match(rownames(venntab), xy$set),"frq"] <- venntab
      text(xy$x, xy$y, labels=xy$frq) # labels=xy$set)
      lbl <- data.frame(x = c(-6, 6), y = c(2.5, 2.5))
      text(lbl$x, lbl$y, label = labels, cex = 2)
    }
    else if (n == 3) {
      DrawCircle(x = c(2, -1, -1), y = c(0, 1.73, -1.73),
                 r.out = 3, col = col)
      xy <- data.frame(x = c(3.5, -1.75, -1.75, 1, -2, 1, 0),
                       y = c(0, 3, -3, 1.75, 0, -1.75, 0),
                       set = c("A", "B", "C", "AB", "BC", "AC", "ABC")
                       , frq=NA)
      xy[match(rownames(venntab), xy$set),"frq"] <- venntab
      text(xy$x, xy$y, labels=xy$frq) # labels=xy$set)
      lbl <- data.frame(x = c(6.5, -4.5, -4.5), y = c(0,4.8,-4.8))
      text(lbl$x, lbl$y, label = labels, cex = 2)
    }
    else if (n == 4) {
      DrawEllipse(x = c(0, 0, 2, -2), y = c(0, 0, -2, -2),
                  radius.x = 6, radius.y = 4, rot = c(1, 3) * pi/4,
                  col = col)
      xy <- data.frame(x=c(-6.0,-4.0,-2.2,0.0,2.2,3.9,5.9,4.3,2.7,-3.1,-4.3,-2.6,-0.1,2.7,0.0)
                       , y=c(0.3,-2.9,-4.2,-5.7,-4.2,-2.9,0.2,2.3,4.2,4.0,2.3,0.9,-1.6,0.8,3.4)
                       , set=c("A","AC","ACD","AD","ABD","BD","D","CD","C","B","AB","ABC","ABCD","BCD","BC")
                       , frq=NA )
      xy[match(rownames(venntab), xy$set),"frq"] <- venntab
      text(xy$x, xy$y, labels=xy$frq) # labels=xy$set)
      lbl <- data.frame(x = c(-8, -4.4, 4.5, 7.7), y = c(1.9, 5.4, 5.5, 2.5))
      text(lbl$x, lbl$y, label = labels, cex = 2)
    }
    else if (n == 5) {
      DrawEllipse(x=c(0,-1.5,-2,0,1), y=c(0,0,-2,-2.5,-1), radius.x=6, radius.y=3, rot=c(1.7,2.8,4.1,5.4,6.6), col=col)
      xy <- data.frame(x=c(4.9,-0.7,-5.9,-4.3,3.1, 3.6,2.4,0.9,-2.3,-3.8,-4.7,-3.9,-1.5,1.2,3.3, 2.6,1.8,1.2,-0.5,-2.7,-3.7,-4.3,-2.6,-0.9,0.9,3.4, 2.1,-2.1,-3.4,-0.9,-0.5 )
                       , y=c(0.5,4.5,1.7,-5.5,-6.1, -1.1,1.8,2.7,2.9,1.5,-1.1,-3.1,-5,-4.7,-3.1, 0.1,2,1.4,2.4,2.2,0.2,-1.6,-3.3,-4.7,-3.8,-2.5, -2.1,1.5,-1.3,-3.8,-0.8 )
                       , set=c("B","A","E","D","C", "BE","AB","AD","AE","CE","DE","BD","CD","AC","BC"
                               ,"ABE","ABD", "ABDE","ADE","ACE","CDE","BDE","BCD","ACD","ABC","BCE", "ABCE","ACDE","BCDE","ABCD","ABCDE" )
                       , frq=NA )
      xy[match(rownames(venntab), xy$set),"frq"] <- venntab
      text(xy$x, xy$y, labels=xy$frq) # labels=xy$set)
      lbl <- data.frame(x=c(1.8,7.6,5.8,-7.5,-7.9), y=c(6.3,-0.8,-7.1,-6.8,3.9))
      text( lbl$x, lbl$y, label=labels, cex=2)
    }
    xy$setx <- xy$set
    # replace AB.. by names of the list
    # NOTE(review): the levels()<- relabelling assumes xy$setx is a factor;
    # with R >= 4.0 (stringsAsFactors = FALSE) the column is character and
    # this assignment has no visible effect -- verify against current R.
    code <- data.frame(id=LETTERS[1:n], x=xnames)
    levels(xy$setx) <- sapply(levels(xy$setx), function(x) paste(code$x[match(unlist(strsplit(x, split="")), code$id)], collapse=""))
    names(venntab) <- sapply(names(venntab), function(x) paste(code$x[match(unlist(strsplit(x, split="")), code$id)], collapse=""))
  }
  else {
    xy <- NA
  }

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

  return(list(venntab, xy))
}
###
## plots: PlotHorizBar (GanttChart) ----------
# info2 <- list(labels=c("Jim","Joe","Jim","John","John","Jake","Joe","Jed","Jake"),
# starts=c(8.1,8.7,13.0,9.1,11.6,9.0,13.6,9.3,14.2),
# ends=c(12.5,12.7,16.5,10.3,15.6,11.7,18.1,18.2,19.0))
#
# PlotHorizBar <- function (from, to, grp = 1, col = "lightgrey", border = "black",
# height = 0.6, add = FALSE, xlim = NULL, ylim = NULL, ...) {
#
# # needed?? 6.5.2014
# # if (is.null(dev.list())) plot.new()
#
# grp <- factor(grp)
#
# if(!add){
#
# par(mai = c(par("mai")[1], max(par("mai")[2], strwidth(levels(grp), "inch")) +
# 0.5, par("mai")[3], par("mai")[4]))
#
# if(is.null(xlim)) xlim <- range(pretty((c(from, to))))
# if(is.null(ylim)) ylim <- c(0, nlevels(grp) + 1)
# plot(1, xlim = xlim, ylim = ylim,
# type = "n", ylab = "", yaxt = "n", ...)
#
# mtext(levels(grp), side=2, line = 1, at=1:nlevels(grp), las=1)
#
# }
# xleft <- from
# xright <- to
# ytop <- as.numeric(grp) + height/2
# ybottom <- as.numeric(grp) - height/2
# rect(xleft, ybottom, xright, ytop, density = NULL, angle = 45,
# col = col, border = border, lty = par("lty"), lwd = par("lwd"))
#
# if(!is.null(DescToolsOptions("stamp")))
# Stamp()
#
# }
#
# Visualise the positions of missing values in a data.frame: one horizontal
# band per variable, with missing cells drawn in 'col' on background 'bg',
# variable names on the left and missing counts on the right.
# clust=TRUE reorders the rows by clustering the missing pattern; in that
# case the row order is returned invisibly (otherwise NULL).
PlotMiss <- function(x, col = hred, bg=SetAlpha(hecru, 0.3), clust=FALSE,
                     main = NULL, ...){

  x <- as.data.frame(x)
  x <- Rev(x, 2)   # reverse columns so the first variable is drawn on top
  n <- ncol(x)

  # widen the left margin to fit the longest variable name
  inches_to_lines <- (par("mar") / par("mai") )[1] # 5
  lab.width <- max(strwidth(colnames(x), units="inches")) * inches_to_lines
  ymar <- lab.width + 3

  Canvas(xlim=c(1, nrow(x)+1), ylim=c(0, n), asp=NA, xpd=TRUE, mar = c(5.1, ymar, 5.1, 5.1)
         , main=main, ...)
  usr <- par("usr")   # set background color lightgrey
  rect(xleft=0, ybottom=usr[3], xright=nrow(x)+1, ytop=usr[4], col=bg, border=NA)
  axis(side = 1)

  missingIndex <- as.matrix(is.na(x))

  if(clust){
    # order rows by similarity of their missing pattern
    orderIndex <- order.dendrogram(as.dendrogram(hclust(dist(missingIndex * 1), method = "mcquitty")))
    missingIndex <- missingIndex[orderIndex, ]
    res <- orderIndex
  } else {
    res <- NULL
  }

  # one rectangle per missing cell, drawn bandwise per variable
  sapply(1:ncol(missingIndex), function(i){
    xl <- which(missingIndex[,i])
    if(length(xl) > 0)
      rect(xleft=xl, xright=xl+1, ybottom=i-1, ytop=i, col=col, border=NA)
  })

  abline(h=1:ncol(x), col="white")   # separators between the bands

  # variable names on the left, number of missings on the right
  text(x = -0.03 * nrow(x), y = (1:n)-0.5, labels = colnames(x), las=1, adj = 1)
  text(x = nrow(x) * 1.04, y = (1:n)-0.5, labels = sapply(x, function(y) sum(is.na(y))), las=1, adj=0)

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

  invisible(res)
}
###
## plots: PlotTreemap ====
# the code is strongly based on Jeff Enos' treemap in library(portfolio), jeff@kanecap.com,
# potential improvements:
# * make the position of the text more flexible (top-left, bottom-right etc.)
# * clip text to the specific rectangles and don't allow to write over the rect.
# * see examples at http://www.hiveondemand.com/portal/treemap_basics.jsp
# Draw a squarified treemap of the values in x, optionally organised in
# groups: the plot area is recursively partitioned into rectangles whose
# areas are proportional to x, groups first, then the values within each
# group. Group borders/labels are drawn on top.
# Returns (invisibly) a list per group with the midpoints of the group and
# of its child rectangles.
PlotTreemap <- function(x, grp=NULL, labels=NULL, cex=1.0, text.col="black", col=rainbow(length(x)),
                        labels.grp=NULL, cex.grp=3, text.col.grp="black", border.grp="grey50",
                        lwd.grp=5, main="") {

  # squarified treemap layout: split the unit square into rectangles with
  # areas proportional to x, keeping aspect ratios close to 1.
  # Returns a data.frame with one row (x0, y0, x1, y1) per element of x.
  SqMap <- function(x) {

    .sqmap <- function(z, x0 = 0, y0 = 0, x1 = 1, y1 = 1, lst=list()) {
      cz <- cumsum(z$area)/sum(z$area)
      # choose the number n of items in the current strip that minimises the
      # worst aspect ratio, then recurse on the remaining area
      n <- which.min(abs(log(max(x1/y1, y1/x1) * sum(z$area) * ((cz^2)/z$area))))
      more <- n < length(z$area)
      a <- c(0, cz[1:n])/cz[n]
      if (y1 > x1) {
        # horizontal strip at the bottom of the remaining area
        lst <- list( data.frame(idx=z$idx[1:n],
                                x0=x0 + x1 * a[1:(length(a) - 1)],
                                y0=rep(y0, n), x1=x0 + x1 * a[-1], y1=rep(y0 + y1 * cz[n], n)))
        if (more) {
          lst <- append(lst, Recall(z[-(1:n), ], x0, y0 + y1 * cz[n], x1, y1 * (1 - cz[n]), lst))
        }
      } else {
        # vertical strip at the left of the remaining area
        lst <- list( data.frame(idx=z$idx[1:n],
                                x0=rep(x0, n), y0=y0 + y1 * a[1:(length(a) - 1)],
                                x1=rep(x0 + x1 * cz[n], n), y1=y0 + y1 * a[-1]))
        if (more) {
          lst <- append(lst, Recall(z[-(1:n), ], x0 + x1 * cz[n], y0, x1 * (1 - cz[n]), y1, lst))
        }
      }
      lst
    }

    if(is.null(names(x))) names(x) <- seq_along(x)
    x <- data.frame(idx=names(x), area=x)
    res <- do.call(rbind, .sqmap(x))
    rownames(res) <- x$idx
    return(res[,-1])
  }

  # draw the rectangles of a squarified layout
  PlotSqMap <- function(z, col = NULL, border=NULL, lwd=par("lwd"), add=FALSE){
    if(is.null(col)) col <- as.character(z$col)
    # plot squarified treemap
    if(!add) Canvas(c(0,1), xpd=TRUE)
    for(i in 1:nrow(z)){
      rect(xleft=z[i,]$x0, ybottom=z[i,]$y0, xright=z[i,]$x1, ytop=z[i,]$y1,
           col=col[i], border=border, lwd=lwd)
    }
  }

  if(is.null(grp)) grp <- rep(1, length(x))
  if(is.null(labels)) labels <- names(x)

  # we need to sort the stuff: by group, then by decreasing value
  ord <- order(grp, -x)
  x <- x[ord]
  grp <- grp[ord]
  labels <- labels[ord]
  col <- col[ord]

  # get the groups rects first
  zg <- SqMap(Sort(tapply(x, grp, sum), decreasing=TRUE))
  # the transformation information: x0/y0 translation, xs/ys stretching used
  # to map each group's unit-square child layout into the group's rectangle
  tm <- cbind(zg[,1:2], xs=zg$x1 - zg$x0, ys=zg$y1 - zg$y0)
  gmidpt <- data.frame(x=apply(zg[,c("x0","x1")], 1, mean),
                       y=apply(zg[,c("y0","y1")], 1, mean))

  # group labels: row names if there is more than one group, else none
  if(is.null(labels.grp))
    if(nrow(zg)>1) {
      labels.grp <- rownames(zg)
    } else {
      labels.grp <- NA
    }

  Canvas(c(0,1), xpd=TRUE, asp=NA, main=main)

  res <- list()
  for( i in 1:nrow(zg)){
    # get the group index
    idx <- grp == rownames(zg)[i]
    xg.rect <- SqMap(Sort(x[idx], decreasing=TRUE))
    # transform the child layout into the group rectangle
    xg.rect[,c(1,3)] <- xg.rect[,c(1,3)] * tm[i,"xs"] + tm[i,"x0"]
    xg.rect[,c(2,4)] <- xg.rect[,c(2,4)] * tm[i,"ys"] + tm[i,"y0"]
    PlotSqMap(xg.rect, col=col[idx], add=TRUE)
    res[[i]] <- list(grp=gmidpt[i,],
                     child= cbind(x=apply(xg.rect[,c("x0","x1")], 1, mean),
                                  y=apply(xg.rect[,c("y0","y1")], 1, mean)))
    # value labels at the child rectangle midpoints
    text( x=apply(xg.rect[,c("x0","x1")], 1, mean),
          y=apply(xg.rect[,c("y0","y1")], 1, mean),
          labels=labels[idx], cex=cex, col=text.col )
  }
  names(res) <- rownames(zg)

  # group borders and labels drawn on top of the children
  PlotSqMap(zg, col=NA, add=TRUE, border=border.grp, lwd=lwd.grp)
  text( x=apply(zg[,c("x0","x1")], 1, mean),
        y=apply(zg[,c("y0","y1")], 1, mean),
        labels=labels.grp, cex=cex.grp, col=text.col.grp)

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

  invisible(res)
}
###
## plots: PlotCirc ====
PlotCirc <- function(tab, acol = rainbow(sum(dim(tab))), aborder = "darkgrey",
                     rcol = SetAlpha(acol[1:nrow(tab)], 0.5), rborder = "darkgrey",
                     gap = 5, main = "", labels = NULL, cex.lab = 1.0,
                     las = 1, adj = NULL, dist = 2){

  # Plot a 2-dimensional contingency table as a circular ("chord"-style)
  # diagram: the columns of tab become sectors on the left half of the
  # circle, the rows on the right half, and ribbons connect each row/column
  # combination with a width proportional to the cell frequency.
  #   tab       : a 2-dimensional table
  #   acol      : colours for the outer arcs, aborder their border colour
  #   rcol      : colours for the ribbons, rborder their border colour
  #   gap       : the gap between the sectors, in degrees
  #   labels    : sector labels; default is rev(c(rownames(tab), colnames(tab)))
  #   las       : label orientation (2 = rotated along the radius)
  #   adj, dist : label justification and radial distance outside the circle
  # Returns invisibly the cartesian coordinates of the label positions.

  ribbon <- function( angle1.beg, angle1.end, angle2.beg, angle2.end,
                      radius1 = 1, radius2 = radius1, col = "blue",
                      border ="darkgrey" ){
    # one ribbon: two arcs joined by two bezier curves pulled through the origin
    xy1 <- DescTools::PolToCart( radius1, angle1.beg )
    xy2 <- DescTools::PolToCart( radius2, angle1.end )
    xy3 <- DescTools::PolToCart( radius1, angle2.beg )
    xy4 <- DescTools::PolToCart( radius2, angle2.end )
    bez1 <- DescTools::DrawArc(rx = radius2, theta.1 = DescTools::CartToPol(xy2$x, xy2$y)$theta, theta.2 = DescTools::CartToPol(xy4$x, xy4$y)$theta, plot=FALSE)[[1]]
    bez2 <- DescTools::DrawBezier( x = c(xy4$x, 0, xy3$x), y = c(xy4$y, 0, xy3$y), plot=FALSE )
    bez3 <- DescTools::DrawArc(rx = radius1, theta.1=DescTools::CartToPol(xy3$x, xy3$y)$theta, theta.2 =DescTools::CartToPol(xy1$x, xy1$y)$theta, plot=FALSE )[[1]]
    bez4 <- DescTools::DrawBezier(x = c(xy1$x, 0, xy2$x), y = c(xy1$y, 0, xy2$y), plot=FALSE )
    polygon( x=c(bez1$x, bez2$x, bez3$x, bez4$x),
             y=c(bez1$y, bez2$y, bez3$y, bez4$y), col=col, border=border)
  }

  n <- sum(tab)
  ncol <- ncol(tab)
  nrow <- nrow(tab)

  d <- DegToRad(gap)  # the gap between the sectors in radians

  # recycle colours and borders to the required lengths
  acol <- rep(acol, length.out = ncol+nrow)
  rcol <- rep(rcol, length.out = nrow)
  aborder <- rep(aborder, length.out = ncol+nrow)
  rborder <- rep(rborder, length.out = nrow)

  # sector break angles: columns on the left half, rows on the right half
  mpts.left <- c(0, cumsum(as.vector(rbind(rev(apply(tab, 2, sum))/ n * (pi - ncol * d), d))))
  mpts.right <- cumsum(as.vector(rbind(rev(apply(tab, 1, sum))/ n * (pi - nrow * d), d)))
  mpts <- c(mpts.left, mpts.right + pi) + pi/2 + d/2

  DescTools::Canvas(10, main=main, xpd=TRUE)
  DescTools::DrawCircle(x=0, y=0, r.in=9.5, r.out=10,
                        theta.1=mpts[seq_along(mpts) %% 2 == 1],
                        theta.2=mpts[seq_along(mpts) %% 2 == 0],
                        col=acol, border=aborder)

  if(is.null(labels)) labels <- rev(c(rownames(tab), colnames(tab)))

  # breakpoints of the single cells within the sectors
  ttab <- rbind(DescTools::Rev(tab, margin=2) / n * (pi - ncol * d), d)
  pts.left <- (c(0, cumsum(as.vector(ttab))))
  ttab <- rbind(DescTools::Rev(t(tab), margin=2)/ n * (pi - nrow * d), d)
  pts.right <- (c( cumsum(as.vector(ttab)))) + pi
  pts <- c(pts.left, pts.right) + pi/2 + d/2
  dpt <- data.frame(from=pts[-length(pts)], to=pts[-1])

  # draw the ribbons cell by cell
  for( i in 1:ncol) {
    for( j in 1:nrow) {
      lang <- dpt[(i-1)*(nrow+1)+j,]
      rang <- DescTools::Rev(dpt[-nrow(dpt),], margin=1)[(j-1)*(ncol+1) + i,]
      ribbon( angle1.beg=rang[,2], angle1.end=lang[,1], angle2.beg=rang[,1], angle2.end=lang[,2],
              radius1 = 10, radius2 = 9, col = rcol[j], border = rborder[j])
    }}

  # label positions: midpoint angle of each sector, dist units outside the circle
  out <- DescTools::PolToCart(r = 10 + dist, theta=filter(mpts, rep(1/2,2))[seq(1,(nrow+ncol)*2, by=2)])

  if(las == 2){
    if(is.null(adj)) adj <- c(rep(1, nrow), rep(0,ncol))
    # fix: the argument is length.out -- "length_out" was silently swallowed
    # by rep(...)'s dots, so adj was never recycled to the number of labels
    adj <- rep(adj, length.out=length(labels))
    sapply(seq_along(labels),
           function(i) text(out$x[i], out$y[i], labels=labels[i], cex=cex.lab,
                            srt=DescTools::RadToDeg(atan(out$y[i]/out$x[i])), adj=adj[i]))
  } else {
    text(out, labels=labels, cex=cex.lab, srt=ifelse(las==3, 90, 0), adj=adj)
  }

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

  invisible(out)
}
###
## plots: PlotWeb ====
PlotWeb <- function(m, col=c(hred, hblue), lty=NULL, lwd = NULL, args.legend=NULL, pch=21, pt.cex=2,
                    pt.col="black", pt.bg="darkgrey", cex.lab = 1.0,
                    las = 1, adj = NULL, dist = 0.5, ... ){

  # Plot a symmetric association matrix m as a "web": the variables are
  # placed on a circle and every pair is connected by a segment whose width
  # is proportional to abs(m[i,j]) and whose colour encodes the sign
  # (col[1] for negative, col[2] for positive values).
  # Following an idea from library(LIM), example(plotweb).
  #   m    : symmetric numeric matrix with colnames
  #   col  : two colours for negative/positive associations
  #   lty, lwd : optional fixed line type / width (default: width scaled to value)
  #   las, adj, dist : label orientation, justification and distance
  # Returns invisibly the node coordinates.

  oldpar <- par(c("lend","xpd"))
  on.exit(par(oldpar))

  w <- 4
  par("xpd"=TRUE, lend="butt")
  DescTools::Canvas(w, ...)

  # node positions on a circle with radius 3, labels slightly outside
  angles <- seq(0, 2*pi, length=nrow(m)+1)[-1]
  xy <- DescTools::PolToCart(r=3, theta=angles)
  xylab <- DescTools::PolToCart(r=3 + dist, theta=angles)

  labels <- colnames(m)
  if(las == 2){
    if(is.null(adj)) adj <- (angles %[]% c(pi/2, 3*pi/2))*1
    # fix: length.out (length_out was silently ignored by rep)
    adj <- rep(adj, length.out=length(labels))
    sapply(seq_along(labels),
           function(i) text(xylab$x[i], xylab$y[i], labels=labels[i], cex=cex.lab,
                            srt=DescTools::RadToDeg(atan(xy$y[i]/xy$x[i])), adj=adj[i]))
  } else {
    if(is.null(adj)){
      if(las==1)
        adj <- (angles %[]% c(pi/2, 3*pi/2))*1
      if(las==3)
        adj <- (angles %[]% c(3*pi/4, 7*pi/4))*1
    }
    # fix: length.out (length_out was silently ignored by rep)
    adj <- rep(adj, length.out=length(labels))
    sapply(seq_along(labels),
           function(i) text(xylab$x[i], xylab$y[i], labels=labels[i], cex=cex.lab,
                            srt=ifelse(las==3, 90, 0), adj=adj[i]))
  }

  # linear transformation bounds for the line widths
  a <- 0.5
  b <- 10

  # one row per unordered pair of variables
  i <- DescTools::CombPairs(1:dim(m)[1])
  # fix: second column was accidentally also named "from"; it holds the target
  d.m <- data.frame(from=colnames(m)[i[,1]], to=colnames(m)[i[, 2]], d=m[lower.tri(m)],
                    from.x=xy[[1]][i[,2]], to.x=xy[[1]][i[,1]],
                    from.y=xy[[2]][i[,2]], to.y=xy[[2]][i[,1]])

  # line width: scale |d| to [a, b] unless the user supplied a fixed lwd
  if(is.null(lwd))
    d.m$d.sc <- DescTools::LinScale(abs(d.m$d), newlow=a, newhigh=b )
  else
    d.m$d.sc <- lwd

  # fix: the condition tested lwd instead of lty (copy-paste bug), so a
  # user-supplied lty was ignored whenever lwd was given (and vice versa)
  if(is.null(lty))
    d.m$lty <- par("lty")
  else
    d.m$lty <- lty

  col <- rep(col, length.out=2)
  segments( x0=d.m$from.x, y0=d.m$from.y, x1 = d.m$to.x, y1 = d.m$to.y,
            col = col[((sign(d.m$d)+1)/2)+1], lty = d.m$lty, lwd=d.m$d.sc, lend= 1)
  points( xy, cex=pt.cex, pch=pch, col=pt.col, bg=pt.bg )

  # legend shows min/max negative and min/max positive association
  i <- c(which.min(d.m$d), which.max(ifelse(d.m$d<=0, d.m$d, NA)), which.min(ifelse(d.m$d>0, d.m$d, NA)), which.max(d.m$d))

  args.legend1 <- list( x="bottomright",
                        legend=Format(d.m$d[i], digits=3, leading="drop"), lwd = d.m$d.sc[i],
                        col=rep(col, each=2), bg="white", cex=0.8)
  if ( !is.null(args.legend) ) { args.legend1[names(args.legend)] <- args.legend }

  # args.legend=NA suppresses the legend entirely
  add.legend <- TRUE
  if(!is.null(args.legend)) if(all(is.na(args.legend))) {add.legend <- FALSE}
  if(add.legend) do.call("legend", args.legend1)

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

  invisible(xy)
}
###
## plots: PlotCandlestick ====
PlotCandlestick <- function(x, y, xlim = NULL, ylim = NULL, col = c("springgreen4","firebrick"), border=NA, args.grid = NULL, ...) {

  # Candlestick chart: x gives the positions, y is a 4-column matrix whose
  # columns are open, low, high, close. Falling candles (open > close) use
  # col[2], the others col[1]. args.grid = NA suppresses the grid.

  if (is.null(xlim)) xlim <- range(x[is.finite(x)])
  if (is.null(ylim)) ylim <- range(y[is.finite(y)])

  # empty canvas; the x-axis is drawn manually at the candle positions below
  plot(x = 1, y = 1, xlim = xlim, ylim = ylim,
       type = "n", xaxt = "n", xlab = "", ...)

  # draw the grid unless the user passed args.grid = NA; user-supplied
  # arguments override the defaults
  if (!(!is.null(args.grid) && all(is.na(args.grid)))) {
    grid.args <- list(lty = "solid", col = "grey83")
    if (!is.null(args.grid))
      grid.args[names(args.grid)] <- args.grid
    do.call("grid", grid.args)
  }

  # candle colour index: 1 = rising/flat, 2 = falling (open > close)
  candle.col <- col[(y[, 1] > y[, 4]) + 1]

  # wick from low to high, then the body from open to close
  segments(x0 = x, y0 = y[, 2], y1 = y[, 3], col = candle.col)
  rect(xleft = x - 0.3, ybottom = y[, 1], xright = x + 0.3, ytop = y[, 4],
       col = candle.col, border = border)

  axis(side = 1, at = x, labels = x)

  if (!is.null(DescToolsOptions("stamp")))
    Stamp()
}
###
## plots: PlotSuperbar
# overlaid bar plots
# see Superbarplot in the UsingR package
###
## plots: PlotMatrix ====
PlotMatrix <- function(x, y=NULL, data=NULL, panel=l.panel,
                       nrows=0, ncols=nrows, save=TRUE, robrange.=FALSE, range.=NULL,
                       pch=NULL, col=1, reference=0, ltyref=3,
                       log="", xaxs="r", yaxs="r", xaxmar=NULL, yaxmar=NULL,
                       vnames=NULL, main='', cex.points=NA, cex.lab=0.7, cex.text=1.3,
                       cex.title=1,
                       bty="o", oma=NULL, ...) {
  # Purpose: pairs with different plotting characters, marks and/or colors
  #          showing submatrices of the full scatterplot matrix
  #          possibly on several pages
  # ******************************************************************************
  # Author: Werner Stahel, Date: 23 Jul 93; minor bug-fix+comments: M.Maechler
  #
  #   x, y    : formula, data.frame or matrix; with y given, x variables go on
  #             the horizontal and y variables on the vertical axes
  #   range.  : fixed plotting ranges, either a 2-vector, or a 2-row matrix
  #             (optionally with variable names as colnames)
  #   save    : skip the redundant upper triangle when y is NULL
  #   panel   : function(x, y, indx, indy, pch, col, ...) drawing one panel

  is.formula <- function(object) length(class(object))>0 && class(object)=="formula"

  # default panel: points, or text if pch is a character vector
  l.panel <- function(x,y,indx,indy,pch=1,col=1,cex=cex.points,...) {
    if (is.character(pch)) text(x,y,pch,col=col,cex=cex) else
      points(x,y,pch=pch,col=col,cex=cex,...)
  }

  oldpar <- par(c("mfrow","mar","cex","oma","mgp"))
  on.exit(par(oldpar))
  # **************** preparations **************
  # data
  if (is.formula(x)) {
    if (length(x)==2)
      x <- model.frame(x,data, na.action=NULL) else {
        ld <- model.frame(x[c(1,3)],data, na.action=NULL)
        ld <- cbind(ld, model.frame(x[1:2],data, na.action=NULL))
        x <- ld
      }
  }
  if (is.data.frame(x)) {
    for (jj in 1:length(x)) x[[jj]] <- as.numeric(x[[jj]])
    x <- as.matrix(x)
  } else x <- cbind(x)
  #   stop("!PlotMatrix! first argument must either be a formula or a data.frame or matrix")
  nv1 <- dim(x)[2]
  lv1 <- lv2 <- 0
  if (is.null(y)) {
    ldata <- x
    if (save) { nv1 <- nv1-1; lv2 <- 1 }
    nv2 <- nv1
  }  else { # cbind y to data for easier preparations
    save <- FALSE
    if (is.formula(y)) {
      ld <- model.frame(x[c(1,3)],data, na.action=NULL)
      if (length(x)>2)
        ld <- cbind(ld, model.frame(x[1:2],data, na.action=NULL))
      x <- ld
    }
    if (is.formula(y)) {
      if (length(y)==2)
        y <- model.frame(y,data, na.action=NULL) else {
          ld <- model.frame(y[c(1,3)],data, na.action=NULL)
          ld <- cbind(ld, model.frame(y[1:2],data, na.action=NULL))
          y <- ld
        }
    }
    if (is.data.frame(y)) {
      for (jj in 1:length(y)) y[[jj]] <- as.numeric(y[[jj]])
      y <- as.matrix(y)
    }
    ldata <- cbind(x, as.matrix(y))
    nv2 <- ncol(ldata)-nv1 ; lv2 <- nv1 }
  nvv <- ncol(ldata)
  tnr <- nrow(ldata)
  # variable labels
  if (missing(vnames)) vnames <- dimnames(ldata)[[2]]
  if (is.null(vnames)) vnames <- paste("V",1:nvv)
  # plotting characters
  if (length(pch)==0) pch <- 1
  # range
  rg <- matrix(nrow=2,ncol=nvv,dimnames=list(c("min","max"),vnames))
  if(is.matrix(range.)) {
    if (is.null(colnames(range.))) {
      # fix: compared ncol(range) -- i.e. of base::range, which is NULL and
      # made the condition error out -- instead of the range. argument
      if (ncol(range.)==ncol(rg)) rg[,] <- range. else
        warning('argument range. not suitable. ignored')
    } else {
      lj <- match(colnames(range.),vnames)
      if (any(is.na(lj))) {
        warning('variables', colnames(range.)[is.na(lj)],'not found')
        if (any(!is.na(lj))) rg[,lj[!is.na(lj)]] <- range.[,!is.na(lj)]
      }
    }
  }
  else
    if (length(range.)==2&&is.numeric(range.)) rg[,] <- matrix(range.,2,nvv)
  # fill missing ranges from the data
  lna <- apply(is.na(rg),2, any)
  if (any(lna))
    rg[,lna] <- apply(ldata[,lna,drop=FALSE],2,
                      Range, robust=robrange., na.rm=TRUE, finite=TRUE)
  colnames(rg) <- vnames
  # reference lines
  tjref <- (length(reference)>0)&&!(is.logical(reference)&&!reference)
  if (tjref) {
    if(length(reference)==1) lref <- rep(reference,length=nvv) else {
      lref <- rep(NA,nvv)
      lref[match(names(reference),vnames)] <- reference
    }
    names(lref) <- vnames
  }
  # plot layout: how many panel rows/columns per page
  jmain <- !is.null(main)&&main!=""
  lpin <- par("pin")
  lnm <- if (lpin[1]>lpin[2]) {
    if (nv1==6 && nv2==6) c(6,6) else c(5,6) } else c(8,5)
  if (is.na(nrows)||nrows<1) nrows <- ceiling(nv1/((nv1-1)%/%lnm[1]+1))
  if (is.na(ncols)||ncols<1) ncols <- ceiling(nv2/((nv2-1)%/%lnm[2]+1))
  if (is.null(xaxmar)) xaxmar <- 1+(nv1*nv2>1)
  if (any(is.na(xaxmar))) xaxmar <- 1+(nv1*nv2>1)
  xaxmar <- ifelse(xaxmar>1,3,1)
  if (is.null(yaxmar)) yaxmar <- 2+(nv1*nv2>1)
  if (any(is.na(yaxmar))) yaxmar <- 2+(nv1*nv2>1)
  yaxmar <- ifelse(yaxmar>2,4,2)
  if (length(oma)!=4)
    oma <- c(2+(xaxmar==1), 2+(yaxmar==2),
             1.5+(xaxmar==3)+cex.title*2*jmain,
             2+(yaxmar==4))
  par(mfrow=c(nrows,ncols))
  par(oma=oma*cex.lab, mar=rep(0.2,4), mgp=cex.lab*c(1,0.5,0))
  if (is.na(cex.points)) cex.points <- max(0.2,min(1,1.5-0.2*log(tnr)))
  #
  # log: non-positive values cannot be plotted on a log scale
  if (length(grep("x",log))>0) ldata[ldata[,1:nv1]<=0,1:nv1] <- NA
  if (length(grep("y",log))>0) ldata[ldata[,lv2+1:nv2]<=0,lv2+1:nv2] <- NA
  npgr <- ceiling(nv2/nrows)
  npgc <- ceiling(nv1/ncols)
  # ******************** plots **********************
  for (ipgr in 1:npgr) {
    lr <- (ipgr-1)*nrows
    for (ipgc in 1:npgc) {
      lc <- (ipgc-1)*ncols
      if (save&&((lr+nrows)<=lc)) break
      for (jr in 1:nrows) { #-- plot row  [j]
        jd2 <- lr+jr
        j2 <- lv2 + jd2
        if (jd2<=nv2)  v2 <- ldata[,j2]
        for (jc in 1:ncols) { #-- plot column  [j2-lv2] = 1:nv2
          jd1 <- lc+jc
          j1 <- lv1 + jd1
          if (jd2<=nv2 & jd1<=nv1) {
            v1 <- ldata[,j1]
            # fix: xlim/ylim were given with <- (accidental global-ish
            # assignment; only worked through positional matching) -- use =
            plot(v1,v2, type="n", xlab="", ylab="", axes=FALSE,
                 xlim=rg[,j1], ylim=rg[,j2],
                 xaxs=xaxs, yaxs=yaxs, log=log, cex=cex.points)
            usr <- par("usr")
            if (jr==nrows||jd2==nv2) {
              if (xaxmar==1) axis(1)
              mtext(vnames[j1], side=1, line=(0.5+1.2*(xaxmar==1))*cex.lab,
                    cex=cex.lab, at=mean(usr[1:2]))
            }
            if (jc==1) {
              if (yaxmar==2) axis(2)
              mtext(vnames[j2], side=2, line=(0.5+1.2*(yaxmar==2))*cex.lab,
                    cex=cex.lab, at=mean(usr[3:4]))
            }
            if (jr==1&&xaxmar==3) axis(3,xpd=TRUE)
            if (jc==ncols||jd1==nv1) if (yaxmar==4) axis(4,xpd=TRUE)
            box(bty=bty)
            if (any(v1!=v2,na.rm=TRUE)) { # not diagonal
              panel(v1,v2,jd1,jd2, pch, col, ...)
              if (tjref) abline(h=lref[j1],v=lref[j2],lty=ltyref)
            }
            else { uu <- par("usr") # diagonal: print variable name
            text(mean(uu[1:2]),mean(uu[3:4]), vnames[j1], cex=cex.text) }
          }
          else frame()
        }
      }
      if (jmain)  mtext(main,3,oma[3]*0.9-2*cex.title,outer=TRUE,cex=cex.title)
    }}
  # note: par is restored by the on.exit() registered above
  "PlotMatrix: done"
}
###
## plots: ACF, GACF and other TimeSeries plots ----------
PlotACF <- function(series, lag.max = 10*log10(length(series)), ...) {

  ## Combined time-series display via split.screen: the series itself on
  ## top, its ACF bottom-left and its partial ACF bottom-right.
  ## Arguments: series  - a univariate time series
  ##            lag.max - maximum number of lags for the correlograms
  ## (Originally f.acf; M. Huerzeler / C. Keller.)

  # deactivate the stamp option for the first two screens; it is restored
  # below so that only the last plot gets stamped
  stamp.opt <- DescToolsOptions(stamp=NULL)

  if (!is.null(dim(series)))
    stop("f.acf is only implemented for univariate time series")

  par(mfrow=c(1,1))
  opar <- par(mar=c(3,3,1,1), mgp=c(1.5,0.5,0))
  on.exit(par(opar))

  scr <- matrix(c(0,   1, 0.33, 1,
                  0, 0.5, 0,    0.33,
                  0.5, 1, 0,    0.33), ncol=4, byrow=TRUE)
  split.screen(figs=scr, erase=TRUE)

  ## screen 1: the series itself
  plot.ts(series, cex=0.7, ylab=deparse(substitute(series)), ...)

  ## screen 2: autocorrelations
  screen(2)
  PlotGACF(series, lag.max=lag.max, cex=0.7)

  ## screen 3: partial autocorrelations -- reactivate the stamp first,
  ## so that only this last plot is stamped
  screen(3)
  options(stamp.opt)
  PlotGACF(series, lag.max=lag.max, type="part", cex=0.7)

  close.screen(all.screens=TRUE)

  invisible(par(opar))
}
PlotGACF <- function(series, lag.max=10*log10(length(series)), type="cor", ylab=NULL, ...) {

  ## Draw a correlogram (autocorrelations, autocovariances or partial
  ## autocorrelations) as vertical lines with the usual +/- 2/sqrt(n)
  ## confidence bounds. The lag-0 autocorrelation (always 1) is zeroed out,
  ## and for the partial ACF a zero is prepended so the lags line up.
  ## (Originally g.plot.acf; M. Huerzeler / C. Keller;
  ##  lag-axis correction for ts objects Andri/10.01.2014.)

  type <- match.arg(type, c("cor","cov","part"))

  res <- acf(na.omit(series), type=type, plot=FALSE, lag.max=lag.max)
  vals <- res$acf

  if (type == "cor") {
    # suppress acf(0) = 1, it would dominate the scale
    vals[1] <- 0
    if (is.null(ylab)) ylab <- "ACF"
  }
  if (type == "part") {
    # prepend a zero so the partial ACF starts at lag 0 like the ACF
    vals <- c(0, vals)
    if (is.null(ylab)) ylab <- "PACF"
  }

  konf <- 2/sqrt(res$n.used)
  ylim <- range(c(vals, konf, -konf)) * c(1.1, 1.1)

  # use plain lags 0, 1, 2, ... even for ts objects (ignore phases)
  lags <- seq_along(vals) - 1

  ## x-axis tick positions; the first label is always lags[1]
  pos <- pretty(c(0, lags))
  n <- length(pos)
  d <- pos[2] - pos[1]
  f <- pos[1] - lags[1]
  pos <- c(lags[1], pos[1][f > d/2], pos[2:n])

  plot(lags, vals, type="h", ylim=ylim, xlab="Lag k", ylab=ylab,
       xaxt="n", xlim=c(0, length(vals)), ...)
  axis(1, at=pos, ...)
  abline(0, 0)
  abline(h=c(konf, -konf), lty=2, col="blue")

  if (!is.null(DescToolsOptions("stamp")))
    Stamp()

  invisible()
}
PlotMonth <- function(x, type = "l", labels, xlab = "", ylab = deparse(substitute(x)), ...)
  #--
  # For univariate time series: plots the monthly (or seasonal) effects,
  # i.e. one sub-plot per cycle position with its mean as a horizontal line.
  #
  # taken over from S+5 and adapted for R
  #
  # x must be a univariate time series
  # type: "l" draws the per-season sub-series as lines,
  #       "h" draws vertical segments from each value to the seasonal mean
  #--
{
  if(length(dim(x)))
    stop("This implementation is only for univariate time series")
  old.opts <- options(warn = -1)
  on.exit(options(old.opts))
  if(!(type == "l" || type == "h"))
    stop(paste("type is \"", type, "\", it must be \"l\" or \"h\"",
               sep = ""))
  f <- frequency(x)
  cx <- cycle(x)
  m <- tapply(x, cx, mean)   # seasonal means
  # pad the series with NAs so it starts at cycle 1 and ends at cycle f
  if(cx[1] != 1 || cx[length(x)] != f) {
    x <- ts(c(rep(NA, cx[1] - 1), x, rep(NA, f - cx[length(x)])),
            start = start(x, format = T)[1], end = c(end(x, format
                                                         = T)[1], f), frequency = f)
    cx <- cycle(x)
  }
  i <- order(cx)     # indices grouping the values by cycle position
  n <- length(x)
  # default labels: month / quarter names, else plain numbers
  if(missing(labels))
    labels <- if(f == 12) c("Jan", "Feb", "Mar", "Apr", "May",
                            "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
    ) else if(f == 4)
      c("First", "Second", "Third", "Fourth")
  else 1:f
  if(length(labels) != f)
    stop(paste("There must be", f, "labels"))
  p <- n/f                               # number of cycles (sub-series length)
  hx <- seq(1, n, by = p) + (0:(f - 1))  # x start position of each sub-plot
  hy <- rep(m, rep(2, length(m)))
  X <- as.vector(outer(0:(p - 1), hx, "+"))
  plot(c(1, n + f), range(x[!is.na(x)]), type = "n", axes = F, xlab =
         xlab, ylab = ylab, ...)
  # strip arguments from ... that must not be passed on to lines()/segments()
  dotdot <- list(...)
  ddttl <- match(c("main", "sub", "axes", "ylim"), names(dotdot), nomatch
                 = 0)
  ddttl <- ddttl[ddttl != 0]
  add.axes <- T
  if(length(ddttl)) {
    if(any(names(dotdot) == "axes"))
      add.axes <- dotdot$axes
    dotdot <- dotdot[ - ddttl]
  }
  if(type == "l")
    # one line per cycle position, drawn over its own x range
    for(j in 1:f)
      do.call("lines", c(list(hx[j]:(hx[j] + p - 1), x[i][
        ((j - 1) * p + 1):(j * p)]), dotdot))
  else if(type == "h")
    # vertical segments from each value down/up to its seasonal mean
    do.call("segments", c(list(X, x[i], X, m[cx][i]), dotdot))
  # horizontal line at the seasonal mean of each sub-plot
  do.call("segments", c(list(hx, m, hx + p, m), dotdot))
  if(add.axes) {
    box()
    axis(2)
    axis(1, at = hx + p/2, labels = labels)
  }
  if(!is.null(DescToolsOptions("stamp")))
    Stamp()
  invisible()
}
PlotQQ <- function(x, qdist, main=NULL, xlab=NULL, ylab=NULL, add=FALSE,
                   args.qqline=NULL, conf.level=0.95, args.cband = NULL, ...) {

  # Q-Q plot of x against an arbitrary theoretical distribution, given as a
  # quantile function qdist, with an optional simulation-based confidence
  # band and a quantile-quantile line through the 25%/75% quantiles.
  # example:
  #   y <- rexp(100, 1/10)
  #   PlotQQ(y, function(p) qexp(p, rate=1/10))
  #
  #   qdist       : quantile function of the reference distribution
  #   args.qqline : list of arguments for the qq line (probs, qtype, col,
  #                 lwd, lty); NA suppresses the line
  #   conf.level  : level of the simulated band; NA (or args.cband=NA)
  #                 suppresses it

  y <- sort(x)
  p <- ppoints(y)
  x <- qdist(p)      # theoretical quantiles at the plotting positions

  if(is.null(main)) main <- gettextf("Q-Q-Plot", qdist)
  if(is.null(xlab)) xlab <- "Theoretical Quantiles"
  if(is.null(ylab)) ylab <- "Sample Quantiles"

  if(!add)
    plot(x=x, y, main=main, xlab=xlab, ylab=ylab, type="n", ...)

  # add confidence band if desired: pointwise quantiles of 1000 sorted
  # random samples drawn from the reference distribution (Monte-Carlo band)
  if (!(is.na(conf.level) || identical(args.cband, NA)) ) {

    cix <- qdist(ppoints(x))
    ciy <- replicate(1000, sort(qdist(runif(length(x)))))

    args.cband1 <- list(col = SetAlpha(Pal()[1], 0.25), border = NA)
    if (!is.null(args.cband))
      args.cband1[names(args.cband)] <- args.cband

    ci <- apply(ciy, 1, quantile, c(-1, 1) * conf.level/2 + 0.5)

    do.call("DrawBand", c(args.cband1,
                          list(x = c(cix, rev(cix))),
                          list(y = c(ci[1,], rev(ci[2,])) )
    ))
  }

  # draw the points on top of the band
  points(x=x, y=y, ...)

  # John Fox implements a envelope option in car::qqplot, in the sense of:
  # (unfortunately using ddist...)
  #
  #   # add qqline if desired
  #   if(!identical(args.band, NA)) {
  #     n <- length(x)
  #     zz <- qnorm(1 - (1 - args.band$conf.level) / 2)
  #     SE <- (slope / d.function(z, ...)) * sqrt(p * (1 - p) / n)
  #     fit.value <- int + slope * z
  #
  #     upper <- fit.value + zz * SE
  #     lower <- fit.value - zz * SE
  #
  #     lines(z, upper, lty = 2, lwd = lwd, col = col.lines)
  #     lines(z, lower, lty = 2, lwd = lwd, col = col.lines)
  #   }

  # add qqline if desired
  if(!identical(args.qqline, NA)) {
    # define default arguments for ci.band
    args.qqline1 <- list(probs = c(0.25, 0.75), qtype=7, col=par("fg"), lwd=par("lwd"), lty=par("lty"))
    # override default arguments with user defined ones
    if (!is.null(args.qqline)) args.qqline1[names(args.qqline)] <- args.qqline

    # estimate qqline, instead of set it to abline(a = 0, b = 1)
    # plot qqline through the 25% and 75% quantiles (same as qqline does for normal dist)
    ly <- quantile(y, prob=args.qqline1[["probs"]], type=args.qqline1[["qtype"]], na.rm = TRUE)
    lx <- qdist(args.qqline1[["probs"]])
    slope <- diff(ly) / diff(lx)
    int <- ly[1L] - slope * lx[1L]

    do.call("abline", c(args.qqline1[c("col","lwd","lty")], list(a=int, b=slope)) )
  }

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()
}
## Describe ====
# not needed anymore, by 0.99.19
# .txtline <- function(txt, width, space="", ind="") {
# paste(
# ind, paste(format(names(txt), width=width, justify="right"), collapse=space), "\n",
# ind, paste(format(txt, width=width, justify="right"), collapse=space), "\n",
# sep="" )
# }
TOne <- function(x, grp = NA, add.length=TRUE,
                 colnames=NULL, vnames=NULL, total=TRUE,
                 align="\\l", FUN = NULL, NUMTEST = NULL, numtestlab = NULL){

  # Creates the "table one" of a study: one row (or row block) per variable
  # in x, split by the grouping factor grp, with a suitable test per
  # variable type:
  #   numeric     -> mean (sd), Kruskal-Wallis test (or user NUMTEST)
  #   dichotomous -> n (%) of the first level, Fisher/Chi-Square test
  #   categorical -> n (%) per level, Chi-Square test
  # Number formats are taken from Fmt("abs"), Fmt("per") and Fmt("num").

  afmt <- Fmt("abs")
  pfmt <- Fmt("per")
  nfmt <- Fmt("num")

  if(is.null(vnames)){
    vnames <- if(is.null(colnames(x))) "Var1" else colnames(x)
    default_vnames <- TRUE
  } else {
    # fix: this branch previously also set TRUE, so user-supplied vnames
    # were treated as defaults and the reference level was always appended
    default_vnames <- FALSE
  }

  # cell representation for numeric variables: "mean (sd)" unless the user
  # supplies an own FUN
  if(is.null(FUN)){
    num_fun <- function(x){
      gettextf("%s (%s)",
               Format(mean(x, na.rm=TRUE), fmt=nfmt),
               Format(sd(x, na.rm=TRUE), fmt=nfmt))
    }
  } else {
    num_fun <- FUN
  }

  # define test for numeric values
  if(is.null(NUMTEST)){
    num_test <- function(x, g){
      Format(kruskal.test(x = x, g = g)$p.value, fmt="*", na.form = "   ")
    }
    numtestlab <- "Kruskal-Wallis test"

  } else {
    num_test <- NUMTEST
    if(is.null(numtestlab)) numtestlab <- "numeric test"
  }

  # one row for a numeric variable: total, per-group cells, test result
  num_row <- function(x, g, total=TRUE, vname = deparse(substitute(x))){
    if(!identical(g, NA)) {
      res <- num_test(x, g)
    } else {
      res <- ""
    }
    cbind(var=vname, total = num_fun(x), rbind(tapply(x, g, num_fun)),
          paste(res, .FootNote(1)))
  }

  # row block for a categorical variable: header row plus one row per level
  cat_mat <- function(x, g, vname=deparse(substitute(x))){
    if(is.character(x))
      x <- factor(x)

    tab <- table(x, g)
    ptab <- prop.table(tab, margin = 2)

    tab <- addmargins(tab, 2)
    ptab <- cbind(ptab, Sum=prop.table(table(x)))

    # crunch tab and ptab into "n (p)" cells
    m <- matrix(NA, nrow=nrow(tab), ncol=ncol(tab))
    m[,] <- gettextf("%s (%s)",
                     Format(tab, fmt=afmt),
                     Format(ptab, fmt=pfmt))
    # totals to the left
    m <- m[, c(ncol(m), 1:(ncol(m)-1))]

    # set rownames: variable name, indented level labels
    m <- cbind( c(vname, paste(" ", levels(x))),
                rbind("", m))

    # add test (Chi-Square needs at least 2 levels)
    if(nrow(tab)>1)
      p <- chisq.test(tab)$p.value
    else
      p <- NA
    m <- cbind(m, c(paste(Format(p, fmt="*", na.form = "   "), ifelse(is.na(p), "", .FootNote(3))), rep("", nlevels(x))))

    # degenerate case (one level): collapse to a single row
    if(nrow(m) <=3) {
      m[2,1] <- gettextf("%s (= %s)", m[1, 1], row.names(tab)[1])
      m <- m[2, , drop=FALSE]
    }
    colnames(m) <- c("var","total", head(colnames(tab), -1), "")

    m

  }

  # one row for a dichotomous variable: Fisher test for 2x2, else Chi-Square
  dich_mat <- function(x, g, vname=deparse(substitute(x))){

    tab <- table(x, g)
    if(identical(dim(tab), c(2L,2L))){
      p <- fisher.test(tab)$p.value
      foot <- .FootNote(2)
    } else {
      p <- chisq.test(tab)$p.value
      foot <- .FootNote(3)
    }
    ptab <- prop.table(tab, 2)
    tab <- addmargins(tab, 2)
    ptab <- cbind(ptab, Sum = prop.table(tab[,"Sum"]))

    m <- matrix(NA, nrow=nrow(tab), ncol=ncol(tab))
    m[,] <- gettextf("%s (%s)",
                     Format(tab, fmt=afmt),
                     Format(ptab, fmt=pfmt))
    # totals to the left
    m <- m[, c(ncol(m), 1:(ncol(m)-1)), drop=FALSE]

    m <- rbind(c(vname, m[1,], paste(Format(p, fmt="*", na.form = "   "), foot)))
    colnames(m) <- c("var","total", head(colnames(tab), -1), "")

    m

  }

  if(mode(x) %in% c("logical","numeric","complex","character"))
    x <- data.frame(x)

  # find description types: dich(otomous), num(eric), cat(egorical)
  ctype <- sapply(x, class)
  # should we add "identical type": only one value??
  ctype[sapply(x, IsDichotomous, strict=TRUE, na.rm=TRUE)] <- "dich"
  ctype[sapply(ctype, function(x) any(x %in% c("numeric","integer")))] <- "num"
  ctype[sapply(ctype, function(x) any(x %in% c("factor","ordered","character")))] <- "cat"

  lst <- list()
  for(i in 1:ncol(x)){
    if(ctype[i] == "num"){
      lst[[i]] <- num_row(x[,i], grp, vname=vnames[i])
    } else if(ctype[i] == "cat") {
      lst[[i]] <- cat_mat(x[,i], grp, vname=vnames[i])
    } else if(ctype[i] == "dich") {
      if(default_vnames){
        # only declare the ref level on default_vnames
        lst[[i]] <- dich_mat(x[,i], grp, vname=gettextf("%s (= %s)", vnames[i], head(levels(factor(x[,i])), 1)))
      } else {
        # the user is expected to define ref level, if he wants one
        lst[[i]] <- dich_mat(x[,i], grp, vname=gettextf("%s", vnames[i]))
      }

    } else {
      # unsupported type: emit an empty row carrying only the variable name
      lst[[i]] <- rbind(c(colnames(x)[i], rep(NA, nlevels(grp) + 2)))
    }
  }

  res <- do.call(rbind, lst)

  # first row: group sizes n (%)
  if(add.length)
    res <- rbind(c("n", c(Format(sum(!is.na(grp)), fmt=afmt),
                          paste(Format(table(grp), fmt=afmt), " (",
                                Format(prop.table(table(grp)), fmt=pfmt), ")", sep=""), ""))
                 , res)

  if(!is.null(colnames))
    colnames(res) <- colnames

  # align the table
  if(align != "\\l")
    res[,-c(1, ncol(res))] <- StrAlign(res[,-c(1, ncol(res))], sep = align)

  attr(res, "legend") <- gettextf("%s) %s, %s) Fisher exact test, %s) Chi-Square test\nSignif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1",
                                  .FootNote(1), numtestlab, .FootNote(2), .FootNote(3))

  if(!total)
    res <- res[, -2]

  class(res) <- "TOne"
  return(res)

}
.FootNote <- function(i){
  # internal helper, not exported: return the i-th footnote marker symbol,
  # taken from the DescTools "footnote" option, with a built-in fallback
  marks <- DescToolsOptions("footnote")
  if (is.null(marks))
    marks <- c("'", '"', '""')
  return(marks[i])
}
print.TOne <- function(x, ...){
  # Print method for TOne objects: the table body left-justified with the
  # column names as a first row, followed by the legend attribute.
  body <- format(rbind(colnames(x), x), justify="left")
  write.table(body, row.names=FALSE, col.names=FALSE, quote=FALSE)
  cat("---\n")
  cat(attr(x, "legend"), "\n\n")
}
Flags <- function(x, na.rm=TRUE){
  # Extract all dichotomous ("flag") columns from a data.frame x.
  # fix: the na.rm argument was declared but ignored (TRUE was hard-coded in
  # the IsDichotomous call); it is now passed through. The default was
  # changed FALSE -> TRUE so existing calls keep their observed behaviour.
  res <- x[, sapply(x, IsDichotomous, na.rm=na.rm)]
  class(res) <- "flags"
  return(res)
}
PlotMosaic <- function (x, main = deparse(substitute(x)), horiz = TRUE, cols = NULL,
                        off = 0.02, mar = NULL, xlab = NULL, ylab = NULL, cex=par("cex"), las=2, ...) {

  # Mosaic plot of a 1- or 2-dimensional table x on a [0,1] x [0,1] canvas:
  # rectangle areas are proportional to the cell frequencies.
  #   horiz : TRUE splits first by rows (horizontal bands), FALSE by columns
  #   off   : gap between the tiles (fraction of the unit square)
  #   cols  : tile colours; default is a Pal()[1]-white-Pal()[2] ramp
  #   las   : label orientation for the margin labels
  # Returns invisibly the label positions (x, y).

  if(length(dim(x))>2){
    warning("PlotMosaic is restricted to max. 2 dimensions")
    invisible()
  }

  if (is.null(xlab))
    xlab <- Coalesce(names(dimnames(x)[2]), "x")
  if (is.null(ylab))
    ylab <- Coalesce(names(dimnames(x)[1]), "y")

  # derive margins from the longest row/column label, unless supplied
  if (is.null(mar)){
    # ymar <- 5.1
    # xmar <- 6.1

    inches_to_lines <- (par("mar") / par("mai") )[1]  # 5
    lab.width <- max(strwidth(colnames(x), units="inches")) * inches_to_lines
    xmar <- lab.width + 1
    lab.width <- max(strwidth(rownames(x), units="inches")) * inches_to_lines
    ymar <- lab.width + 1

    mar <- c(ifelse(is.na(xlab), 2.1, 5.1), ifelse(is.na(ylab), ymar, ymar+2),
             ifelse(is.na(main), xmar, xmar+4), 1.6)

    # par(mai = c(par("mai")[1], max(par("mai")[2], strwidth(levels(grp), "inch")) +
    #               0.5, par("mai")[3], par("mai")[4]))
  }

  Canvas(xlim = c(0, 1), ylim = c(0, 1), asp = NA, mar = mar)
  col1 <- Pal()[1]
  col2 <- Pal()[2]

  oldpar <- par(xpd = TRUE)
  on.exit(par(oldpar))

  # 1-dimensional case: a single band split proportionally
  if(any(dim(x)==1)) {

    if (is.null(cols))
      cols <- colorRampPalette(c(col1, "white", col2), space = "rgb")(length(x))

    if(horiz){
      # shrink the proportions to leave room for the off-gaps
      ptab <- prop.table(as.vector(x))
      pxt <- ptab * (1 - (length(ptab) - 1) * off)

      y_from <- c(0, cumsum(pxt) + (1:(length(ptab))) * off)[-length(ptab) - 1]
      y_to <- cumsum(pxt) + (0:(length(ptab) - 1)) * off

      if(nrow(x) > ncol(x))
        x <- t(x)

      # horizontal layout: the computed breaks become x coordinates
      x_from <- y_from
      x_to <- y_to
      y_from <- 0
      y_to <- 1

    } else {
      ptab <- rev(prop.table(as.vector(x)))
      pxt <- ptab * (1 - (length(ptab) - 1) * off)

      y_from <- c(0, cumsum(pxt) + (1:(length(ptab))) * off)[-length(ptab) - 1]
      y_to <- cumsum(pxt) + (0:(length(ptab) - 1)) * off

      x_from <- 0
      x_to <- 1
      if(ncol(x) > nrow(x))
        x <- t(x)

    }

    rect(xleft = x_from, ybottom = y_from, xright = x_to, ytop = y_to, col = cols)
    # label anchors: tile midpoints
    txt_y <- apply(cbind(y_from, y_to), 1, mean)
    txt_x <- Midx(c(x_from, 1))

  } else {
    # 2-dimensional case
    if (horiz) {
      if (is.null(cols))
        cols <- colorRampPalette(c(col1, "white", col2), space = "rgb")(ncol(x))

      # row-wise conditional proportions, shrunk for the gaps
      ptab <- Rev(prop.table(x, 1), margin = 1)
      ptab <- ptab * (1 - (ncol(ptab) - 1) * off)
      pxt <- Rev(prop.table(margin.table(x, 1)) * (1 - (nrow(x) - 1) * off))

      # horizontal band per row, split by columns within each band
      y_from <- c(0, cumsum(pxt) + (1:(nrow(x))) * off)[-nrow(x) - 1]
      y_to <- cumsum(pxt) + (0:(nrow(x) - 1)) * off
      x_from <- t((apply(cbind(0, ptab), 1, cumsum) + (0:ncol(ptab)) * off)[-(ncol(ptab) + 1), ])
      x_to <- t((apply(ptab, 1, cumsum) + (0:(ncol(ptab) - 1) * off))[-(ncol(ptab) + 1), ])

      for (j in 1:nrow(ptab)) {
        rect(xleft = x_from[j,], ybottom = y_from[j],
             xright = x_to[j,], ytop = y_to[j], col = cols)
      }
      txt_y <- apply(cbind(y_from, y_to), 1, mean)
      txt_x <- apply(cbind(x_from[nrow(x_from),], x_to[nrow(x_from),]), 1, mean)

      # srt.x <- if (las > 1) 90 else 0
      # srt.y <- if (las == 0 || las == 3) 90 else 0
      #
      # text(labels = Rev(rownames(x)), y = txt_y, x = -0.04, adj = ifelse(srt.y==90, 0.5, 1), cex=cex, srt=srt.y)
      # text(labels = colnames(x), x = txt_x, y = 1.04, adj = ifelse(srt.x==90, 0, 0.5), cex=cex, srt=srt.x)

    } else {
      if (is.null(cols))
        cols <- colorRampPalette(c(col1, "white", col2), space = "rgb")(nrow(x))

      # column-wise conditional proportions, shrunk for the gaps
      ptab <- Rev(prop.table(x, 2), margin = 1)
      ptab <- ptab * (1 - (nrow(ptab) - 1) * off)
      pxt <- (prop.table(margin.table(x, 2)) * (1 - (ncol(x) - 1) * off))

      # vertical band per column, split by rows within each band
      x_from <- c(0, cumsum(pxt) + (1:(ncol(x))) * off)[-ncol(x) - 1]
      x_to <- cumsum(pxt) + (0:(ncol(x) - 1)) * off
      y_from <- (apply(rbind(0, ptab), 2, cumsum) + (0:nrow(ptab)) *
                   off)[-(nrow(ptab) + 1), ]
      y_to <- (apply(ptab, 2, cumsum) + (0:(nrow(ptab) - 1) *
                                           off))[-(nrow(ptab) + 1), ]

      for (j in 1:ncol(ptab)) {
        rect(xleft = x_from[j], ybottom = y_from[, j], xright = x_to[j],
             ytop = y_to[, j], col = cols)
      }

      txt_y <- apply(cbind(y_from[, 1], y_to[, 1]), 1, mean)
      txt_x <- apply(cbind(x_from, x_to), 1, mean)

      # srt.x <- if (las > 1) 90 else 0
      # srt.y <- if (las == 0 || las == 3) 90 else 0
      #
      # text(labels = Rev(rownames(x)), y = txt_y, x = -0.04, adj = ifelse(srt.y==90, 0.5, 1), cex=cex, srt=srt.y)
      # text(labels = colnames(x), x = txt_x, y = 1.04, adj = ifelse(srt.x==90, 0, 0.5), cex=cex, srt=srt.x)
    }
  }

  # margin labels, rotated according to las
  srt.x <- if (las > 1) 90 else 0
  srt.y <- if (las == 0 || las == 3) 90 else 0

  text(labels = Rev(rownames(x)), y = txt_y, x = -0.04, adj = ifelse(srt.y==90, 0.5, 1), cex=cex, srt=srt.y)
  text(labels = colnames(x), x = txt_x, y = 1.04, adj = ifelse(srt.x==90, 0, 0.5), cex=cex, srt=srt.x)

  # title placed manually above the plot region
  if (!is.na(main)) {
    usr <- par("usr")
    plt <- par("plt")
    ym <- usr[4] + diff(usr[3:4])/diff(plt[3:4])*(plt[3]) + (1.2 + is.na(xlab)*4) * strheight('m', cex=1.2, font=2)
    text(x=0.5, y=ym, labels = main, cex=1.2, font=2)
  }

  if (!is.na(xlab)) title(xlab = xlab, line = 1)
  if (!is.na(ylab)) title(ylab = ylab)

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

  invisible(list(x = txt_x, y = txt_y))

}
###
# see also package Mosaic
# modelVars extract predictor variables from a model
ParseFormula <- function(formula, data=parent.frame(), drop = TRUE) {

  # Split a two-sided formula into its left- and right-hand side and
  # evaluate both sides independently with model.frame().
  # Interactions (a:b) are evaluated into single factors via interaction().
  # A "." is expanded to "all variables not on the other side"; it is not
  # allowed on both sides at once.
  # Returns a list(formula, lhs, rhs), each side holding the model.frame
  # (mf), the evaluated frame incl. interactions (mf.eval) and the variable
  # names (vars).

  xhs <- function(formula, data = parent.frame(), na.action=na.pass){
    # evaluate a one-sided formula: model.frame plus interaction handling

    # get all variables out of the formula
    vars <- attr(terms(formula, data=data), "term.labels")

    # evaluate model.frame, keeping NAs (na.pass) and dropping unused levels
    mf <- match.call(expand.dots = FALSE)
    m <- match(c("formula", "data", "na.action"), names(mf), 0)
    mf <- mf[c(1, m)]
    mf$na.action <- na.action
    mf$drop.unused.levels <- TRUE
    mf[[1]] <- as.name("model.frame")
    mf.rhs <- eval.parent(mf)

    # model frame does not evaluate interaction, so let's do that here
    d.tmp <- mf.rhs[,FALSE]   # create a new data.frame
    for(x in vars){
      if( length(grep(":", x))>0 )  # there's a : in the variable
        d.tmp <- data.frame(d.tmp,
                            interaction( mf.rhs[, names(mf.rhs)[names(mf.rhs) %in% unlist(strsplit(x, ":"))]],
                                         sep=":", drop = drop)  # set drop unused levels to TRUE here by default
        )
      else
        d.tmp <- data.frame(d.tmp, mf.rhs[,x])
    }
    names(d.tmp) <- vars

    return(list(formula=formula, mf=mf.rhs, mf.eval=d.tmp, vars=vars))
  }

  f1 <- formula

  # evaluate subset
  m <- match.call(expand.dots = FALSE)

  # do not support . on both sides of the formula
  if( (length(grep("^\\.$", all.vars(f1[[2]])))>0) && (length(grep("^\\.$", all.vars(f1[[3]])))>0) )
    stop("dot argument on both sides of the formula are not supported")

  # swap left and right hand side and take just the right side
  # so both sides are evaluated with right side logic, but independently
  lhs <- xhs(formula(paste("~", deparse(f1[[2]])), data=data), data=data)
  rhs <- xhs(formula(paste("~", deparse(f1[[3]])), data=data), data=data)

  # now handle the dot argument: "." expands to all variables of the other
  # side, so remove the other side's variables from the expansion
  if(any(all.vars(f1[[2]]) == ".")){          # dot on the left side
    lhs$vars <- lhs$vars[!lhs$vars %in% rhs$vars]
    lhs$mf <- lhs$mf[lhs$vars]
    lhs$mf.eval <- lhs$mf.eval[lhs$vars]

  } else if(any(all.vars(f1[[3]]) == ".")){   # dot on the right side
    rhs$vars <- rhs$vars[!rhs$vars %in% lhs$vars]
    rhs$mf <- rhs$mf[rhs$vars]
    rhs$mf.eval <- rhs$mf.eval[rhs$vars]

  } else {  # no dot: do nothing
  }

  list(formula=formula, lhs=list(mf=lhs$mf, mf.eval=lhs$mf.eval, vars=lhs$vars),
       rhs=list(mf=rhs$mf, mf.eval=rhs$mf.eval, vars=rhs$vars))

}
###
## Word fundamentals ====
createCOMReference <- function(ref, className) {
  # Thin wrapper delegating to RDCOMClient::createCOMReference
  # (kept here so the name can be resolved without attaching RDCOMClient).
  RDCOMClient::createCOMReference(ref, className)
}
GetCurrWrd <- function() {

  # Get a COM handle to an already running MS Word instance (Windows only,
  # via RDCOMClient) and store it in the DescTools option "lastWord".
  # Returns the handle invisibly, or NULL if RDCOMClient is not available.

  hwnd <- NULL

  if (requireNamespace("RDCOMClient", quietly = FALSE)) {

    # there's no "get"-function in RDCOMClient, so just create a new one here..
    hwnd <- RDCOMClient::COMCreate("Word.Application", existing=TRUE)

    if(is.null(hwnd)) warning("No running Word application found!")

    DescToolsOptions(lastWord = hwnd)

  } else {

    if(Sys.info()["sysname"] == "Windows")
      warning("RDCOMClient is not available. To install it use: install.packages('RDCOMClient', repos = 'http://www.stats.ox.ac.uk/pub/RWin/')")
    else
      warning(gettextf("RDCOMClient is unfortunately not available for %s systems (Windows-only).", Sys.info()["sysname"]))

    # fix: this branch used to assign NULL to 'wrd', leaving 'hwnd' undefined
    # and raising "object 'hwnd' not found" on the return below; hwnd is now
    # initialised to NULL at the top
  }

  invisible(hwnd)

}
GetNewWrd <- function(visible = TRUE, template = "Normal", header=FALSE
                      , main="Descriptive report") {

  # Start a new MS Word instance (Windows only, via RDCOMClient), create a
  # new document based on the given template, and store the handle in the
  # DescTools option "lastWord".
  #   visible  : make the Word window visible
  #   template : document template for the new document
  #   header   : if TRUE, prepare a report skeleton (title page, table of
  #              contents, footer) via .WrdPrepRep
  # Returns the COM handle invisibly, or NULL if RDCOMClient is unavailable.

  if (requireNamespace("RDCOMClient", quietly = FALSE)) {

    # Starts the Word application with wrd as handle
    hwnd <- RDCOMClient::COMCreate("Word.Application", existing=FALSE)
    DescToolsOptions(lastWord = hwnd)

    if( visible == TRUE ) hwnd[["Visible"]] <- TRUE

    # Create a new document based on template
    # VBA code:
    #     Documents.Add Template:= _
    #         "O:\G\GI\_Admin\Administration\09_Templates\newlogo_GI_doc_bericht.dot", _
    #         NewTemplate:=False, DocumentType:=0
    #
    newdoc <- hwnd[["Documents"]]$Add(template, FALSE, 0)

    # prepare word document, with front page, table of contents, footer ...
    if(header) .WrdPrepRep( wrd=hwnd, main=main )

  } else {

    if(Sys.info()["sysname"] == "Windows")
      warning("RDCOMClient is not available. To install it use: install.packages('RDCOMClient', repos = 'http://www.stats.ox.ac.uk/pub/RWin/')")
    else
      warning(gettextf("RDCOMClient is unfortunately not available for %s systems (Windows-only).", Sys.info()["sysname"]))

    hwnd <- NULL
  }

  invisible( hwnd )

}
WrdKill <- function(){
  # Word might not always quit and end the task
  # so killing the task is "ultima ratio"...
  # NOTE: this force-kills ALL running WINWORD.EXE processes via the Windows
  # shell; unsaved changes in any open Word document are lost.
  shell('taskkill /F /IM WINWORD.EXE')
}
.WrdPrepRep <- function(wrd, main="Bericht" ){

  # only internal use from GetNewWrd():
  # prepares a freshly created Word document for a report: shows the
  # document map, writes the title and a date/user line, inserts a table of
  # contents on its own page and a footer with date/user and page number.
  #   wrd  : COM handle of the Word instance
  #   main : report title placed on the first page

  # constants (available via wdConst):
  # wdPageBreak <- 7
  # wdSeekCurrentPageHeader <- 9   # page header
  # wdSeekCurrentPageFooter <- 10  # page footer
  # wdSeekMainDocument <- 0
  # wdPageFitBestFit <- 2
  # wdFieldEmpty <- -1

  # Show DocumentMap
  wrd[["ActiveWindow"]][["DocumentMap"]] <- TRUE
  wrdWind <- wrd[["ActiveWindow"]][["ActivePane"]][["View"]][["Zoom"]]
  wrdWind[["PageFit"]] <- wdConst$wdPageFitBestFit

  wrd[["Selection"]]$TypeParagraph()
  wrd[["Selection"]]$TypeParagraph()
  wrd[["Selection"]]$WholeStory()

  # commented out 15.1.2012:  WrdSetFont(wrd=wrd)

  # Idea: define the heading style here (does not work, though!)
  #wrd[["ActiveDocument"]][["Styles"]]$Item("ueberschrift 2")[["Font"]][["Name"]] <- "Consolas"
  #wrd[["ActiveDocument"]][["Styles"]]$Item("ueberschrift 2")[["Font"]][["Size"]] <- 10
  #wrd[["ActiveDocument"]][["Styles"]]$Item("ueberschrift 2")[["Font"]][["Bold"]] <- TRUE
  #wrd[["ActiveDocument"]][["Styles"]]$Item("ueberschrift 2")[["ParagraphFormat"]]["Borders"]]$Item(wdBorderTop)[["LineStyle"]] <- wdConst$wdLineStyleSingle

  # title plus a "date/username" line, then a page break
  WrdCaption( main, wrd=wrd)
  wrd[["Selection"]]$TypeText(gettextf("%s/%s\n",format(Sys.time(), "%d.%m.%Y"), Sys.getenv("username")))

  wrd[["Selection"]]$InsertBreak( wdConst$wdPageBreak)

  # insert table of contents ***************
  wrd[["ActiveDocument"]][["TablesOfContents"]]$Add( wrd[["Selection"]][["Range"]] )

  # Original VB-Code:
  #   With ActiveDocument
  #       .TablesOfContents.Add Range:=Selection.Range, RightAlignPageNumbers:= _
  #           True, UseHeadingStyles:=True, UpperHeadingLevel:=1, _
  #           LowerHeadingLevel:=2, IncludePageNumbers:=True, AddedStyles:="", _
  #           UseHyperlinks:=True, HidePageNumbersInWeb:=True, UseOutlineLevels:= _
  #           True
  #       .TablesOfContents(1).TabLeader = wdTabLeaderDots
  #       .TablesOfContents.Format = wdIndexIndent
  #   End With

  # footer: "date/username" plus a PAGE field ***************
  wrdView <- wrd[["ActiveWindow"]][["ActivePane"]][["View"]]
  wrdView[["SeekView"]] <- wdConst$wdSeekCurrentPageFooter
  wrd[["Selection"]]$TypeText( gettextf("%s/%s\t\t",format(Sys.time(), "%d.%m.%Y"), Sys.getenv("username")) )
  wrd[["Selection"]][["Fields"]]$Add( wrd[["Selection"]][["Range"]], wdConst$wdFieldEmpty, "PAGE" )

  # Roland did not want that (23.11.2014):
  # wrd[["Selection"]]$TypeText("\n\n")

  # back to the main document, start the report body on a fresh page
  wrdView[["SeekView"]] <- wdConst$wdSeekMainDocument

  wrd[["Selection"]]$InsertBreak( wdConst$wdPageBreak)

  invisible()

}
# put that to an example...
# WrdPageBreak <- function( wrd = .lastWord ) {
# wrd[["Selection"]]$InsertBreak(wdConst$wdPageBreak)
# }
ToWrd <- function(x, font=NULL, ..., wrd=DescToolsOptions("lastWord")){
  # Generic: sends an R object to a running Word instance.
  # Methods exist for character, table, data.frame, matrix, ftable, Freq,
  # TOne, abstract, PercTable; everything else falls back to ToWrd.default.
  UseMethod("ToWrd")
}
ToWrd.default <- function(x, font=NULL, ..., wrd=DescToolsOptions("lastWord")){
  # Fallback method: capture the object's printed console representation
  # and insert that text into the Word document.
  txt <- .CaptOut(x)
  ToWrd.character(x=txt, font=font, ..., wrd=wrd)
  invisible()
}
ToWrd.TOne <- function(x, font=NULL, para=NULL, main=NULL, align=NULL,
                       autofit=TRUE, ..., wrd=DescToolsOptions("lastWord")){
  # Writes a TOne object ("table one") to Word: the table itself, its
  # legend in a font 2pt smaller, and an optional numbered caption.
  #   para ... paragraph format applied to the table, see WrdParagraphFormat()
  # Returns the Word table handle invisibly.
  wTab <- ToWrd.table(x, main=NULL, font=font, align=align, autofit=autofit, wrd=wrd, ...)
  if(!is.null(para)){
    wTab$Select()
    WrdParagraphFormat(wrd) <- para
    # move out of table
    wrd[["Selection"]]$EndOf(wdConst$wdTable)
    wrd[["Selection"]]$MoveRight(wdConst$wdCharacter, 2, 0)
  }
  # legend is printed 2pt smaller than the current resp. requested font
  if(is.null(font)) font <- list()
  if(is.null(font$size))
    font$size <- WrdFont(wrd)$size - 2
  else
    font$size <- font$size - 2
  ToWrd.character(paste("\n", attr(x, "legend"), "\n\n", sep=""),
                  font=font, wrd=wrd)
  if(!is.null(main)){
    # insert an auto-numbered table caption
    sel <- wrd$Selection() # "Abbildung"
    sel$InsertCaption(Label=wdConst$wdCaptionTable, Title=paste(" - ", main, sep=""))
    sel$TypeParagraph()
  }
  invisible(wTab)
}
ToWrd.abstract <- function(x, font=NULL, autofit=TRUE, ..., wrd=DescToolsOptions("lastWord")){
  # Writes an abstract object to Word: a heading (attr "main"), an
  # optional label in a small font, a dimension line and the table itself.
  # Returns the Word table handle invisibly.
  WrdCaption(x=attr(x, "main"), wrd=wrd)
  if(!is.null(attr(x, "label"))){
    # print the label in a small font;
    # fix: `WrdFont<-` reads the list element "size" -- the former element
    # name "fontsize" was silently ignored, so the 8pt was never applied
    if(is.null(font)){
      lblfont <- list(size=8)
    } else {
      lblfont <- font
      lblfont$size <- 8
    }
    ToWrd.character(paste("\n", attr(x, "label"), "\n", sep=""),
                    font = lblfont, wrd=wrd)
  }
  # one line with the dimensions of the underlying data.frame
  ToWrd.character(gettextf("\ndata.frame: %s obs. of %s variables\n\n", attr(x, "nrow"), attr(x, "ncol"))
                  , font=font, wrd=wrd)
  wTab <- ToWrd.data.frame(x, wrd=wrd, autofit=autofit, font=font, align="l", ...)
  invisible(wTab)
}
ToWrd.lm <- function(x, font=NULL, ..., wrd=DescToolsOptions("lastWord")){
  # Placeholder method for lm objects: currently writes nothing.
  # NOTE(review): the body is empty apart from invisible() -- presumably an
  # unfinished implementation; confirm intent before relying on it.
  invisible()
}
ToWrd.character <- function (x, font = NULL, para = NULL, style = NULL, ..., wrd = DescToolsOptions("lastWord")) {
  # Inserts a character vector at the current Word selection, elements
  # joined with newlines.
  #   font  ... list(name=, size=, bold=, italic=, color=) or the string
  #             "fix" to use DescToolsOptions("fixedfont")
  #   para  ... paragraph format list, see WrdParagraphFormat()
  #   style ... a Word style name applied to the inserted text
  # we will convert UTF-8 strings to Latin-1, if the local info is Latin-1
  if(any(l10n_info()[["Latin-1"]] & Encoding(x)=="UTF-8"))
    x <- iconv(x, from="UTF-8", to="latin1")
  wrd[["Selection"]]$InsertAfter(paste(x, collapse = "\n"))
  if (!is.null(style))
    WrdStyle(wrd) <- style
  if (!is.null(para))
    WrdParagraphFormat(wrd) <- para
  if(identical(font, "fix")){
    font <- DescToolsOptions("fixedfont")
    if(is.null(font))
      # fall back to a hardcoded fixed font if no option is set
      font <- structure(list(name="Courier New", size=8), class="font")
  }
  if(!is.null(font)){
    # apply the font to the still-selected text; the previous font is
    # restored on function exit (i.e. after the Collapse below)
    currfont <- WrdFont(wrd)
    WrdFont(wrd) <- font
    on.exit(WrdFont(wrd) <- currfont)
  }
  # collapse the selection to its end so subsequent output is appended
  wrd[["Selection"]]$Collapse(Direction=wdConst$wdCollapseEnd)
  invisible()
}
WrdCaption <- function(x, index = 1, wrd = DescToolsOptions("lastWord")){
  # Inserts x as a Word heading of level `index` (style wdStyleHeading<index>).
  # Fix: forward the wrd handle to ToWrd.character; previously the wrd
  # argument was ignored and output always went to the last Word instance.
  ToWrd.character(paste(x, "\n", sep=""),
                  style=eval(parse(text=gettextf("wdConst$wdStyleHeading%s", index))),
                  wrd=wrd)
  invisible()
}
ToWrd.PercTable <- function(x, font=NULL, main = NULL, ..., wrd = DescToolsOptions("lastWord")){
  # Writes a PercTable object by delegating to the ftable method with the
  # contained ftable component.
  ToWrd.ftable(x$ftab, font=font, main=main, ..., wrd=wrd)
}
ToWrd.data.frame <- function(x, font=NULL, main = NULL, row.names=NULL, ..., wrd = DescToolsOptions("lastWord")){
  # Sends a data.frame to Word as a table (all cells converted to character).
  # row.names: NULL (default) shows row names only when they differ from
  # the plain sequence "1".."nrow"; TRUE/FALSE force resp. suppress them.
  # Fix: capture the row names BEFORE the character conversion -- apply()
  # returns a matrix without row names, so the former identical() check
  # never saw the original names and effectively always yielded TRUE.
  rn <- row.names(x)
  x <- apply(x, 2, as.character)
  rownames(x) <- rn
  if(is.null(row.names))
    if(identical(rn, as.character(1:nrow(x))))
      row.names <- FALSE
    else
      row.names <- TRUE
  ToWrd.table(x=x, font=font, main=main, row.names=row.names, ..., wrd=wrd)
}
# ToWrd.data.frame <- function(x, font=NULL, main = NULL, row.names=NULL, as.is=FALSE, ..., wrd = DescToolsOptions("lastWord")){
#
# if(as.is)
# x <- apply(x, 2, as.character)
# else
# x <- FixToTable(capture.output(x))
#
# if(is.null(row.names))
# if(identical(row.names, seq_along(1:nrow(x))))
# row.names <- FALSE
# else
# row.names <- TRUE
#
# if(row.names==TRUE)
# x <- cbind(row.names(x), x)
#
# ToWrd.table(x=x, font=font, main=main, ..., wrd=wrd)
# }
ToWrd.matrix <- function(x, font=NULL, main = NULL, ..., wrd = DescToolsOptions("lastWord")){
  # Sends a matrix to Word by delegating to the table method.
  ToWrd.table(x=x, font=font, main=main, ..., wrd=wrd)
}
ToWrd.Freq <- function(x, font=NULL, main = NULL, ..., wrd = DescToolsOptions("lastWord")){
  # Writes a Freq (frequency table) object to Word.
  # Columns 3 and 5 are rounded and formatted to 3 digits first
  # (presumably the percentage columns -- verify against Freq()'s layout).
  x[,c(3,5)] <- sapply(round(x[,c(3,5)], 3), Format, digits=3)
  res <- ToWrd.data.frame(x=x, main=main, font=font, wrd=wrd)
  invisible(res)
}
ToWrd.ftable <- function (x, font = NULL, main = NULL, align=NULL, method = "compact", ..., wrd = DescToolsOptions("lastWord")) {
  # Writes a flat contingency table (ftable) to Word as a table.
  #   method ... label layout, one of "non.compact", "row.compact",
  #              "col.compact", "compact" (as in stats' format.ftable)
  #   align  ... per-column alignment; defaults to left for label columns,
  #              right for data columns
  # Returns the Word table handle invisibly.
  # simple version:
  # x <- FixToTable(capture.output(x))
  # ToWrd.character(x, font=font, main=main, ..., wrd=wrd)
  # let R do all the complicated formatting stuff
  # but we can't import a not exported function, so we provide an own copy of it
  # so this is a verbatim copy of stats:::format.ftable -- do not "fix" it here
  .format.ftable <- function (x, quote = TRUE, digits = getOption("digits"), method = c("non.compact",
    "row.compact", "col.compact", "compact"), lsep = " | ", ...)
  {
    if (!inherits(x, "ftable"))
      stop("'x' must be an \"ftable\" object")
    charQuote <- function(s) if (quote && length(s))
      paste0("\"", s, "\"")
    else s
    makeLabels <- function(lst) {
      lens <- lengths(lst)
      cplensU <- c(1, cumprod(lens))
      cplensD <- rev(c(1, cumprod(rev(lens))))
      y <- NULL
      for (i in rev(seq_along(lst))) {
        ind <- 1 + seq.int(from = 0, to = lens[i] - 1) *
          cplensD[i + 1L]
        tmp <- character(length = cplensD[i])
        tmp[ind] <- charQuote(lst[[i]])
        y <- cbind(rep(tmp, times = cplensU[i]), y)
      }
      y
    }
    makeNames <- function(x) {
      nmx <- names(x)
      if (is.null(nmx))
        rep_len("", length(x))
      else nmx
    }
    l.xrv <- length(xrv <- attr(x, "row.vars"))
    l.xcv <- length(xcv <- attr(x, "col.vars"))
    method <- match.arg(method)
    if (l.xrv == 0) {
      if (method == "col.compact")
        method <- "non.compact"
      else if (method == "compact")
        method <- "row.compact"
    }
    if (l.xcv == 0) {
      if (method == "row.compact")
        method <- "non.compact"
      else if (method == "compact")
        method <- "col.compact"
    }
    LABS <- switch(method, non.compact = {
      cbind(rbind(matrix("", nrow = length(xcv), ncol = length(xrv)),
        charQuote(makeNames(xrv)), makeLabels(xrv)), c(charQuote(makeNames(xcv)),
        rep("", times = nrow(x) + 1)))
    }, row.compact = {
      cbind(rbind(matrix("", nrow = length(xcv) - 1, ncol = length(xrv)),
        charQuote(makeNames(xrv)), makeLabels(xrv)), c(charQuote(makeNames(xcv)),
        rep("", times = nrow(x))))
    }, col.compact = {
      cbind(rbind(cbind(matrix("", nrow = length(xcv), ncol = length(xrv) -
        1), charQuote(makeNames(xcv))), charQuote(makeNames(xrv)),
        makeLabels(xrv)))
    }, compact = {
      xrv.nms <- makeNames(xrv)
      xcv.nms <- makeNames(xcv)
      mat <- cbind(rbind(cbind(matrix("", nrow = l.xcv - 1,
        ncol = l.xrv - 1), charQuote(makeNames(xcv[-l.xcv]))),
        charQuote(xrv.nms), makeLabels(xrv)))
      mat[l.xcv, l.xrv] <- paste(tail(xrv.nms, 1), tail(xcv.nms,
        1), sep = lsep)
      mat
    }, stop("wrong method"))
    DATA <- rbind(if (length(xcv))
      t(makeLabels(xcv)), if (method %in% c("non.compact",
      "col.compact"))
      rep("", times = ncol(x)), format(unclass(x), digits = digits,
      ...))
    cbind(apply(LABS, 2L, format, justify = "left"), apply(DATA,
      2L, format, justify = "right"))
  }
  # format to a character matrix, trim the padding, then hand over to ToWrd.table
  tab <- .format.ftable(x, quote=FALSE, method=method, lsep="")
  tab <- StrTrim(tab)
  if(is.null(align))
    align <- c(rep("l", length(attr(x, "row.vars"))), rep("r", ncol(x)))
  wtab <- ToWrd.table(tab, font=font, main=main, align=align, ..., wrd=wrd)
  invisible(wtab)
}
ToWrd.table <- function (x, font = NULL, main = NULL, align=NULL, tablestyle=NULL, autofit = TRUE,
                         row.names=FALSE, col.names=TRUE, ..., wrd = DescToolsOptions("lastWord")) {
  # Core table writer: inserts x (coerced to character) at the current
  # selection and converts it into a Word table.
  #   align      ... "l"/"c"/"r" per column (recycled); default: first
  #                  column left if row.names, rest right
  #   tablestyle ... NULL = manual booktabs-like rules (default),
  #                  NA = leave the raw table unstyled,
  #                  otherwise a Word table style to apply
  #   row.names/col.names ... include dimnames as first column / first row
  # Returns the Word table handle invisibly.
  x[] <- as.character(x)
  # add column names to character table
  if(col.names)
    x <- rbind(colnames(x), x)
  if(row.names){
    rown <- rownames(x)
    # if(col.names)
    #   rown <- c("", rown)
    x <- cbind(rown, x)
  }
  # replace potential \n in table with /cr, as convertToTable would make a new cell for them
  x <- gsub(pattern= "\n", replacement = "/cr", x = x)
  # paste the cells and separate by \t
  txt <- paste(apply(x, 1, paste, collapse="\t"), collapse="\n")
  nc <- ncol(x)
  nr <- nrow(x)
  # insert and convert
  wrd[["Selection"]]$InsertAfter(txt)
  wrdTable <- wrd[["Selection"]]$ConvertToTable(Separator = wdConst$wdSeparateByTabs,
                                                NumColumns = nc, NumRows = nr,
                                                AutoFitBehavior = wdConst$wdAutoFitFixed)
  wrdTable[["ApplyStyleHeadingRows"]] <- col.names
  # replace the /cr placeholders by ^l (manual line break) in Word again
  wrd[["Selection"]][["Find"]]$ClearFormatting()
  wsel <- wrd[["Selection"]][["Find"]]
  wsel[["Text"]] <- "/cr"
  wrep <- wsel[["Replacement"]]
  wrep[["Text"]] <- "^l"
  wsel$Execute(Replace=wdConst$wdReplaceAll)
  # http://www.thedoctools.com/downloads/DocTools_List_Of_Built-in_Style_English_Danish_German_French.pdf
  if(is.null(tablestyle)){
    # default styling: rule on top, below the header row and at the bottom
    WrdTableBorders(wrdTable, from=c(1,1), to=c(1, nc),
                    border = wdConst$wdBorderTop, wrd=wrd)
    if(col.names)
      WrdTableBorders(wrdTable, from=c(1,1), to=c(1, nc),
                      border = wdConst$wdBorderBottom, wrd=wrd)
    WrdTableBorders(wrdTable, from=c(nr, 1), to=c(nr, nc),
                    border = wdConst$wdBorderBottom, wrd=wrd)
    # vertical padding around the rules, scaled by the font size and
    # rounded to half points
    space <- RoundTo((if(is.null(font$size)) WrdFont(wrd)$size else font$size) * .2, multiple = .5)
    wrdTable$Rows(1)$Select()
    WrdParagraphFormat(wrd) <- list(SpaceBefore=space, SpaceAfter=space)
    if(col.names){
      wrdTable$Rows(2)$Select()
      WrdParagraphFormat(wrd) <- list(SpaceBefore=space)
    }
    wrdTable$Rows(nr)$Select()
    WrdParagraphFormat(wrd) <- list(SpaceAfter=space)
    # wrdTable[["Style"]] <- -115   # code for "Tabelle Klassisch 1"
  } else
    if(!is.na(tablestyle))
      wrdTable[["Style"]] <- tablestyle
  # align the columns
  if(is.null(align))
    align <- c(rep("l", row.names), rep(x = "r", nc-row.names))
  else
    align <- rep(align, length.out=nc)
  align[align=="l"] <- wdConst$wdAlignParagraphLeft
  align[align=="c"] <- wdConst$wdAlignParagraphCenter
  align[align=="r"] <- wdConst$wdAlignParagraphRight
  for(i in seq_along(align)){
    wrdTable$Columns(i)$Select()
    wrdSel <- wrd[["Selection"]]
    wrdSel[["ParagraphFormat"]][["Alignment"]] <- align[i]
  }
  if(!is.null(font)){
    wrdTable$Select()
    WrdFont(wrd) <- font
  }
  if(autofit)
    wrdTable$Columns()$AutoFit()
  # move the cursor out of the table, directly behind it:
  # select the whole table, then collapse the selection to its end
  wrdTable$Select()
  wrd[["Selection"]]$Collapse(wdConst$wdCollapseEnd)
  # instead of going to the end of the document ...
  # Selection.GoTo What:=wdGoToPercent, Which:=wdGoToLast
  # wrd[["Selection"]]$GoTo(What = wdConst$wdGoToPercent, Which= wdConst$wdGoToLast)
  if(!is.null(main)){
    # insert an auto-numbered caption
    sel <- wrd$Selection() # "Abbildung"
    sel$InsertCaption(Label=wdConst$wdCaptionTable, Title=paste(" - ", main, sep=""))
    sel$TypeParagraph()
  }
  wrd[["Selection"]]$TypeParagraph()
  invisible(wrdTable)
}
WrdTableBorders <- function (wtab, from = NULL, to = NULL, border = NULL,
                             lty = wdConst$wdLineStyleSingle, col=wdConst$wdColorBlack,
                             lwd = wdConst$wdLineWidth050pt,
                             wrd = DescToolsOptions("lastWord")) {
  # Paints borders on a rectangular cell range of a Word table.
  #   wtab     ... Word table handle
  #   from, to ... c(row, col) corners of the range; default: whole table
  #   border   ... one or several wdBorder* constants; default: all six
  #   lty, col, lwd ... line style, color and width (wdConst values)
  # Consistency fix: wrd now defaults to DescToolsOptions("lastWord") like
  # every other Wrd* function (added as last parameter, so positional
  # callers are unaffected).
  if(is.null(from))
    from <- c(1,1)
  if(is.null(to))
    to <- c(wtab[["Rows"]]$Count(), wtab[["Columns"]]$Count())
  # select the document range spanned by the two corner cells
  rng <- wrd[["ActiveDocument"]]$Range(start=wtab$Cell(from[1], from[2])[["Range"]][["Start"]],
                                       end=wtab$Cell(to[1], to[2])[["Range"]][["End"]])
  rng$Select()
  if(is.null(border))
    # use all borders by default
    border <- wdConst[c("wdBorderTop","wdBorderBottom","wdBorderLeft","wdBorderRight",
                        "wdBorderHorizontal","wdBorderVertical")]
  for(b in border){
    wborder <- wrd[["Selection"]]$Borders(b)
    wborder[["LineStyle"]] <- lty
    wborder[["Color"]] <- col
    wborder[["LineWidth"]] <- lwd
  }
  invisible()
}
WrdCellRange <- function(wtab, rstart, rend) {
  # Returns a COM Range handle spanning the rectangle of table cells from
  # rstart = c(row, col) to rend = c(row, col) in the Word table wtab.
  pos1 <- wtab$Cell(rstart[1], rstart[2])[["Range"]][["Start"]]
  pos2 <- wtab$Cell(rend[1], rend[2])[["Range"]][["End"]]
  wtab[["Parent"]]$Range(pos1, pos2)
}
WrdMergeCells <- function(wtab, rstart, rend) {
  # Merges the table cells between the corners rstart and rend
  # (both c(row, col)) into a single cell.
  WrdCellRange(wtab, rstart, rend)[["Cells"]]$Merge()
}
WrdFormatCells <- function(wtab, rstart, rend, col=NULL, bg=NULL, font=NULL,
                           border=NULL, align=NULL){
  # Formats a rectangular cell range of a Word table.
  #   wtab         ... Word table handle
  #   rstart, rend ... c(row, col) corners of the cell range
  #   col, bg      ... shading foreground/background colors (wdColor* values)
  #   font         ... list(name=, size=, bold=, italic=, color=); NULL
  #                    elements leave the respective property unchanged
  #   border       ... TRUE for a default single-line box, or a list with
  #                    elements border, linestyle, linewidth, color (recycled)
  #   align        ... "l", "c" or "r" paragraph alignment
  rng <- WrdCellRange(wtab, rstart, rend)
  shad <- rng[["Shading"]]
  if (!is.null(col))
    shad[["ForegroundPatternColor"]] <- col
  if (!is.null(bg))
    shad[["BackgroundPatternColor"]] <- bg
  wrdFont <- rng[["Font"]]
  if (!is.null(font$name))
    wrdFont[["Name"]] <- font$name
  if (!is.null(font$size))
    wrdFont[["Size"]] <- font$size
  if (!is.null(font$bold))
    wrdFont[["Bold"]] <- font$bold
  if (!is.null(font$italic))
    wrdFont[["Italic"]] <- font$italic
  if (!is.null(font$color))
    wrdFont[["Color"]] <- font$color
  if (!is.null(align)) {
    # map the l/c/r shorthand to the Word alignment constants
    align <- match.arg(align, choices = c("l", "c", "r"))
    align <- Lookup(align, ref = c("l", "c", "r"),
                    val = unlist(wdConst[c("wdAlignParagraphLeft",
                                           "wdAlignParagraphCenter",
                                           "wdAlignParagraphRight")]))
    rng[["ParagraphFormat"]][["Alignment"]] <- align
  }
  if(!is.null(border)) {
    if(identical(border, TRUE))
      # set default values: single thin black line on all four sides
      border <- list(border=c(wdConst$wdBorderBottom,
                              wdConst$wdBorderLeft,
                              wdConst$wdBorderTop,
                              wdConst$wdBorderRight),
                     linestyle=wdConst$wdLineStyleSingle,
                     linewidth=wdConst$wdLineWidth025pt,
                     color=wdConst$wdColorBlack)
    # fill in defaults for partially specified border lists
    if(is.null(border$border))
      border$border <- c(wdConst$wdBorderBottom,
                         wdConst$wdBorderLeft,
                         wdConst$wdBorderTop,
                         wdConst$wdBorderRight)
    if(is.null(border$linestyle))
      border$linestyle <- wdConst$wdLineStyleSingle
    # recycle all border components to a common length
    border <- do.call(Recycle, border)
    for(i in 1:attr(border, which = "maxdim")) {
      b <- rng[["Borders"]]$Item(border$border[i])
      if(!is.null(border$linestyle[i]))
        b[["LineStyle"]] <- border$linestyle[i]
      if(!is.null(border$linewidth[i]))
        b[["LineWidth"]] <- border$linewidth[i]
      if(!is.null(border$color))
        b[["Color"]] <- border$color[i]
    }
  }
}
# Get and set font
WrdFont <- function(wrd = DescToolsOptions("lastWord") ) {
  # Returns the font at the current selection as a list
  # (name, size, bold, italic, color), class "font".
  wrdSel <- wrd[["Selection"]]
  wrdFont <- wrdSel[["Font"]]
  currfont <- list(
    name = wrdFont[["Name"]] ,
    size = wrdFont[["Size"]] ,
    bold = wrdFont[["Bold"]] ,
    italic = wrdFont[["Italic"]],
    # name the color value after the matching wdColor* constant in wdConst
    color = setNames(wrdFont[["Color"]], names(which(
      wdConst==wrdFont[["Color"]] & grepl("wdColor", names(wdConst)))))
  )
  class(currfont) <- "font"
  return(currfont)
}
`WrdFont<-` <- function(wrd, value){
  # Replacement function: applies the font list `value` (elements name,
  # size, bold, italic, color; NULL elements are left unchanged) to the
  # current selection and returns the Word handle.
  wrdSel <- wrd[["Selection"]]
  wrdFont <- wrdSel[["Font"]]
  # set the new font
  if(!is.null(value$name)) wrdFont[["Name"]] <- value$name
  if(!is.null(value$size)) wrdFont[["Size"]] <- value$size
  if(!is.null(value$bold)) wrdFont[["Bold"]] <- value$bold
  if(!is.null(value$italic)) wrdFont[["Italic"]] <- value$italic
  if(!is.null(value$color)) wrdFont[["Color"]] <- value$color
  return(wrd)
}
# Get and set ParagraphFormat
WrdParagraphFormat <- function(wrd = DescToolsOptions("lastWord") ) {
  # Returns the paragraph format at the current selection as a list of
  # all ParagraphFormat properties, class "paragraph".
  wrdPar <- wrd[["Selection"]][["ParagraphFormat"]]
  currpar <- list(
    LeftIndent =wrdPar[["LeftIndent"]] ,
    RightIndent =wrdPar[["RightIndent"]] ,
    SpaceBefore =wrdPar[["SpaceBefore"]] ,
    SpaceBeforeAuto =wrdPar[["SpaceBeforeAuto"]] ,
    SpaceAfter =wrdPar[["SpaceAfter"]] ,
    SpaceAfterAuto =wrdPar[["SpaceAfterAuto"]] ,
    LineSpacingRule =wrdPar[["LineSpacingRule"]],
    Alignment =wrdPar[["Alignment"]],
    WidowControl =wrdPar[["WidowControl"]],
    KeepWithNext =wrdPar[["KeepWithNext"]],
    KeepTogether =wrdPar[["KeepTogether"]],
    PageBreakBefore =wrdPar[["PageBreakBefore"]],
    NoLineNumber =wrdPar[["NoLineNumber"]],
    Hyphenation =wrdPar[["Hyphenation"]],
    FirstLineIndent =wrdPar[["FirstLineIndent"]],
    OutlineLevel =wrdPar[["OutlineLevel"]],
    CharacterUnitLeftIndent =wrdPar[["CharacterUnitLeftIndent"]],
    CharacterUnitRightIndent =wrdPar[["CharacterUnitRightIndent"]],
    CharacterUnitFirstLineIndent=wrdPar[["CharacterUnitFirstLineIndent"]],
    LineUnitBefore =wrdPar[["LineUnitBefore"]],
    LineUnitAfter =wrdPar[["LineUnitAfter"]],
    MirrorIndents =wrdPar[["MirrorIndents"]]
    # wrdPar[["TextboxTightWrap"]] <- TextboxTightWrap
  )
  class(currpar) <- "paragraph"
  return(currpar)
}
`WrdParagraphFormat<-` <- function(wrd, value){
  # Replacement function: applies the paragraph format list `value` to the
  # current selection (NULL elements are left unchanged) and returns the
  # Word handle. Alignment may be given as "left"/"center"/"right".
  wrdPar <- wrd[["Selection"]][["ParagraphFormat"]]
  # set the new format, property by property
  if(!is.null(value$LeftIndent)) wrdPar[["LeftIndent"]] <- value$LeftIndent
  if(!is.null(value$RightIndent)) wrdPar[["RightIndent"]] <- value$RightIndent
  if(!is.null(value$SpaceBefore)) wrdPar[["SpaceBefore"]] <- value$SpaceBefore
  if(!is.null(value$SpaceBeforeAuto)) wrdPar[["SpaceBeforeAuto"]] <- value$SpaceBeforeAuto
  if(!is.null(value$SpaceAfter)) wrdPar[["SpaceAfter"]] <- value$SpaceAfter
  if(!is.null(value$SpaceAfterAuto)) wrdPar[["SpaceAfterAuto"]] <- value$SpaceAfterAuto
  if(!is.null(value$LineSpacingRule)) wrdPar[["LineSpacingRule"]] <- value$LineSpacingRule
  if(!is.null(value$Alignment)) {
    # translate the character shorthand into the wdAlignParagraph* constant
    if(is.character(value$Alignment))
      switch(match.arg(value$Alignment, choices = c("left","center","right"))
             , left=value$Alignment <- wdConst$wdAlignParagraphLeft
             , center=value$Alignment <- wdConst$wdAlignParagraphCenter
             , right=value$Alignment <- wdConst$wdAlignParagraphRight
      )
    wrdPar[["Alignment"]] <- value$Alignment
  }
  if(!is.null(value$WidowControl)) wrdPar[["WidowControl"]] <- value$WidowControl
  if(!is.null(value$KeepWithNext)) wrdPar[["KeepWithNext"]] <- value$KeepWithNext
  if(!is.null(value$KeepTogether)) wrdPar[["KeepTogether"]] <- value$KeepTogether
  if(!is.null(value$PageBreakBefore)) wrdPar[["PageBreakBefore"]] <- value$PageBreakBefore
  if(!is.null(value$NoLineNumber)) wrdPar[["NoLineNumber"]] <- value$NoLineNumber
  if(!is.null(value$Hyphenation)) wrdPar[["Hyphenation"]] <- value$Hyphenation
  if(!is.null(value$FirstLineIndent)) wrdPar[["FirstLineIndent"]] <- value$FirstLineIndent
  if(!is.null(value$OutlineLevel)) wrdPar[["OutlineLevel"]] <- value$OutlineLevel
  if(!is.null(value$CharacterUnitLeftIndent)) wrdPar[["CharacterUnitLeftIndent"]] <- value$CharacterUnitLeftIndent
  if(!is.null(value$CharacterUnitRightIndent)) wrdPar[["CharacterUnitRightIndent"]] <- value$CharacterUnitRightIndent
  if(!is.null(value$CharacterUnitFirstLineIndent)) wrdPar[["CharacterUnitFirstLineIndent"]] <- value$CharacterUnitFirstLineIndent
  if(!is.null(value$LineUnitBefore)) wrdPar[["LineUnitBefore"]] <- value$LineUnitBefore
  if(!is.null(value$LineUnitAfter)) wrdPar[["LineUnitAfter"]] <- value$LineUnitAfter
  if(!is.null(value$MirrorIndents)) wrdPar[["MirrorIndents"]] <- value$MirrorIndents
  return(wrd)
}
WrdStyle <- function (wrd = DescToolsOptions("lastWord")) {
  # Returns the localized name of the paragraph style at the current
  # Word selection.
  wrd[["Selection"]][["Style"]][["NameLocal"]]
}
`WrdStyle<-` <- function (wrd, value) {
  # Replacement function: applies the Word style `value` to the paragraphs
  # of the current selection and returns the Word handle, as replacement
  # functions must.
  selPars <- wrd[["Selection"]][["Paragraphs"]]
  selPars[["Style"]] <- value
  wrd
}
IsValidWrd <- function(wrd = DescToolsOptions("lastWord")){
  # TRUE when `wrd` still points to a usable Word instance, i.e. accessing
  # its Selection does not raise an error; FALSE otherwise.
  !inherits(tryCatch(wrd[["Selection"]], error=function(e) e), "simpleError")
}
# This has been replaced by ToWrd.character in 0.99.18
# WrdText <- function(txt, fixedfont=TRUE, fontname=NULL,
# fontsize=NULL, bold=FALSE, italic=FALSE, col=NULL,
# alignment = c("left","right","center"), spaceBefore=0, spaceAfter=0,
# lineSpacingRule = wdConst$wdLineSpaceSingle,
# appendCR=TRUE, wrd=DescToolsOptions("lastWord") ){
#
# if(fixedfont){
# fontname <- Coalesce(fontname, getOption("fixedfont", "Consolas"))
# fontsize <- Coalesce(fontsize, getOption("fixedfontsize", 7))
# }
#
# if (!inherits(txt, "character")) txt <- .CaptOut(txt)
#
# wrdSel <- wrd[["Selection"]]
# wrdFont <- wrdSel[["Font"]]
#
# currfont <- list(
# name = wrdFont[["Name"]] ,
# size = wrdFont[["Size"]] ,
# bold = wrdFont[["Bold"]] ,
# italic = wrdFont[["Italic"]],
# color = wrdFont[["Color"]]
# )
#
# if(!is.null(fontname)) wrdFont[["Name"]] <- fontname
# if(!is.null(fontsize)) wrdFont[["Size"]] <- fontsize
# wrdFont[["Bold"]] <- bold
# wrdFont[["Italic"]] <- italic
# wrdFont[["Color"]] <- Coalesce(col, wdConst$wdColorBlack)
#
# alignment <- switch(match.arg(alignment),
# "left"= wdConst$wdAlignParagraphLeft,
# "right"= wdConst$wdAlignParagraphRight,
# "center"= wdConst$wdAlignParagraphCenter
# )
#
# wrdSel[["ParagraphFormat"]][["Alignment"]] <- alignment
# wrdSel[["ParagraphFormat"]][["SpaceBefore"]] <- spaceBefore
# wrdSel[["ParagraphFormat"]][["SpaceAfter"]] <- spaceAfter
# wrdSel[["ParagraphFormat"]][["LineSpacingRule"]] <- lineSpacingRule
#
# wrdSel$TypeText( paste(txt,collapse="\n") )
# if(appendCR) wrdSel$TypeParagraph()
#
# # Restore old font
# wrdFont[["Name"]] <- currfont[["name"]]
# wrdFont[["Size"]] <- currfont[["size"]]
# wrdFont[["Bold"]] <- currfont[["bold"]]
# wrdFont[["Italic"]] <- currfont[["italic"]]
# wrdFont[["Color"]] <- currfont[["color"]]
#
# invisible(currfont)
#
# }
WrdGoto <- function (name, what = wdConst$wdGoToBookmark, wrd = DescToolsOptions("lastWord")) {
  # Moves the Word cursor (selection) to a named target, by default a
  # bookmark. `what` is a wdGoToItem constant.
  wrdSel <- wrd[["Selection"]]
  wrdSel$GoTo(what=what, Name=name)
  invisible()
}
WrdInsertBookmark <- function (name, wrd = DescToolsOptions("lastWord")) {
  # Adds a bookmark named `name` at the current selection.
  # VBA equivalent: ActiveDocument.Bookmarks.Add Range:=Selection.Range, Name:=name
  wrd[["ActiveDocument"]][["Bookmarks"]]$Add(name)
  invisible()
}
WrdUpdateBookmark <- function (name, text, what = wdConst$wdGoToBookmark, wrd = DescToolsOptions("lastWord")) {
  # Replaces the text of an existing bookmark with `text`.
  # VBA pattern:
  #   With ActiveDocument.Bookmarks
  #     .Add Range:=Selection.Range, Name:="entb"
  #     .DefaultSorting = wdSortByName
  #     .ShowHidden = False
  #   End With
  wrdSel <- wrd[["Selection"]]
  wrdSel$GoTo(What=what, Name=name)
  wrdSel[["Text"]] <- text
  # setting the selection's text deletes the bookmark, so re-add it here
  # (the bookmark will be deleted, how can we avoid that?)
  wrdBookmarks <- wrd[["ActiveDocument"]][["Bookmarks"]]
  wrdBookmarks$Add(name)
  invisible()
}
# This has been made defunct in 0.99.18
#
# WrdR <- function(x, wrd = DescToolsOptions("lastWord") ){
#
# WrdText(paste("> ", x, sep=""), wrd=wrd, fontname="Courier New", fontsize=10, bold=TRUE, italic=TRUE)
# txt <- .CaptOut(eval(parse(text=x)))
# if(sum(nchar(txt))>0) WrdText(txt, wrd=wrd, fontname="Courier New", fontsize=10, bold=TRUE)
#
# invisible()
#
# }
# Example: WrdPlot(picscale=30)
# WrdPlot(width=8)
.CentimetersToPoints <- function(x) {
  # Converts centimeters to typographic points (1 cm = 28.35 pt).
  28.35 * x
}
.PointsToCentimeters <- function(x) {
  # Converts typographic points to centimeters (28.35 pt = 1 cm).
  x / 28.35
}
# http://msdn.microsoft.com/en-us/library/bb214076(v=office.12).aspx
WrdPlot <- function( type="png", append.cr=TRUE, crop=c(0,0,0,0), main = NULL,
                     picscale=100, height=NA, width=NA, res=300, dfact=1.6, wrd = DescToolsOptions("lastWord") ){
  # Copies the current plot device to a temporary image file and inserts
  # it into the Word document as an inline shape.
  #   crop          ... cm to crop c(bottom, left, top, right)
  #   height, width ... target size in cm (missing dimension derived from
  #                     the device aspect ratio)
  #   res, dfact    ... image resolution and an enlargement factor for the file
  # png is considered a good choice for export to word (Smith)
  # http://blog.revolutionanalytics.com/2009/01/10-tips-for-making-your-r-graphics-look-their-best.html
  # height, width in cm!
  # scale will be overidden, if height/width defined
  # handle missing height or width values
  if (is.na(width) ){
    if (is.na(height)) {
      width <- 14
      height <- par("pin")[2] / par("pin")[1] * width
    } else {
      width <- par("pin")[1] / par("pin")[2] * height
    }
  } else {
    if (is.na(height) ){
      height <- par("pin")[2] / par("pin")[1] * width
    }
  }
  # get a [type] tempfilename:
  fn <- paste( tempfile(pattern = "file", tmpdir = tempdir()), ".", type, sep="" )
  # this is a problem for RStudio....
  # savePlot( fn, type=type )
  # png(fn, width=width, height=height, units="cm", res=300 )
  dev.copy(eval(parse(text=type)), fn, width=width*dfact, height=height*dfact, res=res, units="cm")
  d <- dev.off()
  # add it to our word report
  res <- wrd[["Selection"]][["InlineShapes"]]$AddPicture( fn, FALSE, TRUE )
  wrdDoc <- wrd[["ActiveDocument"]]
  pic <- wrdDoc[["InlineShapes"]]$Item( wrdDoc[["InlineShapes"]][["Count"]] )
  pic[["LockAspectRatio"]] <- -1 # = msoTrue
  picfrmt <- pic[["PictureFormat"]]
  picfrmt[["CropBottom"]] <- .CentimetersToPoints(crop[1])
  picfrmt[["CropLeft"]] <- .CentimetersToPoints(crop[2])
  picfrmt[["CropTop"]] <- .CentimetersToPoints(crop[3])
  picfrmt[["CropRight"]] <- .CentimetersToPoints(crop[4])
  # NOTE(review): the block above always fills both width and height, so
  # this condition can never be TRUE and picscale appears to be dead code --
  # confirm intended behavior before changing.
  if( is.na(height) & is.na(width) ){
    # or use the ScaleHeight/ScaleWidth attributes:
    pic[["ScaleHeight"]] <- picscale
    pic[["ScaleWidth"]] <- picscale
  } else {
    # Set new height:
    if( is.na(width) ) width <- height / .PointsToCentimeters( pic[["Height"]] ) * .PointsToCentimeters( pic[["Width"]] )
    if( is.na(height) ) height <- width / .PointsToCentimeters( pic[["Width"]] ) * .PointsToCentimeters( pic[["Height"]] )
    pic[["Height"]] <- .CentimetersToPoints(height)
    pic[["Width"]] <- .CentimetersToPoints(width)
  }
  # either append a newline or just step over the inserted picture
  if( append.cr == TRUE ) { wrd[["Selection"]]$TypeText("\n")
  } else {
    wrd[["Selection"]]$MoveRight(wdConst$wdCharacter, 1, 0)
  }
  if( file.exists(fn) ) { file.remove(fn) }
  if(!is.null(main)){
    # insert caption
    sel <- wrd$Selection() # "Abbildung"
    sel$InsertCaption(Label=wdConst$wdCaptionFigure, Title=main)
    sel$TypeParagraph()
  }
  invisible(pic)
}
WrdTable <- function(nrow = 1, ncol = 1, heights = NULL, widths = NULL, main = NULL, wrd = DescToolsOptions("lastWord")){
  # Inserts an empty nrow x ncol Word table at the current selection.
  #   heights, widths ... row heights / column widths in cm (recycled)
  #   main            ... optional auto-numbered caption text
  # Returns the Word table handle invisibly.
  res <- wrd[["ActiveDocument"]][["Tables"]]$Add(wrd[["Selection"]][["Range"]],
                                                 NumRows = nrow, NumColumns = ncol)
  if(!is.null(widths)) {
    widths <- rep(widths, length.out=ncol)
    for(i in 1:ncol){
      # set column-widths
      tcol <- res$Columns(i)
      tcol[["Width"]] <- .CentimetersToPoints(widths[i])
    }
  }
  if(!is.null(heights)) {
    heights <- rep(heights, length.out=nrow)
    for(i in 1:nrow){
      # set row heights
      tcol <- res$Rows(i)
      tcol[["Height"]] <- .CentimetersToPoints(heights[i])
    }
  }
  if(!is.null(main)){
    # insert caption
    sel <- wrd$Selection() # "Abbildung"
    sel$InsertCaption(Label=wdConst$wdCaptionTable, Title=main)
    sel$TypeParagraph()
  }
  invisible(res)
}
Phrase <- function(x, g, glabels=NULL, xname=NULL, unit=NULL, lang="engl") {
  # Builds a descriptive sentence for a two-group comparison of x by g:
  # total n, group sizes and percentages, group means, and a t-test
  # verdict with the mean difference and its 95% CI when p < 0.05.
  #   g    ... grouping variable; assumes exactly two groups -- TODO confirm
  #   lang ... "engl" for English text, anything else for German
  if(is.null(xname))
    xname <- deparse(substitute(x))
  if(is.null(glabels))
    glabels <- levels(g)
  if(is.null(unit))
    unit <- ""
  if(lang=="engl"){
    # NOTE(review): the English txt2 lacks the trailing "\n" that the
    # German version has -- looks like an oversight; confirm before changing.
    txt1 <- "The collective consists of a total of %s elements. Of these, %s are %s (%s, mean %s %s %s) and %s %s (%s, mean %s %s %s).\n"
    txt2 <- "The difference is significant (t-test, p = %s) and is %s %s [%s, %s] (95%s CI)."
    txt3 <- "The difference is not significant.\n"
  } else {
    txt1 <- "Das Kollektiv besteht aus insgesamt %s Elementen. Davon sind %s %s (%s, mittleres %s %s %s) und %s %s (%s, mittleres %s %s %s).\n"
    txt2 <- "Der Unterschied ist signifikant (t-test, p = %s) und betraegt %s %s [%s, %s] (95%s-CI).\n"
    txt3 <- "Der Unterschied ist nicht signifikant.\n"
  }
  lst <- split(x, g)
  names(lst) <- c("x","y")
  n <- tapply(x, g, length)
  meanage <- tapply(x, g, mean)
  # first sentence: n, group sizes/percentages and group means
  txt <- gettextf(txt1
                  , Format(sum(n), digits=0, big.mark="'")
                  , Format(n[1], digits=0, big.mark="'")
                  , glabels[1]
                  , Format(n[1]/sum(n), digits=1, fmt="%")
                  , xname
                  , round(meanage[1], 1)
                  , unit
                  , Format(n[2], digits=0, big.mark="'")
                  , glabels[2]
                  , Format(n[2]/sum(n), digits=1, fmt="%")
                  , xname
                  , round(meanage[2],1)
                  , unit
  )
  # second sentence depends on the t-test at the fixed 5% level
  r.t <- t.test(lst$x, lst$y)
  if(r.t$p.value < 0.05){
    md <- round(MeanDiffCI(lst$x, lst$y), 1)
    txt <- paste(txt, gettextf(txt2, format.pval(r.t$p.value), md[1], unit, md[2], md[3], "%"), sep="" )
  } else {
    txt <- paste(txt, txt3, sep="")
  }
  # pasting "" uses collapse character, so get rid of multiple spaces here
  gsub(" )", ")", gsub(" +", " ", txt))
}
###
# ## Word Table - experimental code
#
# WrdTable <- function(tab, main = NULL, wrd = DescToolsOptions("lastWord"), row.names = FALSE, ...){
# UseMethod("WrdTable")
#
# }
#
#
# WrdTable.Freq <- function(tab, main = NULL, wrd = DescToolsOptions("lastWord"), row.names = FALSE, ...){
#
# tab[,c(3,5)] <- sapply(round(tab[,c(3,5)], 3), Format, digits=3)
# res <- WrdTable.default(tab=tab, wrd=wrd)
#
# if(!is.null(main)){
# # insert caption
# sel <- wrd$Selection() # "Abbildung"
# sel$InsertCaption(Label=wdConst$wdCaptionTable, Title=main)
# sel$TypeParagraph()
# }
#
# invisible(res)
#
# }
#
# WrdTable.ftable <- function(tab, main = NULL, wrd = DescToolsOptions("lastWord"), row.names = FALSE, ...) {
# tab <- FixToTable(capture.output(tab))
# NextMethod()
# }
#
#
# WrdTable.default <- function (tab, font = NULL, align=NULL, autofit = TRUE, main = NULL,
# wrd = DescToolsOptions("lastWord"), row.names=FALSE,
# ...) {
#
# dim1 <- ncol(tab)
# dim2 <- nrow(tab)
# if(row.names) dim1 <- dim1 + 1
#
# # wdConst ist ein R-Objekt (Liste mit 2755 Objekten!!!)
#
# write.table(tab, file = "clipboard", sep = "\t", quote = FALSE, row.names=row.names)
#
# myRange <- wrd[["Selection"]][["Range"]]
# bm <- wrd[["ActiveDocument"]][["Bookmarks"]]$Add("PasteHere", myRange)
# myRange$Paste()
#
# if(row.names) wrd[["Selection"]]$TypeText("\t")
#
# myRange[["Start"]] <- bm[["Range"]][["Start"]]
# myRange$Select()
# bm$Delete()
# wrd[["Selection"]]$ConvertToTable(Separator = wdConst$wdSeparateByTabs,
# NumColumns = dim1,
# NumRows = dim2,
# AutoFitBehavior = wdConst$wdAutoFitFixed)
#
# wrdTable <- wrd[["Selection"]][["Tables"]]$Item(1)
# # http://www.thedoctools.com/downloads/DocTools_List_Of_Built-in_Style_English_Danish_German_French.pdf
# wrdTable[["Style"]] <- -115 # "Tabelle Klassisch 1"
# wrdSel <- wrd[["Selection"]]
#
#
# # align the columns
# if(is.null(align))
# align <- c("l", rep(x = "r", ncol(tab)-1))
# else
# align <- rep(align, length.out=ncol(tab))
#
# align[align=="l"] <- wdConst$wdAlignParagraphLeft
# align[align=="c"] <- wdConst$wdAlignParagraphCenter
# align[align=="r"] <- wdConst$wdAlignParagraphRight
#
# for(i in seq_along(align)){
# wrdTable$Columns(i)$Select()
# wrd[["Selection"]][["ParagraphFormat"]][["Alignment"]] <- align[i]
# }
#
# if(!is.null(font)){
# wrdTable$Select()
# WrdFont(wrd) <- font
# }
#
# if(autofit)
# wrdTable$Columns()$AutoFit()
#
# # Cursor aus der Tabelle auf die letzte Postition im Dokument setzten
# # Selection.GoTo What:=wdGoToPercent, Which:=wdGoToLast
# wrd[["Selection"]]$GoTo(What = wdConst$wdGoToPercent, Which= wdConst$wdGoToLast)
#
# if(!is.null(main)){
# # insert caption
# sel <- wrd$Selection() # "Abbildung"
# sel$InsertCaption(Label=wdConst$wdCaptionTable, Title=main)
# sel$TypeParagraph()
#
# }
#
# invisible(wrdTable)
#
# }
#
# WrdTable <- function(tab, wrd){
# ### http://home.wanadoo.nl/john.hendrickx/statres/other/PasteAsTable.html
# write.table(tab, file="clipboard", sep="\t", quote=FALSE)
# myRange <- wrd[["Selection"]][["Range"]]
# bm <- wrd[["ActiveDocument"]][["Bookmarks"]]$Add("PasteHere", myRange)
# myRange$Paste()
# wrd[["Selection"]]$TypeText("\t")
# myRange[["Start"]] <- bm[["Range"]][["Start"]]
# myRange$Select()
# bm$Delete()
# wrd[["Selection"]]$ConvertToTable(Separator=wdConst$wdSeparateByTabs, NumColumns=4,
# NumRows=9, AutoFitBehavior=wdConst$wdAutoFitFixed)
# wrdTable <- wrd[["Selection"]][["Tables"]]$Item(1)
# wrdTable[["Style"]] <- "Tabelle Klassisch 1"
# wrdSel <- wrd[["Selection"]]
# wrdSel[["ParagraphFormat"]][["Alignment"]] <- wdConst$wdAlignParagraphRight
# #left align the first column
# wrdTable[["Columns"]]$Item(1)$Select()
# wrd[["Selection"]][["ParagraphFormat"]][["Alignment"]] <- wdConst$wdAlignParagraphLeft
# ### wtab[["ApplyStyleHeadingRows"]] <- TRUE
# ### wtab[["ApplyStyleLastRow"]] <- FALSE
# ### wtab[["ApplyStyleFirstColumn"]] <- TRUE
# ### wtab[["ApplyStyleLastColumn"]] <- FALSE
# ### wtab[["ApplyStyleRowBands"]] <- TRUE
# ### wtab[["ApplyStyleColumnBands"]] <- FALSE
# ### With Selection.Tables(1)
# #### If .Style <> "Tabellenraster" Then
# ### .Style = "Tabellenraster"
# ### End If
# ### wrd[["Selection"]]$ConvertToTable( Separator=wdConst$wdSeparateByTabs, AutoFit=TRUE, Format=wdConst$wdTableFormatSimple1,
# ### ApplyBorders=TRUE, ApplyShading=TRUE, ApplyFont=TRUE,
# ### ApplyColor=TRUE, ApplyHeadingRows=TRUE, ApplyLastRow=FALSE,
# ### ApplyFirstColumn=TRUE, ApplyLastColumn=FALSE)
# ### wrd[["Selection"]][["Tables"]]$Item(1)$Select()
# #wrd[["Selection"]][["ParagraphFormat"]][["Alignment"]] <- wdConst$wdAlignParagraphRight
# ### ### left align the first column
# ### wrd[["Selection"]][["Columns"]]$Item(1)$Select()
# ### wrd[["Selection"]][["ParagraphFormat"]][["Alignment"]] <- wdConst$wdAlignParagraphLeft
# ### wrd[["Selection"]][["ParagraphFormat"]][["Alignment"]] <- wdConst$wdAlignParagraphRight
# }
# require ( xtable )
# data ( tli )
# fm1 <- aov ( tlimth ~ sex + ethnicty + grade + disadvg , data = tli )
# fm1.table <- print ( xtable (fm1), type ="html")
# Tabellen-Studie via HTML FileExport
# WrdInsTable <- function( tab, wrd ){
# htmtab <- print(xtable(tab), type ="html")
# ### Let's create a summary file and insert it
# ### get a tempfile:
# fn <- paste(tempfile(pattern = "file", tmpdir = tempdir()), ".txt", sep="")
# write(htmtab, file=fn)
# wrd[["Selection"]]$InsertFile(fn)
# wrd[["ActiveDocument"]][["Tables"]]$Item(
# wrd[["ActiveDocument"]][["Tables"]][["Count"]] )[["Style"]] <- "Tabelle Klassisch 1"
# }
# WrdInsTable( fm1, wrd=wrd )
# data(d.pizza)
# txt <- Desc( temperature ~ driver, data=d.pizza )
# WrdInsTable( txt, wrd=wrd )
# WrdPlot(PlotDescNumFact( temperature ~ driver, data=d.pizza, newwin=T )
# , wrd=wrd, width=17, crop=c(0,0,60,0))
###
## Excel functions ====
GetNewXL <- function( visible = TRUE ) {
  # Starts a new Excel instance via COM (Windows only) and returns its
  # handle invisibly; NULL when RDCOMClient is not available.
  #   visible ... show the Excel window
  if (requireNamespace("RDCOMClient", quietly = FALSE)) {
    # Starts the Excel with xl as handle
    hwnd <- RDCOMClient::COMCreate("Excel.Application")
    # remember the handle, consistent with GetNewWrd()/GetCurrXL()
    DescToolsOptions(lastXL = hwnd)
    if( visible == TRUE ) hwnd[["Visible"]] <- TRUE
    # Create a new workbook
    # fix: Add must be CALLED -- the former `$Add` without parentheses only
    # fetched the COM method and never created the workbook
    # (cf. hwnd[["Documents"]]$Add(...) in GetNewWrd)
    newwb <- hwnd[["Workbooks"]]$Add()
  } else {
    if(Sys.info()["sysname"] == "Windows")
      warning("RDCOMClient is not available. To install it use: install.packages('RDCOMClient', repos = 'http://www.stats.ox.ac.uk/pub/RWin/')")
    else
      warning(gettextf("RDCOMClient is unfortunately not available for %s systems (Windows-only).", Sys.info()["sysname"]))
    hwnd <- NULL
  }
  invisible(hwnd)
}
GetCurrXL <- function() {
  # Returns a handle to an already running Excel instance (NULL when
  # RDCOMClient is unavailable) and stores it as DescToolsOptions("lastXL").
  # stopifnot(require(RDCOMClient))
  if (requireNamespace("RDCOMClient", quietly = FALSE)) {
    # try to get a handle to a running XL instance
    # there's no "get"-function in RDCOMClient, so just create a new here..
    hwnd <- RDCOMClient::COMCreate("Excel.Application", existing=TRUE)
    if(is.null(hwnd)) warning("No running Excel application found!")
    # options(lastXL = hwnd)
    DescToolsOptions(lastXL = hwnd)
  } else {
    if(Sys.info()["sysname"] == "Windows")
      warning("RDCOMClient is not available. To install it use: install.packages('RDCOMClient', repos = 'http://www.stats.ox.ac.uk/pub/RWin/')")
    else
      warning(gettextf("RDCOMClient is unfortunately not available for %s systems (Windows-only).", Sys.info()["sysname"]))
    hwnd <- NULL
  }
  invisible(hwnd)
}
XLView <- function (x, col.names = TRUE, row.names = FALSE, na = "") {
  # Opens the object x in a new Excel instance for interactive viewing,
  # routed through a temporary ;-separated csv file.
  # When x is missing, an empty single-sheet workbook saved as csv is
  # created instead. Returns the temp file name invisibly.
  # define some XL constants
  xlToRight <- -4161
  fn <- paste(tempfile(pattern = "file", tmpdir = tempdir()),
              ".csv", sep = "")
  xl <- GetNewXL()
  owb <- xl[["Workbooks"]]
  if(!missing(x)){
    # fix: use inherits() instead of class(x) == "ftable" -- class() may
    # return a vector of length > 1, which is an error as an if()
    # condition since R 4.2
    if(inherits(x, "ftable")){
      x <- FixToTable(capture.output(x), sep = " ", header = FALSE)
      col.names <- FALSE
    }
    write.table(x, file = fn, sep = ";", col.names = col.names,
                qmethod = "double", row.names = row.names, na=na)
    ob <- owb$Open(fn)
    # if row.names are saved there's the first cell in the first line missing
    # I don't actually see, how to correct this besides inserting a cell in XL
    if(row.names) xl$Cells(1, 1)$Insert(Shift=xlToRight)
    xl[["Cells"]][["EntireColumn"]]$AutoFit()
  } else {
    owb$Add()
    awb <- xl[["ActiveWorkbook"]]
    # delete sheets(2,3) without asking, if it's ok
    xl[["DisplayAlerts"]] <- FALSE
    xl$Sheets(c(2,3))$Delete()
    xl[["DisplayAlerts"]] <- TRUE
    awb$SaveAs( Filename=fn, FileFormat=6 )
  }
  invisible(fn)
}
XLGetRange <- function (file = NULL, sheet = NULL, range = NULL, as.data.frame = TRUE,
                        header = FALSE, stringsAsFactors = FALSE, echo = FALSE, datecols = NA) {

  # Import one or several cell ranges from an Excel workbook via COM
  # (Windows only, requires RDCOMClient).
  #
  # file:     path to a workbook; NULL means "use the currently running Excel
  #           instance and its current selection"
  # sheet:    sheet name or index (defaults to 1 when a file is given)
  # range:    character vector of A1-style addresses ("A1:C10"); NULL means
  #           the current selection / current region around the active cell
  # as.data.frame: coerce each imported range to a data.frame
  # header:   treat the first row of each range as column names
  # stringsAsFactors: passed on to data.frame()
  # echo:     print the reproducing call on exit
  # datecols: column names or indices to convert from Excel serial dates to Date
  #
  # Returns a single data.frame when one range was read, otherwise a list of
  # data.frames, with a reproducing call attached as attribute "call".

  # convert an A1-style address ("B2:D10") into a list of two
  # c(row, col) index vectors (Z1S1 style)
  A1ToZ1S1 <- function(x){
    # all 16384 possible Excel column labels: A..Z, AA..ZZ, AAA..XFD
    xlcol <- c( LETTERS
                , sort(c(outer(LETTERS, LETTERS, paste, sep="" )))
                , sort(c(outer(LETTERS, c(outer(LETTERS, LETTERS, paste, sep="" )), paste, sep="")))
    )[1:16384]

    z1s1 <- function(x) {
      # split "AB12" into its letter (column) and digit (row) parts
      colnr <- match( regmatches(x, regexec("^[[:alpha:]]+", x)), xlcol)
      rownr <- as.numeric(regmatches(x, regexec("[[:digit:]]+$", x)))
      return(c(rownr, colnr))
    }
    lapply(unlist(strsplit(toupper(x),":")), z1s1)
  }

  # main function *******************************

  # to do: 30.8.2015
  # we could / should check for a running XL instance here...
  # ans <- RDCOMClient::getCOMInstance("Excel.Application", force = FALSE, silent = TRUE)
  # if (is.null(ans) || is.character(ans)) print("not there")

  if(is.null(file)){
    # no file given: attach to the running Excel instance
    xl <- GetCurrXL()
    ws <- xl$ActiveSheet()
    if(is.null(range)) {
      # if there is a selection in XL then use it, if only one cell selected use currentregion
      sel <- xl$Selection()
      if(sel$Cells()$Count() == 1 ){
        range <- xl$ActiveCell()$CurrentRegion()$Address(FALSE, FALSE)
      } else {
        # collect the address of every selected area individually
        range <- sapply(1:sel$Areas()$Count(), function(i) sel$Areas()[[i]]$Address(FALSE, FALSE) )

        # old: this did not work on some XL versions with more than 28 selected areas
        # range <- xl$Selection()$Address(FALSE, FALSE)
        # range <- unlist(strsplit(range, ";"))
        # there might be more than 1 single region, split by ;
        # (this might be a problem for other locales)
      }
    }

  } else {
    # a file was given: start a fresh Excel instance and open the workbook
    xl <- GetNewXL()
    wb <- xl[["Workbooks"]]$Open(file)

    # set defaults for sheet and range here
    if(is.null(sheet))
      sheet <- 1

    if(is.null(range))
      range <- xl$Cells(1,1)$CurrentRegion()$Address(FALSE, FALSE)

    ws <- wb$Sheets(sheet)$select()
  }

  # fetch the raw cell values (Value2) for every requested range
  lst <- list()
  # for(i in 1:length(range)){ # John Chambers prefers seq_along: (why actually?)
  for(i in seq_along(range)){
    zs <- A1ToZ1S1(range[i])
    rr <- xl$Range(xl$Cells(zs[[1]][1], zs[[1]][2]), xl$Cells(zs[[2]][1], zs[[2]][2]) )
    lst[[i]] <- rr[["Value2"]]
    names(lst)[i] <- range[i]
  }

  # implement na.strings:
  # if(!identical(na.strings, NA)){
  #  for(s in na.strings){
  #     lst[[i]] <- replace(lst[[i]], list = na.strings, values = NA)
  #  }
  # }

  # replace NULL values by NAs, as NULLs are evil while coercing to data.frame!
  if(as.data.frame){
    # for(i in 1:length(lst)){   # original
    for(i in seq_along(lst)){
      # for(j in 1:length(lst[[i]])){
      for(j in seq_along(lst[[i]])){
        lst[[i]][[j]][unlist(lapply(lst[[i]][[j]], is.null))] <- NA
      }
      xnames <- unlist(lapply(lst[[i]], "[", 1))        # define the names in case header = TRUE
      if(header) lst[[i]] <- lapply(lst[[i]], "[", -1)  # delete the first row

      lst[[i]] <- do.call(data.frame, c(lapply(lst[[i]][], unlist), stringsAsFactors = stringsAsFactors))
      if(header){
        names(lst[[i]]) <- xnames
      } else {
        # no header: use generic X1, X2, ... column names
        names(lst[[i]]) <- paste("X", 1:ncol(lst[[i]]), sep="")
      }
    }

    # convert date columns to date
    if(!identical(datecols, NA)){
      # apply to all selections
      for(i in seq_along(lst)){
        # switch to colindex if given as text
        if(!is.numeric(datecols) && header)
          datecols <- which(names(lst[[i]]) %in% datecols)
        for(j in datecols)
          lst[[i]][,j] <- as.Date(XLDateToPOSIXct(lst[[i]][,j]))
      }
    }
  }

  # just return a single object (for instance data.frame) if only one range was supplied
  if(length(lst)==1) lst <- lst[[1]]

  # attach a call that would reproduce this import (backslashes doubled so
  # the path survives being printed and re-parsed)
  # opt <- options(useFancyQuotes=FALSE);  on.exit(options(opt))
  attr(lst,"call") <- gettextf("XLGetRange(file = %s, sheet = %s,
range = c(%s),
as.data.frame = %s, header = %s, stringsAsFactors = %s)",
                               gsub("\\\\", "\\\\\\\\",
                                    shQuote(paste(xl$ActiveWorkbook()$Path(),
                                                  xl$ActiveWorkbook()$Name(), sep="\\"))),
                               shQuote(xl$ActiveSheet()$Name()),
                               # gettextf(paste(dQuote(names(lst)), collapse=",")),
                               gettextf(paste(shQuote(range), collapse=",")),
                               as.data.frame, header, stringsAsFactors)

  if(!is.null(file)) xl$Quit()    # only quit, if a new XL-instance was created before

  if(echo)
    cat(attr(lst,"call"))

  return(lst)
}
# XLGetWorkbook <- function (file) {
#
# xlLastCell <- 11
#
# xl <- GetNewXL()
# wb <- xl[["Workbooks"]]$Open(file)
#
# lst <- list()
# for( i in 1:wb[["Sheets"]][["Count"]]){
# ws <- wb[["Sheets", i]]
# ws[["Range", "A1"]][["Select"]]
# rngLast <- xl[["ActiveCell"]][["SpecialCells", xlLastCell]][["Address"]]
# lst[[i]] <- ws[["Range", paste("A1",rngLast, sep=":")]][["Value2"]]
# }
#
# xl$Quit()
# return(lst)
#
# }
# New in 0.99.18:
XLGetWorkbook <- function (file, compactareas = TRUE) {

  # Import all non-empty sheets of an Excel workbook via COM
  # (Windows only, requires RDCOMClient).
  #
  # file:         path to the workbook
  # compactareas: if TRUE, each contiguous used area is collapsed into a
  #               single matrix (via cbind)
  #
  # Returns a list with one element per non-empty sheet, named by sheet
  # name; each element is a list of that sheet's used areas.

  # a sheet counts as empty when its used range is a single cell without a value
  IsEmptySheet <- function(sheet)
    sheet$UsedRange()$Rows()$Count() == 1 &
    sheet$UsedRange()$columns()$Count() == 1 &
    is.null(sheet$cells(1,1)$Value())

  # collapse a list of column lists into one matrix
  CompactArea <- function(lst)
    do.call(cbind, lapply(lst, cbind))

  # Excel SpecialCells() type constants
  xlCellTypeConstants <- 2
  xlCellTypeFormulas <- -4123

  xl <- GetNewXL()
  wb <- xl[["Workbooks"]]$Open(file)
  lst <- list()

  for (i in 1:wb$Sheets()$Count()) {
    if(!IsEmptySheet(sheet=xl$Sheets(i))) {

      # has.formula is TRUE, when all cells contain formula, FALSE when no cell contains a formula
      # and NULL else, thus: !identical(FALSE) for having some or all
      if(!identical(xl$Sheets(i)$UsedRange()$HasFormula(), FALSE))
        areas <- xl$union(
          xl$Sheets(i)$UsedRange()$SpecialCells(xlCellTypeConstants),
          xl$Sheets(i)$UsedRange()$SpecialCells(xlCellTypeFormulas))$areas()
      else
        areas <- xl$Sheets(i)$UsedRange()$SpecialCells(xlCellTypeConstants)$areas()

      # pull the raw values (Value2) area by area
      alst <- list()
      for ( j in 1:areas$count())
        alst[[j]] <- areas[[j]]$Value2()

      lst[[xl$Sheets(i)$name()]] <- alst
    }
  }

  if(compactareas)
    lst <- lapply(lst, function(x) lapply(x, CompactArea))

  # close without saving
  wb$Close(FALSE)
  xl$Quit()

  return(lst)
}
XLKill <- function(){
  # Kill a (possibly hanging) Excel process hard via the Windows task killer.
  #
  # Excel would only quit, when all workbooks are closed before, someone said.
  # http://stackoverflow.com/questions/15697282/excel-application-not-quitting-after-calling-quit
  # We experience, that it would not even then quit, when there's no workbook loaded at all.
  # maybe gc() would help
  # so killing the task is "ultima ratio"...
  #
  # NOTE(review): Windows-only; /F forces termination, any unsaved work in
  # ALL running Excel instances is lost.
  shell('taskkill /F /IM EXCEL.EXE')
}
XLDateToPOSIXct <- function (x, tz = "GMT", xl1904 = FALSE) {

  # Convert Excel date serial numbers to POSIXct.
  #
  # Excel stores dates as days since an origin; there are two date systems,
  # see https://support.microsoft.com/en-us/kb/214330
  #
  # x:      numeric vector of Excel serial dates (fractions are time of day)
  # tz:     time zone for the result
  # xl1904: TRUE for the Macintosh 1904 system, FALSE (default) for 1900

  origin <- if (xl1904) "1904-01-01" else "1899-12-30"

  # one Excel day unit corresponds to 86400 seconds
  as.POSIXct(x * 86400, origin = origin, tz = tz)
}
###
## PowerPoint functions ====
GetNewPP <- function (visible = TRUE, template = "Normal") {

  # Start a new PowerPoint application via COM (Windows only), create a
  # presentation with one blank slide and remember the handle in
  # DescToolsOptions("lastPP").
  #
  # visible:  if TRUE, the PowerPoint window is shown on screen.
  # template: currently unused; kept for interface compatibility.
  #
  # Returns the COM handle invisibly, or NULL (with a warning) when
  # RDCOMClient is not available.

  # quietly = TRUE: suppress requireNamespace()'s own message, we warn below
  if (requireNamespace("RDCOMClient", quietly = TRUE)) {

    hwnd <- RDCOMClient::COMCreate("PowerPoint.Application")
    if (visible == TRUE) { hwnd[["Visible"]] <- TRUE }
    newpres <- hwnd[["Presentations"]]$Add(TRUE)

    # ppLayoutBlank = 12: add one empty slide to start with
    ppLayoutBlank <- 12
    newpres[["Slides"]]$Add(1, ppLayoutBlank)

    # options("lastPP" = hwnd)
    DescToolsOptions(lastPP = hwnd)

  } else {

    if(Sys.info()["sysname"] == "Windows")
      warning("RDCOMClient is not available. To install it use: install.packages('RDCOMClient', repos = 'http://www.stats.ox.ac.uk/pub/RWin/')")
    else
      warning(gettextf("RDCOMClient is unfortunately not available for %s systems (Windows-only).", Sys.info()["sysname"]))

    hwnd <- NULL
  }

  invisible(hwnd)

}
GetCurrPP <- function() {

  # Get a COM handle to an already running PowerPoint instance (Windows only)
  # and store it in DescToolsOptions("lastPP").
  #
  # Returns the COM handle invisibly, or NULL (with a warning) when
  # RDCOMClient is not available or no PowerPoint instance is running.

  # quietly = TRUE: suppress requireNamespace()'s own message, we warn below
  if (requireNamespace("RDCOMClient", quietly = TRUE)) {

    # there's no "get"-function in RDCOMClient, so just create a new here..
    hwnd <- RDCOMClient::COMCreate("PowerPoint.Application", existing=TRUE)
    if(is.null(hwnd)) warning("No running PowerPoint application found!")

    # options("lastPP" = hwnd)
    DescToolsOptions(lastPP = hwnd)

  } else {

    if(Sys.info()["sysname"] == "Windows")
      warning("RDCOMClient is not available. To install it use: install.packages('RDCOMClient', repos = 'http://www.stats.ox.ac.uk/pub/RWin/')")
    else
      warning(gettextf("RDCOMClient is unfortunately not available for %s systems (Windows-only).", Sys.info()["sysname"]))

    hwnd <- NULL
  }

  invisible(hwnd)

}
PpAddSlide <- function(pos = NULL, pp = DescToolsOptions("lastPP")){
  # Insert a new slide into the active PowerPoint presentation and select it.
  #
  # pos: position of the new slide; NULL (default) appends after the last slide.
  # pp:  COM handle to the PowerPoint application (default: last one used).
  slides <- pp[["ActivePresentation"]][["Slides"]]
  # default: append at the end
  if(is.null(pos)) pos <- slides$Count()+1
  # reuse the layout of the first slide for the new one, then select it
  slides$AddSlide(pos, slides$Item(1)[["CustomLayout"]])$Select()
  invisible()
}
PpText <- function (txt, x=1, y=1, height=50, width=100, fontname = "Calibri", fontsize = 18, bold = FALSE,
                    italic = FALSE, col = "black", bg = "white", hasFrame = TRUE, pp = DescToolsOptions("lastPP")) {

  # Place a text box (a rectangle shape with text) on the current
  # PowerPoint slide.
  #
  # txt:            the text; non-character objects are captured as their
  #                 printed representation
  # x, y:           position of the upper-left corner (points)
  # height, width:  size of the box (points)
  # fontname, fontsize, bold, italic, col: text formatting
  # bg:             fill colour of the box
  # hasFrame:       draw a border line around the box
  # pp:             COM handle to the PowerPoint application
  #
  # Returns the shape's COM handle invisibly.

  msoShapeRectangle <- 1

  # is.character() instead of class(txt) != "character": robust for objects
  # carrying more than one class attribute
  if (!is.character(txt))
    txt <- .CaptOut(txt)

  # slide <- pp[["ActivePresentation"]][["Slides"]]$Item(1)
  slide <- pp$ActiveWindow()$View()$Slide()

  shape <- slide[["Shapes"]]$AddShape(msoShapeRectangle, x, y, x + width, y+height)
  textbox <- shape[["TextFrame"]]
  textbox[["TextRange"]][["Text"]] <- txt

  # apply the font settings
  tbfont <- textbox[["TextRange"]][["Font"]]
  tbfont[["Name"]] <- fontname
  tbfont[["Size"]] <- fontsize
  tbfont[["Bold"]] <- bold
  tbfont[["Italic"]] <- italic
  tbfont[["Color"]] <- RgbToLong(ColToRgb(col))

  # inner margins of the text frame (points)
  textbox[["MarginBottom"]] <- 10
  textbox[["MarginLeft"]] <- 10
  textbox[["MarginRight"]] <- 10
  textbox[["MarginTop"]] <- 10

  # background fill and frame line
  shp <- shape[["Fill"]][["ForeColor"]]
  shp[["RGB"]] <- RgbToLong(ColToRgb(bg))
  shp <- shape[["Line"]]
  shp[["Visible"]] <- hasFrame

  invisible(shape)

}
PpPlot <- function( type="png", crop=c(0,0,0,0),
                    picscale=100, x=1, y=1, height=NA, width=NA, res=200, dfact=1.6, pp = DescToolsOptions("lastPP") ){

  # Insert the current plot into the active PowerPoint slide.
  #
  # height, width in cm!
  # picscale (percent) is used when NEITHER height nor width is supplied;
  # it is overridden as soon as one of them is defined.
  # Example:  PpPlot(picscale=30)
  #           PpPlot(width=8)
  #
  # type:  graphics device used for the intermediate file (e.g. "png")
  # crop:  cm to crop off c(bottom, left, top, right)
  # res, dfact: resolution and size factor for the intermediate bitmap
  # pp:    COM handle to the PowerPoint application
  #
  # Returns the picture shape's COM handle invisibly.

  .CentimetersToPoints <- function(x) x * 28.35
  .PointsToCentimeters <- function(x) x / 28.35

  # http://msdn.microsoft.com/en-us/library/bb214076(v=office.12).aspx

  # BUG FIX: remember whether the user supplied any dimension BEFORE the
  # defaults are filled in below. The old check (is.na(height) & is.na(width))
  # after defaulting could never be TRUE, so picscale was silently ignored.
  scale_only <- is.na(height) && is.na(width)

  # handle missing height or width values (the device needs concrete sizes)
  if (is.na(width) ){
    if (is.na(height)) {
      # neither given: use 14 cm width and keep the device's aspect ratio
      width <- 14
      height <- par("pin")[2] / par("pin")[1] * width
    } else {
      width <- par("pin")[1] / par("pin")[2] * height
    }
  } else {
    if (is.na(height) ){
      height <- par("pin")[2] / par("pin")[1] * width
    }
  }

  # get a [type] tempfilename:
  fn <- paste( tempfile(pattern = "file", tmpdir = tempdir()), ".", type, sep="" )

  # copy the current device into the file
  # (savePlot would be a problem for RStudio)
  dev.copy(eval(parse(text=type)), fn, width=width*dfact, height=height*dfact, res=res, units="cm")
  d <- dev.off()

  # slide <- pp[["ActivePresentation"]][["Slides"]]$Item(1)
  slide <- pp$ActiveWindow()$View()$Slide()
  pic <- slide[["Shapes"]]$AddPicture(fn, FALSE, TRUE, x, y)

  # crop is given in cm as c(bottom, left, top, right)
  picfrmt <- pic[["PictureFormat"]]
  picfrmt[["CropBottom"]] <- .CentimetersToPoints(crop[1])
  picfrmt[["CropLeft"]] <- .CentimetersToPoints(crop[2])
  picfrmt[["CropTop"]] <- .CentimetersToPoints(crop[3])
  picfrmt[["CropRight"]] <- .CentimetersToPoints(crop[4])

  if( scale_only ){
    # no explicit size requested: scale relative to the picture's natural size
    # (ScaleHeight/ScaleWidth with msoTrue = relative to original size)
    msoTrue <- -1
    msoFalse <- 0
    pic$ScaleHeight(picscale/100, msoTrue)
    pic$ScaleWidth(picscale/100, msoTrue)

  } else {
    # explicit size: both width and height are defined at this point
    # (a missing one was derived from the device's aspect ratio above)
    pic[["Height"]] <- .CentimetersToPoints(height)
    pic[["Width"]] <- .CentimetersToPoints(width)
  }

  if( file.exists(fn) ) { file.remove(fn) }

  invisible( pic )

}
CourseData <- function(name, url=NULL, header=TRUE, sep=";", ...){

  # Fetch a course dataset by name from a web repository and read it with
  # read.table().
  #
  # name:   dataset name; ".txt" is appended when no file extension is given
  # url:    base URL of the repository (default: the hwz datasets folder)
  # header, sep, ...: passed on to read.table()
  #
  # Returns the dataset as a data.frame.

  # append the default extension only when name does not already END in a
  # 3-character extension (the old pattern "\\..{3}" matched a dot anywhere
  # in the name, so e.g. "a.2015data" was wrongly treated as having one)
  if(!grepl("\\..{3}$", name))
    name <- paste(name, ".txt", sep="")

  if(is.null(url))
    url <- "http://www.signorell.net/hwz/datasets/"

  # plain concatenation instead of gettextf(), which would misinterpret
  # '%' characters contained in the dataset name
  url <- paste(url, name, sep="")

  read.table(file = url, header = header, sep = sep, ...)

}
###
## Entwicklungs-Ideen ====
# With ActiveDocument.Bookmarks
# .Add Range:=Selection.Range, Name:="start"
# .DefaultSorting = wdSortByName
# .ShowHidden = False
# End With
# Selection.TypeText Text:="Hier kommt mein Text"
# Selection.TypeParagraph
# Selection.TypeText Text:="und auf weiteren Zeilen"
# Selection.TypeParagraph
# With ActiveDocument.Bookmarks
# .Add Range:=Selection.Range, Name:="stop"
# .DefaultSorting = wdSortByName
# .ShowHidden = False
# End With
# Selection.GoTo What:=wdGoToBookmark, Name:="start"
# Selection.GoTo What:=wdGoToBookmark, Name:="stop"
# With ActiveDocument.Bookmarks
# .DefaultSorting = wdSortByName
# .ShowHidden = False
# End With
# Selection.MoveLeft Unit:=wdWord, Count:=2, Extend:=wdExtend
# Selection.HomeKey Unit:=wdStory, Extend:=wdExtend
# Selection.Font.Name = "Arial Black"
# Selection.EndKey Unit:=wdStory
# Selection.GoTo What:=wdGoToBookmark, Name:="stop"
# Selection.Find.ClearFormatting
# With Selection.Find
# .Text = "0."
# .Replacement.Text = " ."
# .Forward = True
# .Wrap = wdFindContinue
# .Format = False
# .MatchCase = False
# .MatchWholeWord = False
# .MatchWildcards = False
# .MatchSoundsLike = False
# .MatchAllWordForms = False
# End With
# ActiveDocument.Bookmarks("start").Delete
# With ActiveDocument.Bookmarks
# .DefaultSorting = wdSortByName
# .ShowHidden = False
# End With
# End Sub
# wdSortByName =0
# wdGoToBookmark = -1
# wdFindContinue = 1
# wdStory = 6
# Bivariate Darstellungen gute uebersicht
# pairs( lapply( lapply( c( d.set[,-1], list()), "as.numeric" ), "jitter" ), col=rgb(0,0,0,0.2) )
# Gruppenweise Mittelwerte fuer den ganzen Recordset
# wrdInsertText( "Mittelwerte zusammengefasst\n\n" )
# wrdInsertSummary(
# signif( cbind(
# t(as.data.frame( lapply( d.frm, tapply, grp, "mean", na.rm=T )))
# , tot=mean(d.frm, na.rm=T)
# ), 3)
|
/R/DescTools.r
|
no_license
|
acabaya/DescTools
|
R
| false
| false
| 476,379
|
r
|
#
# Project: DescTools
#
# Purpose: Tools for descriptive statistics, the missing link...
# Univariat, pairwise bivariate, groupwise und multivariate
#
# Author: Andri Signorell
# Version: 0.99.19 (under construction)
#
# Depends: tcltk
# Imports: boot
# Suggests: RDCOMClient
#
# Datum:
# 31.07.2013 version 0.99.4 almost releaseable
# 06.05.2011 created
#
# ****************************************************************************
# ********** DescTools' design goals, Dos and Donts
# Some thoughts about coding:
# 1. Use recycling rules as often and wherever possible.
# 2. Handle NAs by adding an na.rm option (default FALSE) where it makes sense.
# 3. Use Google Naming StyleGuide
# 4. no data.frame or matrix interfaces for functions, the user is supposed to use
# sapply and apply.
# Interfaces for data.frames are widely deprecated nowadays and so we abstained to implement one.
# Use do.call (do.call), rbind and lapply for getting a matrix with estimates and confidence
# intervals for more than 1 column.
# 5. A pairwise apply construction is implemented PwApply
# 6. Use formula interfaces wherever possible.
# 7. use test results format class "htest"
# 8. deliver confidence intervals wherever possible, rather than tests (use ci for that)
# 9. always define appropriate default values for function arguments
# 10. provide an inverse function whenever possible (ex.: BoxCox - BoxCoxInv)
# 11. auxiliary functions, which don't have to be defined globally are put in the function's body
# (and not made invisible to the user by using .funname)
# 12. restrict the use of other libraries to the minimum (possibly only core),
# avoid hierarchical dependencies of packages over more than say 2 steps
# 13. do not create wrappers, which basically only define specific arguments and
# call an existing function (we would run into a forest of functions, loosing overview)
# 14. make functions as flexible as possible but do not define more than say
# a maximum of 12 arguments for a function (can hardly be controlled by the user)
# 15. define reasonable default values for possibly all used arguments
# (besides x), the user should get some result when typing fun(x)!
# 16. do not reinvent the wheel
# 17. do not write a function for a problem already solved(!), unless you think
# it is NOT (from your point of view) and you are pretty sure you can do better..
# 18. take the most flexible function on the market, if there are several
# take the most efficient function on the market, if there are differences in speed
# 19. make it work - make it safe - make it fast (in this very order...)
# 20. possibly publish all functions, if internal functions are used, define it within
# the functions body, this will ensure a quick source lookup.
# ********** Similar packages:
# - descr, UsingR
# - prettyR
# - reporttools
# - lessR (full)
# - Hmisc (describe)
# - psych
# check:
# library(pwr) # Power-Analyse
# http://www.ats.ucla.edu/stat/r/dae/t_test_power2.htm
# Data in packages
# http://www.hep.by/gnu/r-patched/r-exts/R-exts_8.html
# library(gtools): odd zu IsOdd, vgl: stars.pval
# library(e1071): hamming.distance, hamming.window, hsv_palette, matchControls (SampleTwins)
# library(plotrix): color.id (RgbToCol), color.scale (FindColor)
# vgl: PlotCI (plotCI), plot_bg
# ********** Know issues:
# bug: Desc( driver + temperature ~ operator + interaction(city, driver, sep=":") , data=d.pizza)
# works: Desc( driver + temperature ~ operator + interaction(city, driver, sep=".") , data=d.pizza)
# works: Desc( driver + temperature ~ operator + city:driver, data=d.pizza)
# - bei der Anwendung von tapply wird die Bezeichnung des Levels nicht verwendet
# Beispiel:
# tapply( d.pizza$delivery_min, d.pizza$driver, Desc )
# Problem: Titel und level kommt nicht mit
# - DescWrd.factor.factor gibt die Argumente an WrdText nicht weiter? fontsize, etc. (17.4.2012)
# - ein langer label fuehrt dazu, dass die Tabellenausgabe umgebrochen wird und die Grafik unter dem Text plaziert wird.
# this error arises when no plot windows exists, but is the same for boxplot, so we leave it here
# PlotViolin(temperature ~ driver, d.pizza, col="steelblue", panel.first=grid())
# Error in int_abline(a = a, b = b, h = h, v = v, untf = untf, ...) :
# plot.new has not been called yet
# ********** Open implementations:
# functions:
# polychor, tetrachor
# Cohen's effect fct
# Cohen's effect hlp
# eta fct lines
# eta hlp
# eta2 <- function(x,y) {
# return(summary(lm(as.formula(x~y)))$r.squared)
# }
# open multiple comparisons:
# ScottKnott test (scottknott),
# Waller-Duncan test (agricolae), Gabriel test (not found)
# flag ~ flag mit mosaicplot und allgemein bivariate darstellung
# ConDisPairs als O(n log(n)) AVL-Tree implementation
# PlotMultiDens stack and 100% (cdplot)
#
# PlotCirc for symmetric tables
# Konsequente ueberpruefung der uebergabe und weiterreichung der parameter
# z.B. was ist mit Boxplot las?
# uebersicht, was wird wo vewendet, z.b. kommt rfrq ueberhaupt an bei Desc(data.frame)
# Was ist die maximale Menge an parameter?
# - Tabellen factor ~ factor nebeneinander wenn Platz
# PercTable tasks:
# Sum, perc, usw. Texte parametrisieren
# 0 values als '-' optional anzeigen
# Format perc stimmt im ersten Fall nicht, parametrisieren?
# Reihenfolge Zuerich, perc vs. perc , Zuerich wechselbar machen. Ist das schon?
# faqNC <- function() browseURL("http://www.ncfaculty.net/dogle/R/FAQ/FAQ_R_NC.html")
# Formula-Interface fuer PlotBag
# - replace .fmt by Format
# - DescDlg
# - Object Browser a la RevoR
# - Fixierung Nachkommastellen pro Variable - geloest, aber unbefriedigend
# sollte unterscheiden zwischen kleinen (1.22e-22), mittleren (100.33) und
# grossen Zahlen (1.334e5)
# grosse Zahlen mit Tausendertrennzeichen ausgegeben: 13'899
# - Alle PlotDesc sollten so funktionieren wie Desc, also mit data, ohne data etc.
# wenn mal viel Zeit: test routinen mit htest result fuer
# SomersDelta, GoodmanKruskal etc.
# separate Data ========
# Creation of the Page distribution function for the Page TrendTest
#
# .PageDF <- list(
# NA, NA
# , k3 = c(1, 3, 3, 5, 6)
# , k4 = c(1, 4, 5, 9, 11, 13, 15, 19, 20, 23, 24)
# , k5 = c(1, 5, 8, 14, 21, 27, 31, 41, 47, 57, 63, 73, 79, 89, 93, 99, 106, 112, 115, 119, 120)
# , k6 = c(1, 6, 12, 21, 37, 49, 63, 87, 107, 128, 151, 179, 203, 237,
# 257, 289, 331, 360, 389, 431, 463, 483, 517, 541, 569, 592, 613,
# 633, 657, 671, 683, 699, 708, 714, 719, 720)
# , k7 = c(1, 7, 17, 31, 60, 86, 121, 167, 222, 276, 350, 420, 504, 594,
# 672, 762, 891, 997, 1120, 1254, 1401, 1499, 1667, 1797, 1972,
# 2116, 2284, 2428, 2612, 2756, 2924, 3068, 3243, 3373, 3541, 3639,
# 3786, 3920, 4043, 4149, 4278, 4368, 4446, 4536, 4620, 4690, 4764,
# 4818, 4873, 4919, 4954, 4980, 5009, 5023, 5033, 5039, 5040)
# , k8 = c(1, 8, 23, 45, 92, 146, 216, 310, 439, 563, 741, 924, 1161,
# 1399, 1675, 1939, 2318, 2667, 3047, 3447, 3964, 4358, 4900, 5392,
# 6032, 6589, 7255, 7850, 8626, 9310, 10096, 10814, 11736, 12481,
# 13398, 14179, 15161, 15987, 16937, 17781, 18847, 19692, 20628,
# 21473, 22539, 23383, 24333, 25159, 26141, 26922, 27839, 28584,
# 29506, 30224, 31010, 31694, 32470, 33065, 33731, 34288, 34928,
# 35420, 35962, 36356, 36873, 37273, 37653, 38002, 38381, 38645,
# 38921, 39159, 39396, 39579, 39757, 39881, 40010, 40104, 40174,
# 40228, 40275, 40297, 40312, 40319, 40320)
# , k9 = c(1, 9, 30, 64, 136, 238, 368, 558, 818, 1102, 1500, 1954, 2509,
# 3125, 3881, 4625, 5647, 6689, 7848, 9130, 10685, 12077, 13796,
# 15554, 17563, 19595, 21877, 24091, 26767, 29357, 32235, 35163,
# 38560, 41698, 45345, 48913, 52834, 56700, 61011, 65061, 69913,
# 74405, 79221, 84005, 89510, 94464, 100102, 105406, 111296, 116782,
# 122970, 128472, 134908, 140730, 146963, 152987, 159684, 165404,
# 172076, 178096, 184784, 190804, 197476, 203196, 209893, 215917,
# 222150, 227972, 234408, 239910, 246098, 251584, 257474, 262778,
# 268416, 273370, 278875, 283659, 288475, 292967, 297819, 301869,
# 306180, 310046, 313967, 317535, 321182, 324320, 327717, 330645,
# 333523, 336113, 338789, 341003, 343285, 345317, 347326, 349084,
# 350803, 352195, 353750, 355032, 356191, 357233, 358255, 358999,
# 359755, 360371, 360926, 361380, 361778, 362062, 362322, 362512,
# 362642, 362744, 362816, 362850, 362871, 362879, 362880)
# , k10 = c(1, 10, 38, 89, 196, 373, 607, 967, 1465, 2084, 2903, 3943, 5195, 6723, 8547, 10557, 13090, 15927, 19107, 22783, 27088, 31581, 36711, 42383, 48539, 55448, 62872, 70702, 79475, 88867, 98759, 109437, 121084, 133225, 146251, 160169, 174688, 190299, 206577, 223357, 242043, 261323, 280909, 301704, 324089, 346985, 370933, 395903, 421915, 449011, 477478, 505905, 536445, 567717, 599491, 632755, 667503, 702002, 738301, 774897, 813353, 852279, 892263, 931649, 973717, 1016565, 1058989, 1101914, 1146958, 1191542, 1237582, 1283078, 1329968, 1377004, 1424345, 1471991, 1520878, 1569718, 1617762, 1666302, 1716368, 1765338, 1814400, 1863462, 1912432, 1962498, 2011038, 2059082, 2107922, 2156809, 2204455, 2251796, 2298832, 2345722, 2391218, 2437258, 2481842, 2526886, 2569811, 2612235, 2655083, 2697151, 2736537, 2776521, 2815447, 2853903, 2890499, 2926798, 2961297, 2996045, 3029309, 3061083, 3092355, 3122895, 3151322, 3179789, 3206885, 3232897, 3257867, 3281815, 3304711, 3327096, 3347891, 3367477, 3386757, 3405443, 3422223, 3438501, 3454112, 3468631, 3482549, 3495575, 3507716, 3519363, 3530041, 3539933, 3549325, 3558098, 3565928, 3573352, 3580261, 3586417, 3592089, 3597219, 3601712, 3606017, 3609693, 3612873, 3615710, 3618243, 3620253, 3622077, 3623605, 3624857, 3625897, 3626716, 3627335, 3627833, 3628193, 3628427, 3628604, 3628711, 3628762, 3628790, 3628799, 3628800)
#
# , k11 = c(1, 11, 47, 121, 277, 565, 974, 1618, 2548, 3794, 5430, 7668, 10382, 13858, 18056, 23108, 29135, 36441, 44648, 54464, 65848, 78652, 92845, 109597, 127676, 148544, 171124, 196510, 223843, 254955, 287403, 323995, 363135, 406241, 451019, 501547, 553511, 610953, 670301, 735429, 803299, 877897, 953161, 1036105, 1122228, 1215286, 1309506, 1413368, 1518681, 1632877, 1749090, 1874422, 2002045, 2140515, 2278832, 2429566, 2581919, 2744859, 2908190, 3085090, 3263110, 3453608, 3643760, 3847514, 4052381, 4272633, 4489678, 4722594, 4956028, 5204156, 5449644, 5712530, 5973493, 6250695, 6523539, 6816137, 7104526, 7411262, 7710668, 8030252, 8345178, 8678412, 9002769, 9348585, 9686880, 10046970, 10393880, 10763840, 11125055, 11506717, 11876164, 12267556, 12646883, 13049009, 13434313, 13845399, 14241951, 14660041, 15058960, 15484804, 15894731, 16324563, 16734970, 17170868, 17587363, 18027449, 18444344, 18884724, 19305912, 19748160, 20168640, 20610888, 21032076, 21472456, 21889351, 22329437, 22745932, 23181830, 23592237, 24022069, 24431996, 24857840, 25256759, 25674849, 26071401, 26482487, 26867791, 27269917, 27649244, 28040636, 28410083, 28791745, 29152960, 29522920, 29869830, 30229920, 30568215, 30914031, 31238388, 31571622, 31886548, 32206132, 32505538, 32812274, 33100663, 33393261, 33666105, 33943307, 34204270, 34467156, 34712644, 34960772, 35194206, 35427122, 35644167, 35864419, 36069286, 36273040, 36463192, 36653690, 36831710, 37008610, 37171941, 37334881, 37487234, 37637968, 37776285, 37914755, 38042378, 38167710, 38283923, 38398119, 38503432, 38607294, 38701514, 38794572, 38880695, 38963639, 39038903, 39113501, 39181371, 39246499, 39305847, 39363289, 39415253, 39465781, 39510559, 39553665, 39592805, 39629397, 39661845, 39692957, 39720290, 39745676, 39768256, 39789124, 39807203, 39823955, 39838148, 39850952, 39862336, 39872152, 39880359, 39887665, 39893692, 39898744, 39902942, 39906418, 39909132, 39911370, 39913006, 39914252, 39915182, 39915826, 39916235, 39916523, 
39916679, 39916753, 39916789, 39916799, 39916800)
#
# , k12 = c(1, 12, 57, 161, 385, 832, 1523, 2629, 4314, 6678, 9882, 14397, 20093, 27582, 36931, 48605, 62595, 80232, 100456, 125210, 154227, 188169, 226295, 272179, 322514, 381283, 446640, 521578, 602955, 697449, 798012, 913234, 1037354, 1177139, 1325067, 1493942, 1670184, 1867627, 2075703, 2306597, 2547605, 2817918, 3095107, 3402876, 3723206, 4075092, 4436130, 4836594, 5245232, 5694249, 6155263, 6658390, 7171170, 7734985, 8304533, 8927791, 9562307, 10250749, 10946272, 11707175, 12472247, 13304674, 14143124, 15051520, 15964324, 16958207, 17951038, 19024576, 20103385, 21266520, 22428668, 23688490, 24941145, 26293113, 27640685, 29092979, 30538037, 32094364, 33635325, 35292663, 36939122, 38705429, 40450799, 42327667, 44179645, 46167953, 48128734, 50226064, 52293360, 54508939, 56686818, 59015668, 61303483, 63746140, 66141668, 68703444, 71211606, 73883239, 76497639, 79284492, 82008603, 84912335, 87739711, 90750133, 93683865, 96803338, 99840816, 103063901, 106199027, 109522404, 112757434, 116187490, 119511072, 123034744, 126446666, 130064197, 133565830, 137269085, 140848253, 144633119, 148294783, 152161902, 155889546, 159821171, 163617371, 167622510, 171480066, 175541648, 179449088, 183562195, 187525039, 191692873, 195691020, 199891634, 203924412, 208164174, 212229695, 216488881, 220574078, 224852631, 228953203, 233247651, 237351468, 241650132, 245753949, 250048397, 254148969, 258427522, 262512719, 266771905, 270837426, 275077188, 279109966, 283310580, 287308727, 291476561, 295439405, 299552512, 303459952, 307521534, 311379090, 315384229, 319180429, 323112054, 326839698, 330706817, 334368481, 338153347, 341732515, 345435770, 348937403, 352554934, 355966856, 359490528, 362814110, 366244166, 369479196, 372802573, 375937699, 379160784, 382198262, 385317735, 388251467, 391261889, 394089265, 396992997, 399717108, 402503961, 405118361, 407789994, 410298156, 412859932, 415255460, 417698117, 419985932, 422314782, 424492661, 426708240, 428775536, 430872866, 432833647, 434821955, 
436673933, 438550801, 440296171, 442062478, 443708937,
# 445366275, 446907236, 448463563, 449908621, 451360915, 452708487, 454060455, 455313110, 456572932, 457735080, 458898215, 459977024, 461050562, 462043393, 463037276, 463950080, 464858476, 465696926, 466529353, 467294425, 468055328, 468750851, 469439293, 470073809, 470697067, 471266615, 471830430, 472343210, 472846337, 473307351, 473756368, 474165006, 474565470, 474926508, 475278394, 475598724, 475906493, 476183682, 476453995, 476695003, 476925897, 477133973, 477331416, 477507658, 477676533, 477824461, 477964246, 478088366, 478203588, 478304151, 478398645, 478480022, 478554960, 478620317, 478679086, 478729421, 478775305, 478813431, 478847373, 478876390, 478901144, 478921368, 478939005, 478952995, 478964669, 478974018, 478981507, 478987203, 478991718, 478994922, 478997286, 478998971, 479000077, 479000768, 479001215, 479001439, 479001543, 479001588, 479001599, 479001600 )
#
# , k13 = c(1, 13, 68, 210, 527, 1197, 2324, 4168, 7119, 11429, 17517, 26225, 37812, 53230, 73246, 98816, 130483, 170725, 218750, 278034, 349136, 434162, 532482, 651024, 785982, 944022, 1124332, 1332640, 1565876, 1835792, 2132840, 2472812, 2848749, 3273357, 3735585, 4260527, 4827506, 5461252, 6147299, 6908609, 7725716, 8635460, 9600260, 10666252, 11804773, 13050503, 14365677, 15812701, 17335403, 18994955, 20742001, 22638493, 24624900, 26787112, 29032733, 31464927, 34008755, 36743621, 39579021, 42647201, 45817786, 49226378, 52752239, 56535435, 60435209, 64628147, 68927405, 73528499, 78274283, 83329815, 88504447, 94050417, 99720505, 105759011, 111937321, 118508917, 125224959, 132372517, 139644194, 147366078, 155251313, 163598355, 172068955, 181074075, 190212385, 199875487, 209687980, 220053214, 230566521, 241680167, 252905559, 264763303, 276775771, 289421809, 302176267, 315640063, 329231261, 343509837, 357915454, 373057790, 388317114, 404365328, 420470916, 437394874, 454438992, 472280042, 490183678, 508970736, 527836540, 547557794, 567333404, 588036304, 608771329, 630463117, 652127890, 674778950, 697468748, 721126694, 744732766, 769392312, 794014392, 819670692, 845236737, 871892593, 898464180, 926132356, 953650676, 982290898, 1010834369, 1040477655, 1069921254, 1100563830, 1131007339, 1162609975, 1193943276, 1226507722, 1258827639, 1292328257, 1325502938, 1359918362, 1394027869, 1429370035, 1464279071, 1500517059, 1536339992, 1573396522, 1609980791, 1647854021, 1685286706, 1723967698, 1762082365, 1801533261, 1840420643, 1880601675, 1920106583, 1960960701, 2001224218, 2042719638, 2083488859, 2125600829, 2167005742, 2209678334, 2251531986, 2294726538, 2337123023, 2380790291, 2423568572, 2467632034, 2510865295, 2555331665, 2598793469, 2643582407, 2687416596, 2732465154, 2776464125, 2821723625, 2865981806, 2911394478, 2955721182, 3001237104, 3045709215, 3091307829, 3135712971, 3181311585, 3225783696, 3271299618, 3315626322, 3361038994, 3405297175, 3450556675, 3494555646, 
3539604204, 3583438393, 3628227331, 3671689135, 3716155505,
# 3759388766, 3803452228, 3846230509, 3889897777, 3932294262, 3975488814, 4017342466, 4060015058, 4101419971, 4143531941, 4184301162, 4225796582, 4266060099, 4306914217, 4346419125, 4386600157, 4425487539, 4464938435, 4503053102, 4541734094, 4579166779, 4617040009, 4653624278, 4690680808, 4726503741, 4762741729, 4797650765, 4832992931, 4867102438, 4901517862, 4934692543, 4968193161, 5000513078, 5033077524, 5064410825, 5096013461, 5126456970, 5157099546, 5186543145, 5216186431, 5244729902, 5273370124, 5300888444, 5328556620, 5355128207, 5381784063, 5407350108, 5433006408, 5457628488, 5482288034, 5505894106, 5529552052, 5552241850, 5574892910, 5596557683, 5618249471, 5638984496, 5659687396, 5679463006, 5699184260, 5718050064, 5736837122, 5754740758, 5772581808, 5789625926, 5806549884, 5822655472, 5838703686, 5853963010, 5869105346, 5883510963, 5897789539, 5911380737, 5924844533, 5937598991, 5950245029, 5962257497, 5974115241, 5985340633, 5996454279, 6006967586, 6017332820, 6027145313, 6036808415, 6045946725, 6054951845, 6063422445, 6071769487, 6079654722, 6087376606, 6094648283, 6101795841, 6108511883, 6115083479, 6121261789, 6127300295, 6132970383, 6138516353, 6143690985, 6148746517, 6153492301, 6158093395, 6162392653, 6166585591, 6170485365, 6174268561, 6177794422, 6181203014, 6184373599, 6187441779, 6190277179, 6193012045, 6195555873, 6197988067, 6200233688, 6202395900, 6204382307, 6206278799, 6208025845, 6209685397, 6211208099, 6212655123, 6213970297, 6215216027, 6216354548, 6217420540, 6218385340, 6219295084, 6220112191, 6220873501, 6221559548, 6222193294, 6222760273, 6223285215, 6223747443, 6224172051, 6224547988, 6224887960, 6225185008, 6225454924, 6225688160, 6225896468, 6226076778, 6226234818, 6226369776, 6226488318, 6226586638, 6226671664, 6226742766, 6226802050, 6226850075, 6226890317, 6226921984, 6226947554, 6226967570, 6226982988, 6226994575, 6227003283, 6227009371, 6227013681, 6227016632, 6227018476, 6227019603, 6227020273, 6227020590, 6227020732, 
6227020787, 6227020799, 6227020800)
#
# , k14 = c(1, 14, 80, 269, 711, 1689, 3467, 6468, 11472, 19093, 30278, 46574, 69288, 99975, 141304, 195194, 264194, 352506, 462442, 598724, 766789, 970781, 1213870, 1507510, 1853680, 2260125, 2736501, 3291591, 3930026, 4668007, 5508108, 6466862, 7556159, 8787659, 10165645, 11724144, 13460539, 15392221, 17539134, 19922717, 22546063, 25447736, 28627069, 32116076, 35937108, 40106433, 44631074, 49573596, 54926631, 60716114, 66974508, 73740246, 81009240, 88845749, 97239223, 106246902, 115900686, 126216169, 137197091, 148953202, 161446731, 174730758, 188835459, 203837905, 219695178, 236524328, 254283795, 273083666, 292923813, 313860397, 335854799, 359112526, 383528656, 409202706, 436135896, 464473466, 494134210, 525276498, 557815202, 591946436, 627603800, 664907029, 703773267, 744486823, 786877234, 831103465, 877129675, 925182097, 975110533, 1027121161, 1081080881, 1137323422, 1195661689, 1256271970, 1319049120, 1384348268, 1451952010, 1522055063, 1594541080, 1669783989, 1747541228, 1828055758, 1911151548, 1997286462, 2086139682, 2177925841, 2272580839, 2370486063, 2471328513, 2575410222, 2682471831, 2793082385, 2906881741, 3024092956, 3144510886, 3268758800, 3396339981, 3527578003, 3662304885, 3800998837, 3943227695, 4089440734, 4239185132, 4393196954, 4551031331, 4712856765, 4878478438, 5048720892, 5222754969, 5401045094, 5583410846, 5770395123, 5961416258, 6157027619, 6356554732, 6561015163, 6769843465, 6983093805, 7200534248, 7423263710, 7650023569, 7881592853, 8117625307, 8358760439, 8604199870, 8854704639, 9109316970, 9369314835, 9633980748, 9903337745, 10177004917, 10456529218, 10740122230, 11028754748, 11321981370, 11620526571, 11923494567, 12231834199, 12544092637, 12862071155, 13184668352, 13511964024, 13843525611, 14181198310, 14522618329, 14869105782, 15220174133, 15576509168, 15936926462, 16302784406, 16672089744, 17047134658, 17426587171, 17810429228, 18198087372, 18591770156, 18988751460, 19390461912, 19796344325, 20207120401, 20621426516, 21040873172, 
21463087253, 21890649743, 22322106033, 22757217771, 23195600046,
# 23639594170, 24086026475, 24536477172, 24990465186, 25448639418, 25909641657, 26374985116, 26842266606, 27314012018, 27788960817, 28266602799, 28746609271, 29231436410, 29717689954, 30206932003, 30698971843, 31193949888, 31690902354, 32191012868, 32692174745, 33196629733, 33703478249, 34211544046, 34720969890, 35234031737, 35747617060, 36262719119, 36779697578, 37298186864, 37817722298, 38338904825, 38860175016, 39383211341, 39907644570, 40431821887, 40956454566, 41483109694, 42009225414, 42535209127, 43062242912, 43589145600, 44116048288, 44643082073, 45169065786, 45695181506, 46221836634, 46746469313, 47270646630, 47795079859, 48318116184, 48839386375, 49360568902, 49880104336, 50398593622, 50915572081, 51430674140, 51944259463, 52457321310, 52966747154, 53474812951, 53981661467, 54486116455, 54987278332, 55487388846, 55984341312, 56479319357, 56971359197, 57460601246, 57946854790, 58431681929, 58911688401, 59389330383, 59864279182, 60336024594, 60803306084, 61268649543, 61729651782, 62187826014, 62641814028, 63092264725, 63538697030, 63982691154, 64421073429, 64856185167, 65287641457, 65715203947, 66137418028, 66556864684, 66971170799, 67381946875, 67787829288, 68189539740, 68586521044, 68980203828, 69367861972, 69751704029, 70131156542, 70506201456, 70875506794, 71241364738, 71601782032, 71958117067, 72309185418, 72655672871, 72997092890, 73334765589, 73666327176, 73993622848, 74316220045, 74634198563, 74946457001, 75254796633, 75557764629, 75856309830, 76149536452, 76438168970, 76721761982, 77001286283, 77274953455, 77544310452, 77808976365, 78068974230, 78323586561, 78574091330, 78819530761, 79060665893, 79296698347, 79528267631, 79755027490, 79977756952, 80195197395, 80408447735, 80617276037, 80821736468, 81021263581, 81216874942, 81407896077, 81594880354, 81777246106, 81955536231, 82129570308, 82299812762, 82465434435, 82627259869, 82785094246, 82939106068, 83088850466, 83235063505, 83377292363, 83515986315, 83650713197, 83781951219, 83909532400, 
84033780314, 84154198244, 84271409459, 84385208815, 84495819369,
# 84602880978, 84706962687, 84807805137, 84905710361, 85000365359, 85092151518, 85181004738, 85267139652, 85350235442, 85430749972, 85508507211, 85583750120, 85656236137, 85726339190, 85793942932, 85859242080, 85922019230, 85982629511, 86040967778, 86097210319, 86151170039, 86203180667, 86253109103, 86301161525, 86347187735, 86391413966, 86433804377, 86474517933, 86513384171, 86550687400, 86586344764, 86620475998, 86653014702, 86684156990, 86713817734, 86742155304, 86769088494, 86794762544, 86819178674, 86842436401, 86864430803, 86885367387, 86905207534, 86924007405, 86941766872, 86958596022, 86974453295, 86989455741, 87003560442, 87016844469, 87029337998, 87041094109, 87052075031, 87062390514, 87072044298, 87081051977, 87089445451, 87097281960, 87104550954, 87111316692, 87117575086, 87123364569, 87128717604, 87133660126, 87138184767, 87142354092, 87146175124, 87149664131, 87152843464, 87155745137, 87158368483, 87160752066, 87162898979, 87164830661, 87166567056, 87168125555, 87169503541, 87170735041, 87171824338, 87172783092, 87173623193, 87174361174, 87174999609, 87175554699, 87176031075, 87176437520, 87176783690, 87177077330, 87177320419, 87177524411, 87177692476, 87177828758, 87177938694, 87178027006, 87178096006, 87178149896, 87178191225, 87178221912, 87178244626, 87178260922, 87178272107, 87178279728, 87178284732, 87178287733, 87178289511, 87178290489, 87178290931, 87178291120, 87178291186, 87178291199, 87178291200 )
#
# , k15 = c(1, 15, 93, 339, 946, 2344, 5067, 9845, 18094, 31210, 51135, 80879, 123856, 183350, 265744, 375782, 520770, 709108, 950935, 1254359, 1637783, 2110255, 2688261, 3392105, 4243753, 5253985, 6463435, 7887051, 9559689, 11508657, 13779635, 16385319, 19406949, 22847453, 26778757, 31237429, 36312890, 41988174, 48415169, 55581133, 63617482, 72531890, 82493993, 93449491, 105663309, 119038213, 133821033, 149981059, 167810258, 187138620, 208394580, 231407260, 256572630, 283728734, 313349422, 345140612, 379784963, 416871267, 457037763, 499992359, 546463298, 595886554, 649243982, 705940396, 766920856, 831552862, 900947933, 974276983, 1052930913, 1135866291, 1224452526, 1317816142, 1417501545, 1522137313, 1633652530, 1750626806, 1875052020, 2005336686, 2143665106, 2288248572, 2441639216, 2601691186, 2771087853, 2947714613, 3134569070, 3328885582, 3534148307, 3747528715, 3972688056, 4206327920, 4452435789, 4707707507, 4976502908, 5254730366, 5547265512, 5849894908, 6167966973, 6496524245, 6841251954, 7197208516, 7570606695, 7955492307, 8358702869, 8774325693, 9209487348, 9657140024, 10125565750, 10607269130, 11110947428, 11628498256, 12168723926, 12723609294, 13303228032, 13897378066, 14517038181, 15152582797, 15815095216, 16493452984, 17200382721, 17923779849, 18677052770, 19447720986, 20249039825, 21068309835, 21920989644, 22790961184, 23695090223, 24618800757, 25577947305, 26555930925, 27571664648, 28606831690, 29681188983, 30776084989, 31910591023, 33065874467, 34264718158, 35483254398, 36745418556, 38030320602, 39360005810, 40711195500, 42110524356, 43531199878, 45001319765, 46494257553, 48036654343, 49602075643, 51221875032, 52862604614, 54557065970, 56276716608, 58051331346, 59848489468, 61704800734, 63582981112, 65521450173, 67484389131, 69506528883, 71552497079, 73663855894, 75795896650, 77992481274, 80214974822, 82502403057, 84811883255, 87191972089, 89593082611, 92064881373, 94560883919, 97125402107, 99713005329, 102377610307, 105060302611, 107817686686, 
110599694856, 113456740182, 116333639168, 119291579167, 122267356121,
# 125323501236, 128401997238, 131558157109, 134734085833, 137997611218, 141274089126, 144635051739, 148017803651, 151483637626, 154964665476, 158536414603, 162120609581, 165794608949, 169485898871, 173262539499, 177052751993, 180940334728, 184834047000, 188819766650, 192821736664, 196913537154, 201013587060, 205213037672, 209416246916, 213716661616, 218026615728, 222428224181, 226835589231, 231347734832, 235855804736, 240461451056, 245075672864, 249785350011, 254493014069, 259306386598, 264111876662, 269020469253, 273929072733, 278932752466, 283931152738, 289039128373, 294131477475, 299325743006, 304517112400, 309806619906, 315081186550, 320465864608, 325829963244, 331299254515, 336756611895, 342309552544, 347844707934, 353492785526, 359109888388, 364830049809, 370533853771, 376336452468, 382110605480, 387994926455, 393843943991, 399797486177, 405725583879, 411748092537, 417737799943, 423839699258, 429894358406, 436050852136, 442177460900, 448399401827, 454577618889, 460862851875, 467097523711, 473433714049, 479729592211, 486115143213, 492451898587, 498897897209, 505281471971, 511760849379, 518195355931, 524718405991, 531183425467, 537750411835, 544250726707, 550846203604, 557385785810, 564007939322, 570567450178, 577227764133, 583810787025, 590480506935, 597092270467, 603784200787, 610403013525, 617114828578, 623745063632, 630461354816, 637109043600, 643828046362, 650470873262, 657203494738, 663846321638, 670565324400, 677213013184, 683929304368, 690559539422, 697271354475, 703890167213, 710582097533, 717193861065, 723863580975, 730446603867, 737106917822, 743666428678, 750288582190, 756828164396, 763423641293, 769923956165, 776490942533, 782955962009, 789479012069, 795913518621, 802392896029, 808776470791, 815222469413, 821559224787, 827944775789, 834240653951, 840576844289, 846811516125, 853096749111, 859274966173, 865496907100, 871623515864, 877780009594, 883834668742, 889936568057, 895926275463, 901948784121, 907876881823, 913830424009, 919679441545, 
925563762520, 931337915532, 937140514229, 942844318191, 948564479612,
# 954181582474, 959829660066, 965364815456, 970917756105, 976375113485, 981844404756, 987208503392, 992593181450, 997867748094, 1003157255600, 1008348624994, 1013542890525, 1018635239627, 1023743215262, 1028741615534, 1033745295267, 1038653898747, 1043562491338, 1048367981402, 1053181353931, 1057889017989, 1062598695136, 1067212916944, 1071818563264, 1076326633168, 1080838778769, 1085246143819, 1089647752272, 1093957706384, 1098258121084, 1102461330328, 1106660780940, 1110760830846, 1114852631336, 1118854601350, 1122840321000, 1126734033272, 1130621616007, 1134411828501, 1138188469129, 1141879759051, 1145553758419, 1149137953397, 1152709702524, 1156190730374, 1159656564349, 1163039316261, 1166400278874, 1169676756782, 1172940282167, 1176116210891, 1179272370762, 1182350866764, 1185407011879, 1188382788833, 1191340728832, 1194217627818, 1197074673144, 1199856681314, 1202614065389, 1205296757693, 1207961362671, 1210548965893, 1213113484081, 1215609486627, 1218081285389, 1220482395911, 1222862484745, 1225171964943, 1227459393178, 1229681886726, 1231878471350, 1234010512106, 1236121870921, 1238167839117, 1240189978869, 1242152917827, 1244091386888, 1245969567266, 1247825878532, 1249623036654, 1251397651392, 1253117302030, 1254811763386, 1256452492968, 1258072292357, 1259637713657, 1261180110447, 1262673048235, 1264143168122, 1265563843644, 1266963172500, 1268314362190, 1269644047398, 1270928949444, 1272191113602, 1273409649842, 1274608493533, 1275763776977, 1276898283011, 1277993179017, 1279067536310, 1280102703352, 1281118437075, 1282096420695, 1283055567243, 1283979277777, 1284883406816, 1285753378356, 1286606058165, 1287425328175, 1288226647014, 1288997315230, 1289750588151, 1290473985279, 1291180915016, 1291859272784, 1292521785203, 1293157329819, 1293776989934, 1294371139968, 1294950758706, 1295505644074, 1296045869744, 1296563420572, 1297067098870, 1297548802250, 1298017227976, 1298464880652, 1298900042307, 1299315665131, 1299718875693, 1300103761305, 
1300477159484, 1300833116046, 1301177843755, 1301506401027, 1301824473092,
# 1302127102488, 1302419637634, 1302697865092, 1302966660493, 1303221932211, 1303468040080, 1303701679944, 1303926839285, 1304140219693, 1304345482418, 1304539798930, 1304726653387, 1304903280147, 1305072676814, 1305232728784, 1305386119428, 1305530702894, 1305669031314, 1305799315980, 1305923741194, 1306040715470, 1306152230687, 1306256866455, 1306356551858, 1306449915474, 1306538501709, 1306621437087, 1306700091017, 1306773420067, 1306842815138, 1306907447144, 1306968427604, 1307025124018, 1307078481446, 1307127904702, 1307174375641, 1307217330237, 1307257496733, 1307294583037, 1307329227388, 1307361018578, 1307390639266, 1307417795370, 1307442960740, 1307465973420, 1307487229380, 1307506557742, 1307524386941, 1307540546967, 1307555329787, 1307568704691, 1307580918509, 1307591874007, 1307601836110, 1307610750518, 1307618786867, 1307625952831, 1307632379826, 1307638055110, 1307643130571, 1307647589243, 1307651520547, 1307654961051, 1307657982681, 1307660588365, 1307662859343, 1307664808311, 1307666480949, 1307667904565, 1307669114015, 1307670124247, 1307670975895, 1307671679739, 1307672257745, 1307672730217, 1307673113641, 1307673417065, 1307673658892, 1307673847230, 1307673992218, 1307674102256, 1307674184650, 1307674244144, 1307674287121, 1307674316865, 1307674336790, 1307674349906, 1307674358155, 1307674362933, 1307674365656, 1307674367054, 1307674367661, 1307674367907, 1307674367985, 1307674367999, 1307674368000 )
# )
#
# .PageDF <- lapply(.PageDF, function(x) c(x[1], diff(x)) / tail(x,1))
# save(.PageDF, file="C:/Users/Andri/Documents/R/sources/DescTools/MakeDescToolsBase/PageDF.rda")
# load(file="C:/Users/Andri/Documents/R/Projects/load/PageDF.rda")
# load(file="C:/Users/Andri/Documents/R/Projects/DescTools/load/wdConst.rda")
# load(file="C:/Users/Andri/Documents/R/sources/DescTools/periodic.rda")
# Declare names that are defined in the package's data files or created at
# load time, so that 'R CMD check' does not flag them as undefined globals.
# (This has no runtime effect beyond registering the names with codetools.)
utils::globalVariables(c("d.units","d.periodic","d.prefix",
                         "day.name","day.abb","wdConst",
                         "fmt", "pal",
                         "hred","hblue","horange","hyellow","hecru","hgreen",
                         "tarot","cards","roulette"))
# hred <- unname(Pal("Helsana")[1])
# horange <- unname(Pal("Helsana")[2])
# hyellow <- unname(Pal("Helsana")[3])
# hecru <- unname(Pal("Helsana")[4])
# hblue <- unname(Pal("Helsana")[6])
# hgreen <- unname(Pal("Helsana")[7])
#
# save(x=hred, file='C:/Users/andri/Documents/R/Projects/DescTools/data/hred.rda')
# save(x=horange, file='C:/Users/andri/Documents/R/Projects/DescTools/data/horange.rda')
# save(x=hyellow, file='C:/Users/andri/Documents/R/Projects/DescTools/data/hyellow.rda')
# save(x=hecru, file='C:/Users/andri/Documents/R/Projects/DescTools/data/hecru.rda')
# save(x=hblue, file='C:/Users/andri/Documents/R/Projects/DescTools/data/hblue.rda')
# save(x=hgreen, file='C:/Users/andri/Documents/R/Projects/DescTools/data/hgreen.rda')
# source( "C:/Users/Andri/Documents/R/sources/DescTools/wdConst.r" )
# Base functions ====
## base: calculus
# we have month.name and month.abb in base R, but nothing similar for day names
# in english (use format(ISOdate(2000, 1:12, 1), "%B") for months in current locale)
# day.name <- c("Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday")
# day.abb <- c("Mon","Tue","Wed","Thu","Fri","Sat","Sun")
# internal: golden section constant phi = (1 + sqrt(5)) / 2 ~ 1.6180339887
gold_sec_c <- (1+sqrt(5)) / 2
# tarot <- structure(list(rank = c("1", "2", "3", "4", "5", "6", "7", "8",
# "9", "10", "page", "knight", "queen", "king", "1", "2", "3",
# "4", "5", "6", "7", "8", "9", "10", "page", "knight", "queen",
# "king", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "page",
# "knight", "queen", "king", "1", "2", "3", "4", "5", "6", "7",
# "8", "9", "10", "page", "knight", "queen", "king", "0", "1",
# "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13",
# "14", "15", "16", "17", "18", "19", "20", "21"), suit = c("wands",
# "wands", "wands", "wands", "wands", "wands", "wands", "wands",
# "wands", "wands", "wands", "wands", "wands", "wands", "coins",
# "coins", "coins", "coins", "coins", "coins", "coins", "coins",
# "coins", "coins", "coins", "coins", "coins", "coins", "cups",
# "cups", "cups", "cups", "cups", "cups", "cups", "cups", "cups",
# "cups", "cups", "cups", "cups", "cups", "swords", "swords", "swords",
# "swords", "swords", "swords", "swords", "swords", "swords", "swords",
# "swords", "swords", "swords", "swords", "trumps", "trumps", "trumps",
# "trumps", "trumps", "trumps", "trumps", "trumps", "trumps", "trumps",
# "trumps", "trumps", "trumps", "trumps", "trumps", "trumps", "trumps",
# "trumps", "trumps", "trumps", "trumps", "trumps"), desc = c(NA,
# NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
# NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
# NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
# NA, NA, NA, NA, NA, NA, NA, "The Fool", "The Magician", "The High Priestess",
# "The Empress", "The Emperor", "The Hierophant", "The Lovers",
# "The Chariot", "Strength", "The Hermit", "Wheel of Fortune",
# "Justice", "The Hanged Man", "Death", "Temperance", "The Devil",
# "The Tower", "The Star", "The Moon", "The Sun", "Judgment", "The World"
# )), .Names = c("rank", "suit", "desc"), out.attrs = structure(list(
# dim = structure(c(14L, 4L), .Names = c("rank", "suit")),
# dimnames = structure(list(rank = c("rank=1", "rank=2", "rank=3",
# "rank=4", "rank=5", "rank=6", "rank=7", "rank=8", "rank=9",
# "rank=10", "rank=page", "rank=knight", "rank=queen", "rank=king"
# ), suit = c("suit=wands", "suit=coins", "suit=cups", "suit=swords"
# )), .Names = c("rank", "suit"))), .Names = c("dim", "dimnames"
# )), row.names = c(NA, 78L), class = "data.frame")
#
#
# cards <- structure(list(rank = structure(c(1L, 2L, 3L, 4L, 5L, 6L, 7L,
# 8L, 9L, 10L, 11L, 12L, 13L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L,
# 10L, 11L, 12L, 13L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L,
# 11L, 12L, 13L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L, 11L,
# 12L, 13L), .Label = c("2", "3", "4", "5", "6", "7", "8", "9",
# "10", "J", "Q", "K", "A"), class = "factor"), suit = structure(c(1L,
# 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L,
# 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 3L, 3L, 3L,
# 3L, 3L, 3L, 3L, 3L, 3L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L,
# 4L, 4L, 4L), .Label = c("club", "diamond", "heart", "spade"), class = "factor")), .Names = c("rank",
# "suit"), out.attrs = structure(list(dim = structure(c(13L, 4L
# ), .Names = c("rank", "suit")), dimnames = structure(list(rank = c("rank=2",
# "rank=3", "rank=4", "rank=5", "rank=6", "rank=7", "rank=8", "rank=9",
# "rank=10", "rank=J", "rank=Q", "rank=K", "rank=A"), suit = c("suit=club",
# "suit=diamond", "suit=heart", "suit=spade")), .Names = c("rank",
# "suit"))), .Names = c("dim", "dimnames")), class = "data.frame", row.names = c(NA, -52L))
#
#
# roulette <- structure(list(num = structure(c(1L, 20L, 24L, 30L, 5L, 22L,
# 35L, 23L, 11L, 16L, 37L, 26L, 7L, 14L, 2L, 28L, 9L, 18L, 33L,
# 3L, 17L, 36L, 25L, 4L, 31L, 6L, 21L, 34L, 29L, 10L, 19L, 13L,
# 15L, 32L, 12L, 8L, 27L), .Label = c("0", "1", "10", "11", "12",
# "13", "14", "15", "16", "17", "18", "19", "2", "20", "21", "22",
# "23", "24", "25", "26", "27", "28", "29", "3", "30", "31", "32",
# "33", "34", "35", "36", "4", "5", "6", "7", "8", "9"), class = "factor"),
# col = structure(c(2L,
# 1L, 3L, 1L, 3L, 1L, 3L, 1L, 3L, 1L, 3L, 1L, 3L, 1L, 3L, 1L,
# 3L, 1L, 3L, 1L, 3L, 1L, 3L, 1L, 3L, 1L, 3L, 1L, 3L, 1L, 3L,
# 1L, 3L, 1L, 3L, 1L, 3L), .Label = c("black", "white", "red"
# ), class = "factor")), .Names = c("num", "col"
# ), row.names = c(NA, -37L), class = "data.frame")
#
# save(tarot, file="tarot.rda")
# save(cards, file="cards.rda")
# save(roulette, file="roulette.rda")
# Define some alias(es)
# N(): short alias for as.numeric(), used internally as a compact coercion.
N <- as.numeric
## This is not exported, as it would mask the base function;
# but it would be very handy if the base function were changed accordingly.
as.Date.numeric <- function (x, origin, ...) {
  # Convert a number of days to a Date, counting from 'origin'.
  # When 'origin' is not supplied it defaults to the Unix epoch, 1970-01-01.
  base_date <- if (missing(origin)) "1970-01-01" else origin
  as.Date(base_date, ...) + x
}
Primes <- function (n) {
  # All prime numbers up to max(n), found with a sieve of Eratosthenes.
  # (Based on sfsmisc: Bill Venables; speedup by Martin Maechler using
  # logicals and integers.)
  limit <- max(n)
  if (limit <= 1)
    return(integer(0))

  is_prime <- rep.int(TRUE, limit)
  is_prime[1] <- FALSE                 # 1 is not prime

  upper <- as.integer(sqrt(limit))     # only sieve up to sqrt(limit)
  limit <- as.integer(limit)
  for (p in seq_len(upper)) {
    if (is_prime[p])
      is_prime[seq.int(p * p, limit, p)] <- FALSE
  }
  which(is_prime)
}
Factorize <- function (n) {
  # Prime factorization of each element of n.
  # Returns a named list (names = the input numbers); each element is a
  # two-column matrix with the prime factors 'p' and their multiplicities 'm'.
  # Factorize <- function (n, verbose = FALSE) {
  # Source sfsmisc: Martin Maechler, Jan. 1996.
  if (all(n < .Machine$integer.max))
    n <- as.integer(n)
  else {
    # beyond integer range we can only round and hope the doubles are exact
    warning("factorizing large int ( > maximal integer )")
    n <- round(n)
  }
  N <- length(n)
  # primes up to sqrt(max(n)) suffice to find all prime divisors
  M <- as.integer(sqrt(max(n)))
  k <- length(pr <- Primes(M))
  # nDp[j, i] is TRUE when prime pr[j] divides n[i]
  nDp <- outer(pr, n, FUN = function(p, n) n%%p == 0)
  res <- vector("list", length = N)
  names(res) <- n
  for (i in 1:N) {
    nn <- n[i]
    if (any(Dp <- nDp[, i])) {
      nP <- length(pfac <- pr[Dp])
      # if (verbose) cat(nn, " ")
    }
    else {
      # no small prime divides nn, so nn itself is prime (or 1)
      res[[i]] <- cbind(p = nn, m = 1)
      # if (verbose) cat("direct prime", nn, "\n")
      next
    }
    # repeatedly divide out the product of the currently active factors,
    # bumping multiplicities; a final non-trivial quotient is a new prime
    m.pr <- rep(1, nP)
    Ppf <- prod(pfac)
    while (1 < (nn <- nn%/%Ppf)) {
      Dp <- nn%%pfac == 0
      if (any(Dp)) {
        m.pr[Dp] <- m.pr[Dp] + 1
        Ppf <- prod(pfac[Dp])
      }
      else {
        # remaining quotient has no known factor: it is itself prime
        pfac <- c(pfac, nn)
        m.pr <- c(m.pr, 1)
        break
      }
    }
    res[[i]] <- cbind(p = pfac, m = m.pr)
  }
  res
}
GCD <- function(..., na.rm = FALSE) {
  # Greatest common divisor of all (integer-valued) numbers passed in ... .
  #
  # Arguments:
  #   ...   : numeric scalars/vectors; flattened into a single vector
  #   na.rm : if TRUE drop NAs before computing, otherwise any NA yields NA
  #
  # Returns a single number: the GCD (0 if all values are 0).
  # Stops if fewer than 2 values are given or any value is not whole.

  x <- unlist(list(...), recursive=TRUE)

  if(na.rm) x <- x[!is.na(x)]
  if(anyNA(x)) return(NA)

  stopifnot(is.numeric(x))

  # Fix: reduce the element-wise whole-number check with any(); a condition
  # of length > 1 in if() is an error as of R 4.2, so the old code failed
  # for every input of length >= 2.
  if (any(floor(x) != ceiling(x)) || length(x) < 2)
    stop("Argument 'x' must be an integer vector of length >= 2.")

  x <- x[x != 0]
  n <- length(x)

  if (n == 0) {
    g <- 0
  } else if (n == 1) {
    g <- x
  } else {
    # pairwise left fold via the compiled routine; gcd(a, b, c) = gcd(gcd(a, b), c).
    # Stop early once the running GCD reaches 1 — it can only stay 1.
    g <- .Call("_DescTools_compute_GCD", PACKAGE = "DescTools", x[1], x[2])
    if (n > 2) {
      for (i in 3:n) {
        g <- .Call("_DescTools_compute_GCD", PACKAGE = "DescTools", g, x[i])
        if (g == 1) break
      }
    }
  }

  return(g)
}
LCM <- function(..., na.rm = FALSE) {
  # Least common multiple of all (integer-valued) numbers passed in ... .
  #
  # Arguments:
  #   ...   : numeric scalars/vectors; flattened into a single vector
  #   na.rm : if TRUE drop NAs before computing, otherwise any NA yields NA
  #
  # Returns a single number: the LCM (0 if all values are 0).
  # Stops if fewer than 2 values are given or any value is not whole.

  x <- unlist(list(...), recursive=TRUE)

  if(na.rm) x <- x[!is.na(x)]
  if(anyNA(x)) return(NA)

  stopifnot(is.numeric(x))

  # Fix: reduce the element-wise whole-number check with any(); a condition
  # of length > 1 in if() is an error as of R 4.2, so the old code failed
  # for every input of length >= 2.
  if (any(floor(x) != ceiling(x)) || length(x) < 2)
    stop("Argument 'x' must be an integer vector of length >= 2.")

  x <- x[x != 0]
  n <- length(x)

  if (n == 0) {
    l <- 0
  } else if (n == 1) {
    l <- x
  } else {
    # pairwise left fold via the compiled routine; lcm(a, b, c) = lcm(lcm(a, b), c)
    l <- .Call("_DescTools_compute_LCM", PACKAGE = "DescTools", x[1], x[2])
    if (n > 2) {
      for (i in 3:n) {
        l <- .Call("_DescTools_compute_LCM", PACKAGE = "DescTools", l, x[i])
      }
    }
  }

  return(l)
}
DigitSum <- function(x)
  # Sum of the decimal digits of each element of x, e.g. DigitSum(124) = 7.
  sapply(x, function(z) {
    powers <- 10^(seq_len(nchar(z)) - 1)   # 1, 10, 100, ... one per digit
    sum(floor(z / powers) %% 10)
  })
CombN <- function(x, m, repl=FALSE, ord=FALSE){
  # Number of ways to draw m elements out of x, for the four combinatoric
  # cases: with/without replacement (repl) and ordered/unordered (ord).
  n <- length(x)
  if (repl) {
    # with replacement: n^m ordered tuples, choose(n+m-1, m) multisets
    if (ord) n^m else choose(n + m - 1, m)
  } else {
    if (ord) {
      # ordered without replacement: n! / (n-m)!,
      # computed on the log scale to avoid numeric overflow
      exp(lgamma(n + 1) - lgamma(n - m + 1))
    } else {
      choose(n, m)
    }
  }
}
Permn <- function(x, sort = FALSE) {
  # All permutations of the elements of x, returned one per row of a matrix.
  # Duplicated input elements yield only the unique permutations.
  # If sort = TRUE the rows are sorted via DescTools' Sort().
  # by F. Leisch
  n <- length(x)
  if (n == 1)
    return(matrix(x))
  # Andri: why should we need that??? ...
  # else if (n < 2)
  # stop("n must be a positive integer")
  # build the index matrix z iteratively: permutations of 1:i are generated
  # from the permutations of 1:(i-1) by cyclically shifting column positions
  z <- matrix(1)
  for (i in 2:n) {
    y <- cbind(z, i)
    a <- c(1:i, 1:(i - 1))
    z <- matrix(0, ncol = ncol(y), nrow = i * nrow(y))
    z[1:nrow(y), ] <- y
    for (j in 2:i - 1) {
      z[j * nrow(y) + 1:nrow(y), ] <- y[, a[1:i + j]]
    }
  }
  dimnames(z) <- NULL
  # map the 1..n index permutations back onto the actual values of x
  m <- apply(z, 2, function(i) x[i])
  if(any(duplicated(x)))
    m <- unique(m)
  if(sort) m <- Sort(m)
  return(m)
}
CombSet <- function(x, m, repl=FALSE, ord=FALSE, as.list=FALSE) {
  # Enumerate the samples of size m drawn from x for the four combinatoric
  # cases (with/without replacement, ordered/unordered).
  # Returns a matrix with one sample per row, a list of such matrices when m
  # has several elements, or a flat list of samples when as.list = TRUE.
  if(length(m)>1){
    # several sample sizes requested: recurse once per size
    res <- lapply(m, function(i) CombSet(x=x, m=i, repl=repl, ord=ord))
  } else {
    # generate the samples for the 4 combinatoric cases
    if(repl){
      # ordered with replacement: full cartesian product of m copies of x
      res <- as.matrix(do.call(expand.grid, as.list(as.data.frame(replicate(m, x)))))
      dimnames(res) <- NULL
      if(!ord){
        # unordered: sort each row and drop duplicate rows
        res <- unique(t(apply(res, 1, sort)))
      }
    } else {
      if(ord){
        # without replacement, ordered: permute every m-combination (Permn)
        res <- do.call(rbind, combn(x, m=m, FUN=Permn, simplify = FALSE))
      } else {
        res <- t(combn(x, m))
      }
    }
  }
  if(as.list){
    # Alternative: we could flatten the whole list
    # and now flatten the list of lists into one list
    # lst <- split(unlist(lst), rep(1:length(idx <- rapply(lst, length)), idx))
    if(is.list(res)){
      res <- do.call(c, lapply(res,
                               function(x){ as.list(as.data.frame(t(x), stringsAsFactors = FALSE))}))
    } else {
      res <- as.list(as.data.frame(t(res), stringsAsFactors = FALSE))
    }
    names(res) <- NULL
  }
  return(res)
}
# CombSet(x, m, repl=TRUE, ord=FALSE)
# CombSet(x, m, repl=TRUE, ord=TRUE)
# CombSet(x, m, repl=FALSE, ord=TRUE)
# CombSet(x, m, repl=FALSE, ord=FALSE)
CombPairs <- function(x, y = NULL) {
  # All pairwise combinations: of the elements of x alone (one pair per row),
  # or — when y is supplied — the full cross of x with y (expand.grid).
  if (missing(y)) {
    pairs <- combn(x, 2)
    data.frame(t(pairs), stringsAsFactors = FALSE)
  } else {
    expand.grid(x, y, stringsAsFactors = FALSE)
  }
}
Fibonacci <- function(n) {
  # n-th Fibonacci number(s), 0-based: Fibonacci(0) = 0, Fibonacci(1) = 1, ...
  # n may be a vector; the sequence is built up to max(n) once.
  if (!is.numeric(n) || !IsWhole(n) || n < 0)
    stop("Argument 'n' must be integer >= 0.")

  top <- max(n)
  if (top == 0) return(0)
  if (top == 1) return(c(0, 1)[n + 1])
  if (top == 2) return(c(0, 1, 1)[n + 1])

  fib <- c(0, 1, 1, rep(NA, top - 3))
  for (i in 4:(top + 1))
    fib[i] <- fib[i - 1] + fib[i - 2]
  fib[n + 1]
}
### M^k for a matrix M and non-negative integer 'k'
## Matrixpower
# Re-export of the matrix power operator from the 'expm' package.
"%^%" <- expm::"%^%"
Vigenere <- function(x, key = NULL, decrypt = FALSE) {
  # Vigenere cipher over the fixed alphabet A-Z, a-z, 0-9.
  # The alphabet is deliberately hard-coded: you may remember your password
  # but lose a custom character-list definition.
  # If no key is given, it is asked for interactively via PasswordDlg().
  charlist <- c(LETTERS, letters, 0:9)

  if (is.null(key)) key <- PasswordDlg()

  # 1-based modular wrap: wrap1(1:20, 6) -> 1 2 3 4 5 6 1 2 3 4 5 6 ...
  wrap1 <- function(v, n) ((v - 1) %% n) + 1

  # map a string to 1-based positions in charlist, silently dropping
  # characters outside the alphabet
  to_ints <- function(s) {
    chars <- strsplit(s, "")[[1]]
    as.integer(Filter(Negate(is.na), factor(chars, levels = charlist)))
  }

  xi <- to_ints(x)
  ki <- rep(to_ints(key), len = length(xi)) - 1   # key shifts, recycled
  shift <- if (decrypt) -ki else ki
  paste(collapse = "", charlist[wrap1(xi + shift, length(charlist))])
}
Winsorize <- function(x, minval = NULL, maxval = NULL,
                      probs=c(0.05, 0.95), na.rm = FALSE) {
  # Winsorize x: values below minval are raised to minval, values above
  # maxval are lowered to maxval. Bounds that are not supplied are taken
  # from the quantiles of x given by 'probs'.
  # Following an idea from Gabor Grothendieck,
  # http://r.789695.n4.nabble.com/how-to-winsorize-data-td930227.html
  # NAs in x are left untouched; na.rm only affects the quantile step.
  # (Direct replacement is faster than the pmax(pmin(x, maxval), minval) form.)
  if (is.null(minval) || is.null(maxval)) {
    bounds <- quantile(x = x, probs = probs, na.rm = na.rm)
    if (is.null(minval)) minval <- bounds[1]
    if (is.null(maxval)) maxval <- bounds[2]
  }

  x[x < minval] <- minval
  x[x > maxval] <- maxval

  return(x)
  # see also Andreas Alfons (KU Leuven): robustHD::winsorize
}
Trim <- function(x, trim = 0.1, na.rm = FALSE){
  # Trim the most extreme values from x (as mean(x, trim=) would drop them).
  # trim < 1 is interpreted as a fraction of n, trim >= 1 as an absolute
  # count of values to cut from EACH end. The trimmed result keeps the
  # original (unsorted) order of the remaining values; the indices of the
  # removed elements are attached as attribute "trim".
  if (na.rm) x <- x[!is.na(x)]
  if (!is.numeric(trim) || length(trim) != 1L)
    stop("'trim' must be numeric of length one")
  n <- length(x)
  if (trim > 0 && n) {
    if (is.complex(x))
      stop("trim is not defined for complex data")
    if (anyNA(x))
      return(NA_real_)
    if (trim >= 0.5 && trim < 1)
      return(NA_real_)
    if(trim < 1)
      # fractional trim: cut floor(n * trim) values from each end
      lo <- floor(n * trim) + 1
    else{
      # absolute trim: cut 'trim' values from each end
      lo <- trim + 1
      if (trim >= (n/2))
        return(NA_real_)
    }
    hi <- n + 1 - lo
    # x <- sort.int(x, partial = unique(c(lo, hi)))[lo:hi]
    # sort with index.return so we can both select the kept range [lo, hi]
    # and report which original positions were trimmed away
    res <- sort.int(x, index.return = TRUE)
    trimi <- res[["ix"]][c(1:(lo-1), (hi+1):length(x))]
    # x <- res[["x"]][order(res[["ix"]])[lo:hi]]
    # restore the original relative order of the kept values
    x <- res[["x"]][lo:hi][order(res[["ix"]][lo:hi])]
    attr(x, "trim") <- trimi
  }
  return(x)
}
RobScale <- function(x, center = TRUE, scale = TRUE){
  # Robust standardization of the columns of x:
  # center by the column medians, scale by the column MADs.
  m <- as.matrix(x)
  if (center) {
    med <- apply(m, 2, median, na.rm = TRUE)
    m <- scale(m, center = med, scale = FALSE)
  }
  if (scale) {
    s <- apply(m, 2, mad, na.rm = TRUE)
    m <- scale(m, center = FALSE, scale = s)
  }
  m
}
MoveAvg <- function(x, order, align = c("center","left","right"),
                    endrule = c("NA", "keep", "constant")){
  # Moving average of x with window length 'order'.
  #   align   : "center" = window centered on each point (even orders use
  #             half weights on the two outermost points),
  #             "right"  = window of the current and preceding values,
  #             "left"   = window of the current and following values
  #   endrule : how to fill the edge positions the filter cannot compute:
  #             "NA" leaves them NA, "keep" copies the original x values,
  #             "constant" repeats the nearest fully computed value
  n <- length(x)
  align = match.arg(align)
  switch(align,
         "center" = {
           # idx: edge positions without a full window; idx_const: nearest
           # positions with a complete window, used by endrule = "constant"
           idx <- c(1:(order %/% 2), (n-order %/% 2+1):n)
           idx_const <- c(rep((order %/% 2)+1, order %/% 2),
                          rep(n-(order %/% 2), order %/% 2))
           if(order %% 2 == 1){ # order is odd
             z <- filter(x, rep(1/order, order), sides=2)
           } else { # order is even
             z <- filter(x, c(1/(2*order), rep(1/order, order-1), 1/(2*order)), sides=2)
           } }
         , "right" = {
           idx <- 1:(order-1)
           idx_const <- order
           z <- filter(x, rep(1/order, order), sides=1)
         }
         , "left" = {
           idx <- (n-order+2):n
           idx_const <- n-order+1
           # forward-looking window: reverse, filter one-sided, reverse back
           z <- rev(filter(rev(x), rep(1/order, order), sides=1))
         }
  )
  endrule <- match.arg(endrule)
  switch(endrule,
         "NA" = {},
         keep = {z[idx] <- x[idx]},
         constant = {z[idx] <- z[idx_const]})
  # filter() returns a ts object; strip that unless x itself was a ts
  if(!is.ts(x)) attr(z, "tsp") <- NULL
  class(z) <- class(x)
  return(z)
}
LinScale <- function (x, low = NULL, high = NULL, newlow = 0, newhigh = 1) {
  # Linearly rescale the columns of x from [low, high] to [newlow, newhigh].
  # Missing 'low'/'high' default to the column minima/maxima; all four
  # bounds are recycled to one value per column.
  m <- as.matrix(x)
  p <- ncol(m)

  low <- if (is.null(low)) {
    apply(m, 2, min, na.rm = TRUE)
  } else {
    rep(low, length.out = p)
  }
  high <- if (is.null(high)) {
    apply(m, 2, max, na.rm = TRUE)
  } else {
    rep(high, length.out = p)
  }

  newlow  <- rep(newlow,  length.out = p)
  newhigh <- rep(newhigh, length.out = p)

  # solve the linear map new = (old - ctr) / scl columnwise
  ctr <- (low * newhigh - high * newlow) / (newhigh - newlow)
  scl <- (high - low) / (newhigh - newlow)
  scale(m, center = ctr, scale = scl)
}
Large <- function (x, k = 5, unique = FALSE, na.last = NA) {
  # The k largest values of x.
  # unique = TRUE : returns a list with the k largest DISTINCT values and
  #                 their frequencies (via the compiled top_n routine —
  #                 presumably value/frequency pairs; confirm in C source).
  # unique = FALSE: returns the k largest elements themselves (top_i).
  n <- length(x)
  x <- x[!is.na(x)]
  na_n <- n - length(x)   # number of NAs removed up front
  # na.last
  # for controlling the treatment of NAs. If TRUE, missing values in the data are put last;
  # if FALSE, they are put first;
  # if NA, they are removed.
  if (unique==TRUE) {
    res <- .Call("_DescTools_top_n", PACKAGE = "DescTools", x, k)
    if(na_n > 0){
      if(!is.na(na.last)){
        if(na.last==FALSE) {
          # prepend the NA group, then keep only the last k entries
          res$value <- tail(c(NA, res$value), k)
          res$frequency <- tail(c(na_n, res$frequency), k)
        }
        if(na.last==TRUE){
          res$value <- tail(c(res$value, NA), k)
          res$frequency <- tail(c(res$frequency, na_n), k)
        }
      }
    }
    # the C code works on the underlying codes; map back to the input's type
    if(is.factor(x))
      res$value <- levels(x)[res$value]
    else
      class(res$value) <- class(x)
  } else {
    # do not allow k be bigger than n
    k <- min(k, n)
    res <- x[.Call("_DescTools_top_i", PACKAGE = "DescTools", x, k)]
    if(!is.na(na.last)){
      if(na.last==FALSE)
        res <- tail(c(rep(NA, na_n), res), k)
      if(na.last==TRUE)
        res <- tail(c(res, rep(NA, na_n)), k)
    }
  }
  return(res)
}
# old version, replaced 0.99.17/13.5.2016
#
# Large <- function (x, k = 5, unique = FALSE, na.rm = FALSE) {
#
# if (na.rm)
# x <- x[!is.na(x)]
#
# if (unique==TRUE) {
# ux <- unique(x)
# # un <- length(ux)
# un <- sum(!is.na(ux))
# minval <- sort(ux, partial=max((un-k+1), 1):un, na.last = TRUE)[max((un-k+1),1)]
#
# # we are using the rationale of rle here, as it turned out to be the fastest approach
# x <- sort(x[x>=minval])
# n <- length(x)
# if (n == 0L)
# res <- list(lengths = integer(), values = x)
#
# y <- x[-1L] != x[-n]
# i <- c(which(y | is.na(y)), n)
# res <- list(lengths = diff(c(0L, i)), values = x[i])
#
# # res <- unclass(rle(sort(x[x>=minval])))
# }
# else {
# # n <- length(x)
# n <- sum(!is.na(x))
# res <- sort(x, partial=max((n-k+1),1):n, na.last = TRUE)[max((n-k+1),1):n]
# # lst <- as.vector(unlist(lapply(lst, "[", "val")))
# # http://stackoverflow.com/questions/15659783/why-does-unlist-kill-dates-in-r
#
# # faster alternative (but check NA-handling first):
# # res <- x[.Call("_DescTools_top_index", PACKAGE = "DescTools", x, k)]
#
# }
# return(res)
# }
Small <- function (x, k = 5, unique = FALSE, na.last = NA) {
  # Returns the k smallest values of x (counterpart of Large()).
  # unique = TRUE : the k smallest *distinct* values plus their
  #                 frequencies (compiled routine bottom_n);
  # unique = FALSE: the k smallest elements including duplicates
  #                 (compiled routine bottom_i returns their indices).
  # na.last: TRUE  - missing values are appended at the end,
  #          FALSE - missing values are put first,
  #          NA    - missing values are removed (default).
  n <- length(x)
  x <- x[!is.na(x)]
  na_n <- n - length(x)   # number of NAs that were removed

  # na.last
  # for controlling the treatment of NAs. If TRUE, missing values in the data are put last;
  # if FALSE, they are put first;
  # if NA, they are removed.

  if (unique==TRUE) {
    res <- .Call("_DescTools_bottom_n", PACKAGE = "DescTools", x, k)

    if(na_n > 0){
      # re-insert the NA block with its frequency, keeping at most k entries
      if(!is.na(na.last)){
        if(na.last==FALSE) {
          k <- min(length(res$value) + 1, k)
          res$value <- c(NA, res$value)[1:k]
          res$frequency <- c(na_n, res$frequency)[1:k]
        }
        if(na.last==TRUE){
          k <- min(length(res$value) + 1, k)
          res$value <- c(res$value, NA)[1:k]
          res$frequency <- c(res$frequency, na_n)[1:k]
        }
      }
    }

    if(is.factor(x))
      # map integer codes back to the factor labels
      res$value <- levels(x)[res$value]
    else
      # restore the class (e.g. Date) that the compiled call stripped
      class(res$value) <- class(x)

  } else {
    # do not allow k be bigger than n
    k <- min(k, n)

    res <- rev(x[.Call("_DescTools_bottom_i", PACKAGE = "DescTools", x, k)])

    if(!is.na(na.last)){
      if(na.last==FALSE)
        res <- c(rep(NA, na_n), res)[1:k]
      if(na.last==TRUE)
        res <- c(res, rep(NA, na_n))[1:k]
    }
  }

  return(res)
}
# Small <- function (x, k = 5, unique = FALSE, na.rm = FALSE) {
#
# if (na.rm)
# x <- x[!is.na(x)]
#
# if (unique==TRUE) {
# ux <- unique(x)
# un <- length(ux)
# maxval <- sort(ux, partial = min(k, un))[min(k, un)]
#
# # we are using the rationale of rle here, as it turned out to be the fastest approach
# x <- sort(x[x<=maxval])
# n <- length(x)
# if (n == 0L)
# res <- list(lengths = integer(), values = x)
#
# y <- x[-1L] != x[-n]
# i <- c(which(y | is.na(y)), n)
# res <- list(lengths = diff(c(0L, i)), values = x[i])
#
# # res <- unclass(rle(sort(x[x<=maxval])))
# }
# else {
# n <- length(x)
# res <- sort(x, partial = 1:min(k, n))[1:min(k, n)]
# # lst <- as.vector(unlist(lapply(lst, "[", "val")))
# # http://stackoverflow.com/questions/15659783/why-does-unlist-kill-dates-in-r
# }
# return(res)
# }
HighLow <- function (x, nlow = 5, nhigh = nlow, na.last = NA) {
  # Build a two-line text summary of the extreme values of x: the nlow
  # smallest and nhigh largest unique values, each followed by its
  # frequency in parentheses (frequencies of 1 are suppressed).
  # Used e.g. by Desc() output.
  if ((nlow + nhigh) == 0) {
    lowtxt <- hightxt <- ""
  } else {
    lo <- Small(x, k = nlow, unique = TRUE, na.last = na.last)
    hi <- Large(x, k = nhigh, unique = TRUE, na.last = na.last)

    counts <- c(lo$frequency, hi$frequency)
    vals <- c(lo$value, hi$value)

    # group digits of large numbers with apostrophes for readability
    if (is.numeric(x))
      vals <- prettyNum(vals, big.mark = "'")

    suffix <- paste(" (", counts, ")", sep = "")
    suffix[counts < 2] <- ""

    lab <- StrTrim(paste(vals, suffix, sep = ""))
    lowtxt <- paste(head(lab, min(length(lo$frequency), nlow)), collapse = ", ")
    hightxt <- paste(tail(lab, min(length(hi$frequency), nhigh)), collapse = ", ")
  }

  return(paste("lowest : ", lowtxt, "\n",
               "highest: ", hightxt, "\n", sep = ""))
}
Closest <- function(x, a, which = FALSE, na.rm = FALSE){
  # Values of x that lie closest to a; all ties are returned.
  # which = TRUE returns their positions instead of the values.
  # na.rm = TRUE drops missing values first; otherwise any NA in x
  # makes the result NA.
  if(na.rm)
    x <- x[!is.na(x)]

  dists <- abs(x - a)
  mindist <- min(dists)

  if(is.na(mindist)){
    res <- NA
  } else {
    # compare against the minimum with a numeric tolerance
    # (beware of floating-point artefacts)
    hit <- DescTools::IsZero(dists - mindist)
    if(which == TRUE)
      res <- which(hit)
    else
      res <- x[hit]
  }

  return(res)
}
DenseRank <- function(x, na.last = TRUE) {
  # Dense ranking: tied values share one rank and no rank numbers are
  # skipped (1, 2, 2, 3 instead of 1, 2.5, 2.5, 4).
  rk <- rank(x, na.last)
  as.numeric(factor(rk))
}
PercentRank <- function(x) {
  # Relative rank: the fraction of non-missing values whose rank is at
  # most the rank of the value itself; NAs keep their NA.
  n.ok <- sum(!is.na(x))
  trunc(rank(x, na.last = "keep")) / n.ok
}
Unwhich <- function(idx, n, useNames=TRUE){
  # Inverse of which(): build a logical vector of length n that is TRUE
  # exactly at the positions listed in idx.
  # Author: Nick Sabbe
  # http://stackoverflow.com/questions/7659833/inverse-of-which
  # less performant, but oneliner:
  # is.element(seq_len(n), i)
  out <- rep(FALSE, n)
  if(length(idx) > 0) {
    out[idx] <- TRUE
    if(useNames)
      # carry over any names attached to idx
      names(out)[idx] <- names(idx)
  }
  return(out)
}
CombLevels <- function(...){
  # Union of factor levels over all arguments; non-factor arguments are
  # converted to factors first.  Useful for aligning factors before
  # combining them.
  args <- list(...)
  lev <- lapply(args, function(v) {
    if(!inherits(v, "factor")) v <- factor(v)
    levels(v)
  })
  unique(unlist(lev))
}
###
## base: string functions ====
# Missing string functions for newbies, but not only..
StrTrim <- function(x, pattern=" \t\n", method="both") {
  # Strip the characters of 'pattern' (interpreted as the body of a
  # regex character class) off the ends of x.
  # method: "both" (default), "left" or "right".
  method <- match.arg(method, choices = c("both", "left", "right"))
  re <- switch(method,
               both  = gettextf("^[%s]+|[%s]+$", pattern, pattern),
               left  = gettextf("^[%s]+", pattern),
               right = gettextf("[%s]+$", pattern))
  gsub(pattern = re, replacement = "", x = x)
}
StrRight <- function(x, n) {
  # Last n characters of each element of x; a negative n instead drops
  # the first |n| characters.  n is recycled to the length of x.
  n <- rep(n, length.out=length(x))
  sapply(seq_along(x), function(i) {
    len <- nchar(x[i])
    from <- if (n[i] >= 0) len - n[i] + 1 else -n[i] + 1
    substr(x[i], from, len)
  })
}
StrLeft <- function(x, n) {
  # First n characters of each element of x; a negative n instead
  # removes the last |n| characters.  n is recycled to the length of x.
  n <- rep(n, length.out=length(x))
  sapply(seq_along(x), function(i) {
    upto <- if (n[i] >= 0) n[i] else nchar(x[i]) + n[i]
    substr(x[i], 0, upto)
  })
}
StrExtract <- function(x, pattern){
  # Extract the first match of 'pattern' from each element of x;
  # elements without a match yield NA.
  # (Fix: a stray `regmatches(x, m)` call whose result was discarded
  # has been removed.)
  m <- regexpr(pattern, x)
  res <- rep(NA_character_, length(m))
  # regmatches() returns only the matching elements, in input order
  res[m > 0] <- regmatches(x, m)
  res
}
StrTrunc <- function(x, maxlen = 20) {
  # Keep at most maxlen characters of each string and mark truncated
  # elements by appending "...".
  kept <- substr(x, 0, maxlen)
  mark <- ifelse(nchar(x) > maxlen, "...", "")
  paste(kept, mark, sep = "")
}
StrAbbr <- function(x, minchar=1, method=c("left","fix")){
  # Abbreviate the strings in x to short prefixes that keep them
  # distinguishable.
  # method "left": each element gets its individually shortest unique
  #                prefix (at least minchar characters);
  # method "fix" : all elements are cut to the same length, the smallest
  #                one at which no duplicates remain.
  switch(match.arg(arg = method, choices = c("left", "fix")),
    "left"={
      # idx[i] ends up as the longest prefix length at which element i
      # still collides with some other element (AllDuplicated flags every
      # member of a duplicated group)
      idx <- rep(minchar, length(x))-1
      for(i in minchar:max(nchar(x))){
        adup <- AllDuplicated(substr(x, 1, i))
        idx[adup] <- i
      }
      # one character more than the last collision disambiguates
      res <- substr(x, 1, idx+1)
    },
    "fix"={
      # grow the common prefix length until all prefixes are unique
      i <- 1
      while(sum(duplicated(substr(x, 1, i))) > 0) { i <- i+1 }
      res <- substr(x, 1, pmax(minchar, i))
    }
  )
  return(res)
}
# replaced by 0.99.19 with method by word and title
# StrCap <- function(x) {
# # Source: Hmisc
# # Author: Charles Dupont
# capped <- grep('^[^A-Z]*', x, perl=TRUE)
#
# substr(x[capped], 1,1) <- toupper(substr(x[capped], 1,1))
# return(x)
#
# }
StrCap <- function(x, method=c("first", "word", "title")) {
  # Capitalize strings: method "first" capitalizes only the first
  # character, "word" the first character of every word, "title" applies
  # title case while keeping short filler words ("a", "of", ...) lower.

  .cap <- function(x){
    # capitalize the first character of each element
    # Source: Hmisc
    # Author: Charles Dupont
    capped <- grep('^[^A-Z]*', x, perl=TRUE)

    substr(x[capped], 1,1) <- toupper(substr(x[capped], 1,1))
    return(x)
  }

  na <- is.na(x)   # remember NAs, restored at the end
  switch(match.arg(method),
         first = {
           res <- .cap(x)
         },
         word = {
           # capitalize each word separately, then glue back with blanks
           res <- unlist(lapply(lapply(strsplit(x, split="\\b\\W+\\b"), .cap), paste, collapse=" "))
         },
         title={
           z <- strsplit(tolower(x), split="\\b\\W+\\b")
           # filler words that stay lower case in titles
           low <- c("a","an","the","at","by","for","in","of","on","to","up","and","as","but","or","nor","s")

           z <- lapply(z, function(y) {
             y[y %nin% low] <- StrCap(y[y %nin% low])
             y[y %in% low] <- tolower(y[y %in% low])
             y}
           )

           # nn holds the non-word separators; re-interleave them with the
           # recapitalized words, padding/shifting z where the two splits
           # produced different lengths
           nn <- strsplit(x, split="\\w+")
           res <- unlist(lapply(1:length(z), function(i) {
             if(length(nn[[i]]) != length(z[[i]])){
               if(z[[i]][1] == "" ){
                 z[[i]] <- z[[i]][-1]
               } else {
                 z[[i]] <- c(z[[i]], "")
               }
             } else {
               if(z[[i]][1] == "" & length(z[[i]])>1)
                 z[[i]] <- VecRot(z[[i]], -1)
             }
             do.call(paste, list(nn[[i]], z[[i]], sep="", collapse=""))
           }
           ))
         }
  )

  res[na] <- NA   # restore original NAs
  return(res)
}
StrDist <- function (x, y, method = "levenshtein", mismatch = 1, gap = 1, ignore.case = FALSE){
  # Edit distance between two strings.
  # source MKmisc, Author: Matthias Kohl
  # method: "levenshtein" (edit distance), "normlevenshtein"
  # (1 - d/max(m, n)) or "hamming" (equal-length strings only).
  # mismatch/gap are the substitution and insertion/deletion costs.
  # Returns an object of class c("stringDist", "dist") carrying the
  # scoring and trace-back matrices as attributes.

  if(ignore.case){
    x <- tolower(x)
    y <- tolower(y)
  }

  if (!is.na(pmatch(method, "levenshtein")))
    method <- "levenshtein"

  METHODS <- c("levenshtein", "normlevenshtein", "hamming")
  method <- pmatch(method, METHODS)

  if (is.na(method))
    stop("invalid distance method")
  if (method == -1)
    stop("ambiguous distance method")

  stopifnot(is.character(x), is.character(y))

  # split single strings into vectors of single characters
  if (length(x) == 1 & nchar(x[1]) > 1)
    x1 <- strsplit(x, split = "")[[1]]
  else
    x1 <- x
  if (length(y) == 1 & nchar(y[1]) > 1)
    y1 <- strsplit(y, split = "")[[1]]
  else
    y1 <- y

  if (method %in% c(1,2)){ ## Levenshtein
    # classical dynamic programming over the (m+1) x (n+1) score matrix D;
    # M records the trace-back moves (d = deletion, i = insertion,
    # m = match, mm = mismatch)
    m <- length(x1)
    n <- length(y1)
    D <- matrix(NA, nrow = m+1, ncol = n+1)
    M <- matrix("", nrow = m+1, ncol = n+1)
    D[,1] <- seq_len(m+1)*gap-1
    D[1,] <- seq_len(n+1)*gap-1
    D[1,1] <- 0
    M[,1] <- "d"
    M[1,] <- "i"
    M[1,1] <- "start"
    text <- c("d", "m", "i")
    for(i in c(2:(m+1))){
      for(j in c(2:(n+1))){
        m1 <- D[i-1,j] + gap
        m2 <- D[i-1,j-1] + (x1[i-1] != y1[j-1])*mismatch
        m3 <- D[i,j-1] + gap
        D[i,j] <- min(m1, m2, m3)
        # record all moves attaining the minimum, "/"-separated
        wmin <- text[which(c(m1, m2, m3) == D[i,j])]
        if("m" %in% wmin & x1[i-1] != y1[j-1])
          wmin[wmin == "m"] <- "mm"
        M[i,j] <- paste(wmin, collapse = "/")
      }
    }
    rownames(M) <- rownames(D) <- c("gap", x1)
    colnames(M) <- colnames(D) <- c("gap", y1)
    d <- D[m+1, n+1]
    if(method == 2){ ## normalized levenshtein
      d <- 1-d / (max(m, n))
    }
  }
  if(method == 3){ ## Hamming
    if(length(x1) != length(y1))
      stop("Hamming distance is only defined for equal length strings")
    d <- sum(x1 != y1)
    D <- NULL
    M <- NULL
  }

  # dress the scalar up as a 2-observation "dist" object
  attr(d, "Size") <- 2
  attr(d, "Diag") <- FALSE
  if(length(x) > 1) x <- paste0("", x, collapse = "")
  if(length(y) > 1) y <- paste0("", y, collapse = "")
  attr(d, "Labels") <- c(x,y)
  attr(d, "Upper") <- FALSE
  attr(d, "method") <- METHODS[method]
  attr(d, "call") <- match.call()
  attr(d, "ScoringMatrix") <- D
  attr(d, "TraceBackMatrix") <- M
  class(d) <- c("stringDist", "dist")

  return(d)
}
StrRev <- function(x) {
  # Reverse the character order of every string in x.
  reversed <- lapply(strsplit(x, NULL), rev)
  sapply(reversed, paste, collapse = "")
}
# defunct by 0.99.21
# StrRep <- function(x, times, sep=""){
# # same as strrep which seems to be new in 3.4.0
# z <- Recycle(x=x, times=times, sep=sep)
# sapply(1:attr(z, "maxdim"), function(i) paste(rep(z$x[i], times=z$times[i]), collapse=z$sep[i]))
# }
# useless because we have base::strwrap but interesting as regexp example
#
# StrWordWrap <- function(x, n, sep = "\n") {
#
# res <- gsub(gettextf("(.{1,%s})(\\s|$)", n), gettextf("\\1%s", sep), x)
# res <- gsub(gettextf("[%s]$", sep), "", res)
#
# return(res)
#
# }
#
StrPad <- function(x, width = NULL, pad = " ", adj = "left") {
  # Pad the strings in x with 'pad' characters up to 'width'.
  # adj = "left" pads on the right (text flush left), "right" pads on
  # the left, "center" distributes the padding on both sides.
  # All arguments are recycled; width defaults to the longest string in
  # x.  Strings already wider than width are returned unchanged.

  .pad <- function(x, width, pad=" ", adj="left"){

    if(is.na(x)) return(NA)

    mto <- match.arg(adj, c("left", "right", "center"))
    free <- max(0, width - nchar(x))
    # repeat the (possibly multi-character) pad and cut it to the free space
    fill <- substring(paste(rep(pad, ceiling(free / nchar(pad))), collapse = ""), 1, free)

    #### cat(" free=",free,", fill=",fill,", mto=",mto,"\n")
    # old, but chop is not a good idea: if(free <= 0) substr(x, 1, len)
    if(free <= 0) x
    else if (mto == "left") paste(x, fill, sep = "")
    else if (mto == "right") paste(fill, x, sep = "")
    else paste(substring(fill, 1, free %/% 2), x, substring(fill, 1 + free %/% 2, free), sep = "")
  }

  # adj <- sapply(adj, match.arg, choices=c("left", "right", "center"))
  if(is.null(width)) width <- max(nchar(x), na.rm=TRUE)

  # recycle all arguments to a common length, then pad element-wise
  lgp <- DescTools::Recycle(x=x, width=width, pad=pad, adj=adj)

  sapply( 1:attr(lgp, "maxdim"), function(i) .pad(lgp$x[i], lgp$width[i], lgp$pad[i], lgp$adj[i]) )
}
StrAlign <- function(x, sep = "\\r"){
  # Align the strings of x for printing: sep = "\\l" left, "\\r" right
  # (default), "\\c" centered, or any plain character (e.g. ".") to
  # align on its first occurrence - typically the decimal point.

  # replace \l by \\^, \r by \\$ and \c means centered

  # check for NA only and combined

  # return x if sep is not found in x
  id.na <- is.na(x)

  # what should be done, if x does not contain sep??
  # we could return unchanged, but this is often not adequate
  # we align right to the separator
  if(length(grep("\\", sep, fixed=TRUE)) == 0) {
    # elements lacking the separator get one appended so they line up too;
    # it is blanked out again at the end
    idx <- !grepl(x=x, pattern=sep, fixed = TRUE)
    x[idx] <- paste(x[idx], sep, sep="")
  }

  # center alignment
  # keep this here, as we may NOT pad x for centered text!!
  # example?? don't see why anymore... check!
  if (sep == "\\c")
    return(StrPad(x, width = max(nchar(x), na.rm=TRUE), pad = " ", adj = "center"))

  # Pad to same maximal length, for right alignment this is mandatory
  # for left alignment not, but again for any character
  x <- StrPad(x, max(nchar(x), na.rm=TRUE))

  # left alignment
  if(sep == "\\l")
    return( sub("(^ +)(.+)", "\\2\\1", x) )
  # right alignment
  if(sep == "\\r")
    return( sub("(.+?)( +$)", "\\2\\1", x) )

  # alignment by a special character: split at the separator ...
  bef <- substr(x, 1, StrPos(x, sep, fix=TRUE)) # use fix = TRUE as otherwise the decimal would be to have entered as \\.
  aft <- substr(x, StrPos(x, sep, fix=TRUE) + 1, nchar(x))
  # chop white space on the right
  aft <- substr(aft, 1, max(nchar(StrTrim(aft, method="right"))))

  # ... and pad the parts before/after the separator to common widths
  res <- paste(replace(StrPad(bef, max(nchar(bef), na.rm=TRUE),
                              " ", adj = "right"), is.na(bef), ""),
               replace(StrPad(aft, max(nchar(aft), na.rm=TRUE), " ", adj = "left"), is.na(aft),
                       ""), sep = "")

  # restore original NAs
  res[id.na] <- NA

  # overwrite the separator appended above with a blank
  if(length(grep("\\", sep, fixed=TRUE)) == 0)
    res[idx] <- gsub(sep, " ", res[idx], fixed = TRUE)

  # return unchanged values not containing sep
  return(res)
}
# replaced by 0.99.19: new argument pos for cutting positions and vector support
# StrChop <- function(x, len) {
# # Splits a string into a number of pieces of fixed length
# # example: StrChop(x=paste(letters, collapse=""), len = c(3,5,0))
# xsplit <- character(0)
# for(i in 1:length(len)){
# xsplit <- append(xsplit, substr(x, 1, len[i]))
# x <- substr(x, len[i]+1, nchar(x))
# }
# return(xsplit)
# }
StrChop <- function(x, len, pos) {
  # Cut each string of x into pieces, either by piece lengths 'len' or
  # by cutting positions 'pos' (exactly one of the two may be given).
  # A single input string yields a character vector of pieces, several
  # strings yield a list of such vectors.
  # example: StrChop(x=paste(letters, collapse=""), len = c(3,5,0))
  has.len <- !missing(len)
  has.pos <- !missing(pos)

  .chop <- function(s) {
    if (has.len) {
      if (has.pos)
        stop("too many arguments")
      ll <- len
    } else {
      # derive the lengths from the positions; the final chunk length
      # nchar(s) is a catch-all for the remainder of the string
      ll <- c(pos[1], diff(pos), nchar(s))
    }
    pieces <- character(0)
    for (w in ll) {
      pieces <- c(pieces, substr(s, 1, w))
      s <- substr(s, w + 1, nchar(s))
    }
    pieces
  }

  res <- lapply(x, .chop)
  if (length(x) == 1)
    res <- res[[1]]
  return(res)
}
StrCountW <- function(x){
  # Count the words in each string: number of word-boundary separators
  # plus one.  (A plain sapply over match lengths would miscount single
  # words, hence sum(p > 0).)
  seps <- gregexpr("\\b\\W+\\b", x, perl = TRUE)
  sapply(seps, function(p) sum(p > 0)) + 1
}
StrVal <- function(x, paste = FALSE, as.numeric = FALSE){
  # Extract all numeric values from the strings in x.
  # paste = TRUE glues the pieces of each element into one string,
  # as.numeric = TRUE converts the result to numeric.
  # Returns one character vector per element of x, simplified by sapply
  # where possible.

  # Problem 20.2.2015: - will not be accepted, when a space is between sign and number
  # not sure if this is really a problem: -> observe...
  # StrVal(x="- 2.5", paste = FALSE, as.numeric = FALSE)

  # matches signed decimals including scientific notation, e.g. -1.2e-3
  pat <- "[-+.e0-9]*\\d"
  gfound <- gregexpr(pattern=pat, text=x)

  vals <- lapply(seq_along(x), function(i){
    found <- gfound[[i]]
    ml <- attr(found, which="match.length")
    # cut the matched substrings out of the original string
    res <- sapply(seq_along(found), function(j) substr(x[i], start=found[j], stop=found[j]+ml[j]-1) )
    return(res)
  })

  if(paste==TRUE) {
    vals <- sapply(vals, paste, collapse="")
    if(as.numeric==TRUE)
      vals <- as.numeric(vals)
  } else {
    if(as.numeric==TRUE)
      vals <- sapply(vals, as.numeric)
    else
      vals <- sapply(vals, as.character)
  }

  return(vals)
}
StrPos <- function(x, pattern, pos=1, ... ){
  # Position of the first match of 'pattern' in each element of x,
  # searching from character 'pos' on; NA when there is no match.
  # Note that the returned position is relative to the start of the
  # search, i.e. to substring(x, pos).
  # example:
  #   StrPos(x=levels(d.pizza$driver), "t", pos=4)
  pos <- rep(pos, length.out=length(x))
  x <- substr(x, start=pos, stop=nchar(x))
  hit <- as.vector(regexpr(pattern = pattern, text = x, ...))
  hit[hit < 0] <- NA
  return(hit)
}
SplitPath <- function(path, last.is.file=NULL) {
  # Split a file path into its components and return them as a list with
  # elements normpath, drive (Windows only, else NA), dirname,
  # fullfilename, filename and extension.
  # last.is.file = NULL guesses from a trailing (back)slash whether the
  # last component is a file or a directory.

  if(is.null(last.is.file)){
    # if last sign is delimiter / or \ read path as dirname
    last.is.file <- (length(grep(pattern="[/\\]$", path)) == 0)
  }

  path <- normalizePath(path, mustWork = FALSE)

  lst <- list()
  lst$normpath <- path

  if (.Platform$OS.type == "windows") {
    # drive letter ("C:") or UNC server ("\\server")
    lst$drive <- regmatches(path, regexpr("^([[:alpha:]]:)|(\\\\[[:alnum:]]+)", path))
    lst$dirname <- gsub(pattern=lst$drive, x=dirname(path), replacement="")
  } else {
    lst$drive <- NA
    lst$dirname <- dirname(path)
  }
  lst$dirname <- paste(lst$dirname, "/", sep="")
  lst$fullfilename <- basename(path)
  # NOTE(review): splitting on the first "." truncates file names that
  # contain dots and multi-part extensions such as ".tar.gz" - confirm
  # whether this is intended
  lst$filename <- strsplit(lst$fullfilename, "\\.")[[1]][1]
  lst$extension <- strsplit(lst$fullfilename, "\\.")[[1]][2]

  if(!last.is.file){
    # the last component is a directory: move it into dirname
    lst$dirname <- paste(lst$dirname, lst$fullfilename, "/",
                         sep="")
    lst$extension <- lst$filename <- lst$fullfilename <- NA
  }

  return(lst)
}
###
## base: conversion functions ====
CharToAsc <- function(x) {
  # Byte (ASCII) codes of the characters in x.  A single string yields
  # a plain integer vector; several strings are processed via sapply
  # (so equal-length strings come back as a named matrix).
  # Original from Henrik Bengtsson R.oo:
  #   char2asc <- function (ch, ...) { match(ch, ASCII) - 1 }
  .codes <- function(s) strtoi(charToRaw(s), 16L)
  if(length(x) == 1)
    .codes(x)
  else
    sapply(x, .codes)
}
AscToChar <- function(i) {
  # Characters for the given byte values, concatenated into one string
  # (inverse of CharToAsc for a single string).
  # ref: http://datadebrief.blogspot.ch/search/label/R
  rawToChar(as.raw(i))
}
HexToDec <- function(x) {
  # Interpret strings as hexadecimal numbers.
  strtoi(x, base = 16L)
}
# example: strtoi(c("9A", "3B"), 16L)
DecToHex <- function(x) {
  # Represent numbers as hexmode objects (printed in hexadecimal).
  as.hexmode(as.numeric(x))
}
OctToDec <- function(x) {
  # Interpret strings as octal numbers.
  strtoi(x, base = 8L)
}
# example: strtoi(c("12", "24"), 8L)
DecToOct <- function(x) {
  # Octal representation of x, returned as a plain number (e.g. 8 -> 10).
  oct <- as.octmode(as.numeric(x))
  as.numeric(as.character(oct))
}
# Alternative: as.numeric(sprintf(242, fmt="%o"))
BinToDec <- function(x) {
  # Interpret strings of 0s and 1s as binary numbers.
  # Alternative: bin2dec <- function(x) { sum(2^.subset((length(x)-1):0, x)) }
  strtoi(x, base = 2L)
}
# example: strtoi(c("100001", "101"), 2L)
# DecToBin <- function (x) {
# # This would be nice, but does not work: (intToBin from R.utils)
# # y <- as.integer(x)
# # class(y) <- "binmode"
# # y <- as.character(y)
# # dim(y) <- dim(x)
# # y
# as.vector(sapply(x, function(x) as.integer(paste(rev(as.integer(intToBits(x))), collapse=""))))
# }
DecToBin <- function (x) {
  # Binary string representation of x, computed by the compiled helper.
  z <- .Call("_DescTools_conv_DecToBin", PACKAGE = "DescTools", x)
  # the routine only supports values up to 2^29 - 1 (= 536870911)
  z[x > 536870911] <- NA
  # strip leading zeros from the fixed-width result
  return(sub("^0+", "", z))
}
# void dec_to_bin(int number) {
# int remainder;
#
# if(number <= 1) {
# cout << number;
# return;
# }
#
# remainder = number%2;
# dec_to_bin(number >> 1);
# cout << remainder;
# }
# DecToBinC <- function(x){
# z <- .C("dec_to_bin", x = as.integer(x))
# return(z)
# }
RomanToInt <- function (x) {
  # Convert roman numerals to integers (the opposite of as.roman);
  # invalid numerals yield NA instead of an error.
  roman2int.inner <- function (roman) {
    # the C routine does the actual parsing
    results <- .C("roman2int", roman = as.character(roman), nchar = as.integer(nchar(roman)),
                  value = integer(1), PACKAGE = "DescTools")
    return(results$value)
  }

  roman <- trimws(toupper(as.character(x)))

  tryIt <- function(x) {
    # map parsing failures onto NA
    retval <- try(roman2int.inner(x), silent = TRUE)
    if (is.numeric(retval))
      retval
    else NA
  }

  retval <- sapply(roman, tryIt)

  retval
}
DegToRad <- function(deg) {
  # Convert angles from degrees to radians.
  deg * pi / 180
}
RadToDeg <- function(rad) {
  # Convert angles from radians to degrees.
  rad * 180 / pi
}
UnitConv <- function(x, from_unit, to_unit){
  # Convert x between measurement units.  Celsius/Fahrenheit need an
  # affine transform and are handled explicitly; all other conversions
  # are looked up as multiplicative factors in the d.units table.
  if(from_unit == "C") {
    if(to_unit=="F") return(x *1.8+32)
  }
  if(from_unit == "F") {
    if(to_unit=="C") return((x -32) *5/9)
  }

  fact <- d.units[d.units$from == from_unit & d.units$to==to_unit, "fact"]
  # unknown unit pairs yield NA
  if(length(fact)==0) fact <- NA
  return(x * fact)
}
DoCall <- function (what, args, quote = FALSE, envir = parent.frame()) {
  # Fast variant of do.call() (source: Gmisc, author: Max Gordon
  # <max@gforge.se>): named arguments are passed as symbols looked up in
  # 'args' at evaluation time, which avoids deparsing huge argument
  # values into the constructed call.
  # what: a function, a function name (also "pkg::fun" / "pkg:::fun"),
  #       or a name object.
  if (quote)
    args <- lapply(args, enquote)

  if (is.null(names(args)) ||
      is.data.frame(args)){
    argn <- args
    args <- list()
  }else{
    # Add all the named arguments as symbols resolved in 'args'
    argn <- lapply(names(args)[names(args) != ""], as.name)
    names(argn) <- names(args)[names(args) != ""]
    # Add the unnamed arguments by value
    argn <- c(argn, args[names(args) == ""])
    args <- args[names(args) != ""]
  }

  # Fix: use is.character()/is.function()/is.name() predicates instead of
  # `class(what) == "..."` - class() can return a vector of length > 1,
  # which errors inside if() on R >= 4.2.
  if (is.character(what)){
    # accept "pkg::fun" / "pkg:::fun" notation
    fn <- strsplit(what, "[:]{2,3}")[[1]]
    what <- if(length(fn)==1) {
      get(fn[[1]], envir=envir, mode="function")
    } else {
      get(fn[[2]], envir=asNamespace(fn[[1]]), mode="function")
    }
    call <- as.call(c(list(what), argn))
  }else if (is.function(what)){
    # keep the function under its deparsed name so the call stays readable
    f_name <- deparse(substitute(what))
    call <- as.call(c(list(as.name(f_name)), argn))
    args[[f_name]] <- what
  }else if (is.name(what)){
    call <- as.call(c(list(what, argn)))
  }

  # evaluate with 'args' as the data environment and 'envir' as enclosure
  eval(call,
       envir = args,
       enclos = envir)
}
###
## base: transformation functions ====
as.matrix.xtabs <- function(x, ...){
  # as.matrix.default does not handle xtabs objects; dropping the class
  # and call attributes leaves the underlying contingency array.
  for (a in c("class", "call"))
    attr(x, a) <- NULL
  return(x)
}
TextToTable <- function(x, dimnames = NULL, ...){
  # Parse a text representation (any format read.table accepts) into a
  # contingency table; 'dimnames' optionally names the table dimensions.
  frm <- read.table(text = x, ...)
  res <- as.table(as.matrix(frm))
  if (!is.null(dimnames))
    names(dimnames(res)) <- dimnames
  return(res)
}
Recode <- function(x, ..., elselevel=NA, use.empty=FALSE){
  # Recode the levels of a factor: each named argument maps a new level
  # name to the old level(s) it replaces.
  #   elselevel = NA   : unmatched old levels become NA (default)
  #   elselevel = NULL : unmatched old levels are kept unchanged
  #   elselevel = "z"  : unmatched old levels are collapsed into "z"
  # use.empty = FALSE drops levels that end up empty.
  map <- list(...)
  if (any(duplicated(unlist(map))))
    stop ("newlevels contain non unique values!")

  rest <- setdiff(levels(x), unlist(map))

  if (is.null(elselevel)) {
    # keep the unmatched levels as they are
    map <- c(map, setNames(as.list(rest), rest))
  } else if (!is.na(elselevel)) {
    # collapse all unmatched levels into elselevel
    map <- c(map, setNames(list(rest), elselevel))
  }

  levels(x) <- map
  if (!use.empty)
    x <- factor(x)   # drop potentially empty levels
  return(x)
}
ZeroIfNA <- function(x) {
  # Replace missing values by 0 (same idea as zeroifnull in SQL).
  x[is.na(x)] <- 0
  x
}
NAIfZero <- function(x) {
  # Replace (numerically) zero values by NA; IsZero() compares against
  # zero with a floating point tolerance.
  replace(x, IsZero(x), NA)
}
Impute <- function(x, FUN = function(x) median(x, na.rm=TRUE)) {
  # Replace missing values in x.
  # FUN may be a function (applied to the full x, NAs included, so it
  # should handle them itself - the default uses median with na.rm) or
  # a character string with an expression in x, e.g. "mean(x, na.rm=TRUE)".
  if(is.function(FUN)) {
    # call the function directly instead of round-tripping its name
    # through eval(parse()) as before
    return(replace(x, is.na(x), FUN(x)))
  }
  # character interface: splice the expression into the replacement call
  return(eval(parse(text = gettextf("replace(x, is.na(x), %s)", FUN))))
}
reorder.factor <- function(x, X, FUN, ..., order=is.ordered(x), new.order,
                           sort=SortMixed) {
  # 25.11.2017 verbatim from gdata, Greg Warnes
  # Reorder the levels of a factor, either by summarizing a covariate X
  # with FUN (stats-style reorder), by an explicit 'new.order', or - the
  # default - by mixed-sorting the level names with 'sort'.
  # order = TRUE returns an ordered factor.
  constructor <- if (order) ordered else factor

  if(!missing(X) || !missing(FUN)){
    if(missing(FUN)) FUN <- 'mean'
    ## I would prefer to call stats::reorder.default directly,
    ## but it exported from stats, so the relevant code is
    ## replicated here:
    ## -->
    scores <- tapply(X = X, INDEX = x, FUN = FUN, ...)
    levels <- names(base::sort(scores, na.last = TRUE))
    if(order)
      ans <- ordered(x, levels=levels)
    else
      ans <- factor(x, levels=levels)
    attr(ans, "scores") <- scores
    ## <--
    return(ans)
  } else if (!missing(new.order)) {
    # a numeric new.order indexes the existing levels
    if (is.numeric(new.order))
      new.order <- levels(x)[new.order]
    else
      new.order <- new.order
  } else
    new.order <- sort(levels(x))

  constructor(x, levels=new.order)
}
SortMixed <- function(x,
                      decreasing=FALSE,
                      na.last=TRUE,
                      blank.last=FALSE,
                      numeric.type=c("decimal", "roman"),
                      roman.case=c("upper","lower","both") ) {
  # "Mixed" sort for strings with embedded numbers ("A2" before "A10"):
  # delegate the hard work to OrderMixed() and subscript x with the
  # resulting permutation.
  x[OrderMixed(x,
               decreasing = decreasing,
               na.last = na.last,
               blank.last = blank.last,
               numeric.type = numeric.type,
               roman.case = roman.case)]
}
OrderMixed <- function(x,
                       decreasing=FALSE,
                       na.last=TRUE,
                       blank.last=FALSE,
                       numeric.type=c("decimal", "roman"),
                       roman.case=c("upper","lower","both") ) {
  # 25.11.2017 verbatim from gtools, Greg Warnes
  # Ordering permutation for "mixed" strings containing embedded numbers,
  # so that e.g. "A2" sorts before "A10".  Strategy:
  # - Split each each character string into an vector of strings and
  #   numbers
  # - Separately rank numbers and strings
  # - Combine orders so that strings follow numbers

  numeric.type <- match.arg(numeric.type)
  roman.case <- match.arg(roman.case)

  if(length(x)<1)
    return(NULL)
  else if(length(x)==1)
    return(1)

  # non-character input can be ordered the ordinary way
  if( !is.character(x) )
    return( order(x, decreasing=decreasing, na.last=na.last) )

  # marker used to delimit the embedded numbers during splitting
  delim="\\$\\@\\$"

  if(numeric.type=="decimal")
  {
    regex <- "((?:(?i)(?:[-+]?)(?:(?=[.]?[0123456789])(?:[0123456789]*)(?:(?:[.])(?:[0123456789]{0,}))?)(?:(?:[eE])(?:(?:[-+]?)(?:[0123456789]+))|)))" # uses PERL syntax
    numeric <- function(x) as.numeric(x)
  }
  else if (numeric.type=="roman")
  {
    regex <- switch(roman.case,
                    "both" = "([IVXCLDMivxcldm]+)",
                    "upper" = "([IVXCLDM]+)",
                    "lower" = "([ivxcldm]+)"
    )
    numeric <- function(x) RomanToInt(x)
  }
  else
    stop("Unknown value for numeric.type: ", numeric.type)

  # non-numeric fragments are compared case-insensitively
  nonnumeric <- function(x)
  {
    ifelse(is.na(numeric(x)), toupper(x), NA)
  }

  x <- as.character(x)

  which.nas <- which(is.na(x))
  which.blanks <- which(x=="")

  ####
  # - Convert each character string into an vector containing single
  #   character and numeric values.
  ####

  # find and mark numbers in the form of +1.23e+45.67
  delimited <- gsub(regex,
                    paste(delim,"\\1",delim,sep=""),
                    x,
                    perl=TRUE)

  # separate out numbers
  step1 <- strsplit(delimited, delim)

  # remove empty elements
  step1 <- lapply( step1, function(x) x[x>""] )

  # create numeric version of data
  suppressWarnings( step1.numeric <- lapply( step1, numeric ) )

  # create non-numeric version of data
  suppressWarnings( step1.character <- lapply( step1, nonnumeric ) )

  # now transpose so that 1st vector contains 1st element from each
  # original string
  maxelem <- max(sapply(step1, length))

  step1.numeric.t <- lapply(1:maxelem,
                            function(i)
                              sapply(step1.numeric,
                                     function(x)x[i])
  )

  step1.character.t <- lapply(1:maxelem,
                              function(i)
                                sapply(step1.character,
                                       function(x)x[i])
  )

  # now order them
  rank.numeric <- sapply(step1.numeric.t, rank)
  rank.character <- sapply(step1.character.t,
                           function(x) as.numeric(factor(x)))

  # and merge: character ranks are shifted above all numeric ranks so
  # that strings follow numbers
  rank.numeric[!is.na(rank.character)] <- 0 # mask off string values

  rank.character <- t(
    t(rank.character) +
      apply(matrix(rank.numeric),2,max,na.rm=TRUE)
  )

  rank.overall <- ifelse(is.na(rank.character),rank.numeric,rank.character)

  order.frame <- as.data.frame(rank.overall)

  # place NAs / blanks according to na.last / blank.last
  if(length(which.nas) > 0)
    if(is.na(na.last))
      order.frame[which.nas,] <- NA
    else if(na.last)
      order.frame[which.nas,] <- Inf
    else
      order.frame[which.nas,] <- -Inf

  if(length(which.blanks) > 0)
    if(is.na(blank.last))
      order.frame[which.blanks,] <- NA
    else if(blank.last)
      order.frame[which.blanks,] <- 1e99
    else
      order.frame[which.blanks,] <- -1e99

  order.frame <- as.list(order.frame)
  order.frame$decreasing <- decreasing
  order.frame$na.last <- NA

  retval <- do.call("order", order.frame)

  return(retval)
}
Lookup <- function(x, ref, val){
  # Dictionary lookup: for each element of x find its position in ref
  # and return the corresponding element of val (NA when not found).
  idx <- match(x, ref)
  val[idx]
}
# StahelLogC <- function(x, na.rm=FALSE) {
# if(na.rm) x <- na.omit(x)
# ### muessen die 0-Werte hier weggelassen werden??
# x <- x[x>0]
# ### additive Konstante fuer die Logarithmierung nach Stahel "...es hat sich gezeigt, dass..."
# return(as.vector(median(x) / (median(x)/quantile(x, 0.25))^2.9))
# }
# http://support.sas.com/documentation/cdl/en/statugfreq/63124/PDF/default/statugfreq.pdf
LogSt <- function(x, base = 10, calib = x, threshold = NULL, mult = 1) {
  # "Started" log transform after Werner Stahel (original function logst
  # in package regr): values above a data-derived threshold are
  # log-transformed, values below it are continued linearly, so zeros
  # and small values are handled gracefully.
  # The threshold defaults to q1^(1+mult)/q3^mult, computed from the
  # positive values of 'calib'.  The threshold and base are attached as
  # attributes so that LogStInv() can invert the transform.
  if (is.null(threshold)) {
    q <- quantile(calib[calib > 0], probs = c(0.25, 0.75), na.rm = TRUE)
    if (q[1] == q[2])
      q[1] <- q[2] / 2
    threshold <- q[1]^(1 + mult) / q[2]^mult
  }

  res <- rep(NA, length(x))
  below <- (x < threshold)
  ok <- !is.na(below)

  # linear continuation below the threshold ...
  res[below & ok] <- log(x = threshold, base = base) +
    ((x[below & ok] - threshold) / (threshold * log(base)))
  # ... plain logarithm above it
  res[!below & ok] <- log(x = x[!below & ok], base = base)

  attr(res, "threshold") <- threshold
  attr(res, "base") <- base
  return(res)
}
LogStInv <- function (x, base=NULL, threshold = NULL) {
  # Inverse of LogSt(): exponentiate values above the (transformed)
  # threshold and invert the linear continuation below it.
  # base/threshold default to the attributes that LogSt() stored on x.
  if(is.null(threshold)) threshold <- attr(x, "threshold")
  if(is.null(base)) base <- attr(x, "base")

  res <- rep(NA, length(x))
  # Fix: the breakpoint on the transformed scale is log(threshold) to
  # the chosen base - the previous log10(threshold) picked the wrong
  # branch for any base other than 10.
  idx <- (x < log(threshold, base = base))
  idx.na <- is.na(idx)

  res[idx & !idx.na] <- threshold - threshold * log(base) *( log(x = threshold, base=base) - x[idx & !idx.na])
  res[!idx & !idx.na] <- base^(x[!idx & !idx.na])

  return(res)
}
# Variance stabilizing functions
# log(x+a)
# log(x+a, base=10)
# sqrt(x+a)
# 1/x
# arcsinh(x)
# LogGen <- function(x, a) { return( log((x + sqrt(x^2 + a^2)) / 2)) }
#
#
# LogLin <- function(x, a) {
# # log-linear hybrid transformation
# # introduced by Rocke and Durbin (2003)
# x[x<=a] <- x[x<=a] / a + log(a) - 1
# x[x>a] <- log(x[x>a])
#
# return(x)
# }
Logit <- function(x, min=0, max=1) {
  # Generalized logit: rescale x from (min, max) to (0, 1) and return
  # the log-odds.
  # variant in boot:::logit - CHECKME if better ********
  prop <- (x - min) / (max - min)
  log(prop / (1 - prop))
}
LogitInv <- function(x, min=0, max=1) {
  # Inverse of Logit(): map log-odds back to the (min, max) scale.
  prop <- exp(x) / (1 + exp(x))
  # exp(Inf)/(1+exp(Inf)) evaluates to NaN; the correct limit is 1
  prop <- ifelse(is.na(prop) & !is.na(x), 1, prop)
  prop * (max - min) + min
}
# from library(forecast)
BoxCox <- function (x, lambda) {
  # Box-Cox power transform: log(x) for lambda == 0, otherwise
  # (sign(x) * |x|^lambda - 1) / lambda.
  # Author: Rob J Hyndman
  # origin: library(forecast)
  if (lambda < 0)
    x[x < 0] <- NA   # negative values have no real-valued transform here
  if (lambda == 0) {
    out <- log(x)
  } else {
    out <- (sign(x) * abs(x)^lambda - 1) / lambda
  }
  # carry over column names for matrix input
  if (!is.null(colnames(x)))
    colnames(out) <- colnames(x)
  return(out)
}
BoxCoxInv <- function(x, lambda){
  # Inverse Box-Cox transform: exp(x) for lambda == 0, otherwise
  # sign(lambda*x + 1) * |lambda*x + 1|^(1/lambda).
  if (lambda < 0)
    x[x > -1/lambda] <- NA   # outside the range of the forward transform
  if (lambda == 0) {
    out <- exp(x)
  } else {
    y <- x * lambda + 1
    out <- sign(y) * abs(y)^(1/lambda)
  }
  # carry over column names for matrix input
  if (!is.null(colnames(x)))
    colnames(out) <- colnames(x)
  return(out)
}
# This R script contains code for extracting the Box-Cox
# parameter, lambda, using Guerrero's method (1993).
# Written by Leanne Chhay
BoxCoxLambda <- function(x, method=c("guerrero","loglik"), lower=-1, upper=2) {
  # Estimate the Box-Cox transformation parameter lambda within
  # [lower, upper], either by Guerrero's (1993) coefficient-of-variation
  # criterion or by profiling the log-likelihood.
  # (Guerrero code written by Leanne Chhay.)

  # Guerrero extracts the required lambda
  # Input: x = original time series as a time series object
  # Output: lambda that minimises the coefficient of variation
  Guerrero <- function(x, lower=-1, upper=2, nonseasonal.length=2) {

    # guer.cv computes the coefficient of variation
    # Input:
    #  lam = lambda
    #  x = original time series as a time series object
    # Output: coefficient of variation
    guer.cv <- function(lam, x, nonseasonal.length=2) {
      period <- max(nonseasonal.length, frequency(x))
      nobsf <- length(x)
      nyr <- floor(nobsf / period)
      nobst <- nyr * period
      # fold the series into one column per period
      x.mat <- matrix(x[(nobsf-nobst+1):nobsf], period, nyr)
      x.mean <- apply(x.mat, 2, mean, na.rm=TRUE)
      x.sd <- apply(x.mat, 2, sd, na.rm=TRUE)
      x.rat <- x.sd / x.mean^(1-lam)
      return(sd(x.rat, na.rm=TRUE)/mean(x.rat, na.rm=TRUE))
    }

    return(optimize(guer.cv, c(lower,upper), x=x,
                    nonseasonal.length=nonseasonal.length)$minimum)
  }

  # Modified version of boxcox from MASS package
  BCLogLik <- function(x, lower=-1, upper=2) {
    n <- length(x)
    if (any(x <= 0))
      stop("x must be positive")
    logx <- log(x)
    xdot <- exp(mean(logx))
    # if(all(class(x)!="ts"))
    fit <- lm(x ~ 1, data=data.frame(x=x))
    # else if(frequency(x)>1)
    #   fit <- tslm(x ~ trend + season, data=data.frame(x=x))
    # else
    #   fit <- tslm(x ~ trend, data=data.frame(x=x))
    xqr <- fit$qr
    # profile the log-likelihood over a lambda grid of step 0.05
    lambda <- seq(lower,upper,by=.05)
    xl <- loglik <- as.vector(lambda)
    m <- length(xl)
    for (i in 1L:m)
    {
      if (abs(la <- xl[i]) > 0.02)
        xt <- (x^la - 1)/la
      else
        # series expansion of the transform near lambda == 0
        xt <- logx * (1 + (la*logx)/2 * (1+(la*logx)/3*(1+(la*logx)/4)))
      loglik[i] <- -n/2 * log(sum(qr.resid(xqr, xt/xdot^(la-1))^2))
    }
    return(xl[which.max(loglik)])
  }

  if(any(x <= 0))
    # non-positive values: restrict the search to non-negative lambda
    lower <- 0
  # stop("All values must be positive")

  method <- match.arg(method)

  if(method=="loglik")
    return(BCLogLik(x,lower,upper))
  else
    return(Guerrero(x,lower,upper))
}
# Last Observation Carried Forward: generic, dispatches on class(x);
# methods exist for default vectors, data.frames and matrices.
LOCF <- function(x) UseMethod("LOCF")
LOCF.default <- function(x) {
  # Last observation carried forward: every NA is replaced by the most
  # recent non-missing value before it; leading NAs remain NA.
  # (Vectorized solution based on Daniel Wollschlaeger's approach;
  # the prepended NA covers series that start with missing values.)
  obs <- !is.na(x)
  runs <- diff(c(1, which(obs), length(x) + 1))
  rep(c(NA, x[obs]), runs)
}
LOCF.data.frame <- function(x){
  # Apply LOCF column-wise and keep the data.frame structure.
  cols <- lapply(x, LOCF)
  as.data.frame(cols)
}
LOCF.matrix <- function(x){
  # Apply LOCF column by column; apply() reassembles the matrix.
  apply(x, 2, LOCF)
}
# Alternative names: PairApply, PwApply, pwapply, papply, ...
PairApply <- function(x, FUN = NULL, ..., symmetric = FALSE){
  # Apply a bivariate function FUN to all pairs of columns/elements of x
  # and return the results as a matrix.  With symmetric = TRUE only the
  # lower triangle is computed and mirrored; otherwise the upper
  # triangle is computed separately with the arguments swapped.
  if(is.function(FUN)) {
    # if FUN is a function, then save it under new name and
    # overwrite function name in FUN, which has to be character
    fct <- FUN
    FUN <- "fct"
  }

  if(is.matrix(x)) x <- as.data.frame(x)

  x <- as.list(x)
  ix <- 1:length(x)

  # pairwise logic from pairwise.table
  pp <- outer(ix, ix, function(ivec, jvec) sapply(seq_along(ivec),
                                                  function(k) {
                                                    i <- ivec[[k]]
                                                    j <- jvec[[k]]
                                                    if (i >= j)
                                                      eval(parse(text = gettextf("%s(x[[i]], x[[j]], ...)", FUN)))
                                                    else NA
                                                  }))

  # why did we need that? in any case it's wrong, if no symmetric calcs are done
  # diag(pp) <- 1

  if(symmetric){
    pp[upper.tri(pp)] <- t(pp)[upper.tri(t(pp))]
  } else {
    # recompute the upper triangle with the argument order swapped
    pp.upr <- outer(ix, ix, function(ivec, jvec) sapply(seq_along(ivec),
                                                        function(k) {
                                                          i <- ivec[[k]]
                                                          j <- jvec[[k]]
                                                          if (i >= j)
                                                            eval(parse(text = gettextf("%s(x[[j]], x[[i]], ...)", FUN)))
                                                          else NA
                                                        }))
    pp[upper.tri(pp)] <- t(pp.upr)[upper.tri(pp.upr)]
  }

  dimnames(pp) <- list(names(x),names(x))

  return(pp)
}
###
## base: date functions ====
# fastPOSIXct <- function(x, tz=NULL, required.components = 3L)
# .POSIXct(if (is.character(x)) .Call("parse_ts", x, required.components) else .Call("parse_ts", as.character(x), required.components), tz)
HmsToSec <- function(x) {
  # Convert "HH:MM:SS" strings (or anything coercible to such strings)
  # to the number of seconds since midnight.
  #
  # BUG FIX: the original sapply/data.frame round-trip simplified a
  # single-element input to a plain vector and then failed on z[, 1]
  # ("incorrect number of dimensions"). Building the numeric matrix
  # directly works for any input length.
  m <- do.call(rbind, strsplit(as.character(x), ":", fixed = TRUE))
  as.numeric(m[, 1]) * 3600 + as.numeric(m[, 2]) * 60 + as.numeric(m[, 3])
}
SecToHms <- function(x, digits = NULL) {
  # Format seconds as "HH:MM:SS" strings, optionally with a fractional
  # part of 'digits' decimal places. If digits is NULL, two decimals are
  # used when any fractional seconds are present, none otherwise.
  secs <- as.numeric(x)
  hh <- floor(secs / 3600)
  mm <- floor((secs - hh * 3600) / 60)
  ss <- floor(secs - (mm * 60 + hh * 3600))
  frac <- secs - (ss + mm * 60 + hh * 3600)
  if (is.null(digits))
    digits <- ifelse(all(frac < sqrt(.Machine$double.eps)), 0, 2)
  f <- if (digits == 0) "" else
    gettextf(paste(".%0", digits, "d", sep = ""), round(frac * 10^digits, 0))
  gettextf("%02d:%02d:%02d%s", hh, mm, ss, f)
}
IsDate <- function(x, what = c('either', 'both', 'timeVaries')) {
  # Is x a date and/or date-time object?
  #   either     - any date or time class
  #   both       - classes that carry both date and time
  #   timeVaries - does the time-of-day part actually vary?
  what <- match.arg(what)
  cl <- class(x)
  if (!length(cl)) return(FALSE)
  date.classes <- c('POSIXt', 'POSIXct', 'dates', 'times', 'chron', 'Date')
  datetime.classes <- c('POSIXt', 'POSIXct', 'chron')
  if (what == 'either') {
    any(cl %in% date.classes)
  } else if (what == 'both') {
    any(cl %in% datetime.classes)
  } else {
    # timeVaries
    if ('chron' %in% cl) {
      # chron: fractional part of the numeric representation is the time
      y <- as.numeric(x)
      length(unique(round(y - floor(y), 13))) > 1
    } else {
      length(unique(format(x, '%H%M%S'))) > 1
    }
  }
}
IsWeekend <- function(x) {
  # TRUE for Saturday and Sunday (POSIXlt wday: 0 = Sunday ... 6 = Saturday)
  wd <- as.POSIXlt(x)$wday
  wd > 5 | wd < 1
}
# This is not useful anymore. Use: as.Date(ISOdate())
# Date <- function(year, month = NA, day = NA) {
# if(is.na(month) && is.na(day)) {
# # try to interpret year as yearmonthday yyyymmdd
# res <- as.Date(ISOdate(year %/% 10000, (year %% 10000) %/% 100, (year %% 100)))
# } else {
# res <- as.Date(ISOdate(year, month, day))
# }
# return(res)
# }
# Year <- function(x){ as.integer( format(as.Date(x), "%Y") ) }
Year <- function(x) {
  # calendar year of a date(-time); POSIXlt stores years as offset from 1900
  1900 + as.POSIXlt(x)$year
}
IsLeapYear <- function(x){
  # TRUE for leap years. x may be given as year numbers or as dates
  # (anything as.Date can handle), in which case the year is extracted.
  #
  # BUG FIX: IsWhole() returns a logical vector; using it directly as an
  # if() condition fails for inputs of length > 1 (an error since R 4.2).
  # all=TRUE reduces it to a single logical as intended.
  if(!IsWhole(x, all = TRUE))
    x <- Year(as.Date(x))
  # leap year: divisible by 4, except centuries not divisible by 400
  ifelse(x %% 100 == 0, x %% 400 == 0, x %% 4 == 0)
}
Month <- function (x, fmt = c("m", "mm", "mmm"), lang = DescToolsOptions("lang"), stringsAsFactors = TRUE) {
  # Month of a date, either as number ("m"), abbreviated name ("mm") or
  # full name ("mmm"). Names come from the current locale (lang="local")
  # or English (lang="engl"); with stringsAsFactors=FALSE a character
  # vector is returned instead of a factor.
  res <- as.POSIXlt(x)$mon + 1
  fmt <- match.arg(arg = fmt, choices = c("m", "mm", "mmm"))
  if (fmt != "m") {
    # 'lang' is only evaluated (and its DescToolsOptions default only
    # looked up) when a named representation is requested
    lang <- match.arg(arg = lang, choices = c("local", "engl"))
    if (fmt == "mm") {
      labels <- if (lang == "local")
        format(ISOdate(2000, 1:12, 1), "%b")   # locale month abbreviations
      else
        month.abb
    } else {
      labels <- if (lang == "local")
        format(ISOdate(2000, 1:12, 1), "%B")   # locale full month names
      else
        month.name
    }
    res <- factor(res, levels = 1:12, labels = labels)
    if (!stringsAsFactors) res <- as.character(res)
  }
  return(res)
}
Week <- function(x, method = c("iso", "us")){
  # Week number of a date:
  #   "iso" - ISO-8601: weeks start on Monday, week 1 is the week that
  #           contains the first Thursday of the year
  #   "us"  - strftime "%W": weeks start on Monday, counted from Jan 1
  # cast x to date, such as being able to handle POSIX-Dates automatically
  x <- as.Date(x)
  method <- match.arg(method, c("iso", "us"))
  switch(method,
         "iso" = {
           #??? fast implementation in lubridate:
           # xday <- ISOdate(year(x), month(x), day(x), tz = tz(x))
           # dn <- 1 + (wday(x) + 5)%%7
           # nth <- xday + ddays(4 - dn)
           # jan1 <- ISOdate(year(nth), 1, 1, tz = tz(x))
           # 1 + (nth - jan1)%/%ddays(7)
           # The weeknumber is the number of weeks between the
           # first thursday of the year and the thursday in the target week
           # der Donnerstag in der Zielwoche
           # x.y <- Year(x)
           # x.weekday <- Weekday(x)
           #
           # x.thursday <- (x - x.weekday + 4)
           # # der erste Donnerstag des Jahres
           # jan1.weekday <- Weekday(as.Date(paste(x.y, "01-01", sep="-")))
           # first.thursday <- as.Date(paste(x.y, "01", (5 + 7*(jan1.weekday > 4) - jan1.weekday), sep="-"))
           #
           # wn <- (as.integer(x.thursday - first.thursday) %/% 7) + 1 - ((x.weekday < 4) & (Year(x.thursday) != Year(first.thursday)))*52
           # wn <- ifelse(wn == 0, Week(as.Date(paste(x.y-1, "12-31", sep="-"))), wn)
           # z is the Thursday of x's ISO week; its year determines the week's year
           z <- x + (3 - (as.POSIXlt(x)$wday + 6) %% 7)
           jan1 <- as.Date(paste(Year(z), "-01-01", sep=""))
           # full weeks between that Thursday and Jan 1 of its year, plus one
           wn <- 1 + as.integer(z - jan1) %/% 7
         },
         "us"={
           wn <- as.numeric(strftime(as.POSIXlt(x), format="%W"))
         }
  )
  return(wn)
}
# Day <- function(x){ as.integer(format(as.Date(x), "%d") ) }
Day <- function(x) {
  # day of the month (1-31)
  as.POSIXlt(x)$mday
}
# Accessor for Day, as defined by library(lubridate)
"Day<-" <- function(x, value) { x <- x + (value - Day(x)) }
Weekday <- function (x, fmt = c("d", "dd", "ddd"), lang = DescToolsOptions("lang"), stringsAsFactors = TRUE) {
  # Weekday of a date: number ("d", Monday = 1 ... Sunday = 7),
  # abbreviated name ("dd") or full name ("ddd"), in the current locale
  # (lang="local") or English (lang="engl").
  res <- as.POSIXlt(x)$wday
  # POSIXlt counts Sunday as 0; map to ISO numbering with Sunday = 7
  res <- replace(res, res == 0, 7)
  fmt <- match.arg(arg = fmt, choices = c("d", "dd", "ddd"))
  if (fmt != "d") {
    # 'lang' (and its DescToolsOptions default) only evaluated when needed
    lang <- match.arg(arg = lang, choices = c("local", "engl"))
    # 2000-01-03 was a Monday, so 3:9 spans Monday..Sunday
    if (fmt == "dd") {
      labels <- if (lang == "local")
        format(ISOdate(2000, 1, 3:9), "%a")
      else
        day.abb
    } else {
      labels <- if (lang == "local")
        format(ISOdate(2000, 1, 3:9), "%A")
      else
        day.name
    }
    res <- factor(res, levels = 1:7, labels = labels)
    if (!stringsAsFactors) res <- as.character(res)
  }
  return(res)
}
Quarter <- function (x) {
  # Quarter (1-4) of a date.
  # (An older version returned "yyyyQq" strings, which proved
  # counterintuitive; the plain quarter number is returned instead.)
  month0 <- as.POSIXlt(x)$mon   # 0-based month
  month0 %/% 3 + 1
}
YearDay <- function(x) {
  # Day of the year, 1-based (Jan 1 -> 1), equivalent to
  # as.integer(format(as.Date(x), "%j")).
  #
  # BUG FIX: POSIXlt$yday is 0-based, so the original returned Jan 1
  # as 0, contradicting the "%j" behavior documented in the comment.
  return(as.POSIXlt(x)$yday + 1)
}
YearMonth <- function(x) {
  # Integer yyyymm representation of a date, e.g. 2012-03-15 -> 201203
  lt <- as.POSIXlt(x)
  (lt$year + 1900) * 100 + lt$mon + 1
}
# convenience aliases: current date (Date) and current date-time (POSIXct)
Today <- function() Sys.Date()
Now <- function() Sys.time()
# Time-of-day accessors, based on the POSIXlt components of x.
Hour <- function(x) {
  # strptime(x, "%H")
  as.POSIXlt(x)$hour
}
Minute <- function(x) {
  # strptime(x, "%M")
  as.POSIXlt(x)$min
}
Second <- function(x) {
  # strptime(x, "%S")
  # NOTE: may contain fractional seconds
  as.POSIXlt(x)$sec
}
Timezone <- function(x) {
  # NOTE(review): the POSIXlt 'zone' component can be NULL/empty depending
  # on platform and how x was created — confirm before relying on it
  as.POSIXlt(x)$zone
}
DiffDays360 <- function(start_d, end_d, method = c("eu", "us")){
  # Number of days between two dates under a 360-day-year (30-day-month)
  # convention, as used in finance.
  # source: http://en.wikipedia.org/wiki/360-day_calendar
  start_d <- as.Date(start_d)
  end_d <- as.Date(end_d)
  d1 <- Day(start_d); m1 <- Month(start_d); y1 <- Year(start_d)
  d2 <- Day(end_d);   m2 <- Month(end_d);   y2 <- Year(end_d)
  method <- match.arg(method)
  switch(method,
         "eu" = {
           # European 30E/360: a 31st is counted as the 30th.
           # BUG FIX: the original shifted start_d/end_d back by one day
           # here, but d1/d2 had already been extracted above, so the
           # adjustment never had any effect on the result.
           if(d1 == 31) d1 <- 30
           if(d2 == 31) d2 <- 30
         }
         , "us" = {
           # US (NASD) 30/360 incl. end-of-February handling:
           # a date followed by March 1 is the last day of February
           if( (Day(start_d+1)==1 & Month(start_d+1)==3) &
               (Day(end_d+1)==1 & Month(end_d+1)==3)) d2 <- 30
           if( d1==31 ||
               (Day(start_d+1)==1 & Month(start_d+1)==3)) {
             d1 <- 30
             if(d2==31) d2 <- 30
           }
         }
  )
  return( (y2-y1)*360 + (m2-m1)*30 + d2-d1)
}
LastDayOfMonth <- function(x){
  # Last day of x's month: first day of the following month, minus one day.
  next.month <- AddMonths(x, 1)
  Day(next.month) <- 1
  next.month - 1
}
AddMonths <- function (x, n, ...) {
  # Add n months to date(s) x, clipping to the last day of the target
  # month when the source day does not exist there (e.g. Jan 31 + 1 month
  # -> Feb 28/29). x and n are recycled against each other via mapply.
  .addMonths <- function (x, n) {
    # ref: http://stackoverflow.com/questions/14169620/add-a-month-to-a-date
    # Author: Antonio
    # candidate without ceiling: same day-of-month, n months later
    # no ceiling
    res <- sapply(x, seq, by = paste(n, "months"), length = 2)[2,]
    # sapply kills the Date class, so recreate down the road
    # ceiling candidate: last day of the target month
    # ceiling
    DescTools::Day(x) <- 1
    res_c <- sapply(x, seq, by = paste(n + 1, "months"), length = 2)[2,] - 1
    # use ceiling in case of overlapping
    res <- pmin(res, res_c)
    return(res)
  }
  x <- as.Date(x, ...)
  res <- mapply(.addMonths, x, n)
  # mapply (as sapply above) kills the Date class, so recreate here
  # and return res in the same class as x
  class(res) <- "Date"
  return(res)
}
AddMonthsYM <- function (x, n) {
  # Add n months to dates given as integers, either in yyyymm (e.g.
  # 201203) or yyyymmdd (e.g. 20120315) form; the result keeps the same
  # representation. x and n are recycled via mapply.
  .addMonths <- function (x, n) {
    if (x %[]% c(100001, 999912)) {
      # Author: Roland Rapold
      # YYYYMM: pure integer arithmetic on year*100 + month
      # (the +120/-1 offsets keep the modulo arithmetic positive
      # for negative n; compensated by the -10 on the year)
      y <- x %/% 100
      m <- x - y * 100
      res <- (y - 10 + ((m + n + 120 - 1) %/% 12)) * 100 +
        ((m + n + 120 - 1) %% 12) + 1
    } else if (x %[]% c(10000101, 99991231)) {
      # YYYYMMDD: delegate to the Date-based AddMonths and re-encode
      res <- DescTools::AddMonths(x = as.Date(as.character(x), "%Y%m%d"), n = n)
      res <- DescTools::Year(res)*10000 + DescTools::Month(res)*100 + Day(res)
    }
    # NOTE(review): values outside both ranges return NULL from .addMonths
    return(res)
  }
  res <- mapply(.addMonths, x, n)
  return(res)
}
Zodiac <- function(x, lang = c("engl", "deu"), stringsAsFactors = TRUE) {
  # Zodiac (star) sign of a date, in English or German.
  lang <- match.arg(lang, choices = c("engl", "deu"))
  if (lang == "engl")
    signs <- c("Capricorn","Aquarius","Pisces","Aries","Taurus","Gemini","Cancer","Leo","Virgo","Libra","Scorpio","Sagittarius","Capricorn")
  else
    signs <- c("Steinbock","Wassermann","Fische","Widder","Stier","Zwillinge","Krebs","Loewe","Jungfrau","Waage","Skorpion","Schuetze","Steinbock")
  # encode the date as mmdd and cut at the sign boundaries
  # (Capricorn appears twice: it spans the turn of the year)
  mmdd <- DescTools::Month(x)*100 + DescTools::Day(x)
  i <- cut(mmdd,
           breaks=c(0,120,218,320,420,520,621,722,822,923,1023,1122,1221,1231))
  if (stringsAsFactors) {
    res <- i
    levels(res) <- signs
  } else {
    res <- signs[i]
  }
  return(res)
}
axTicks.POSIXct <- function (side, x, at, format, labels = TRUE, ...) {
  # Compute (but do not draw) pretty axis tick positions for POSIXct
  # data, based on the current plot region (par("usr")); side selects the
  # horizontal (1, 3) or vertical (2, 4) usr range.
  # This is completely original R-code (graphics::axis.POSIXct) with one
  # exception: no axis is drawn, the positions z are returned instead.
  mat <- missing(at) || is.null(at)
  if (!mat)
    x <- as.POSIXct(at)
  else x <- as.POSIXct(x)
  range <- par("usr")[if (side%%2)
    1L:2L
  else 3L:4L]
  d <- range[2L] - range[1L]
  z <- c(range, x[is.finite(x)])
  attr(z, "tzone") <- attr(x, "tzone")
  # choose a scale unit 'sc' (seconds per unit) and default format,
  # depending on the span d of the plotted range (in seconds)
  if (d < 1.1 * 60) {
    sc <- 1
    if (missing(format))
      format <- "%S"
  }
  else if (d < 1.1 * 60 * 60) {
    sc <- 60
    if (missing(format))
      format <- "%M:%S"
  }
  else if (d < 1.1 * 60 * 60 * 24) {
    sc <- 60 * 60
    if (missing(format))
      format <- "%H:%M"
  }
  else if (d < 2 * 60 * 60 * 24) {
    sc <- 60 * 60
    if (missing(format))
      format <- "%a %H:%M"
  }
  else if (d < 7 * 60 * 60 * 24) {
    sc <- 60 * 60 * 24
    if (missing(format))
      format <- "%a"
  }
  else {
    sc <- 60 * 60 * 24
  }
  if (d < 60 * 60 * 24 * 50) {
    # spans up to ~50 days: pretty() on the scaled numeric values
    zz <- pretty(z/sc)
    z <- zz * sc
    z <- .POSIXct(z, attr(x, "tzone"))
    if (sc == 60 * 60 * 24)
      z <- as.POSIXct(round(z, "days"))
    if (missing(format))
      format <- "%b %d"
  }
  else if (d < 1.1 * 60 * 60 * 24 * 365) {
    # spans up to ~1 year: ticks at month boundaries
    z <- .POSIXct(z, attr(x, "tzone"))
    zz <- as.POSIXlt(z)
    zz$mday <- zz$wday <- zz$yday <- 1
    zz$isdst <- -1
    zz$hour <- zz$min <- zz$sec <- 0
    zz$mon <- pretty(zz$mon)
    m <- length(zz$mon)
    M <- 2 * m
    m <- rep.int(zz$year[1L], m)
    zz$year <- c(m, m + 1)
    zz <- lapply(zz, function(x) rep(x, length.out = M))
    zz <- .POSIXlt(zz, attr(x, "tzone"))
    z <- as.POSIXct(zz)
    if (missing(format))
      format <- "%b"
  }
  else {
    # longer spans: ticks at year boundaries
    z <- .POSIXct(z, attr(x, "tzone"))
    zz <- as.POSIXlt(z)
    zz$mday <- zz$wday <- zz$yday <- 1
    zz$isdst <- -1
    zz$mon <- zz$hour <- zz$min <- zz$sec <- 0
    zz$year <- pretty(zz$year)
    M <- length(zz$year)
    zz <- lapply(zz, function(x) rep(x, length.out = M))
    z <- as.POSIXct(.POSIXlt(zz))
    if (missing(format))
      format <- "%Y"
  }
  if (!mat)
    z <- x[is.finite(x)]
  # keep only ticks inside the plotted range
  keep <- z >= range[1L] & z <= range[2L]
  z <- z[keep]
  # NOTE(review): 'labels' is assembled as in axis.POSIXct but discarded,
  # since only the positions are returned
  if (!is.logical(labels))
    labels <- labels[keep]
  else if (identical(labels, TRUE))
    labels <- format(z, format = format)
  else if (identical(labels, FALSE))
    labels <- rep("", length(z))
  # axis(side, at = z, labels = labels, ...)
  # return(list(at=z, labels=labels))
  return(z)
}
axTicks.Date <- function(side = 1, x, ...) {
  # Compute (but do not draw) pretty axis tick positions for Date data,
  # based on the current plot region (par("usr")).
  ## This functions is almost a copy of axis.Date
  x <- as.Date(x)
  range <- par("usr")[if (side%%2)
    1L:2L
  else 3:4L]
  range[1L] <- ceiling(range[1L])
  range[2L] <- floor(range[2L])
  d <- range[2L] - range[1L]
  z <- c(range, x[is.finite(x)])
  class(z) <- "Date"
  # NOTE(review): 'format' is assigned below (as in axis.Date) but never
  # used, since this function only returns the tick positions
  if (d < 7)
    format <- "%a"
  if (d < 100) {
    # up to ~100 days: pretty() directly on the Date values
    z <- structure(pretty(z), class = "Date")
    format <- "%b %d"
  }
  else if (d < 1.1 * 365) {
    # up to ~1 year: ticks at month boundaries
    zz <- as.POSIXlt(z)
    zz$mday <- 1
    zz$mon <- pretty(zz$mon)
    m <- length(zz$mon)
    m <- rep.int(zz$year[1L], m)
    zz$year <- c(m, m + 1)
    z <- as.Date(zz)
    format <- "%b"
  }
  else {
    # longer spans: ticks at year boundaries
    zz <- as.POSIXlt(z)
    zz$mday <- 1
    zz$mon <- 0
    zz$year <- pretty(zz$year)
    z <- as.Date(zz)
    format <- "%Y"
  }
  # keep only ticks inside the plotted range, deduplicated and sorted
  keep <- z >= range[1L] & z <= range[2L]
  z <- z[keep]
  z <- sort(unique(z))
  class(z) <- "Date"
  z
}
###
## base: information functions ====
# Between operators
`%[]%` <- function(x, rng) {
  # Closed-interval operator: TRUE where rng[1] <= x <= rng[2].
  # rng may also be a 2-column matrix of bounds; x and the rows of rng
  # are then recycled to a common length. NAs in x yield NA.
  if(is.matrix(rng)){
    # recycle things
    # which parameter has the highest dimension
    maxdim <- max(length(x), nrow(rng))
    # recycle all params to maxdim
    x <- rep(x, length.out = maxdim)
    # the rows of the matrix rng
    rng <- rng[rep(1:nrow(rng), length.out = maxdim),]
    res <- .Call("between_num_lrm", as.numeric(x), as.numeric(rng[,1]), as.numeric(rng[,2]), PACKAGE="DescTools")
    res[is.na(x)] <- NA
    return( res)
  }
  if(is.numeric(x) || IsDate(x)) {
    # as.numeric still needed for casting integer to numeric!!
    res <- .Call("between_num_lr", as.numeric(x), as.numeric(rng[1]), as.numeric(rng[2]), PACKAGE="DescTools")
    res[is.na(x)] <- NA
  } else if(is.ordered(x)) {
    # ordered factors: compare by level position
    res <- .Call("between_num_lr", as.numeric(x), as.numeric(match(rng[1], levels(x))), as.numeric(match(rng[2], levels(x))), PACKAGE="DescTools")
    res[is.na(x)] <- NA
  } else if(is.character(x)) {
    # BUG FIX: was class(x) == "character", which yields a length > 1
    # condition for multi-class objects (an error in if() since R 4.2)
    res <- ifelse ( x >= rng[1] & x <= rng[2], TRUE, FALSE )
  } else {
    res <- rep(NA, length(x))
  }
  return(res)
}
`%(]%` <- function(x, rng) {
  # Left-open interval operator: TRUE where rng[1] < x <= rng[2].
  # rng may also be a 2-column matrix of bounds; x and the rows of rng
  # are then recycled to a common length. NAs in x yield NA.
  if(is.matrix(rng)){
    # recycle things
    # which parameter has the highest dimension
    maxdim <- max(length(x), nrow(rng))
    # recycle all params to maxdim
    x <- rep(x, length.out = maxdim)
    # the rows of the matrix rng
    rng <- rng[rep(1:nrow(rng), length.out = maxdim),]
    res <- .Call("between_num_rm", as.numeric(x), as.numeric(rng[,1]), as.numeric(rng[,2]), PACKAGE="DescTools")
    res[is.na(x)] <- NA
    return( res)
  }
  if(is.numeric(x) || IsDate(x)) {
    # as.numeric still needed for casting integer to numeric!!
    res <- .Call("between_num_r", as.numeric(x), as.numeric(rng[1]), as.numeric(rng[2]), PACKAGE="DescTools")
    res[is.na(x)] <- NA
  } else if(is.ordered(x)) {
    # ordered factors: compare by level position
    res <- .Call("between_num_r", as.numeric(x), as.numeric(match(rng[1], levels(x))), as.numeric(match(rng[2], levels(x))), PACKAGE="DescTools")
    res[is.na(x)] <- NA
  } else if(is.character(x)) {
    # BUG FIX: was class(x) == "character", which yields a length > 1
    # condition for multi-class objects (an error in if() since R 4.2)
    res <- ifelse ( x > rng[1] & x <= rng[2], TRUE, FALSE )
  } else {
    res <- rep(NA, length(x))
  }
  return(res)
}
`%[)%` <- function(x, rng) {
  # Right-open interval operator: TRUE where rng[1] <= x < rng[2].
  # rng may also be a 2-column matrix of bounds; x and the rows of rng
  # are then recycled to a common length. NAs in x yield NA.
  if(is.matrix(rng)){
    # recycle things
    # which parameter has the highest dimension
    maxdim <- max(length(x), nrow(rng))
    # recycle all params to maxdim
    x <- rep(x, length.out = maxdim)
    # the rows of the matrix rng
    rng <- rng[rep(1:nrow(rng), length.out = maxdim),]
    res <- .Call("between_num_lm", as.numeric(x), as.numeric(rng[,1]), as.numeric(rng[,2]), PACKAGE="DescTools")
    res[is.na(x)] <- NA
    return( res)
  }
  if(is.numeric(x) || IsDate(x)) {
    # as.numeric still needed for casting integer to numeric!!
    res <- .Call("between_num_l", as.numeric(x), as.numeric(rng[1]), as.numeric(rng[2]), PACKAGE="DescTools")
    res[is.na(x)] <- NA
  } else if(is.ordered(x)) {
    # ordered factors: compare by level position
    res <- .Call("between_num_l", as.numeric(x), as.numeric(match(rng[1], levels(x))), as.numeric(match(rng[2], levels(x))), PACKAGE="DescTools")
    res[is.na(x)] <- NA
  } else if(is.character(x)) {
    # BUG FIX: was class(x) == "character", which yields a length > 1
    # condition for multi-class objects (an error in if() since R 4.2)
    res <- ifelse ( x >= rng[1] & x < rng[2], TRUE, FALSE )
  } else {
    res <- rep(NA, length(x))
  }
  return(res)
}
`%()%` <- function(x, rng) {
  # Open-interval operator: TRUE where rng[1] < x < rng[2].
  # rng may also be a 2-column matrix of bounds; x and the rows of rng
  # are then recycled to a common length. NAs in x yield NA.
  if(is.matrix(rng)){
    # recycle things
    # which parameter has the highest dimension
    maxdim <- max(length(x), nrow(rng))
    # recycle all params to maxdim
    x <- rep(x, length.out = maxdim)
    # the rows of the matrix rng
    rng <- rng[rep(1:nrow(rng), length.out = maxdim),]
    res <- .Call("between_num_m", as.numeric(x), as.numeric(rng[,1]), as.numeric(rng[,2]), PACKAGE="DescTools")
    res[is.na(x)] <- NA
    return( res)
  }
  if(is.numeric(x) || IsDate(x)) {
    # as.numeric still needed for casting integer to numeric!!
    res <- .Call("between_num_", as.numeric(x), as.numeric(rng[1]), as.numeric(rng[2]), PACKAGE="DescTools")
    res[is.na(x)] <- NA
  } else if(is.ordered(x)) {
    # ordered factors: compare by level position
    res <- .Call("between_num_", as.numeric(x), as.numeric(match(rng[1], levels(x))), as.numeric(match(rng[2], levels(x))), PACKAGE="DescTools")
    res[is.na(x)] <- NA
  } else if(is.character(x)) {
    # BUG FIX: was class(x) == "character", which yields a length > 1
    # condition for multi-class objects (an error in if() since R 4.2)
    res <- ifelse ( x > rng[1] & x < rng[2], TRUE, FALSE )
  } else {
    res <- rep(NA, length(x))
  }
  return(res)
}
# outside operators: complements of the interval operators above.
# Note that each one negates the operator with the *opposite* bracket
# types (e.g. %][% is NOT %()%), so they are not exactly the negations
# of their mirror images.
`%][%` <- function(x, rng) {
  return(!(x %()% rng))
}
`%](%` <- function(x, rng) {
  return(!(x %(]% rng))
}
`%)[%` <- function(x, rng) {
  return(!(x %[)% rng))
}
`%)(%` <- function(x, rng) {
  return(!(x %[]% rng))
}
# Not %in% operator
`%nin%` <- function(x, table) {
  # negation of %in%: TRUE where x has no match in table
  is.na(match(x, table))
}
# quick paste operator
# Core (Chambers) recommends against overloading '+' for non-commutative
# operations such as string concatenation, so a dedicated infix operator
# %c% is provided instead:
# https://www.stat.math.ethz.ch/pipermail/r-devel/2006-August/039013.html
# http://stackoverflow.com/questions/1319698/why-doesnt-operate-on-characters-in-r?lq=1
`%c%` <- function(x, y) {
  # infix string concatenation without separator
  paste0(x, y)
}
`%like%` <- function(x, pattern) {
  # SQL-LIKE matching; convenience wrapper delegating to %like any%
  return(`%like any%`(x, pattern))
}
`%like any%` <- function(x, pattern) {
  # SQL-LIKE matching against several patterns: a '%' at the start or end
  # of a pattern acts as a wildcard, otherwise the pattern is anchored.
  # Returns TRUE where x matches at least one pattern.
  regexes <- sapply(pattern, function(p) {
    # leading '%': strip it (unanchored start), else anchor with '^'
    if (substr(p, 1, 1) == "%")
      p <- substr(p, 2, nchar(p))
    else
      p <- paste("^", p, sep = "")
    # trailing '%': strip it (unanchored end), else anchor with '$'
    np <- nchar(p)
    if (substr(p, np, np) == "%")
      p <- substr(p, 1, np - 1)
    else
      p <- paste(p, "$", sep = "")
    p
  })
  # combine all patterns into one alternation
  grepl(pattern = paste(regexes, collapse = "|"), x = x)
  # since 0.99.17 this briefly returned the matching values (grep with
  # value=TRUE) instead of a logical vector; rolled back 26.4.2016
}
# c(Date(2012,1,3), Date(2012,2,3)) %overlaps% c(Date(2012,3,1), Date(2012,3,3))
# c(Date(2012,1,3), Date(2012,2,3)) %overlaps% c(Date(2012,1,15), Date(2012,1,21))
# Date(2012,1,3) %overlaps% c(Date(2012,3,1), Date(2012,3,3))
# c(1, 18) %overlaps% c(10, 45)
# Interval <- function(xp, yp){
# # calculates the number of days of the overlapping part of two date periods
# length(intersect(xp[1]:xp[2], yp[1]:yp[2]))
# }
Interval <- function(x, y){
  # Signed gap between two ranges: positive when y lies entirely after x,
  # negative when y lies entirely before x, and 0 when they overlap.
  # x and y may be 2-column matrices; rows are recycled to a common count.
  xr <- rbind(x)
  yr <- rbind(y)
  # order the endpoints so column 1 is the minimum, column 2 the maximum
  x <- cbind(apply(xr, 1, min), apply(xr, 1, max))
  y <- cbind(apply(yr, 1, min), apply(yr, 1, max))
  # recycle rows to the larger of the two row counts
  n <- max(nrow(x), nrow(y))
  x <- x[rep(seq_len(nrow(x)), length.out = n), , drop = FALSE]
  y <- y[rep(seq_len(nrow(y)), length.out = n), , drop = FALSE]
  gap <- numeric(n)
  after <- y[, 1] > x[, 2]      # y starts after x ends
  gap[after] <- y[after, 1] - x[after, 2]
  before <- y[, 2] < x[, 1]     # y ends before x starts
  gap[before] <- y[before, 2] - x[before, 1]
  unname(gap)
}
`%overlaps%` <- function(x, y) {
  # TRUE if the ranges spanned by x and y share at least one point;
  # scalars are treated as degenerate one-point ranges
  if (length(x) < 2) x <- rep(x, 2)
  if (length(y) < 2) y <- rep(y, 2)
  # disjoint iff one range ends before the other starts
  !(max(x) < min(y) || min(x) > max(y))
}
Overlap <- function(x, y){
  # Length of the overlapping part of two ranges (0 when disjoint).
  # x and y may be 2-column matrices; rows are recycled to a common count.
  # (Rewritten in 0.99.17; the previous formula gave wrong results.)
  xr <- rbind(x)
  yr <- rbind(y)
  # order the endpoints so column 1 is the minimum, column 2 the maximum
  x <- cbind(apply(xr, 1, min), apply(xr, 1, max))
  y <- cbind(apply(yr, 1, min), apply(yr, 1, max))
  # recycle rows to the larger of the two row counts
  n <- max(nrow(x), nrow(y))
  x <- x[rep(seq_len(nrow(x)), length.out = n), , drop = FALSE]
  y <- y[rep(seq_len(nrow(y)), length.out = n), , drop = FALSE]
  upper <- pmin(x[, 2], y[, 2])   # smaller of the two right endpoints
  lower <- pmax(x[, 1], y[, 1])   # larger of the two left endpoints
  d <- upper - lower
  d[d <= 0] <- 0                  # disjoint ranges overlap by 0
  unname(d)
}
AllDuplicated <- function(x){
  # TRUE for every element involved in a tie (occurring more than once);
  # hence !AllDuplicated(x) marks the values appearing exactly once
  duplicated(x) | duplicated(x, fromLast = TRUE)
}
# dummy codierung als Funktion aus: library(nnet)
# see also model.frame(...)
# ClassInd <- function(cl) {
# n <- length(cl)
# cl <- as.factor(cl)
# x <- matrix(0, n, length(levels(cl)))
# x[(1L:n) + n * (unclass(cl) - 1L)] <- 1
# dimnames(x) <- list(names(cl), levels(cl))
# x
# }
Dummy <- function (x, method = c("treatment", "sum", "helmert", "poly", "full"), base = 1, levels = NULL) {
  # Encode a factor (or vector coerced to one) as a matrix of dummy
  # variables using the requested contrast scheme; "full" gives one
  # column per level. The reference level is recorded in attr "base".
  # Alternatives: model.matrix(~x)[, -1], or Ripley's diag(nlevels(x))[x,]
  x <- if (is.null(levels)) factor(x) else factor(x, levels = levels)
  # base may be given as a level name; convert to its position
  if (!is.numeric(base)) base <- match(base, levels(x))
  method <- match.arg(arg = method, choices = c("treatment", "sum", "helmert", "poly", "full"))
  k <- nlevels(x)
  # index the contrast matrix by the factor (used as an integer index)
  res <- switch(method,
                "treatment" = contr.treatment(n = k, base = base)[x, ],
                "sum"       = contr.sum(n = k)[x, ],
                "helmert"   = contr.helmert(n = k)[x, ],
                "poly"      = contr.poly(n = k)[x, ],
                "full"      = diag(k)[x, ]
  )
  # force matrix shape: with two levels the indexing above drops to a vector
  res <- as.matrix(res)
  rn <- if (is.null(names(x))) 1:length(x) else names(x)
  if (method == "full") {
    dimnames(res) <- list(rn, levels(x))
    attr(res, "base") <- NA
  } else {
    dimnames(res) <- list(rn, levels(x)[-base])
    attr(res, "base") <- levels(x)[base]
  }
  return(res)
}
# would not return characters correctly
#
Coalesce <- function(..., method = c("is.na", "is.finite")) {
  # Return, elementwise, the first value among the arguments that is not
  # NA (method="is.na") or that is finite (method="is.finite").
  # Vectors of equal length are combined columnwise; scalars are chained.
  args <- list(...)
  if (length(args) > 1) {
    if (all(sapply(args, length) > 1)) {
      # several parallel vectors: reduce across data.frame columns
      x <- data.frame(..., stringsAsFactors = FALSE)
    } else {
      # scalars (or mixed): reduce across the flattened values
      x <- unlist(args)
    }
  } else {
    if (is.matrix(...)) {
      x <- data.frame(..., stringsAsFactors = FALSE)
    } else {
      x <- (...)
    }
  }
  method <- match.arg(method, choices = c("is.na", "is.finite"))
  if (method == "is.na")
    res <- Reduce(function(a, b) ifelse(!is.na(a), a, b), x)
  else
    res <- Reduce(function(a, b) ifelse(is.finite(a), a, b), x)
  return(res)
}
PartitionBy <- function(x, by, FUN, ...){
  # SQL-OLAP style: FUN() over (partition by 'by'), i.e. FUN applied per
  # group, with the results aligned back to the positions of x.
  # Several grouping variables can be passed as a list, as in tapply;
  # see also stats::ave, which only handles additional args otherwise.
  if (missing(by)) {
    # no grouping: apply FUN to the whole vector, keep attributes
    x[] <- FUN(x, ...)
  } else {
    grp <- interaction(by)
    split(x, grp) <- lapply(split(x, grp), FUN, ...)
  }
  x
}
IsWhole <- function (x, all=FALSE, tol = sqrt(.Machine$double.eps), na.rm=FALSE) {
  # Check whether values are whole numbers within tolerance tol.
  # all=TRUE returns a single logical for the whole vector, otherwise the
  # result is elementwise. Supports integer, numeric and complex input;
  # anything else yields FALSE.
  if (na.rm)
    x <- x[!is.na(x)]
  if(all){
    if (is.integer(x)) {
      TRUE
    } else if (is.numeric(x)) {
      isTRUE(all.equal(x, round(x), tol))
    } else if (is.complex(x)) {
      # both real and imaginary parts must be whole
      isTRUE(all.equal(Re(x), round(Re(x)), tol)) && isTRUE(all.equal(Im(x), round(Im(x)), tol))
    } else FALSE
  } else {
    if (is.integer(x)) {
      rep(TRUE, length(x))
    } else if (is.numeric(x)) {
      abs(x - round(x)) < tol
    } else if (is.complex(x)) {
      # BUG FIX: elementwise '&' — the original used '&&', which collapses
      # to the first elements (and errors on vectors since R 4.3)
      abs(Re(x) - round(Re(x))) < tol & abs(Im(x) - round(Im(x))) < tol
    } else rep(FALSE, length(x))
  }
}
IsZero <- function(x, tol = sqrt(.Machine$double.eps), na.rm = FALSE) {
  # TRUE where a numeric value is (numerically) zero, i.e. |x| < tol;
  # non-numeric input yields a single FALSE
  if (na.rm)
    x <- x[!is.na(x)]
  if (!is.numeric(x))
    return(FALSE)
  abs(x) < tol
}
IsNumeric <- function (x, length.arg = Inf, integer.valued = FALSE, positive = FALSE, na.rm = FALSE){
  # Single TRUE/FALSE: is x a finite numeric vector, optionally of a given
  # length, integer-valued and/or strictly positive?
  if (na.rm)
    x <- x[!is.na(x)]
  ok <- all(is.numeric(x)) && all(is.finite(x))
  if (ok && is.finite(length.arg))
    ok <- length(x) == length.arg
  if (ok && integer.valued)
    ok <- all(x == round(x))
  if (ok && positive)
    ok <- all(x > 0)
  ok
}
IsOdd <- function(x) {
  # TRUE where x leaves remainder 1 when divided by 2
  # (R's %% is always non-negative for positive divisors, so this also
  # works for negative integers)
  x %% 2 == 1
}
IsDichotomous <- function(x, strict = FALSE, na.rm = FALSE) {
  # Does x take (at most) two distinct values? With strict=TRUE exactly
  # two distinct values are required.
  if (na.rm)
    x <- x[!is.na(x)]
  n.distinct <- length(unique(x))
  if (strict) n.distinct == 2 else n.distinct <= 2
}
StrIsNumeric <- function(x) {
  # TRUE where a string can be parsed as a number, e.g.
  # StrIsNumeric(c("123", "-3.141", "foobar123")) -> TRUE TRUE FALSE
  # (the coercion warning for non-numeric strings is deliberately muted)
  suppressWarnings(!is.na(as.numeric(x)))
}
IsPrime <- function(x) {
  # Elementwise primality test for nonnegative integers; returns a logical
  # vector/matrix shaped like x. Raises an error on empty, negative or
  # non-integer input.
  if (is.null(x) || length(x) == 0)
    stop("Argument 'x' must be a nonempty vector or matrix.")
  if (!is.numeric(x) || any(x < 0) || any(x != round(x)))
    stop("All entries of 'x' must be nonnegative integers.")
  n <- length(x)
  X <- x[1:n]   # drops a dim attribute; restored below
  L <- logical(n)
  # candidate divisors: all primes up to sqrt(max(x))
  p <- DescTools::Primes(ceiling(sqrt(max(x))))
  for (i in 1:n) {
    # prime iff no smaller prime divides it
    # (vacuously TRUE for 0, 1 and 2 — 0 and 1 are corrected below)
    L[i] <- all(X[i] %% p[p < X[i]] != 0)
  }
  L[X == 1 | X == 0] <- FALSE
  dim(L) <- dim(x)
  return(L)
}
VecRot <- function(x, k = 1) {
  # Rotate x circularly by k positions: the last k elements wrap around
  # to the front. Non-integer k is rounded with a warning.
  if (k != round(k)) {
    k <- round(k)
    warning("'k' is not an integer")
  }
  n <- length(x)
  shift <- k %% n
  # take an n-long window out of the doubled vector
  rep(x, times = 2)[(n - shift + 1):(2 * n - shift)]
}
VecShift <- function(x, k = 1){
  # Shift x by k positions, filling the vacated slots with NA:
  # k > 0 shifts towards the end, k < 0 towards the front.
  # Non-integer k is rounded with a warning.
  if (k != round(k)) {
    k <- round(k)
    warning("'k' is not an integer")
  }
  n <- length(x)
  if (k < 0) {
    # BUG FIX: the original indexed x[-k:length(x)], which parses as
    # (-k):length(x) — starting one element too early and returning a
    # vector longer than x. Drop the first |k| elements instead.
    k <- min(-k, n)   # shifting further than the length blanks everything
    c(x[seq_len(n - k) + k], rep(NA, k))
  } else {
    k <- min(k, n)
    c(rep(NA, k), x[seq_len(n - k)])
  }
}
RoundTo <- function(x, multiple = 1, FUN = round) {
  # Round x to the nearest multiple of 'multiple'. FUN selects the
  # rounding mode and may be a function (round, ceiling, floor, ...) or
  # the name of one as a character string.
  #
  # IMPROVEMENT: the original built the call as a string and ran it
  # through eval(parse(...)); calling the function directly (match.fun
  # resolves character names) is equivalent and avoids the antipattern.
  if (!is.function(FUN))
    FUN <- match.fun(FUN)
  # round() defaults digits to 0, which is exactly what is needed here
  FUN(x / multiple) * multiple
}
# Alternative Idee mit up and down:
# Round <- function(x, digits = 0, direction=c("both", "down", "up"), multiple = NA) {
#
# direction <- match.arg(direction)
#
# switch(direction
# , both={
# if(is.na(multiple)){
# res <- round(x, digits = digits)
# } else {
# res <- round(x/multiple) * multiple
# }
# }
# , down={
# if(is.na(multiple)){
# res <- floor(x, digits = digits)
# } else {
# res <- floor(x/multiple) * multiple
# }
# }
# , up={
# if(is.na(multiple)){
# res <- ceiling(x, digits = digits)
# } else {
# res <- ceiling(x/multiple) * multiple
# }
# }
# )
# return(res)
# }
Str <- function(x, ...){
  # str() wrapper: for data.frames each " $ variable" line is prefixed
  # with its column index, and strict.width defaults to "cut".
  if(identical(class(x), "data.frame")) {
    args <- list(...)
    # BUG FIX: the original tested is.null(args["strict.width"]) —
    # single-bracket indexing returns a (non-NULL) list, so the default
    # was never applied; [[ ]] returns NULL when the argument is absent
    if(is.null(args[["strict.width"]])) args["strict.width"] <- "cut"
    # capture str()'s printed output so the variable lines can be numbered
    out <- .CaptOut(do.call(str, c(list(object=x), args)))
    idx <- format(1:length(grep(pattern="^ \\$", out)))
    i <- 1
    j <- 1
    while(i <= length(out)) {
      if( length(grep(pattern="^ \\$", out[i])) > 0 ) {
        out[i] <- gsub(pattern="^ \\$", replacement= paste(" ", idx[j], " \\$", sep=""), out[i])
        j <- j + 1
      }
      i <- i + 1
    }
    res <- out
  } else {
    res <- str(x, ...)
  }
  cat(res, sep="\n")
  invisible(res)
}
# Generic: a random, order-preserving sample of n elements/rows of x
# (an alternative to head()/tail() that shows a cross-section).
Some <- function(x, n = 6L, ...){
  UseMethod("Some")
}
Some.data.frame <- function (x, n = 6L, ...) {
  # Random sample of n rows in original order; a negative n drops |n| rows.
  stopifnot(length(n) == 1L)
  nrx <- nrow(x)
  n <- if (n < 0L) max(nrx + n, 0L) else min(n, nrx)
  x[sort(sample(nrx, n)), , drop = FALSE]
}
Some.matrix <- function (x, n = 6L, addrownums = TRUE, ...) {
  # Random sample of n rows in original order; a negative n drops |n| rows.
  # With addrownums=TRUE, unnamed rows get "[i,]" labels showing their
  # original position.
  stopifnot(length(n) == 1L)
  nrx <- nrow(x)
  n <- if (n < 0L)
    max(nrx + n, 0L)
  else min(n, nrx)
  # BUG FIX: the original called sample(nrow(x)) without 'n', drawing a
  # full permutation that — once sorted — returned every row of x
  # regardless of the requested sample size
  sel <- sort(sample(nrx, n))
  ans <- x[sel, , drop = FALSE]
  if (addrownums && is.null(rownames(x)))
    rownames(ans) <- format(sprintf("[%d,]", sel), justify = "right")
  ans
}
Some.default <- function (x, n = 6L, ...) {
  # Random sample of n elements in original order; a negative n drops
  # |n| elements.
  stopifnot(length(n) == 1L)
  len <- length(x)
  n <- if (n < 0L) max(len + n, 0L) else min(n, len)
  x[sort(sample(len, n))]
}
LsFct <- function(package){
  # Character vector of the functions of an *attached* package,
  # e.g. LsFct("DescTools")
  as.vector(unclass(lsf.str(pos = gettextf("package:%s", package) )))
}
# LsData <- function(package){
# # example lsf("DescTools")
# ls(pos = gettextf("package:%s", package))
# as.vector(unclass(ls.str(gettextf("package:%s", package), mode="list")))
#
# }
LsObj <- function(package){
  # Character vector of all objects (functions AND data) of an
  # *attached* package, e.g. LsObj("DescTools")
  ls(pos = gettextf("package:%s", package))
}
What <- function(x){
  # Compact overview of an object's type-related properties
  # (mode, typeof, storage.mode, dim, length, class) as a named list.
  list(mode=mode(x), typeof=typeof(x), storage.mode=storage.mode(x),
       dim=dim(x), length=length(x),class=class(x))
}
PDFManual <- function(package){
  # Open the CRAN PDF reference manual of a package in the browser.
  # Accepts an unquoted package name (non-standard evaluation).
  package <- as.character(substitute(package))
  browseURL(paste("http://cran.r-project.org/web/packages/", package,"/", package, ".pdf", sep = ""))
}
# showPDFmanual <- function(package, lib.loc=NULL)
# {
# path <- find.package(package, lib.loc)
# system(paste(shQuote(file.path(R.home("bin"), "R")),
# "CMD", "Rd2pdf",
# shQuote(path)))
# }
###
## base: organisation, format, report and printing routines ====
# Mbind <- function(...){
# # matrix bind
# # function um n nxm-matrizen zu einem 3d-array zusammenzufassen
#
# arg.list <- list(...)
# # check dimensions, by compare the dimension of each matrix to the first
# if( !all( unlist(lapply(arg.list, function(m) all(unlist(dim(arg.list[[1]])) == unlist(dim(m)))) )))
# stop("Not all matrices have the same dimension!")
#
# ma <- array(unlist(arg.list), dim=c(nrow(arg.list[[1]]), ncol(arg.list[[2]]), length(arg.list)) )
# dimnames(ma) <- dimnames(arg.list[[1]])
# dimnames(ma)[[3]] <- if(is.null(names(arg.list))){1:length(arg.list)} else {names(arg.list)}
#
# return(ma)
# }
# Abind(): bind a collection of arrays (or matrices, vectors, data.frames)
# into one array along an arbitrary dimension -- the abind::abind() algorithm.
#   ...          arrays to bind, or exactly one list of arrays
#   along        dimension along which to bind; a fractional value interpolates
#                a brand-new dimension between two existing ones. Note that its
#                default `N` is evaluated lazily, only after N (the maximal
#                number of dimensions among the arguments) has been computed
#                inside the body.
#   rev.along    count the binding dimension from the end: along = N+1-rev.along
#   new.names    either a list of dimnames for the result, or a character
#                vector taken as argument names
#   force.array  if FALSE, 2-d binds are delegated to rbind()/cbind()
#   make.names   deparse unnamed arguments to label them along `along`
#   use.first.dimnames  on conflicts take dimnames from the first instead of
#                the last argument
#   hier.names   'before'/'after'/'no' (or TRUE/FALSE): glue the argument name
#                onto the dimnames along the join dimension
#   use.dnns     also propagate names(dimnames(.))
# Returns the combined array.
Abind <- function(..., along=N, rev.along=NULL, new.names=NULL,
                  force.array=TRUE, make.names=FALSE,
                  use.first.dimnames=FALSE, hier.names=FALSE, use.dnns=FALSE) {

  ## normalize hier.names to one of 'before' / 'after' / 'no'
  if (is.character(hier.names))
    hier.names <- match.arg(hier.names, c('before', 'after', 'none'))
  else
    hier.names <- if (hier.names) 'before' else 'no'
  arg.list <- list(...)

  ## a single (non-data.frame) list argument supplies the arrays itself
  if (is.list(arg.list[[1]]) && !is.data.frame(arg.list[[1]])) {
    if (length(arg.list)!=1)
      stop("can only supply one list-valued argument for ...")
    if (make.names)
      stop("cannot have make.names=TRUE with a list argument")
    arg.list <- arg.list[[1]]
    have.list.arg <- TRUE
  } else {
    N <- max(1, sapply(list(...), function(x) length(dim(x))))
    have.list.arg <- FALSE
  }

  ## silently drop NULL arguments
  if (any(discard <- sapply(arg.list, is.null)))
    arg.list <- arg.list[!discard]

  if (length(arg.list)==0)
    return(NULL)
  N <- max(1, sapply(arg.list, function(x) length(dim(x))))

  ## N will eventually be length(dim(return.value))
  if (!is.null(rev.along))
    along <- N + 1 - rev.along

  ## a fractional `along` means: create a new dimension between two existing ones
  if (along < 1 || along > N || (along > floor(along) && along < ceiling(along))) {
    N <- N + 1
    along <- max(1, min(N+1, ceiling(along)))
  }

  ## this next check should be redundant, but keep it here for safety...
  if (length(along) > 1 || along < 1 || along > N + 1)
    stop(paste("\"along\" must specify one dimension of the array,",
               "or interpolate between two dimensions of the array",
               sep="\n"))

  ## for plain 2-d binds the caller may prefer rbind()/cbind() semantics
  if (!force.array && N==2) {
    if (!have.list.arg) {
      if (along==2)
        return(cbind(...))
      if (along==1)
        return(rbind(...))
    } else {
      if (along==2)
        return(do.call("cbind", arg.list))
      if (along==1)
        return(do.call("rbind", arg.list))
    }
  }

  if (along>N || along<0)
    stop("along must be between 0 and ", N)

  pre <- seq(from=1, len=along-1)
  post <- seq(to=N-1, len=N-along)

  ## "perm" specifies permutation to put join dimension (along) last
  perm <- c(seq(len=N)[-along], along)

  arg.names <- names(arg.list)
  if (is.null(arg.names)) arg.names <- rep("", length(arg.list))

  ## if new.names is a character vector, treat it as argument names
  if (is.character(new.names)) {
    arg.names[seq(along=new.names)[nchar(new.names)>0]] <-
      new.names[nchar(new.names)>0]
    new.names <- NULL
  }

  ## Be careful with dot.args, because if Abind was called
  ## using do.call(), and had anonymous arguments, the expressions
  ## returned by match.call() are for the entire structure.
  ## This can be a problem in S-PLUS, not sure about R.
  ## E.g., in this one match.call() returns compact results:
  ##   > (function(...)browser())(1:10,letters)
  ##   Called from: (function(...) browser())....
  ##   b()> match.call(expand.dots=FALSE)$...
  ##   list(1:10, letters)
  ## But in this one, match.call() returns evaluated results:
  ##   > test <- function(...) browser()
  ##   > do.call("test", list(1:3,letters[1:4]))
  ##   Called from: test(c(1, 2, 3), c("a", "b....
  ##   b(test)> match.call(expand.dots=FALSE)$...
  ##   list(c(1, 2, 3), c("a", "b", "c", "d")
  ## The problem here was largely mitigated by making Abind()
  ## accept a single list argument, which removes most of the
  ## need for the use of do.call("Abind", ...)

  ## Create deparsed versions of actual arguments in arg.alt.names
  ## These are used for error messages
  if (any(arg.names=="")) {
    if (make.names) {
      ## Create dot.args to be a list of calling expressions for the objects to be bound.
      ## Be careful here with translation to R --
      ## dot.args does not have the "list" functor with R
      ## (and dot.args is not a call object), whereas with S-PLUS, dot.args
      ## must have the list functor removed
      dot.args <- match.call(expand.dots=FALSE)$... ## [[2]]
      if (is.call(dot.args) && identical(dot.args[[1]], as.name("list")))
        dot.args <- dot.args[-1]
      arg.alt.names <- arg.names
      for (i in seq(along=arg.names)) {
        if (arg.alt.names[i]=="") {
          ## only deparse small objects; larger ones get generic "Xi" names
          if (object.size(dot.args[[i]])<1000) {
            arg.alt.names[i] <- paste(deparse(dot.args[[i]], 40), collapse=";")
          } else {
            arg.alt.names[i] <- paste("X", i, sep="")
          }
          arg.names[i] <- arg.alt.names[i]
        }
      }
      ## unset(dot.args) don't need dot.args any more, but R doesn't have unset()
    } else {
      arg.alt.names <- arg.names
      arg.alt.names[arg.names==""] <- paste("X", seq(along=arg.names), sep="")[arg.names==""]
    }
  } else {
    arg.alt.names <- arg.names
  }
  use.along.names <- any(arg.names!="")

  ## need to have here: arg.names, arg.alt.names, don't need dot.args
  names(arg.list) <- arg.names

  ## arg.dimnames is a matrix of dimension names, each element of the
  ## the matrix is a character vector, e.g., arg.dimnames[j,i] is
  ## the vector of names for dimension j of arg i
  arg.dimnames <- matrix(vector("list", N*length(arg.names)), nrow=N, ncol=length(arg.names))
  dimnames(arg.dimnames) <- list(NULL, arg.names)

  ## arg.dnns is a matrix of names of dimensions, each element is a
  ## character vector len 1, or NULL
  arg.dnns <- matrix(vector("list", N*length(arg.names)), nrow=N, ncol=length(arg.names))
  dimnames(arg.dnns) <- list(NULL, arg.names)
  dimnames.new <- vector("list", N)

  ## Coerce all arguments to have the same number of dimensions
  ## (by adding one, if necessary) and permute them to put the
  ## join dimension last.
  ## Create arg.dim as a matrix with length(dim) rows and
  ## length(arg.list) columns: arg.dim[j,i]==dim(arg.list[[i]])[j],
  ## The dimension order of arg.dim is original
  arg.dim <- matrix(integer(1), nrow=N, ncol=length(arg.names))
  for (i in seq(len=length(arg.list))) {
    m <- arg.list[[i]]
    m.changed <- FALSE
    ## be careful with conversion to array: as.array converts data frames badly
    if (is.data.frame(m)) {
      ## use as.matrix() in preference to data.matrix() because
      ## data.matrix() uses the unintuitive codes() function on factors
      m <- as.matrix(m)
      m.changed <- TRUE
    } else if (!is.array(m) && !is.null(m)) {
      if (!is.atomic(m))
        stop("arg '", arg.alt.names[i], "' is non-atomic")
      ## make sure to get the names of a vector and attach them to the array
      dn <- names(m)
      m <- as.array(m)
      if (length(dim(m))==1 && !is.null(dn))
        dimnames(m) <- list(dn)
      m.changed <- TRUE
    }
    new.dim <- dim(m)
    if (length(new.dim)==N) {
      ## Assign the dimnames of this argument to the i'th column of arg.dimnames.
      ## If dimnames(m) is NULL, would need to do arg.dimnames[,i] <- list(NULL)
      ## to set all elts to NULL, as arg.dimnames[,i] <- NULL does not actually
      ## change anything in S-PLUS (leaves whatever is there) and illegal in R.
      ## Since arg.dimnames has NULL entries to begin with, don't need to do
      ## anything when dimnames(m) is NULL
      if (!is.null(dimnames(m))) {
        arg.dimnames[,i] <- dimnames(m)
        if (use.dnns && !is.null(names(dimnames(m))))
          arg.dnns[,i] <- as.list(names(dimnames(m)))
      }
      arg.dim[,i] <- new.dim
    } else if (length(new.dim)==N-1) {
      ## add another dimension (first set dimnames to NULL to prevent errors)
      if (!is.null(dimnames(m))) {
        ## arg.dimnames[,i] <- c(dimnames(m)[pre], list(NULL), dimnames(m))[post]
        ## is equivalent to arg.dimnames[-N,i] <- dimnames(m)
        arg.dimnames[-along,i] <- dimnames(m)
        if (use.dnns && !is.null(names(dimnames(m))))
          arg.dnns[-along,i] <- as.list(names(dimnames(m)))
        ## remove the dimnames so that we can assign a dim of an extra length
        dimnames(m) <- NULL
      }
      arg.dim[,i] <- c(new.dim[pre], 1, new.dim[post])
      if (any(perm!=seq(along=perm))) {
        dim(m) <- c(new.dim[pre], 1, new.dim[post])
        m.changed <- TRUE
      }
    } else {
      stop("'", arg.alt.names[i], "' does not fit: should have `length(dim())'=",
           N, " or ", N-1)
    }
    if (any(perm!=seq(along=perm)))
      arg.list[[i]] <- aperm(m, perm)
    else if (m.changed)
      arg.list[[i]] <- m
  }

  ## Make sure all arguments conform
  conform.dim <- arg.dim[,1]
  for (i in seq(len=ncol(arg.dim))) {
    if (any((conform.dim!=arg.dim[,i])[-along])) {
      stop("arg '", arg.alt.names[i], "' has dims=", paste(arg.dim[,i], collapse=", "),
           "; but need dims=", paste(replace(conform.dim, along, "X"), collapse=", "))
    }
  }

  ## find the last (or first) names for each dimensions except the join dimension
  if (N>1)
    for (dd in seq(len=N)[-along]) {
      for (i in (if (use.first.dimnames) seq(along=arg.names) else rev(seq(along=arg.names)))) {
        if (length(arg.dimnames[[dd,i]]) > 0) {
          dimnames.new[[dd]] <- arg.dimnames[[dd,i]]
          if (use.dnns && !is.null(arg.dnns[[dd,i]]))
            names(dimnames.new)[dd] <- arg.dnns[[dd,i]]
          break
        }
      }
    }

  ## find or create names for the join dimension
  for (i in seq(len=length(arg.names))) {
    ## only use names if arg i contributes some elements
    if (arg.dim[along,i] > 0) {
      dnm.along <- arg.dimnames[[along,i]]
      if (length(dnm.along)==arg.dim[along,i]) {
        use.along.names <- TRUE
        if (hier.names=='before' && arg.names[i]!="")
          dnm.along <- paste(arg.names[i], dnm.along, sep=".")
        else if (hier.names=='after' && arg.names[i]!="")
          dnm.along <- paste(dnm.along, arg.names[i], sep=".")
      } else {
        ## make up names for the along dimension
        if (arg.dim[along,i]==1)
          dnm.along <- arg.names[i]
        else if (arg.names[i]=="")
          dnm.along <- rep("", arg.dim[along,i])
        else
          dnm.along <- paste(arg.names[i], seq(length=arg.dim[along,i]), sep="")
      }
      dimnames.new[[along]] <- c(dimnames.new[[along]], dnm.along)
    }
    if (use.dnns) {
      dnn <- unlist(arg.dnns[along,])
      if (length(dnn)) {
        if (!use.first.dimnames)
          dnn <- rev(dnn)
        names(dimnames.new)[along] <- dnn[1]
      }
    }
  }

  ## if no names at all were given for the along dimension, use none
  if (!use.along.names)
    dimnames.new[along] <- list(NULL)

  ## Construct the output array from the pieces.
  ## Could experiment here with more efficient ways of constructing the
  ## result than using unlist(), e.g.
  ##   out <- numeric(prod(c( arg.dim[-along,1], sum(arg.dim[along,]))))
  ## Don't use names in unlist because this can quickly exhaust memory when
  ## Abind is called with "do.call" (which creates horrendous names in S-PLUS).
  out <- array(unlist(arg.list, use.names=FALSE),
               dim=c( arg.dim[-along,1], sum(arg.dim[along,])),
               dimnames=dimnames.new[perm])

  ## permute the output array to put the join dimension back in the right place
  if (any(order(perm)!=seq(along=perm)))
    out <- aperm(out, order(perm))

  ## if new.names is list of character vectors, use whichever are non-null
  ## for dimension names, checking that they are the right length
  if (!is.null(new.names) && is.list(new.names)) {
    for (dd in seq(len=N)) {
      if (!is.null(new.names[[dd]])) {
        if (length(new.names[[dd]])==dim(out)[dd])
          dimnames(out)[[dd]] <- new.names[[dd]]
        else if (length(new.names[[dd]]))
          warning(paste("Component ", dd,
                        " of new.names ignored: has length ",
                        length(new.names[[dd]]), ", should be ",
                        dim(out)[dd], sep=""))
      }
      if (use.dnns && !is.null(names(new.names)) && names(new.names)[dd]!='')
        names(dimnames(out))[dd] <- names(new.names)[dd]
    }
  }

  ## NA names of dimnames are normalized to empty strings
  if (use.dnns && !is.null(names(dimnames(out))) && any(i <- is.na(names(dimnames(out)))))
    names(dimnames(out))[i] <- ''
  out
}
# *********************************** 12.12.2014
# stack/unstack does exactly that
# ToLong <- function(x, varnames=NULL){
# lst <- as.list(x)
# res <- data.frame(rep(names(lst), lapply(lst, length)), unlist(lst))
# rownames(res) <- NULL
# if(is.null(varnames)) varnames <- c("grp","x")
# colnames(res) <- varnames
# return(res)
# }
ToLong <- function (x, varnames = NULL) {
  # Reshape a data.frame, matrix, table or (named) list into "long" format:
  # one row per value, with a group column (the original column/element name)
  # and a value column.
  #   x        : data.frame, matrix, table or list of vectors
  #   varnames : names for the two result columns, default c("grp", "x")
  # Returns a data.frame with rownames "<origrowname>.<group>" where the
  # original rownames are available.
  if(!is.list(x)) {
    if(is.matrix(x) || is.table(x))
      x <- as.data.frame(x)
    lst <- as.list(x)
  } else {
    lst <- x
  }
  grpnames <- names(lst)
  if(is.null(grpnames)) grpnames <- paste("X", 1:length(lst), sep="")
  # rep() accepts the list of element lengths as its times argument
  res <- data.frame(rep(grpnames, lapply(lst, length)), unlist(lst))
  rownames(res) <- NULL
  if (is.null(varnames))
    varnames <- c("grp", "x")
  colnames(res) <- varnames
  # combine original rownames with the group names; only usable when x has
  # rownames and the groups have equal lengths
  # (bug fix: previously this assignment failed for inputs without rownames,
  # e.g. plain named lists, because the pasted vector had length 0)
  rn <- do.call(paste, c(expand.grid(rownames(x), grpnames), sep="."))
  if (length(rn) == nrow(res))
    rownames(res) <- rn
  return(res)
}
ToWide <- function(x, g, by=NULL, varnames=NULL){
  # Reshape a "long" vector x into "wide" format, one column per level of the
  # grouping variable g (roughly the inverse of ToLong()).
  #   x        : vector of values
  #   g        : grouping variable, coerced to factor; its levels define the columns
  #   by       : optional id used to match rows across the groups; if NULL the
  #              per-group blocks are merged on their row names
  #   varnames : column names for the result, defaulting to levels(g)
  # Returns a data.frame with one column per group level (plus the id column
  # when `by` is supplied).
  # NOTE(review): levels(g) is read before g <- factor(g) below, so for a
  # non-factor g the default varnames is NULL -- confirm whether that is
  # intended.
  if(is.null(varnames))
    varnames <- levels(g)
  if(is.null(by)){
    by <- "row.names"
  } else {
    x <- data.frame(x, idx=by)
    by <- "idx"
    varnames <- c("by", varnames)
  }
  g <- factor(g)
  s <- split(x, g)
  # successively full-outer-join the per-group blocks on the id column
  res <- Reduce(function(x, y) {
    z <- merge(x, y, by=by, all.x=TRUE, all.y=TRUE)
    # kill the rownames
    if(by=="row.names") z <- z[, -grep("Row.names", names(z))]
    return(z)
  }, s)
  colnames(res) <- varnames
  return(res)
}
# ToWide <- function(x, g, varnames=NULL){
# g <- factor(g)
# res <- do.call("cbind", split(x, g))
# if(is.null(varnames)) varnames <- levels(g)
# colnames(res) <- varnames
# return(res)
# }
CatTable <- function( tab, wcol, nrepchars, width=getOption("width") ) {
  # Print a wide character table (a vector of equally long strings, one per
  # table row) to the console, wrapping it into several horizontal chunks of
  # at most `width` characters. The first `nrepchars` characters of every
  # string (e.g. the row labels) are repeated in front of each chunk.
  #   tab       : character vector, one string per table row
  #   wcol      : width of one data column within the strings
  #   nrepchars : number of leading characters repeated per chunk
  #   width     : console width used for wrapping
  # How many data columns fit completely on one line?
  ncols <- ( width - nrepchars ) %/% wcol
  # How many output lines does that yield?
  nrows <- ((nchar(tab[1]) - nrepchars) %/% wcol) / ncols +
    (((nchar(tab[1]) - nrepchars) %% wcol ) > 0) *1 # remainder line
  for( i in 1:nrows ) {
    for( j in 1:length(tab) ){
      # cat( i, nrepchars + 1 + (i-1)*(ncols*wcol-4), nrepchars + i*ncols*wcol-5, "\n")
      cat( substr(tab[j],1,nrepchars)
           , substr(tab[j], nrepchars + 1 + (i-1)*(ncols*wcol), nrepchars + 1 + i*ncols*wcol-1 )
           , "\n", sep="" )
    }
    cat( "\n" )
  }
}
.CaptOut <- function(..., file = NULL, append = FALSE, width=150) {
  # Internal variant of utils::capture.output() that temporarily widens the
  # console (options(width=...)) while capturing, so long lines are not
  # wrapped at the current terminal width.
  #   ...    : expressions whose printed output should be captured
  #   file   : NULL (capture into a character vector), a file name, or a connection
  #   append : append to an existing file?
  #   width  : line width in effect during capturing
  # Returns the captured lines invisibly (NULL when writing to a file).
  opt <- options(width=width)
  args <- substitute(list(...))[-1L]
  rval <- NULL
  closeit <- TRUE
  if (is.null(file))
    file <- textConnection("rval", "w", local = TRUE)
  else if (is.character(file))
    file <- file(file, if (append)
      "a"
    else "w")
  else if (inherits(file, "connection")) {
    if (!isOpen(file))
      open(file, if (append)
        "a"
      else "w")
    else closeit <- FALSE
  }
  else stop("'file' must be NULL, a character string or a connection")
  sink(file)
  # safety net: should the evaluation below fail, undo the sink, close the
  # connection and restore the width option
  on.exit({
    sink()
    if (closeit) close(file)
    options(opt)
  })
  pf <- parent.frame()
  evalVis <- function(expr) withVisible(eval(expr, pf))
  # evaluate each expression in the caller's frame and print visible results
  # (mirrors the structure of utils::capture.output)
  for (i in seq_along(args)) {
    expr <- args[[i]]
    tmp <- switch(mode(expr), expression = lapply(expr, evalVis),
                  call = , name = list(evalVis(expr)), stop("bad argument"))
    for (item in tmp) if (item$visible)
      print(item$value)
  }
  # normal exit: replace the error handler and clean up explicitly
  on.exit(options(opt))
  sink()
  if (closeit)
    close(file)
  if (is.null(rval))
    invisible(NULL)
  else rval
}
Ndec <- function(x) {
  # Number of decimal places of numbers given as character strings,
  # e.g. Ndec(c("1.25", "3")) -> c(2, 0). Exponent parts ("1.2e5") are
  # ignored. Input must be of class character.
  # (format.info() would be an alternative source for this information:
  # [1] width, [2] number of decimals, [3] exponential yes/no.)
  stopifnot(class(x) == "character")
  # strip a possible exponent part first
  mant <- gsub(pattern = "[eE].+$", replacement = "", x = x)
  res <- rep(0, length(x))
  has.dp <- grep("\\.", mant)
  # count the characters to the right of the (last) decimal point
  res[has.dp] <- nchar(sub("^.+[.]", "", mant))[has.dp]
  return(res)
}
Prec <- function (x) {
  # Function to return the most precise
  # digit from a vector of real numbers
  # Keep dividing by powers of 10 (pos and neg from trunc(log(max(x)) down)
  # until the fractional portion is zero, then we have the highest precision
  # digit in terms of a integer power of 10.
  # Thanks to Thomas Lumley for help with machine precision
  # Note: Turn this into a standalone function for "regularizing" a
  # time-activity object with irregular time breaks.
  #
  # Returns a single power of ten, e.g. Prec(c(1.235, 125.3, 1245)) -> 0.001.
  # NOTE(review): log10(max(x)) assumes max(x) > 0 -- a non-positive maximum
  # would fail here; confirm the intended input domain.
  init <- trunc(log10(max(x))) + 1
  zero <- 0
  y <- 1
  # shift one decimal position at a time until no fractional part (beyond
  # machine precision) is left in any element
  while (any(y > zero)) {
    init <- init - 1
    x1 <- x*10^(-init)
    y <- x1 - trunc(x1)
    zero <- max(x1)*.Machine$double.eps
  }
  10^init
  # sapply(c(1.235, 125.3, 1245), prec)
}
# other idea:
# precision <- function(x) {
# rng <- range(x, na.rm = TRUE)
#
# span <- if (zero_range(rng)) rng[1] else diff(rng)
# 10 ^ floor(log10(span))
# }
# References:
# http://stackoverflow.com/questions/3443687/formatting-decimal-places-in-r
# http://my.ilstu.edu/~jhkahn/apastats.html
# https://en.wikipedia.org/wiki/Significant_figures
# http://www.originlab.com/doc/Origin-Help/Options-Dialog-NumFormat-Tab
Format <- function(x, digits = NULL, sci = NULL
                   , big.mark=NULL, leading = NULL
                   , zero.form = NULL, na.form = NULL
                   , fmt = NULL, align = NULL, width = NULL
                   , lang = NULL, ...){
  # Generic: format numbers, dates and text for output.
  # All defaults are NULL on purpose, so that entries of a "fmt" template can
  # be distinguished from explicit user arguments (see Format.default).
  #   digits/sci/big.mark/leading : numeric formatting options
  #   zero.form/na.form           : textual representation of zeros and NAs
  #   fmt                         : a "fmt" template, a date mask, or a special
  #                                 code such as "*", "p", "e", "eng", "%", "frac"
  #   align/width/lang            : alignment, field width, date language
  UseMethod("Format")
}
Format.data.frame <- function(x, digits = NULL, sci = NULL
                              , big.mark=NULL, leading = NULL
                              , zero.form = NULL, na.form = NULL
                              , fmt = NULL, align = NULL, width = NULL, lang = NULL, ...){
  # Apply Format() to every column in turn; the data.frame structure and
  # all attributes are retained.
  for (j in seq_along(x)) {
    x[[j]] <- Format(x[[j]], digits = digits, sci = sci, big.mark = big.mark,
                     leading = leading, zero.form = zero.form,
                     na.form = na.form, fmt = fmt, align = align,
                     width = width, lang = lang, ...)
  }
  class(x) <- c("Format", class(x))
  return(x)
}
Format.matrix <- function(x, digits = NULL, sci = NULL
                          , big.mark=NULL, leading = NULL
                          , zero.form = NULL, na.form = NULL
                          , fmt = NULL, align = NULL, width = NULL, lang = NULL, ...){
  # Format all cells at once; assigning through x[,] keeps dim and dimnames.
  fx <- Format.default(x=x, digits=digits, sci=sci, big.mark=big.mark,
                       leading=leading, zero.form=zero.form, na.form=na.form,
                       fmt=fmt, align=align, width=width, lang=lang, ...)
  x[,] <- fx
  class(x) <- c("Format", class(x))
  return(x)
}
Format.table <- function(x, digits = NULL, sci = NULL
                         , big.mark = NULL, leading = NULL
                         , zero.form = NULL, na.form = NULL
                         , fmt = NULL, align = NULL, width = NULL, lang = NULL, ...){
  # Format all cells at once; assigning through x[] keeps the table's
  # dim and dimnames.
  fx <- Format.default(x=x, digits=digits, sci=sci, big.mark=big.mark,
                       leading=leading, zero.form=zero.form, na.form=na.form,
                       fmt=fmt, align=align, width=width, lang=lang, ...)
  x[] <- fx
  class(x) <- c("Format", class(x))
  return(x)
}
as.CDateFmt <- function(fmt) {
  # Translate an Excel-style date mask (e.g. "yyyy-mm-dd", "ddmmyyyy") into a
  # C/strftime format string (e.g. "%Y-%m-%d", "%d%m%Y").
  # Format codes as in http://www.autohotkey.com/docs/commands/FormatTime.htm
  #
  # Each recognised code is replaced inside fmt by a literal "\<i>" marker;
  # `pat` collects one capturing group per marker and `fpat` the matching C
  # codes. The final sub() call then uses fmt (now a backreference template)
  # to rearrange the C codes into the user's order.
  #
  # The patterns deliberately use d{4} instead of the word-bounded \bd{4}\b,
  # so that compact masks like "yyyymmdd" (no separators) are recognised too.
  codes <- list(
    #   regex    literal   group piece  C code
    c("d{4}",   "dddd",   "(.+)-",     "%A-"),
    c("d{3}",   "ddd",    "(.+)-",     "%a-"),
    c("d{2}",   "dd",     "(.+)-",     "%d-"),
    c("d{1}",   "d",      "0?(.+)-",   "%e-"),   # single code: tolerate a leading zero
    c("m{4}",   "mmmm",   "(.+)-",     "%B-"),
    c("m{3}",   "mmm",    "(.+)-",     "%b-"),
    c("m{2}",   "mm",     "(.+)-",     "%m-"),
    c("m{1}",   "m",      "0?(.+)-",   "%m-"),
    c("y{4}",   "yyyy",   "(.+)-",     "%Y-"),
    c("y{2}",   "yy",     "(.+)-",     "%y-"),
    c("y{1}",   "y",      "0?(.+)-",   "%y-")
  )
  pat <- ""
  fpat <- ""
  i <- 1
  for (cd in codes) {
    if (grepl(cd[1], fmt)) {
      # "\\\\" yields a replacement of one literal backslash, so fmt ends up
      # containing the actual characters "\<i>", interpreted as a
      # backreference by the final sub()
      fmt <- gsub(pattern = cd[2], replacement = paste0("\\\\", i), x = fmt)
      pat <- paste0(pat, cd[3])
      fpat <- paste0(fpat, cd[4])
      i <- i + 1
    }
  }
  sub(pat, fmt, fpat)
}
Format.default <- function(x, digits = NULL, sci = NULL
                           , big.mark = NULL, leading = NULL
                           , zero.form = NULL, na.form = NULL
                           , fmt = NULL, align = NULL, width = NULL, lang = NULL, ...){

  # Workhorse of the Format() generic: convert numbers (or dates, or text)
  # into formatted strings of class "Format".
  #   digits    number of fixed decimal digits
  #   sci       exponent threshold(s) for switching to scientific notation;
  #             NA suppresses scientific notation altogether
  #   big.mark  thousands separator (default "")
  #   leading   "drop" (or "") removes leading zeros; a string of zeros such
  #             as "000" pads the integer part to that many digits
  #   zero.form / na.form : replacement text for zeros resp. NAs
  #   fmt       a template of class "fmt", an Excel-style date mask
  #             (e.g. "yyyy-mm-dd"), or one of the special codes "*"
  #             (significance stars), "p" (p-values), "e" (scientific),
  #             "eng"/"engabb" (engineering notation), "%" (percent), "frac"
  #   align     passed on to StrAlign()
  #   width     minimum field width, passed on to formatC()
  #   lang      language for date formats ("engl" switches to the C locale)
  # Returns a character object of class "Format".

  .format.pval <- function(x){
    # format p-values; based on the original code of format.pval
    r <- character(length(is0 <- x < eps))
    if (any(!is0)) {
      rr <- x <- x[!is0]
      expo <- floor(log10(ifelse(x > 0, x, 1e-50)))
      fixp <- (expo >= -3)
      if (any(fixp))
        rr[fixp] <- format(x[fixp], digits = 4)
      if (any(!fixp))
        rr[!fixp] <- format(x[!fixp], digits=3, scientific=TRUE)
      r[!is0] <- rr
    }
    if (any(is0)) {
      r[is0] <- gettextf("< %s", format(eps, digits = 2))
    }
    return(r)
  }

  .format.stars <- function(x){
    # format significance stars,
    # example: Format(c(0.3, 0.08, 0.042, 0.001), fmt="*")
    breaks <- c(0,0.001,0.01,0.05,0.1,1)
    labels <- c("***","** ","* ",". "," ")
    res <- as.character(sapply(x, cut, breaks=breaks, labels=labels, include.lowest=TRUE))
    return(res)
  }

  .leading.zero <- function(x, n){
    # pad the integer part with leading zeros up to n digits
    # split at the .
    z <- strsplit(as.character(x), split=".", fixed = TRUE)
    # left side; negative numbers get one extra position for the sign
    # NOTE(review): x is character here, so (x < 0) is a lexical comparison
    # relying on "-" sorting before "0" -- confirm for exotic locales
    zl <- lapply(z, "[", 1)
    zl <- sapply(zl, function(x) sprintf(paste0("%0", n + (x<0)*1, "i"), as.numeric(x)))
    # right side
    zr <- sapply(z, "[", 2)
    zr <- ifelse(is.na(zr), "", paste(".", zr, sep=""))
    paste(zl, zr, sep="")
  }

  .format.eng <- function(x, digits = NULL, leading = NULL
                          , zero.form = NULL, na.form = NULL){
    # engineering notation: exponents are multiples of 3
    s <- lapply(strsplit(format(x, scientific=TRUE), "e"), as.numeric)
    y <- unlist(lapply(s, "[[", 1))
    pwr <- unlist(lapply(s, "[", 2))
    return(paste(Format(y * 10^(pwr %% 3), digits=digits, leading=leading,
                        zero.form = zero.form, na.form=na.form)
                 , "e"
                 , c("-","+")[(pwr >= 0) + 1]
                 , Format(abs((pwr - (pwr %% 3))), leading = "00", digits=0)
                 , sep="")
    )
  }

  .format.engabb <- function(x, digits = NULL, leading = NULL
                             , zero.form = NULL, na.form = NULL){
    # engineering notation with SI prefix abbreviation (k, M, m, ...)
    s <- lapply(strsplit(format(x, scientific=TRUE), "e"), as.numeric)
    y <- unlist(lapply(s, "[[", 1))
    pwr <- unlist(lapply(s, "[", 2))
    a <- paste("1e"
               , c("-","+")[(pwr >= 0) + 1]
               , Format(abs((pwr - (pwr %% 3))), leading = "00", digits=0)
               , sep="")
    am <- Lookup(as.numeric(a), d.prefix$mult, d.prefix$abbr)
    a[!is.na(am)] <- am[!is.na(am)]
    a[a == "1e+00"] <- ""
    return(paste(Format(y * 10^(pwr %% 3), digits=digits, leading=leading,
                        zero.form = zero.form, na.form=na.form)
                 , " " , a
                 , sep="")
    )
  }

  # A fmt class object can be used as a user-defined template, e.g.:
  #   fmt.int <- structure(list(
  #     digits = 5, sci = getOption("scipen"), big.mark = "",
  #     leading = NULL, zero.form = NULL, na.form = NULL,
  #     align = "left", width = NULL, txt="(%s), %s - CHF"), class="fmt")
  #   Format(7845, fmt=fmt.int)

  if(is.null(fmt)) fmt <- ""

  # use inherits() instead of class(fmt) == "fmt": robust for objects with
  # several classes (a length > 1 condition would even be an error in R >= 4.2)
  if(inherits(fmt, "fmt")) {
    # explicit arguments overrule the template entries; this is why all the
    # defaults of this function must be NULL -- we could not distinguish
    # defaults from user input otherwise
    if(!is.null(digits)) fmt$digits <- digits
    if(!is.null(sci)) fmt$sci <- sci
    if(!is.null(big.mark)) fmt$big.mark <- big.mark
    if(!is.null(leading)) fmt$leading <- leading
    if(!is.null(zero.form)) fmt$zero.form <- zero.form
    if(!is.null(na.form)) fmt$na.form <- na.form
    if(!is.null(align)) fmt$align <- align
    # bug fix: this used to overwrite fmt$sci instead of fmt$width
    if(!is.null(width)) fmt$width <- width
    if(!is.null(lang)) fmt$lang <- lang
    return(do.call(Format, c(fmt, x=list(x))))
  }

  # set the defaults for whatever the user did not specify
  # (the decimal character itself is controlled by getOption("OutDec"))
  if(is.null(sci))
    if(is.null(digits)){
      # neither digits nor sci given: fall back to the scipen option
      sci <- getOption("scipen", default = 7)
    } else {
      # digits given but no sci: suppress scientific notation
      sci <- Inf
    }
  if(is.null(big.mark)) big.mark <- ""
  if(is.null(na.form)) na.form <- "NA"

  # strip NAs here and restore them at the end as na.form
  if ((has.na <- any(ina <- is.na(x))))
    x <- x[!ina]

  eps <- .Machine$double.eps
  sci <- rep(sci, length.out=2)   # c(upper, lower) exponent thresholds

  if(all(class(x) == "Date")) {
    # the language is only needed for date formats, so avoid looking up the
    # option for other types
    if(is.null(lang)) lang <- DescToolsOptions("lang")
    if(lang=="engl"){
      loc <- Sys.getlocale("LC_TIME")
      Sys.setlocale("LC_TIME", "C")
      on.exit(Sys.setlocale("LC_TIME", loc))
    }
    r <- format(x, as.CDateFmt(fmt=fmt))

  } else if(all(class(x) %in% c("character","factor","ordered"))) {
    r <- format(x)

  } else if(fmt=="*"){
    r <- .format.stars(x)

  } else if(fmt=="p"){
    r <- .format.pval(x)

  } else if(fmt=="eng"){
    r <- .format.eng(x, digits=digits, leading=leading, zero.form=zero.form, na.form=na.form)

  } else if(fmt=="engabb"){
    r <- .format.engabb(x, digits=digits, leading=leading, zero.form=zero.form, na.form=na.form)

  } else if(fmt=="e"){
    r <- formatC(x, digits = digits, width = width, format = "e",
                 big.mark=big.mark, zero.print = zero.form)

  } else if(fmt=="%"){
    r <- paste(suppressWarnings(formatC(x * 100, digits = digits, width = width, format = "f",
                                        big.mark=big.mark, drop0trailing = FALSE)),
               "%", sep="")

  } else if(fmt=="frac"){
    r <- as.character(MASS::fractions(x))

  } else { # default numeric formatting ************************************

    if(all(is.na(sci))) {
      # is.na(sci) inhibits scientific notation altogether
      r <- formatC(x, digits = digits, width = width, format = "f",
                   big.mark=big.mark)
    } else {
      # values beyond the sci thresholds are rendered in scientific notation
      idx <- (((abs(x) > .Machine$double.eps) & (abs(x) <= 10^-sci[2])) | (abs(x) >= 10^sci[1]))

      r <- as.character(rep(NA, length(x)))
      # use which() here instead of r[idx], because of NAs;
      # formatC complains about classes, which are of no interest here,
      # so suppress the warnings
      r[which(idx)] <- suppressWarnings(formatC(x[which(idx)], digits = digits, width = width, format = "e",
                                                big.mark=big.mark, drop0trailing = FALSE))
      r[which(!idx)] <- suppressWarnings(formatC(x[which(!idx)], digits = digits, width = width, format = "f",
                                                 big.mark=big.mark, drop0trailing = FALSE))
    }

    if(!is.null(leading)){
      # handle leading zeros ------------------------------
      if(leading %in% c("","drop")) {
        # drop leading zeros
        r <- gsub("(?<![0-9])0+\\.", "\\.", r, perl = TRUE)
      } else if(grepl("^[0]*$", leading)){
        # leading contains only zeros: use their count as padding width
        r <- .leading.zero(r, nchar(leading))
      }
    }
  }

  if(!is.null(zero.form))
    r[abs(x) < eps] <- zero.form

  if (has.na) {
    # plug the NAs back in as na.form
    rok <- r
    r <- character(length(ina))
    r[!ina] <- rok
    r[ina] <- na.form
  }

  if(!is.null(align)){
    r <- StrAlign(r, sep = align)
  }

  class(r) <- c("Format", class(r))
  return(r)
}
print.Format <- function (x, ...) {
  # Strip the "Format" class and delegate to the next print method,
  # printing unquoted and right-aligned.
  class(x) <- setdiff(class(x), "Format")
  NextMethod("print", quote = FALSE, right = TRUE, ...)
}
Fmt <- function(...){
  # get format templates and modify on the fly, e.g. other digits
  # x is the name of the template
  #
  # Usage patterns (see the functionality notes further down):
  #   Fmt()                    return all templates defined in the options
  #   Fmt("abs")               return the template "abs"
  #   Fmt("abs", digits=3)     return "abs" with digits overridden ad hoc
  #   Fmt(c("abs","per"))      return several templates at once
  #   Fmt(nob=<fmt object>)    define/overwrite a template; returns the old
  #                            option value invisibly (like options())
  # The built-in templates "abs" (counts), "per" (percentages) and "num"
  # (floating point numbers) are always available, even when not set as
  # options.
  def <- structure(
    list(
      abs=structure(list(digits = 0, big.mark = "'"),
                    label = "Number format for counts",
                    name="abs",
                    default=TRUE, class = "fmt"),
      per=structure(list(digits = 1, fmt = "%"),
                    label = "Percentage number format",
                    name="per",
                    default=TRUE, class = "fmt"),
      num=structure(list(digits = 0, big.mark = "'"),
                    label = "Number format for floating points",
                    name="num",
                    default=TRUE, class = "fmt")
    ), name="fmt")
  # get a format from the fmt templates options
  res <- DescToolsOptions("fmt")
  # find other defined fmt in .GlobalEnv and append to list
  # found <- ls(parent.frame())[ lapply(lapply(ls(parent.frame()), function(x) gettextf("class(%s)", x)),
  #                       function(x) eval(parse(text=x))) == "fmt" ]
  # if(length(found)>0){
  #   udf <- lapply(found, function(x) eval(parse(text=x)))
  #   names(udf) <- found
  # }
  # collect all found formats, defaults included if not set as option
  # abs, per and num must always be available, even if not explicitly defined
  res <- c(res, def[names(def) %nin% names(res)]) #, udf)
  # get additional arguments
  dots <- list(...)
  # leave away all NULL values, these should not overwrite the defaults below
  #dots <- dots[!is.null(dots)]
  # functionality:
  # Fmt()                    return all from options
  # Fmt("abs")               return abs
  # Fmt("abs", digits=3)     return abs with updated digits
  # Fmt(c("abs","per"))      return abs and per
  # Fmt(nob=as.Fmt(digits=10, na.form="nodat"))   set nob
  if(length(dots)==0){
    # no arguments supplied
    # return list of defined formats
    # just return(res)
  } else {
    # some dots supplied
    # if first unnamed and the rest named, take as format name and overwrite other
    if(is.null(names(dots))){
      # if not names at all
      # select the requested ones by name, the unnamed ones
      fnames <- unlist(dots[is.null(names(dots))])
      res <- res[fnames]
      # return(res)
    } else {
      if(all(names(dots)!="")){
        # if only names (no unnamed), take name as format name and define format
        old <- options("DescTools")[[1]]
        opt <- old
        for(i in seq_along(dots))
          attr(dots[[i]], "name") <- names(dots)[[i]]
        # NOTE(review): this writes a single template; names(dots) with
        # length > 1 would not subset as intended here -- presumably only one
        # named template is meant to be set per call; confirm.
        opt$fmt[[names(dots)]] <- dots[[names(dots)]]
        options(DescTools=opt)
        # same behaviour as options
        invisible(old)
      } else {
        # select the requested ones by name, the unnamed ones
        fnames <- unlist(dots[names(dots)==""])
        res <- res[fnames]
        # modify additional arguments in the template definition
        for(z in names(res)){
          if(!is.null(res[[z]])){
            # use named dots, but only those which are not NULL
            idx <- names(dots) != "" & !sapply(dots[names(dots)], is.null)
            # res[[z]][names(dots[names(dots)!=""])] <- dots[names(dots)!=""]
            res[[z]][names(dots[idx])] <- dots[idx]
          }
        }
        # return(res)
      }
    }
  }
  # simplify list
  if(length(res)==1) res <- res[[1]]
  return(res)
}
#
#
# # define some format templates
# .fmt_abs <- function()
# getOption("fmt.abs", structure(list(digits=0,
# big.mark="'"), class="fmt"))
# # there is an option Sys.localeconv()["thousands_sep"], but we can't change it
#
# .fmt_per <- function(digits=NULL){
#
# # we could use getOption("digits") as default here, but this is normally not a good choice
# # as numeric digits and percentage digits usually differ
# res <- getOption("fmt.per", structure(list(digits=1,
# fmt="%"), class="fmt"))
# # overwrite digits if given
# if(!is.null(digits))
# res["digits"] <- digits
# return(res)
# }
#
# .fmt_num <- function(digits = NULL){
# # check if fmt is defined
# res <- getOption("fmt.num")
#
# # if not: use a default, based on digfix
# if(is.null(res))
# res <- structure(list(digits=Coalesce(digits, DescToolsOptions("digits"), 3),
# big.mark=Sys.localeconv()["thousands_sep"]),
# class="fmt")
# else
# # if exists overwrite digits
# if(!is.null(digits)) res$digits <- digits
# # what should we do, when digits are neither defined in fmt.num nor given
# # in case the fmt.num exists?
#
# return(res)
# }
# .fmt <- function()
# getOption("fmt", default = list(
# per=structure(list(digits=1, fmt="%"), name="per", label="Percentage number format", class="fmt")
# , num=structure(list(digits=getOption("digfix", default=3), big.mark=Sys.localeconv()["thousands_sep"]), name="num", label="Number format for floating points", class="fmt")
# , abs=structure(list(digits=0, big.mark=Sys.localeconv()["thousands_sep"]), name="abs", label="Number format for counts", class="fmt")
# ) )
#
print.fmt <- function(x, ...){
  # Print method for "fmt" format templates: shows the template name,
  # its description (label), the defining key=value pairs and an example
  # of a formatted number.

  # flatten the template's settings into one "key=value, ..." string,
  # quoting character entries
  .DefString <- function(tpl){
    is_chr <- unlist(lapply(tpl, inherits, "character"))
    # opt <- options(useFancyQuotes=FALSE); on.exit(options(opt))
    tpl[is_chr] <- shQuote(tpl[is_chr])
    paste(names(tpl), "=", tpl, sep="", collapse = ", ")
  }

  suffix <- ifelse(identical(attr(x, "default"), TRUE), " (default)", "")

  cat(gettextf("Format name: %s%s\n", attr(x, "name"), suffix),
      gettextf("Description: %s\n", Label(x)),
      gettextf("Definition: %s\n", .DefString(x)),
      gettextf("Example: %s\n", Format(pi * 1e5, fmt=x))
  )
}
Frac <- function(x, dpwr = NA) {
  # Fractional (decimal) part of a number, e.g. Frac(3.75) = 0.75.
  # If dpwr is given (a power of 10), the fractional part is scaled by
  # 10^dpwr and rounded to an integer, e.g. Frac(3.14159, 2) = 14.
  #
  # Fix: the original tested !missing(dpwr), so passing the documented
  # default dpwr = NA explicitly returned NA (round(10^NA * res));
  # testing is.na() makes an explicit NA behave like the default.
  res <- abs(x) %% 1
  # Alternative: res <- abs(x - trunc(x))
  if (!is.na(dpwr)) res <- round(10^dpwr * res)
  res
}
MaxDigits <- function(x){
  # Maximum number of digits after the decimal separator found in x,
  # based on its text representation; 0 if no element has decimals.
  parts <- strsplit(as.character(x),
                    split = getOption("OutDec"), fixed = TRUE)
  frac <- na.omit(unlist(lapply(parts, "[", 2)))
  if(length(frac) == 0)
    return(0)
  max(nchar(frac))
  # Alternative separator source: Sys.localeconv()["decimal_point"]
}
Recycle <- function(...){
  # Recycle all arguments to the length of the longest one and return
  # them as a list; the common length is stored in attribute "maxdim".
  args <- list(...)
  n <- max(vapply(args, length, integer(1)))
  out <- lapply(args, function(a) rep_len(a, length.out = n))
  attr(out, "maxdim") <- n
  out
}
###
## stats: strata sampling ----------------
Strata <- function (x, stratanames = NULL, size = 1,
                    method = c("srswor", "srswr", "poisson", "systematic"),
                    pik, description = FALSE) {
  # Draw a stratified sample from the data.frame x.
  #   stratanames : names of the columns that define the strata
  #   size        : sample size per stratum (recycled over the strata)
  #   method      : "srswor" = simple random sampling without replacement,
  #                 "srswr"  = simple random sampling with replacement
  # NOTE(review): "poisson" and "systematic" are still to implement; they
  # currently fall back to the srswor/srswr logic (exactly as the three
  # duplicated branches of the original did), and 'pik'/'description' are
  # accepted but not used yet.

  method <- match.arg(method, c("srswor", "srswr", "poisson", "systematic"))

  # find non factors in stratanames
  factor_fg <- unlist(lapply(x[, stratanames, drop=FALSE], is.factor))

  # factorize nonfactors, get their levels and combine with levels of existing factors
  lvl <- c(lapply(lapply(x[,names(which(!factor_fg)), drop=FALSE], factor), levels)
           , lapply(x[,names(which(factor_fg)), drop=FALSE], levels))

  # one row per strata combination, in the given order of stratanames
  strat <- expand.grid(lvl[stratanames])
  strat$stratum <- factor(1:nrow(strat))

  # set the size for the strata to sample (recycled over the strata)
  strat$size <- rep(size, length.out=nrow(strat))

  # attach stratum id and sample size to every record
  x <- merge(x, strat)
  x$id <- 1:nrow(x)

  # sample z$size[1] rows within one stratum; empty strata pass through
  .SampleStratum <- function(z, replace) {
    if(nrow(z) > 0){
      idx <- sample(x=nrow(z), size=z$size[1], replace=replace)
      z[idx, ]
    } else {
      z
    }
  }

  # all methods currently share the same per-stratum sampling
  # ("poisson"/"systematic" still to implement, see note above)
  res <- do.call(rbind,
                 lapply(split(x, x$stratum), .SampleStratum,
                        replace = (method == "srswr")))

  return(res)
}
# Strata <- function (data, stratanames = NULL, size,
# method = c("srswor", "srswr", "poisson", "systematic"),
# pik, description = FALSE)
# {
#
# # Author: Yves Tille <yves.tille@unine.ch>, Alina Matei <alina.matei@unine.ch>
# # source: library(sampling)
#
# inclusionprobabilities <- function (a, n)
# {
# nnull = length(a[a == 0])
# nneg = length(a[a < 0])
# if (nnull > 0)
# warning("there are zero values in the initial vector a\n")
# if (nneg > 0) {
# warning("there are ", nneg, " negative value(s) shifted to zero\n")
# a[(a < 0)] = 0
# }
# if (identical(a, rep(0, length(a))))
# pik1 = a
# else {
# pik1 = n * a/sum(a)
# pik = pik1[pik1 > 0]
# list1 = pik1 > 0
# list = pik >= 1
# l = length(list[list == TRUE])
# if (l > 0) {
# l1 = 0
# while (l != l1) {
# x = pik[!list]
# x = x/sum(x)
# pik[!list] = (n - l) * x
# pik[list] = 1
# l1 = l
# list = (pik >= 1)
# l = length(list[list == TRUE])
# }
# pik1[list1] = pik
# }
# }
# pik1
# }
#
# srswor <- function (n, N)
# {
# s <- rep(0, times = N)
# s[sample(N, n)] <- 1
# s
# }
#
# srswr <- function (n, N)
# # as.vector(rmultinom(1, n, rep(n/N, times = N)))
# if(n==0) rep(0, N) else as.vector(rmultinom(1, n, rep(n/N, times = N)))
#
#
# UPsystematic <- function (pik, eps = 1e-06)
# {
# if (any(is.na(pik)))
# stop("there are missing values in the pik vector")
# list = pik > eps & pik < 1 - eps
# pik1 = pik[list]
# N = length(pik1)
# a = (c(0, cumsum(pik1)) - runif(1, 0, 1))%%1
# s1 = as.integer(a[1:N] > a[2:(N + 1)])
# s = pik
# s[list] = s1
# s
# }
#
# UPpoisson <- function (pik)
# {
# if (any(is.na(pik)))
# stop("there are missing values in the pik vector")
# as.numeric(runif(length(pik)) < pik)
# }
#
#
#
# if (missing(method)) {
# warning("the method is not specified; by default, the method is srswor")
# method = "srswor"
# }
# if (!(method %in% c("srswor", "srswr", "poisson", "systematic")))
# stop("the name of the method is wrong")
# if (method %in% c("poisson", "systematic") & missing(pik))
# stop("the vector of probabilities is missing")
# if (missing(stratanames) | is.null(stratanames)) {
# if (method == "srswor")
# result = data.frame((1:nrow(data))[srswor(size, nrow(data)) ==
# 1], rep(size/nrow(data), size))
# if (method == "srswr") {
# s = srswr(size, nrow(data))
# st = s[s != 0]
# l = length(st)
# result = data.frame((1:nrow(data))[s != 0])
# if (size <= nrow(data))
# result = cbind.data.frame(result, st, prob = rep(size/nrow(data),
# l))
# else {
# prob = rep(size/nrow(data), l)/sum(rep(size/nrow(data),
# l))
# result = cbind.data.frame(result, st, prob)
# }
# colnames(result) = c("id", "replicates", "prob")
# }
# if (method == "poisson") {
# pikk = inclusionprobabilities(pik, size)
# s = (UPpoisson(pikk) == 1)
# if (length(s) > 0)
# result = data.frame((1:nrow(data))[s], pikk[s])
# if (description)
# cat("\nPopulation total and number of selected units:",
# nrow(data), sum(s), "\n")
# }
# if (method == "systematic") {
# pikk = inclusionprobabilities(pik, size)
# s = (UPsystematic(pikk) == 1)
# result = data.frame((1:nrow(data))[s], pikk[s])
# }
# if (method != "srswr")
# colnames(result) = c("id", "prob")
# if (description & method != "poisson")
# cat("\nPopulation total and number of selected units:",
# nrow(data), sum(size), "\n")
# }
# else {
# data = data.frame(data)
# index = 1:nrow(data)
# m = match(stratanames, colnames(data))
# if (any(is.na(m)))
# stop("the names of the strata are wrong")
# data2 = cbind.data.frame(data[, m], index)
# colnames(data2) = c(stratanames, "index")
# x1 = data.frame(unique(data[, m]))
# colnames(x1) = stratanames
# result = NULL
# for (i in 1:nrow(x1)) {
# if (is.vector(x1[i, ]))
# data3 = data2[data2[, 1] == x1[i, ], ]
# else {
# as = data.frame(x1[i, ])
# names(as) = names(x1)
# data3 = merge(data2, as, by = intersect(names(data2),
# names(as)))
# }
# y = sort(data3$index)
# if (description & method != "poisson") {
# cat("Stratum", i, "\n")
# cat("\nPopulation total and number of selected units:",
# length(y), size[i], "\n")
# }
# if (method != "srswr" & length(y) < size[i]) {
# stop("not enough obervations in the stratum ",
# i, "\n")
# st = c(st, NULL)
# }
# else {
# if (method == "srswor") {
# st = y[srswor(size[i], length(y)) == 1]
# r = cbind.data.frame(data2[st, ], rep(size[i]/length(y),
# size[i]))
# }
# if (method == "systematic") {
# pikk = inclusionprobabilities(pik[y], size[i])
# s = (UPsystematic(pikk) == 1)
# st = y[s]
# r = cbind.data.frame(data2[st, ], pikk[s])
# }
# if (method == "srswr") {
# s = srswr(size[i], length(y))
# st = rep(y[s != 0], s[s != 0])
# l = length(st)
# if (size[i] <= length(y))
# r = cbind.data.frame(data2[st, ], prob = rep(size[i]/length(y),
# l))
# else {
# prob = rep(size[i]/length(y), l)/sum(rep(size[i]/length(y),
# l))
# r = cbind.data.frame(data2[st, ], prob)
# }
# }
# if (method == "poisson") {
# pikk = inclusionprobabilities(pik[y], size[i])
# s = (UPpoisson(pikk) == 1)
# if (any(s)) {
# st = y[s]
# r = cbind.data.frame(data2[st, ], pikk[s])
# if (description) {
# cat("Stratum", i, "\n")
# cat("\nPopulation total and number of selected units:",
# length(y), length(st), "\n")
# }
# }
# else {
# if (description) {
# cat("Stratum", i, "\n")
# cat("\nPopulation total and number of selected units:",
# length(y), 0, "\n")
# }
# r = NULL
# }
# }
# }
# # corrected 7.4.2014 for allowing size=0 for a stratum:
# # if (!is.null(r)) {
# if (!is.null(r) & nrow(r)>0) {
# r = cbind(r, i)
# result = rbind.data.frame(result, r)
# }
# }
#
# # original, seems a bit "over-ifed"
# # if (method == "srswr")
# # colnames(result) = c(stratanames, "ID_unit", "Prob", "Stratum")
# # else colnames(result) = c(stratanames, "ID_unit", "Prob", "Stratum")
#
# colnames(result) <- c(stratanames, "id", "prob", "stratum")
#
# if (description) {
# cat("Number of strata ", nrow(x1), "\n")
# if (method == "poisson")
# cat("Total number of selected units", nrow(result),
# "\n")
# else cat("Total number of selected units", sum(size),
# "\n")
# }
# }
# result
# }
SampleTwins <- function (x, stratanames = NULL, twins,
                         method = c("srswor", "srswr", "poisson", "systematic"),
                         pik, description = FALSE) {
  # Draw a sample from x that matches the strata structure of the
  # data.frame 'twins': for every combination of the strata variables the
  # same number of records is sampled from x as occurs in twins
  # ("statistical twins"). Sampling is delegated to Strata().
  # sort data first
  # NOTE(review): order() is applied to the per-column results of order()
  # here, which looks suspicious -- confirm x is sorted as intended.
  x <- x[do.call("order", lapply(x[,stratanames], order)),]
  # define the frequencies: counts per strata combination found in twins
  twinsize <- as.data.frame.table(xtabs( as.formula(gettextf("~ %s", paste(stratanames, collapse="+"))), twins))
  # complete the size table with all combinations present in x (0 if absent)
  size <- merge(x=expand.grid(lapply(x[stratanames], unique)),
                y=twinsize, all.x=TRUE, all.y=TRUE)
  size$Freq[is.na(size$Freq)] <- 0
  s <- Strata(x = x, stratanames = stratanames, size=size$Freq, method=method,
              pik=pik, description=description)
  # warn if the sampled strata frequencies do not reproduce those of twins
  if(!identical(table(s[,stratanames]), table(twins[,stratanames]))) {
    warning("Could not find a twin for all records. Enlighten the restrictions!")
  }
  return(s)
}
## stats: distributions ---------------------------------
dBenf <- function(x, ndigits = 1, log = FALSE) {
  # Density (probability mass) of the Benford distribution for the first
  # (ndigits = 1) or first two (ndigits = 2) significant digits;
  # log = TRUE returns the log-density.
  if (!IsNumeric(ndigits, length.arg = 1,
                 positive = TRUE, integer.valued = TRUE) ||
      ndigits > 2)
    stop("argument 'ndigits' must be 1 or 2")
  dmin <- ifelse(ndigits == 1, 1, 10)
  dmax <- ifelse(ndigits == 1, 9, 99)
  if (!is.logical(log.arg <- log) || length(log) != 1)
    stop("bad input for argument 'log'")
  rm(log)
  # start with NA, fill in the Benford mass for finite values >= dmin
  dens <- x * NA
  ok <- is.finite(x) & (x >= dmin)
  dens[ok] <- log10(1 + 1/x[ok])
  # anything outside the digit range or non-integer has zero mass
  zero <- !is.na(x) & !is.nan(x) &
    ((x < dmin) | (x > dmax) | (x != round(x)))
  dens[zero] <- 0.0
  if (log.arg) log(dens) else dens
}
rBenf <- function(n, ndigits = 1) {
  # Random deviates from the Benford distribution for the first one or two
  # significant digits, drawn by inversion of the CDF (pBenf).
  if (!IsNumeric(ndigits, length.arg = 1,
                 positive = TRUE, integer.valued = TRUE) ||
      ndigits > 2)
    stop("argument 'ndigits' must be 1 or 2")
  lowerlimit <- ifelse(ndigits == 1, 1, 10)
  upperlimit <- ifelse(ndigits == 1, 9, 99)
  # like the stats r* functions: if n has length > 1, its length is used
  use.n <- if ((length.n <- length(n)) > 1) length.n else
    if (!IsNumeric(n, integer.valued = TRUE,
                   length.arg = 1, positive = TRUE))
      stop("bad input for argument 'n'") else n
  myrunif <- runif(use.n)
  # inverse-CDF sampling: start at the smallest digit and bump every draw
  # whose uniform falls into the (pBenf(ii-1), pBenf(ii)] interval
  ans <- rep(lowerlimit, length = use.n)
  for (ii in (lowerlimit+1):upperlimit) {
    indexTF <- (pBenf(ii-1, ndigits = ndigits) < myrunif) &
      (myrunif <= pBenf(ii, ndigits = ndigits))
    ans[indexTF] <- ii
  }
  ans
}
pBenf <- function(q, ndigits = 1, log.p = FALSE) {
  # Distribution function of the Benford distribution for the first one or
  # two significant digits; log.p = TRUE returns log-probabilities.
  if (!IsNumeric(ndigits, length.arg = 1,
                 positive = TRUE, integer.valued = TRUE) ||
      ndigits > 2)
    stop("argument 'ndigits' must be 1 or 2")
  dmin <- ifelse(ndigits == 1, 1, 10)
  dmax <- ifelse(ndigits == 1, 9, 99)
  prob <- q * NA
  qf <- floor(q)
  # P(D <= q) = log10(1 + floor(q)), shifted by 1 for the two-digit case
  inside <- is.finite(q) & (qf >= dmin)
  prob[inside] <- log10(1 + qf[inside]) -
    ifelse(ndigits == 1, 0, 1)
  valid <- !is.na(q) & !is.nan(q)
  prob[valid & (q >= dmax)] <- 1
  prob[valid & (q < dmin)] <- 0
  if (log.p) log(prob) else prob
}
qBenf <- function(p, ndigits = 1) {
  # Quantile function of the Benford distribution for the first one or two
  # significant digits: smallest digit d with pBenf(d) >= p.
  if (!IsNumeric(ndigits, length.arg = 1,
                 positive = TRUE, integer.valued = TRUE) ||
      ndigits > 2)
    stop("argument 'ndigits' must be 1 or 2")
  lowerlimit <- ifelse(ndigits == 1, 1, 10)
  upperlimit <- ifelse(ndigits == 1, 9, 99)
  # probabilities must lie in [0, 1]
  bad <- !is.na(p) & !is.nan(p) & ((p < 0) | (p > 1))
  if (any(bad))
    stop("bad input for argument 'p'")
  # invert the CDF stepwise, analogous to rBenf()
  ans <- rep(lowerlimit, length = length(p))
  for (ii in (lowerlimit+1):upperlimit) {
    indexTF <- is.finite(p) &
      (pBenf(ii-1, ndigits = ndigits) < p) &
      (p <= pBenf(ii, ndigits = ndigits))
    ans[indexTF] <- ii
  }
  # propagate NAs and pin the boundary probabilities to the digit range
  ans[ is.na(p) | is.nan(p)] <- NA
  ans[!is.na(p) & !is.nan(p) & (p == 0)] <- lowerlimit
  ans[!is.na(p) & !is.nan(p) & (p == 1)] <- upperlimit
  ans
}
dRevGumbel <- function (x, location = 0, scale = 1) {
  # Density of the "reverse" (minimum) Gumbel distribution.
  # from VGAM -- if (is.null(x)) FALSE else ifelse(is.na(x), FALSE, x)
  if (!IsNumeric(scale, positive=TRUE))
    stop("\"scale\" must be positive")
  z <- exp((x - location) / scale)
  z * exp(-z) / scale
}
pRevGumbel <- function (q, location = 0, scale = 1) {
  # Distribution function of the "reverse" (minimum) Gumbel distribution.
  if (!IsNumeric(scale, positive=TRUE))
    stop("\"scale\" must be positive")
  z <- exp((q - location) / scale)
  1 - exp(-z)
}
qRevGumbel <- function (p, location = 0, scale = 1) {
  # Quantile function of the "reverse" (minimum) Gumbel distribution.
  if (!IsNumeric(scale, positive=TRUE))
    stop("\"scale\" must be positive")
  w <- log(-log(p))
  location + scale * w
}
# Quantile function of the reverse Gumbel distribution on the exp scale
qRevGumbelExp <- function (p) exp(qRevGumbel(p))
rRevGumbel <- function (n, location = 0, scale = 1)
{
  # Random deviates from the "reverse" (minimum) Gumbel distribution,
  # generated by inversion: location + scale * log(-log(U)).
  # Fix: the original's first check validated 'scale' (copy-paste slip)
  # while its error message talked about "n"; it now really inspects n.
  if (!IsNumeric(n, positive=TRUE, integer.valued=TRUE))
    stop("bad input for argument \"n\"")
  if (!IsNumeric(scale, positive=TRUE))
    stop("\"scale\" must be positive")
  location + scale * log(-log(runif(n)))
}
RndPairs <- function(n, r, rdist1 = rnorm(n=n, mean = 0, sd = 1), rdist2 = rnorm(n=n, mean = 0, sd = 1)){
  # Create n pairs of correlated random numbers (target correlation r)
  # by multiplying two independent columns with the Cholesky root of the
  # 2x2 correlation matrix.
  z <- matrix(nrow=n, ncol=2, data=cbind(rdist1, rdist2))
  corr <- matrix(nrow=2, ncol=2, data=c(1, r, r, 1))
  data.frame(z %*% chol(corr))
}
RndWord <- function(size, length, x = LETTERS, replace = TRUE, prob = NULL){
  # Generate 'size' random character strings of the given length, drawing
  # the characters from x (optionally without replacement / with weights).
  vapply(seq_len(size),
         function(i) paste(sample(x=x, size=length, replace=replace, prob=prob),
                           collapse=""),
         character(1))
}
## basic finance functions ---------------
NPV <- function(i, cf, t=seq(along=cf)-1) {
  # Net present value: discount the cash flows cf at rate i over the
  # (zero-based) periods t and sum them up.
  discounted <- cf / (1 + i)^t
  sum(discounted)
}
IRR <- function(cf, t=seq(along=cf)-1) {
  # Internal rate of return: the discount rate in (0, 1) at which the
  # net present value of the cash flows cf becomes zero.
  root <- uniroot(NPV, interval = c(0, 1), cf = cf, t = t)
  root$root
}
OPR <- function (K, D = NULL, log = FALSE) {
  # One-period returns (Einperiodenrenditen) from a price series K and
  # optional payouts D; log = TRUE gives continuously compounded returns.
  if (is.null(D))
    D <- rep(0, length(K))
  prev <- K[-length(K)]     # price at the beginning of each period
  gain <- D[-1] + K[-1]     # payout plus price at the period end
  if (log)
    res <- log(gain / prev)
  else
    res <- (gain - prev) / prev
  return(res)
}
NPVFixBond <- function(i, Co, RV, n){
  # Net present value of a fixed-rate bond: coupon Co paid for n periods
  # plus redemption value RV at maturity, discounted at rate i.
  disc <- (1 + i)^(1:n)
  sum(Co / disc) + RV / (1 + i)^n
}
YTM <- function(Co, PP, RV, n){
  # Yield to maturity of a fixed-rate bond: the rate at which the present
  # value of coupons Co and redemption RV equals the purchase price PP.
  pricing_gap <- function(i)
    sum(Co / (1 + i)^(1:n), RV / (1 + i)^n) - PP
  uniroot(pricing_gap, c(0, 1))$root
}
## utils: manipulation, utilities ====
InDots <- function(..., arg, default){
  # Extract the value of the argument named 'arg' from the dots-arguments;
  # if it was not supplied, return 'default' instead.
  # NOTE(review): match.call() captures the UNEVALUATED dots, so this works
  # for literal values; symbols/expressions would not be evaluated here --
  # confirm callers only pass literals.
  # was arg in the dots-args? parse dots.arguments
  arg <- unlist(match.call(expand.dots=FALSE)$...[arg])
  # if arg was not in ... then return default
  if(is.null(arg)) arg <- default
  return(arg)
}
FctArgs <- function(name, sort=FALSE) {
  # Print and invisibly return the formal arguments of a function with
  # their default values as a data.frame; character defaults are shown
  # quoted. 'name' may be a function object or a function name.
  # sort=TRUE orders the arguments alphabetically, keeping "..." last.
  # got that somewhere, but don't know from where...
  if(is.function(name)) name <- as.character(substitute(name))
  # look the function up on the search path (starting at position 1)
  a <- formals(get(name, pos=1))
  if(is.null(a))
    return(NULL)
  arg.labels <- names(a)
  arg.values <- as.character(a)
  # re-quote character defaults, as as.character() strips the quotes
  char <- sapply(a, is.character)
  arg.values[char] <- paste("\"", arg.values[char], "\"", sep="")
  if(sort)
  {
    ord <- order(arg.labels)
    # move "..." to the end of the sorted argument list
    if(any(arg.labels == "..."))
      ord <- c(ord[-which(arg.labels[ord]=="...")],
               which(arg.labels=="..."))
    arg.labels <- arg.labels[ord]
    arg.values <- arg.values[ord]
  }
  # I() keeps the values as plain strings (no factor conversion)
  output <- data.frame(value=I(arg.values), row.names=arg.labels)
  print(output, right=FALSE)
  invisible(output)
}
Keywords <- function( topic ) {
  # Without an argument: display R's documentation KEYWORDS file.
  # With a topic (quoted or unquoted): return the names of the keywords
  # under which that help topic is filed.
  # verbatim from library(gtools)
  file <- file.path(R.home("doc"),"KEYWORDS")
  if(missing(topic))
  {
    file.show(file)
  } else {
    #   ## Local copy of trim.character to avoid cyclic dependency with gdata ##
    #   trim <- function(s)  {
    #
    #     s <- sub(pattern="^[[:blank:]]+", replacement="", x=s)
    #     s <- sub(pattern="[[:blank:]]+$", replacement="", x=s)
    #     s
    #   }
    # read the KEYWORDS table and reduce each line to the bare keyword name
    kw <- scan(file=file, what=character(), sep="\n", quiet=TRUE)
    kw <- grep("&", kw, value=TRUE)
    kw <- gsub("&[^&]*$","", kw)
    kw <- gsub("&+"," ", kw)
    kw <- na.omit(StrTrim(kw))
    # allow the topic to be passed unquoted (deparse it if not a string)
    ischar <- tryCatch(is.character(topic) && length(topic) ==
                         1L, error = identity)
    if (inherits(ischar, "error"))
      ischar <- FALSE
    if (!ischar)
      topic <- deparse(substitute(topic))
    item <- paste("^",topic,"$", sep="")
    # old, replaced by suggestion of K. Hornik 23.2.2015
    # topics <- function(k) help.search(keyword=k)$matches[,"topic"]
    # all help topics filed under keyword k (column name is case-insensitive)
    topics <- function(k) {
      matches <- help.search(keyword=k)$matches
      matches[ , match("topic", tolower(colnames(matches)))]
    }
    matches <- lapply(kw, topics)
    names(matches) <- kw
    # keep the keywords whose topic list contains the requested topic
    tmp <- unlist(lapply( matches, function(m) grep(item, m, value=TRUE) ))
    names(tmp)
  }
}
SysInfo <- function() {
  # Compile and print information about the computing system/environment:
  # OS, node, user, R version, platform and the attached packages with
  # their versions; the collected info string is returned invisibly.
  package.names <- sapply(sessionInfo()[['otherPkgs']],'[[','Package')
  package.versions <- sapply(sessionInfo()[['otherPkgs']],'[[','Version')
  packages.all <- paste(gettextf("%s (%s)", package.names, package.versions), collapse=", ")
  pars.sys <- c('user', 'nodename', 'sysname', 'release')
  R.system <- paste(sessionInfo()[[1]]$version.string)
  sys.info <- paste(pars.sys, Sys.info()[pars.sys], collapse=', ', sep=': ')
  all.info <- paste(c(sys.info,', ', R.system,', installed Packages: ', packages.all),
                    sep='', collapse='')
  # Fix: the trailing "\n\n" was passed as an unused 4th gettextf()
  # argument and never printed; it belongs to cat().
  cat(gettextf("\nSystem: %s\nNodename: %s, User: %s",
               paste(Sys.info()[c("sysname","release","version")], collapse=" ")
               , Sys.info()["nodename"], Sys.info()["user"]), "\n\n")
  # memory.limit() is Windows-only and defunct since R >= 4.2 -- don't let
  # it abort the whole report on other platforms / newer R versions
  memlim <- tryCatch(suppressWarnings(memory.limit()), error = function(e) NA)
  if(!is.na(memlim))
    cat(gettextf("\nTotal Memory: %s MB\n\n", memlim))
  cat(StrTrim(sessionInfo()$R.version$version.string), "\n")
  cat(sessionInfo()$platform, "\n")
  cat("\nLoaded Packages: \n", packages.all, "\n")
  DescToolsOptions()
  invisible(all.info)
}
FindRProfile <- function(){
  # Return the paths of the Rprofile files that actually exist, in the
  # order in which R would consider them.
  paths <- c(Sys.getenv("R_PROFILE"),
             file.path(Sys.getenv("R_HOME"), "etc", "Rprofile.site"),
             Sys.getenv("R_PROFILE_USER"),
             file.path(getwd(), ".Rprofile"))
  paths[file.exists(paths)]
}
DescToolsOptions <- function (..., default = NULL, reset = FALSE) {
  # Get and set DescTools package options (stored under the single R option
  # "DescTools"), analogous to base options():
  #   DescToolsOptions()                return all options (system defaults
  #                                     merged with user settings)
  #   DescToolsOptions("digits")        return one option (or its default)
  #   DescToolsOptions("digits", default=5)  fall back to 5 if unset
  #   DescToolsOptions(digits=5)        set an option, old set returned
  #                                     invisibly
  #   DescToolsOptions(reset=TRUE)      restore the system defaults
  # unwrap a length-1 list to its single element
  .Simplify <- function(x)
    if(is.list(x) && length(x)==1L)
      x[[1L]]
  else
    x
  # all system defaults
  def <- list(
    col = c(hblue, hred, horange),
    digits = 3,
    fixedfont = structure(list(name = "Consolas", size = 7), class = "font"),
    fmt = structure(list(
      abs = structure(list(digits = 0, big.mark = "'"), .Names = c("digits", "big.mark"),
                      name = "abs", label = "Number format for counts",
                      default = TRUE, class = "fmt"),
      per = structure(list(digits = 1, fmt = "%"), .Names = c("digits", "fmt"),
                      name = "per", label = "Percentage number format",
                      default = TRUE, class = "fmt"),
      num = structure(list(digits = 3, big.mark = "'"), .Names = c("digits", "big.mark"),
                      name = "num", label = "Number format for floats",
                      default = TRUE, class = "fmt")), name = "fmt"),
    footnote = c("'", "\"", "\"\""),
    lang = "engl",
    plotit = TRUE,
    stamp = expression(gettextf("%s/%s", Sys.getenv("USERNAME"),
                                Format(Today(), fmt = "yyyy-mm-dd"))),
    lastWrd=NULL,
    lastXL=NULL,
    lastPP=NULL
  )
  # potentionally evaluate dots
  dots <- lapply(list(...), function(x) {
    if (is.symbol(x))
      eval(substitute(x, env = parent.frame()))
    else
      x
  })
  # reduce length[[1]] list to a list n (exclude single named argument)
  if(length(dots)==1L && is.list(dots) &&
     !(length(dots)==1 && !is.null(names(dots))))
    dots <- dots[[1]]
  # refuse to work with several options and defaults
  if (length(dots) > 1L && !is.null(default))
    stop("defaults can only be used with single options")
  # ignore anything else, set the defaults and return old values
  if (reset == TRUE)
    invisible(options(DescTools = def))
  # flag these values as defaults, not before they are potentially reset
  # do not set on lastXYZ options (can't set attribute on NULL values)
  # NOTE(review): -c(9:11) hard-codes the positions of lastWrd/lastXL/lastPP
  # in def -- keep in sync when entries are added to the default list.
  for(i in seq_along(def)[-c(9:11)])
    attr(def[[i]], "default") <- TRUE
  opt <- getOption("DescTools")
  # store such as to return as result
  old <- opt
  # take defaults and overwrite found entries in options
  def[names(opt)] <- opt
  opt <- def
  # no names were given, so just return all options
  if (length(dots) == 0) {
    return(opt)
  } else {
    # entries were supplied, now check if there were named entries
    # dots is then a list with length 1
    if (is.null(names(dots))) {
      # if no names, check default and return either the value
      # or if this does not exist, the default
      if (!is.null(default))
        # a default is given, so get old option value and replace with user default
        # when it's NULL
        # note: in old are the original option values (no system defaults)
        return(.Simplify(ifelse(is.null(old[[dots]]), default, old[[dots]])))
      else
        # no defaults given, so return options, evt. sys defaults
        # reduce list to value, if length 1
        return(.Simplify(opt[unlist(dots)]))
    } else {
      # there are named values, so these are to be stored
      # restore old options in opt (no defaults should be stored)
      opt <- old
      if (is.null(opt))
        opt <- list()
      opt[names(dots)] <- dots
      # store full option set
      options(DescTools = opt)
      # return only the new set variables
      old <- old[names(dots)]
    }
  }
  invisible(old)
}
# DescToolsOptions <- function(..., default=NULL, reset=FALSE){
#
# .Simplify <- function(x)
# # return first element of a list, if it's the only one
# if(is.list(x) && length(x)==1)
# x[[1]]
# else
# x
#
#
# def <- list(
# col=c(hred, hblue, hgreen),
# digits=3,
# fixedfont=structure(list(name="Consolas", size=7), class="font"),
# fmt=structure(
# list(
# abs=structure(list(digits = 0, big.mark = "'"),
# .Names = c("digits","big.mark"),
# name = "abs", label = "Number format for counts",
# default=TRUE, class = "fmt"),
# per=structure(list(digits = 1, fmt = "%"),
# .Names = c("digits","big.mark"), name = "per",
# label = "Percentage number format",
# default=TRUE, class = "fmt"),
# num=structure(list(digits = 3, big.mark = "'"),
# .Names = c("digits","big.mark"), name = "num",
# label = "Number format for floats",
# default=TRUE, class = "fmt")
# ), name="fmt"),
#
# footnote=c("'", '"', '""'),
# lang="engl",
# plotit=TRUE,
# stamp=expression(gettextf("%s/%s", Sys.getenv("USERNAME"), Format(Today(), fmt = "yyyy-mm-dd"))),
# lastWrd=NULL,
# lastXL=NULL,
# lastPP=NULL
# )
#
#
# # potentionally evaluate dots
# dots <- lapply(list(...), function(x){
# if(is.symbol(x))
# eval(substitute(x, env = parent.frame()))
# else
# x
# })
#
# # refuse to work with several options and defaults
# if(length(dots)>1 && !is.null(default))
# stop("defaults can only be used with single options")
#
# opt <- getOption("DescTools")
#
# old <- opt
#
# if(reset==TRUE)
# # reset the options and return old values invisible
# options(DescTools=def)
#
# if(length(dots)==0) {
# # no arguments, just return the options
# return(.Simplify(opt))
#
# } else {
# if(is.null(names(dots))){
# # get the option and return either value or the default
# if(!is.null(default))
# # just one allowed here, can we do better?? **********
# return(.Simplify(Coalesce(opt[dots[[1]]], default)))
#
# else
# # more values allowed
# return(.Simplify(opt[unlist(dots)]))
#
# } else {
# #set the options
# if(is.null(opt))
# opt <- list()
#
# opt[names(dots)[[1]]] <- dots[[1]]
#
# # let default options return the result
# .Simplify(options(DescTools=opt))
# }
# }
#
# invisible(old)
#
# }
fmt <- function(...){
  # Legacy lowercase variant of Fmt(): retrieve/modify/define number-format
  # templates. NOTE(review): several spots below look broken (see inline
  # notes); as written, fmt() with any unnamed argument still returns ALL
  # templates because dots is emptied unconditionally. Prefer Fmt().
  # get format templates and modify on the fly, e.g. other digits
  # x is the name of the template
  def <- structure(
    list(
      abs=structure(list(digits = 0, big.mark = "'"),
                    label = "Number format for counts",
                    default=TRUE, class = "fmt"),
      per=structure(list(digits = 1, fmt = "%"),
                    label = "Percentage number format",
                    default=TRUE, class = "fmt"),
      num=structure(list(digits = 0, big.mark = "'"),
                    label = "Number format for floating points",
                    default=TRUE, class = "fmt")
    ), name="fmt")
  # get a format from the fmt templates options
  res <- DescToolsOptions("fmt")[[1]]
  # find other defined fmt in .GlobalEnv and append to list
  # found <- ls(parent.frame())[ lapply(lapply(ls(parent.frame()), function(x) gettextf("class(%s)", x)),
  #                   function(x) eval(parse(text=x))) == "fmt" ]
  # if(length(found)>0){
  #   udf <- lapply(found, function(x) eval(parse(text=x)))
  #   names(udf) <- found
  # }
  # collect all found formats, defaults included if not set as option
  # abs, per and num must always be available, even if not explicitly defined
  res <- c(res, def[names(def) %nin% names(res)]) #, udf)
  # get additional arguments
  dots <- match.call(expand.dots=FALSE)$...
  # leave away all NULL values, these should not overwrite the defaults below
  # NOTE(review): is.null(dots) is a single FALSE for non-empty dots, so
  # this always yields an empty list -- presumably
  # dots[!sapply(dots, is.null)] was intended. TODO confirm and fix.
  dots <- dots[is.null(dots)]
  # functionality:
  # Fmt()                   return all from options
  # Fmt("abs")              return abs
  # Fmt("abs", digits=3)    return abs with updated digits
  # Fmt(c("abs","per"))     return abs and per
  # Fmt(nob=as.Fmt(digits=10, na.form="nodat"))   set nob
  # NOTE(review): names(dots) of an empty list is NULL, so this branch is
  # effectively dead given the filtering above -- verify before relying on
  # fmt() to store templates.
  if(all(!is.null(names(dots)))){
    # set value
    old <- options("DescTools")
    opt <- old
    opt$fmt[[names(dots)]] <- dots
    options(DescTools=opt)
    # same behaviour as options
    invisible(old)
  } else {
    if(!length(dots))
      return(res)
    # select the requested ones by name
    # NOTE(review): is.null(names(dots)) is a scalar; element-wise
    # names(dots) == "" was probably intended here.
    fnames <- unlist(dots[is.null(names(dots))])
    res <- res[fnames]
    # modify additional arguments in the template definition
    for(z in names(res)){
      if(!is.null(res[[z]]))
        # use named dots
        res[[z]][names(dots[!is.null(names(dots))])] <- dots[!is.null(names(dots))]
    }
    # set names as given, especially for returning the ones not found
    # ???? names(res) <- fnames
    # reduce list, this should not be necessary, but to make sure
    # if(length(res)==1)
    #   res <- res[[1]]
    return(res)
  }
}
as.fmt <- function(...){
  # Coerce the supplied named settings (digits, big.mark, fmt, ...) into
  # a number-format template of class "fmt".
  # dots <- match.call(expand.dots=FALSE)$...
  # new by 0.99.22
  tpl <- list(...)
  attr(tpl, "label") <- "Number format"
  class(tpl) <- "fmt"
  tpl
}
ParseSASDatalines <- function(x, env = .GlobalEnv, overwrite = FALSE) {
  # Parse a SAS DATA step with DATALINES/CARDS (given as a string x) into
  # a data.frame; if the step names a dataset, the result is also assigned
  # under that name in 'env' (asking before overwriting unless
  # overwrite = TRUE). Returns the data.frame.
  # see: http://www.psychstatistics.com/2012/12/07/using-datalines-in-sas/
  # or: http://www.ats.ucla.edu/stat/sas/library/SASRead_os.htm
  # split command to list by means of ;
  lst <- StrTrim(strsplit(x, ";")[[1]])
  dsname <- lst[grep(pattern = "^[Dd][Aa][Tt][Aa] ", StrTrim(lst))]   # this would be the dataname
  dsname <- gsub(pattern = "^[Dd][Aa][Tt][Aa] +", "", dsname)
  # get the columnnames from the input line
  input <- lst[grep(pattern = "^[Ii][Nn][Pp][Uu][Tt]", StrTrim(lst))]
  # get rid of potential single @
  input <- gsub("[ \n\t]@+[ \n\t]*", "", input)
  input <- gsub(pattern=" +\\$", "$", input)
  input <- gsub(" +", " ", input)
  cnames <- strsplit(input, " ")[[1]][-1]
  # the default values for the variables: 0 for numeric, '' for character
  # (a trailing $ marks character variables in SAS INPUT syntax)
  def <- rep(0, length(cnames))
  def[grep("\\$$", cnames)] <- "''"
  vars <- paste(gsub("\\$$","",cnames), def, sep="=", collapse=",")
  # the data block follows directly after the datalines/cards statement
  datalines <- lst[grep("datalines|cards|cards4", tolower(lst))+1]
  # build a scan() call with a what-list matching the variable types
  res <- eval(parse(text=gettextf(
    "data.frame(scan(file=textConnection(datalines),
                what=list(%s), quiet=TRUE))", vars)))
  if(length(dsname) > 0){   # check if a dataname could be found
    if( overwrite | ! exists(dsname, envir=env) ) {
      assign(dsname, res, envir=env)
    } else {
      # interactive confirmation before clobbering an existing object
      cat(gettextf("The file %s already exists in %s. Should it be overwritten? (y/n)\n"
                   , dsname, deparse(substitute(env))))
      ans <- readline()
      if(ans == "y")
        assign(dsname, res, envir = env)
      # stop(gettextf("%s already exists in %s. Use overwrite = TRUE to overwrite it.", dsname, deparse(substitute(env))))
    }
  }
  return(res)
}
SetNames <- function (x, ...) {
  # Set names, rownames and/or colnames of x in a single call
  # (companion to stats::setNames). Recognised named arguments in ...:
  # "colnames", "rownames", "names"; they are applied in that order.
  dots <- list(...)
  for (what in intersect(c("colnames", "rownames", "names"), names(dots))) {
    switch(what,
           colnames = colnames(x) <- dots[["colnames"]],
           rownames = rownames(x) <- dots[["rownames"]],
           names    = names(x)    <- dots[["names"]])
  }
  x
}
InsRow <- function(m, x, i, row.names=NULL){
  # Insert x as row(s) into matrix m *before* position i.
  # i = 1 prepends, i > nrow(m) appends; column names of m are kept.
  x <- matrix(x, ncol = ncol(m))
  if (!is.null(row.names))
    rownames(x) <- row.names
  n <- nrow(m)
  res <- if (i == 1) {
    rbind(x, m)
  } else if (i > n) {
    rbind(m, x)
  } else {
    rbind(m[seq_len(i - 1), ], x, m[i:n, ])
  }
  colnames(res) <- colnames(m)
  res
}
InsCol <- function(m, x, i, col.names=NULL){
  # Insert x as column(s) into matrix m *before* position i.
  # i = 1 prepends, i > ncol(m) appends; row names of m are kept.
  x <- matrix(x, nrow = nrow(m))
  if (!is.null(col.names))
    colnames(x) <- col.names
  n <- ncol(m)
  res <- if (i == 1) {
    cbind(x, m)
  } else if (i > n) {
    cbind(m, x)
  } else {
    cbind(m[, seq_len(i - 1)], x, m[, i:n])
  }
  rownames(res) <- rownames(m)
  res
}
Rename <- function(x, ..., gsub=FALSE, fixed=TRUE, warn=TRUE){
  # Rename the names of x. Replacements are given in ... either as
  # named arguments (old = "new") or, unnamed, by position.
  #   gsub  ... if TRUE, treat the old names as (sub)string patterns and
  #             substitute them inside every name instead of matching whole names
  #   fixed ... passed to gsub(): treat patterns literally
  #   warn  ... warn when an old name is not found in names(x)
  subst <- c(...)
  # unnamed replacements apply positionally to the first names of x
  if (is.null(names(subst)))
    names(subst) <- names(x)[seq_along(subst)]
  if (gsub) {
    nm <- names(x)
    for (k in seq_along(subst))
      nm <- gsub(names(subst)[k], subst[k], nm, fixed = fixed)
    names(x) <- nm
  } else {
    pos <- match(names(subst), names(x))
    if (anyNA(pos)) {
      if (warn) warning("unused name(s) selected")
      keep <- !is.na(pos)
      if (any(keep))
        subst <- subst[keep]
      pos <- pos[keep]
    }
    if (length(pos))
      names(x)[pos] <- subst
  }
  x
}
# This does not work, because x does not come as a reference
# AddLabel <- function(x, text = ""){
# ### add an attribute named "label" to a variable in a data.frame
# attr(x, "label") <- text
# }
# attr(d.pizza$driver, "label") <- "The driver delivering the pizza"
# AddLabel(d.pizza$driver, "lkj?lkjlkjlk?lkj lkj lkj lkadflkj alskd lkas")
# simplified from Hmisc
Label <- function(x) {
  # Return the "label" attribute of x (NULL if none is set).
  # Simplified from Hmisc::label.
  attr(x, "label")
}
"Label<-" <- function(x, value) {
  # Replacement function: attach value as the "label" attribute of x.
  # value must be NULL (removes the label) or a length-1 vector.
  if (is.list(value))
    stop("cannot assign a list to be an object label")
  if (!is.null(value) && length(value) != 1L)
    stop("value must be character vector of length 1")
  attr(x, "label") <- value
  x
}
# "Label<-.data.frame" <- function(x, self=(length(value)==1), ..., value) {
#
# if(!is.data.frame(x)) stop("x must be a data.frame")
#
# if(self){
# attr(x, "label") <- value
# } else {
# for (i in seq(along.with=x)) {
# Label(x[[i]]) <- value[[i]]
# }
# }
# return(x)
# }
# Label.data.frame <- function(x, ...) {
# labels <- mapply(FUN=Label, x=x)
# return(labels[unlist(lapply(labels, function(x) !is.null(x) ))])
# }
# SetLabel <- function (object = nm, nm) {
# Label(object) <- nm
# object
# }
`Unit<-` <- function (x, value) {
  # Replacement function: attach value as the "unit" attribute of x
  # (same contract as `Label<-`: NULL or a single value).
  if (is.list(value))
    stop("cannot assign a list to be an object label")
  if (!is.null(value) && length(value) != 1L)
    stop("value must be character vector of length 1")
  attr(x, "unit") <- value
  x
}
Unit <- function (x) attributes(x)$unit
#
# To Sort(., mixed=TRUE) for vectors
#
#
# SortMixed: order or sort strings with embedded numbers so that the
# numbers are in the correct order.
# Description:
# These functions sort or order character strings containing numbers so
# that the numbers are numerically sorted rather than sorted by character
# value, i.e. "Aspirin 50mg" will come before "Aspirin 100mg".
#
# Generic sorting function dispatching on the class of x; unlike base
# sort() it has methods for data.frames, matrices and tables.
Sort <- function(x, ...) UseMethod("Sort")
Sort.default <- function(x, ...) {
  # Fall back to base sort() for atomic vectors and other classes
  # without a dedicated Sort method.
  sort(x, ...)
}
Sort.data.frame <- function(x, ord = NULL, decreasing = FALSE, factorsAsCharacter = TRUE,
                            na.last = TRUE, ...) {
  # Sort the rows of a data.frame by one or several columns.
  #   ord ... columns to sort by, either numeric indices or column names;
  #           the name "row.names" sorts by the row names. Default: all
  #           columns left to right.
  #   decreasing ... logical, recycled to the length of ord, so each sort
  #                  key can have its own direction
  #   factorsAsCharacter ... re-level factors alphabetically so they sort
  #                          like character columns
  #   na.last ... passed on to order()
  # Returns the sorted data.frame (drop = FALSE keeps the class).
  # why not using ord argument as in matrix and table instead of ord?
  if(is.null(ord)) { ord <- 1:ncol(x) }
  # translate column names to positions; position 1 is reserved for the
  # row-names helper column added below, hence the +1 offsets
  if(is.character(ord)) {
    ord <- match(ord, c("row.names", names(x)))
  } else if(is.numeric(ord)) {
    ord <- as.integer(ord) + 1
  }
  # recycle decreasing and ord to a common length
  lgp <- list(decreasing = decreasing, ord = ord)
  lgp <- lapply(lgp, rep, length.out = max(unlist(lapply(lgp, length))))
  # build the key columns: rn (row names) first, then the data columns
  # old: d.ord <- x[,lgp$ord, drop=FALSE] # preserve data.frame with drop = FALSE
  d.ord <- data.frame(rn=rownames(x),x)[, lgp$ord, drop = FALSE] # preserve data.frame with drop = FALSE
  if(factorsAsCharacter){
    for( xn in which(sapply(d.ord, is.factor)) ){ d.ord[,xn] <- factor(d.ord[,xn], levels=sort(levels(d.ord[,xn]))) }
  }
  # coerce everything to numeric sort keys (characters via factor codes)
  d.ord[, which(sapply(d.ord, is.character))] <- lapply(d.ord[,which(sapply(d.ord, is.character)), drop=FALSE], factor)
  d.ord <- data.frame(lapply(d.ord, as.numeric))
  # order() has no vectorised 'decreasing', so negate the selected keys
  d.ord[lgp$decreasing] <- lapply(d.ord[lgp$decreasing], "-")
  x[ do.call("order", c(as.list(d.ord), na.last=na.last)), , drop = FALSE]
}
Sort.matrix <- function (x, ord = NULL, decreasing = FALSE, na.last = TRUE, ...) {
  # Sort the rows of a matrix by one or more columns.
  #   ord        ... column numbers to sort by; the keyword "row_names"
  #                  sorts by the row names. Default: all columns.
  #   decreasing ... logical, recycled to the length of ord
  #   na.last    ... passed on to order()
  if (length(dim(x)) == 1 ){
    # do not specially handle 1-dimensional matrices
    res <- sort(x=x, decreasing=decreasing)
  } else {
    if (is.null(ord)) {
      # default order by sequence of columns
      ord <- 1:ncol(x)
    }
    # replace keyword by code (0 maps to the row-names helper column below)
    ord[ord=="row_names"] <- 0
    # we have to coerce, as ord will be character if row_names is used
    ord <- as.numeric(ord)
    # recycle decreasing and ord to a common length
    lgp <- list(decreasing = decreasing, ord = ord)
    lgp <- lapply(lgp, rep, length.out = max(unlist(lapply(lgp, length))))
    # prepend a helper column encoding the row names (or the row number if
    # there are none), so that ord 0 -> column 1 after the +1 shift below
    if( is.null(row.names(x))) {
      d.x <- data.frame(cbind(rownr=1:nrow(x)), x)
    } else {
      d.x <- data.frame(cbind( rownr=as.numeric(factor(row.names(x))), x))
    }
    d.ord <- d.x[, lgp$ord + 1, drop = FALSE]
    # order() has no vectorised 'decreasing'; negate the keys instead
    d.ord[lgp$decreasing] <- lapply(d.ord[lgp$decreasing], "-")
    res <- x[do.call("order", c(as.list(d.ord), na.last=na.last)), , drop=FALSE]
    # old version cannot be used for [n,1]-matrices, we switch to reset dim
    # class(res) <- "matrix"
    # 19.9.2013: dim kills rownames, so stick to drop = FALSE
    # dim(res) <- dim(x)
  }
  return(res)
}
Sort.table <- function (x, ord = NULL, decreasing = FALSE, na.last = TRUE, ...) {
  # Sort the rows of a (2-dimensional) table by one or more columns.
  #   ord        ... column numbers to sort by (after a +1 shift: column 1
  #                  of the helper frame is the row names, the last column
  #                  is the row margin sums)
  #   decreasing ... logical, recycled to the length of ord
  #   na.last    ... passed on to order()
  if (length(dim(x)) == 1 ){
    # do not specially handle 1-dimensional tables
    res <- sort(x=x, decreasing=decreasing)
  } else {
    if (is.null(ord)) {
      ord <- 1:ncol(x)
    }
    # recycle decreasing and ord to a common length
    lgp <- list(decreasing = decreasing, ord = ord)
    lgp <- lapply(lgp, rep, length.out = max(unlist(lapply(lgp, length))))
    # helper frame: row-name codes, the table body, and the row margins
    d.x <- data.frame(cbind( rownr=as.numeric(factor(row.names(x))), x, mar=apply(x, 1, sum)))
    d.ord <- d.x[, lgp$ord + 1, drop = FALSE]
    # order() has no vectorised 'decreasing'; negate the keys instead
    d.ord[lgp$decreasing] <- lapply(d.ord[lgp$decreasing], "-")
    res <- x[do.call("order", c(as.list(d.ord), na.last=na.last)), , drop=FALSE]
    # subsetting strips the class, so restore it
    class(res) <- "table"
  }
  return(res)
}
# Generic reversing function (additional interface to base rev), with
# methods that can reverse matrices/tables/data.frames along margins.
Rev <- function(x, ...) UseMethod("Rev")
Rev.default <- function(x, ...){
  # Reverse x with base rev(). Margins are not supported here: warn if
  # one was supplied for a 1-dimensional array, unless it is the
  # (harmless) margin 1.
  # FIX: the original compared identical(list(...), 1) which is always
  # FALSE (a list is never identical to a numeric scalar), so the
  # margin == 1 exemption never took effect; compare against list(1).
  dots <- list(...)
  if(length(dots) > 0 && length(dim(x)) == 1 && !identical(dots, list(1)))
    warning("margin has been supplied and will be discarded.")
  rev(x)
}
Rev.table <- function(x, margin, ...) {
  # Reverse the order of the levels of a table along the given margin(s),
  # e.g. margin = 1 reverses the rows, margin = c(1, 2) rows and columns.
  # Dimnames are carried along by the subsetting; the class is restored.
  if (!is.array(x))
    stop("'x' is not an array")
  # build one index vector per dimension: reversed for the requested
  # margins, identity for all others
  idx <- lapply(seq_along(dim(x)), function(k) {
    s <- seq_len(dim(x)[k])
    if (k %in% margin) rev(s) else s
  })
  z <- do.call(`[`, c(list(x), idx, list(drop = FALSE)))
  class(z) <- oldClass(x)
  z
}
Rev.matrix <- function(x, margin, ...) {
  # Matrices share their implementation with tables: reverse the rows
  # and/or columns as requested by margin.
  Rev.table(x = x, margin = margin, ...)
}
Rev.data.frame <- function(x, margin, ...) {
  # Reverse the rows (margin 1) and/or columns (margin 2) of a data.frame.
  # FIXES over the original:
  #  - drop = FALSE keeps the result a data.frame even when only one
  #    column (or row) is involved (x[nrow(x):1L, ] dropped to a vector
  #    for one-column frames)
  #  - rev(seq_len(n)) is safe for empty frames, whereas nrow(x):1L
  #    yields c(0L, 1L) for a 0-row frame and indexed a nonexistent row
  if (1 %in% margin)
    x <- x[rev(seq_len(nrow(x))), , drop = FALSE]
  if (2 %in% margin)
    x <- x[, rev(seq_len(ncol(x))), drop = FALSE]
  return(x)
}
# Generic: recreate raw case-by-case data out of a frequency table.
Untable <- function(x, ...) UseMethod("Untable")
Untable.data.frame <- function(x, freq = "Freq", rownames = NULL, ...){
  # Expand a frequency data.frame (e.g. as.data.frame(table(...))) back
  # to single observations: every row is replicated according to its
  # count in the frequency column, which is then dropped.
  #   freq     ... name of the frequency column (default "Freq")
  #   rownames ... row names for the result (NULL resets them)
  if(all(is.na(match(freq, names(x)))))
    stop(gettextf("Frequency column %s does not exist!", freq))
  # Untable() on the counts yields one row index per observation; use it
  # to replicate the rows, dropping the frequency column itself
  res <- x[Untable(x[,freq], type="as.numeric")[,], -grep(freq, names(x))]
  rownames(res) <- rownames
  return(res)
}
Untable.default <- function(x, dimnames=NULL, type = NULL, rownames = NULL, colnames = NULL, ...) {
  # recreates the data.frame out of a contingency table
  #   x        ... table or vector of frequencies
  #   dimnames ... optionally overwrite the dimnames of x before expanding
  #   type     ... target type for the resulting columns, recycled over
  #                columns; "as.numeric" converts via as.character, any
  #                other value is used as a conversion function name
  #                (default "as.factor")
  #   rownames, colnames ... optionally set the dimnames of the result
  # coerce to table, such as also be able to handle vectors
  x <- as.table(x)
  if(!is.null(dimnames)) dimnames(x) <- dimnames
  # for plain counts to be expanded to numeric indices, label the cells 1..n
  if(is.null(dimnames) && identical(type, "as.numeric")) dimnames(x) <- list(seq_along(x))
  # set a title for the table if it does not have one
  # if(is.null(names(dimnames(x)))) names(dimnames(x)) <- ""
  # if(length(dim(x))==1 && names(dimnames(x))=="") names(dimnames(x)) <- "Var1"
  # replaced 26.3.2013: make sure every dimension has a (Var<i>) name
  for( i in 1:length(dimnames(x)) )
    if (is.null(names(dimnames(x)[i])) || names(dimnames(x)[i]) == "")
      if (length(dimnames(x)) == 1) names(dimnames(x)) <- gettextf("Var%s", i)
  else names(dimnames(x)[i]) <- gettextf("Var%s", i)
  # replicate each level combination according to its cell frequency
  res <- as.data.frame(expand.grid(dimnames(x))[rep(1:prod(dim(x)), as.vector(x)),])
  rownames(res) <- NULL
  if(!all(names(dimnames(x))=="")) colnames(res) <- names(dimnames(x))
  # return ordered factors, if wanted...
  if(is.null(type)) type <- "as.factor"
  # recycle type:
  if(length(type) < ncol(res)) type <- rep(type, length.out=ncol(res))
  for(i in 1:ncol(res)){
    if(type[i]=="as.numeric"){
      # via as.character to get the level labels, not the integer codes
      res[,i] <- as.numeric(as.character(res[,i]))
    } else {
      res[,i] <- eval(parse(text = gettextf("%s(res[,i])", type[i])))
    }
  }
  # overwrite the dimnames, if requested
  if(!is.null(rownames)) rownames(res) <- rownames
  if(!is.null(colnames)) colnames(res) <- colnames
  return(res)
}
# AddClass <- function(x, class, after=0) {
# class(x) <- append(class(x), class, after = after)
# x
# }
#
#
# RemoveClass <- function(x, class) {
# class(x) <- class(x)[class(x) %nin% class]
# x
# }
FixToTable <- function(txt, sep = " ", delim = "\t", trim = TRUE, header = TRUE){
  # converts a fixed text to a delim separated table
  #   txt    ... character vector, one element per text line
  #   sep    ... the character that forms the column gaps (default blank)
  #   delim  ... delimiter to insert between the detected columns
  #   trim   ... strip leading/trailing blanks from each cell
  #   header ... treat the first line as column names
  # Returns a character matrix.
  # make all lines same width first, so they align as a character grid
  txt <- StrPad(txt, width=max(nchar(txt)))
  m <- do.call("rbind", strsplit(txt, ""))
  # a column position belongs to a gap if it is 'sep' in every line
  idx <- apply( m, 2, function(x) all(x == sep))
  # replace all multiple delims by just one (keep only the first gap column)
  idx[-1][(apply(cbind(idx[-1], idx[-length(idx)]), 1, sum) == 2)] <- FALSE
  m[,idx] <- delim
  tab <- apply( m, 1, paste, collapse="")
  # trim the columns
  if(trim) {
    tab <- do.call("rbind", lapply(strsplit(tab, delim), StrTrim))
  } else {
    tab <- do.call("rbind", strsplit(tab, delim))
  }
  if(header) {
    colnames(tab) <- tab[1,]
    tab <- tab[-1,]
  }
  return(tab)
}
## GUI-Elements: select variables by dialog, FileOpen, DescDlg, ObjectBrowse ====
SaveAsDlg <- function(x, filename){
  # Save the object x under its own name to an .rda file; when no
  # filename is given, ask the user interactively via file.choose().
  if (missing(filename))
    filename <- file.choose()
  if (is.na(filename)) {
    warning("No filename supplied")
  } else {
    save(list = deparse(substitute(x)), file = filename)
  }
}
# Generic: let the user pick variables/elements of x in a list dialog
# and place the resulting R expression on the clipboard.
SelectVarDlg <- function (x, ...) UseMethod("SelectVarDlg")
.ToClipboard <- function (x, ...) {
  # Internal: write x to the system clipboard.
  # macOS ("Darwin"): pipe through pbcopy; Windows: the "clipboard"
  # file device. Other systems (notably Linux, where CRAN example
  # checks run) are not supported and raise an error.
  sysname <- Sys.info()["sysname"]
  switch(sysname,
         "Darwin" = {
           con <- pipe("pbcopy")
           cat(x, file = con, ...)
           close(con)
         },
         "Windows" = {
           cat(x, file = "clipboard", ...)
         },
         stop("Writing to the clipboard is not implemented for your system (",
              sysname, ") in this package.")
  )
}
SelectVarDlg.default <- function(x, useIndex = FALSE, ...){
  # Show a multi-select list dialog with the elements of x, build a
  # c(...) expression from the selection and put it on the clipboard.
  #   useIndex ... return positional indices instead of the quoted values
  # Returns the generated code string invisibly ("" if nothing chosen).
  # example: Sel(d.pizza)
  xsel <- select.list(x, multiple = TRUE, graphics = TRUE)
  if(useIndex == TRUE) {
    xsel <- which(x %in% xsel)
  } else {
    xsel <- shQuote(xsel)
  }
  # an empty selection shQuotes to "\"\"" -> return an empty string then
  if(!identical(xsel, "\"\""))
    txt <- paste("c(", paste(xsel, collapse=","),")", sep="")
  else
    txt <- ""
  .ToClipboard(txt)
  invisible(txt)
}
SelectVarDlg.numeric <- function(x, ...) {
  # Offer the names of x (or its values as character, if unnamed) for
  # selection and build a subsetting expression like "x[c(...)]",
  # which is placed on the clipboard and returned invisibly.
  if(!is.null(names(x)))
    z <- names(x)
  else
    z <- as.character(x)
  txt <- paste(deparse(substitute(x)), "[", SelectVarDlg.default( x = z, ...), "]",
               sep="", collapse="")
  .ToClipboard(txt)
  invisible(txt)
}
SelectVarDlg.factor <- function(x, ...) { SelectVarDlg.default( x = levels(x), ...) }
SelectVarDlg.data.frame <- function(x, ...) {
  # Offer the column names for selection and build a column-subsetting
  # expression like "df[, c(...)]", placed on the clipboard and
  # returned invisibly ("" if nothing was chosen).
  sel <- SelectVarDlg.default( x = colnames(x), ...)
  if(sel!="")
    txt <- paste(deparse(substitute(x)), "[,",
                 sel, "]", sep="", collapse="")
  else
    txt <- ""
  .ToClipboard(txt)
  invisible(txt)
}
FileOpenCmd <- function(fmt=NULL) {
  # Let the user choose a file and place a matching R command (a quoted
  # path, a read.table() call, or a load() call) on the clipboard.
  #   fmt ... 1 = quoted filename only, 2 = read.table() command,
  #           3 = load() command; NULL chooses by file extension
  #           (rda/RData -> 3, dat/csv -> 2, else 1)
  # Returns the generated command string invisibly.
  fn <- file.choose()
  # fn <- tcltk::tclvalue(tcltk::tkgetOpenFile())
  op <- options(useFancyQuotes = FALSE)
  # switch from backslash to slash
  fn <- gsub("\\\\", "/", fn)
  # parse the filename into path, filename, filextension
  fnamelong <- rev(unlist(strsplit(fn, "/")))[1]
  ext <- rev(unlist(strsplit( fnamelong, "\\.")))[1]
  fname <- substr(fnamelong, 1, nchar(fnamelong) - nchar(ext) - 1)
  path <- substr(fn, 1, nchar(fn) - nchar(fname) - nchar(ext) - 1)
  if(is.null(fmt)) {
    if(ext %in% c("rda", "RData"))
      fmt <- 3
    else if(ext %in% c("dat", "csv"))
      fmt <- 2
    else
      fmt <- 1
  }
  # command templates; %path%/%fname%/%ext% are replaced below
  if(fmt == 1) {
    fmt <- "\"%path%%fname%.%ext%\""
  } else if( fmt == 2) {
    fmt="d.%fname% <- read.table(file = \"%path%%fname%.%ext%\", header = TRUE, sep = \";\", na.strings = c(\"NA\",\"NULL\"), strip.white = TRUE)"
  } else if( fmt == 3) {
    fmt="load(file = \"%path%%fname%.%ext%\")"
  }
  rcmd <- gsub("%fname%", fname, gsub("%ext%", ext, gsub( "%path%", path, fmt)))
  # utils::writeClipboard(rcmd)
  .ToClipboard(rcmd)
  options(op)
  invisible(rcmd)
}
.InitDlg <- function(width, height, x=NULL, y=NULL, resizex=FALSE, resizey=FALSE, main="Dialog", ico="R"){
  # Internal: create and return a tcltk toplevel window of the given
  # size, roughly centered on the screen (when x/y are NULL), with
  # title 'main' and the icon file <ico>.ico from the package extdata.
  top <- tcltk::tktoplevel()
  if(is.null(x)) x <- as.integer(tcltk::tkwinfo("screenwidth", top))/2 - 50
  if(is.null(y)) y <- as.integer(tcltk::tkwinfo("screenheight", top))/2 - 25
  geom <- gettextf("%sx%s+%s+%s", width, height, x, y)
  tcltk::tkwm.geometry(top, geom)
  tcltk::tkwm.title(top, main)
  tcltk::tkwm.resizable(top, resizex, resizey)
  # alternative:
  # system.file("extdata", paste(ico, "ico", sep="."), package="DescTools")
  tcltk::tkwm.iconbitmap(top, file.path(find.package("DescTools"), "extdata", paste(ico, "ico", sep=".")))
  return(top)
}
.ImportSPSS <- function(datasetname = "dataset") {
  # Internal: tcltk options dialog for importing an SPSS file with
  # foreign::read.spss. Returns NULL if cancelled, otherwise a list with
  # the dataset name and all read.spss arguments (logicals as 0/1).
  # read.spss signature for reference:
  # function (file, use.value.labels = TRUE, to.data.frame = FALSE,
  #     max.value.labels = Inf, trim.factor.names = FALSE, trim_values = TRUE,
  #     reencode = NA, use.missings = to.data.frame)
  # env.* variables capture the widget states when OK is pressed
  e1 <- environment()
  env.dsname <- character()
  env.use.value.labels <- logical()
  env.to.data.frame <- logical()
  env.max.value.labels <- character()
  env.trim.factor.names <- logical()
  env.trim.values <- logical()
  env.reencode <- character()
  env.use.missings <- logical()
  # lst stays NULL when the dialog is cancelled
  lst <- NULL
  OnOK <- function() {
    # copy the tcl widget values into the enclosing environment
    assign("lst", list(), envir = e1)
    assign("env.dsname", tcltk::tclvalue(dsname), envir = e1)
    assign("env.use.value.labels", tcltk::tclvalue(use.value.labels), envir = e1)
    assign("env.to.data.frame", tcltk::tclvalue(to.data.frame), envir = e1)
    assign("env.max.value.labels", tcltk::tclvalue(max.value.labels), envir = e1)
    assign("env.trim.factor.names", tcltk::tclvalue(trim.factor.names), envir = e1)
    assign("env.trim.values", tcltk::tclvalue(trim.values), envir = e1)
    assign("env.reencode", tcltk::tclvalue(reencode), envir = e1)
    assign("env.use.missings", tcltk::tclvalue(use.missings), envir = e1)
    tcltk::tkdestroy(top)
  }
  # --- build the dialog widgets ---
  top <- .InitDlg(350, 300, main="Import SPSS Dataset")
  dsname <- tcltk::tclVar(datasetname)
  dsnameFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
  entryDsname <- tcltk::ttkentry(dsnameFrame, width=30, textvariable=dsname)
  optionsFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
  use.value.labels <- tcltk::tclVar("1")
  use.value.labelsCheckBox <- tcltk::ttkcheckbutton(optionsFrame, text="Use value labels", variable=use.value.labels)
  to.data.frame <- tcltk::tclVar("1")
  to.data.frameCheckBox <- tcltk::ttkcheckbutton(optionsFrame,
                                                 text="Convert value labels to factor levels", variable=to.data.frame)
  max.value.labels <- tcltk::tclVar("Inf")
  entryMaxValueLabels <- tcltk::ttkentry(optionsFrame, width=30, textvariable=max.value.labels)
  trim.values <- tcltk::tclVar("1")
  trim.valuesCheckBox <- tcltk::ttkcheckbutton(optionsFrame, text="Ignore trailing spaces when matching"
                                               , variable=trim.values)
  trim.factor.names <- tcltk::tclVar("1")
  trim.factor.namesCheckBox <- tcltk::ttkcheckbutton(optionsFrame, text="Trim trailing spaces from factor levels"
                                                     , variable=trim.factor.names)
  reencode <- tcltk::tclVar("")
  entryReencode <- tcltk::ttkentry(optionsFrame, width=30, textvariable=reencode)
  use.missings <- tcltk::tclVar("1")
  use.missingsCheckBox <- tcltk::ttkcheckbutton(optionsFrame, text="Use missings",
                                                variable=use.missings)
  # --- lay out the widgets ---
  tcltk::tkgrid(tcltk::tklabel(dsnameFrame, text="Enter name for data set:  "), entryDsname, sticky="w")
  tcltk::tkgrid(dsnameFrame, columnspan=2, sticky="w")
  tcltk::tkgrid(use.value.labelsCheckBox, sticky="w")
  tcltk::tkgrid(to.data.frameCheckBox, sticky="nw")
  tcltk::tkgrid(tcltk::ttklabel(optionsFrame, text="Maximal value label:"), sticky="nw")
  tcltk::tkgrid(entryMaxValueLabels, padx=20, sticky="nw")
  tcltk::tkgrid(trim.valuesCheckBox, sticky="w")
  tcltk::tkgrid(trim.factor.namesCheckBox, sticky="w")
  tcltk::tkgrid(tcltk::ttklabel(optionsFrame, text="Reencode character strings to the current locale:"), sticky="nw")
  tcltk::tkgrid(entryReencode, padx=20, sticky="nw")
  tcltk::tkgrid(use.missingsCheckBox, sticky="w")
  tcltk::tkgrid(optionsFrame, sticky="w")
  buttonsFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
  tfButOK <- tcltk::tkbutton(buttonsFrame, text = "OK", command = OnOK, width=10)
  tfButCanc <- tcltk::tkbutton(buttonsFrame, width=10, text = "Cancel", command = function() tcltk::tkdestroy(top))
  tcltk::tkgrid(tfButOK, tfButCanc)
  tcltk::tkgrid.configure(tfButCanc, padx=c(6,6))
  tcltk::tkgrid.columnconfigure(buttonsFrame, 0, weight=2)
  tcltk::tkgrid.columnconfigure(buttonsFrame, 1, weight=1)
  tcltk::tkgrid(buttonsFrame, sticky="ew")
  # block until the dialog is closed (OK or Cancel)
  tcltk::tkwait.window(top)
  if(!is.null(lst)){
    # assemble the read.spss argument list; checkbox states come back as
    # "0"/"1" strings and are converted to numeric here
    lst <- list(dsname=env.dsname, use.value.labels=as.numeric(env.use.value.labels),
                to.data.frame=as.numeric(env.to.data.frame),
                max.value.labels=env.max.value.labels, trim.factor.names=as.numeric(env.trim.factor.names),
                trim.values=as.numeric(env.trim.values), reencode=env.reencode, use.missings=as.numeric(env.use.missings) )
  }
  return(lst)
}
.ImportSYSTAT <- function(datasetname = "dataset") {
  # Internal: tcltk options dialog for importing a SYSTAT file with
  # foreign::read.systat. Returns NULL if cancelled, otherwise a list
  # with the dataset name and the to.data.frame flag (0/1).
  e1 <- environment()
  env.dsname <- character()
  env.to.data.frame <- logical()
  # lst stays NULL when the dialog is cancelled
  lst <- NULL
  top <- .InitDlg(350, 140, main="Import SYSTAT Dataset")
  OnOK <- function() {
    # copy the tcl widget values into the enclosing environment
    assign("lst", list(), envir = e1)
    assign("env.dsname", tcltk::tclvalue(dsname), envir = e1)
    assign("env.to.data.frame", tcltk::tclvalue(to.data.frame ), envir = e1)
    tcltk::tkdestroy(top)
  }
  # --- build and lay out the dialog widgets ---
  dsname <- tcltk::tclVar(datasetname)
  dsnameFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
  entryDsname <- tcltk::ttkentry(dsnameFrame, width=30, textvariable=dsname)
  optionsFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
  to.data.frame <- tcltk::tclVar("1")
  to.data.frameCheckBox <- tcltk::ttkcheckbutton(optionsFrame,
                                                 text="Convert dataset to data.frame", variable=to.data.frame)
  tcltk::tkgrid(tcltk::tklabel(dsnameFrame, text="Enter name for data set:  "), entryDsname, sticky="w")
  tcltk::tkgrid(dsnameFrame, columnspan=2, sticky="w")
  tcltk::tkgrid(to.data.frameCheckBox, sticky="w")
  tcltk::tkgrid(optionsFrame, sticky="w")
  buttonsFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
  tfButOK <- tcltk::tkbutton(buttonsFrame, text = "OK", command = OnOK, width=10)
  tfButCanc <- tcltk::tkbutton(buttonsFrame, width=10, text = "Cancel", command = function() tcltk::tkdestroy(top))
  tcltk::tkgrid(tfButOK, tfButCanc)
  tcltk::tkgrid.configure(tfButCanc, padx=c(6,6))
  tcltk::tkgrid.columnconfigure(buttonsFrame, 0, weight=2)
  tcltk::tkgrid.columnconfigure(buttonsFrame, 1, weight=1)
  tcltk::tkgrid(buttonsFrame, sticky="ew")
  # block until the dialog is closed (OK or Cancel)
  tcltk::tkwait.window(top)
  if(!is.null(lst)){
    lst <- list(dsname=env.dsname, to.data.frame=as.numeric(env.to.data.frame))
  }
  return(lst)
}
.ImportStataDlg <- function(datasetname = "dataset") {
  # Internal: tcltk options dialog for importing a Stata file with
  # foreign::read.dta. Returns NULL if cancelled, otherwise a list with
  # the dataset name and all read.dta arguments (logicals as 0/1).
  # read.dta signature for reference:
  # function (file, convert.dates = TRUE, convert.factors = TRUE,
  #     missing.type = FALSE, convert.underscore = FALSE, warn.missing.labels = TRUE)
  # env.* variables capture the widget states when OK is pressed
  e1 <- environment()
  env.dsname <- character()
  env.convert.dates <- logical()
  env.convert.factors <- logical()
  env.convert.underscore <- logical()
  env.missing.type <- logical()
  env.warn.missing.labels <- logical()
  # lst stays NULL when the dialog is cancelled
  lst <- NULL
  OnOK <- function() {
    # copy the tcl widget values into the enclosing environment
    assign("lst", list(), envir = e1)
    assign("env.dsname", tcltk::tclvalue(dsname), envir = e1)
    assign("env.convert.dates", tcltk::tclvalue(convert.dates), envir = e1)
    assign("env.convert.factors", tcltk::tclvalue(convert.factors), envir = e1)
    assign("env.convert.underscore", tcltk::tclvalue(convert.underscore), envir = e1)
    assign("env.missing.type", tcltk::tclvalue(missing.type), envir = e1)
    assign("env.warn.missing.labels", tcltk::tclvalue(warn.missing.labels), envir = e1)
    tcltk::tkdestroy(top)
  }
  # --- build the dialog widgets ---
  top <- .InitDlg(350, 220, main="Import Stata Dataset")
  dsname <- tcltk::tclVar(datasetname)
  dsnameFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
  entryDsname <- tcltk::ttkentry(dsnameFrame, width=30, textvariable=dsname)
  optionsFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
  convert.factors <- tcltk::tclVar("1")
  convert.factorsCheckBox <- tcltk::ttkcheckbutton(optionsFrame,
                                                   text="Convert value labels to factor levels", variable=convert.factors)
  convert.dates <- tcltk::tclVar("1")
  convert.datesCheckBox <- tcltk::ttkcheckbutton(optionsFrame, text="Convert dates to R format", variable=convert.dates)
  missing.type <- tcltk::tclVar("1")
  missing.typeCheckBox <- tcltk::ttkcheckbutton(optionsFrame, text="Multiple missing types (>=Stata 8)"
                                                , variable=missing.type)
  convert.underscore <- tcltk::tclVar("1")
  convert.underscoreCheckBox <- tcltk::ttkcheckbutton(optionsFrame, text="Convert underscore to period"
                                                      , variable=convert.underscore)
  warn.missing.labels <- tcltk::tclVar("1")
  warn.missing.labelsCheckBox <- tcltk::ttkcheckbutton(optionsFrame, text="Warn on missing labels",
                                                       variable=warn.missing.labels)
  # --- lay out the widgets ---
  tcltk::tkgrid(tcltk::tklabel(dsnameFrame, text="Enter name for data set:  "), entryDsname, sticky="w")
  tcltk::tkgrid(dsnameFrame, columnspan=2, sticky="w")
  tcltk::tkgrid(convert.datesCheckBox, sticky="w")
  tcltk::tkgrid(convert.factorsCheckBox, sticky="nw")
  tcltk::tkgrid(missing.typeCheckBox, sticky="w")
  tcltk::tkgrid(convert.underscoreCheckBox, sticky="w")
  tcltk::tkgrid(warn.missing.labelsCheckBox, sticky="w")
  tcltk::tkgrid(optionsFrame, sticky="w")
  buttonsFrame <- tcltk::tkframe(top, padx = 10, pady = 10)
  tfButOK <- tcltk::tkbutton(buttonsFrame, text = "OK", command = OnOK, width=10)
  tfButCanc <- tcltk::tkbutton(buttonsFrame, width=10, text = "Cancel", command = function() tcltk::tkdestroy(top))
  tcltk::tkgrid(tfButOK, tfButCanc)
  tcltk::tkgrid.configure(tfButCanc, padx=c(6,6))
  tcltk::tkgrid.columnconfigure(buttonsFrame, 0, weight=2)
  tcltk::tkgrid.columnconfigure(buttonsFrame, 1, weight=1)
  tcltk::tkgrid(buttonsFrame, sticky="ew")
  # block until the dialog is closed (OK or Cancel)
  tcltk::tkwait.window(top)
  if(!is.null(lst)){
    # assemble the read.dta argument list; checkbox states come back as
    # "0"/"1" strings and are converted to numeric here
    lst <- list(dsname=env.dsname, convert.factors=as.numeric(env.convert.factors),
                convert.dates=as.numeric(env.convert.dates), convert.underscore=as.numeric(env.convert.underscore),
                missing.type=as.numeric(env.missing.type), warn.missing.labels=as.numeric(env.warn.missing.labels) )
  }
  return(lst)
}
ImportFileDlg <- function(auto_type = TRUE, env = .GlobalEnv) {
  # Ask the user for a foreign data file (SPSS/SAS/SYSTAT/MiniTab/Stata),
  # show the format-specific options dialog and import the file into env
  # under the dataset name chosen in the dialog.
  #   auto_type ... if TRUE the import filter is guessed from the file
  #                 extension, otherwise the user picks it from a list
  #   env       ... environment in which the new dataset is created
  requireNamespace("tcltk", quietly = FALSE)
  filename <- tcltk::tclvalue(tcltk::tkgetOpenFile(filetypes= "{{All files} *}
          {{SPSS Files} {.sav}} {{SAS xport files} {.xpt, .xport}}
          {{SYSTAT} {*.sys, *.syd}} {{MiniTab} {.mtp}}
          {{Stata Files} {.dta}}"))
  # nicht topmost, aber wie mach ich das dann??
  # tcl("wm", "attributes", root, topmost=TRUE)
  if (filename=="") return()
  path <- SplitPath(filename)
  # FIX: was "Minitab", which could never match the "MiniTab" switch
  # label below when the user picked it from the list
  fformats <- c("SPSS","SAS","SYSTAT", "MiniTab","Stata")
  if(auto_type){
    xsel <- switch(toupper(path$extension),
                   "SAV"="SPSS",
                   "DTA"="Stata",
                   "SYD"="SYSTAT",
                   "SYS"="SYSTAT",
                   "MTP"="MiniTab",
                   "XPT"="SAS",
                   "XPORT"="SAS",
                   "SAS"="SAS",
                   select.list(fformats, multiple = FALSE, graphics = TRUE))
  } else {
    xsel <- select.list(fformats, multiple = FALSE, graphics = TRUE)
  }
  # user cancelled the format selection -> nothing to do
  if (length(xsel) == 0 || xsel == "") return(invisible(NULL))
  switch(xsel,
         "MiniTab"={
           # read.mtp has no options dialog; build the minimal dlg list
           # needed by the assign() below (FIX: dlg was never defined
           # on this branch and the final assign() failed)
           dlg <- list(dsname = paste("d.", path$filename, sep=""))
           zz <- foreign::read.mtp(file=filename)
         },
         "SYSTAT"={
           dlg <- .ImportSYSTAT(paste("d.", path$filename, sep=""))
           if(is.null(dlg)) return()
           zz <- foreign::read.systat(file=filename, to.data.frame = dlg$to.data.frame)
         },
         "SPSS"={
           dlg <- .ImportSPSS(paste("d.", path$filename, sep=""))
           if(is.null(dlg)) return()
           zz <- foreign::read.spss(file=filename, use.value.labels = dlg$use.value.labels,
                                    to.data.frame = dlg$to.data.frame,
                                    max.value.labels = dlg$max.value.labels,
                                    trim.factor.names = dlg$trim.factor.names,
                                    # FIX: was dlg$trim_value, which does not exist in the
                                    # dlg list ('trim.values') and silently yielded NULL
                                    trim_values = dlg$trim.values,
                                    reencode = ifelse(dlg$reencode=="", NA, dlg$reencode),
                                    use.missings = dlg$use.missings)
         },
         "SAS"={
           # not implemented: bail out before dlg/zz are used below
           # (FIX: the original fell through to the assign() with
           # neither dlg nor zz defined)
           print("not yet implemented.")
           return(invisible(NULL))
         },
         "Stata"={
           dlg <- .ImportStataDlg(paste("d.", path$filename, sep=""))
           if(is.null(dlg)) return()
           zz <- foreign::read.dta(file=filename, convert.dates = dlg[["convert.dates"]], convert.factors = dlg[["convert.factors"]],
                                   missing.type = dlg[["missing.type"]], convert.underscore = dlg[["convert.underscore"]],
                                   warn.missing.labels = dlg[["warn.missing.labels"]])
         })
  assign(dlg[["dsname"]], zz, envir=env)
  message(gettextf("Dataset %s has been successfully created!\n\n", dlg[["dsname"]]))
  # Exec(gettextf("print(str(%s, envir = %s))", dlg[["dsname"]], deparse(substitute(env))))
}
PasswordDlg <- function() {
  # Show a small modal tcltk login dialog with a masked entry field and
  # return the entered password as a character string ("" if cancelled
  # without input).
  requireNamespace("tcltk", quietly = FALSE)
  e1 <- environment()
  pw <- character()
  tfpw <- tcltk::tclVar("")
  OnOK <- function() {
    # copy the entry value out to the enclosing environment, then close
    assign("pw", tcltk::tclvalue(tfpw), envir = e1)
    tcltk::tkdestroy(root)
  }
  # do not update screen while the widgets are being built
  tcltk::tclServiceMode(on = FALSE)
  # create window
  root <- .InitDlg(205, 110, resizex=FALSE, resizey=FALSE, main="Login", ico="key")
  # define widgets (show="*" masks the typed characters)
  content <- tcltk::tkframe(root, padx=10, pady=10)
  tfEntrPW <- tcltk::tkentry(content, width="30", textvariable=tfpw, show="*" )
  tfButOK <- tcltk::tkbutton(content,text="OK",command=OnOK, width=6)
  tfButCanc <- tcltk::tkbutton(content, text="Cancel", width=7,
                               command=function() tcltk::tkdestroy(root))
  # build GUI
  tcltk::tkgrid(content, column=0, row=0)
  tcltk::tkgrid(tcltk::tklabel(content, text="Enter Password"), column=0, row=0,
                columnspan=3, sticky="w")
  tcltk::tkgrid(tfEntrPW, column=0, row=1, columnspan=3, pady=10)
  tcltk::tkgrid(tfButOK, column=0, row=2, ipadx=15, sticky="w")
  tcltk::tkgrid(tfButCanc, column=2, row=2, ipadx=5, sticky="e")
  # binding event-handler: Return key confirms, focus starts in the entry
  tcltk::tkbind(tfEntrPW, "<Return>", OnOK)
  tcltk::tkfocus(tfEntrPW)
  tcltk::tclServiceMode(on = TRUE)
  tcltk::tcl("wm", "attributes", root, topmost=TRUE)
  # block until the dialog is closed
  tcltk::tkwait.window(root)
  return(pw)
}
ColorDlg <- function() {
  # Open the native Tk color chooser and return the picked color as a
  # hex string, e.g. "#ff0000" (character(0) if the dialog is cancelled).
  requireNamespace("tcltk", quietly = FALSE)
  res <- tcltk::tcl("tk_chooseColor", title = "Choose a color")
  as.character(res)
}
# Generic: interactively identify points of a plot lying inside a
# user-defined area (rectangle or polygon).
IdentifyA <- function(x, ...) UseMethod("IdentifyA")
IdentifyA.formula <- function(formula, data, subset, poly = FALSE, ...){
  # Formula interface: IdentifyA(y ~ x, data = d) extracts the two
  # variables via model.frame and delegates to the default method.
  # na.action = na.pass keeps rows with NAs so that point indices still
  # match the plotted data; the old option is restored on exit.
  opt <- options(na.action=na.pass); on.exit(options(opt))
  # identifies points in a plot, lying in a rectangle, spanned by upleft, botright
  # standard model.frame call rewriting (cf. lm): keep only the relevant
  # arguments and re-evaluate the call as model.frame in the caller
  mf <- match.call(expand.dots = FALSE)
  m <- match(c("formula", "data", "na.action", "subset"), names(mf), 0L)
  mf <- mf[c(1L, m)]
  mf$drop.unused.levels <- TRUE
  mf[[1L]] <- as.name("model.frame")
  mf <- eval(mf, parent.frame())
  # pull response (y) and regressor (x) out of the model frame, keeping
  # the original variable names for axis labelling downstream
  response <- attr(attr(mf, "terms"), "response")
  vname <- attr(attr(attr(mf, "terms"), "dataClasses"), "names")
  x <- setNames(mf[[-response]], vname[2])
  y <- setNames(mf[[response]], vname[1])
  IdentifyA(x=x, y=y, ...)
}
IdentifyA.default <- function(x, y=NULL, poly = FALSE, ...){
  # Interactively identify plotted points inside a user-defined area.
  #   poly = FALSE ... the user clicks two corners of a rectangle
  #   poly = TRUE  ... the user clicks an arbitrary polygon (finish via
  #                    the locator's stop mechanism)
  # Returns the indices of the enclosed points; the attribute "cond"
  # holds an R expression reproducing the selection.
  xlabel <- if (!missing(x))
    deparse(substitute(x))
  ylabel <- if (!missing(y))
    deparse(substitute(y))
  # normalise the x/y specification like plot() does
  pxy <- xy.coords(x, y, xlabel, ylabel)
  xlabel <- pxy$xlab
  ylabel <- pxy$ylab
  if(poly){
    cat("Select polygon points and click on finish when done!\n")
    xy <- locator(type="n")
    # visual feedback: draw the chosen polygon
    polygon(xy, border="grey", lty="dotted")
    # point-in-polygon test via the compiled pip routine
    idx <- PtInPoly(data.frame(pxy$x, pxy$y), do.call("data.frame", xy))$pip == 1
    code <- paste("x %in% c(", paste(which(idx), collapse=","), ")", sep="")
  } else {
    cat("Select upper-left and bottom-right point!\n")
    xy <- locator(n=2, type="n")[1:2]
    # visual feedback: draw the chosen rectangle
    rect(xy$x[1], xy$y[1], xy$x[2], xy$y[2], border="grey", lty="dotted")
    # %[]% is DescTools' "within closed interval" operator
    idx <- (pxy$x %[]% range(xy$x) & pxy$y %[]% range(xy$y))
    code <- paste(xlabel, " %[]% c(", xy$x[1], ", ", xy$x[2], ") & ", ylabel ," %[]% c(", xy$y[1], ", ", xy$y[2], "))", sep="")
  }
  res <- which(idx)
  xy <- lapply(lapply(xy, range), signif, digits=4)
  attr(x=res, which="cond") <- code
  return(res)
}
PtInPoly <- function(pnts, poly.pnts) {
  # Point-in-polygon test: for every point in pnts, decide whether it
  # lies inside the polygon given by poly.pnts (both two-column x/y
  # matrices or data.frames). Returns data.frame(pnts, pip = 0/1).
  # Computation is done by the compiled 'pip' routine of DescTools.
  pnts <- as.matrix(pnts)
  poly.pnts <- as.matrix(poly.pnts)
  if (!(is.matrix(pnts) & is.matrix(poly.pnts)))
    stop('pnts & poly.pnts must be a 2 column dataframe or matrix')
  if (!(dim(pnts)[2] == 2 & dim(poly.pnts)[2] == 2))
    stop('pnts & poly.pnts must be a 2 column dataframe or matrix')
  # drop a duplicated closing vertex: the C routine expects an open ring
  n <- nrow(poly.pnts)
  if (poly.pnts[1, 1] == poly.pnts[n, 1] & poly.pnts[1, 2] == poly.pnts[n, 2])
    poly.pnts <- poly.pnts[-1, ]
  pip <- .Call('pip', PACKAGE = "DescTools",
               pnts[, 1], pnts[, 2], nrow(pnts),
               poly.pnts[, 1], poly.pnts[, 2], nrow(poly.pnts))
  data.frame(pnts, pip = pip)
}
# Identify points in a plot using a formula.
# http://www.rforge.net/NCStats/files/
# Author: Derek Ogle <dogle@northland.edu>
identify.formula <- function(formula, data, subset, na.action, ...) {
  # Formula method for identify(): lets the user click points in an existing
  # plot created from 'formula' (of the form y ~ x) and 'data'.
  # Returns whatever identify() returns (indices of the picked points).

  #   mf <- model.frame(x, data)
  #   x <- mf[,2]
  #   y <- mf[,1]
  #   identify(x, y, ...)

  # accept only a two-sided formula with exactly one term on the right side
  if (missing(formula) || (length(formula) != 3L) || (length(attr(terms(formula[-2L]),
                                                                  "term.labels")) != 1L))
    stop("'formula' missing or incorrect")

  # rebuild the call as stats::model.frame(...) and evaluate it in the
  # caller's frame, so that 'data'/'subset'/'na.action' are resolved there
  m <- match.call(expand.dots = FALSE)
  if (is.matrix(eval(m$data, parent.frame())))
    m$data <- as.data.frame(data)
  m[[1L]] <- quote(stats::model.frame)
  m$... <- NULL
  mf <- eval(m, parent.frame())

  # column index of the response; the remaining column is the x variable
  response <- attr(attr(mf, "terms"), "response")
  identify(x=mf[[-response]], y=mf[[response]], ...)
}
# experimental: formula interface for split
split.formula <- function(x, f, drop = FALSE, data = NULL, ...) {
  # Split the response of a two-sided formula y ~ g by its grouping term g.
  # The 'f' argument is ignored; the grouping is taken from the formula.
  frame <- model.frame(x, data)
  split(frame[[1]], frame[[2]], drop = drop, ...)
}
###
## helpers: PlotPar and PlotRCol
PlotPar <- function(){
  # plots the most used plot parameters
  # Draws a single reference chart showing the standard values of pch, lty,
  # lwd, pt.cex, text adj and the first eight colors. Purely a visual help
  # page; nothing is returned.

  # save the graphical parameters and restore them on exit; if a device is
  # already open, also restore it as the active device
  usr <- par(no.readonly=TRUE); on.exit(par(usr))
  if( !is.null(dev.list()) ){
    curwin <- dev.cur()
    on.exit({
      dev.set(curwin)
      par(usr)
    })
  }

  # this does not work and CRAN does not allow windows()
  # dev.new(width=7.2, height=4)

  par( mar=c(0,0,0,0), mex=0.001, xaxt="n", yaxt="n", ann=F, xpd=TRUE)
  # point characters 1..25, colored/filled and plain
  plot( x=1:25, y=rep(11,25), pch=1:25, cex=2, xlab="", ylab=""
        , frame.plot=FALSE, ylim=c(-1,15), col=2, bg=3)
  # NOTE(review): pch=1:35 is longer than the 25 points drawn, so only the
  # first 25 values are used — confirm whether 1:25 was intended
  points( x=1:25, y=rep(12.5,25), pch=1:35, cex=2, col=1)
  text( x=1:25, y=rep(9.5,25), labels=1:25, cex=0.8 )

  # line types 0..6 with their names
  segments( x0=1, x1=4, y0=0:5, lty=6:1, lwd=3 )
  text( x=5, y=6:0, adj=c(0,0.5), labels=c("0 = blank", "1 = solid (default)", "2 = dashed", "3 = dotted", "4 = dotdash", "5 = longdash", "6 = twodash") )

  # line widths 1..7
  segments( x0=10, x1=12, y0=0:6, lty=1, lwd=7:1 )
  text( x=13, y=0:6, adj=c(0,0.5), labels=7:1 )

  # point sizes (cex)
  points( x=rep(15,7), y=0:6, cex=rev(c(0.8,1,1.5,2,3,4,7)) )
  text( x=16, y=0:6, adj=c(0,0.5), labels=rev(c(0.8,1,1.5,2,3,4,7)) )

  # section captions
  text( x=c(1,1,10,15,18,18), y=c(14,7.5,7.5,7.5,7.5,2.5), labels=c("pch","lty","lwd","pt.cex","adj","col"), cex=1.3, col="grey40")

  # demonstrate all 9 combinations of text adj in c(0,0.5,1) x c(0,0.5,1)
  adj <- expand.grid(c(0,0.5,1),c(0,0.5,1))
  for( i in 1:nrow(adj) ){
    text( x=18+adj[i,1]*7, y=3.5+adj[i,2]*3, label=paste("text", paste(adj[i,], collapse=",") ), adj=unlist(adj[i,]), cex=0.8 )
  }

  # the first eight palette colors
  points( x=18:25, y=rep(1,8), col=1:8, pch=15, cex=2 )
  text( x=18:25, y=0, adj=c(0.5,0.5), labels=1:8, cex=0.8 )
}
PlotPch <- function (col = NULL, bg = NULL, newwin = FALSE) {
  # Display the 25 standard plotting characters (pch 1..25), once in the
  # default color and once colored with 'col'/'bg' (the fill 'bg' only
  # affects the fillable symbols 21-25).
  #   col, bg : default to the package colors hred / hecru
  #   newwin  : open a new device before plotting

  if (newwin == TRUE)
    dev.new(width=2, height=5, noRStudioGD=TRUE)
  #  dev.new(width=3, height=2, xpos=100, ypos=600, noRStudioGD = TRUE)

  # save graphical parameters (and the active device) and restore on exit
  usr <- par(no.readonly = TRUE)
  on.exit(par(usr))
  if (!is.null(dev.list())) {
    curwin <- dev.cur()
    on.exit({
      dev.set(curwin)
      par(usr)
    })
  }

  if(is.null(col))
    col <- hred
  if(is.null(bg))
    bg <- hecru

  par(mar = c(0, 0, 0, 0), mex = 0.001, xaxt = "n", yaxt = "n",
      ann = F, xpd = TRUE)
  # left column: plain symbols; middle column: colored/filled symbols;
  # right column: the pch numbers (pch 25 is plotted at the bottom, label 25
  # is listed last, so numbers and symbols line up)
  plot(y = 1:25, x = rep(3, 25), pch = 25:1, cex = 1.5, xlab = "",
       ylab = "", frame.plot = FALSE, xlim = c(-1, 15))
  points(y = 1:25, x = rep(6, 25), pch = 25:1, cex = 1.5,
         col = col, bg = bg)
  text(y = 25:1, x = rep(9, 25), labels = 1:25, cex = 0.8)
}
ColPicker <- function(locator=TRUE, ord=c("hsv","default"), label=c("text","hex","dec"),
                      mdim = c(38, 12), newwin = FALSE) {
  # Display all named R colors (greys excluded) as a clickable matrix and
  # return the names of the colors the user clicks on.
  #   ord    : sort order of the colors ("hsv" sorts by hue/saturation/value)
  #   label  : annotation style - color name, hex code or decimal RGB code
  #   mdim   : layout of the matrix as c(rows, columns)
  #   newwin : open a new device before plotting

  # save par and options (the locator bell is switched off) for restoring
  usr <- par(no.readonly=TRUE)
  opt <- options(locatorBell = FALSE)
  on.exit({
    par(usr)
    options(opt)
  })

  # this does not work and CRAN does not allow windows()
  # dev.new(width=13, height=7)
  if(newwin == TRUE)
    dev.new(width=13, height=7, noRStudioGD = TRUE)

  # plots all named colors; PlotRCol(lbel="hex") still has too many labels
  if( !is.null(dev.list()) ){
    curwin <- dev.cur()
    on.exit({
      dev.set(curwin)
      par(usr)
    })
  }

  # colors without greys (and grays...) n = 453
  cols <- colors()[-grep( pattern="^gr[ea]y", colors())]

  # set order
  switch( match.arg( arg=ord, choices=c("hsv","default") )
          , "default" = { # do nothing
          }
          , "hsv" = {
            # sort by hue, then saturation, then value
            rgbc <- col2rgb(cols)
            hsvc <- rgb2hsv(rgbc[1,],rgbc[2,],rgbc[3,])
            cols <- cols[ order(hsvc[1,],hsvc[2,],hsvc[3,]) ]
          }
  )

  zeilen <- mdim[1]; spalten <- mdim[2]   # rows x columns of the color grid
  farben.zahlen <- matrix( 1:spalten, nrow=zeilen, ncol=spalten, byrow=TRUE) # matrix for the point positions
  if(zeilen*spalten > length(cols))
    cols <- c(cols, rep(NA, zeilen*spalten - length(cols)) ) # pad with NA to fill the grid

  x_offset <- 0.5
  x <- farben.zahlen[, 1:spalten] # x values (column numbers)
  y <- -rep(1:zeilen, spalten)    # y values (negative row numbers, so row 1 is on top)

  par(mar=c(0,0,0,0), mex=0.001, xaxt="n", yaxt="n", ann=F)
  plot( x, y
        , pch=22       # square point type
        , cex=2        # point magnification
        , col=NA
        , bg=cols      # background (fill) colors
        , bty="n"      # no box
        , xlim=c(1, spalten+x_offset)  # x range
  )

  # annotate each square with the chosen label style
  switch( match.arg( arg=label, choices=c("text","hex","dec") )
          , "text" = {
            text( x+0.1, y, cols, adj=0, cex=0.6 ) # color names
          }
          , "hex" = { # HEX codes
            text( x+0.1, y, adj=0, cex=0.6,
                  c(apply(apply(col2rgb(cols[1:(length(cols)-3)]), 2, sprintf, fmt=" %02X"), 2, paste, collapse=""), rep("",3))
            )
          }
          , "dec" = { # decimal RGB-Codes
            text( x+0.1, y, adj=0, cex=0.6,
                  c(apply(apply(col2rgb(cols[1:(length(cols)-3)]), 2, sprintf, fmt=" %03d"), 2, paste, collapse=""), rep("",3))
            )
          }
  )

  # translate the clicked coordinates back into column-major indices of 'cols'
  z <- locator()
  idx <- with(lapply(z, round), (x-1) * zeilen + abs(y))

  return(cols[idx])
}
# not needed with gconvertX()
# FigUsr <- function() {
#
# usr <- par("usr")
# plt <- par("plt")
#
# res <- c(
# usr[1] - diff(usr[1:2])/diff(plt[1:2]) * (plt[1]) ,
# usr[2] + diff(usr[1:2])/diff(plt[1:2]) * (1-plt[2]),
# usr[3] - diff(usr[3:4])/diff(plt[3:4]) * (plt[3]) ,
# usr[4] + diff(usr[3:4])/diff(plt[3:4]) * (1-plt[4])
# )
#
# return(res)
#
# }
PlotMar <- function(){
  # Demonstration plot explaining R's margin system: the plot region, the
  # figure margins (par 'mar') and the outer margin area (par 'oma'), each
  # labelled and outlined in its own color. Purely illustrative; no value
  # is returned.

  par(oma=c(3,3,3,3)) # all sides have 3 lines of space
  #par(omi=c(1,1,1,1)) # alternative, uncomment this and comment the previous line to try

  # - The mar command represents the figure margins. The vector is in the same ordering of
  #   the oma commands.
  #
  # - The default size is c(5,4,4,2) + 0.1, (equivalent to c(5.1,4.1,4.1,2.1)).
  #
  # - The axes tick marks will go in the first line of the left and bottom with the axis
  #   label going in the second line.
  #
  # - The title will fit in the third line on the top of the graph.
  #
  # - All of the alternatives are:
  #   - mar: Specify the margins of the figure in number of lines
  #   - mai: Specify the margins of the figure in number of inches

  par(mar=c(5,4,4,2) + 0.1)
  #par(mai=c(2,1.5,1.5,.5)) # alternative, uncomment this and comment the previous line

  # Plot
  plot(x=1:10, y=1:10, type="n", xlab="X", ylab="Y")  # type="n" hides the points

  # Place text in the plot and color everything plot-related red
  text(5,5, "Plot", col=hred, cex=2)
  text(5,4, "text(5,5, \"Plot\", col=\"red\", cex=2)", col=hred, cex=1)
  box("plot", col=hred)

  # Place text in the margins and label the margins, all in green
  mtext("Figure", side=3, line=2, cex=2, col=hgreen)
  mtext("par(mar=c(5,4,4,2) + 0.1)", side=3, line=1, cex=1, col=hgreen)
  mtext("Line 0", side=3, line=0, adj=1.0, cex=1, col=hgreen)
  mtext("Line 1", side=3, line=1, adj=1.0, cex=1, col=hgreen)
  mtext("Line 2", side=3, line=2, adj=1.0, cex=1, col=hgreen)
  mtext("Line 3", side=3, line=3, adj=1.0, cex=1, col=hgreen)
  mtext("Line 0", side=2, line=0, adj=1.0, cex=1, col=hgreen)
  mtext("Line 1", side=2, line=1, adj=1.0, cex=1, col=hgreen)
  mtext("Line 2", side=2, line=2, adj=1.0, cex=1, col=hgreen)
  mtext("Line 3", side=2, line=3, adj=1.0, cex=1, col=hgreen)
  box("figure", col=hgreen)

  # Label the outer margin area and color it blue
  # Note the 'outer=TRUE' command moves us from the figure margins to the outer
  # margins.
  mtext("Outer Margin Area", side=1, line=1, cex=2, col=horange, outer=TRUE)
  mtext("par(oma=c(3,3,3,3))", side=1, line=2, cex=1, col=horange, outer=TRUE)
  mtext("Line 0", side=1, line=0, adj=0.0, cex=1, col=horange, outer=TRUE)
  mtext("Line 1", side=1, line=1, adj=0.0, cex=1, col=horange, outer=TRUE)
  mtext("Line 2", side=1, line=2, adj=0.0, cex=1, col=horange, outer=TRUE)
  box("outer", col=horange)

  usr <- par("usr")
  # inner <- par("inner")
  fig <- par("fig")
  plt <- par("plt")

  # mark the corner of the figure region (converted from user coordinates)
  # text("Figure", x=fig, y=ycoord, adj = c(1, 0))
  text("Inner", x=usr[2] + (usr[2] - usr[1])/(plt[2] - plt[1]) * (1 - plt[2]),
       y=usr[3] - diff(usr[3:4])/diff(plt[3:4]) * (plt[3]), adj = c(1, 0))
  #text("Plot", x=usr[1], y=usr[2], adj = c(0, 1))

  # the four corners of the figure region (black) and plot region (red)
  figusrx <- grconvertX(usr[c(1,2)], to="nfc")
  figusry <- grconvertY(usr[c(3,4)], to="nfc")
  points(x=figusrx[c(1,1,2,2)], y=figusry[c(3,4,3,4)], pch=15, cex=3, xpd=NA)
  points(x=usr[c(1,1,2,2)], y=usr[c(3,4,3,4)], pch=15, col=hred, cex=2, xpd=NA)

  # arrow spanning the width of the plot region (pin[1])
  arrows(x0 = par("usr")[1], 8, par("usr")[2], 8, col="black", cex=2, code=3, angle = 15, length = .2)
  text(x = mean(par("usr")[1:2]), y=8.2, labels = "pin[1]", adj=c(0.5, 0))
}
Mar <- function(bottom=NULL, left=NULL, top=NULL, right=NULL, outer=FALSE){
  # Set plot margins (par 'mar') or, with outer=TRUE, outer margins
  # (par 'oma'): only the sides that are supplied are changed, the current
  # values are kept for the others. Invisibly returns the previous par()
  # setting so it can be restored later.

  cur <- if (outer) par("oma") else par("mar")

  # replace NULL arguments by the corresponding current margin value
  sides <- list(bottom, left, top, right)
  for (i in 1:4)
    if (is.null(sides[[i]]))
      sides[[i]] <- cur[i]
  newmar <- unlist(sides)

  res <- if (outer) par(oma = newmar) else par(mar = newmar)

  invisible(res)
}
Xplore <- function (x) {
  # Interactive data explorer for a data.frame 'x', built on the RStudio
  # 'manipulate' package: pickers/sliders choose variables and plot
  # parameters, and the corresponding plot (or Desc() call) is evaluated
  # on every change.

  # Build the command string to be evaluated from the current control values.
  # 'desc' switches to a Desc() call, 'show' prints the generated command.
  .PrepCmd <- function(xvar, yvar, data, dcol, col, dpch, pch, alpha, cex, grid, smooth, desc, show) {
    if(desc){
      if(yvar == "none"){
        s <- gettextf("Desc(%s$%s, plotit=FALSE)", deparse(substitute(data)), xvar)
      } else {
        s <- gettextf("Desc(%s ~ %s, data=%s, plotit=FALSE)", yvar, xvar, deparse(substitute(data)))
      }
    } else {
      if(xvar=="none" & yvar == "none"){
        # nothing selected yet: draw an empty canvas
        s <- "Canvas()"
      } else if (yvar == "none") {
        s <- gettextf("PlotDesc(%s$%s, na.rm=TRUE)",
                      deparse(substitute(data)), xvar)
      } else {
        # bivariate plot; colors/characters can be mapped to a data variable
        # (dcol/dpch) or set to a constant (col/pch)
        s <- gettextf("plot(%s ~ %s, data=%s", yvar,
                      xvar, deparse(substitute(data)))
        if (!is.na(dcol)) {
          s <- paste(s, gettextf(", col=as.numeric(%s)", dcol))
        } else if (!is.na(col)) {
          s <- paste(s, gettextf(", col=SetAlpha('%s', %s)", col, alpha))
        }
        if (!is.na(dpch)) {
          s <- paste(s, gettextf(", pch=as.numeric(%s)", dpch))
        } else if (!is.na(pch)) {
          s <- paste(s, gettextf(", pch=as.numeric(%s)", pch))
        }
        if (!is.na(cex)) {
          s <- paste(s, gettextf(", cex=as.numeric(%s)", cex))
        }
        s <- paste(s, ")")
      }
      # NOTE(review): the command is only printed for the plot branch, never
      # for the Desc branch, and before grid/smoother are appended — confirm
      # whether that is intended
      if (show)
        cat(s, "\n")
    }
    if(grid) s <- paste(s, ";grid()")
    if (!is.na(smooth)) {
      scmd <- ""
      if(smooth == "linear"){
        scmd <- gettextf("lines(lm(%s ~ %s, data=%s))", yvar,
                         xvar, deparse(substitute(data)))
      } else if(smooth == "loess"){
        scmd <- gettextf("lines(loess(%s ~ %s, data=%s))", yvar,
                         xvar, deparse(substitute(data)))
      }
      s <- paste(s, ";", scmd)
    }
    return(s)
  }

  # NOTE(review): quietly = FALSE makes requireNamespace() print a message
  # when 'manipulate' is unavailable — possibly quietly = TRUE was intended
  if (requireNamespace("manipulate", quietly = FALSE)){

    # define the variables here, as the Rcmd check as CRAN will note miss a visible binding:
    # Explore: no visible binding for global variable 'xvar'
    xvar <- character()
    yvar <- character()
    dcol <- character()
    dpch <- character()
    col <- character()
    pch <- character()
    alpha <- character()
    cex <- character()
    desc <- logical()
    show <- logical()

    variables <- c("none", as.list(names(x)))
    # only non-numeric variables qualify for data-driven color/pch mapping
    snames <- c(none = NA, as.list(names(x)[!sapply(x, IsNumeric)]))
    cols <- as.list(colors())
    smoothers <- as.list(c("none", "loess", "linear", "spline"))

    manipulate::manipulate({
      eval(parse(text = .PrepCmd(xvar, yvar, x, dcol, col, dpch, pch, alpha, cex, grid, smooth, desc, show)))
    },
    yvar = manipulate::picker(variables, initial = "none", label = "y-variable "),
    xvar = manipulate::picker(variables, initial = "none", label = "x-variable "),
    dcol = manipulate::picker(snames, initial = "none", label = "data color "),
    col = manipulate::picker(cols, initial = "black", label = "color "),
    dpch = manipulate::picker(snames, initial = "none", label = "data point character"),
    pch = manipulate::picker(as.list(as.character(1:25)), initial = "1", label = "point character"),
    alpha = manipulate::slider(min=0, max = 1, step = 0.1, ticks = TRUE, initial = 1, label = "transparency"),
    cex = manipulate::slider(min=0.1, max = 5, step = 0.1, ticks = TRUE, initial = 1, label = "point character extension"),
    grid = manipulate::checkbox(initial = FALSE, label = "grid"),
    smooth = manipulate::picker(smoothers, initial = "none", label = "smoother "),
    desc = manipulate::button("Describe"),
    show = manipulate::button("Print command")
    )
  }
}
###
# PlotTools *************************************
## graphics: base ====
lines.loess <- function(x, col = Pal()[1], lwd = 2, lty = "solid", type = "l", n = 100
                        , conf.level = 0.95, args.band = NULL, ...){

  # Add a fitted loess smoother to an existing plot, optionally with a
  # pointwise (normal approximation) confidence band.
  #   x          : a fitted loess object
  #   n          : number of grid points the smoother is evaluated at
  #   conf.level : confidence level of the band; NA suppresses the band
  #   args.band  : list of arguments overriding the band defaults

  grid.x <- seq(from = min(x$x, na.rm=TRUE), to = max(x$x, na.rm=TRUE), length.out = n)
  pred <- predict(x, newdata = grid.x, se = !is.na(conf.level))

  if (!is.na(conf.level)) {
    # default band style, user arguments take precedence
    band.args <- list(col = SetAlpha(col, 0.30), border = NA)
    if (!is.null(args.band))
      band.args[names(args.band)] <- args.band

    # half-width is negative here since qnorm((1-cl)/2) < 0,
    # so fit + half is the lower and fit - half the upper limit
    half <- qnorm((1 - conf.level)/2) * pred$se.fit
    do.call("DrawBand", c(band.args,
                          list(x = c(grid.x, rev(grid.x))),
                          list(y = c(pred$fit + half, rev(pred$fit - half)))))

    # keep only the fitted values for the line below
    pred <- pred$fit
  }

  lines(y = pred, x = grid.x, col = col, lwd = lwd, lty = lty, type = type)
}
lines.SmoothSpline <- function (x, col = Pal()[1], lwd = 2, lty = "solid",
                                type = "l", conf.level = 0.95, args.band = NULL,
                                ...) {
  # A "SmoothSpline" object is a plain smooth.spline fit, so simply delegate
  # to the smooth.spline method with all arguments passed by name.
  lines.smooth.spline(x = x, col = col, lwd = lwd, lty = lty, type = type,
                      conf.level = conf.level, args.band = args.band, ...)
}
lines.smooth.spline <- function (x, col = Pal()[1], lwd = 2, lty = "solid",
                                 type = "l", conf.level = 0.95, args.band = NULL,
                                 ...) {

  # Add a fitted smooth.spline to an existing plot, optionally with a
  # pointwise confidence band based on jackknife residuals.
  #   conf.level : confidence level of the band; NA suppresses the band
  #   args.band  : list of arguments overriding the band defaults

  # evaluate the fit at the original x values
  # (predict.smooth.spline's data argument is named 'x'; 'newdata' is
  # deliberately kept from the original call and falls through to '...')
  newx <- x$x
  fit <- predict(x, newdata = newx)

  if (!is.na(conf.level)) {
    band.args <- list(col = SetAlpha(col, 0.3), border = NA)
    if (!is.null(args.band))
      band.args[names(args.band)] <- args.band

    # jackknife residuals and their standard deviation
    jack <- (x$yin - x$y)/(1 - x$lev)
    sdev <- sqrt(var(jack))

    # qnorm((1-conf.level)/2) is negative, so 'lo' is the lower limit
    lo <- fit$y + qnorm((1 - conf.level)/2) * sdev * sqrt(x$lev)
    hi <- fit$y - qnorm((1 - conf.level)/2) * sdev * sqrt(x$lev)

    do.call("DrawBand", c(band.args, list(x = c(newx, rev(newx))),
                          list(y = c(hi, rev(lo)))))
  }

  lines(y = fit$y, x = fit$x, col = col, lwd = lwd, lty = lty, type = type)
}
lines.lm <- function (x, col = Pal()[1], lwd = 2, lty = "solid",
                      type = "l", n = 100, conf.level = 0.95, args.cband = NULL,
                      pred.level = NA, args.pband = NULL, ...) {

  # Add the regression line of a simple linear model (exactly one predictor)
  # to an existing plot, optionally with a confidence band (conf.level /
  # args.cband) and a prediction band (pred.level / args.pband). Setting a
  # level to NA, or the corresponding args to NA, suppresses that band.

  mod <- x$model

  # we take simply the second column of the model data.frame to identify the x variable
  # this will crash, if there are several resps and yield nonsense if there is
  # more than one pred,
  # so check for a simple regression model y ~ x (just one resp, just one pred)

  # Note:
  # The following will not work, because predict does not correctly recognise the newdata data.frame:
  #   lines(lm(d.pizza$temperature ~ d.pizza$delivery_min), col=hred, lwd=3)
  # see what happens to the data.frame colnames in: predict(x, newdata=data.frame("d.pizza$delivery_min"=1:20))
  # this predict won't work.
  # always provide data: y ~ x, data
  # thiss is not a really new problem:
  # http://faustusnotes.wordpress.com/2012/02/16/problems-with-out-of-sample-prediction-using-r/

  # we would only plot lines if there's only one predictor
  pred <- all.vars(formula(x)[[3]])
  if(length(pred) > 1) {
    stop("Can't plot a linear model with more than 1 predictor.")
  }

  # the values of the predictor, re-evaluated from the model's original call;
  # NOTE(review): this requires the model to have been fitted with a 'data'
  # argument that is still visible here
  xpred <- eval(x$call$data)[, pred]

  # evaluation grid spanning the observed predictor range
  newx <- data.frame(seq(from = min(xpred, na.rm = TRUE),
                         to = max(xpred, na.rm = TRUE), length = n))
  colnames(newx) <- pred

  fit <- predict(x, newdata = newx)

  # prediction band (wider, drawn first so the confidence band lies on top)
  if (!(is.na(pred.level) || identical(args.pband, NA)) ) {
    args.pband1 <- list(col = SetAlpha(col, 0.12), border = NA)
    if (!is.null(args.pband))
      args.pband1[names(args.pband)] <- args.pband

    ci <- predict(x, interval="prediction", newdata=newx, level=pred.level)  # prediction band
    do.call("DrawBand", c(args.pband1, list(x = c(unlist(newx), rev(unlist(newx)))),
                          list(y = c(ci[,2], rev(ci[,3])))))
  }

  # confidence band for the mean response
  if (!(is.na(conf.level) || identical(args.cband, NA)) ) {
    args.cband1 <- list(col = SetAlpha(col, 0.12), border = NA)
    if (!is.null(args.cband))
      args.cband1[names(args.cband)] <- args.cband

    ci <- predict(x, interval="confidence", newdata=newx, level=conf.level)  # confidence band
    do.call("DrawBand", c(args.cband1, list(x = c(unlist(newx), rev(unlist(newx)))),
                          list(y = c(ci[,2], rev(ci[,3])))))
  }

  lines(y = fit, x = unlist(newx), col = col, lwd = lwd, lty = lty,
        type = type)
}
SmoothSpline <- function(x, ...){
  # Generic for fitting a smoothing spline; see SmoothSpline.default and
  # SmoothSpline.formula for the available interfaces.
  UseMethod("SmoothSpline")
}
SmoothSpline.default <- function (x, y = NULL, w = NULL, df, spar = NULL, cv = FALSE,
                                  all.knots = FALSE, nknots = .nknots.smspl, keep.data = TRUE,
                                  df.offset = 0, penalty = 1, control.spar = list(), tol = 0.000001 *
                                    IQR(x), ...){
  # Thin wrapper around stats::smooth.spline() with identical arguments,
  # provided so a formula interface can be offered alongside
  # (see SmoothSpline.formula).
  args <- list(x = x, y = y, w = w, spar = spar, cv = cv,
               all.knots = all.knots, nknots = nknots, keep.data = keep.data,
               df.offset = df.offset, penalty = penalty,
               control.spar = control.spar, tol = tol)
  # forward 'df' only when it was actually supplied, mirroring smooth.spline's
  # own missing(df) handling
  if (!missing(df))
    args$df <- df
  do.call(smooth.spline, args)
}
SmoothSpline.formula <- function(formula, data, subset, na.action, ...) {
  # Formula interface for SmoothSpline(): fits a smoothing spline to a
  # two-sided formula y ~ x evaluated in 'data'.

  #   mf <- model.frame(x, data)
  #   x <- mf[,2]
  #   y <- mf[,1]
  #   identify(x, y, ...)

  # accept only a two-sided formula with exactly one term on the right side
  if (missing(formula) || (length(formula) != 3L) || (length(attr(terms(formula[-2L]),
                                                                  "term.labels")) != 1L))
    stop("'formula' missing or incorrect")

  # rebuild the call as stats::model.frame(...) and evaluate it in the
  # caller's frame, so that 'data'/'subset'/'na.action' are resolved there
  m <- match.call(expand.dots = FALSE)
  if (is.matrix(eval(m$data, parent.frame())))
    m$data <- as.data.frame(data)
  m[[1L]] <- quote(stats::model.frame)
  m$... <- NULL
  mf <- eval(m, parent.frame())

  # column index of the response; the remaining column is the x variable
  response <- attr(attr(mf, "terms"), "response")
  SmoothSpline(x=mf[[-response]], y=mf[[response]], ...)
}
ErrBars <- function(from, to = NULL, pos = NULL, mid = NULL, horiz = FALSE, col = par("fg"), lty = par("lty"),
                    lwd = par("lwd"), code = 3, length=0.05,
                    pch = NA, cex.pch = par("cex"), col.pch = par("fg"), bg.pch = par("bg"), ... ) {

  # Draw error bars at positions 'pos', spanning 'from'..'to' (vertically by
  # default, horizontally with horiz=TRUE), optionally with a point at 'mid'.
  # Alternatively 'from' may be a kx2 matrix (from, to) or a kx3 matrix
  # (mid, from, to) with 'to' left NULL.

  if(is.null(to)) {
    if(dim(from)[2] %nin% c(2,3)) stop("'from' must be a kx2 or a kx3 matrix, when 'to' is not provided.")
    if(dim(from)[2] == 2) {
      to <- from[, 2]
      from <- from[, 1]
    } else {
      # kx3 layout: the first column holds the midpoints
      mid <- from[, 1]
      to <- from[, 3]
      from <- from[, 2]
    }
  }

  # default positions: one bar per element of 'from'
  if(is.null(pos)) pos <- 1:length(from)

  # an error bar is an arrow with flat (90 degree) heads on one or both ends
  if(horiz)
    arrows( x0=from, x1=to, y0=pos, col=col, lty=lty, lwd=lwd, angle=90, code=code, length=length, ... )
  else
    arrows( x0=pos, y0=from, y1=to, col=col, lty=lty, lwd=lwd, angle=90, code=code, length=length, ... )

  if(!is.na(pch)){
    # default midpoint: halfway between the bar ends
    if(is.null(mid)) mid <- (from + to)/2
    # plot points
    if(horiz)
      points(x=mid, y=pos, pch = pch, cex = cex.pch, col = col.pch, bg=bg.pch)
    else
      points(x=pos, y=mid, pch = pch, cex = cex.pch, col = col.pch, bg=bg.pch)
  }
}
ColorLegend <- function( x, y=NULL, cols=rev(heat.colors(100)), labels=NULL
                         , width=NULL, height=NULL, horiz=FALSE
                         , xjust=0, yjust=1, inset=0, border=NA, frame=NA
                         , cntrlbl = FALSE
                         , adj=ifelse(horiz,c(0.5,1), c(1,0.5)), cex=1.0, ...){

  # Draw a continuous color strip legend (e.g. for heatmaps) at position
  # x/y or at a keyword position ("topright" etc.), horizontally or
  # vertically, with optional labels along the strip.
  #   cntrlbl : center the labels on the color cells instead of the edges
  #   border  : border color of the individual cells, frame: outer frame
  # NOTE(review): ifelse() with a scalar condition returns a scalar, so only
  # the first element of the adj pair defaults here — confirm intent.

  # positioning code adapted from legend()
  auto <- if (is.character(x))
    match.arg(x, c("bottomright", "bottom", "bottomleft",
                   "left", "topleft", "top", "topright", "right", "center"))
  else NA
  usr <- par("usr")

  # default size: a slim strip covering most of the plot region
  if( is.null(width) ) width <- (usr[2L] - usr[1L]) * ifelse(horiz, 0.92, 0.08)
  if( is.null(height) ) height <- (usr[4L] - usr[3L]) * ifelse(horiz, 0.08, 0.92)

  if (is.na(auto)) {
    # explicit coordinates, adjusted by the justification factors
    left <- x - xjust * width
    top <- y + (1 - yjust) * height
  } else {
    # keyword position, shifted inwards by 'inset' (in fractions of the plot)
    inset <- rep(inset, length.out = 2)
    insetx <- inset[1L] * (usr[2L] - usr[1L])
    left <- switch(auto, bottomright = , topright = ,
                   right = usr[2L] - width - insetx, bottomleft = ,
                   left = , topleft = usr[1L] + insetx, bottom = ,
                   top = , center = (usr[1L] + usr[2L] - width)/2)
    insety <- inset[2L] * (usr[4L] - usr[3L])
    top <- switch(auto, bottomright = , bottom = , bottomleft = usr[3L] +
                    height + insety, topleft = , top = , topright = usr[4L] -
                    insety, left = , right = , center = (usr[3L] +
                                                           usr[4L] + height)/2)
  }

  # allow drawing outside the plot region while the legend is drawn
  xpd <- par(xpd=TRUE); on.exit(par(xpd))

  ncols <- length(cols)
  nlbls <- length(labels)

  if(horiz) {
    # one rect per color, laid out left to right
    rect( xleft=left, xright=left+width/ncols*seq(ncols,0,-1), ytop=top, ybottom=top-height,
          col=rev(cols), border=border)
    if(!is.null(labels)){
      if(cntrlbl) xlbl <- left + width/(2*ncols)+(width-width/ncols)/(nlbls-1) * seq(0,nlbls-1,1)
      else xlbl <- left + width/(nlbls-1) * seq(0,nlbls-1,1)
      text(y=top - (height + max(strheight(labels, cex=cex)) * 1.2)
           # same correction as in the vertical case
           # , x=x+width/(2*ncols)+(width-width/ncols)/(nlbls-1) * seq(0,nlbls-1,1)
           , x=xlbl, labels=labels, adj=adj, cex=cex, ...)
    }
  } else {
    # vertical strip, colors stacked top to bottom
    rect( xleft=left, ybottom=top-height, xright=left+width, ytop=top-height/ncols*seq(0,ncols,1),
          col=rev(cols), border=border)
    if(!is.null(labels)){
      # correction (13.6): the first and last label should not sit in the
      # middle of the edge color cells but exactly at the ends of the strip
      # old: , y=y-height/(2*ncols)- (height- height/ncols)/(nlbls-1) * seq(0,nlbls-1,1)
      #, y=y-height/(2*ncols)- (height- height/ncols)/(nlbls-1) * seq(0,nlbls-1,1)

      # 18.4.2015: reverse labels, as the logic below would misplace...
      labels <- rev(labels)
      if(cntrlbl) ylbl <- top - height/(2*ncols) - (height- height/ncols)/(nlbls-1) * seq(0, nlbls-1,1)
      else ylbl <- top - height/(nlbls-1) * seq(0, nlbls-1, 1)
      text(x=left + width + strwidth("0", cex=cex) + max(strwidth(labels, cex=cex)) * adj[1]
           , y=ylbl, labels=labels, adj=adj, cex=cex, ... )
    }
  }
  # outer frame around the whole strip
  if(!is.na(frame)) rect( xleft=left, xright=left+width, ytop=top, ybottom=top-height, border=frame)
}
BubbleLegend <- function(x, y=NULL, area, cols
                         , labels=NULL, cols.lbl = "black"
                         , width = NULL, xjust = 0, yjust = 1, inset=0, border="black", frame=TRUE
                         , adj=c(0.5,0.5), cex=1.0, cex.names=1, bg = NULL, ...){

  # Draw a legend for a bubble plot: concentric circles whose areas are
  # given by 'area' (scaled by cex), aligned at the bottom, with optional
  # labels at the top of each bubble.
  #   x, y : position, or a keyword like "topright" (as in legend())

  # positioning code adapted from legend()
  auto <- if(is.character(x))
    match.arg(x, c("bottomright", "bottom", "bottomleft",
                   "left", "topleft", "top", "topright", "right", "center"))
  else NA

  # bubble radii from the requested areas
  radius <- sqrt((area * cex)/pi)

  usr <- par("usr")
  # default box width: the largest bubble plus some padding, corrected for
  # the device aspect ratio so the bubbles appear circular
  if(is.null(width))
    width <- 2*max(radius) * 1.1 / Asp()

  #   if(is.null(asp))  # get aspect ratio from plot w/h
  #     asp <- par("pin")[1]/diff(par("usr")[1:2]) / par("pin")[2]/diff(par("usr")[3:4])

  height <- width * Asp()

  if (is.na(auto)) {
    # explicit coordinates, adjusted by the justification factors
    left <- x - xjust * width
    top <- y + (1 - yjust) * height
  } else {
    # keyword position, shifted inwards by 'inset'
    inset <- rep(inset, length.out = 2)
    insetx <- inset[1L] * (usr[2L] - usr[1L])
    left <- switch(auto, bottomright = , topright = , right = usr[2L] -
                     width - insetx, bottomleft = , left = , topleft = usr[1L] +
                     insetx, bottom = , top = , center = (usr[1L] + usr[2L] -
                                                            width)/2)
    insety <- inset[2L] * (usr[4L] - usr[3L])
    top <- switch(auto, bottomright = , bottom = , bottomleft = usr[3L] +
                    height + insety, topleft = , top = , topright = usr[4L] -
                    insety, left = , right = , center = (usr[3L] + usr[4L] +
                                                           height)/2)
  }

  # allow drawing outside the plot region
  xpd <- par(xpd=TRUE); on.exit(par(xpd))

  # background box around the legend
  if(!is.na(frame))
    rect( xleft=left, ybottom=top-height, xright=left+width, ytop=top,
          col=bg, border=frame)

  #   DrawCircle(x = left + width/2, y = (top - height/2) + max(radius) - radius,
  #              r.out = radius, col=cols, border=border)

  # bottom-aligned bubbles; ellipses compensate the device aspect ratio
  DrawEllipse(x = left + width/2, y = top-height/2 + max(radius) - radius,
              radius.x = radius / Asp(), radius.y = radius,
              col = cols, border=border)

  if(!is.null(labels)){
    d <- c(0, 2*radius)
    #  ylbl <- (top - height/2) + max(radius) - diff(d) /2 + d[-length(d)]
    # label positions: midpoints between consecutive bubble tops
    ylbl <- rev((top - height/2) + max(radius) - Midx(rev(2*radius), incl.zero = TRUE))
    text(x=left + width/2, y=ylbl, labels=labels, adj=adj, cex=cex.names, col=cols.lbl, ... )
  }
}
Canvas <- function(xlim=NULL, ylim=xlim, main=NULL, xpd=par("xpd"), mar=c(5.1,5.1,5.1,5.1),
                   asp=1, bg=par("bg"), usrbg="white", ...){

  # Open an empty, frameless plot region with equal axis scaling (asp=1),
  # convenient as a drawing canvas for the geometric primitives below.
  # xlim may be NULL (defaults to c(-1,1)) or a single number r (expanded to
  # c(-r, r)); usrbg fills the user region with a background color.
  # Invisibly returns the previous par() values so they can be restored.

  # helper: extract any par() settings passed via '...' and apply them,
  # returning the old values (uses match.call to inspect the dots)
  SetPars <- function(...){
    # expand dots
    arg <- unlist(match.call(expand.dots=FALSE)$...)
    # match par arguments
    par.args <- as.list(arg[names(par(no.readonly = TRUE)[names(arg)])])
    # store old values
    old <- par(no.readonly = TRUE)[names(par.args)]
    # set new values
    do.call(par, par.args)
    # return old ones
    invisible(old)
  }

  if(is.null(xlim)){
    xlim <- c(-1,1)
    ylim <- xlim
  }
  if(length(xlim)==1) {
    # a single number r means the symmetric range c(-r, r)
    xlim <- c(-xlim,xlim)
    ylim <- xlim
  }

  oldpar <- par("xpd"=xpd, "mar"=mar, "bg"=bg) # ; on.exit(par(usr))
  SetPars(...)

  # empty plot without axes, labels or frame
  plot( NA, NA, xlim=xlim, ylim=ylim, main=main, asp=asp, type="n", xaxt="n", yaxt="n",
        xlab="", ylab="", frame.plot = FALSE, ...)

  # optionally paint the user region with a background color
  if(usrbg != "white"){
    usr <- par("usr")
    rect(xleft=usr[1], ybottom=usr[3], xright=usr[2], ytop=usr[4], col=usrbg, border=NA)
  }

  # we might want to reset parameters afterwards
  invisible(oldpar)
}
Midx <- function(x, incl.zero = FALSE, cumulate = FALSE){
  # Return the midpoints between consecutive elements of x (a vector one
  # element shorter than x). With incl.zero=TRUE a 0 is prepended first, so
  # the result starts at x[1]/2; with cumulate=TRUE the midpoints are
  # accumulated with cumsum().
  if(incl.zero)
    x <- c(0, x)
  # moving average over pairs of neighbours; filter() leaves a trailing NA,
  # which is dropped here
  mids <- filter(x, rep(1/2, 2))
  mids <- mids[seq_len(length(mids) - 1L)]
  if(cumulate)
    mids <- cumsum(mids)
  return(mids)
}
###
## graphics: colors ----
Pal <- function(pal, n=100, alpha=1) {

  # Return a color palette.
  #   pal   : palette name, or an index into 'palnames' below; if missing,
  #           the palette stored in option "palette" (default: a reordered
  #           "Helsana") is returned
  #   n     : number of colors for the continuous (colorRampPalette) palettes
  #   alpha : transparency, applied to the result when different from 1
  # The result carries the palette name as attribute "name" and class "palette".

  if(missing(pal)) {
    res <- getOption("palette", default = structure(Pal("Helsana")[c(6,1:5,7:10)] ,
                                                    name = "Helsana", class = c("palette", "character")) )

  } else {

    # names for the numeric index interface; the Wes Anderson style palettes
    # further down can only be selected by name
    palnames <- c("RedToBlack","RedBlackGreen","SteeblueWhite","RedWhiteGreen",
                  "RedWhiteBlue0","RedWhiteBlue1","RedWhiteBlue2","RedWhiteBlue3","Helsana","Tibco","RedGreen1",
                  "Spring","Soap","Maiden","Dark","Accent","Pastel","Fragile","Big","Long","Night","Dawn","Noon","Light")

    if(is.numeric(pal)){
      pal <- palnames[pal]
    }

    # base colors for the Big/Long/Night/Dawn/Noon/Light palettes
    big <- c("#800000", "#C00000", "#FF0000", "#FFC0C0",
             "#008000","#00C000","#00FF00","#C0FFC0",
             "#000080","#0000C0", "#0000FF","#C0C0FF",
             "#808000","#C0C000","#FFFF00","#FFFFC0",
             "#008080","#00C0C0","#00FFFF","#C0FFFF",
             "#800080","#C000C0","#FF00FF","#FFC0FF",
             "#C39004","#FF8000","#FFA858","#FFDCA8")

    switch(pal
           , RedToBlack = res <- colorRampPalette(c("red","yellow","green","blue","black"), space = "rgb")(n)
           , RedBlackGreen = res <- colorRampPalette(c("red", "black", "green"), space = "rgb")(n)
           , SteeblueWhite = res <- colorRampPalette(c("steelblue","white"), space = "rgb")(n)
           , RedWhiteGreen = res <- colorRampPalette(c("red", "white", "green"), space = "rgb")(n)
           , RedWhiteBlue0 = res <- colorRampPalette(c("red", "white", "blue"))(n)
           , RedWhiteBlue1 = res <- colorRampPalette(c("#67001F", "#B2182B", "#D6604D", "#F4A582", "#FDDBC7",
                                                      "#FFFFFF", "#D1E5F0", "#92C5DE", "#4393C3", "#2166AC", "#053061"))(n)
           , RedWhiteBlue2 = res <- colorRampPalette(c("#BB4444", "#EE9988", "#FFFFFF", "#77AADD", "#4477AA"))(n)
           , RedWhiteBlue3 = res <- colorRampPalette(c(hred, "white", hblue))(n)
           , Helsana = res <- c("rot"="#9A0941", "orange"="#F08100", "gelb"="#FED037"
                                , "ecru"="#CAB790", "hellrot"="#D35186", "hellblau"="#8296C4", "hellgruen"="#B3BA12"
                                , "hellgrau"="#CCCCCC", "dunkelgrau"="#666666", "weiss"="#FFFFFF")
           , Tibco = res <- apply( mcol <- matrix(c(
             0,91,0, 0,157,69, 253,1,97, 60,120,177,
             156,205,36, 244,198,7, 254,130,1,
             96,138,138, 178,113,60
           ), ncol=3, byrow=TRUE), 1, function(x) rgb(x[1], x[2], x[3], maxColorValue=255))
           , RedGreen1 = res <- c(rgb(227,0,11, maxColorValue=255), rgb(227,0,11, maxColorValue=255),
                                  rgb(230,56,8, maxColorValue=255), rgb(234,89,1, maxColorValue=255),
                                  rgb(236,103,0, maxColorValue=255), rgb(241,132,0, maxColorValue=255),
                                  rgb(245,158,0, maxColorValue=255), rgb(251,184,0, maxColorValue=255),
                                  rgb(253,195,0, maxColorValue=255), rgb(255,217,0, maxColorValue=255),
                                  rgb(203,198,57, maxColorValue=255), rgb(150,172,98, maxColorValue=255),
                                  rgb(118,147,108, maxColorValue=255))
           , Spring = res <- c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3","#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#999999")
           , Soap = res <- c("#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3","#A6D854", "#FFD92F", "#E5C494", "#B3B3B3")
           , Maiden = res <- c("#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072","#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5", "#D9D9D9","#BC80BD","#CCEBC5")
           , Dark = res <- c("#1B9E77", "#D95F02", "#7570B3", "#E7298A","#66A61E", "#E6AB02", "#A6761D", "#666666")
           , Accent = res <- c("#7FC97F", "#BEAED4", "#FDC086", "#FFFF99","#386CB0", "#F0027F", "#BF5B17", "#666666")
           , Pastel = res <- c("#FBB4AE", "#B3CDE3", "#CCEBC5", "#DECBE4","#FED9A6", "#FFFFCC", "#E5D8BD", "#FDDAEC", "#F2F2F2")
           , Fragile = res <- c("#B3E2CD", "#FDCDAC", "#CBD5E8", "#F4CAE4","#E6F5C9", "#FFF2AE", "#F1E2CC", "#CCCCCC")
           , Big = res <- big
           , Long = res <- big[c(12,16,25,24,
                                 2,11,6,15,18,26,23,
                                 3,10,7,14,19,27,22,
                                 4,8,20,28)]
           , Night = res <- big[seq(1, 28, by=4)]
           , Dawn = res <- big[seq(2, 28, by=4)]
           , Noon = res <- big[seq(3, 28, by=4)]
           , Light = res <- big[seq(4, 28, by=4)]
           # fixed: this branch previously read 'res < c(...)' (comparison
           # instead of assignment), so Pal("GrandBudapest") failed with
           # "object 'res' not found"
           , GrandBudapest = res <- c("#F1BB7B", "#FD6467", "#5B1A18", "#D67236")
           , Moonrise1 = res <- c("#F3DF6C", "#CEAB07", "#D5D5D3", "#24281A")
           , Royal1 = res <- c("#899DA4", "#C93312", "#FAEFD1", "#DC863B")
           , Moonrise2 = res <- c("#798E87","#C27D38", "#CCC591", "#29211F")
           , Cavalcanti = res <- c("#D8B70A", "#02401B","#A2A475", "#81A88D", "#972D15")
           , Royal2 = res <- c("#9A8822", "#F5CDB4", "#F8AFA8", "#FDDDA0", "#74A089")
           , GrandBudapest2 = res <- c("#E6A0C4", "#C6CDF7", "#D8A499", "#7294D4")
           , Moonrise3 = res <- c("#85D4E3", "#F4B5BD", "#9C964A", "#CDC08C", "#FAD77B")
           , Chevalier = res <- c("#446455", "#FDD262", "#D3DDDC", "#C7B19C")
           , Zissou = res <- c("#3B9AB2", "#78B7C5", "#EBCC2A", "#E1AF00", "#F21A00")
           , FantasticFox = res <- c("#DD8D29", "#E2D200", "#46ACC8", "#E58601", "#B40F20")
           , Darjeeling = res <- c("#FF0000", "#00A08A", "#F2AD00", "#F98400", "#5BBCD6")
           , Rushmore = res <- c("#E1BD6D", "#EABE94", "#0B775E", "#35274A", "#F2300F")
           , BottleRocket = res <- c("#A42820", "#5F5647", "#9B110E", "#3F5151", "#4E2A1E", "#550307", "#0C1707")
           , Darjeeling2 = res <- c("#ECCBAE", "#046C9A", "#D69C4E", "#ABDDDE", "#000000")
    )

    attr(res, "name") <- pal
    class(res) <- append(class(res), "palette")
  }

  if(alpha != 1)
    res <- SetAlpha(res, alpha = alpha)

  return(res)
}
print.palette <- function(x, ...){
  # Print a palette: its name on the first line, the colors on the second.
  for (part in list(attr(x, "name"), unclass(x)))
    cat(part, "\n")
}
plot.palette <- function(x, cex = 3, ...) {
  # Visualize a palette: one row per color, the full color in the first
  # column and four increasingly transparent versions (alpha 0.8 .. 0.2)
  # to the right, labelled with index and color name.

  #   # use new window, but store active device if already existing
  #   if( ! is.null(dev.list()) ){
  #     curwin <- dev.cur()
  #     on.exit( {
  #       dev.set(curwin)
  #       par(oldpar)
  #     }
  #     )
  #   }
  #   windows(width=3, height=2.5, xpos=100, ypos=600)

  # minimal plot region without axes; restore par on exit
  oldpar <- par(mar=c(0,0,0,0), mex=0.001, xaxt="n", yaxt="n", ann=FALSE, xpd=NA)
  on.exit(par(oldpar))

  palname <- Coalesce(attr(x, "name"), "no name")

  n <- length(x)
  # reverse so that the first palette color ends up in the top row
  x <- rev(x)
  plot( x=rep(1, n), y=1:n, pch=22, cex=cex, col="grey60", bg=x, xlab="", ylab="", axes=FALSE,
        frame.plot=FALSE, ylim=c(0, n + 2), xlim=c(0.8, n))

  # header: title row and the alpha values of the four extra columns
  text( x=4.5, y=n + 1.2, labels="alpha", adj=c(0,0.5), cex=0.8)
  text( x=0.8, y=n + 2.0, labels=gettextf("\"%s\" Palette colors", palname), adj=c(0,0.5), cex=1.2)
  text( x=c(1,2.75,3.25,3.75,4.25), y= n +1.2, adj=c(0.5,0.5), labels=c("1.0", 0.8, 0.6, 0.4, 0.2), cex=0.8 )
  abline(h=n+0.9, col="grey")

  # row labels: index (counting from the top) plus color name, if any
  palnames <- paste(n:1, names(x))
  sapply(1:n, function(i){
    # the four alpha-faded swatches of row i
    xx <- c(2.75, 3.25, 3.75, 4.25)
    yy <- rep(i, 4)
    points(x=xx, y=yy, pch=22, cex=cex, col="grey60", bg=SetAlpha(x[i], alpha=c(0.8, 0.6, 0.4, 0.2)))
    text(x=1.25, y=i, adj=c(0,0.5), cex=0.8, labels=palnames[i])
  })

  invisible()

  #   points( x=rep(2.75,7), y=1:7, pch=15, cex=2, col=hc(7:1, alpha=0.8) )
  #   points( x=rep(3.25,7), y=1:7, pch=15, cex=2, col=hc(7:1, alpha=0.6) )
  #   points( x=rep(3.75,7), y=1:7, pch=15, cex=2, col=hc(7:1, alpha=0.4) )
  #   points( x=rep(4.25,7), y=1:7, pch=15, cex=2, col=hc(7:1, alpha=0.2) )
}
# example:
# barplot(1:7, col=SetAlpha(PalHelsana[c("ecru","hellgruen","hellblau")], 1) )
###
## geometric primitives ====
Stamp <- function(txt=NULL, las=par("las"), cex=0.6) {

  # Write a small text stamp (by default the current date) into the
  # bottom-right corner of the figure region of the current plot.
  #   txt : the text; if NULL, the DescTools option "stamp" is used (an
  #         expression there is evaluated), falling back to today's date
  #   las : 3 rotates the stamp by 90 degrees

  # set an option like:
  # options(stamp=expression("gettextf('%s/%s', Sys.getenv('USERNAME'), Format(Today(), fmt='yyyy-mm-dd')))")
  # if stamp is an expression, it will be evaluated

  # inner worker: compute the corner coordinates and draw the text
  stamp <- function(x) {
    #  opar <- par(yaxt='s', xaxt='s', xpd=TRUE)
    opar <- par(yaxt='s', xaxt='s', xpd=NA)
    on.exit(par(opar))
    plt <- par('plt')
    usr <- par('usr')

    ## when a logarithmic scale is in use (i.e. par('xlog') is true),
    ## then the x-limits would be 10^par('usr')[1:2]. Similarly for
    ## the y axis

    # translate the figure-region corner into user coordinates, inset by
    # roughly one character
    xcoord <- usr[2] + (usr[2] - usr[1])/(plt[2] - plt[1]) *
      (1-plt[2]) - cex*strwidth('m')
    ycoord <- usr[3] - diff(usr[3:4])/diff(plt[3:4])*(plt[3]) +
      cex*strheight('m')

    if(par('xlog')) xcoord <- 10^(xcoord)
    if(par('ylog')) ycoord <- 10^(ycoord)

    # las = 3: vertical stamp, otherwise horizontal and right-aligned
    if(las==3){
      srt <- 90
      adj <- 0
    } else {
      srt <- 0
      adj <- 1
    }

    ## Print the text on the current plot
    text(xcoord, ycoord, x, adj=adj, srt=srt, cex=cex)

    invisible(x)
  }

  if(is.null(txt)) {
    # get the option
    txt <- DescToolsOptions("stamp")
    if(is.null(txt)){
      # no option set: use today's date
      txt <- format(Sys.time(), '%Y-%m-%d')
    } else {
      if(is.expression(txt)){
        txt <- eval(parse(text = txt))
      }
    }
  }

  invisible(stamp(txt))
}
BoxedText <- function(x, y = NULL, labels = seq_along(x), adj = NULL,
                      pos = NULL, offset = 0.5, vfont = NULL,
                      cex = 1, txt.col = NULL, font = NULL, srt = 0, xpad = 0.2, ypad=0.2,
                      density = NULL, angle = 45,
                      col = "white", border = par("fg"), lty = par("lty"), lwd = par("lwd"), ...) {

  # Place text on the current plot inside a (possibly rotated) filled box.
  # All arguments are vectorized: they are recycled to the longest length
  # and each label is drawn by the scalar worker .BoxedText below.
  #   xpad, ypad : box padding in fractions of the width/height of an "M"
  #   srt        : rotation angle of box and text in degrees

  # scalar worker: draw one box plus its label
  .BoxedText <- function(x, y = NULL, labels = seq_along(x), adj = NULL,
                         pos = NA, offset = 0.5, vfont = NULL,
                         cex = 1, txt.col = NULL, font = NULL, srt = 0, xpad = 0.2, ypad=0.2,
                         density = NULL, angle = 45,
                         col = "white", border = NULL, lty = par("lty"), lwd = par("lwd"), ...) {

    if(is.na(pos)) pos <- NULL # we have to change default NULL to NA to be able to repeat it
    if(is.na(vfont)) vfont <- NULL

    # box dimensions: label extent plus padding measured in "M" widths/heights
    w <- strwidth(labels, cex=cex, font=font, vfont=vfont)
    h <- strheight(labels, cex=cex, font=font, vfont=vfont)

    if(length(adj) == 1) adj <- c(adj, 0.5)

    xl <- x - adj[1] * w - strwidth("M", cex=cex, font=font, vfont=vfont) * xpad
    xr <- xl + w + 2*strwidth("M", cex=cex, font=font, vfont=vfont) * xpad
    yb <- y - adj[2] * h - strheight("M", cex=cex, font=font, vfont=vfont) * ypad
    yt <- yb + h + 2*strheight("M", cex=cex, font=font, vfont=vfont) * ypad

    # rotate the box corners around the anchor point and draw box, then text
    xy <- Rotate(x=c(xl,xl,xr,xr), y=c(yb,yt,yt,yb), mx=x, my=y, theta=DegToRad(srt))
    polygon(x=xy$x, y=xy$y, col=col, density=density, angle=angle, border=border, lty=lty, lwd=lwd, ...)
    text(x=x, y=y, labels=labels, adj=adj, pos=pos, offset=offset, vfont=vfont, cex=cex, col=txt.col, font=font, srt=srt)
  }

  if(is.null(adj))
    adj <- c(0.5, 0.5)
  else
    adj <- rep(adj, length.out=2)

  if (is.null(pos)) pos <- NA
  if (is.null(vfont)) vfont <- NA
  if (is.null(txt.col)) txt.col <- par("fg")
  if (is.null(font)) font <- 1
  if (is.null(density)) density <- NA

  # recycle arguments:
  # which parameter has the highest dimension
  # attention: we cannot repeat NULLs but we can repeat NAs, so we swap NULLs to NAs and
  # reset them to NULL above
  lst <- list(x=x, y=y, labels=labels, pos=pos, offset=offset, vfont=vfont,
              cex=cex, txt.col=txt.col, font=font, srt=srt, xpad=xpad, ypad=ypad,
              density=density, angle=angle, col=col, border=border, lty=lty, lwd=lwd)
  maxdim <- max(unlist(lapply(lst, length)))
  # recycle all params to maxdim
  lgp <- lapply(lst, rep, length.out=maxdim )
  lgp$adj <- as.list(data.frame(replicate(adj, n=maxdim)))

  # draw every label with its own (recycled) parameter set
  for( i in 1:maxdim){
    .BoxedText(
      x=lgp$x[i], y=lgp$y[i], labels=lgp$labels[i], adj=lgp$adj[[i]], pos=lgp$pos[i], offset=lgp$offset[i]
      , vfont=lgp$vfont[i], cex=lgp$cex[i], txt.col=lgp$txt.col[i], font=lgp$font[i]
      , srt=lgp$srt[i], xpad=lgp$xpad[i], ypad=lgp$ypad[i], density=lgp$density[i]
      , angle=lgp$angle[i], col=lgp$col[i], border=lgp$border[i], lty=lgp$lty[i], lwd=lgp$lwd[i] )
  }
}
DrawBezier <- function (x = 0, y = x, nv = 100, col = par("col"), lty = par("lty")
                        , lwd = par("lwd"), plot = TRUE ) {

  # Evaluate (and, with plot=TRUE, draw) a Bezier curve defined by the
  # control points (x, y), approximated by nv points along the curve.
  # Invisibly returns the curve coordinates as list(x, y).

  if (missing(y)) {
    # a single list-like structure holding x and y was supplied
    y <- x[[2]]
    x <- x[[1]]
  }

  n <- length(x)
  tt <- seq(0, 1, length = nv)
  X <- Y <- single(nv)

  # the curve starts and ends exactly at the first/last control point
  X[1] <- x[1];  X[nv] <- x[n]
  Y[1] <- y[1];  Y[nv] <- y[n]

  # evaluate the Bernstein basis for the interior parameter values,
  # updating the binomial weight incrementally instead of via choose()
  for (i in 2:(nv - 1)) {
    z <- tt[i]
    bx <- by <- 0
    w <- (1 - z)^(n - 1)
    for (j in 0:(n - 1)) {
      bx <- bx + w * x[j + 1]
      by <- by + w * y[j + 1]
      w <- w * (n - 1 - j)/(j + 1) * z/(1 - z)
      # debugging only:
      # if (is.na(w)) print(c(i, j, z))
    }
    X[i] <- bx
    Y[i] <- by
  }

  if(plot)
    lines(x = as.single(X), y = as.single(Y), col=col, lty=lty, lwd=lwd )

  invisible(list(x = as.single(X), y = as.single(Y)))
}
DrawRegPolygon <- function( x = 0, y = x, radius.x = 1, radius.y = radius.x, rot = 0, nv = 3,
                            border = par("fg"), col = par("bg"), lty = par("lty"), lwd = par("lwd"), plot = TRUE ) {

  # Draw one or several regular polygons; also the workhorse for circles
  # and ellipses. All geometry arguments are recycled to the longest one.
  # examples:
  #   plot(c(0,1), c(0,1), asp=1, type="n")
  #   DrawRegPolygon(x=0.5, y=0.5, radius.x=seq(0.5,0.1,-0.1), rot=0, nv=3:10, col=2)

  # recycle the geometry arguments to a common length
  geom <- list(x = x, y = y, radius.x = radius.x, radius.y = radius.y, rot = rot, nv = nv)
  n <- max(sapply(geom, length))
  geom <- lapply(geom, rep, length.out = n)

  # recycle the graphical parameters as well
  if (length(col) < n) col <- rep(col, length.out = n)
  if (length(border) < n) border <- rep(border, length.out = n)
  if (length(lwd) < n) lwd <- rep(lwd, length.out = n)
  if (length(lty) < n) lty <- rep(lty, length.out = n)

  res <- vector("list", n)

  for (i in seq_len(n)) {
    # vertex angles, equally spaced on the full circle
    step <- 2 * pi / geom$nv[i]
    ang <- seq(0, 2 * pi - step, by = step)
    vx <- cos(ang) * geom$radius.x[i] + geom$x[i]
    vy <- sin(ang) * geom$radius.y[i] + geom$y[i]

    if (geom$rot[i] > 0) {
      # rotate the vertices around the polygon's center
      dx <- vx - geom$x[i]
      dy <- vy - geom$y[i]
      vx <- geom$x[i] + cos(geom$rot[i]) * dx - sin(geom$rot[i]) * dy
      vy <- geom$y[i] + sin(geom$rot[i]) * dx + cos(geom$rot[i]) * dy
    }

    if (plot)
      polygon(vx, vy, border = border[i], col = col[i], lty = lty[i],
              lwd = lwd[i])

    res[[i]] <- list(x = vx, y = vy)
  }

  # return plain coordinate lists; unwrap if there is only a single shape
  res <- lapply(res, xy.coords)
  if (length(res) == 1)
    res <- res[[1]]

  invisible(res)
}
# Draw one or several circles, annuli (rings, r.in > 0) or sectors
# (theta.1/theta.2 in radians, counter-clockwise). All arguments are
# recycled to the longest one via DescTools::Recycle(). The outline
# coordinates are returned invisibly, one list element per shape.
DrawCircle <- function (x = 0, y = x, r.out = 1, r.in = 0, theta.1 = 0,
                        theta.2 = 2 * pi, border = par("fg"), col = NA, lty = par("lty"),
                        lwd = par("lwd"), nv = 100, plot = TRUE) {

  # draws a single circle / ring / sector and returns its outline
  DrawSector <- function(x, y, r.in, r.out, theta.1,
                         theta.2, nv, border, col, lty, lwd, plot) {

    # get arc coordinates (outer arc in pts[[1]], inner arc in pts[[2]])
    pts <- DrawArc(x = x, y = y, rx = c(r.out, r.in), ry = c(r.out, r.in),
                   theta.1 = theta.1, theta.2 = theta.2, nv = nv,
                   col = border, lty = lty, lwd = lwd, plot = FALSE)

    is.ring <- (r.in != 0)
    # a full circle has theta.2 - theta.1 = 0 (mod 2*pi)
    is.sector <- any( ((theta.1-theta.2) %% (2*pi)) != 0)
    if(is.ring || is.sector) {
      # we have an inner and an outer circle: traverse the outer arc
      # forwards and the inner arc backwards so the polygon closes properly
      ptx <- c(pts[[1]]$x, rev(pts[[2]]$x))
      pty <- c(pts[[1]]$y, rev(pts[[2]]$y))
    } else {
      # no inner circle
      ptx <- pts[[1]]$x
      pty <- pts[[1]]$y
    }

    if (plot) {
      if (is.ring & !is.sector) {
        # full ring: fill the area without border, then redraw both
        # circles, so no closing line between inner and outer outline shows
        polygon(x = ptx, y = pty, col = col, border = NA,
                lty = lty, lwd = lwd)
        lines(x = pts[[1]]$x, y = pts[[1]]$y, col = border, lty = lty, lwd = lwd)
        lines(x = pts[[2]]$x, y = pts[[2]]$y, col = border, lty = lty, lwd = lwd)
      }
      else {
        polygon(x = ptx, y = pty, col = col, border = border,
                lty = lty, lwd = lwd)
      }
    }

    invisible(list(x = ptx, y = pty))
  }

  # recycle all arguments to the maximal length
  lgp <- DescTools::Recycle(x=x, y=y, r.in = r.in, r.out = r.out,
                            theta.1 = theta.1, theta.2 = theta.2, border = border,
                            col = col, lty = lty, lwd = lwd, nv = nv)

  # draw each shape in turn and collect the outlines
  lst <- list()
  for (i in 1L:attr(lgp, "maxdim")) {
    pts <- with(lgp, DrawSector(x=x[i], y=y[i], r.in=r.in[i],
                                r.out=r.out[i], theta.1=theta.1[i],
                                theta.2=theta.2[i], nv=nv[i], border=border[i],
                                col=col[i], lty=lty[i], lwd=lwd[i],
                                plot = plot))
    lst[[i]] <- pts
  }

  invisible(lst)
}
#
# DrawCircle <- function( x = 0, y = x, radius = 1, rot = 0, nv = 100, border = par("fg"), col = par("bg")
# , lty = par("lty"), lwd = par("lwd"), plot = TRUE ) {
# invisible( DrawRegPolygon( x = x, y = y, radius.x=radius, nv=nv, border=border, col=col, lty=lty, lwd=lwd, plot = plot ) )
# }
DrawEllipse <- function( x = 0, y = x, radius.x = 1, radius.y = 0.5, rot = 0, nv = 100, border = par("fg"), col = par("bg")
                         , lty = par("lty"), lwd = par("lwd"), plot = TRUE ) {
  # An ellipse is just a regular polygon with many vertices and two
  # different radii, so delegate all work to DrawRegPolygon().
  res <- DrawRegPolygon(x = x, y = y, radius.x = radius.x, radius.y = radius.y,
                        nv = nv, rot = rot, border = border, col = col,
                        lty = lty, lwd = lwd, plot = plot)
  invisible(res)
}
DrawArc <- function (x = 0, y = x, rx = 1, ry = rx, theta.1 = 0,
                     theta.2 = 2*pi, nv = 100, col = par("col"), lty = par("lty"),
                     lwd = par("lwd"), plot = TRUE) {

  # Draw one or several elliptical arcs from angle theta.1 to theta.2
  # (radians, counter-clockwise), approximated by nv points each.
  # All arguments are recycled to the longest one; the arc coordinates
  # are returned invisibly, one list element per arc.

  arg <- DescTools::Recycle(x=x, y=y, rx = rx, ry = ry,
                            theta.1 = theta.1, theta.2 = theta.2, nv = nv,
                            col=col, lty=lty, lwd=lwd)

  res <- vector("list", attr(arg, "maxdim"))

  for (i in seq_len(attr(arg, "maxdim"))) {
    # angular span; map a negative difference onto the positive range
    span <- arg$theta.2[i] - arg$theta.1[i]
    ang <- seq(from = 0,
               to = ifelse(span < 0, span + 2 * pi, span),
               length.out = arg$nv[i]) + arg$theta.1[i]

    px <- cos(ang) * arg$rx[i] + arg$x[i]
    py <- sin(ang) * arg$ry[i] + arg$y[i]

    if (plot)
      lines(px, py, col = arg$col[i], lty = arg$lty[i], lwd = arg$lwd[i])

    res[[i]] <- list(x = px, y = py)
  }

  invisible(res)
}
# replaced by 0.99.18:
#
# DrawArc <- function (x = 0, y = x, radius.x = 1, radius.y = radius.x, angle.beg = 0,
# angle.end = pi, nv = 100, col = par("col"), lty = par("lty"), lwd = par("lwd"), plot = TRUE) {
#
# # which geom parameter has the highest dimension
# lgp <- list(x = x, y = y, radius.x = radius.x, radius.y = radius.y,
# angle.beg = angle.beg, angle.end = angle.end, nv = nv)
# maxdim <- max(unlist(lapply(lgp, length)))
# # recycle all params to maxdim
# lgp <- lapply(lgp, rep, length.out = maxdim)
#
# # recycle shape properties
# if (length(col) < maxdim) {
# col <- rep(col, length.out = maxdim)
# }
# if (length(lwd) < maxdim) {
# lwd <- rep(lwd, length.out = maxdim)
# }
# if (length(lty) < maxdim) {
# lty <- rep(lty, length.out = maxdim)
# }
#
# lst <- list()
# for (i in 1:maxdim) {
# angdif <- lgp$angle.end[i] - lgp$angle.beg[i]
# theta <- seq(from = 0, to = ifelse(angdif < 0, angdif + 2*pi, angdif),
# length.out = lgp$nv[i]) + lgp$angle.beg[i]
# ptx <- (cos(theta) * lgp$radius.x[i] + lgp$x[i])
# pty <- (sin(theta) * lgp$radius.y[i] + lgp$y[i])
# if (plot) {
# lines(ptx, pty, col = col[i], lty = lty[i], lwd = lwd[i])
# }
# lst[[i]] <- list(x = ptx, y = pty)
# }
# invisible(lst)
# }
#
# DrawAnnulusSector <- function (x = 0, y = x, radius.in = 1, radius.out = 2, angle.beg = 0, angle.end = pi
# , nv = 100, border = par("fg"), col = par("bg"), lty = par("lty"), lwd = par("lwd"), plot = TRUE) {
#
# DrawSector <- function(x, y, radius.in, radius.out, angle.beg, angle.end
# , nv, border, col, lty, lwd, plot) {
# # let DrawArc calculate the 2 arcs
# pts <- DrawArc( x=x, y=y, radius.x = c(radius.out, radius.in), radius.y = c(radius.out, radius.in)
# , angle.beg = angle.beg, angle.end = angle.end, nv = nv
# , col = border, lty = lty, lwd = lwd, plot = FALSE )
# # combine the arcs to a annulus sector
# ptx <- c(pts[[1]]$x, rev(pts[[2]]$x))
# pty <- c(pts[[1]]$y, rev(pts[[2]]$y))
# if( plot ) { polygon(x = ptx, y = pty, col = col, border = border, lty = lty, lwd = lwd) }
# invisible(list(x = ptx, y = pty))
# }
#
# # which geom parameter has the highest dimension
# lgp <- list(x = x, y = y, radius.in = radius.in, radius.out = radius.out,
# angle.beg = angle.beg, angle.end = angle.end, nv = nv)
# maxdim <- max(unlist(lapply(lgp, length)))
# # recycle all params to maxdim
# lgp <- lapply(lgp, rep, length.out = maxdim)
#
# # recycle shape properties
# if (length(col) < maxdim) { col <- rep(col, length.out = maxdim) }
# if (length(border) < maxdim) { border <- rep(border, length.out = maxdim) }
# if (length(lwd) < maxdim) { lwd <- rep(lwd, length.out = maxdim) }
# if (length(lty) < maxdim) { lty <- rep(lty, length.out = maxdim) }
#
# # Draw the single sectors
# lst <- list()
# for (i in 1:maxdim) {
# pts <- DrawSector( x = lgp$x[i], y = lgp$y[i], radius.in = lgp$radius.in[i], radius.out = lgp$radius.out[i]
# , angle.beg = lgp$angle.beg[i], angle.end = lgp$angle.end[i], nv = lgp$nv[i]
# , border = border[i], col = col[i], lty = lty[i], lwd = lwd[i], plot = plot )
# lst[[i]] <- pts
# }
# invisible(lst)
#
# }
#
#
# DrawAnnulus <- function (x = 0, y = x, radius.in = 1, radius.out = 2, nv = 100, border = par("fg")
# , col = par("bg"), lty = par("lty"), lwd = par("lwd"), plot = TRUE) {
#
# pts.out <- DrawCircle(x = x, y = y, radius = radius.out, plot = FALSE)
# pts.in <- DrawCircle(x = x, y = y, radius = radius.in, plot = FALSE)
#
# ptx <- c( unlist(lapply(pts.out, "[", "x")), rev(unlist(lapply(pts.in, "[", "x"))) )
# pty <- c( unlist(lapply(pts.out, "[", "y")), rev(unlist(lapply(pts.in, "[", "y"))) )
#
# # we have to use polygon here, because of the transparent hole in the middle..
# # but don't know how to ged rid of the closing line, so draw polygon without border and then redraw circles
# polygon(x = ptx, y = pty, col = col, border = NA, lty = lty, lwd = lwd)
# lapply( pts.out, lines, col=border, lty=lty, lwd=lwd )
# lapply( pts.in, lines, col=border, lty=lty, lwd=lwd )
#
# invisible(list(x = ptx, y = pty))
#
# }
#
DrawBand <- function(x, y, col = SetAlpha("grey", 0.5), border = NA) {

  # Add a band (typically a confidence band) to an existing plot.
  # x and y may be vectors describing the closed outline directly, or
  # matrices where one of the two has 2 columns (lower/upper limit) and
  # the other one has 1 column.

  if (!identical(dim(y), dim(x))) {
    x <- as.matrix(x)
    y <- as.matrix(y)

    # duplicate the single-column argument so that both are n x 2
    if (dim(x)[2] == 1 && dim(y)[2] == 2)
      x <- x[, c(1, 1)]
    else if (dim(x)[2] == 2 && dim(y)[2] == 1)
      y <- y[, c(1, 1)]
    else
      stop("incompatible dimensions for matrices x and y")

    # walk along the first limit and back along the second
    # to get a closed outline
    x <- c(x[, 1], rev(x[, 2]))
    y <- c(y[, 1], rev(y[, 2]))
  }

  polygon(x = x, y = y, col = col, border = border)
}
Clockwise <- function(x, start = 0){
  # Convert a vector of angle widths x (radians) into clockwise
  # from/to angle pairs, beginning at the angle 'start'.
  ang <- 2*pi - c(0, cumsum(x), 2*pi) + start
  data.frame(from = ang[-1], to = ang[-length(ang)])
}
Rotate <- function( x, y=NULL, mx = NULL, my = NULL, theta=pi/3, asp=1 ) {

  # Rotate the points (x, y) by the angle theta (radians, counter-clockwise)
  # around the center (mx, my), defaulting to the centroid of the points.
  # asp corrects for a non-quadratic aspect ratio of the plot region.
  # Note: like polygon(), this does not recycle x against y;
  # xy.coords() defines the accepted input forms.

  pts <- xy.coords(x, y)

  if (is.null(mx)) mx <- mean(pts$x)
  if (is.null(my)) my <- mean(pts$y)

  # shift to the origin, rotate, shift back
  dx <- pts$x - mx
  dy <- pts$y - my
  rx <- mx + cos(theta) * dx - sin(theta) * dy / asp
  ry <- my + sin(theta) * dx * asp + cos(theta) * dy

  xy.coords(x = rx, y = ry)
}
GeomTrans <- function(x, y = NULL, trans = 0, scale = 1, theta = 0) {
  # Apply an affine transformation to the points (x, y): scale, then
  # translate, then rotate by theta around the centroid (via Rotate()).
  # trans and scale may be scalars or length-2 vectors (x/y component).
  # https://reference.wolfram.com/language/ref/ScalingTransform.html
  xy <- xy.coords(x, y)
  trans <- rep_len(trans, length.out = 2)
  # BUG FIX: this line previously read rep_len(trans, length.out=2), which
  # silently discarded the 'scale' argument and used 'trans' as the scale
  scale <- rep_len(scale, length.out = 2)
  xy$x <- (xy$x * scale[1]) + trans[1]
  xy$y <- (xy$y * scale[2]) + trans[2]
  xy <- Rotate(xy, theta = theta)
  return(xy)
}
Asp <- function(){
  # Aspect ratio of the current plot region: user units per inch in x,
  # relative to user units per inch in y.
  usr <- par("usr")
  pin <- par("pin")
  (pin[1] / (usr[2] - usr[1])) / (pin[2] / (usr[4] - usr[3]))
}
LineToUser <- function(line, side) {
  # Convert margin line positions (as used by mtext) into user coordinates
  # on the given side (1=bottom, 2=left, 3=top, 4=right).
  # Based on: http://stackoverflow.com/questions/29125019/ (jbaums)

  # height of one margin line in inches
  line.h <- par('cin')[2] * par('cex') * par('lheight')
  # extent of one inch in user coordinates
  inch.x <- diff(grconvertX(0:1, 'inches', 'user'))
  inch.y <- diff(grconvertY(0:1, 'inches', 'user'))

  switch(side,
         `1` = par('usr')[3] - line * inch.y * line.h,
         `2` = par('usr')[1] - line * inch.x * line.h,
         `3` = par('usr')[4] + line * inch.y * line.h,
         `4` = par('usr')[2] + line * inch.x * line.h,
         stop("side must be 1, 2, 3, or 4", call.=FALSE))
}
# Draw an arrow from (x0, y0) to (x1, y1).
# head = 1 uses the plain arrows() head; head = 2 draws a filled triangular
# head; head = 3 a triangle with an indented base. cex scales the head,
# col/border fill and outline it.
Arrow <- function(x0, y0, x1, y1, col=par("bg"), border = par("fg"), head=1, cex=1, lwd=1, lty=1){

  # builds the polygon of a filled arrowhead centered at (x, y),
  # rotated by theta (radians)
  ArrowHead <- function(x=0, y=0, type=2, cex=1, theta=0){
    # choose a default size relative to the plot region width (inches)
    rx <- par("pin")[1] / 100 * cex
    # get aspect ratio for not allowing the arrowhead to lose form
    asp <- Asp()
    # DrawRegPolygon's default nv = 3 yields a triangle
    head <- DrawRegPolygon(x, y, radius.x = rx, radius.y = rx * asp, plot=FALSE)
    if(type==3){
      # insert an extra vertex that indents the base of the triangle
      head$x <- append(head$x, head$x[1] - rx, 2)
      head$y <- append(head$y, y, 2)
    }
    # Rotate the head
    head <- Rotate(head, theta=theta, mx=x, my=y, asp = asp)
    # shift the head back so its tip lies on the line end
    head$x <- head$x - rx * cos(theta)
    head$y <- head$y - rx * sin(theta)
    return(head)
  }

  if(head > 1){
    segments(x0 = x0, y0 = y0, x1 = x1, y1 = y1, lty=lty, lwd=lwd)
    # line angle, corrected for the aspect ratio; add pi when the arrow
    # points to the left (atan alone cannot distinguish the direction)
    head <- ArrowHead(x=x1, y=y1, type=head, cex=cex,
                      theta= (atan((y0-y1) / Asp() /(x0-x1)) + (x0 > x1) * pi))
    polygon(head, col=col, border=border)
  } else {
    arrows(x0 = x0, y0 = y0, x1 = x1, y1 = y1, lty=lty, lwd=lwd)
  }
  invisible()
}
# Spread out a vector of positions (typically label coordinates) so that
# neighbouring values are at least 'mindist' apart, keeping squeezed groups
# centered on their original mean. NA values stay in place and the input
# order is preserved.
SpreadOut <- function(x, mindist = NULL, cex = 1.0) {
  # default minimal distance: 90% of the tallest string height
  # NOTE(review): strheight() requires an open graphics device
  if(is.null(mindist))
    mindist <- 0.9 * max(strheight(x, "inch", cex = cex))
  # nothing to spread with fewer than two non-missing values
  if(sum(!is.na(x)) < 2) return(x)
  xorder <- order(x)
  goodx <- x[xorder][!is.na(x[xorder])]
  gxlen <- length(goodx)
  # work outwards from the middle towards the lower end
  start <- end <- gxlen%/%2
  # nicely spread groups of short intervals apart from their mean
  while(start > 0) {
    # grow the group while neighbours are closer than mindist
    while(end < gxlen && goodx[end+1] - goodx[end] < mindist) end <- end+1
    while(start > 1 && goodx[start] - goodx[start-1] < mindist) start <- start-1
    if(start < end) {
      nsqueezed <- 1+end-start
      # place the group symmetrically around its mean, mindist apart
      newx <- sum(goodx[start:end]) / nsqueezed - mindist * (nsqueezed %/% 2 - (nsqueezed / 2 == nsqueezed %/% 2) * 0.5)
      for(stretch in start:end) {
        goodx[stretch] <- newx
        newx <- newx+mindist
      }
    }
    start <- end <- start-1
  }
  # same procedure from the middle towards the upper end
  start <- end <- length(goodx) %/% 2 + 1
  while(start < gxlen) {
    while(start > 1 && goodx[start] - goodx[start-1] < mindist) start <- start-1
    while(end < gxlen && goodx[end+1] - goodx[end] < mindist) end <- end+1
    if(start < end) {
      nsqueezed <- 1 + end - start
      newx <- sum(goodx[start:end]) / nsqueezed - mindist * (nsqueezed %/% 2 - (nsqueezed / 2 == nsqueezed %/% 2) * 0.5)
      for(stretch in start:end) {
        goodx[stretch] <- newx
        newx <- newx+mindist
      }
    }
    start <- end <- end+1
  }
  # force any remaining short intervals apart
  if(any(diff(goodx) < mindist)) {
    start <- gxlen %/% 2
    while(start > 1) {
      if(goodx[start] - goodx[start-1] < mindist)
        goodx[start-1] <- goodx[start] - mindist
      start <- start-1
    }
    end <- gxlen %/% 2
    while(end < gxlen) {
      if(goodx[end+1] - goodx[end] < mindist)
        goodx[end+1] <- goodx[end]+mindist
      end <- end+1
    }
  }
  # write the adjusted values back into the original (ordered) slots
  x[xorder][!is.na(x[xorder])] <- goodx
  return(x)
}
BarText <- function(height, b, labels = height, beside = FALSE, horiz = FALSE,
                    cex = par("cex"), adj = NULL, top = TRUE, ...) {

  # Place labels on the bars of a barplot. 'height' is the matrix/vector
  # passed to barplot(), 'b' the bar midpoints barplot() returned.
  # beside/horiz must match the barplot() call. With top=TRUE the labels
  # sit just beyond the bar ends, otherwise they are centered in the bars.

  if (beside) {
    # grouped bars: one label at the end (or middle) of every bar.
    # xpd=TRUE plots the text even outside the plot region; par("cxy")
    # is the size of a typical character in user coordinates.
    if (horiz) {
      if (is.null(adj)) adj <- 0
      xpos <- if (top) height + par("cxy")[1] * cex else height / 2
      text(y = b, x = xpos, labels = labels, cex = cex, xpd = TRUE, adj = adj, ...)
    } else {
      if (is.null(adj)) adj <- 0.5
      ypos <- if (top) height + par("cxy")[2] * cex else height / 2
      text(x = b, y = ypos, labels = labels, cex = cex, xpd = TRUE, adj = adj, ...)
    }

  } else {
    # stacked bars: one label in the middle of every segment
    if (is.null(adj)) adj <- 0.5
    mid <- t(apply(height, 2, Midx, incl.zero = TRUE, cumulate = TRUE))
    if (horiz)
      text(labels = t(labels), x = mid, y = b, cex = cex, adj = adj, ...)
    else
      text(labels = t(labels), x = b, y = mid, cex = cex, adj = adj, ...)
  }

  invisible()
}
# Add connection lines between the (stacked) segments of a barplot.
# ... takes exactly the arguments that were passed to barplot(); barplot()
# is only re-evaluated with plot=FALSE to obtain the bar midpoints.
# xalign = "mar" connects the bar margins across the gaps,
# xalign = "mid" connects the bar midpoints.
ConnLines <- function(..., col = 1, lwd = 1, lty = "solid", xalign = c("mar","mid") ) {
  # add connection lines to a barplot
  # ... are the arguments, passed to barplot
  b <- barplot(..., plot = FALSE)
  # inspect the barplot arguments unevaluated; evaluate them in the caller
  arg <- unlist(match.call(expand.dots = FALSE)$...)
  if(is.null(arg$horiz)) horiz <- FALSE else horiz <- eval(arg$horiz, parent.frame())
  # debug: print(horiz)
  # arg[[1]] is the height matrix/vector that was given to barplot()
  nr <- nrow(eval(arg[[1]], parent.frame())) # nrow(height)
  nc <- length(b)
  if(!is.null(nr)) {
    # stacked matrix: cumulated segment heights per bar
    tmpcum <- apply(eval(arg[[1]], parent.frame()), 2, cumsum)
    ypos1 <- tmpcum[, -nc]
    ypos2 <- tmpcum[, -1]
  } else {
    # plain vector of bar heights
    tmpcum <- eval(arg[[1]], parent.frame())
    ypos1 <- tmpcum[-nc]
    ypos2 <- tmpcum[-1]
    nr <- 1
  }
  xalign <- match.arg(xalign)
  if(xalign=="mar"){
    # the midpoints of the bars
    mx <- (b[-1] + b[-length(b)]) / 2
    # barplot's default space between the bars is 0.2
    if(is.null(arg$space)) space <- 0.2
    else space <- eval(arg$space, parent.frame())
    lx <- mx - space/2
    rx <- mx + space/2
    # one segment per stacked level, between neighbouring bar margins
    xpos1 <- rep(lx, rep(nr, length(lx)))
    xpos2 <- rep(rx, rep(nr, length(rx)))
    if(horiz == FALSE)
      segments(xpos1, ypos1, xpos2, ypos2, col=col, lwd=lwd, lty=lty)
    else
      segments(ypos1, xpos1, ypos2, xpos2, col=col, lwd=lwd, lty=lty)
  } else if(xalign=="mid") {
    # continuous lines through the bar midpoints
    if(horiz == FALSE) {
      if(nr > 1)
        matlines(x=replicate(nr, b), y=t(tmpcum), lty=lty, lwd=lwd, col=col)
      else
        lines(x=b, y=tmpcum, lty=lty, lwd=lwd, col=col)
    } else {
      if(nr > 1)
        matlines(y=replicate(nr, b), x=t(tmpcum), lty=lty, lwd=lwd, col=col)
      else
        lines(y=b, x=tmpcum, lty=lty, lwd=lwd, col=col)
    }
  }
  invisible()
}
# Draw a break mark on an axis to indicate a discontinuity in the scale.
#   axis:     1=bottom, 2=left, 3=top, 4=right
#   breakpos: position of the break in user coordinates (default: just
#             inside the lower end of the axis)
#   pos:      optional position of the axis line itself
#   style:    "gap", "slash" or any other value for a zigzag mark
#   brw:      break width relative to the plot extent
# Log axes are handled by transforming to/from log10 coordinates.
AxisBreak <- function (axis = 1, breakpos = NULL, pos = NA, bgcol = "white",
    breakcol = "black", style = "slash", brw = 0.02) {
    figxy <- par("usr")
    xaxl <- par("xlog")
    yaxl <- par("ylog")
    # absolute break extent in user coordinates
    xw <- (figxy[2] - figxy[1]) * brw
    yw <- (figxy[4] - figxy[3]) * brw
    if (!is.na(pos))
        figxy <- rep(pos, 4)
    # default break position near the lower end of the chosen axis
    if (is.null(breakpos))
        breakpos <- ifelse(axis%%2, figxy[1] + xw * 2, figxy[3] +
            yw * 2)
    # work in log10 coordinates on log-scaled axes
    if (xaxl && (axis == 1 || axis == 3))
        breakpos <- log10(breakpos)
    if (yaxl && (axis == 2 || axis == 4))
        breakpos <- log10(breakpos)
    # bounding box (x1, y1, x2, y2) of the break marker; switch() selects
    # the expression by the numeric value of 'axis' (1..4)
    switch(axis, br <- c(breakpos - xw/2, figxy[3] - yw/2, breakpos +
        xw/2, figxy[3] + yw/2), br <- c(figxy[1] - xw/2, breakpos -
        yw/2, figxy[1] + xw/2, breakpos + yw/2), br <- c(breakpos -
        xw/2, figxy[4] - yw/2, breakpos + xw/2, figxy[4] + yw/2),
        br <- c(figxy[2] - xw/2, breakpos - yw/2, figxy[2] +
            xw/2, breakpos + yw/2), stop("Improper axis specification."))
    # NOTE(review): old.xpd is saved but never restored; the function
    # unconditionally sets par(xpd = FALSE) at the end — verify intent
    old.xpd <- par("xpd")
    par(xpd = TRUE)
    # back-transform the bounding box on log axes
    if (xaxl)
        br[c(1, 3)] <- 10^br[c(1, 3)]
    if (yaxl)
        br[c(2, 4)] <- 10^br[c(2, 4)]
    if (style == "gap") {
        # blank out a gap across the whole plot region and draw its edges
        if (xaxl) {
            figxy[1] <- 10^figxy[1]
            figxy[2] <- 10^figxy[2]
        }
        if (yaxl) {
            figxy[3] <- 10^figxy[3]
            figxy[4] <- 10^figxy[4]
        }
        if (axis == 1 || axis == 3) {
            rect(breakpos, figxy[3], breakpos + xw, figxy[4],
                col = bgcol, border = bgcol)
            xbegin <- c(breakpos, breakpos + xw)
            ybegin <- c(figxy[3], figxy[3])
            xend <- c(breakpos, breakpos + xw)
            yend <- c(figxy[4], figxy[4])
            if (xaxl) {
                xbegin <- 10^xbegin
                xend <- 10^xend
            }
        }
        else {
            rect(figxy[1], breakpos, figxy[2], breakpos + yw,
                col = bgcol, border = bgcol)
            xbegin <- c(figxy[1], figxy[1])
            ybegin <- c(breakpos, breakpos + yw)
            xend <- c(figxy[2], figxy[2])
            yend <- c(breakpos, breakpos + yw)
            # NOTE(review): figxy[1:2] were already exponentiated above when
            # xaxl is TRUE, so this looks like a double 10^ — verify
            if (xaxl) {
                xbegin <- 10^xbegin
                xend <- 10^xend
            }
        }
        par(xpd = TRUE)
    }
    else {
        # blank the marker area, then draw slash or zigzag strokes over it
        rect(br[1], br[2], br[3], br[4], col = bgcol, border = bgcol)
        if (style == "slash") {
            # two parallel slashes across the axis
            if (axis == 1 || axis == 3) {
                xbegin <- c(breakpos - xw, breakpos)
                xend <- c(breakpos, breakpos + xw)
                ybegin <- c(br[2], br[2])
                yend <- c(br[4], br[4])
                if (xaxl) {
                  xbegin <- 10^xbegin
                  xend <- 10^xend
                }
            }
            else {
                xbegin <- c(br[1], br[1])
                xend <- c(br[3], br[3])
                ybegin <- c(breakpos - yw, breakpos)
                yend <- c(breakpos, breakpos + yw)
                if (yaxl) {
                  ybegin <- 10^ybegin
                  yend <- 10^yend
                }
            }
        }
        else {
            # zigzag: three connected strokes through the axis line
            if (axis == 1 || axis == 3) {
                xbegin <- c(breakpos - xw/2, breakpos - xw/4,
                  breakpos + xw/4)
                xend <- c(breakpos - xw/4, breakpos + xw/4, breakpos +
                  xw/2)
                ybegin <- c(ifelse(yaxl, 10^figxy[3 + (axis ==
                  3)], figxy[3 + (axis == 3)]), br[4], br[2])
                yend <- c(br[4], br[2], ifelse(yaxl, 10^figxy[3 +
                  (axis == 3)], figxy[3 + (axis == 3)]))
                if (xaxl) {
                  xbegin <- 10^xbegin
                  xend <- 10^xend
                }
            }
            else {
                xbegin <- c(ifelse(xaxl, 10^figxy[1 + (axis ==
                  4)], figxy[1 + (axis == 4)]), br[1], br[3])
                xend <- c(br[1], br[3], ifelse(xaxl, 10^figxy[1 +
                  (axis == 4)], figxy[1 + (axis == 4)]))
                ybegin <- c(breakpos - yw/2, breakpos - yw/4,
                  breakpos + yw/4)
                yend <- c(breakpos - yw/4, breakpos + yw/4, breakpos +
                  yw/2)
                if (yaxl) {
                  ybegin <- 10^ybegin
                  yend <- 10^yend
                }
            }
        }
    }
    segments(xbegin, ybegin, xend, yend, col = breakcol, lty = 1)
    par(xpd = FALSE)
}
###
## graphics: conversions ====
PolToCart <- function(r, theta) list(x=r*cos(theta), y=r*sin(theta))
CartToPol <- function(x, y) {
  # Convert cartesian coordinates (x, y) into polar ones (r, theta).
  theta <- atan(y/x)
  # atan() alone cannot identify the quadrant: shift points with
  # negative x into the correct half plane
  theta[x < 0] <- theta[x < 0] + pi
  list(r = sqrt(x^2 + y^2), theta = theta)
}
CartToSph <- function (x, y, z, up = TRUE ) {
  # Convert cartesian coordinates (x, y, z) into spherical (r, theta, phi).
  # With up=TRUE theta is measured from the xy-plane (elevation),
  # otherwise from the z-axis (inclination).
  azim <- CartToPol(x, y)                 # (x, y)  ->  (w, phi)
  rt <- if (up) {
    CartToPol(azim$r, z)                  # (w, z)  ->  (r, theta)
  } else {
    CartToPol(z, azim$r)                  # (z, w)  ->  (r, theta)
  }
  res <- c(rt[1], rt[2], azim[2])
  names(res) <- c("r", "theta", "phi")
  return (res)
}
SphToCart <- function (r, theta, phi, up = TRUE) {
  # Convert spherical coordinates (r, theta, phi) into cartesian (x, y, z);
  # inverse of CartToSph(). With up=TRUE theta is the elevation above the
  # xy-plane, otherwise the inclination from the z-axis.
  if (up) theta <- pi/2 - theta
  vz <- PolToCart(r, theta)   # vz$x = z component, vz$y = radius in xy-plane
  xy <- PolToCart(vz$y, phi)
  # BUG FIX: y was previously assigned xy$x, so the returned y always
  # duplicated x instead of being the true y component
  res <- list(x = xy$x, y = xy$y, z = vz$x)
  return (res)
}
ColToHex <- function(col, alpha = 1) {
  # Convert R color names to "#RRGGBB" hex strings; for alpha != 1 a
  # hex-coded alpha channel is appended (via DecToHex).
  m <- col2rgb(col)
  hex <- apply(m, 2, function(ch) sprintf("#%02X%02X%02X", ch[1], ch[2], ch[3]))
  if (alpha != 1)
    hex <- paste(hex, DecToHex(round(alpha * 255, 0)), sep = "")
  return(hex)
}
HexToRgb <- function(hex) {
  # Convert "#RRGGBB" hex strings into a matrix with one column per color
  # and three rows (red, green, blue as decimal 0..255).
  # example: HexToRgb(c("#A52A2A","#A52A3B"))
  parse1 <- function(h)
    c(strtoi(substr(h, 2, 3), 16L),
      strtoi(substr(h, 4, 5), 16L),
      strtoi(substr(h, 6, 7), 16L))
  do.call("cbind", lapply(hex, parse1))
}
# Find the name of the R color closest to a hex string
# (thin wrapper around RgbToCol, which accepts hex input as well).
HexToCol <- function(hexstr, method = "rgb", metric = "euclidean") {
  RgbToCol(hexstr, method = method, metric = metric)
}
# Find the name of the built-in R color (from colors()) that is closest to
# a given color. col is either a matrix with the rows R, G, B or a vector
# of "#RRGGBB" strings. method selects the color space ("rgb" or "hsv"),
# metric the distance ("euclidean" or "manhattan").
RgbToCol <- function(col, method="rgb", metric="euclidean") {
  switch( match.arg( arg=method, choices=c("rgb","hsv") )
          , "rgb" = {
            # accepts either a matrix with 3 columns RGB or a hexstr
            if(!is.matrix(col)) {
              # parse "#RRGGBB" strings into a 3 x n matrix
              col <- lapply(col, function(x) c(strtoi(substr(x,2,3), 16L), strtoi(substr(x,4,5), 16L), strtoi(substr(x,6,7), 16L)))
              col <- do.call("cbind", col)
            }
            # lookup table of all named colors, then nearest neighbour
            coltab <- col2rgb(colors())
            switch( match.arg( arg=metric, choices=c("euclidean","manhattan") )
                    , "euclidean" = {
                      colors()[apply(col, 2, function(x) which.min(apply(apply(coltab, 2, "-", x)^2, 2, sum)))]
                    }
                    , "manhattan" = {
                      colors()[apply(col, 2, function(x) which.min(apply(abs(apply(coltab, 2, "-", x)), 2, sum)))]
                    }
            )
          }
          , "hsv" ={
            # accepts either a matrix with 3 columns RGB or a hexstr
            col <- ColToHsv(col)
            # NOTE(review): ColToHsv() returns a matrix (rgb2hsv result), so
            # the following hex-parsing branch looks unreachable — verify
            if(!is.matrix(col)) {
              col <- lapply(col, function(x) c(strtoi(substr(x,2,3), 16L), strtoi(substr(x,4,5), 16L), strtoi(substr(x,6,7), 16L)))
              col <- do.call("cbind", col)
            }
            coltab <- ColToHsv(colors())
            switch( match.arg( arg=metric, choices=c("euclidean","manhattan") )
                    , "euclidean" = {
                      colors()[apply(col, 2, function(x) which.min(apply(apply(coltab, 2, "-", x)^2, 2, sum)))]
                    }
                    , "manhattan" = {
                      colors()[apply(col, 2, function(x) which.min(apply(abs(apply(coltab, 2, "-", x)), 2, sum)))]
                    }
            )
          }
  )
  # alternative?
  # Identify closest match to a color: plotrix::color.id
  # old:
  #   coltab <- col2rgb(colors())
  #   cdist <- apply(coltab, 2, function(z) sum((z - col)^2))
  #   colors()[which(cdist == min(cdist))]
}
# Pack RGB components (3 x n matrix) into "long" color integers:
# long = R + 256 * G + 65536 * B
# example: RgbToLong(ColToRgb(c("green", "limegreen")))
RgbToLong <- function(col) {
  drop(c(1, 256, 256^2) %*% col)
}
# Unpack "long" color integers into R, G, B components (inverse of
# RgbToLong): R = long mod 256, G = (long %/% 256) mod 256,
# B = (long %/% 65536) mod 256. Returns a 3 x n matrix.
# (A decimal-to-RGB conversion per channel is what col2rgb does for names.)
LongToRgb <- function(col) {
  sapply(col, function(v) c(v %% 256, (v %/% 256) %% 256, (v %/% 256^2) %% 256))
}
ColToRgb <- function(col, alpha = FALSE) col2rgb(col, alpha)
ColToHsv <- function(col, alpha = FALSE) rgb2hsv(ColToRgb(col, alpha))
ColToGrey <- function(col){
  # Convert colors to greyscale using the luminosity weights
  # 0.3 * R + 0.59 * G + 0.11 * B.
  val <- rbind(c(0.3, 0.59, 0.11)) %*% col2rgb(col)
  rgb(val, val, val, maxColorValue = 255)
}
# American-spelling alias for ColToGrey().
ColToGray <- function(col)
  ColToGrey(col)
# Add alpha channel to a HexCol
# paste("#00FF00", round(0.3 * 255,0), sep="" )
TextContrastColor <- function(col, method = c("glynn", "sonego")) {

  # For each color in 'col', choose a text color that gives good contrast
  # when written on top of it.
  #
  # "glynn":  black on light backgrounds, white on dark ones
  #           (efg, Stowers Institute for Medical Research,
  #            http://research.stowers-institute.org/efg/R/Color/Chart)
  # "sonego": dark blue / light yellow, based on a luminance estimate
  #           (Paolo Sonego, OneRTipaDay)
  #
  # Examples: TextContrastColor("white") -> "black"
  #           TextContrastColor("red")   -> "white"

  method <- match.arg(arg = method, choices = c("glynn", "sonego"))

  if (method == "glynn") {
    # a mean channel value above 127 indicates a light background
    res <- rep("white", length(col))
    res[apply(col2rgb(col), 2, mean) > 127] <- "black"

  } else {
    # luminance estimate L = (0.2 R + 0.6 G) / 255; light text below threshold
    L <- c(0.2, 0.6, 0) %*% col2rgb(col) / 255
    res <- ifelse(L >= 0.2, "#000060", "#FFFFA0")
  }

  return(res)
}
MixColor <- function (col1, col2, amount1 = 0.5) {

  # Mix two colors channel-wise (including the alpha channel):
  # result = amount1 * col1 + (1 - amount1) * col2.
  # All three arguments are recycled against each other.

  .blend <- function(c1, c2, w) {
    chan <- apply(col2rgb(c(c1, c2), alpha = TRUE), 1,
                  function(v) w * v[1] + (1 - w) * v[2])
    do.call("rgb", c(as.list(chan), maxColorValue = 255))
  }

  # cbind recycles the arguments; warnings about uneven lengths are muted
  args <- suppressWarnings(cbind(col1, col2, amount1))
  apply(args, 1, function(a) .blend(c1 = a[1], c2 = a[2], w = as.numeric(a[3])))
}
FindColor <- function(x, cols = rev(heat.colors(100)), min.x = NULL, max.x = NULL,
                      all.inside = FALSE){

  # Map the numeric values x onto the color vector cols by cutting the
  # range [min.x, max.x] into length(cols) equally wide intervals.
  # Values outside the range yield NA unless all.inside = TRUE.

  # default range limits taken from pretty axis breaks of x
  if (is.null(min.x)) min.x <- min(pretty(x))
  if (is.null(max.x)) max.x <- max(pretty(x))

  rng <- range(c(min.x, max.x))

  # index of the interval each value falls into
  idx <- findInterval(x, seq(rng[1], rng[2], length = length(cols) + 1),
                      rightmost.closed = TRUE, all.inside = all.inside)

  # index 0 means "below the lowest interval": no color
  idx[idx == 0] <- NA

  cols[idx]
}
SetAlpha <- function(col, alpha = 0.5) {

  # Add an alpha channel to colors, returning "#RRGGBBAA" strings.
  # alpha lies in [0, 1]; an NA alpha leaves the color without an alpha
  # channel, NA colors stay NA. col and alpha are recycled to equal length.

  if (length(alpha) < length(col)) alpha <- rep(alpha, length.out = length(col))
  if (length(col) < length(alpha)) col <- rep(col, length.out = length(alpha))

  # base "#RRGGBB" part, any existing alpha stripped
  res <- substr(ColToHex(col), 1, 7)

  # append the hex-coded alpha channel where alpha is not NA
  ok <- !is.na(alpha)
  res[ok] <- paste(res[ok], DecToHex(round(alpha[ok] * 255, 0)), sep = "")
  res[is.na(col)] <- NA

  return(res)
}
###
# Open a graphics file device of the given type, evaluate the plotting
# expression 'expr' on it, close the device and (optionally) open the
# produced file with its associated application.
#   fn:     filename without extension (getwd() is prepended if no "/" in fn)
#   type:   output format; width defaults to 8, height to width/golden ratio
#   units, res, compression: passed to the bitmap devices
#   open:   open the file afterwards
# NOTE(review): the file is opened via shell(), which exists on Windows
# only — open=TRUE will fail on other platforms; confirm intended scope.
PlotDev <- function(fn, type=c("tif", "pdf", "eps", "bmp", "png", "jpg"),
                    width=NULL, height=NULL, units="cm", res=300, open=TRUE,
                    compression="lzw",
                    expr, ...) {

  # example usage:
  # PlotDev(fn="bar", type="tiff", expr=
  #    barplot(1:5, col=Pal("Helsana"))
  # )

  type <- match.arg(type)

  # golden ratio
  golden <- (1+sqrt(5))/2
  if(is.null(width))
    width <- 8
  if(is.null(height))
    height <- width/golden

  # check if filename fn contains a path, if not append getwd()
  if(!grepl("/", fn))
    fn <- paste(getwd(), fn, sep="/")

  # append the extension and open the matching device
  switch(type,
         "tif" = { fn <- paste(fn, ".tif", sep="")
         tiff(filename = fn, width = width, height = height, units=units, res=res,
              compression=compression, ...)
         }
         , "pdf" = { fn <- paste(fn, ".pdf", sep="")
         pdf(file=fn, width = width, height = height)
         }
         , "eps" = { fn <- paste(fn, ".eps", sep="")
         postscript(file=fn, width = width, height = height)
         }
         , "bmp" = { fn <- paste(fn, ".bmp", sep="")
         bitmap(file=fn, width = width, height = height, units=units, res=res, ...)
         }
         , "png" = { fn <- paste(fn, ".png", sep="")
         png(filename=fn, width = width, height = height, units=units, res=res, ...)
         }
         , "jpg" = { fn <- paste(fn, ".jpg", sep="")
         jpeg(filename=fn, width = width, height = height, units=units, res=res, ...)
         }
  )

  # evaluate the unevaluated plot expression in this frame; see
  # http://stackoverflow.com/questions/4692231/r-passing-expression-to-an-inner-function
  expr <- deparse(substitute(expr))
  eval(parse(text=expr))

  dev.off()

  cat(gettextf("plot produced:\n %s\n", fn))
  if(open)
    shell(gettextf("\"%s\"", fn))
}
## plots: PlotBubble ====
# S3 generic for bubble plots; dispatches to PlotBubble.default / PlotBubble.formula.
PlotBubble <-function(x, ...)
  UseMethod("PlotBubble")
# Draw a bubble chart: circles at (x, y) with the given areas.
# Bubbles are drawn largest-first (sorted decreasing by radius) so that
# small bubbles stay visible on top of large ones. Radii are corrected
# for the plot's aspect ratio (Asp()) so bubbles appear circular.
PlotBubble.default <- function(x, y, area, col=NA, cex=1, border=par("fg"), xlim = NULL, ylim=NULL,
                               na.rm = FALSE, ...) {
  # http://blog.revolutionanalytics.com/2010/11/how-to-make-beautiful-bubble-charts-with-r.html
  # ry is the y-radius derived from the bubble area; all aesthetics are
  # recycled to a common length, then sorted by descending radius
  d.frm <- Sort(as.data.frame(Recycle(x=x, y=y, area=area, col=col, border=border,
                                      ry = sqrt((area * cex)/pi)),
                              stringsAsFactors=FALSE), ord=3, decreasing=TRUE)
  if(na.rm) d.frm <- d.frm[complete.cases(d.frm),]
  # extend the limits by the radius of the outermost bubbles
  # NOTE(review): these expressions divide by pi twice
  # (sqrt((area*cex/pi)[...]/pi)) whereas ry above divides only once —
  # looks inconsistent; confirm which radius formula is intended.
  if(is.null(xlim))
    xlim <- range(pretty( sqrt((area * cex / pi)[c(which.min(d.frm$x), which.max(d.frm$x))] / pi) * c(-1,1) + c(min(d.frm$x),max(d.frm$x)) ))
  if(is.null(ylim))
    ylim <- range(pretty( sqrt((area * cex / pi)[c(which.min(d.frm$y), which.max(d.frm$y))] / pi) * c(-1,1) + c(min(d.frm$y),max(d.frm$y)) ))
  # make sure we see all the bubbles
  plot(x = x, y = y, xlim=xlim, ylim=ylim, type="n", ...)
  # symbols(x=x, y=y, circles=sqrt(area / pi), fg=border, bg=col, inches=inches, add=TRUE)
  # x-radius corrected by the aspect ratio so the ellipse renders as a circle
  rx <- d.frm$ry / Asp()
  DrawEllipse(x = d.frm$x, y = d.frm$y, radius.x = rx, radius.y = d.frm$ry,
              col = d.frm$col, border=d.frm$border)
  # if(!identical(args.legend, NA)){
  #
  # rx <- d.l$ry / Asp()
  # DrawEllipse(x = d.l$x, y = d.l$y, radius.x = rx, radius.y = d.frm$ry,
  # col = d.l$col, border=d.l$border)
  # }
}
# Formula interface for PlotBubble, modeled on plot.formula: builds a
# model frame from 'formula'/'data', applies an optional 'subset', quotes
# main/sub/xlab so they are not evaluated too early, and calls the
# appropriate plot method once per right-hand-side variable.
PlotBubble.formula <- function (formula, data = parent.frame(), ..., subset, ylab = varnames[response]) {
  m <- match.call(expand.dots = FALSE)
  eframe <- parent.frame()
  md <- eval(m$data, eframe)
  if (is.matrix(md))
    m$data <- md <- as.data.frame(data)
  # evaluate the dot arguments in the data, then the calling frame
  dots <- lapply(m$..., eval, md, eframe)
  nmdots <- names(dots)
  if ("main" %in% nmdots)
    dots[["main"]] <- enquote(dots[["main"]])
  if ("sub" %in% nmdots)
    dots[["sub"]] <- enquote(dots[["sub"]])
  if ("xlab" %in% nmdots)
    dots[["xlab"]] <- enquote(dots[["xlab"]])
  # if ("panel.first" %in% nmdots)
  # dots[["panel.first"]] <- match.fun(dots[["panel.first"]])
  # http://r.789695.n4.nabble.com/panel-first-problem-when-plotting-with-formula-td3546110.html
  m$ylab <- m$... <- NULL
  subset.expr <- m$subset
  m$subset <- NULL
  # rebuild the call as a model.frame call (keeping NAs)
  m <- as.list(m)
  m[[1L]] <- stats::model.frame.default
  m <- as.call(c(m, list(na.action = NULL)))
  mf <- eval(m, eframe)
  if (!missing(subset)) {
    s <- eval(subset.expr, data, eframe)
    l <- nrow(mf)
    # subset any dot argument that has one value per row
    dosub <- function(x) if (length(x) == l)
      x[s]
    else x
    dots <- lapply(dots, dosub)
    mf <- mf[s, ]
  }
  # horizontal <- FALSE
  # if ("horizontal" %in% names(dots))
  # horizontal <- dots[["horizontal"]]
  response <- attr(attr(mf, "terms"), "response")
  if (response) {
    varnames <- names(mf)
    y <- mf[[response]]
    funname <- NULL
    xn <- varnames[-response]
    # if the response has a class, prefer a class-specific plot method
    if (is.object(y)) {
      found <- FALSE
      for (j in class(y)) {
        funname <- paste0("plot.", j)
        if (exists(funname)) {
          found <- TRUE
          break
        }
      }
      if (!found)
        funname <- NULL
    }
    if (is.null(funname))
      funname <- "PlotBubble"
    if (length(xn)) {
      if (!is.null(xlab <- dots[["xlab"]]))
        dots <- dots[-match("xlab", names(dots))]
      # one plot per explanatory variable on the right-hand side
      for (i in xn) {
        xl <- if (is.null(xlab))
          i
        else xlab
        yl <- ylab
        # if (horizontal && is.factor(mf[[i]])) {
        # yl <- xl
        # xl <- ylab
        # }
        do.call(funname, c(list(mf[[i]], y, ylab = yl,
                                xlab = xl), dots))
      }
    }
    else do.call(funname, c(list(y, ylab = ylab), dots))
  }
  # NOTE(review): this print() of the argument list looks like a leftover
  # debug statement — it echoes the data to the console; confirm intent.
  print(c(list(y, ylab = ylab), dots))
  invisible()
}
###
## plots: PlotFdist ====
# Frequency-distribution overview plot for a numeric variable: a
# histogram (or a mass plot for few distinct integer values) optionally
# combined — via layout() — with a density curve, a rug, a theoretical
# density/cdf curve, a boxplot with a mean confidence band, and an ecdf.
# Each args.* argument is a list of overrides for the respective element;
# passing NA suppresses that element entirely.
PlotFdist <- function (x, main = deparse(substitute(x)), xlab = ""
                       , xlim = NULL
                       # , do.hist =NULL # !(all(IsWhole(x,na.rm=TRUE)) & length(unique(na.omit(x))) < 13)
                       # do.hist overrides args.hist, add.dens and rug
                       , args.hist = NULL # list( breaks = "Sturges", ...)
                       , args.rug = NA # list( ticksize = 0.03, side = 1, ...), pass NA if no rug
                       , args.dens = NULL # list( bw = "nrd0", col="#9A0941FF", lwd=2, ...), NA for no dens
                       , args.curve = NA # list( ...), NA for no dcurve
                       , args.boxplot = NULL # list( pars=list(boxwex=0.5), ...), NA for no boxplot
                       , args.ecdf = NULL # list( col="#8296C4FF", ...), NA for no ecdf
                       , args.curve.ecdf = NA # list( ...), NA for no dcurve
                       , heights = NULL # heights (hist, boxplot, ecdf) used by layout
                       , pdist = NULL # distances of the plots, default = 0
                       , na.rm = FALSE, cex.axis = NULL, cex.main = NULL, mar = NULL, las=1) {
  # Inner helper: line/point "mass" plot of the relative frequencies,
  # used instead of a histogram for discrete-looking data.
  .PlotMass <- function(x = x, xlab = "", ylab = "",
                        xaxt = ifelse(add.boxplot || add.ecdf, "n", "s"), xlim = xlim, ylim = NULL, main = NA, las = 1,
                        yaxt="n", col=1, lwd=3, pch=NA, col.pch=1, cex.pch=1, bg.pch=0, cex.axis=cex.axis, ...) {
    pp <- prop.table(table(x))
    if(is.null(ylim))
      ylim <- c(0, max(pp))
    plot(pp, type = "h", lwd=lwd, col=col,
         xlab = "", ylab = "", cex.axis=cex.axis, xlim=xlim, ylim=ylim,
         xaxt = xaxt, main = NA, frame.plot = FALSE,
         las = las, panel.first = {
           abline(h = axTicks(2), col = "grey", lty = "dotted")
           abline(h = 0, col = "black")
         })
    if(!identical(pch, NA))
      points(pp, type="p", pch=pch, col=col.pch, bg=bg.pch, cex=cex.pch)
  }
  # Plot function to display the distribution of a cardinal variable
  # combines a histogram with a density curve, a boxplot and an ecdf
  # rug can be added by using add.rug = TRUE
  # default colors are Helsana CI-colors
  # dev question: should dots be passed somewhere??
  usr <- par(no.readonly=TRUE); on.exit(par(usr))
  opt <- DescToolsOptions(stamp=NULL)
  # an element is drawn unless its args.* argument is exactly NA
  add.boxplot <- !identical(args.boxplot, NA)
  add.rug <- !identical(args.rug, NA)
  add.dens <- !identical(args.dens, NA)
  add.ecdf <- !identical(args.ecdf, NA)
  add.dcurve <- !identical(args.curve, NA)
  add.pcurve <- !identical(args.curve.ecdf, NA)
  # preset heights
  if(is.null(heights)){
    if(add.boxplot) {
      if(add.ecdf) heights <- c(2, 0.5, 1.4)
      else heights <- c(2, 1.4)
    } else {
      if(add.ecdf) heights <- c(2, 1.4)
    }
  }
  if(is.null(pdist)) {
    if(add.boxplot) pdist <- c(0, 0)
    else pdist <- c(0, 1)
  }
  # stack the panels with layout(); text sizes scale with panel count
  if (add.ecdf && add.boxplot) {
    layout(matrix(c(1, 2, 3), nrow = 3, byrow = TRUE), heights = heights, TRUE)
    if(is.null(cex.axis)) cex.axis <- 1.3
    if(is.null(cex.main)) cex.main <- 1.7
  } else {
    if((add.ecdf || add.boxplot)) {
      layout(matrix(c(1, 2), nrow = 2, byrow = TRUE), heights = heights[1:2], TRUE)
      if(is.null(cex.axis)) cex.axis <- 0.9
    } else {
      if(is.null(cex.axis)) cex.axis <- 0.95
    }
  }
  # plot histogram, change margin if no main title
  par(mar = c(ifelse(add.boxplot || add.ecdf, 0, 5.1), 6.1, 2.1, 2.1))
  if(!is.null(mar)) {
    par(oma=mar)
  } else {
    if(!is.na(main)) { par(oma=c(0,0,3,0)) }
  }
  # wait for omitting NAs until all arguments are evaluated, e.g. main...
  if(na.rm) x <- x[!is.na(x)]
  # panel.last is extracted and evaluated manually after the histogram
  if(!is.null(args.hist[["panel.last"]])) {
    panel.last <- args.hist[["panel.last"]]
    args.hist[["panel.last"]] <- NULL
  } else {
    panel.last <- NULL
  }
  # decide histogram vs. mass plot: few distinct integer values -> mass plot,
  # unless args.hist$type explicitly forces one of the two
  if(is.null(args.hist$type)){
    do.hist <- !(isTRUE(all.equal(x, round(x), tol = sqrt(.Machine$double.eps))) && length(unique(x)) < 13)
  } else {
    do.hist <- (args.hist$type == "hist")
    args.hist$type <- NULL
  }
  # handle open list of arguments: args.legend in barplot is implemented this way...
  # we need histogram anyway to define xlim
  args.hist1 <- list(x = x, xlab = "", ylab = "", freq = FALSE,
                     xaxt = ifelse(add.boxplot || add.ecdf, "n", "s"), xlim = xlim, ylim = NULL, main = NA, las = 1,
                     col = "white", border = "grey70", yaxt="n")
  if (!is.null(args.hist)) {
    args.hist1[names(args.hist)] <- args.hist
  }
  x.hist <- DoCall("hist", c(args.hist1[names(args.hist1) %in%
                                          c("x", "breaks", "include.lowest", "right", "nclass")], plot = FALSE))
  x.hist$xname <- deparse(substitute(x))
  if (is.null(xlim)) args.hist1$xlim <- range(pretty(x.hist$breaks))
  args.histplot <- args.hist1[!names(args.hist1) %in% c("x", "breaks", "include.lowest", "right", "nclass")]
  if (do.hist) {
    # calculate max ylim for density curve, provided there should be one...
    # what's the maximal value in density or in histogramm$densities?
    # plot density
    if (add.dens) {
      # preset default values
      args.dens1 <- list(x = x, bw = (if(length(x) > 1000){"nrd0"} else {"SJ"}),
                         col = Pal()[2], lwd = 2, lty = "solid")
      if (!is.null(args.dens)) {
        args.dens1[names(args.dens)] <- args.dens
      }
      # x.dens <- DoCall("density", args.dens1[-match(c("col",
      # "lwd", "lty"), names(args.dens1))])
      #
      # # overwrite the ylim if there's a larger density-curve
      # args.histplot[["ylim"]] <- range(pretty(c(0, max(c(x.dens$y, x.hist$density)))))
      x.dens <- try( DoCall("density", args.dens1[-match(c("col", "lwd", "lty"), names(args.dens1))])
                     , silent=TRUE)
      if(inherits(x.dens, "try-error")) {
        warning(gettextf("density curve could not be added\n%s", x.dens))
        add.dens <- FALSE
      } else {
        # overwrite the ylim if there's a larger density-curve
        args.histplot[["ylim"]] <- range(pretty(c(0, max(c(x.dens$y, x.hist$density)))))
      }
    }
    # plot histogram
    DoCall("plot", append(list(x.hist), args.histplot))
    # draw axis
    ticks <- axTicks(2)
    n <- max(floor(log(ticks, base = 10))) # highest power of ten
    # use scientific-style axis labels (x 10^n) for very small/large ticks
    if(abs(n)>2) {
      lab <- Format(ticks * 10^(-n), digits=max(Ndec(as.character(zapsmall(ticks*10^(-n))))))
      axis(side=2, at=ticks, labels=lab, las=las, cex.axis=cex.axis)
      text(x=par("usr")[1], y=par("usr")[4], bquote(~~~x~10^.(n)), xpd=NA, pos = 3, cex=cex.axis*0.9)
    } else {
      axis(side=2, cex.axis=cex.axis, las=las)
    }
    if(!is.null(panel.last)){
      eval(parse(text=panel.last))
    }
    if (add.dens) {
      lines(x.dens, col = args.dens1$col, lwd = args.dens1$lwd, lty = args.dens1$lty)
    }
    # plot special distribution curve
    if (add.dcurve) {
      # preset default values: a fitted normal density
      args.curve1 <- list(expr = parse(text = gettextf("dnorm(x, %s, %s)", mean(x), sd(x))),
                          add = TRUE,
                          n = 500, col = Pal()[3], lwd = 2, lty = "solid")
      if (!is.null(args.curve)) {
        args.curve1[names(args.curve)] <- args.curve
      }
      if (is.character(args.curve1$expr)) args.curve1$expr <- parse(text=args.curve1$expr)
      # do.call("curve", args.curve1)
      # this throws an error heere:
      # Error in eval(expr, envir, enclos) : could not find function "expr"
      # so we roll back to do.call
      do.call("curve", args.curve1)
    }
    if (add.rug) {
      args.rug1 <- list(x = x, col = "grey")
      if (!is.null(args.rug)) {
        args.rug1[names(args.rug)] <- args.rug
      }
      DoCall("rug", args.rug1)
    }
  } else {
    # do not draw a histogram, but a line bar chart
    # PlotMass
    args.hist1 <- list(x = x, xlab = "", ylab = "", xlim = xlim,
                       xaxt = ifelse(add.boxplot || add.ecdf, "n", "s"), ylim = NULL, main = NA, las = 1,
                       yaxt="n", col=1, lwd=3, pch=NA, col.pch=1, cex.pch=2, bg.pch=0, cex.axis=cex.axis)
    if (is.null(xlim)) args.hist1$xlim <- range(pretty(x.hist$breaks))
    if (!is.null(args.hist)) {
      args.hist1[names(args.hist)] <- args.hist
      if(is.null(args.hist$col.pch)) # use the same color for pch as for the line, when not defined
        args.hist1$col.pch <- args.hist1$col
    }
    DoCall(.PlotMass, args.hist1)
    # plot(prop.table(table(x)), type = "h", xlab = "", ylab = "",
    # xaxt = "n", xlim = args.hist1$xlim, main = NA,
    # frame.plot = FALSE, las = 1, cex.axis = cex.axis, panel.first = {
    # abline(h = axTicks(2), col = "grey", lty = "dotted")
    # abline(h = 0, col = "black")
    # })
  }
  # boxplot
  if(add.boxplot){
    par(mar = c(ifelse(add.ecdf, 0, 5.1), 6.1, pdist[1], 2.1))
    args.boxplot1 <- list(x = x, frame.plot = FALSE, main = NA, boxwex = 1,
                          horizontal = TRUE, ylim = args.hist1$xlim,
                          at = 1, xaxt = ifelse(add.ecdf, "n", "s"),
                          outcex = 1.3, outcol = rgb(0,0,0,0.5), cex.axis=cex.axis,
                          pch.mean=3, col.meanci="grey85")
    if (!is.null(args.boxplot)) {
      args.boxplot1[names(args.boxplot)] <- args.boxplot
    }
    # empty panel first, so the mean-CI band can be drawn under the box
    plot(1, type="n", xlim=args.hist1$xlim, ylim=c(0,1)+.5, xlab="", ylab="", axes=FALSE)
    grid(ny=NA)
    if(length(x)>1){
      ci <- MeanCI(x, na.rm=TRUE)
      rect(xleft = ci[2], ybottom = 0.62, xright = ci[3], ytop = 1.35,
           col=args.boxplot1$col.meanci, border=NA)
    } else {
      ci <- mean(x)
    }
    args.boxplot1$add = TRUE
    DoCall("boxplot", args.boxplot1)
    # mark the mean on top of the box
    points(x=ci[1], y=1, cex=2, col="grey65", pch=args.boxplot1$pch.mean, bg="white")
  }
  # plot ecdf
  if (add.ecdf) {
    par(mar = c(5.1, 6.1, pdist[2], 2.1))
    # args.ecdf1 <- list(x = x, frame.plot = FALSE, main = NA,
    # xlim = args.hist1$xlim, col = getOption("col1", hblue), lwd = 2,
    # xlab = xlab, yaxt = "n", ylab = "", verticals = TRUE,
    # do.points = FALSE, cex.axis = cex.axis)
    args.ecdf1 <- list(x = x, main = NA, breaks={if(length(x)>1000) 1000 else NULL}, ylim=c(0,1),
                       xlim = args.hist1$xlim, col = Pal()[1], lwd = 2,
                       xlab = "", yaxt = "n", ylab = "", cex.axis = cex.axis,
                       frame.plot = FALSE)
    if (!is.null(args.ecdf)) {
      args.ecdf1[names(args.ecdf)] <- args.ecdf
    }
    DoCall("PlotECDF", args.ecdf1)
    # DoCall("plot.ecdf", args.ecdf1)
    # axis(side = 2, at = seq(0, 1, 0.25), labels = gsub(pattern = "0\\.",
    # replacement = " \\.", format(seq(0, 1, 0.25), 2)),
    # las = 1, xaxs = "e", cex.axis = cex.axis)
    # abline(h = c(0.25, 0.5, 0.75), col = "grey", lty = "dotted")
    # grid(ny = NA)
    # points(x=range(x), y=c(0,1), col=args.ecdf1$col, pch=3, cex=2)
    # plot special distribution ecdf curve
    if (add.pcurve) {
      # preset default values: a fitted normal cdf
      args.curve.ecdf1 <- list(expr = parse(text = gettextf("pnorm(x, %s, %s)", mean(x), sd(x))),
                               add = TRUE,
                               n = 500, col = Pal()[3], lwd = 2, lty = "solid")
      if (!is.null(args.curve.ecdf)) {
        args.curve.ecdf1[names(args.curve.ecdf)] <- args.curve.ecdf
      }
      if (is.character(args.curve.ecdf1$expr))
        args.curve.ecdf1$expr <- parse(text=args.curve.ecdf1$expr)
      # do.call("curve", args.curve1)
      # this throws an error here:
      # Error in eval(expr, envir, enclos) : could not find function "expr"
      # so we roll back to do.call
      do.call("curve", args.curve.ecdf1)
    }
  }
  if(!is.na(main)) {
    if(!is.null(cex.main)) par(cex.main=cex.main)
    title(main=main, outer = TRUE)
  }
  DescToolsOptions(opt)
  if(!is.null(DescToolsOptions("stamp")))
    if(add.ecdf)
      Stamp(cex=0.9)
  else
    Stamp()
  layout(matrix(1)) # reset layout on exit
}
# Plot an empirical cumulative distribution function as a step curve.
# With breaks=NULL every distinct value becomes a step; otherwise the
# data are binned with hist() first (useful for very large x).
# The y-axis is labeled .00/.25/.50/.75/1.00 and the extremes of x are
# marked with crosses at heights 0 and 1.
PlotECDF <- function(x, breaks=NULL, col=Pal()[1],
                     ylab="", lwd = 2, xlab = NULL, cex.axis = NULL, ...){
  if(is.null(breaks)){
    tab <- table(x)
    xp <- as.numeric(names(tab))
    # duplicate the first x so the step curve starts at height 0
    xp <- c(head(xp,1), xp)
    yp <- c(0, cumsum(tab))
  } else {
    xh <- hist(x, breaks=breaks, plot=FALSE)
    xp <- xh$mids
    xp <- c(head(xp,1), xp)
    yp <- c(0, cumsum(xh$density))
  }
  # normalize cumulative counts/densities to [0, 1]
  yp <- yp * 1/tail(yp, 1)
  if(is.null(xlab)) xlab <- deparse(substitute(x))
  plot(yp ~ xp, lwd=lwd, type = "s", col=col, xlab= xlab, yaxt="n",
       ylab = "", cex.axis=cex.axis, ...)
  axis(side = 2, at = seq(0, 1, 0.25),
       labels = gsub(pattern = "0\\.", replacement = " \\.", format(seq(0, 1, 0.25), 2)),
       las = 1, xaxs = "e", cex.axis = cex.axis)
  abline(h = c(0, 0.25, 0.5, 0.75, 1), col = "grey", lty = c("dashed","dotted","dotted","dotted","dashed"))
  grid(ny = NA)
  points(x = range(x), y = c(0, 1), col = col, pch = 3, cex = 2)
  if(!is.null(DescToolsOptions("stamp")))
    Stamp()
}
###
## plots: PlotMultiDens ====
# S3 generic: overlay several density curves; dispatches to
# PlotMultiDens.default (list input) or PlotMultiDens.formula.
PlotMultiDens <- function (x, ...)
  UseMethod("PlotMultiDens")
# Formula interface: builds a model frame and splits the response by the
# grouping variables on the right-hand side, then delegates to the
# default method with one density per group.
PlotMultiDens.formula <- function (formula, data, subset, na.action, ...) {
  if (missing(formula) || (length(formula) != 3))
    stop("formula missing or incorrect")
  m <- match.call(expand.dots = FALSE)
  if (is.matrix(eval(m$data, parent.frame())))
    m$data <- as.data.frame(data)
  m$... <- NULL
  m[[1]] <- as.name("model.frame")
  mf <- eval(m, parent.frame())
  response <- attr(attr(mf, "terms"), "response")
  PlotMultiDens(split(mf[[response]], mf[-response]), ...)
}
# Overlay the density curves of several numeric vectors (a list x).
# Line attributes and density arguments are recycled to the longest of
# x and the args.dens components; with 'fill' set, the curves are drawn
# as filled polygons. flipxy=TRUE draws vertical densities (for margins).
# Returns (invisibly) a data.frame with bw, n and kernel per curve.
PlotMultiDens.default <- function( x, xlim = NULL, ylim = NULL
                                   , col = Pal(), lty = "solid", lwd = 1
                                   , fill = NA
                                   , xlab = "x", ylab = "density"
                                   # , type = c("line", "stack", "cond")
                                   , args.dens = NULL
                                   , args.legend = NULL
                                   , na.rm = FALSE, flipxy=FALSE, ...) {
  # the input MUST be a numeric list, use split if there's no list:
  # PlotMultiDens(list(x,y,z))
  # Alternative:
  # library(lattice)
  # densityplot( ~ vl| vjdeck + region_x, data=d.set )
  FlipDensXY <- function(x){
    # flips x and y values of a density-object
    tmp <- x$x
    x$x <- x$y
    x$y <- tmp
    return(x)
  }
  # na.omit if wished
  if(na.rm) x <- lapply(x, na.omit)
  args.dens1 <- list(n = 2^12, kernel="epanechnikov") # default values
  if (!is.null(args.dens)) {
    args.dens1[names(args.dens)] <- args.dens
  }
  # recycle density arguments
  maxdim <- max(length(x), unlist(lapply(args.dens1, length)))
  args.dens1 <- lapply( args.dens1, rep, length.out=maxdim )
  # recycle x
  x <- rep(x, length.out=maxdim )
  # let's calculate the densities (groups with <= 2 values are skipped)
  l.dens <- list()
  for(i in 1:maxdim) {
    if(length(x[[i]]) > 2)
      l.dens[[i]] <- if(flipxy) {
        FlipDensXY(do.call("density", append(list(x[[i]]), lapply(args.dens1,"[", i)) ))
      } else {
        do.call("density", append(list(x[[i]]), lapply(args.dens1,"[", i)) )
      }
  }
  # recycle line attributes
  # which geom parameter has the highest dimension
  l.par <- list(lty=lty, lwd=lwd, col=col, fill=fill)
  l.par <- lapply( l.par, rep, length.out = maxdim )
  if( missing("xlim") ) xlim <- range(pretty( unlist(lapply(l.dens, "[", "x")) ) )
  if( missing("ylim") ) ylim <- range(pretty( unlist(lapply(l.dens, "[", "y")) ))
  dev.hold()
  on.exit(dev.flush())
  # empty canvas, then one curve (or polygon) per group
  plot( x=1, y=1, xlim = xlim, ylim = ylim, type="n", xlab=xlab, ylab=ylab, ... )
  # switch(match.arg(type,choices=c("line","stack","cond"))
  # overlay = {
  if(identical(fill, NA)){
    for(i in 1:length(l.dens)) {
      lines( l.dens[[i]], col=l.par$col[i], lty=l.par$lty[i], lwd=l.par$lwd[i] )
    }
  } else {
    for(i in 1:length(l.dens)) {
      polygon(x = l.dens[[i]]$x, y=l.dens[[i]]$y,
              col = l.par$fill[i], border=l.par$col[i], lty=l.par$lty[i], lwd=l.par$lwd[i])
    }
  }
  # },
  # stack = { },
  # cond = {
  # }
  # )
  # legend: filled boxes by default, line samples when lwd/lty vary
  args.legend1 <- list( x="topright", inset=0, legend=if(is.null(names(x))){1:length(x)} else {names(x)}
                        , fill=col, bg="white", cex=0.8 )
  if( length(unique(lwd))>1 || length(unique(lty))>1 ) {
    args.legend1[["fill"]] <- NULL
    args.legend1[["col"]] <- col
    args.legend1[["lwd"]] <- lwd
    args.legend1[["lty"]] <- lty
  }
  if ( !is.null(args.legend) ) { args.legend1[names(args.legend)] <- args.legend }
  add.legend <- TRUE
  if(!is.null(args.legend)) if(all(is.na(args.legend))) {add.legend <- FALSE}
  if(add.legend) DoCall("legend", args.legend1)
  # collect bw/n/kernel actually used, returned invisibly
  res <- DoCall(rbind, lapply((lapply(l.dens, "[", c("bw","n"))), data.frame))
  res$kernel <- unlist(args.dens1["kernel"])
  if(!is.null(DescToolsOptions("stamp")))
    Stamp()
  invisible(res)
}
## plots: PlotMarDens ====
# Scatterplot of x vs. y, grouped by 'grp', with marginal density curves
# drawn along the x-axis (top), the y-axis (right), or both ("all").
# Group colors/symbols are recycled; args.dens is split into arguments
# for density() itself and for PlotMultiDens(). Fix: the saved
# DescTools options are restored with DescToolsOptions(opt) — the former
# options(opt) would have written a bogus base option named "stamp"
# instead of restoring the DescTools setting (cf. PlotFdist).
PlotMarDens <- function( x, y, grp=1, xlim = NULL, ylim = NULL
                         , col = rainbow(nlevels(factor(grp)))
                         , mardens = c("all","x","y"), pch=1, pch.cex=1.0, main=""
                         , na.rm = FALSE, args.legend = NULL
                         , args.dens = NULL, ...){
  usr <- par("usr"); on.exit( par(usr) )
  # suppress stamping until the very end
  opt <- DescToolsOptions(stamp=NULL)
  mardens <- match.arg(arg = mardens, choices = c("all", "x", "y"))
  par(oma=c(0,0,3,0))
  d.frm <- data.frame(x=x, y=y, grp=grp)
  pch=rep(pch, length.out=nlevels(factor(grp))) # recycle pch
  # this is plot.default defaults
  xlim <- if (is.null(xlim)) range(x[is.finite(x)]) else xlim
  ylim <- if (is.null(ylim)) range(y[is.finite(y)]) else ylim
  # panel layout: main plot plus the requested marginal panels
  switch( mardens
          , "all" = { nf <- layout(matrix(c(2,0,1,3),2,2, byrow=TRUE), widths=c(9,1.5), heights=c(0.8,4), TRUE) }
          , "x" = { nf <- layout(matrix(c(2,1), 2,1, byrow=TRUE), c(9), c(0.8,4), TRUE) }
          , "y" = { nf <- layout(matrix(c(1,2),1,2, byrow=TRUE), c(9,1.5), c(4), TRUE) }
  )
  par(mar=c(5,5,1,1))
  plot(x=d.frm$x, y=d.frm$y, xlim=xlim, ylim=ylim, type="n", ... )
  # one point cloud per group
  s <- split(d.frm[,1:2], d.frm$grp)
  for( i in seq_along(s) ){
    points( x=s[[i]]$x, y=s[[i]]$y, col=col[i], pch=pch[i], cex=pch.cex)
  }
  # legend defaults; args.legend=NA suppresses the legend entirely
  args.legend1 <- list( x = "topright", inset = 0.02, legend = levels(factor(grp))
                        , col = col, pch = pch, bg = "white", cex = 0.8 )
  if ( !is.null(args.legend) ) {
    if(!all(is.na(args.legend))){
      args.legend1[names(args.legend)] <- args.legend
    } else {
      args.legend1 <- NA
    }
  }
  if(!all(is.na(args.legend1))) do.call("legend", args.legend1)
  # marginal density along the x-axis (top panel)
  if(mardens %in% c("all","x")){
    par(mar=c(0,5,0,1))
    args.plotdens1 <- list(x = split(d.frm$x, d.frm$grp), na.rm = TRUE,
                           col = col, xlim = xlim, axes=FALSE,
                           args.legend = NA, xlab="", ylab="")
    if (!is.null(args.dens)) {
      args.plotdens1[names(args.dens)] <- args.dens
    }
    # split args.dens: density()-arguments go into args.dens, the rest
    # are passed to PlotMultiDens directly
    args.dens1 <- list(n = 4096, bw = "nrd0", kernel = "epanechnikov")
    if (!is.null(args.dens)) {
      ovr <- names(args.dens)[names(args.dens) %in% names(args.dens1)]
      args.dens1[ovr] <- args.dens[ovr]
    }
    args.plotdens1$args.dens <- args.dens1
    args.plotdens1 <- args.plotdens1[names(args.plotdens1) %nin% names(args.dens1)]
    do.call("PlotMultiDens", args.plotdens1)
    # PlotMultiDens( split(d.frm$x, d.frm$grp), col=col, na.rm=TRUE, xlim=xlim
    # , axes=FALSE, args.legend = NA, xlab="", ylab="" )
  }
  # marginal density along the y-axis (right panel, flipped)
  if(mardens %in% c("all","y")){
    par(mar=c(5,0,1,1))
    args.plotdens1 <- list(x = split(d.frm$y, d.frm$grp), na.rm = TRUE,
                           col = col, ylim = ylim, axes=FALSE, flipxy=TRUE,
                           args.legend = NA, xlab="", ylab="")
    if (!is.null(args.dens)) {
      args.plotdens1[names(args.dens)] <- args.dens
    }
    args.dens1 <- list(n = 4096, bw = "nrd0", kernel = "epanechnikov")
    if (!is.null(args.dens)) {
      ovr <- names(args.dens)[names(args.dens) %in% names(args.dens1)]
      args.dens1[ovr] <- args.dens[ovr]
    }
    args.plotdens1$args.dens <- args.dens1
    args.plotdens1 <- args.plotdens1[names(args.plotdens1) %nin% names(args.dens1)]
    do.call("PlotMultiDens", args.plotdens1)
    # PlotMultiDens( split(d.frm$y, d.frm$grp), col=col, na.rm=TRUE, ylim=ylim
    # , axes = FALSE, args.legend = NA, flipxy=TRUE, xlab="", ylab="" )
  }
  title(main=main, outer=TRUE)
  # restore the DescTools options saved above (was: options(opt))
  DescToolsOptions(opt)
  if(!is.null(DescToolsOptions("stamp")))
    Stamp()
}
###
## plots: PlotArea ====
PlotArea <- function(x, ...) {
  # PlotArea - several areas stacked on top of each other
  # S3 generic; dispatches to PlotArea.default / PlotArea.formula
  # source: http://r.789695.n4.nabble.com/PlotArea-td2255121.html
  # arni...
  UseMethod("PlotArea")
}
# Stacked area chart. Accepts a ts/mts, table, matrix, data.frame/list
# (first element = x, rest = series) or a plain numeric vector (plotted
# against its index). With prop=TRUE the series are scaled to row-wise
# proportions. Returns the cumulated series matrix invisibly.
# Fix: rows with NAs are removed and the data ordered by x in one step
# (previously 'x' was overwritten before 'y' was subset with the stale
# logical index, so order() ran on an already-sorted vector and y could
# be mis-aligned); drop=FALSE keeps y a matrix for single-row inputs.
PlotArea.default <- function(x, y=NULL, prop=FALSE, add=FALSE, xlab=NULL, ylab=NULL,
                             col=NULL, frame.plot=FALSE, ...) {
  if(is.ts(x)) { # ts/mts
    if(is.null(ylab)) ylab <- deparse(substitute(x))
    x <- data.frame(Time=time(x), x)
  }
  if(is.table(x)) { # table
    if(is.null(ylab)) ylab <- deparse(substitute(x))
    if(length(dim(x)) == 1)
      x <- t(t(unclass(x)))
    else
      x <- unclass(x)
  }
  if(is.matrix(x)) { # matrix
    # numeric rownames become the x values, otherwise the row index
    if(!is.null(rownames(x)) && !any(is.na(suppressWarnings(as.numeric(rownames(x)))))) {
      x <- data.frame(as.numeric(rownames(x)), x)
      names(x)[1] <- ""
    } else {
      x <- data.frame(Index=seq_len(nrow(x)), x)
    }
  }
  if(is.list(x)) { # data.frame or list
    if(is.null(xlab)) xlab <- names(x)[1]
    if(is.null(ylab)) {
      if(length(x) == 2)
        ylab <- names(x)[2]
      else
        ylab <- ""
    }
    y <- x[-1]
    x <- x[[1]]
  }
  if(is.null(y)) { # one numeric vector passed, plot it on 1:n
    if(is.null(xlab)) xlab <- "Index"
    if(is.null(ylab)) ylab <- deparse(substitute(x))
    y <- x
    x <- seq_along(x)
  }
  if(is.null(xlab)) xlab <- deparse(substitute(x))
  if(is.null(ylab)) ylab <- deparse(substitute(y))
  y <- as.matrix(y)
  if(is.null(col)) col <- gray.colors(ncol(y))
  col <- rep(col, length.out=ncol(y))
  if(prop) y <- prop.table(y, 1)
  # cumulate the series column-wise, with a leading zero baseline
  y <- t(rbind(0, apply(y, 1, cumsum)))
  # drop incomplete rows, then order everything by x (fixed: compute the
  # keep-mask and the ordering BEFORE modifying x)
  keep <- !(is.na(x) | apply(is.na(y), 1, any))
  ord <- order(x[keep])
  x <- x[keep][ord]
  y <- y[keep, , drop=FALSE][ord, , drop=FALSE]
  if(!add) suppressWarnings(matplot(x, y, type="n", xlab=xlab, ylab=ylab, frame.plot=frame.plot, ...))
  # one polygon per band, between consecutive cumulated series
  xx <- c(x, rev(x))
  for(i in 1:(ncol(y)-1)) {
    yy <- c(y[,i+1], rev(y[,i]))
    suppressWarnings(polygon(xx, yy, col=col[i], ...))
  }
  if(!is.null(DescToolsOptions("stamp")))
    Stamp()
  invisible(y[,-1])
}
# Formula interface for PlotArea. Supports a "." left-hand side meaning
# "all columns of data not on the right-hand side" (expanded to a
# cbind() call). Fix: the dot test used a misplaced parenthesis —
# as.character(formula[[2]]==".") — which only worked because if()
# accepts the strings "TRUE"/"FALSE"; it is now an explicit symbol
# comparison via identical().
PlotArea.formula <- function (formula, data, subset, na.action, ...) {
  m <- match.call(expand.dots=FALSE)
  if(is.matrix(eval(m$data,parent.frame()))) m$data <- as.data.frame(data)
  m$... <- NULL
  m[[1]] <- as.name("model.frame")
  # lhs "." -> cbind() of all data columns not appearing on the rhs
  if(identical(formula[[2]], as.name("."))) {
    rhs <- unlist(strsplit(deparse(formula[[3]])," *[:+] *"))
    lhs <- sprintf("cbind(%s)", paste(setdiff(names(data), rhs),collapse=","))
    m[[2]][[2]] <- parse(text=lhs)[[1]]
  }
  mf <- eval(m, parent.frame())
  if(is.matrix(mf[[1]])) {
    # multi-series response: rebuild it as a data.frame with the
    # original column names, x values come from the remaining terms
    lhs <- as.data.frame(mf[[1]])
    names(lhs) <- as.character(m[[2]][[2]])[-1]
    PlotArea.default(cbind(mf[-1],lhs), ...)
  } else {
    PlotArea.default(mf[2:1], ...)
  }
}
###
## plots: PlotDotCI ====
# Cleveland-style dot plot (an extended dotchart): values x with labels,
# optional groups with group labels/data, and optional horizontal error
# bars supplied via args.errbars (either from/to vectors or a kx2/kx3
# matrix of mid/lwr/upr). Rows are reversed so the first element appears
# on top. Returns the y positions invisibly (top to bottom).
PlotDot <- function (x, labels = NULL, groups = NULL, gdata = NULL, cex = par("cex"),
                     pch = 21, gpch = 21, bg = par("bg"), color = par("fg"), gcolor = par("fg"),
                     lcolor = "gray", xlim = NULL, main = NULL, xlab = NULL, ylab = NULL, xaxt=NULL, yaxt=NULL,
                     add = FALSE, args.errbars = NULL, ...) {
  # Normalize the args.errbars list into explicit from/to/mid vectors
  # plus drawing attributes for arrows()/points().
  ErrBarArgs <- function(from, to = NULL, pos = NULL, mid = NULL,
                         horiz = FALSE, col = par("fg"), lty = par("lty"), lwd = par("lwd"),
                         code = 3, length = 0.05, pch = NA, cex.pch = par("cex"),
                         col.pch = par("fg"), bg.pch = par("bg"), ...) {
    if (is.null(to)) {
      # NOTE(review): this condition tests length(dim(x) != 1) — the
      # parenthesis placement and the reference to the outer 'x' (not
      # 'from') look unintended; confirm against the documented API.
      if (length(dim(x) != 1))
        stop("'to' must be be provided, if x is a matrix.")
      if (dim(from)[2] %nin% c(2, 3))
        stop("'from' must be a kx2 or a kx3 matrix, when 'to' is not provided.")
      # kx2: columns are from/to; kx3: columns are mid/from/to
      if (dim(from)[2] == 2) {
        to <- from[, 2]
        from <- from[, 1]
      }
      else {
        mid <- from[, 1]
        to <- from[, 3]
        from <- from[, 2]
      }
    }
    # reverse column order to match the reversed plotting order of x
    if (length(dim(from)) ==2 )
      from <- Rev(from, 2)
    if (length(dim(to)) ==2 )
      to <- Rev(to, 2)
    if (length(dim(mid)) ==2 )
      mid <- Rev(mid, 2)
    return(list(from = from, to = to, mid = mid, col = col,
                col.axis = 1, lty = lty, lwd = lwd, angle = 90, code = code,
                length = length, pch = pch, cex.pch = cex.pch, col.pch = col.pch,
                bg.pch = bg.pch))
  }
  # reverse everything so the first row/label ends up at the top
  x <- Rev(x, 1)
  labels <- rev(labels)
  groups <- rev(groups)
  # gdata <- rev(gdata)
  # gcolor <- Rev(gcolor)
  lcolor <- Rev(lcolor)
  color <- Rev(color)
  pch <- Rev(pch)
  bg <- Rev(bg)
  # cex[1] = global, cex[2] = labels, cex[3] = group labels
  cex <- rep(cex, length.out = 3)
  if (!is.null(args.errbars))
    errb <- do.call(ErrBarArgs, args.errbars)
  if (!add && is.null(xlim)) {
    if (is.null(args.errbars)) {
      xlim <- range(x[is.finite(x)])
    }
    else {
      # widen xlim so the error bars fit
      rng <- c(errb$from, errb$to)
      xlim <- range(pretty(rng[is.finite(rng)]))
    }
  }
  opar <- par("mai", "mar", "cex", "yaxs")
  on.exit(par(opar))
  par(cex = cex[1], yaxs = "i")
  if (!is.numeric(x))
    stop("'x' must be a numeric vector or matrix")
  n <- length(x)
  if (is.matrix(x)) {
    if (is.null(labels))
      labels <- rownames(x)
    if (is.null(labels))
      labels <- as.character(1L:nrow(x))
    labels <- rep_len(labels, n)
    if (is.null(groups))
      groups <- col(x, as.factor = TRUE)
    glabels <- levels(groups)
  }
  else {
    if (is.null(labels))
      labels <- names(x)
    glabels <- if (!is.null(groups))
      levels(groups)
    if (!is.vector(x)) {
      warning("'x' is neither a vector nor a matrix: using as.numeric(x)")
      x <- as.numeric(x)
    }
  }
  if (!add)
    plot.new()
  # widen the left margin to fit the longest label / group label
  linch <- if (!is.null(labels))
    max(strwidth(labels, "inch"), na.rm = TRUE)
  else 0
  if (is.null(glabels)) {
    ginch <- 0
    goffset <- 0
  }
  else {
    ginch <- max(strwidth(glabels, "inch"), na.rm = TRUE)
    goffset <- 0.4
  }
  if (!(is.null(labels) && is.null(glabels) || identical(yaxt, "n"))) {
    nmai <- par("mai")
    nmai[2L] <- nmai[4L] + max(linch + goffset, ginch) +
      0.1
    par(mai = nmai)
  }
  if (is.null(groups)) {
    o <- 1L:n
    y <- o
    ylim <- c(0, n + 1)
  }
  else {
    # sort by group and insert 2 units of vertical space between groups
    o <- sort.list(as.numeric(groups), decreasing = TRUE)
    x <- x[o]
    groups <- groups[o]
    # color <- rep_len(color, length(groups))[o]
    # lcolor <- rep_len(lcolor, length(groups))[o]
    offset <- cumsum(c(0, diff(as.numeric(groups)) != 0))
    y <- 1L:n + 2 * offset
    ylim <- range(0, y + 2)
  }
  if (!add)
    plot.window(xlim = xlim, ylim = ylim, log = "")
  lheight <- par("csi")
  if (!is.null(labels)) {
    linch <- max(strwidth(labels, "inch"), na.rm = TRUE)
    loffset <- (linch + 0.1)/lheight
    labs <- labels[o]
    if (!identical(yaxt, "n"))
      mtext(labs, side = 2, line = loffset, at = y, adj = 0,
            col = color, las = 2, cex = cex[2], ...)
  }
  if (!add)
    abline(h = y, lty = "dotted", col = lcolor)
  points(x, y, pch = pch, col = color, bg = bg)
  if (!is.null(groups)) {
    # group label positions: top of each group block
    gpos <- rev(cumsum(rev(tapply(groups, groups, length)) +
                         2) - 1)
    ginch <- max(strwidth(glabels, "inch"), na.rm = TRUE)
    goffset <- (max(linch + 0.2, ginch, na.rm = TRUE) + 0.1)/lheight
    if (!identical(yaxt, "n"))
      mtext(glabels, side = 2, line = goffset, at = gpos, adj = 0,
            col = gcolor, las = 2, cex = cex[3], ...)
    if (!is.null(gdata)) {
      abline(h = gpos, lty = "dotted")
      points(gdata, gpos, pch = gpch, col = gcolor, bg = bg, ...)
    }
  }
  if (!(add || identical(xaxt, "n") ))
    axis(1)
  if (!add)
    box()
  if (!add)
    title(main = main, xlab = xlab, ylab = ylab, ...)
  if (!is.null(args.errbars)) {
    # horizontal error bars, plus optional midpoint markers
    arrows(x0 = rev(errb$from)[o], x1 = rev(errb$to)[o],
           y0 = y, col = rev(errb$col), angle = 90, code = rev(errb$code),
           lty = rev(errb$lty), lwd = rev(errb$lwd), length = rev(errb$length))
    if (!is.null(errb$mid))
      points(rev(errb$mid)[o], y = y, pch = rev(errb$pch), col = rev(errb$col.pch),
             cex = rev(errb$cex.pch), bg = rev(errb$bg.pch))
  }
  if (!is.null(DescToolsOptions("stamp")))
    Stamp()
  # invisible(y[order(o, decreasing = TRUE)])
  # replaced by 0.99.18:
  invisible(y[order(y, decreasing = TRUE)])
}
# Draw a title bar as a filled rectangle spanning the full plot width
# above the plot region (up to margin line 'line'), with 'label' placed
# left/center/right according to xjust (0 / 0.5 / 1).
TitleRect <- function(label, bg = "grey", border=1, col="black", xjust=0.5, line=2, ...){
  xpd <- par(xpd=TRUE); on.exit(par(xpd))
  usr <- par("usr")
  # white underlay first, so a semi-transparent bg renders consistently
  rect(xleft = usr[1], ybottom = usr[4], xright = usr[2], ytop = LineToUser(line,3),
       col="white", border = border)
  rect(xleft = usr[1], ybottom = usr[4], xright = usr[2], ytop = LineToUser(line,3),
       col=bg, border = border)
  # horizontal anchor for the label text
  if(xjust==0) {
    x <- usr[1]
  } else if(xjust==0.5) {
    x <- mean(usr[c(1,2)])
  } else {
    x <- usr[2]
  }
  text(x = x, y = mean(c(usr[4], LineToUser(line,3))), labels=label,
       adj = c(xjust, 0.5), col=col, ...)
}
# not yet exported
PlotFacet <- function(x, FUN, mfrow, titles, main="", oma=NULL,
args.titles = NULL, ...){
par(mfrow=mfrow, xpd=TRUE)
nr <- mfrow[1]
nc <- mfrow[2]
if(is.null(oma))
oma <- c(5,5,5,2)
par(mar=c(0,0,2.0,0), oma=oma, las=par("las"))
args.titles1 <- list(col=1, bg="grey", border=1)
if(!is.null(args.titles))
args.titles1[names(args.titles)] <- args.titles
for(i in 1:length(x)){
# nur unterste Zeile, und auch da nur Beschriftung in jedem 2. Plot
xaxt <- c("s","n")[((i <= (max(nr)-1)*nc) || IsOdd(i)) + 1]
# nur unterste Zeile, und auch da nur Beschriftung in jedem 2. Plot
yaxt <- c("s","n")[((i %% nc) != 1) + 1]
# the plot function
FUN(x[[i]], xaxt, yaxt)
do.call(TitleRect, c(args.titles1, label=titles[i]))
}
title(main, outer=TRUE, xpd=NA)
}
# Annotated line plot of the columns of a matrix x (one line per
# column), with the series names placed in the right margin next to the
# last non-NA value of each line (spread out vertically so they do not
# overlap). args.legend=NA suppresses the margin legend.
PlotLinesA <- function(x, y, col=1:5, lty=1, lwd=1, lend = par("lend"), xlab = NULL,
                       ylab = NULL, xlim = NULL, ylim = NULL, xaxt=NULL, yaxt=NULL, cex = 1, args.legend = NULL,
                       main=NULL, grid=TRUE, mar=NULL, pch=NA, pch.col=par("fg"), pch.bg=par("bg"), pch.cex=1, ...){
  # example:
  #
  # m <- matrix(c(3,4,5,1,5,4,2,6,2), nrow = 3,
  # dimnames = list(dose = c("A","B","C"),
  # age = c("2000","2001","2002")))
  # PlotLinesA(m, col=rev(c(PalHelsana(), "grey")), main="Dosw ~ age", lwd=3, ylim=c(1,10))
  # Draw the right-margin legend: series labels via mtext and, if width
  # is given, short line samples in the margin.
  .legend <- function(line, y, width, labels, lty, lwd, col, cex){
    line <- rep(line, length.out=2)
    mtext(side = 4, las=1, cex=cex, text = labels,
          line = line[1] + ZeroIfNA(width + (!is.na(width)) * line[2]),
          at = y
    )
    if(!is.na(width)){
      x0 <- LineToUser(line[1], 4)
      segments(x0 = x0, x1 = LineToUser(line[1] + width, 4), y0 = y,
               lwd = lwd, lty=lty, lend = 1, col = col)
    }
  }
  add.legend <- !identical(args.legend, NA)
  # last observed (LOCF) value per column, sorted: anchors for the labels
  last <- Sort(data.frame(t(tail(apply(as.matrix(x), 2, LOCF), 1))))
  last <- setNames(last[,], nm = rownames(last))
  if(is.null(mar)){
    if(!identical(args.legend, NA))
      # no convincing solution before plot.new is called
      # http://stackoverflow.com/questions/16452368/calculate-strwidth-without-calling-plot-new
      Mar(right = 10) # this would be nice, but there's no plot so far... max(strwidth(names(last))) * 1.2
  } else {
    do.call(Mar, as.list(mar))
  }
  # empty canvas first, then grid, then the lines (and optional markers)
  matplot(x, y, type="n", las=1, xlim=xlim, ylim=ylim, xaxt="n", yaxt=yaxt, main=main, xlab=xlab, ylab=ylab, cex = cex, ...)
  if(!identical(xaxt, "n"))
    axis(side = 1, at=c(1:nrow(x)), rownames(x))
  if(grid) grid()
  matplot(x, type="l", lty=lty, col=col, lwd=lwd, lend=lend, xaxt="n", add=TRUE)
  if(!is.na(pch))
    matplot(x, type="p", pch=pch, col=pch.col, bg=pch.bg, cex=pch.cex, xaxt="n", add=TRUE)
  oldpar <- par(xpd=TRUE); on.exit(par(oldpar))
  if (add.legend) {
    if(is.null(colnames(x)))
      colnames(x) <- 1:ncol(x)
    # map the sorted label order back to the column order of x
    ord <- match(names(last), colnames(x))
    lwd <- rep(lwd, length.out=ncol(x))
    lty <- rep(lty, length.out=ncol(x))
    col <- rep(col, length.out=ncol(x))
    # default legend values
    args.legend1 <- list(
      line = c(1, 1) , # par("usr")[2] + diff(par("usr")[1:2]) * 0.02,
      width = 1, # (par("usr")[2] + diff(par("usr")[1:2]) * 0.02 * 2) - (par("usr")[2] + diff(par("usr")[1:2]) * 0.02),
      y = SpreadOut(unlist(last), mindist = 1.2 * strheight("M")),
      labels=names(last), cex=par("cex"),
      col = col[ord], lwd = lwd[ord], lty = lty[ord])
    if (!is.null(args.legend)) {
      args.legend1[names(args.legend)] <- args.legend
    }
    DoCall(".legend", args.legend1)
  }
  if(!is.null(DescToolsOptions("stamp")))
    Stamp()
}
# Scatterplot with decimal-logarithmic axis/axes. The axes are labelled
# with powers of ten (10^n) and overlaid with a log-scaled grid: minor
# lines at 1..9 within each decade and emphasised lines at the decades.
#
#   x          object passed on to plot() / points()
#   log        which axes are logarithmic: "x", "y" or "xy" (default)
#   args.grid  grid settings (col/lty/lwd, and *.min for the decade
#              lines); NA suppresses the grid entirely
PlotLog <- function(x, ..., args.grid=NULL, log="xy"){

  add.grid <- !identical(args.grid, NA)
  # default grid arguments
  args.grid1 <- list(
    lwd = 1,
    lty = 3, #"dotted",
    col = "grey85",
    lwd.min = 1,
    lty.min = 3,
    col.min = "grey60"
  )
  # user supplied settings override the defaults
  if (!is.null(args.grid)) {
    args.grid1[names(args.grid)] <- args.grid
  }

  # draw an empty frame first; axes are constructed manually below
  plot(x, ..., type="n", log=log, xaxt="n", yaxt="n", xaxs="i", yaxs="i")

  if(grepl("x", log)){
    # decades covered by the x range; par("usr") holds log10 coordinates
    # on a logarithmic axis, hence 10^usr recovers the data range
    ticks <- do.call(seq, as.list(range(ceiling(log(10^par("usr")[1:2], 10)))))
    # label the x axis with powers of ten
    sapply(ticks,
           function(n) mtext(side=1, line=1, at = 10^n, text = bquote(~10^.(n))))

    if(add.grid){
      # minor gridlines at 1..9 within each decade, stronger lines at 10^n
      abline(v=unique(as.vector(sapply(c(ticks, tail(ticks, 1)+1), function(n) seq(0, 0.1, 0.01)*10^n))),
             col=args.grid1$col, lty=args.grid1$lty, lwd=args.grid1$lwd)
      abline(v=10^(ticks), col=args.grid1$col.min, lty=args.grid1$lty.min, lwd=args.grid1$lwd.min)
    }

    axis(1, at=c(0, 10^(ticks)), labels=NA)
  }

  if(grepl("y", log)){
    # same construction for the y axis
    ticks <- do.call(seq, as.list(range(ceiling(log(10^par("usr")[3:4], 10)))))
    sapply(ticks,
           function(n) mtext(side=2, line=1, at = 10^n, text = bquote(~10^.(n)), las=1))

    if(add.grid){
      abline(h=unique(as.vector(sapply(c(ticks, tail(ticks, 1)+1), function(n) seq(0, 0.1, 0.01)*10^n))),
             col=args.grid1$col, lty=args.grid1$lty, lwd=args.grid1$lwd)
      abline(h=10^(ticks), col=args.grid1$col.min, lty=args.grid1$lty.min, lwd=args.grid1$lwd.min)
    }

    axis(2, at=c(0, 10^(ticks)), labels=NA)
  }

  box()
  points(x, ...)

}
###
## plots: PlotFun ====
# Plot a function given as a formula lhs ~ rhs, where both sides may be
# expressions in one free variable, e.g. PlotFun(sin(x) ~ x). The free
# variable is evaluated over seq(from, to, by); named scalar constants used
# in the expressions can be supplied via 'args'. With polar=TRUE the pair
# (rhs, lhs) is interpreted as (theta, r) and converted to cartesian
# coordinates. Centered axes through the origin with minor ticks are drawn
# unless the caller passes axes=... explicitly.
#
# Returns invisibly a list with the evaluated x and y vectors.
PlotFun <- function(FUN, args=NULL, from=NULL, to=NULL, by=NULL, xlim=NULL,
                    ylim = NULL, polar = FALSE, type="l",
                    col = par("col"), lwd= par("lwd"), lty=par("lty"), pch=NA, mar=NULL,
                    add = FALSE, ...){

  if(is.null(mar))
    Mar(1,1,1,1)
  else
    par(mar=mar)

  # the free variable is the one not bound as a constant in 'args'
  vars <- all.vars(FUN)
  vars <- vars[vars %nin% names(args)]

  # default evaluation range and resolution (500 points)
  if(is.null(from)) from <- -5
  if(is.null(to)) to <- 5
  if(is.null(by)) by <- (to - from) / 500

  # the independent variable
  assign(vars, seq(from = from, to = to, by=by))

  # bind the constants; only scalar parameters are supported
  for(i in seq_along(args)) {
    assign(names(args)[i], unlist(args[i]))
    if(length(get(names(args)[i])) > 1) {
      assign(names(args)[i], get(names(args)[i])[1])
      warning(gettextf("first element used of '%s' argument", names(args)[i]))
    }
  }

  # Inhibit model interpretation for function plot
  FUN[[2]] <- as.formula("~" %c% gettextf("I(%s)", deparse(FUN[[2]])) )[[2]]
  FUN[[3]] <- as.formula("~" %c% gettextf("I(%s)", deparse(FUN[[3]])) )[[2]]

  # this will evaluate in parent.frame(), so in function's env
  p <- ParseFormula(FUN)

  y <- p$lhs$mf.eval[,1]
  x <- p$rhs$mf.eval[,1]

  if(polar){
    cord <- PolToCart(r = y, theta = x)
    y <- cord$y
    x <- cord$x
  }

  if(is.null(xlim)){
    xlim <- range(pretty(range(x[is.finite(x)])))
  }
  if(is.null(ylim)){
    ylim <- range(pretty(range(y[is.finite(y)])))
  }

  # plot parameters: fill in defaults where the user supplied nothing
  m <- match.call(expand.dots = FALSE)
  m$...$frame.plot <- InDots(..., arg="frame.plot", default = FALSE)
  m$...$axes <- InDots(..., arg="axes", default = NULL)
  m$...$asp <- InDots(..., arg="asp", default = 1)
  m$...$xlab <- InDots(..., arg="xlab", default = "")
  m$...$ylab <- InDots(..., arg="ylab", default = "")

  # if the user did not decide on axes, draw our centered ones below
  if(is.null(m$...$axes)) {
    add.axes <- TRUE
    m$...$axes <- FALSE
  } else {
    add.axes <- FALSE
  }

  if(!add){
    do.call(plot, c(list(y=1, x=1, xlim=xlim, ylim=ylim, type="n", mar=mar), m$...))
  }

  if(add.axes && !add) {
    tck <- axTicks(side=1)
    # avoid a "0" label sitting on the crossing point of the axes
    if(sign(min(tck)) != sign(max(tck)))
      tck <- tck[tck!=0]
    axis(1, pos = 0, col="darkgrey", at=tck)

    # we set minor ticks for the axes, 4 ticks between 2 major ticks
    axp <- par("xaxp")
    axp[3] <- 5 * axp[3]
    axis(1, pos = 0, TRUE, at=axTicks(side=1, axp=axp), labels = NA, tck=-0.01, col="darkgrey")

    tck <- axTicks(side=2)
    if(sign(min(tck)) != sign(max(tck)))
      tck <- tck[tck!=0]
    axis(2, pos = 0, las=1, col="darkgrey", at=tck)

    axp <- par("yaxp")
    axp[3] <- 5 * axp[3]
    # fix: the minor ticks of the y axis must be computed on side 2
    # (was axTicks(side=1, ...), a copy-paste slip from the x-axis block)
    axis(2, pos = 0, TRUE, at=axTicks(side=2, axp=axp), labels=NA, tck=-0.01, col="darkgrey")
  }

  lines(y=y, x=x, type=type, col=col, lty=lty, lwd=lwd, pch=pch)

  invisible(list(x=x, y=y))

}
# Shade <- function(FUN, col=par("fg"), xlim, density=10, step=0.01, ...) {
#
#
# # but works as well with function(x), but it doesn't
# # Shade(FUN=function(x) dt(x, df=5), xlim=c(qt(0.975, df=5), 6), col="red")
#
# if(is.function(FUN)) {
# # if FUN is a function, then save it under new name and
# # overwrite function name in FUN, which has to be character
# fct <- FUN
# FUN <- "fct"
# # FUN <- gettextf("%s(x)", FUN)
# FUN <- gettextf("function(x) %s", FUN)
# }
#
# from <- xlim[1]
# to <- xlim[2] # qt(0.025, df=degf)
#
# x <- seq(from, to, by = step)
# xval <- c(from, x, to)
#
# # Calculates the function for given xval
# yval <- c(0, eval(parse(text = FUN)), 0)
#
# polygon(xval, yval, col=col, density=density, ...)
#
# }
# Shade the area under a curve between the x positions given in 'breaks',
# one polygon() per interval between consecutive breaks. FUN may be given
# either as a character expression in x (e.g. "dnorm(x)") or as a function
# of one argument (e.g. function(x) dt(x, df=5)). col and density are
# recycled along the intervals.
#
# Example:
#   curve(dt(x, df=5), -6, 6)
#   Shade(function(x) dt(x, df=5), breaks=c(qt(0.975, df=5), 6), col="red")
Shade <- function(FUN, col=par("fg"), breaks, density=10, step=0.01, ...) {

  if(is.function(FUN)) {
    # Store the function locally and build an expression that *calls* it,
    # so that the eval(parse(...)) below yields numeric y-values.
    # (fix: the former gettextf("function(x) %s", FUN) produced the text
    # "function(x) fct", which evaluates to a closure — c() cannot combine
    # that with numerics, so the function-input branch always failed.)
    fct <- FUN
    FUN <- "fct(x)"
  }

  # shade a single interval [from, to]
  .Shade <- function(FUN, col, from, to, density, step, ...) {

    x <- seq(from, to, by = step)
    xval <- c(from, x, to)

    # Evaluate the curve at x; 'x' is found in this frame and 'fct'
    # (if FUN was a function) via the enclosing frame.
    yval <- c(0, eval(parse(text = FUN)), 0)

    polygon(xval, yval, col=col, density=density, ...)

  }

  # one shading call per interval between consecutive breaks
  pars <- Recycle(from=head(breaks, -1), to=tail(breaks, -1), col=col, density=density)

  for(i in 1:attr(pars, "maxdim"))
    .Shade(FUN, pars$col[i], pars$from[i], pars$to[i], density=pars$density[i], step=step, ...)

}
## plots: PlotPyramid ====
# Draw a back-to-back ("pyramid") barchart, e.g. a population pyramid:
# the lx values extend to the left and the rx values to the right of a
# central gap holding the category labels.
#
#   lx, rx    left/right bar lengths; a two-column matrix in lx is split
#   ylab      labels placed in the central gap, horizontally at ylab.x
#   col       colours, alternating left/right (a single colour is used
#             for both sides)
#   gapwidth  width of the central label gap (default: widest label + pad)
#   rev       reverse the category order
#
# Returns invisibly the bar midpoints, like barplot().
PlotPyramid <- function(lx, rx = NA, ylab = "",
                        ylab.x = 0, col = c("red", "blue"), border = par("fg"),
                        main = "", lxlab = "", rxlab = "", xlim = NULL,
                        gapwidth = NULL, xaxt = TRUE,
                        args.grid = NULL,
                        cex.axis = par("cex.axis"), cex.lab = par("cex.axis"), cex.names = par("cex.axis"),
                        adj = 0.5, rev = FALSE, ...) {

  # a two-column matrix (or data.frame) may be passed in lx alone
  if (missing(rx) && length(dim(lx)) > 0) {
    rx <- lx[, 2]
    lx <- lx[, 1]
  }

  if(rev==TRUE){
    lx <- Rev(lx, margin=1)
    rx <- Rev(rx, margin=1)
    ylab <- Rev(ylab)
  }

  # dry run just to obtain the bar midpoints
  b <- barplot(-lx, horiz=TRUE, plot=FALSE, ...)

  ylim <- c(0, max(b))
  if(is.null(xlim)) xlim <- c(-max(lx), max(rx))

  plot( 1, type="n", xlim=xlim, ylim=ylim, frame.plot=FALSE
        , xlab="", ylab="", axes=FALSE, main=main)

  # the central gap must hold the widest label plus some padding
  if(is.null(gapwidth)) gapwidth <- max(strwidth(ylab, cex=cex.names)) + 3*strwidth("M", cex=cex.names)

  at.left <- axTicks(1)[axTicks(1)<=0] - gapwidth/2
  at.right <- axTicks(1)[axTicks(1)>=0] + gapwidth/2

  # grid: define default arguments
  if(!identical(args.grid, NA)){    # add grid
    args.grid1 <- list(col="grey", lty="dotted")
    # override default arguments with user defined ones
    if (!is.null(args.grid)) {
      args.grid1[names(args.grid)] <- args.grid
    }
    abline(v=c(at.left, at.right), col=args.grid1$col, lty=args.grid1$lty )
  }

  # recycle a single colour to both sides
  # (fix: this formerly read 'border <- rep(col, 2)', which clobbered the
  # user's border argument and left rcol empty, i.e. the right side of the
  # pyramid colourless, whenever a single colour was given)
  if(length(col) == 1) col <- rep(col, 2)
  # odd entries colour the left bars, even entries the right bars
  lcol <- rep(col[seq_along(col) %% 2 == 1], times=length(lx))
  rcol <- rep(col[seq_along(col) %% 2 == 0], times=length(rx))

  if(length(border) == 1) border <- rep(border, 2)
  lborder <- rep(border[seq_along(border) %% 2 == 1], times=length(lx))
  rborder <- rep(border[seq_along(border) %% 2 == 0], times=length(rx))

  barplot(-lx, horiz=TRUE, col=lcol, add=T, axes=FALSE, names.arg="",
          offset=-gapwidth/2, border=lborder, ...)
  barplot(rx, horiz=TRUE, col=rcol, add=T, axes=FALSE, names.arg="",
          offset=gapwidth/2, border=rborder, ...)

  # labels may reach into the margins
  oldpar <- par(xpd=TRUE); on.exit(par(oldpar))

  ylab.x <- ylab.x + sign(ylab.x) * gapwidth/2
  text(ylab, x=ylab.x, y=b, cex=cex.names, adj = adj)

  if(!xaxt == "n"){
    # mirrored axis labels: absolute values on both sides of the gap
    axis(side=1, at=at.right, labels=axTicks(1)[axTicks(1)>=0], cex.axis=cex.axis)
    axis(side=1, at=at.left, labels=-axTicks(1)[axTicks(1)<=0], cex.axis=cex.axis)
  }

  mtext(text=rxlab, side=1, at=mean(at.right), padj=0.5, line=2.5, cex=cex.lab)
  mtext(text=lxlab, side=1, at=mean(at.left), padj=0.5, line=2.5, cex=cex.lab)

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

  invisible(b)    # return the same result as barplot
}
###
## plots: PlotCorr ====
# Plot a (correlation) matrix as a coloured image: values in [-1, 1] mapped
# onto a diverging palette, variable names on the top and left axes and an
# optional colour legend on the right.
#
#   x       matrix of values in [-1, 1], typically cor(...)
#   cols    colour ramp; 'breaks' are the corresponding class borders
#   border  colour of the cell grid and the frame (NA suppresses it)
#   mincor  correlations with |r| < mincor are blanked out (plotted as NA)
#
# example:
#   m <- cor(d.pizza[, sapply(d.pizza, is.numeric)][,1:5], use="pairwise.complete.obs")
#   PlotCorr(m)
PlotCorr <- function(x, cols = colorRampPalette(c(Pal()[2], "white", Pal()[1]), space = "rgb")(20)
                     , breaks = seq(-1, 1, length = length(cols)+1), border="grey", lwd=1
                     , args.colorlegend = NULL, xaxt = par("xaxt"), yaxt = par("yaxt"), cex.axis = 0.8, las = 2
                     , mar = c(3,8,8,8), mincor=0, ...){

  pars <- par(mar=mar);  on.exit(par(pars))

  # if mincor is set delete all correlations with abs. val. < mincor
  if(mincor!=0)
    x[abs(x) < abs(mincor)] <- NA

  # reverse the columns so the first variable ends up in the top-left corner
  x <- x[,ncol(x):1]

  image(x=1:nrow(x), y=1:ncol(x), xaxt="n", yaxt="n", z=x, frame.plot=FALSE, xlab="", ylab=""
        , col=cols, breaks=breaks, ... )

  # lwd = -1 suppresses the axis line, leaving only the labels
  if(xaxt!="n") axis(side=3, at=1:nrow(x), labels=rownames(x), cex.axis=cex.axis, las=las, lwd=-1)
  if(yaxt!="n") axis(side=2, at=1:ncol(x), labels=colnames(x), cex.axis=cex.axis, las=las, lwd=-1)

  if((is.list(args.colorlegend) || is.null(args.colorlegend))){
    args.colorlegend1 <- list( labels=sprintf("%.1f", seq(-1,1, length=length(cols)/2+1))
                               , x=nrow(x)+0.5 + nrow(x)/20, y=ncol(x)+0.5
                               , width=nrow(x)/20, height=ncol(x), cols=cols, cex=0.8 )
    if ( !is.null(args.colorlegend) ) { args.colorlegend1[names(args.colorlegend)] <- args.colorlegend }
    do.call("ColorLegend", args.colorlegend1)
  }

  if(!is.na(border)) {
    usr <- par("usr")
    # fix: the vertical extent must use ncol(x); the former nrow(x) was
    # only correct for square matrices
    rect(xleft=0.5, xright=nrow(x)+0.5, ybottom=0.5, ytop=ncol(x)+0.5,
         lwd=lwd, border=border)
    # clip the grid lines to the image area, then restore the region
    clip(0.5, nrow(x)+0.5, 0.5, ncol(x)+0.5)
    abline(h=seq(-2, ncol(x)+1,1)-0.5, v=seq(1,nrow(x)+1,1)-0.5, col=border,lwd=lwd)
    do.call("clip", as.list(usr))
  }

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

}
###
## plots: PlotViolin ====
# Generic: draw violin plots (mirrored kernel-density shapes per group)
# with an overlaid narrow boxplot; dispatches on the class of x.
# Methods provided below: PlotViolin.default and PlotViolin.formula.
PlotViolin <- function(x, ...) {
  UseMethod("PlotViolin")
}
# Default method: draw one mirrored kernel-density shape ("violin") per
# group, either side by side on the x axis or, with horizontal=TRUE,
# stacked along the y axis, and overlay a narrow boxplot per group.
#
#   x             a list of groups, or several vectors passed via ...
#   bw            bandwidth selector passed to density() (default "SJ")
#   na.rm         drop NAs per group before estimating the density
#   names         group labels for the axis
#   args.boxplot  overrides for the overlaid boxplot; NA suppresses it
PlotViolin.default <- function (x, ..., horizontal = FALSE, bw = "SJ", na.rm = FALSE
                                , names = NULL, args.boxplot = NULL)  {

  # Draw a single violin: the density curve (x = ordinate values,
  # y = density heights) is mirrored around the group slot 'center'
  # and drawn as one closed polygon.
  vlnplt <- function(x, y, center, horizontal = FALSE,
                     col = NA , border = par("fg"), lty = 1, lwd = 1,
                     density = NULL, angle = 45, fillOddEven = FALSE, ...) {

    # double up first
    x <- c(x, rev(x))
    y <- c(y, -rev(y))
    y <- y + center

    # swap x and y if horizontal
    if (horizontal == FALSE) { tmp=x; x=y; y=tmp }
    polygon(x=x, y=y, border=border, col=col, lty=lty, lwd=lwd,
            density=density, angle=angle, fillOddEven=fillOddEven, ...)
  }

  # main *****************

  m <- match.call(expand.dots = FALSE)
  # pick the par()-settings out of the dot arguments and apply them for
  # the duration of the plot
  pars <- m$...[ names(m$...)[!is.na(match(names(m$...), c(
    "cex","cex.axis","cex.lab","cex.main","cex.sub","col.axis","col.lab","col.main","col.sub","family",
    "font","font.axis","font.lab","font.main","font.sub","las","tck","tcl","xaxt","xpd","yaxt"
  )))]]
  oldpar <- par(pars); on.exit(par(oldpar))

  # collect the groups: either a list in x, or all unnamed dot arguments
  args <- list(x, ...)
  namedargs <- if (!is.null(attributes(args)$names))
    attributes(args)$names != ""
  else
    rep(FALSE, length = length(args))

  groups <- if(is.list(x)) x else args[!namedargs]

  if (0 == (n <- length(groups)))
    stop("invalid first argument")
  if (length(class(groups)))
    groups <- unclass(groups)
  # group labels: user supplied 'names', existing names, or 1..n
  if (!missing(names))
    attr(groups, "names") <- names
  else {
    if (is.null(attr(groups, "names")))
      attr(groups, "names") <- 1:n
    names <- attr(groups, "names")
  }

  # density() evaluates on 512 points by default; one column per group
  xvals <- matrix(0, nrow = 512, ncol = n)
  yvals <- matrix(0, nrow = 512, ncol = n)
  center <- 1:n
  for (i in 1:n) {
    if(na.rm) xi <- na.omit(groups[[i]])
    else xi <- groups[[i]]

    tmp.dens <- density(xi, bw = bw)
    xvals[, i] <- tmp.dens$x
    # scale so the widest violin fills 7/8 of its unit-wide slot
    yvals.needtoscale <- tmp.dens$y
    yvals.scaled <- 7/16 * yvals.needtoscale / max(yvals.needtoscale)
    yvals[, i] <- yvals.scaled
  }

  # plot range: group slots on one axis, data range on the other
  if (horizontal == FALSE) {
    xrange <- c(1/2, n + 1/2)
    yrange <- range(xvals)
  }
  else {
    xrange <- range(xvals)
    yrange <- c(1/2, n + 1/2)
  }

  # forward the plot-related dot arguments to plot(), filling in defaults
  plot.args <- m$...[names(m$...)[!is.na(match(names(m$...),
                                               c("xlim","ylim","main","xlab","ylab","panel.first","panel.last","frame.plot","add")))]]
  if(! "xlim" %in% names(plot.args)) plot.args <- c(plot.args, list(xlim=xrange))
  if(! "ylim" %in% names(plot.args)) plot.args <- c(plot.args, list(ylim=yrange))
  if(! "xlab" %in% names(plot.args)) plot.args <- c(plot.args, list(xlab=""))
  if(! "ylab" %in% names(plot.args)) plot.args <- c(plot.args, list(ylab=""))
  if(! "frame.plot" %in% names(plot.args)) plot.args <- c(plot.args, list(frame.plot=TRUE))

  # plot only if add is not TRUE
  if(! "add" %in% names(plot.args)) add <- FALSE else add <- plot.args$add

  if(!add) do.call(plot, c(plot.args, list(x=0, y=0, type="n", axes=FALSE)))

  # polygon arguments, recycled so that each violin gets its own value
  poly.args <- args[names(args)[!is.na(match(names(args), c("border","col","lty","lwd","density","angle","fillOddEven")))]]
  poly.args <- lapply( poly.args, rep, length.out=n )

  for (i in 1:n)
    do.call(vlnplt, c(lapply(poly.args, "[", i), list(x=xvals[, i]), list(y=yvals[, i]),
                      list(center=center[i]), list(horizontal = horizontal)))

  # axes: group labels on the grouping axis, a standard axis on the other
  axes <- Coalesce(unlist(m$...[names(m$...)[!is.na(match(names(m$...), c("axes")))]]), TRUE)
  if(axes){
    xaxt <- Coalesce(unlist(m$...[names(m$...)[!is.na(match(names(m$...), c("xaxt")))]]), TRUE)
    if(xaxt!="n") if(horizontal == TRUE) axis(1) else axis(1, at = 1:n, labels = names)
    yaxt <- Coalesce(unlist(m$...[names(m$...)[!is.na(match(names(m$...), c("yaxt")))]]), TRUE)
    if(yaxt!="n") if(horizontal == TRUE) axis(2, at = 1:n, labels = names) else axis(2)
  }

  # overlay a slim boxplot unless suppressed with args.boxplot = NA
  if(!identical(args.boxplot, NA)){
    args1.boxplot <- list(col="black", add=TRUE, boxwex=0.05, axes=FALSE,
                          outline=FALSE, whisklty=1, staplelty=0, medcol="white")
    args1.boxplot[names(args.boxplot)] <- args.boxplot

    do.call(boxplot, c(list(x, horizontal = horizontal), args1.boxplot))
  }

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

}
# Formula interface: PlotViolin(y ~ g, data) splits the response by the
# grouping factor(s) and delegates to the default method. 'subset' and
# 'na.action' are honoured via the standard model.frame() mechanism.
PlotViolin.formula <- function (formula, data, subset, na.action, ...) {

  if (missing(formula) || (length(formula) != 3))
    stop("formula missing or incorrect")
  # rebuild this call as a model.frame() call, dropping the dot arguments
  m <- match.call(expand.dots = FALSE)
  if (is.matrix(eval(m$data, parent.frame())))
    m$data <- as.data.frame(data)
  m$... <- NULL
  m[[1]] <- as.name("model.frame")
  mf <- eval(m, parent.frame())
  # split the response column by the remaining (grouping) columns
  response <- attr(attr(mf, "terms"), "response")
  PlotViolin(split(mf[[response]], mf[-response]), ...)
}
###
## plots: PlotPolar ====
# Plot values r at angles theta (in radians) in a polar coordinate system.
# Each row of r is one series; type "p" draws points, "l" a closed polygon
# (optionally filled with 'fill'), "h" rays from the origin.
#
#   theta  angles per series; default spreads the points evenly over the
#          full circle
#   rlim   maximal radius of the plot region; default 1.12 * max(|r|)
#   add    add to an existing polar plot instead of opening a new one
PlotPolar <- function(r, theta = NULL, type="p"
                      , rlim = NULL, main="", lwd = par("lwd"), lty = par("lty"), col = par("col")
                      , pch = par("pch"), fill = NA, cex = par("cex")
                      , mar = c(2, 2, 5, 2), add = FALSE, ...) {

  # a single series may come as a plain vector; turn it into a 1-row matrix
  if( ncol(r <- as.matrix(r)) == 1) r <- t(r)
  k <- nrow(r)

  if(is.null(theta)) {
    # spread the observations evenly over the full circle
    theta <- seq(0, 2*pi, length=ncol(r)+1)[-(ncol(r)+1)]
    if( nrow(r) > 1 ){
      theta <- matrix( rep(theta, times=nrow(r)), ncol=ncol(r), byrow = TRUE )
    } else {
      theta <- t(as.matrix(theta))
    }
  } else {
    if( ncol(theta <- as.matrix(theta)) == 1) theta <- t(theta)
  }

  # recycle the graphical parameters, one entry per series
  if (length(type) < k) type <- rep(type, length.out = k)
  if (length(lty) < k) lty <- rep(lty, length.out = k)
  if (length(lwd) < k) lwd <- rep(lwd, length.out = k)
  if (length(pch) < k) pch <- rep(pch, length.out = k)
  if (length(col) < k) col <- rep(col, length.out = k)
  if (length(fill) < k) fill <- rep(fill, length.out = k)
  if (length(cex) < k) cex <- rep(cex, length.out = k)

  dev.hold()
  on.exit(dev.flush())

  # default radius limit follows plot.default(): extend the range by 12%.
  # (fix: the former 'rlim <- if (is.null(rlim)) ...' had no else branch,
  # so a user-supplied rlim was silently overwritten with NULL)
  if (is.null(rlim))
    rlim <- max(abs(r[is.finite(r)])) * 1.12

  if(!add){
    par(mar = mar, pty = "s", xpd=TRUE)
    plot(x=c(-rlim, rlim), y=c(-rlim, rlim),
         type = "n", axes = FALSE, main = main, xlab = "", ylab = "", ...)
  }

  for (i in seq_len(k)) {
    # polar -> cartesian coordinates for series i
    xy <- xy.coords( x=cos(theta[i,]) * r[i,], y=sin(theta[i,])*r[i,])
    if(type[i] == "p"){
      points( xy, pch = pch[i], col = col[i], cex = cex[i] )
    } else if( type[i]=="l") {
      polygon(xy, lwd = lwd[i], lty = lty[i], border = col[i], col = fill[i])
    } else if( type[i]=="h") {
      segments(x0=0, y0=0, x1=xy$x, y1=xy$y, lwd = lwd[i], lty = lty[i], col = col[i])
    }
  }

  if(!add && !is.null(DescToolsOptions("stamp")))
    Stamp()

}
# Add a polar grid (concentric circles and radial rays) to an existing
# polar plot, with optional labels for the radii and the angles.
#
#   nr          number of circles, or a vector of radii; NA suppresses them
#   ntheta      number of rays, or a vector of angles in radians; NA
#               suppresses them
#   rlabels     circle labels (default: their radii); NA suppresses them
#   alabels     angle labels (default: degrees, or radians if
#               lblradians=TRUE); NA suppresses them
#   las         1 horizontal, 2 along the ray, 3 vertical labels
#   dist        radial position of the angle labels (default just outside)
PolarGrid <- function (nr = NULL, ntheta = NULL, col = "lightgray",
                       lty = "dotted", lwd = par("lwd"), rlabels = NULL, alabels = NULL,
                       lblradians = FALSE, cex.lab = 1, las = 1, adj = NULL, dist = NULL) {

  # circles ---------------------------------------------------------------
  if (is.null(nr)) {  # use standard values with pretty axis values
    at <- axTicks(1)[axTicks(1)>=0]
  } else if (!all(is.na(nr))) {  # use NA to suppress the circles
    if (length(nr) > 1) {        # use nr directly as radii
      at <- nr
    } else {
      at <- seq.int(0, par("xaxp")[2L], length.out = nr + 1)
    }
  } else {at <- NULL}

  if(!is.null(at))
    DrawCircle(x = 0, y = 0, r.out = at, border = col, lty = lty, col = NA)

  # rays ------------------------------------------------------------------
  if (is.null(ntheta)) {  # default: one ray every 30 degrees
    at.ang <- seq(0, 2*pi, by=2*pi/12)
  } else if (!all(is.na(ntheta))) {  # use NA to suppress the rays
    if (length(ntheta) > 1) {        # use ntheta directly as angles
      at.ang <- ntheta
    } else {
      at.ang <- seq(0, 2*pi, by=2*pi/ntheta)
    }
  } else {at.ang <- NULL}

  if(!is.null(at.ang)) segments(x0=0, y0=0, x1=max(par("usr"))*cos(at.ang)
                                , y1=max(par("usr"))*sin(at.ang), col = col, lty = lty, lwd = lwd)

  # radius labels ---------------------------------------------------------
  if(!is.null(at)){
    if(is.null(rlabels)) rlabels <- signif(at[-1], 3)   # standard values
    if(!all(is.na(rlabels)))
      BoxedText(x=at[-1], y=0, labels=rlabels, border=FALSE, col="white", cex=cex.lab)
  }

  # angle labels ----------------------------------------------------------
  if(!is.null(at.ang)){
    if(is.null(alabels))
      if(lblradians == FALSE){
        alabels <- RadToDeg(at.ang[-length(at.ang)])           # degrees
      } else {
        alabels <- Format(at.ang[-length(at.ang)], digits=2)   # radians
      }

    if(is.null(dist))
      dist <- par("usr")[2]*1.07

    out <- DescTools::PolToCart(r = dist, theta=at.ang)

    # skip the labelling entirely when all labels are suppressed
    # (fix: the guard formerly applied only to the adj default, while the
    # text was drawn unconditionally)
    if(!all(is.na(alabels))) {

      if(is.null(adj)) {
        # right-align labels on the left half of the circle, left-align on
        # the right half, centre at the top and bottom
        adj <- ifelse(at.ang %(]% c(pi/2, 3*pi/2), 1, 0)
        adj[at.ang %in% c(pi/2, 3*pi/2)] <- 0.5
      }
      # fix: 'length_out' was a silently ignored misspelling of 'length.out'
      adj <- rep(adj, length.out=length(alabels))

      if(las == 2){
        # rotate each label to run along its ray
        sapply(seq_along(alabels),
               function(i) text(out$x[i], out$y[i], labels=alabels[i], cex=cex.lab,
                                srt=DescTools::RadToDeg(atan(out$y[i]/out$x[i])), adj=adj[i]))
      } else {
        sapply(seq_along(alabels),
               function(i) BoxedText(x=out$x[i], y=out$y[i], labels=alabels[i], cex=cex.lab,
                                     srt=ifelse(las==3, 90, 0), adj=adj[i],
                                     border=NA, col="white"))
      }
    }
  }

  invisible()
}
###
## plots: PlotTernary =====
# clumsy *****************
# PlotTernary <- function(a, f, m, symb = 2, grid = FALSE, ...) {
# # source: cwhmisc:::triplot
# # author: Christian Hoffmann
# Draw a ternary (triangle) plot of three-part compositions. The rows of x
# (or the triple x, y, z) are rescaled to sum to 1 and placed inside an
# equilateral triangle whose corners represent the pure components.
#
#   x    three-column matrix/data.frame, or the first component if y and z
#        are given separately
#   lbl  corner labels (default: column resp. argument names)
PlotTernary <- function(x, y = NULL, z = NULL, args.grid=NULL, lbl = NULL, main = "", ...){

  if(!(is.null(y) && is.null(z))){
    if(is.null(lbl)) lbl <- c(names(x), names(y), names(z))
    x <- cbind(x, y, z)
  } else {
    if(is.null(lbl)) lbl <- colnames(x)
    x <- as.matrix(x)
  }

  if(any(x < 0)) stop("X must be non-negative")
  # row sums via matrix product
  s <- drop(x %*% rep(1, ncol(x)))
  if(any(s<=0)) stop("each row of X must have a positive sum")
  # rescale rows to compositions summing to 1
  if(max(abs(s-1)) > 1e-6) {
    warning("row(s) of X will be rescaled")
    x <- x / s
  }

  oldpar <- par(xpd=TRUE)
  on.exit(par(oldpar))

  Canvas(mar=c(1,3,4,1) + .1, main=main)
  # height factor of the equilateral triangle
  sq3 <- sqrt(3)/2

  # grid: define default arguments
  if(!identical(args.grid, NA)){
    args.grid1 <- list(col="grey", lty="dotted", nx=5)
    # override default arguments with user defined ones
    if (!is.null(args.grid)) {
      args.grid1[names(args.grid)] <- args.grid
    }
    # the three families of gridlines, parallel to the triangle's sides
    d <- seq(0, 2*sq3, sq3*2/(args.grid1$nx))
    x0 <- -sq3 + (1) * d
    segments(x0 = x0, y0 = -0.5, x1 = x0 + sq3 - d*.5, y1 = 1- d * sq3, col=args.grid1$col, lty=args.grid1$lty)
    segments(x0 = x0, y0 = -0.5, x1 = -rev(x0 + sq3 - d*.5), y1 = rev(1- d * sq3), col=args.grid1$col, lty=args.grid1$lty)
    segments(x0 = x0 + sq3 - d*.5, y0 = 1- d * sq3, x1 = rev(x0 -d*.5), y1 = 1- d * sq3, col=args.grid1$col, lty=args.grid1$lty)
  }

  # triangle outline and corner labels placed slightly outside
  DrawRegPolygon(nv = 3, rot = pi/2, radius.x = 1, col=NA)
  eps <- 0.15
  pts <- DrawRegPolygon(nv = 3, rot = pi/2, radius.x = 1+eps, plot=FALSE)
  text(pts, labels = lbl[c(1,3,2)])

  # barycentric -> cartesian coordinates of the composition points
  points((x[,2] - x[,3]) * sq3, x[,1] * 1.5 - 0.5, ...)

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

}
## plots: PlotVenn ====
# Draw a Venn diagram for 2 to 5 sets and annotate each region with the
# number of elements falling into exactly that intersection.
#
#   x       a list of 2..5 vectors (the sets)
#   col     fill colour(s) for the circles/ellipses
#   plotit  if FALSE only the counts are computed, nothing is drawn
#   labels  set labels (default: names of x, or LETTERS)
#
# Returns a list with the table of region counts and the label coordinates.
PlotVenn <- function (x, col = "transparent", plotit = TRUE, labels = NULL) {

  n <- length(x)
  if (n > 5)
    stop("Can't plot a Venn diagram with more than 5 sets...")

  xnames <- if(is.null(names(x))) LETTERS[1:n] else names(x)
  if(is.null(labels)) labels <- xnames

  # membership table: rows = elements, columns = sets A, B, ...;
  # venntab counts the elements per exact intersection region ("A", "AB", ...)
  tab <- table(unlist(x), unlist(lapply(1:length(x), function(i) rep(LETTERS[i], length(x[[i]])))))
  venntab <- table(apply(tab, 1, function(x) paste(LETTERS[1:n][as.logical(x)], collapse = "")))

  if (plotit) {
    plot(x = c(-7, 7), y = c(-7, 7), asp = 1, type = "n",
         xaxt = "n", yaxt = "n", xlab = "", ylab = "", frame.plot = FALSE)
    # per set-count: hand-tuned shapes, region centres and label positions
    if (n == 2) {
      DrawCircle(x = c(2, -2), y = c(0, 0), r.out = 3, col = col)
      xy <- data.frame(x = c(-3, 3, 0), y = c(0, 0, 0),
                       set = c("A", "B", "AB")
                       , frq=NA)
      xy[match(rownames(venntab), xy$set),"frq"] <- venntab
      text(xy$x, xy$y, labels=xy$frq)
      lbl <- data.frame(x = c(-6, 6), y = c(2.5, 2.5))
      text(lbl$x, lbl$y, label = labels, cex = 2)
    }
    else if (n == 3) {
      DrawCircle(x = c(2, -1, -1), y = c(0, 1.73, -1.73),
                 r.out = 3, col = col)
      xy <- data.frame(x = c(3.5, -1.75, -1.75, 1, -2, 1, 0),
                       y = c(0, 3, -3, 1.75, 0, -1.75, 0),
                       set = c("A", "B", "C", "AB", "BC", "AC", "ABC")
                       , frq=NA)
      xy[match(rownames(venntab), xy$set),"frq"] <- venntab
      text(xy$x, xy$y, labels=xy$frq)
      lbl <- data.frame(x = c(6.5, -4.5, -4.5), y = c(0,4.8,-4.8))
      text(lbl$x, lbl$y, label = labels, cex = 2)
    }
    else if (n == 4) {
      DrawEllipse(x = c(0, 0, 2, -2), y = c(0, 0, -2, -2),
                  radius.x = 6, radius.y = 4, rot = c(1, 3) * pi/4,
                  col = col)
      xy <- data.frame(x=c(-6.0,-4.0,-2.2,0.0,2.2,3.9,5.9,4.3,2.7,-3.1,-4.3,-2.6,-0.1,2.7,0.0)
                       , y=c(0.3,-2.9,-4.2,-5.7,-4.2,-2.9,0.2,2.3,4.2,4.0,2.3,0.9,-1.6,0.8,3.4)
                       , set=c("A","AC","ACD","AD","ABD","BD","D","CD","C","B","AB","ABC","ABCD","BCD","BC")
                       , frq=NA )
      xy[match(rownames(venntab), xy$set),"frq"] <- venntab
      text(xy$x, xy$y, labels=xy$frq)
      lbl <- data.frame(x = c(-8, -4.4, 4.5, 7.7), y = c(1.9, 5.4, 5.5, 2.5))
      text(lbl$x, lbl$y, label = labels, cex = 2)
    }
    else if (n == 5) {
      DrawEllipse(x=c(0,-1.5,-2,0,1), y=c(0,0,-2,-2.5,-1), radius.x=6, radius.y=3, rot=c(1.7,2.8,4.1,5.4,6.6), col=col)
      xy <- data.frame(x=c(4.9,-0.7,-5.9,-4.3,3.1, 3.6,2.4,0.9,-2.3,-3.8,-4.7,-3.9,-1.5,1.2,3.3, 2.6,1.8,1.2,-0.5,-2.7,-3.7,-4.3,-2.6,-0.9,0.9,3.4, 2.1,-2.1,-3.4,-0.9,-0.5 )
                       , y=c(0.5,4.5,1.7,-5.5,-6.1, -1.1,1.8,2.7,2.9,1.5,-1.1,-3.1,-5,-4.7,-3.1, 0.1,2,1.4,2.4,2.2,0.2,-1.6,-3.3,-4.7,-3.8,-2.5, -2.1,1.5,-1.3,-3.8,-0.8 )
                       , set=c("B","A","E","D","C", "BE","AB","AD","AE","CE","DE","BD","CD","AC","BC"
                               ,"ABE","ABD", "ABDE","ADE","ACE","CDE","BDE","BCD","ACD","ABC","BCE", "ABCE","ACDE","BCDE","ABCD","ABCDE" )
                       , frq=NA )
      xy[match(rownames(venntab), xy$set),"frq"] <- venntab
      text(xy$x, xy$y, labels=xy$frq)
      lbl <- data.frame(x=c(1.8,7.6,5.8,-7.5,-7.9), y=c(6.3,-0.8,-7.1,-6.8,3.9))
      text( lbl$x, lbl$y, label=labels, cex=2)
    }

    if (n >= 2) {
      # replace region ids "AB".. by the user's set names.
      # (fix: the former 'levels(xy$setx) <- ...' silently did nothing when
      # xy$set was a character column rather than a factor, which is the
      # data.frame() default since R 4.0 / stringsAsFactors = FALSE;
      # it also errored for n == 1, where xy was never created)
      code <- data.frame(id=LETTERS[1:n], x=xnames, stringsAsFactors=FALSE)
      recode <- function(s) paste(code$x[match(unlist(strsplit(as.character(s), split="")), code$id)], collapse="")
      xy$setx <- sapply(as.character(xy$set), recode, USE.NAMES=FALSE)
      names(venntab) <- sapply(names(venntab), recode, USE.NAMES=FALSE)
    } else {
      # a single set has no intersection regions worth labelling
      xy <- NA
    }
  }
  else {
    xy <- NA
  }

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

  return(list(venntab, xy))
}
###
## plots: PlotHorizBar (GanttChart) ----------
# info2 <- list(labels=c("Jim","Joe","Jim","John","John","Jake","Joe","Jed","Jake"),
# starts=c(8.1,8.7,13.0,9.1,11.6,9.0,13.6,9.3,14.2),
# ends=c(12.5,12.7,16.5,10.3,15.6,11.7,18.1,18.2,19.0))
#
# PlotHorizBar <- function (from, to, grp = 1, col = "lightgrey", border = "black",
# height = 0.6, add = FALSE, xlim = NULL, ylim = NULL, ...) {
#
# # needed?? 6.5.2014
# # if (is.null(dev.list())) plot.new()
#
# grp <- factor(grp)
#
# if(!add){
#
# par(mai = c(par("mai")[1], max(par("mai")[2], strwidth(levels(grp), "inch")) +
# 0.5, par("mai")[3], par("mai")[4]))
#
# if(is.null(xlim)) xlim <- range(pretty((c(from, to))))
# if(is.null(ylim)) ylim <- c(0, nlevels(grp) + 1)
# plot(1, xlim = xlim, ylim = ylim,
# type = "n", ylab = "", yaxt = "n", ...)
#
# mtext(levels(grp), side=2, line = 1, at=1:nlevels(grp), las=1)
#
# }
# xleft <- from
# xright <- to
# ytop <- as.numeric(grp) + height/2
# ybottom <- as.numeric(grp) - height/2
# rect(xleft, ybottom, xright, ytop, density = NULL, angle = 45,
# col = col, border = border, lty = par("lty"), lwd = par("lwd"))
#
# if(!is.null(DescToolsOptions("stamp")))
# Stamp()
#
# }
#
# Visualise the missing values of a data.frame: one horizontal band per
# variable on a light background, with a coloured cell for every NA.
# With clust=TRUE the observations are reordered by a cluster analysis of
# the missingness pattern, and that ordering is returned invisibly.
PlotMiss <- function(x, col = hred, bg=SetAlpha(hecru, 0.3), clust=FALSE,
                     main = NULL, ...){

  # reverse the columns so the first variable ends up as the top band
  dat <- Rev(as.data.frame(x), 2)
  nvar <- ncol(dat)
  nobs <- nrow(dat)

  # widen the left margin enough for the longest variable name:
  # convert its width from inches into margin lines
  in2ln <- (par("mar") / par("mai"))[1]
  left.mar <- max(strwidth(colnames(dat), units="inches")) * in2ln + 3

  Canvas(xlim=c(1, nobs+1), ylim=c(0, nvar), asp=NA, xpd=TRUE,
         mar=c(5.1, left.mar, 5.1, 5.1), main=main, ...)

  # light background rectangle over the whole plotting area
  usr <- par("usr")
  rect(xleft=0, ybottom=usr[3], xright=nobs+1, ytop=usr[4], col=bg, border=NA)
  axis(side = 1)

  na.mat <- as.matrix(is.na(dat))
  if(clust){
    # reorder observations so that similar missingness patterns sit together
    rord <- order.dendrogram(as.dendrogram(hclust(dist(na.mat * 1), method = "mcquitty")))
    na.mat <- na.mat[rord, ]
    res <- rord
  } else {
    res <- NULL
  }

  # one coloured cell per missing value, band j between y = j-1 and y = j
  for(j in seq_len(ncol(na.mat))){
    hit <- which(na.mat[, j])
    if(length(hit) > 0)
      rect(xleft=hit, xright=hit+1, ybottom=j-1, ytop=j, col=col, border=NA)
  }

  # white separators between the variable bands
  abline(h=1:nvar, col="white")

  # variable names on the left, NA counts on the right
  text(x = -0.03 * nobs, y = (1:nvar)-0.5, labels = colnames(dat), las=1, adj = 1)
  text(x = nobs * 1.04, y = (1:nvar)-0.5, labels = sapply(dat, function(y) sum(is.na(y))), las=1, adj=0)

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

  invisible(res)
}
###
## plots: PlotTreemap ====
# the code is strongly based on Jeff Enos' treemap in library(portfolio), jeff@kanecap.com,
# potential improvements:
# * make the position of the text more flexible (top-left, bottom-right etc.)
# * clip text to the specific rectangles and don't allow to write over the rect.
# * see examples at http://www.hiveondemand.com/portal/treemap_basics.jsp
# Draw a squarified treemap: the values in x become rectangle areas laid
# out with the squarify algorithm; an optional grouping first splits the
# plot into group rectangles, each of which is then tiled by its members.
# The code is strongly based on Jeff Enos' treemap in library(portfolio).
#
#   x           positive numeric values (the areas), ideally named
#   grp         optional grouping vector parallel to x
#   labels      labels of the single rectangles (default: names of x)
#   labels.grp  labels of the group rectangles (default: group names)
#
# Returns invisibly, per group, the midpoints of the group and child
# rectangles (useful for additional annotation).
PlotTreemap <- function(x, grp=NULL, labels=NULL, cex=1.0, text.col="black", col=rainbow(length(x)),
                        labels.grp=NULL, cex.grp=3, text.col.grp="black", border.grp="grey50",
                        lwd.grp=5, main="") {

  # Compute the squarified layout for the values in x: returns one row per
  # value with the rectangle corners (x0, y0, x1, y1) in the unit square.
  SqMap <- function(x) {

    .sqmap <- function(z, x0 = 0, y0 = 0, x1 = 1, y1 = 1, lst=list()) {

      cz <- cumsum(z$area)/sum(z$area)
      # choose how many items fill the current strip so that the aspect
      # ratio of the resulting rectangles stays as close to 1 as possible
      n <- which.min(abs(log(max(x1/y1, y1/x1) * sum(z$area) * ((cz^2)/z$area))))
      more <- n < length(z$area)
      a <- c(0, cz[1:n])/cz[n]

      if (y1 > x1) {
        # fill a horizontal strip, then recurse into the remaining area
        lst <- list( data.frame(idx=z$idx[1:n],
                                x0=x0 + x1 * a[1:(length(a) - 1)],
                                y0=rep(y0, n), x1=x0 + x1 * a[-1], y1=rep(y0 + y1 * cz[n], n)))
        if (more) {
          lst <- append(lst, Recall(z[-(1:n), ], x0, y0 + y1 * cz[n], x1, y1 * (1 - cz[n]), lst))
        }

      } else {
        # fill a vertical strip, then recurse into the remaining area
        lst <- list( data.frame(idx=z$idx[1:n],
                                x0=rep(x0, n), y0=y0 + y1 * a[1:(length(a) - 1)],
                                x1=rep(x0 + x1 * cz[n], n), y1=y0 + y1 * a[-1]))
        if (more) {
          lst <- append(lst, Recall(z[-(1:n), ], x0 + x1 * cz[n], y0, x1 * (1 - cz[n]), y1, lst))
        }
      }
      lst
    }

    if(is.null(names(x))) names(x) <- seq_along(x)
    x <- data.frame(idx=names(x), area=x)

    res <- do.call(rbind, .sqmap(x))
    rownames(res) <- x$idx
    return(res[,-1])
  }

  # Draw the rectangles of a layout produced by SqMap()
  PlotSqMap <- function(z, col = NULL, border=NULL, lwd=par("lwd"), add=FALSE){
    if(is.null(col)) col <- as.character(z$col)
    # plot squarified treemap
    if(!add) Canvas(c(0,1), xpd=TRUE)
    for(i in 1:nrow(z)){
      rect(xleft=z[i,]$x0, ybottom=z[i,]$y0, xright=z[i,]$x1, ytop=z[i,]$y1,
           col=col[i], border=border, lwd=lwd)
    }
  }

  if(is.null(grp)) grp <- rep(1, length(x))
  if(is.null(labels)) labels <- names(x)

  # we need to sort the stuff: by group, decreasing area within group
  ord <- order(grp, -x)
  x <- x[ord]
  grp <- grp[ord]
  labels <- labels[ord]
  col <- col[ord]

  # get the groups rects first
  zg <- SqMap(Sort(tapply(x, grp, sum), decreasing=TRUE))
  # the transformation information: x0 translation, xs stretching
  tm <- cbind(zg[,1:2], xs=zg$x1 - zg$x0, ys=zg$y1 - zg$y0)

  # midpoints of the group rectangles (for the group labels and the result)
  gmidpt <- data.frame(x=apply(zg[,c("x0","x1")], 1, mean),
                       y=apply(zg[,c("y0","y1")], 1, mean))

  if(is.null(labels.grp))
    if(nrow(zg)>1) {
      labels.grp <- rownames(zg)
    } else {
      labels.grp <- NA
    }

  Canvas(c(0,1), xpd=TRUE, asp=NA, main=main)
  res <- list()
  for( i in 1:nrow(zg)){
    # get the group index
    idx <- grp == rownames(zg)[i]
    # lay out the group members in the unit square, then squeeze the
    # layout into the group's rectangle via the transform tm
    xg.rect <- SqMap(Sort(x[idx], decreasing=TRUE))
    # transform
    xg.rect[,c(1,3)] <- xg.rect[,c(1,3)] * tm[i,"xs"] + tm[i,"x0"]
    xg.rect[,c(2,4)] <- xg.rect[,c(2,4)] * tm[i,"ys"] + tm[i,"y0"]

    PlotSqMap(xg.rect, col=col[idx], add=TRUE)
    res[[i]] <- list(grp=gmidpt[i,],
                     child= cbind(x=apply(xg.rect[,c("x0","x1")], 1, mean),
                                  y=apply(xg.rect[,c("y0","y1")], 1, mean)))
    text( x=apply(xg.rect[,c("x0","x1")], 1, mean),
          y=apply(xg.rect[,c("y0","y1")], 1, mean),
          labels=labels[idx], cex=cex, col=text.col )
  }
  names(res) <- rownames(zg)

  # group borders and group labels drawn on top of the tiles
  PlotSqMap(zg, col=NA, add=TRUE, border=border.grp, lwd=lwd.grp)
  text( x=apply(zg[,c("x0","x1")], 1, mean),
        y=apply(zg[,c("y0","y1")], 1, mean),
        labels=labels.grp, cex=cex.grp, col=text.col.grp)

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

  invisible(res)
}
###
## plots: PlotCirc ====
PlotCirc <- function(tab, acol = rainbow(sum(dim(tab))), aborder = "darkgrey",
                     rcol = SetAlpha(acol[1:nrow(tab)], 0.5), rborder = "darkgrey",
                     gap = 5, main = "", labels = NULL, cex.lab = 1.0,
                     las = 1, adj = NULL, dist = 2){

  # Plot a circular ("chord") diagram of a 2-way table: the categories of
  # both margins are laid out as arcs on a circle and the cell frequencies
  # are drawn as ribbons connecting the corresponding arcs.
  #   tab      : a 2-dimensional table
  #   acol/aborder : fill/border colours of the outer arcs
  #   rcol/rborder : fill/border colours of the connecting ribbons
  #   gap      : gap between the sectors in degrees
  #   las, adj, dist : orientation, adjustment and distance of the labels
  # Returns (invisibly) the cartesian coordinates of the label positions.

  ribbon <- function( angle1.beg, angle1.end, angle2.beg, angle2.end,
                      radius1 = 1, radius2 = radius1, col = "blue",
                      border ="darkgrey" ){
    # draw one ribbon between two arcs, built from two arcs and two bezier curves
    xy1 <- DescTools::PolToCart( radius1, angle1.beg )
    xy2 <- DescTools::PolToCart( radius2, angle1.end )
    xy3 <- DescTools::PolToCart( radius1, angle2.beg )
    xy4 <- DescTools::PolToCart( radius2, angle2.end )

    bez1 <- DescTools::DrawArc(rx = radius2, theta.1 = DescTools::CartToPol(xy2$x, xy2$y)$theta, theta.2 = DescTools::CartToPol(xy4$x, xy4$y)$theta, plot=FALSE)[[1]]
    bez2 <- DescTools::DrawBezier( x = c(xy4$x, 0, xy3$x), y = c(xy4$y, 0, xy3$y), plot=FALSE )
    bez3 <- DescTools::DrawArc(rx = radius1, theta.1=DescTools::CartToPol(xy3$x, xy3$y)$theta, theta.2 =DescTools::CartToPol(xy1$x, xy1$y)$theta, plot=FALSE )[[1]]
    bez4 <- DescTools::DrawBezier(x = c(xy1$x, 0, xy2$x), y = c(xy1$y, 0, xy2$y), plot=FALSE )

    polygon( x=c(bez1$x, bez2$x, bez3$x, bez4$x),
             y=c(bez1$y, bez2$y, bez3$y, bez4$y), col=col, border=border)
  }

  n <- sum(tab)
  ncol <- ncol(tab)
  nrow <- nrow(tab)

  d <- DegToRad(gap)   # the gap between the sectors in radiant

  # recycle colours/borders to the required lengths
  acol <- rep(acol, length.out = ncol+nrow)
  rcol <- rep(rcol, length.out = nrow)
  aborder <- rep(aborder, length.out = ncol+nrow)
  rborder <- rep(rborder, length.out = nrow)

  # sector boundary angles: left half for column sums, right half for row sums
  mpts.left <- c(0, cumsum(as.vector(rbind(rev(apply(tab, 2, sum))/ n * (pi - ncol * d), d))))
  mpts.right <- cumsum(as.vector(rbind(rev(apply(tab, 1, sum))/ n * (pi - nrow * d), d)))
  mpts <- c(mpts.left, mpts.right + pi) + pi/2 + d/2

  DescTools::Canvas(10, main=main, xpd=TRUE)
  DescTools::DrawCircle(x=0, y=0, r.in=9.5, r.out=10,
                        theta.1=mpts[seq_along(mpts) %% 2 == 1],
                        theta.2=mpts[seq_along(mpts) %% 2 == 0],
                        col=acol, border=aborder)

  if(is.null(labels)) labels <- rev(c(rownames(tab), colnames(tab)))

  # angles of the individual cell segments on both halves of the circle
  ttab <- rbind(DescTools::Rev(tab, margin=2) / n * (pi - ncol * d), d)
  pts.left <- (c(0, cumsum(as.vector(ttab))))
  ttab <- rbind(DescTools::Rev(t(tab), margin=2)/ n * (pi - nrow * d), d)
  pts.right <- (c( cumsum(as.vector(ttab)))) + pi

  pts <- c(pts.left, pts.right) + pi/2 + d/2
  dpt <- data.frame(from=pts[-length(pts)], to=pts[-1])

  # draw a ribbon for each cell of the table
  for( i in 1:ncol) {
    for( j in 1:nrow) {
      lang <- dpt[(i-1)*(nrow+1)+j,]
      rang <- DescTools::Rev(dpt[-nrow(dpt),], margin=1)[(j-1)*(ncol+1) + i,]
      ribbon( angle1.beg=rang[,2], angle1.end=lang[,1], angle2.beg=rang[,1], angle2.end=lang[,2],
              radius1 = 10, radius2 = 9, col = rcol[j], border = rborder[j])
    }}

  # label positions at the middle of each sector, dist units outside the circle
  out <- DescTools::PolToCart(r = 10 + dist, theta=filter(mpts, rep(1/2,2))[seq(1,(nrow+ncol)*2, by=2)])

  if(las == 2){
    if(is.null(adj)) adj <- c(rep(1, nrow), rep(0,ncol))
    # bug fix: 'length_out' was silently ignored by rep(); must be 'length.out'
    adj <- rep(adj, length.out=length(labels))
    sapply(seq_along(labels),
           function(i) text(out$x[i], out$y[i], labels=labels[i], cex=cex.lab,
                            srt=DescTools::RadToDeg(atan(out$y[i]/out$x[i])), adj=adj[i]))
  } else {
    text(out, labels=labels, cex=cex.lab, srt=ifelse(las==3, 90, 0), adj=adj)
  }

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

  invisible(out)
}
###
## plots: PlotWeb ====
PlotWeb <- function(m, col=c(hred, hblue), lty=NULL, lwd = NULL, args.legend=NULL, pch=21, pt.cex=2,
                    pt.col="black", pt.bg="darkgrey", cex.lab = 1.0,
                    las = 1, adj = NULL, dist = 0.5, ... ){

  # Plot the lower triangle of a (symmetric) matrix m as a web: the variables
  # are placed on a circle and every pair is connected with a segment whose
  # width is proportional to abs(m[i,j]) and whose colour encodes the sign
  # (col[1] for negative, col[2] for positive values).
  # Returns (invisibly) the cartesian coordinates of the node positions.
  # following an idee from library(LIM)
  # example(plotweb)

  oldpar <- par(c("lend","xpd"))
  on.exit(par(oldpar))

  w <- 4
  par("xpd"=TRUE, lend="butt")
  DescTools::Canvas(w, ...)

  # node positions equally spaced on a circle of radius 3
  angles <- seq(0, 2*pi, length=nrow(m)+1)[-1]
  xy <- DescTools::PolToCart(r=3, theta=angles)
  xylab <- DescTools::PolToCart(r=3 + dist, theta=angles)

  labels <- colnames(m)

  if(las == 2){
    # labels rotated along their angle to the center
    if(is.null(adj)) adj <- (angles %[]% c(pi/2, 3*pi/2))*1
    # bug fix: 'length_out' was silently ignored by rep(); must be 'length.out'
    adj <- rep(adj, length.out=length(labels))
    sapply(seq_along(labels),
           function(i) text(xylab$x[i], xylab$y[i], labels=labels[i], cex=cex.lab,
                            srt=DescTools::RadToDeg(atan(xy$y[i]/xy$x[i])), adj=adj[i]))
  } else {
    if(is.null(adj)){
      if(las==1)
        adj <- (angles %[]% c(pi/2, 3*pi/2))*1
      if(las==3)
        adj <- (angles %[]% c(3*pi/4, 7*pi/4))*1
    }
    # bug fix: 'length_out' -> 'length.out' (see above)
    adj <- rep(adj, length.out=length(labels))
    sapply(seq_along(labels),
           function(i) text(xylab$x[i], xylab$y[i], labels=labels[i], cex=cex.lab,
                            srt=ifelse(las==3, 90, 0), adj=adj[i]))
  }

  #   d.m <- data.frame( from=rep(colnames(m), nrow(m)), to=rep(colnames(m), each=nrow(m))
  #                      , d=as.vector(m)
  #                      , from.x=rep(xy$x, nrow(m)), from.y=rep(xy$y, nrow(m)), to.x=rep(xy$x, each=nrow(m)), to.y=rep(xy$y, each=nrow(m)) )
  #   d.m <- d.m[d.m$d > 0,]

  # lineare transformation of linewidth
  a <- 0.5
  b <- 10
  #   d.m$d.sc <- (b-a) * (min(d.m$d)-a) + (b-a) /diff(range(d.m$d)) * d.m$d

  # one row per unordered pair of variables, with the lower triangle values
  i <- DescTools::CombPairs(1:dim(m)[1])
  # bug fix: the second column was also named 'from'; it holds the target node
  d.m <- data.frame(from=colnames(m)[i[,1]], to=colnames(m)[i[, 2]], d=m[lower.tri(m)],
                    from.x=xy[[1]][i[,2]], to.x=xy[[1]][i[,1]],
                    from.y=xy[[2]][i[,2]], to.y=xy[[2]][i[,1]])

  # scale the absolute values to line widths in [a, b] unless lwd is given
  if(is.null(lwd))
    d.m$d.sc <- DescTools::LinScale(abs(d.m$d), newlow=a, newhigh=b )
  else
    d.m$d.sc <- lwd

  # bug fix: the condition tested 'lwd' instead of 'lty', so a user-supplied
  # line type was ignored whenever lwd was NULL
  if(is.null(lty))
    d.m$lty <- par("lty")
  else
    d.m$lty <- lty

  col <- rep(col, length.out=2)
  # colour by sign of the value: col[1] negative, col[2] positive
  segments( x0=d.m$from.x, y0=d.m$from.y, x1 = d.m$to.x, y1 = d.m$to.y,
            col = col[((sign(d.m$d)+1)/2)+1], lty = d.m$lty, lwd=d.m$d.sc, lend= 1)

  points( xy, cex=pt.cex, pch=pch, col=pt.col, bg=pt.bg )

  # find min/max negative value and min/max positive value for the legend
  i <- c(which.min(d.m$d), which.max(ifelse(d.m$d<=0, d.m$d, NA)), which.min(ifelse(d.m$d>0, d.m$d, NA)), which.max(d.m$d))

  args.legend1 <- list( x="bottomright",
                        legend=Format(d.m$d[i], digits=3, leading="drop"), lwd = d.m$d.sc[i],
                        col=rep(col, each=2), bg="white", cex=0.8)
  if ( !is.null(args.legend) ) { args.legend1[names(args.legend)] <- args.legend }

  # args.legend=NA suppresses the legend entirely
  add.legend <- TRUE
  if(!is.null(args.legend)) if(all(is.na(args.legend))) {add.legend <- FALSE}
  if(add.legend) do.call("legend", args.legend1)

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

  invisible(xy)
}
###
## plots: PlotCandlestick ====
PlotCandlestick <- function(x, y, xlim = NULL, ylim = NULL, col = c("springgreen4","firebrick"), border=NA, args.grid = NULL, ...) {

  # Draw a candlestick chart: y is a 4-column matrix (open, low, high, close),
  # x the positions on the horizontal axis. Candles with open > close are
  # drawn in col[2], all others in col[1].
  # args.grid: list of arguments for grid(); NA suppresses the grid.

  if (is.null(xlim))
    xlim <- range(x[is.finite(x)])
  if (is.null(ylim))
    ylim <- range(y[is.finite(y)])

  # set up an empty plot; the x-axis is drawn manually at the candle positions
  plot(x = 1, y = 1, xlim = xlim, ylim = ylim,
       type = "n", xaxt = "n", xlab = "", ...)

  # the grid is drawn by default; args.grid=NA turns it off
  draw.grid <- !(!is.null(args.grid) && all(is.na(args.grid)))
  if (draw.grid) {
    grid.args <- list(lty = "solid", col = "grey83")
    if (!is.null(args.grid)) {
      # user-supplied settings override the defaults
      grid.args[names(args.grid)] <- args.grid
    }
    do.call("grid", grid.args)
  }

  # second colour for falling candles (open > close)
  candle.col <- col[(y[, 1] > y[, 4]) * 1 + 1]
  # wicks from low to high, then the candle bodies from open to close
  segments(x0 = x, y0 = y[, 2], y1 = y[, 3], col = candle.col)
  rect(xleft = x - 0.3, ybottom = y[, 1], xright = x + 0.3, ytop = y[, 4],
       col = candle.col, border = border)

  axis(side = 1, at = x, labels = x)

  if (!is.null(DescToolsOptions("stamp")))
    Stamp()
}
###
## plots: PlotSuperbar
# ueberlagerte Barplots
# Superbarplot in UsingR
###
## plots: PlotMatrix ====
PlotMatrix <- function(x, y=NULL, data=NULL, panel=l.panel,
                       nrows=0, ncols=nrows, save=TRUE, robrange.=FALSE, range.=NULL,
                       pch=NULL, col=1, reference=0, ltyref=3,
                       log="", xaxs="r", yaxs="r", xaxmar=NULL, yaxmar=NULL,
                       vnames=NULL, main='', cex.points=NA, cex.lab=0.7, cex.text=1.3,
                       cex.title=1,
                       bty="o", oma=NULL, ...) {

  # Purpose:   pairs with different plotting characters, marks and/or colors
  #            showing submatrices of the full scatterplot matrix
  #            possibly on several pages
  # x, y     : data (formula, data.frame or matrix); if y is given, the
  #            x-variables are plotted against the y-variables
  # range.   : fixed plotting ranges, either a 2-vector or a 2-row matrix
  #            (optionally with variable names as colnames)
  # reference: value(s) at which reference lines are drawn (lty=ltyref)
  # ******************************************************************************
  # Author: Werner Stahel, Date: 23 Jul 93; minor bug-fix+comments:
  # M.Maechler

  is.formula <- function(object) length(class(object))>0 && class(object)=="formula"

  # default panel: points, or text labels if pch is a character vector
  l.panel <- function(x,y,indx,indy,pch=1,col=1,cex=cex.points,...) {
    if (is.character(pch)) text(x,y,pch,col=col,cex=cex) else
      points(x,y,pch=pch,col=col,cex=cex,...)
  }

  oldpar <- par(c("mfrow","mar","cex","oma","mgp"))
  on.exit(par(oldpar))

  # **************** preparations **************
  # data: evaluate formula interfaces into a numeric matrix
  if (is.formula(x)) {
    if (length(x)==2)
      x <- model.frame(x,data, na.action=NULL) else {
        ld <- model.frame(x[c(1,3)],data, na.action=NULL)
        ld <- cbind(ld, model.frame(x[1:2],data, na.action=NULL))
        x <- ld
      }
  }
  if (is.data.frame(x)) {
    for (jj in 1:length(x)) x[[jj]] <- as.numeric(x[[jj]])
    x <- as.matrix(x)
  } else x <- cbind(x)
  #    stop("!PlotMatrix! first argument must either be a formula or a data.frame or matrix")

  nv1 <- dim(x)[2]
  lv1 <- lv2 <- 0
  if (is.null(y)) {
    ldata <- x
    if (save) { nv1 <- nv1-1; lv2 <- 1 }
    nv2 <- nv1
  } else { # cbind y to data for easier preparations
    save <- FALSE
    if (is.formula(y)) {
      ld <- model.frame(x[c(1,3)],data, na.action=NULL)
      if (length(x)>2)
        ld <- cbind(ld, model.frame(x[1:2],data, na.action=NULL))
      x <- ld
    }
    if (is.formula(y)) {
      if (length(y)==2)
        y <- model.frame(y,data, na.action=NULL) else {
          ld <- model.frame(y[c(1,3)],data, na.action=NULL)
          ld <- cbind(ld, model.frame(y[1:2],data, na.action=NULL))
          y <- ld
        }
    }
    if (is.data.frame(y)) {
      for (jj in 1:length(y)) y[[jj]] <- as.numeric(y[[jj]])
      y <- as.matrix(y)
    }
    ldata <- cbind(x, as.matrix(y))
    nv2 <- ncol(ldata)-nv1 ; lv2 <- nv1 }

  nvv <- ncol(ldata)
  tnr <- nrow(ldata)

  # variable labels
  if (missing(vnames)) vnames <- dimnames(ldata)[[2]]
  if (is.null(vnames)) vnames <- paste("V",1:nvv)

  # plotting characters
  if (length(pch)==0) pch <- 1

  # range: one column per variable, NA columns are filled from the data below
  rg <- matrix(nrow=2,ncol=nvv,dimnames=list(c("min","max"),vnames))
  if(is.matrix(range.)) {
    if (is.null(colnames(range.))) {
      # bug fix: was 'ncol(range)', i.e. ncol of the base function 'range',
      # which is NULL and made the comparison fail
      if (ncol(range.)==ncol(rg)) rg[,] <- range. else
        warning('argument range. not suitable. ignored')
    } else {
      lj <- match(colnames(range.),vnames)
      if (any(is.na(lj))) {
        warning('variables', colnames(range.)[is.na(lj)],'not found')
        if (any(!is.na(lj))) rg[,lj[!is.na(lj)]] <- range.[,!is.na(lj)]
      }
    }
  }
  else
    if (length(range.)==2&&is.numeric(range.)) rg[,] <- matrix(range.,2,nvv)
  lna <- apply(is.na(rg),2, any)
  if (any(lna))
    rg[,lna] <- apply(ldata[,lna,drop=FALSE],2,
                      Range, robust=robrange., na.rm=TRUE, finite=TRUE)
  colnames(rg) <- vnames

  # reference lines
  tjref <- (length(reference)>0)&&!(is.logical(reference)&&!reference)
  if (tjref) {
    if(length(reference)==1) lref <- rep(reference,length=nvv) else {
      lref <- rep(NA,nvv)
      lref[match(names(reference),vnames)] <- reference
    }
    names(lref) <- vnames
  }

  # plot: determine panel layout, axis margins and outer margins
  jmain <- !is.null(main)&&main!=""
  lpin <- par("pin")
  lnm <- if (lpin[1]>lpin[2]) {
    if (nv1==6 && nv2==6) c(6,6) else c(5,6) } else c(8,5)
  if (is.na(nrows)||nrows<1)  nrows <- ceiling(nv1/((nv1-1)%/%lnm[1]+1))
  if (is.na(ncols)||ncols<1)  ncols <- ceiling(nv2/((nv2-1)%/%lnm[2]+1))
  if (is.null(xaxmar)) xaxmar <- 1+(nv1*nv2>1)
  if (any(is.na(xaxmar))) xaxmar <- 1+(nv1*nv2>1)
  xaxmar <- ifelse(xaxmar>1,3,1)
  if (is.null(yaxmar)) yaxmar <- 2+(nv1*nv2>1)
  if (any(is.na(yaxmar))) yaxmar <- 2+(nv1*nv2>1)
  yaxmar <- ifelse(yaxmar>2,4,2)
  if (length(oma)!=4)
    oma <- c(2+(xaxmar==1), 2+(yaxmar==2),
             1.5+(xaxmar==3)+cex.title*2*jmain,
             2+(yaxmar==4))
  #  oma <- 2 + c(0,0,!is.null(main)&&main!="",1)
  par(mfrow=c(nrows,ncols))
  ##-   if (!is.na(cex)) par(cex=cex)
  ##-   cex <- par("cex")
  ##-   cexl <- cex*cexlab
  ##-   cext <- cex*cextext
  par(oma=oma*cex.lab, mar=rep(0.2,4), mgp=cex.lab*c(1,0.5,0))
  if (is.na(cex.points)) cex.points <- max(0.2,min(1,1.5-0.2*log(tnr)))
  #
  # log: non-positive values cannot be drawn on log axes
  if (length(grep("x",log))>0) ldata[ldata[,1:nv1]<=0,1:nv1] <- NA
  if (length(grep("y",log))>0) ldata[ldata[,lv2+1:nv2]<=0,lv2+1:nv2] <- NA
  npgr <- ceiling(nv2/nrows)
  npgc <- ceiling(nv1/ncols)
  # ******************** plots **********************
  for (ipgr in 1:npgr) {
    lr <- (ipgr-1)*nrows
    for (ipgc in 1:npgc) {
      lc <- (ipgc-1)*ncols
      if (save&&((lr+nrows)<=lc)) break
      for (jr in 1:nrows) { #-- plot row [j]
        jd2 <- lr+jr
        j2 <- lv2 + jd2
        if (jd2<=nv2) v2 <- ldata[,j2]
        for (jc in 1:ncols) { #-- plot column [j2-lv2] = 1:nv2
          jd1 <- lc+jc
          j1 <- lv1 + jd1
          if (jd2<=nv2 & jd1<=nv1) {
            v1 <- ldata[,j1]
            # bug fix: 'xlim <- ...' and 'ylim <- ...' were assignments passed
            # as positional arguments; they only hit the right formals by
            # accident of plot.default's argument order. Use '=' explicitly.
            plot(v1,v2, type="n", xlab="", ylab="", axes=FALSE,
                 xlim = rg[,j1], ylim = rg[,j2],
                 xaxs=xaxs, yaxs=yaxs, log=log, cex=cex.points)
            usr <- par("usr")
            if (jr==nrows||jd2==nv2) {
              if (xaxmar==1) axis(1)
              mtext(vnames[j1], side=1, line=(0.5+1.2*(xaxmar==1))*cex.lab,
                    cex=cex.lab, at=mean(usr[1:2]))
            }
            if (jc==1) {
              if (yaxmar==2) axis(2)
              mtext(vnames[j2], side=2, line=(0.5+1.2*(yaxmar==2))*cex.lab,
                    cex=cex.lab, at=mean(usr[3:4]))
            }
            if (jr==1&&xaxmar==3) axis(3,xpd=TRUE)
            if (jc==ncols||jd1==nv1) if (yaxmar==4) axis(4,xpd=TRUE)
            box(bty=bty)
            if (any(v1!=v2,na.rm=TRUE)) { # not diagonal
              panel(v1,v2,jd1,jd2, pch, col, ...)
              if (tjref) abline(h=lref[j1],v=lref[j2],lty=ltyref)
            }
            else { uu <- par("usr") # diagonal: print variable name
            text(mean(uu[1:2]),mean(uu[3:4]), vnames[j1], cex=cex.text) }
          }
          else frame()
        }
      }
      if (jmain)  mtext(main,3,oma[3]*0.9-2*cex.title,outer=TRUE,cex=cex.title)
      ##-     stamp(sure=FALSE,line=par("mgp")[1]+0.5)
      #      stamp(sure=FALSE,line=oma[4]-1.8)  ### ??? why does it need so much space?
    }}
  on.exit(par(oldpar))
  "PlotMatrix: done"
}
###
## plots: ACF, GACF and other TimeSeries plots ----------
PlotACF <- function(series, lag.max = 10*log10(length(series)), ...) {

  ## Combined display for a univariate time series: the series itself on top,
  ## its ACF (bottom left) and its PACF (bottom right) below.
  ## Arguments: series  : time series
  ##            lag.max : the maximum number of lags for the correlograms
  ## Author: Markus Huerzeler, Date: 15 Jun 94
  ## Revision: Christian Keller, 5 May 98
  ## Revision: Markus Huerzeler, 11. Maerz 04

  # only the last of the three panels should carry the stamp,
  # so clear the stamp option now and restore it just before the PACF plot
  stamp.opt <- DescToolsOptions(stamp=NULL)

  if (!is.null(dim(series)))
    stop("f.acf is only implemented for univariate time series")

  par(mfrow=c(1,1))
  usr.par <- par(mar=c(3,3,1,1), mgp=c(1.5,0.5,0))
  on.exit(par(usr.par))

  # screen 1: full width top; screens 2/3: bottom left and right halves
  layout.figs <- matrix(c(0,1,0.33,1, 0,0.5,0,0.33, 0.5,1,0,0.33),
                        ncol=4, byrow=TRUE)
  split.screen(figs=layout.figs, erase=TRUE)

  ## screen(1)
  plot.ts(series, cex=0.7, ylab=deparse(substitute(series)), ...)

  screen(2)
  PlotGACF(series, lag.max=lag.max, cex=0.7)

  screen(3)
  # restore stamping for the final panel only
  options(stamp.opt)
  PlotGACF(series, lag.max=lag.max, type="part", cex=0.7)

  close.screen(all.screens=TRUE)

  invisible(par(usr.par))
}
PlotGACF <- function(series, lag.max=10*log10(length(series)), type="cor", ylab=NULL, ...) {

  ## Plot the (partial) autocorrelation function of a time series as a
  ## spike plot with blue two-sided ~95% confidence bounds (+-2/sqrt(n)).
  ## type: "cor" (ACF), "cov" or "part" (PACF).
  ## Author: Markus Huerzeler, Date: 6 Jun 94
  ## Revision: Christian Keller, 27 Nov 98
  ## Revision: Markus Huerzeler, 11 Mar 02
  ## Correction for axis labels with ts-objects and deletion of ACF(0), Andri/10.01.2014

  type <- match.arg(type, c("cor","cov","part"))

  ac <- acf(na.omit(series), type=type, plot=FALSE, lag.max=lag.max)
  rho <- ac$acf

  if(type=="cor") {
    # the trivial ACF(0) = 1 would dominate the plot; blank it out
    rho[1] <- 0
    if(is.null(ylab)) ylab <- "ACF"
  }
  if(type=="part") {
    # the PACF starts at lag 1; prepend a 0 so lags line up with the ACF plot
    rho <- c(0, rho)
    if(is.null(ylab)) ylab <- "PACF"
  }

  conf <- 2/sqrt(ac$n.used)
  ylim. <- range(c(rho, conf, -conf))*c(1.1, 1.1)

  # use plain integer lags even for ts objects (drop the phase scaling)
  lags <- seq_along(rho)-1

  ## x-axis tick positions: the first label is always lags[1]
  ticks <- pretty(c(0, lags))
  ntick <- length(ticks)
  step <- ticks[2] - ticks[1]
  offset <- ticks[1]-lags[1]
  # drop the first pretty tick if it would sit too close to lags[1]
  ticks <- c(lags[1], ticks[1][offset > step/2], ticks[2:ntick])

  plot(lags, rho, type="h", ylim=ylim., xlab="Lag k", ylab=ylab,
       xaxt="n", xlim=c(0,length(rho)), ...)
  axis(1, at=ticks, ...)
  abline(0,0)
  abline(h=c(conf, - conf), lty=2, col="blue")

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

  invisible()
}
PlotMonth <- function(x, type = "l", labels, xlab = "", ylab = deparse(substitute(x)), ...)

  #--
  # Plots the monthly (or seasonal) effects of a univariate time series:
  # for each cycle position the sub-series is drawn together with a
  # horizontal line at its mean.
  #
  # ported from S+5 and adapted for R
  #
  # x must be a univariate time series
  # type: "l" draws the sub-series as lines, "h" as vertical deviations
  #       from the seasonal means
  #--

{
  if(length(dim(x)))
    stop("This implementation is only for univariate time series")
  old.opts <- options(warn = -1)
  on.exit(options(old.opts))
  if(!(type == "l" || type == "h"))
    stop(paste("type is \"", type, "\", it must be \"l\" or \"h\"",
               sep = ""))
  f <- frequency(x)
  cx <- cycle(x)
  # m: mean of the series for each cycle position (seasonal means)
  m <- tapply(x, cx, mean)
  if(cx[1] != 1 || cx[length(x)] != f) {
    # pad with NAs so the series starts at cycle position 1 and ends at f
    x <- ts(c(rep(NA, cx[1] - 1), x, rep(NA, f - cx[length(x)])),
            start = start(x, format = T)[1], end = c(end(x, format
                                                         = T)[1], f), frequency = f)
    cx <- cycle(x)
  }
  # i orders the observations by cycle position (all Jan first, then Feb, ...)
  i <- order(cx)
  n <- length(x)
  if(missing(labels))
    labels <- if(f == 12) c("Jan", "Feb", "Mar", "Apr", "May",
                            "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
    ) else if(f == 4)
      c("First", "Second", "Third", "Fourth")
  else 1:f
  if(length(labels) != f)
    stop(paste("There must be", f, "labels"))
  # p: number of complete cycles; hx: left x-position of each month's panel
  # (panels are separated by gaps of width 1)
  p <- n/f
  hx <- seq(1, n, by = p) + (0:(f - 1))
  hy <- rep(m, rep(2, length(m)))
  # X: x-positions of the individual observations within the panels
  X <- as.vector(outer(0:(p - 1), hx, "+"))
  plot(c(1, n + f), range(x[!is.na(x)]), type = "n", axes = F, xlab =
         xlab, ylab = ylab, ...)
  # strip graphics arguments from ... that must not be passed on to
  # lines()/segments() below (they are only valid for plot())
  dotdot <- list(...)
  ddttl <- match(c("main", "sub", "axes", "ylim"), names(dotdot), nomatch
                 = 0)
  ddttl <- ddttl[ddttl != 0]
  add.axes <- T
  if(length(ddttl)) {
    if(any(names(dotdot) == "axes"))
      add.axes <- dotdot$axes
    dotdot <- dotdot[ - ddttl]
  }
  if(type == "l")
    # one line per cycle position showing the sub-series over the years
    for(j in 1:f)
      do.call("lines", c(list(hx[j]:(hx[j] + p - 1), x[i][
        ((j - 1) * p + 1):(j * p)]), dotdot))
  else if(type == "h")
    # vertical segments from each observation to its seasonal mean
    do.call("segments", c(list(X, x[i], X, m[cx][i]), dotdot))
  # horizontal segment at the seasonal mean of each panel
  do.call("segments", c(list(hx, m, hx + p, m), dotdot))
  if(add.axes) {
    box()
    axis(2)
    axis(1, at = hx + p/2, labels = labels)
  }
  if(!is.null(DescToolsOptions("stamp")))
    Stamp()
  invisible()
}
PlotQQ <- function(x, qdist, main=NULL, xlab=NULL, ylab=NULL, add=FALSE,
                   args.qqline=NULL, conf.level=0.95, args.cband = NULL, ...) {

  # QQ-plot of x against an arbitrary theoretical distribution, given as a
  # quantile function qdist(p). A simulated pointwise confidence band
  # (suppressed by args.cband=NA or conf.level=NA) and a robust qq-line
  # through two quantiles (suppressed by args.qqline=NA) are added.
  # example:
  #   y <- rexp(100, 1/10)
  #   PlotQQ(y, function(p) qexp(p, rate=1/10))

  # x becomes the theoretical quantiles, y the sorted sample
  y <- sort(x)
  p <- ppoints(y)
  x <- qdist(p)

  if(is.null(main)) main <- gettextf("Q-Q-Plot", qdist)
  if(is.null(xlab)) xlab <- "Theoretical Quantiles"
  if(is.null(ylab)) ylab <- "Sample Quantiles"

  if(!add)
    plot(x=x, y, main=main, xlab=xlab, ylab=ylab, type="n", ...)

  # add confidence band if desired:
  # simulate 1000 samples from the theoretical distribution and take
  # pointwise quantiles of the order statistics as band limits
  if (!(is.na(conf.level) || identical(args.cband, NA)) ) {
    cix <- qdist(ppoints(x))
    ciy <- replicate(1000, sort(qdist(runif(length(x)))))

    args.cband1 <- list(col = SetAlpha(Pal()[1], 0.25), border = NA)
    if (!is.null(args.cband))
      args.cband1[names(args.cband)] <- args.cband

    ci <- apply(ciy, 1, quantile, c(-1, 1) * conf.level/2 + 0.5)
    do.call("DrawBand", c(args.cband1,
                          list(x = c(cix, rev(cix))),
                          list(y = c(ci[1,], rev(ci[2,])) )
    ))
  }

  points(x=x, y=y, ...)

  # John Fox implements a envelope option in car::qqplot, in the sense of:
  # (unfortunately using ddist...)
  #
  #   # add qqline if desired
  #   if(!identical(args.band, NA)) {
  #     n <- length(x)
  #     zz <- qnorm(1 - (1 - args.band$conf.level) / 2)
  #     SE <- (slope / d.function(z, ...)) * sqrt(p * (1 - p) / n)
  #     fit.value <- int + slope * z
  #
  #     upper <- fit.value + zz * SE
  #     lower <- fit.value - zz * SE
  #
  #     lines(z, upper, lty = 2, lwd = lwd, col = col.lines)
  #     lines(z, lower, lty = 2, lwd = lwd, col = col.lines)
  #   }

  # add qqline if desired
  if(!identical(args.qqline, NA)) {
    # define default arguments for ci.band
    args.qqline1 <- list(probs = c(0.25, 0.75), qtype=7, col=par("fg"), lwd=par("lwd"), lty=par("lty"))
    # override default arguments with user defined ones
    if (!is.null(args.qqline)) args.qqline1[names(args.qqline)] <- args.qqline

    # estimate qqline, instead of set it to abline(a = 0, b = 1)
    # plot qqline through the 25% and 75% quantiles (same as qqline does for normal dist)
    ly <- quantile(y, prob=args.qqline1[["probs"]], type=args.qqline1[["qtype"]], na.rm = TRUE)
    lx <- qdist(args.qqline1[["probs"]])
    slope <- diff(ly) / diff(lx)
    int <- ly[1L] - slope * lx[1L]

    do.call("abline", c(args.qqline1[c("col","lwd","lty")], list(a=int, b=slope)) )
  }

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

}
## Describe ====
# not needed anymore, by 0.99.19
# .txtline <- function(txt, width, space="", ind="") {
# paste(
# ind, paste(format(names(txt), width=width, justify="right"), collapse=space), "\n",
# ind, paste(format(txt, width=width, justify="right"), collapse=space), "\n",
# sep="" )
# }
TOne <- function(x, grp = NA, add.length=TRUE,
                 colnames=NULL, vnames=NULL, total=TRUE,
                 align="\\l", FUN = NULL, NUMTEST = NULL, numtestlab = NULL){

  # Creates the "table one" of a study: one row (block) per variable in x,
  # described either as mean (sd) for numerics, n (%) for categoricals or a
  # single n (%) line for dichotomous variables, split by the groups in grp,
  # with an appropriate significance test per row.
  #   FUN      : cell function for numeric variables (default: mean (sd))
  #   NUMTEST  : test function(x, g) for numeric variables
  #              (default: Kruskal-Wallis), numtestlab its legend label

  afmt <- Fmt("abs")
  pfmt <- Fmt("per")
  nfmt <- Fmt("num")

  if(is.null(vnames)){
    vnames <- if(is.null(colnames(x))) "Var1" else colnames(x)
    default_vnames <- TRUE
  } else {
    # bug fix: this flag was set to TRUE here as well, so the user-supplied
    # vnames never took the "no automatic ref level" path below
    default_vnames <- FALSE
  }

  if(is.null(FUN)){
    num_fun <- function(x){
      # how a single cell for numeric data should look: mean (sd)
      gettextf("%s (%s)",
               Format(mean(x, na.rm=TRUE), fmt=nfmt),
               Format(sd(x, na.rm=TRUE), fmt=nfmt))
    }
  } else {
    num_fun <- FUN
  }

  # define test for numeric values
  if(is.null(NUMTEST)){
    num_test <- function(x, g){
      # how the test should be calculated and represented
      Format(kruskal.test(x = x, g = g)$p.value, fmt="*", na.form = "   ")
    }
    numtestlab <- "Kruskal-Wallis test"

  } else {
    num_test <- NUMTEST
    if(is.null(numtestlab)) numtestlab <- "numeric test"
  }

  # one row for a numeric variable: total, group cells and the test result
  num_row <- function(x, g, total=TRUE, vname = deparse(substitute(x))){
    if(!identical(g, NA)) {
      res <- num_test(x, g)
      num_test_label <- names(res)
    } else {
      res <- ""
    }
    cbind(var=vname, total = num_fun(x), rbind(tapply(x, g, num_fun)),
          paste(res, .FootNote(1)))

  }

  # block of rows for a categorical variable: variable name, then one
  # indented row per level with n (%), plus a chi-square test
  cat_mat <- function(x, g, vname=deparse(substitute(x))){

    if(is.character(x))
      x <- factor(x)

    tab <- table(x, g)
    ptab <- prop.table(tab, margin = 2)
    tab <- addmargins(tab, 2)
    ptab <- cbind(ptab, Sum=prop.table(table(x)))

    # crunch tab and ptab
    m <- matrix(NA, nrow=nrow(tab), ncol=ncol(tab))
    m[,] <- gettextf("%s (%s)",
                     Format(tab, fmt=afmt),
                     Format(ptab, fmt=pfmt))

    # totals to the left
    m <- m[, c(ncol(m), 1:(ncol(m)-1))]

    # set rownames
    m <- cbind( c(vname, paste(" ", levels(x))),
                rbind("", m))
    # add test
    if(nrow(tab)>1)
      p <- chisq.test(tab)$p.value
    else
      p <- NA
    m <- cbind(m, c(paste(Format(p, fmt="*", na.form = "   "), ifelse(is.na(p), "", .FootNote(3))), rep("", nlevels(x))))

    # collapse a 2-level factor to a single row, naming the reference level
    if(nrow(m) <=3) {
      m[2,1] <- gettextf("%s (= %s)", m[1, 1], row.names(tab)[1])
      m <- m[2, , drop=FALSE]
    }
    colnames(m) <- c("var","total", head(colnames(tab), -1), "")

    m

  }

  # single row for a dichotomous variable: n (%) of the first level,
  # Fisher test for 2x2 tables, chi-square otherwise
  dich_mat <- function(x, g, vname=deparse(substitute(x))){

    tab <- table(x, g)
    if(identical(dim(tab), c(2L,2L))){
      p <- fisher.test(tab)$p.value
      foot <- .FootNote(2)
    } else {
      p <- chisq.test(tab)$p.value
      foot <- .FootNote(3)
    }
    ptab <- prop.table(tab, 2)
    tab <- addmargins(tab, 2)
    ptab <- cbind(ptab, Sum = prop.table(tab[,"Sum"]))

    m <- matrix(NA, nrow=nrow(tab), ncol=ncol(tab))
    m[,] <- gettextf("%s (%s)",
                     Format(tab, fmt=afmt),
                     Format(ptab, fmt=pfmt))
    # totals to the left
    m <- m[, c(ncol(m), 1:(ncol(m)-1)), drop=FALSE]
    m <- rbind(c(vname, m[1,], paste(Format(p, fmt="*", na.form = "   "), foot)))
    colnames(m) <- c("var","total", head(colnames(tab), -1), "")

    m

  }

  if(mode(x) %in% c("logical","numeric","complex","character"))
    x <- data.frame(x)

  # find description types for every column of x
  ctype <- sapply(x, class)
  # should we add "identical type": only one value??
  ctype[sapply(x, IsDichotomous, strict=TRUE, na.rm=TRUE)] <- "dich"
  ctype[sapply(ctype, function(x) any(x %in% c("numeric","integer")))] <- "num"
  ctype[sapply(ctype, function(x) any(x %in% c("factor","ordered","character")))] <- "cat"

  lst <- list()
  for(i in 1:ncol(x)){
    if(ctype[i] == "num"){
      lst[[i]] <- num_row(x[,i], grp, vname=vnames[i])

    } else if(ctype[i] == "cat") {
      lst[[i]] <- cat_mat(x[,i], grp, vname=vnames[i])

    } else if(ctype[i] == "dich") {
      if(default_vnames){
        # only declare the ref level on default_vnames
        lst[[i]] <- dich_mat(x[,i], grp, vname=gettextf("%s (= %s)", vnames[i], head(levels(factor(x[,i])), 1)))
      } else {
        # the user is expected to define ref level, if he wants one
        lst[[i]] <- dich_mat(x[,i], grp, vname=gettextf("%s", vnames[i]))
      }

    } else {
      # unsupported column type: emit an empty placeholder row
      lst[[i]] <- rbind(c(colnames(x)[i], rep(NA, nlevels(grp) + 2)))
    }
  }

  res <- do.call(rbind, lst)

  # optional first row with the group sizes n (%)
  if(add.length)
    res <- rbind(c("n", c(Format(sum(!is.na(grp)), fmt=afmt),
                          paste(Format(table(grp), fmt=afmt), " (",
                                Format(prop.table(table(grp)), fmt=pfmt), ")", sep=""), ""))
                 , res)

  if(!is.null(colnames))
    colnames(res) <- colnames

  # align the table
  if(align != "\\l")
    res[,-c(1, ncol(res))] <- StrAlign(res[,-c(1, ncol(res))], sep = align)

  attr(res, "legend") <- gettextf("%s) %s, %s) Fisher exact test, %s) Chi-Square test\nSignif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1",
                                  .FootNote(1), numtestlab, .FootNote(2), .FootNote(3))

  if(!total)
    res <- res[, -2]

  class(res) <- "TOne"

  return(res)

}
.FootNote <- function(i){
  # internal helper, not exported: return the i-th footnote symbol,
  # configurable via DescToolsOptions("footnote")
  symb <- DescToolsOptions("footnote")
  if(is.null(symb))
    symb <- c("'", '"', '""')   # fall back to the built-in defaults
  return(symb[i])
}
print.TOne <- function(x, ...){
  # Print method for TOne objects: the header row followed by the table body,
  # left-justified and unquoted, then the footnote legend under a separator.
  body <- rbind(colnames(x), x)
  write.table(format(body, justify = "left"),
              row.names = FALSE, col.names = FALSE, quote = FALSE)
  cat("---\n")
  cat(attr(x, "legend"), "\n\n")
}
Flags <- function(x, na.rm=FALSE){
  # Extract the dichotomous ("flag") variables from a data.frame x.
  # na.rm is forwarded to IsDichotomous: if TRUE, NAs are ignored when
  # deciding whether a variable has exactly two distinct values.
  # (bug fix: na.rm was declared but the call hard-coded na.rm=TRUE,
  #  so the argument had no effect)
  res <- x[, sapply(x, IsDichotomous, na.rm=na.rm)]
  class(res) <- "flags"
  return(res)
}
PlotMosaic <- function (x, main = deparse(substitute(x)), horiz = TRUE, cols = NULL,
                        off = 0.02, mar = NULL, xlab = NULL, ylab = NULL, cex=par("cex"), las=2, ...) {

  # Mosaic plot of a 1- or 2-dimensional table x on a unit square:
  # rectangle areas are proportional to the cell frequencies, with gaps of
  # width 'off' between the tiles. horiz controls the split direction,
  # las the orientation of the row/column labels.
  # Returns (invisibly) the label positions (x, y).

  if(length(dim(x))>2){
    warning("PlotMosaic is restricted to max. 2 dimensions")
    invisible()
  }

  if (is.null(xlab))
    xlab <- Coalesce(names(dimnames(x)[2]), "x")
  if (is.null(ylab))
    ylab <- Coalesce(names(dimnames(x)[1]), "y")

  if (is.null(mar)){
    # derive margins from the widths of the longest row/column labels,
    # leaving extra room when axis titles / main are requested
    # ymar <- 5.1
    # xmar <- 6.1
    inches_to_lines <- (par("mar") / par("mai") )[1]   # 5
    lab.width <- max(strwidth(colnames(x), units="inches")) * inches_to_lines
    xmar <- lab.width + 1
    lab.width <- max(strwidth(rownames(x), units="inches")) * inches_to_lines
    ymar <- lab.width + 1

    mar <- c(ifelse(is.na(xlab), 2.1, 5.1), ifelse(is.na(ylab), ymar, ymar+2),
             ifelse(is.na(main), xmar, xmar+4), 1.6)

    # par(mai = c(par("mai")[1], max(par("mai")[2], strwidth(levels(grp), "inch")) +
    #              0.5, par("mai")[3], par("mai")[4]))

  }

  Canvas(xlim = c(0, 1), ylim = c(0, 1), asp = NA, mar = mar)
  col1 <- Pal()[1]
  col2 <- Pal()[2]

  oldpar <- par(xpd = TRUE)
  on.exit(par(oldpar))

  if(any(dim(x)==1)) {
    # degenerate case: effectively a 1-dimensional table, one strip of tiles
    if (is.null(cols))
      cols <- colorRampPalette(c(col1, "white", col2), space = "rgb")(length(x))

    if(horiz){
      ptab <- prop.table(as.vector(x))
      # shrink the tiles so that the gaps fit into the unit interval
      pxt <- ptab * (1 - (length(ptab) - 1) * off)
      y_from <- c(0, cumsum(pxt) + (1:(length(ptab))) * off)[-length(ptab) - 1]
      y_to <- cumsum(pxt) + (0:(length(ptab) - 1)) * off
      if(nrow(x) > ncol(x))
        x <- t(x)
      x_from <- y_from
      x_to <- y_to
      y_from <- 0
      y_to <- 1

    } else {
      ptab <- rev(prop.table(as.vector(x)))
      pxt <- ptab * (1 - (length(ptab) - 1) * off)
      y_from <- c(0, cumsum(pxt) + (1:(length(ptab))) * off)[-length(ptab) - 1]
      y_to <- cumsum(pxt) + (0:(length(ptab) - 1)) * off
      x_from <- 0
      x_to <- 1
      if(ncol(x) > nrow(x))
        x <- t(x)
    }

    rect(xleft = x_from, ybottom = y_from, xright = x_to, ytop = y_to, col = cols)
    txt_y <- apply(cbind(y_from, y_to), 1, mean)
    txt_x <- Midx(c(x_from, 1))

  } else {
    if (horiz) {
      # split vertically by row sums, then each band horizontally by the
      # conditional proportions of the columns
      if (is.null(cols))
        cols <- colorRampPalette(c(col1, "white", col2), space = "rgb")(ncol(x))
      ptab <- Rev(prop.table(x, 1), margin = 1)
      ptab <- ptab * (1 - (ncol(ptab) - 1) * off)
      pxt <- Rev(prop.table(margin.table(x, 1)) * (1 - (nrow(x) - 1) * off))
      y_from <- c(0, cumsum(pxt) + (1:(nrow(x))) * off)[-nrow(x) - 1]
      y_to <- cumsum(pxt) + (0:(nrow(x) - 1)) * off
      x_from <- t((apply(cbind(0, ptab), 1, cumsum) + (0:ncol(ptab)) * off)[-(ncol(ptab) + 1), ])
      x_to <- t((apply(ptab, 1, cumsum) + (0:(ncol(ptab) - 1) * off))[-(ncol(ptab) + 1), ])
      for (j in 1:nrow(ptab)) {
        rect(xleft = x_from[j,], ybottom = y_from[j],
             xright = x_to[j,], ytop = y_to[j], col = cols)
      }
      txt_y <- apply(cbind(y_from, y_to), 1, mean)
      txt_x <- apply(cbind(x_from[nrow(x_from),], x_to[nrow(x_from),]), 1, mean)

      # srt.x <- if (las > 1) 90 else 0
      # srt.y <- if (las == 0 || las == 3) 90 else 0
      #
      # text(labels = Rev(rownames(x)), y = txt_y, x = -0.04, adj = ifelse(srt.y==90, 0.5, 1), cex=cex, srt=srt.y)
      # text(labels = colnames(x), x = txt_x, y = 1.04, adj = ifelse(srt.x==90, 0, 0.5), cex=cex, srt=srt.x)

    } else {
      # split horizontally by column sums, then each band vertically
      if (is.null(cols))
        cols <- colorRampPalette(c(col1, "white", col2), space = "rgb")(nrow(x))
      ptab <- Rev(prop.table(x, 2), margin = 1)
      ptab <- ptab * (1 - (nrow(ptab) - 1) * off)
      pxt <- (prop.table(margin.table(x, 2)) * (1 - (ncol(x) - 1) * off))
      x_from <- c(0, cumsum(pxt) + (1:(ncol(x))) * off)[-ncol(x) - 1]
      x_to <- cumsum(pxt) + (0:(ncol(x) - 1)) * off
      y_from <- (apply(rbind(0, ptab), 2, cumsum) + (0:nrow(ptab)) *
                   off)[-(nrow(ptab) + 1), ]
      y_to <- (apply(ptab, 2, cumsum) + (0:(nrow(ptab) - 1) *
                                           off))[-(nrow(ptab) + 1), ]
      for (j in 1:ncol(ptab)) {
        rect(xleft = x_from[j], ybottom = y_from[, j], xright = x_to[j],
             ytop = y_to[, j], col = cols)
      }
      txt_y <- apply(cbind(y_from[, 1], y_to[, 1]), 1, mean)
      txt_x <- apply(cbind(x_from, x_to), 1, mean)

      # srt.x <- if (las > 1) 90 else 0
      # srt.y <- if (las == 0 || las == 3) 90 else 0
      #
      # text(labels = Rev(rownames(x)), y = txt_y, x = -0.04, adj = ifelse(srt.y==90, 0.5, 1), cex=cex, srt=srt.y)
      # text(labels = colnames(x), x = txt_x, y = 1.04, adj = ifelse(srt.x==90, 0, 0.5), cex=cex, srt=srt.x)

    }
  }

  # label rotation depending on las; labels sit just outside the unit square
  srt.x <- if (las > 1) 90 else 0
  srt.y <- if (las == 0 || las == 3) 90 else 0

  text(labels = Rev(rownames(x)), y = txt_y, x = -0.04, adj = ifelse(srt.y==90, 0.5, 1), cex=cex, srt=srt.y)
  text(labels = colnames(x), x = txt_x, y = 1.04, adj = ifelse(srt.x==90, 0, 0.5), cex=cex, srt=srt.x)

  if (!is.na(main)) {
    # place the title above the plot region, accounting for a missing xlab
    usr <- par("usr")
    plt <- par("plt")
    ym <- usr[4] + diff(usr[3:4])/diff(plt[3:4])*(plt[3]) + (1.2 + is.na(xlab)*4) * strheight('m', cex=1.2, font=2)
    text(x=0.5, y=ym, labels = main, cex=1.2, font=2)
  }

  if (!is.na(xlab)) title(xlab = xlab, line = 1)
  if (!is.na(ylab)) title(ylab = ylab)

  if(!is.null(DescToolsOptions("stamp")))
    Stamp()

  invisible(list(x = txt_x, y = txt_y))

}
###
# see also package Mosaic
# modelVars extract predictor variables from a model
ParseFormula <- function(formula, data=parent.frame(), drop = TRUE) {

  # Split a two-sided formula into its left- and right-hand side and evaluate
  # both sides independently (each with right-hand-side logic), returning for
  # each side the model frame, the evaluated variables (with interactions
  # resolved) and the variable names. A '.' is supported on one side only.
  # drop: drop unused levels when building interaction terms.

  xhs <- function(formula, data = parent.frame(), na.action=na.pass){

    # evaluate a one-sided formula: returns the model frame, the evaluated
    # terms (interactions expanded via interaction()) and the term labels

    # get all variables out of the formula
    vars <- attr(terms(formula, data=data), "term.labels")

    # evaluate model.frame by rebuilding the call (standard NSE idiom)
    mf <- match.call(expand.dots = FALSE)
    m <- match(c("formula", "data", "na.action"), names(mf), 0)
    mf <- mf[c(1, m)]
    mf$na.action <- na.action
    mf$drop.unused.levels <- TRUE
    mf[[1]] <- as.name("model.frame")
    mf.rhs <- eval.parent(mf)

    # model frame does not evaluate interaction, so let's do that here
    d.tmp <- mf.rhs[,FALSE]  # create a new data.frame
    for(x in vars){
      if( length(grep(":", x))>0 )   # there's a : in the variable
        d.tmp <- data.frame(d.tmp,
                            interaction( mf.rhs[, names(mf.rhs)[names(mf.rhs) %in% unlist(strsplit(x, ":"))]],
                                         sep=":", drop = drop)  # set drop unused levels to TRUE here by default
        )
      else
        d.tmp <- data.frame(d.tmp, mf.rhs[,x])
    }
    names(d.tmp) <- vars

    return(list(formula=formula, mf=mf.rhs, mf.eval=d.tmp, vars=vars))
  }

  f1 <- formula

  # evaluate subset
  m <- match.call(expand.dots = FALSE)

  # do not support . on both sides of the formula
  if( (length(grep("^\\.$", all.vars(f1[[2]])))>0) && (length(grep("^\\.$", all.vars(f1[[3]])))>0) )
    stop("dot argument on both sides of the formula are not supported")

  # swap left and right hand side and take just the right side
  # so both sides are evaluated with right side logic, but independently
  lhs <- xhs(formula(paste("~", deparse(f1[[2]])), data=data), data=data)
  rhs <- xhs(formula(paste("~", deparse(f1[[3]])), data=data), data=data)

  # now handle the dot argument: '.' expands to "all variables not used on
  # the other side", so remove the other side's variables from this side
  if(any(all.vars(f1[[2]]) == ".")){    # dot on the left side
    lhs$vars <- lhs$vars[!lhs$vars %in% rhs$vars]
    lhs$mf <- lhs$mf[lhs$vars]
    lhs$mf.eval <- lhs$mf.eval[lhs$vars]

  } else if(any(all.vars(f1[[3]]) == ".")){    # dot on the right side
    rhs$vars <- rhs$vars[!rhs$vars %in% lhs$vars]
    rhs$mf <- rhs$mf[rhs$vars]
    rhs$mf.eval <- rhs$mf.eval[rhs$vars]

  } else {  # no dot: do nothing
  }

  list(formula=formula, lhs=list(mf=lhs$mf, mf.eval=lhs$mf.eval, vars=lhs$vars),
       rhs=list(mf=rhs$mf, mf.eval=rhs$mf.eval, vars=rhs$vars))

}
###
## Word fundamentals ====
createCOMReference <- function(ref, className) {
  # Thin wrapper around RDCOMClient::createCOMReference so COM references
  # can be created via this package without attaching RDCOMClient.
  RDCOMClient::createCOMReference(ref, className)
}
GetCurrWrd <- function() {

  # Return a COM handle to a running MS Word instance (Windows only, via
  # RDCOMClient) and store it in DescToolsOptions("lastWord").
  # Returns NULL (with a warning) when RDCOMClient is not available.

  # stopifnot(require(RDCOMClient))
  if (requireNamespace("RDCOMClient", quietly = FALSE)) {

    # there's no "get"-function in RDCOMClient, so just create a new here..
    hwnd <- RDCOMClient::COMCreate("Word.Application", existing=TRUE)

    if(is.null(hwnd)) warning("No running Word application found!")
    # options(lastWord = hwnd)
    DescToolsOptions(lastWord = hwnd)

  } else {

    if(Sys.info()["sysname"] == "Windows")
      warning("RDCOMClient is not available. To install it use: install.packages('RDCOMClient', repos = 'http://www.stats.ox.ac.uk/pub/RWin/')")
    else
      warning(gettextf("RDCOMClient is unfortunately not available for %s systems (Windows-only).", Sys.info()["sysname"]))

    # bug fix: this branch assigned 'wrd <- NULL', leaving 'hwnd' undefined
    # and making the invisible(hwnd) below fail with "object not found"
    hwnd <- NULL
  }

  invisible(hwnd)

}
GetNewWrd <- function(visible = TRUE, template = "Normal", header=FALSE
                      , main="Descriptive report") {

  # Start a new MS Word instance (Windows only, via RDCOMClient), create a
  # document based on 'template' and optionally (header=TRUE) prepare it as
  # a report with title page, table of contents and footer (see .WrdPrepRep).
  # The handle is stored in DescToolsOptions("lastWord") and returned invisibly;
  # NULL is returned (with a warning) when RDCOMClient is not available.

  # stopifnot(require(RDCOMClient))

  if (requireNamespace("RDCOMClient", quietly = FALSE)) {

    # Starts the Word application with wrd as handle
    hwnd <- RDCOMClient::COMCreate("Word.Application", existing=FALSE)
    DescToolsOptions(lastWord = hwnd)

    if( visible == TRUE ) hwnd[["Visible"]] <- TRUE

    # Create a new document based on template
    # VBA code:
    #     Documents.Add Template:= _
    #         "O:\G\GI\_Admin\Administration\09_Templates\newlogo_GI_doc_bericht.dot", _
    #         NewTemplate:=False, DocumentType:=0
    #
    newdoc <- hwnd[["Documents"]]$Add(template, FALSE, 0)

    # prepare word document, with front page, table of contents, footer ...
    if(header) .WrdPrepRep( wrd=hwnd, main=main )

  } else {

    if(Sys.info()["sysname"] == "Windows")
      warning("RDCOMClient is not available. To install it use: install.packages('RDCOMClient', repos = 'http://www.stats.ox.ac.uk/pub/RWin/')")
    else
      warning(gettextf("RDCOMClient is unfortunately not available for %s systems (Windows-only).", Sys.info()["sysname"]))

    hwnd <- NULL
  }

  invisible( hwnd )

}
WrdKill <- function(){
  # Forcefully terminate all running WINWORD.EXE processes (Windows only).
  # Word might not always quit and end the task
  # so killing the task is "ultima ratio"...
  shell('taskkill /F /IM WINWORD.EXE')
}
.WrdPrepRep <- function(wrd, main="Bericht" ){

  # internal use only, called from GetNewWrd():
  # prepares a fresh Word document for a report: shows the document map,
  # writes the title (main) and a date/user line, inserts a table of
  # contents and a footer with date/user and page number.

  # constants
  #   wdPageBreak <- 7
  #   wdSeekCurrentPageHeader <- 9    ### page header
  #   wdSeekCurrentPageFooter <- 10   ### page footer
  #   wdSeekMainDocument <- 0
  #   wdPageFitBestFit <- 2
  #   wdFieldEmpty <- -1

  # Show DocumentMap
  wrd[["ActiveWindow"]][["DocumentMap"]] <- TRUE
  wrdWind <- wrd[["ActiveWindow"]][["ActivePane"]][["View"]][["Zoom"]]
  wrdWind[["PageFit"]] <- wdConst$wdPageFitBestFit

  wrd[["Selection"]]$TypeParagraph()
  wrd[["Selection"]]$TypeParagraph()
  wrd[["Selection"]]$WholeStory()
  # commented out 15.1.2012: WrdSetFont(wrd=wrd)

  # idea: define a heading style (does not work, though!)
  #wrd[["ActiveDocument"]][["Styles"]]$Item("ueberschrift 2")[["Font"]][["Name"]] <- "Consolas"
  #wrd[["ActiveDocument"]][["Styles"]]$Item("ueberschrift 2")[["Font"]][["Size"]] <- 10
  #wrd[["ActiveDocument"]][["Styles"]]$Item("ueberschrift 2")[["Font"]][["Bold"]] <- TRUE
  #wrd[["ActiveDocument"]][["Styles"]]$Item("ueberschrift 2")[["ParagraphFormat"]]["Borders"]]$Item(wdBorderTop)[["LineStyle"]] <- wdConst$wdLineStyleSingle

  # title, then a date/user line, then a page break
  WrdCaption( main, wrd=wrd)
  wrd[["Selection"]]$TypeText(gettextf("%s/%s\n",format(Sys.time(), "%d.%m.%Y"), Sys.getenv("username")))
  wrd[["Selection"]]$InsertBreak( wdConst$wdPageBreak)

  # insert table of contents ***************
  wrd[["ActiveDocument"]][["TablesOfContents"]]$Add( wrd[["Selection"]][["Range"]] )
  # Original VB-Code:
  #     With ActiveDocument
  #         .TablesOfContents.Add Range:=Selection.Range, RightAlignPageNumbers:= _
  #             True, UseHeadingStyles:=True, UpperHeadingLevel:=1, _
  #             LowerHeadingLevel:=2, IncludePageNumbers:=True, AddedStyles:="", _
  #             UseHyperlinks:=True, HidePageNumbersInWeb:=True, UseOutlineLevels:= _
  #             True
  #         .TablesOfContents(1).TabLeader = wdTabLeaderDots
  #         .TablesOfContents.Format = wdIndexIndent
  #     End With

  # footer: date/user on the left, page number field on the right ***********
  wrdView <- wrd[["ActiveWindow"]][["ActivePane"]][["View"]]
  wrdView[["SeekView"]] <- wdConst$wdSeekCurrentPageFooter
  wrd[["Selection"]]$TypeText( gettextf("%s/%s\t\t",format(Sys.time(), "%d.%m.%Y"), Sys.getenv("username")) )
  wrd[["Selection"]][["Fields"]]$Add( wrd[["Selection"]][["Range"]], wdConst$wdFieldEmpty, "PAGE" )

  # Roland did not want this (23.11.2014):
  # wrd[["Selection"]]$TypeText("\n\n")

  # switch back to the main document and start content on a new page
  wrdView[["SeekView"]] <- wdConst$wdSeekMainDocument
  wrd[["Selection"]]$InsertBreak( wdConst$wdPageBreak)

  invisible()

}
# put that to an example...
# WrdPageBreak <- function( wrd = .lastWord ) {
# wrd[["Selection"]]$InsertBreak(wdConst$wdPageBreak)
# }
# Generic: send an R object to the cursor position of a running MS Word
# instance. Methods exist for character, table, matrix, data.frame, ftable,
# Freq, TOne, abstract, lm and a default method.
#   x    : object to insert
#   font : optional font definition, list(name, size, bold, italic, color)
#   wrd  : COM handle to the Word instance (default: last one used)
ToWrd <- function(x, font=NULL, ..., wrd=DescToolsOptions("lastWord")){
  UseMethod("ToWrd")
}
# Default method: capture the object's printed representation
# (.CaptOut is an internal capture.output wrapper) and insert it as text.
ToWrd.default <- function(x, font=NULL, ..., wrd=DescToolsOptions("lastWord")){
  ToWrd.character(x=.CaptOut(x), font=font, ..., wrd=wrd)
  invisible()
}
# Insert a TOne object (descriptive "Table One") into Word: the table itself,
# its legend in a smaller font, and an optional table caption.
#   para    : optional paragraph format applied to the table
#   main    : caption text; inserted as a Word "Table" caption
#   align, autofit : passed on to ToWrd.table
# Returns the handle to the created Word table invisibly.
ToWrd.TOne <- function(x, font=NULL, para=NULL, main=NULL, align=NULL,
                       autofit=TRUE, ..., wrd=DescToolsOptions("lastWord")){
  wTab <- ToWrd.table(x, main=NULL, font=font, align=align, autofit=autofit, wrd=wrd, ...)
  if(!is.null(para)){
    wTab$Select()
    WrdParagraphFormat(wrd) <- para
    # move out of table
    wrd[["Selection"]]$EndOf(wdConst$wdTable)
    wrd[["Selection"]]$MoveRight(wdConst$wdCharacter, 2, 0)
  }
  # the legend is set 2 points smaller than the table font
  if(is.null(font)) font <- list()
  if(is.null(font$size))
    font$size <- WrdFont(wrd)$size - 2
  else
    font$size <- font$size - 2
  ToWrd.character(paste("\n", attr(x, "legend"), "\n\n", sep=""),
                  font=font, wrd=wrd)
  if(!is.null(main)){
    # insert a Word "Table" caption below the table
    sel <- wrd$Selection() # "Abbildung"
    sel$InsertCaption(Label=wdConst$wdCaptionTable, Title=paste(" - ", main, sep=""))
    sel$TypeParagraph()
  }
  invisible(wTab)
}
# Insert an abstract object (data.frame overview created by Abstract()) into
# Word: main title as caption, optional label in 8pt, a summary line with the
# dimensions, and the table itself left-aligned.
# Returns the handle to the created Word table invisibly.
ToWrd.abstract <- function(x, font=NULL, autofit=TRUE, ..., wrd=DescToolsOptions("lastWord")){
  WrdCaption(x=attr(x, "main"), wrd=wrd)
  if(!is.null(attr(x, "label"))){
    # the label is always printed in 8pt, other font attributes are kept
    if(is.null(font)){
      lblfont <- list(fontsize=8)
    } else {
      lblfont <- font
      lblfont$fontsize <- 8
    }
    ToWrd.character(paste("\n", attr(x, "label"), "\n", sep=""),
                    font = lblfont, wrd=wrd)
  }
  ToWrd.character(gettextf("\ndata.frame: %s obs. of %s variables\n\n", attr(x, "nrow"), attr(x, "ncol"))
                  , font=font, wrd=wrd)
  wTab <- ToWrd.data.frame(x, wrd=wrd, autofit=autofit, font=font, align="l", ...)
  invisible(wTab)
}
# Method for lm objects.
# NOTE(review): placeholder implementation — currently inserts nothing.
ToWrd.lm <- function(x, font=NULL, ..., wrd=DescToolsOptions("lastWord")){
  invisible()
}
# Insert plain text into Word at the current cursor position.
#   x     : character vector; elements are joined with newlines
#   font  : list(name, size, bold, italic, color), or the keyword "fix"
#           which selects DescToolsOptions("fixedfont") (default Courier New 8)
#   para  : optional paragraph format, applied via WrdParagraphFormat<-
#   style : optional Word style name, applied via WrdStyle<-
# The cursor is collapsed to the end of the inserted text before returning.
ToWrd.character <- function (x, font = NULL, para = NULL, style = NULL, ..., wrd = DescToolsOptions("lastWord")) {
  # we will convert UTF-8 strings to Latin-1, if the local info is Latin-1
  if(any(l10n_info()[["Latin-1"]] & Encoding(x)=="UTF-8"))
    x <- iconv(x, from="UTF-8", to="latin1")
  wrd[["Selection"]]$InsertAfter(paste(x, collapse = "\n"))
  if (!is.null(style))
    WrdStyle(wrd) <- style
  if (!is.null(para))
    WrdParagraphFormat(wrd) <- para
  if(identical(font, "fix")){
    # "fix" is a shortcut for the user-defined fixed (monospaced) font
    font <- DescToolsOptions("fixedfont")
    if(is.null(font))
      font <- structure(list(name="Courier New", size=8), class="font")
  }
  if(!is.null(font)){
    # apply the font to the inserted text; the previous font is restored
    # on exit so following text keeps the document's current font
    currfont <- WrdFont(wrd)
    WrdFont(wrd) <- font
    on.exit(WrdFont(wrd) <- currfont)
  }
  wrd[["Selection"]]$Collapse(Direction=wdConst$wdCollapseEnd)
  invisible()
}
# Insert text x as a heading with the built-in Word style "Heading <index>"
# at the current cursor position.
#   x     : the heading text
#   index : heading level (1..9), mapped to wdConst$wdStyleHeading<index>
#   wrd   : COM handle to the Word instance
# Fix: forward the wrd handle to ToWrd.character — previously the function
# always wrote to DescToolsOptions("lastWord"), silently ignoring an
# explicitly supplied handle.
WrdCaption <- function(x, index = 1, wrd = DescToolsOptions("lastWord")){
  ToWrd.character(paste(x, "\n", sep=""),
                  style=eval(parse(text=gettextf("wdConst$wdStyleHeading%s", index))),
                  wrd=wrd)
  invisible()
}
# Insert a PercTable (percentage table) into Word by delegating its
# flat-table component to the ftable method.
ToWrd.PercTable <- function(x, font=NULL, main = NULL, ..., wrd = DescToolsOptions("lastWord")){
  ToWrd.ftable(x$ftab, font=font, main=main, ..., wrd=wrd)
}
# Insert a data.frame into Word as a table. All columns are coerced to
# character first (apply() turns x into a character matrix). If row.names is
# NULL, row names are written only when they differ from the default 1..n.
ToWrd.data.frame <- function(x, font=NULL, main = NULL, row.names=NULL, ..., wrd = DescToolsOptions("lastWord")){
  x <- apply(x, 2, as.character)
  # note: the else binds to the inner if (no braces) — row.names stays
  # NULL-derived FALSE only for default rownames 1..n
  if(is.null(row.names))
    if(identical(row.names(x), as.character(1:nrow(x))))
      row.names <- FALSE
    else
      row.names <- TRUE
  ToWrd.table(x=x, font=font, main=main, row.names=row.names, ..., wrd=wrd)
}
# ToWrd.data.frame <- function(x, font=NULL, main = NULL, row.names=NULL, as.is=FALSE, ..., wrd = DescToolsOptions("lastWord")){
#
# if(as.is)
# x <- apply(x, 2, as.character)
# else
# x <- FixToTable(capture.output(x))
#
# if(is.null(row.names))
# if(identical(row.names, seq_along(1:nrow(x))))
# row.names <- FALSE
# else
# row.names <- TRUE
#
# if(row.names==TRUE)
# x <- cbind(row.names(x), x)
#
# ToWrd.table(x=x, font=font, main=main, ..., wrd=wrd)
# }
# Insert a matrix into Word by delegating to the table method.
ToWrd.matrix <- function(x, font=NULL, main = NULL, ..., wrd = DescToolsOptions("lastWord")){
  ToWrd.table(x=x, font=font, main=main, ..., wrd=wrd)
}
# Insert a Freq (frequency table) object into Word. Columns 3 and 5
# (presumably the percentage columns — TODO confirm against Freq()) are
# rounded and formatted to 3 digits before delegating to the
# data.frame method.
ToWrd.Freq <- function(x, font=NULL, main = NULL, ..., wrd = DescToolsOptions("lastWord")){
  x[,c(3,5)] <- sapply(round(x[,c(3,5)], 3), Format, digits=3)
  res <- ToWrd.data.frame(x=x, main=main, font=font, wrd=wrd)
  invisible(res)
}
# Insert a flat contingency table (ftable) into Word. The table is first
# rendered to a character matrix using a verbatim copy of R's unexported
# stats:::format.ftable (kept byte-identical on purpose), trimmed and then
# passed to ToWrd.table. Row-variable columns are left-aligned, data columns
# right-aligned unless `align` is given.
#   method : layout of the variable labels, see ?stats::format.ftable
# Returns the handle to the created Word table invisibly.
ToWrd.ftable <- function (x, font = NULL, main = NULL, align=NULL, method = "compact", ..., wrd = DescToolsOptions("lastWord")) {
  # simple version:
  # x <- FixToTable(capture.output(x))
  # ToWrd.character(x, font=font, main=main, ..., wrd=wrd)
  # let R do all the complicated formatting stuff
  # but we can't import a not exported function, so we provide an own copy of it
  # so this is a verbatim copy of it
  .format.ftable <- function (x, quote = TRUE, digits = getOption("digits"), method = c("non.compact",
                                                                                        "row.compact", "col.compact", "compact"), lsep = " | ", ...)
  {
    if (!inherits(x, "ftable"))
      stop("'x' must be an \"ftable\" object")
    charQuote <- function(s) if (quote && length(s))
      paste0("\"", s, "\"")
    else s
    makeLabels <- function(lst) {
      lens <- lengths(lst)
      cplensU <- c(1, cumprod(lens))
      cplensD <- rev(c(1, cumprod(rev(lens))))
      y <- NULL
      for (i in rev(seq_along(lst))) {
        ind <- 1 + seq.int(from = 0, to = lens[i] - 1) *
          cplensD[i + 1L]
        tmp <- character(length = cplensD[i])
        tmp[ind] <- charQuote(lst[[i]])
        y <- cbind(rep(tmp, times = cplensU[i]), y)
      }
      y
    }
    makeNames <- function(x) {
      nmx <- names(x)
      if (is.null(nmx))
        rep_len("", length(x))
      else nmx
    }
    l.xrv <- length(xrv <- attr(x, "row.vars"))
    l.xcv <- length(xcv <- attr(x, "col.vars"))
    method <- match.arg(method)
    if (l.xrv == 0) {
      if (method == "col.compact")
        method <- "non.compact"
      else if (method == "compact")
        method <- "row.compact"
    }
    if (l.xcv == 0) {
      if (method == "row.compact")
        method <- "non.compact"
      else if (method == "compact")
        method <- "col.compact"
    }
    LABS <- switch(method, non.compact = {
      cbind(rbind(matrix("", nrow = length(xcv), ncol = length(xrv)),
                  charQuote(makeNames(xrv)), makeLabels(xrv)), c(charQuote(makeNames(xcv)),
                                                                 rep("", times = nrow(x) + 1)))
    }, row.compact = {
      cbind(rbind(matrix("", nrow = length(xcv) - 1, ncol = length(xrv)),
                  charQuote(makeNames(xrv)), makeLabels(xrv)), c(charQuote(makeNames(xcv)),
                                                                 rep("", times = nrow(x))))
    }, col.compact = {
      cbind(rbind(cbind(matrix("", nrow = length(xcv), ncol = length(xrv) -
                                 1), charQuote(makeNames(xcv))), charQuote(makeNames(xrv)),
                  makeLabels(xrv)))
    }, compact = {
      xrv.nms <- makeNames(xrv)
      xcv.nms <- makeNames(xcv)
      mat <- cbind(rbind(cbind(matrix("", nrow = l.xcv - 1,
                                      ncol = l.xrv - 1), charQuote(makeNames(xcv[-l.xcv]))),
                         charQuote(xrv.nms), makeLabels(xrv)))
      mat[l.xcv, l.xrv] <- paste(tail(xrv.nms, 1), tail(xcv.nms,
                                                        1), sep = lsep)
      mat
    }, stop("wrong method"))
    DATA <- rbind(if (length(xcv))
      t(makeLabels(xcv)), if (method %in% c("non.compact",
                                            "col.compact"))
        rep("", times = ncol(x)), format(unclass(x), digits = digits,
                                         ...))
    cbind(apply(LABS, 2L, format, justify = "left"), apply(DATA,
                                                           2L, format, justify = "right"))
  }
  tab <- .format.ftable(x, quote=FALSE, method=method, lsep="")
  # strip the padding inserted by format() before handing over to Word
  tab <- StrTrim(tab)
  if(is.null(align))
    align <- c(rep("l", length(attr(x, "row.vars"))), rep("r", ncol(x)))
  wtab <- ToWrd.table(tab, font=font, main=main, align=align, ..., wrd=wrd)
  invisible(wtab)
}
# Core table writer: insert a (character) table into Word. The cells are
# pasted as tab-separated text and converted to a Word table; borders,
# alignment, font and column widths are set afterwards.
#   x          : table/matrix; coerced to character
#   align      : per-column alignment "l"/"c"/"r" (recycled); default first
#                column left (if row.names) and the rest right
#   tablestyle : NULL = manual border layout (top/bottom rules), NA = none,
#                otherwise a Word table style name/id
#   autofit    : auto-fit column widths?
#   row.names, col.names : include names as first column / first row?
# Returns the handle to the created Word table invisibly.
ToWrd.table <- function (x, font = NULL, main = NULL, align=NULL, tablestyle=NULL, autofit = TRUE,
                         row.names=FALSE, col.names=TRUE, ..., wrd = DescToolsOptions("lastWord")) {
  x[] <- as.character(x)
  # add column names to character table
  if(col.names)
    x <- rbind(colnames(x), x)
  if(row.names){
    rown <- rownames(x)
    #    if(col.names)
    #      rown <- c("", rown)
    x <- cbind(rown, x)
  }
  # replace potential \n in table with /cr, as convertToTable would make a new cell for them
  x <- gsub(pattern= "\n", replacement = "/cr", x = x)
  # paste the cells and separate by \t
  txt <- paste(apply(x, 1, paste, collapse="\t"), collapse="\n")
  nc <- ncol(x)
  nr <- nrow(x)
  # insert and convert
  wrd[["Selection"]]$InsertAfter(txt)
  wrdTable <- wrd[["Selection"]]$ConvertToTable(Separator = wdConst$wdSeparateByTabs,
                                                NumColumns = nc, NumRows = nr,
                                                AutoFitBehavior = wdConst$wdAutoFitFixed)
  wrdTable[["ApplyStyleHeadingRows"]] <- col.names
  # replace /cr by \n again in word (^l is Word's manual line break)
  wrd[["Selection"]][["Find"]]$ClearFormatting()
  wsel <- wrd[["Selection"]][["Find"]]
  wsel[["Text"]] <- "/cr"
  wrep <- wsel[["Replacement"]]
  wrep[["Text"]] <- "^l"
  wsel$Execute(Replace=wdConst$wdReplaceAll)
  # http://www.thedoctools.com/downloads/DocTools_List_Of_Built-in_Style_English_Danish_German_French.pdf
  if(is.null(tablestyle)){
    # manual layout: rule above and below the header row, rule below the last row
    WrdTableBorders(wrdTable, from=c(1,1), to=c(1, nc),
                    border = wdConst$wdBorderTop, wrd=wrd)
    if(col.names)
      WrdTableBorders(wrdTable, from=c(1,1), to=c(1, nc),
                      border = wdConst$wdBorderBottom, wrd=wrd)
    WrdTableBorders(wrdTable, from=c(nr, 1), to=c(nr, nc),
                    border = wdConst$wdBorderBottom, wrd=wrd)
    # vertical padding proportional to the font size, rounded to 0.5 pt
    space <- RoundTo((if(is.null(font$size)) WrdFont(wrd)$size else font$size) * .2, multiple = .5)
    wrdTable$Rows(1)$Select()
    WrdParagraphFormat(wrd) <- list(SpaceBefore=space, SpaceAfter=space)
    if(col.names){
      wrdTable$Rows(2)$Select()
      WrdParagraphFormat(wrd) <- list(SpaceBefore=space)
    }
    wrdTable$Rows(nr)$Select()
    WrdParagraphFormat(wrd) <- list(SpaceAfter=space)
    #    wrdTable[["Style"]] <- -115   # code for "Tabelle Klassisch 1"
  } else
    if(!is.na(tablestyle))
      wrdTable[["Style"]] <- tablestyle
  # align the columns
  if(is.null(align))
    align <- c(rep("l", row.names), rep(x = "r", nc-row.names))
  else
    align <- rep(align, length.out=nc)
  align[align=="l"] <- wdConst$wdAlignParagraphLeft
  align[align=="c"] <- wdConst$wdAlignParagraphCenter
  align[align=="r"] <- wdConst$wdAlignParagraphRight
  for(i in seq_along(align)){
    wrdTable$Columns(i)$Select()
    wrdSel <- wrd[["Selection"]]
    wrdSel[["ParagraphFormat"]][["Alignment"]] <- align[i]
  }
  if(!is.null(font)){
    wrdTable$Select()
    WrdFont(wrd) <- font
  }
  if(autofit)
    wrdTable$Columns()$AutoFit()
  # move the cursor out of the table to the position directly behind it
  # This code will get you out of the table and put the text cursor directly behind it:
  wrdTable$Select()
  wrd[["Selection"]]$Collapse(wdConst$wdCollapseEnd)
  # instead of goint to the end of the document ...
  # Selection.GoTo What:=wdGoToPercent, Which:=wdGoToLast
  # wrd[["Selection"]]$GoTo(What = wdConst$wdGoToPercent, Which= wdConst$wdGoToLast)
  if(!is.null(main)){
    # insert caption
    sel <- wrd$Selection() # "Abbildung"
    sel$InsertCaption(Label=wdConst$wdCaptionTable, Title=paste(" - ", main, sep=""))
    sel$TypeParagraph()
  }
  wrd[["Selection"]]$TypeParagraph()
  invisible(wrdTable)
}
# Paint borders on (a part of) a Word table.
#   wtab   : handle to the Word table (e.g. as returned by ToWrd.table)
#   from   : c(row, col) of the upper-left cell of the range (default c(1,1))
#   to     : c(row, col) of the lower-right cell (default: last row/column)
#   border : wdConst$wdBorder* constant(s) to set; default: all outer and
#            inner borders
#   lty, col, lwd : line style, color and width (wdConst values)
#   wrd    : COM handle to the Word instance
# Improvement: `wrd` now defaults to DescToolsOptions("lastWord"),
# consistent with every other Wrd* function in this file (backward
# compatible — explicit handles still work as before).
WrdTableBorders <- function (wtab, from = NULL, to = NULL, border = NULL,
                             lty = wdConst$wdLineStyleSingle, col=wdConst$wdColorBlack,
                             lwd = wdConst$wdLineWidth050pt,
                             wrd = DescToolsOptions("lastWord")) {
  # paint borders of a table
  if(is.null(from))
    from <- c(1,1)
  if(is.null(to))
    to <- c(wtab[["Rows"]]$Count(), wtab[["Columns"]]$Count())
  # build and select the cell range spanning from..to
  rng <- wrd[["ActiveDocument"]]$Range(start=wtab$Cell(from[1], from[2])[["Range"]][["Start"]],
                                       end=wtab$Cell(to[1], to[2])[["Range"]][["End"]])
  rng$Select()
  if(is.null(border))
    # use all borders by default
    border <- wdConst[c("wdBorderTop","wdBorderBottom","wdBorderLeft","wdBorderRight",
                        "wdBorderHorizontal","wdBorderVertical")]
  for(b in border){
    wborder <- wrd[["Selection"]]$Borders(b)
    wborder[["LineStyle"]] <- lty
    wborder[["Color"]] <- col
    wborder[["LineWidth"]] <- lwd
  }
  invisible()
}
# Return a Word Range object spanning the cells rstart=c(row,col) to
# rend=c(row,col) of the table wtab. The parent document is taken from
# the table handle itself.
WrdCellRange <- function(wtab, rstart, rend) {
  # returns a handle for the table range
  wtrange <- wtab[["Parent"]]$Range(
    wtab$Cell(rstart[1], rstart[2])[["Range"]][["Start"]],
    wtab$Cell(rend[1], rend[2])[["Range"]][["End"]]
  )
  return(wtrange)
}
# Merge the rectangular cell range rstart..rend (each c(row, col)) of the
# Word table wtab into a single cell.
WrdMergeCells <- function(wtab, rstart, rend) {
  rng <- WrdCellRange(wtab, rstart, rend)
  rng[["Cells"]]$Merge()
}
# Format a rectangular cell range of a Word table: foreground/background
# shading, font, paragraph alignment and cell borders.
#   wtab   : handle to the Word table
#   rstart, rend : c(row, col) of the upper-left / lower-right cell
#   col, bg : shading foreground / background pattern color
#   font   : list(name, size, bold, italic, color); only given elements set
#   border : TRUE for a default box, or a list(border=, linestyle=,
#            linewidth=, color=) whose elements are recycled per border side
#   align  : "l", "c" or "r" paragraph alignment
WrdFormatCells <- function(wtab, rstart, rend, col=NULL, bg=NULL, font=NULL,
                           border=NULL, align=NULL){
  rng <- WrdCellRange(wtab, rstart, rend)
  shad <- rng[["Shading"]]
  if (!is.null(col))
    shad[["ForegroundPatternColor"]] <- col
  if (!is.null(bg))
    shad[["BackgroundPatternColor"]] <- bg
  wrdFont <- rng[["Font"]]
  if (!is.null(font$name))
    wrdFont[["Name"]] <- font$name
  if (!is.null(font$size))
    wrdFont[["Size"]] <- font$size
  if (!is.null(font$bold))
    wrdFont[["Bold"]] <- font$bold
  if (!is.null(font$italic))
    wrdFont[["Italic"]] <- font$italic
  if (!is.null(font$color))
    wrdFont[["Color"]] <- font$color
  if (!is.null(align)) {
    # translate "l"/"c"/"r" to the corresponding Word alignment constant
    align <- match.arg(align, choices = c("l", "c", "r"))
    align <- Lookup(align, ref = c("l", "c", "r"),
                    val = unlist(wdConst[c("wdAlignParagraphLeft",
                                           "wdAlignParagraphCenter",
                                           "wdAlignParagraphRight")]))
    rng[["ParagraphFormat"]][["Alignment"]] <- align
  }
  if(!is.null(border)) {
    if(identical(border, TRUE))
      # set default values
      border <- list(border=c(wdConst$wdBorderBottom,
                              wdConst$wdBorderLeft,
                              wdConst$wdBorderTop,
                              wdConst$wdBorderRight),
                     linestyle=wdConst$wdLineStyleSingle,
                     linewidth=wdConst$wdLineWidth025pt,
                     color=wdConst$wdColorBlack)
    if(is.null(border$border))
      border$border <- c(wdConst$wdBorderBottom,
                         wdConst$wdBorderLeft,
                         wdConst$wdBorderTop,
                         wdConst$wdBorderRight)
    if(is.null(border$linestyle))
      border$linestyle <- wdConst$wdLineStyleSingle
    # Recycle() brings all border attributes to a common length; one
    # iteration per border side
    border <- do.call(Recycle, border)
    for(i in 1:attr(border, which = "maxdim")) {
      b <- rng[["Borders"]]$Item(border$border[i])
      if(!is.null(border$linestyle[i]))
        b[["LineStyle"]] <- border$linestyle[i]
      if(!is.null(border$linewidth[i]))
        b[["LineWidth"]] <- border$linewidth[i]
      if(!is.null(border$color))
        b[["Color"]] <- border$color[i]
    }
  }
}
# Get and set font
# Read the font at the current selection of the Word instance `wrd`.
# Returns a list of class "font" with elements name, size, bold, italic and
# color; the color value carries the name of the matching wdColor* constant.
WrdFont <- function(wrd = DescToolsOptions("lastWord") ) {
  # returns the font object list: list(name, size, bold, italic) on the current position
  wrdSel <- wrd[["Selection"]]
  wrdFont <- wrdSel[["Font"]]
  currfont <- list(
    name = wrdFont[["Name"]] ,
    size = wrdFont[["Size"]] ,
    bold = wrdFont[["Bold"]] ,
    italic = wrdFont[["Italic"]],
    # look up the readable name of the colour among the wdColor* constants
    color = setNames(wrdFont[["Color"]], names(which(
      wdConst==wrdFont[["Color"]] & grepl("wdColor", names(wdConst)))))
  )
  class(currfont) <- "font"
  return(currfont)
}
`WrdFont<-` <- function(wrd, value){

  # Setter for the character font at the current selection in Word.
  # `value` is a list that may contain the elements name, size, bold,
  # italic and color; only the elements actually supplied are changed.
  # Returns wrd, as required by the replacement function protocol.

  fnt <- wrd[["Selection"]][["Font"]]

  # map list element name -> COM font property name
  props <- c(name = "Name", size = "Size", bold = "Bold",
             italic = "Italic", color = "Color")

  for(p in names(props)){
    # exact = FALSE mimics the partial matching of $-access
    v <- value[[p, exact = FALSE]]
    if(!is.null(v))
      fnt[[props[[p]]]] <- v
  }

  return(wrd)
}
# Get and set ParagraphFormat
WrdParagraphFormat <- function(wrd = DescToolsOptions("lastWord") ) {

  # Read the paragraph format of the current selection in the Word
  # instance `wrd` and return it as a named list of class "paragraph".
  # The element names correspond 1:1 to the COM properties of Word's
  # ParagraphFormat object.

  pfmt <- wrd[["Selection"]][["ParagraphFormat"]]

  props <- c("LeftIndent", "RightIndent", "SpaceBefore", "SpaceBeforeAuto",
             "SpaceAfter", "SpaceAfterAuto", "LineSpacingRule", "Alignment",
             "WidowControl", "KeepWithNext", "KeepTogether", "PageBreakBefore",
             "NoLineNumber", "Hyphenation", "FirstLineIndent", "OutlineLevel",
             "CharacterUnitLeftIndent", "CharacterUnitRightIndent",
             "CharacterUnitFirstLineIndent", "LineUnitBefore", "LineUnitAfter",
             "MirrorIndents")
  # "TextboxTightWrap" is intentionally not read (see setter)

  res <- sapply(props, function(p) pfmt[[p]], simplify = FALSE)
  class(res) <- "paragraph"

  return(res)
}
`WrdParagraphFormat<-` <- function(wrd, value){

  # Setter counterpart of WrdParagraphFormat(): applies all paragraph
  # properties present in `value` (a list, e.g. of class "paragraph") to
  # the current selection of the Word instance `wrd`. Properties missing
  # from `value` are left untouched. For Alignment the shortcuts "left",
  # "center" and "right" are accepted and translated to the corresponding
  # wdAlignParagraph* constants. Returns wrd (replacement protocol).

  pfmt <- wrd[["Selection"]][["ParagraphFormat"]]

  # properties are applied in this fixed order
  props <- c("LeftIndent", "RightIndent", "SpaceBefore", "SpaceBeforeAuto",
             "SpaceAfter", "SpaceAfterAuto", "LineSpacingRule", "Alignment",
             "WidowControl", "KeepWithNext", "KeepTogether", "PageBreakBefore",
             "NoLineNumber", "Hyphenation", "FirstLineIndent", "OutlineLevel",
             "CharacterUnitLeftIndent", "CharacterUnitRightIndent",
             "CharacterUnitFirstLineIndent", "LineUnitBefore", "LineUnitAfter",
             "MirrorIndents")

  for(p in props){
    # exact = FALSE mimics the partial matching of $-access
    v <- value[[p, exact = FALSE]]
    if(is.null(v)) next

    if(p == "Alignment" && is.character(v))
      v <- switch(match.arg(v, choices = c("left", "center", "right")),
                  left   = wdConst$wdAlignParagraphLeft,
                  center = wdConst$wdAlignParagraphCenter,
                  right  = wdConst$wdAlignParagraphRight)

    pfmt[[p]] <- v
  }

  return(wrd)
}
WrdStyle <- function (wrd = DescToolsOptions("lastWord")) {
  # Return the localized name of the paragraph style at the current
  # selection (e.g. "Normal", "Heading 1").
  wrd[["Selection"]][["Style"]][["NameLocal"]]
}
`WrdStyle<-` <- function (wrd, value) {
  # Set the paragraph style (by name) for the paragraphs of the current
  # selection. Returns wrd, as required for replacement functions.
  pars <- wrd[["Selection"]][["Paragraphs"]]
  pars[["Style"]] <- value
  return(wrd)
}
# Return TRUE if the selection of the wrd pointer can be evaluated, meaning
# the pointer refers to a running Word instance and is still valid.
# Fix: treat ANY error as invalid, not only "simpleError" — error conditions
# with custom classes (e.g. raised by the COM layer) were previously
# misreported as valid handles.
IsValidWrd <- function(wrd = DescToolsOptions("lastWord")){
  res <- tryCatch({ wrd[["Selection"]]; TRUE },
                  error = function(e) FALSE)
  return(res)
}
# This has been replaced by ToWrd.character in 0.99.18
# WrdText <- function(txt, fixedfont=TRUE, fontname=NULL,
# fontsize=NULL, bold=FALSE, italic=FALSE, col=NULL,
# alignment = c("left","right","center"), spaceBefore=0, spaceAfter=0,
# lineSpacingRule = wdConst$wdLineSpaceSingle,
# appendCR=TRUE, wrd=DescToolsOptions("lastWord") ){
#
# if(fixedfont){
# fontname <- Coalesce(fontname, getOption("fixedfont", "Consolas"))
# fontsize <- Coalesce(fontsize, getOption("fixedfontsize", 7))
# }
#
# if (!inherits(txt, "character")) txt <- .CaptOut(txt)
#
# wrdSel <- wrd[["Selection"]]
# wrdFont <- wrdSel[["Font"]]
#
# currfont <- list(
# name = wrdFont[["Name"]] ,
# size = wrdFont[["Size"]] ,
# bold = wrdFont[["Bold"]] ,
# italic = wrdFont[["Italic"]],
# color = wrdFont[["Color"]]
# )
#
# if(!is.null(fontname)) wrdFont[["Name"]] <- fontname
# if(!is.null(fontsize)) wrdFont[["Size"]] <- fontsize
# wrdFont[["Bold"]] <- bold
# wrdFont[["Italic"]] <- italic
# wrdFont[["Color"]] <- Coalesce(col, wdConst$wdColorBlack)
#
# alignment <- switch(match.arg(alignment),
# "left"= wdConst$wdAlignParagraphLeft,
# "right"= wdConst$wdAlignParagraphRight,
# "center"= wdConst$wdAlignParagraphCenter
# )
#
# wrdSel[["ParagraphFormat"]][["Alignment"]] <- alignment
# wrdSel[["ParagraphFormat"]][["SpaceBefore"]] <- spaceBefore
# wrdSel[["ParagraphFormat"]][["SpaceAfter"]] <- spaceAfter
# wrdSel[["ParagraphFormat"]][["LineSpacingRule"]] <- lineSpacingRule
#
# wrdSel$TypeText( paste(txt,collapse="\n") )
# if(appendCR) wrdSel$TypeParagraph()
#
# # Restore old font
# wrdFont[["Name"]] <- currfont[["name"]]
# wrdFont[["Size"]] <- currfont[["size"]]
# wrdFont[["Bold"]] <- currfont[["bold"]]
# wrdFont[["Italic"]] <- currfont[["italic"]]
# wrdFont[["Color"]] <- currfont[["color"]]
#
# invisible(currfont)
#
# }
WrdGoto <- function (name, what = wdConst$wdGoToBookmark, wrd = DescToolsOptions("lastWord")) {
  # Move the selection (cursor) of the Word instance `wrd` to a named
  # target, by default the bookmark called `name`; `what` may be any
  # wdGoTo* constant (page, section, field, ...).
  wrd[["Selection"]]$GoTo(what = what, Name = name)
  invisible()
}
WrdInsertBookmark <- function (name, wrd = DescToolsOptions("lastWord")) {
  # Insert a bookmark named `name` at the current selection of the
  # active Word document.
  # VBA equivalent:
  #   With ActiveDocument.Bookmarks
  #     .Add Range:=Selection.Range, Name:="entb"
  #   End With
  wrd[["ActiveDocument"]][["Bookmarks"]]$Add(name)
  invisible()
}
# Replace the text of an existing bookmark `name` with `text`. Setting the
# Text of the selection deletes the bookmark in Word, so it is re-added
# afterwards at the same position.
WrdUpdateBookmark <- function (name, text, what = wdConst$wdGoToBookmark, wrd = DescToolsOptions("lastWord")) {
  #   With ActiveDocument.Bookmarks
  #     .Add Range:=Selection.Range, Name:="entb"
  #     .DefaultSorting = wdSortByName
  #     .ShowHidden = False
  #   End With
  wrdSel <- wrd[["Selection"]]
  wrdSel$GoTo(What=what, Name=name)
  wrdSel[["Text"]] <- text
  # the bookmark will be deleted, how can we avoid that?
  wrdBookmarks <- wrd[["ActiveDocument"]][["Bookmarks"]]
  wrdBookmarks$Add(name)
  invisible()
}
# This has been made defunct in 0.99.18
#
# WrdR <- function(x, wrd = DescToolsOptions("lastWord") ){
#
# WrdText(paste("> ", x, sep=""), wrd=wrd, fontname="Courier New", fontsize=10, bold=TRUE, italic=TRUE)
# txt <- .CaptOut(eval(parse(text=x)))
# if(sum(nchar(txt))>0) WrdText(txt, wrd=wrd, fontname="Courier New", fontsize=10, bold=TRUE)
#
# invisible()
#
# }
# Example: WrdPlot(picscale=30)
# WrdPlot(width=8)
# Convert centimeters to typographic points (Word's API unit; 1 cm = 28.35 pt).
.CentimetersToPoints <- function(x) x * 28.35
# Convert typographic points to centimeters (inverse of .CentimetersToPoints).
.PointsToCentimeters <- function(x) x / 28.35
# http://msdn.microsoft.com/en-us/library/bb214076(v=office.12).aspx
# Copy the current graphics device to Word at the cursor position of `wrd`,
# going through a temporary image file.
#   type      : device/file type used for the transfer (default "png")
#   append.cr : append a newline after the picture?
#   crop      : cm to crop, c(bottom, left, top, right)
#   main      : optional caption text, inserted as Word "Figure" caption
#   picscale  : scale in percent; only used if neither height nor width given
#   height, width : target size in cm; a missing one is completed from the
#               device's aspect ratio (par("pin"))
#   res, dfact: resolution and size multiplier for the intermediate device
# Returns the handle to the inserted inline shape (picture) invisibly.
WrdPlot <- function( type="png", append.cr=TRUE, crop=c(0,0,0,0), main = NULL,
                     picscale=100, height=NA, width=NA, res=300, dfact=1.6, wrd = DescToolsOptions("lastWord") ){
  # png is considered a good choice for export to word (Smith)
  # http://blog.revolutionanalytics.com/2009/01/10-tips-for-making-your-r-graphics-look-their-best.html
  # height, width in cm!
  # scale will be overidden, if height/width defined
  # handle missing height or width values
  if (is.na(width) ){
    if (is.na(height)) {
      width <- 14
      height <- par("pin")[2] / par("pin")[1] * width
    } else {
      width <- par("pin")[1] / par("pin")[2] * height
    }
  } else {
    if (is.na(height) ){
      height <- par("pin")[2] / par("pin")[1] * width
    }
  }
  # get a [type] tempfilename:
  fn <- paste( tempfile(pattern = "file", tmpdir = tempdir()), ".", type, sep="" )
  # this is a problem for RStudio....
  # savePlot( fn, type=type )
  # png(fn, width=width, height=height, units="cm", res=300 )
  # copy the current device to a file device of the requested type
  dev.copy(eval(parse(text=type)), fn, width=width*dfact, height=height*dfact, res=res, units="cm")
  d <- dev.off()
  # add it to our word report
  res <- wrd[["Selection"]][["InlineShapes"]]$AddPicture( fn, FALSE, TRUE )
  wrdDoc <- wrd[["ActiveDocument"]]
  # the picture just inserted is the last inline shape of the document
  pic <- wrdDoc[["InlineShapes"]]$Item( wrdDoc[["InlineShapes"]][["Count"]] )
  pic[["LockAspectRatio"]] <- -1   # = msoTrue
  picfrmt <- pic[["PictureFormat"]]
  picfrmt[["CropBottom"]] <- .CentimetersToPoints(crop[1])
  picfrmt[["CropLeft"]] <- .CentimetersToPoints(crop[2])
  picfrmt[["CropTop"]] <- .CentimetersToPoints(crop[3])
  picfrmt[["CropRight"]] <- .CentimetersToPoints(crop[4])
  if( is.na(height) & is.na(width) ){
    # or use the ScaleHeight/ScaleWidth attributes:
    pic[["ScaleHeight"]] <- picscale
    pic[["ScaleWidth"]] <- picscale
  } else {
    # Set new height:
    if( is.na(width) )  width <- height / .PointsToCentimeters( pic[["Height"]] ) * .PointsToCentimeters( pic[["Width"]] )
    if( is.na(height) ) height <- width / .PointsToCentimeters( pic[["Width"]] ) * .PointsToCentimeters( pic[["Height"]] )
    pic[["Height"]] <- .CentimetersToPoints(height)
    pic[["Width"]] <- .CentimetersToPoints(width)
  }
  if( append.cr == TRUE ) { wrd[["Selection"]]$TypeText("\n")
  } else {
    wrd[["Selection"]]$MoveRight(wdConst$wdCharacter, 1, 0)
  }
  # clean up the temporary image file
  if( file.exists(fn) ) { file.remove(fn) }
  if(!is.null(main)){
    # insert caption
    sel <- wrd$Selection() # "Abbildung"
    sel$InsertCaption(Label=wdConst$wdCaptionFigure, Title=main)
    sel$TypeParagraph()
  }
  invisible(pic)
}
# Insert an empty Word table with nrow x ncol cells at the current cursor
# position, optionally with fixed column widths / row heights (in cm,
# recycled) and a "Table" caption.
# Returns the handle to the created table invisibly.
WrdTable <- function(nrow = 1, ncol = 1, heights = NULL, widths = NULL, main = NULL, wrd = DescToolsOptions("lastWord")){
  res <- wrd[["ActiveDocument"]][["Tables"]]$Add(wrd[["Selection"]][["Range"]],
                                                 NumRows = nrow, NumColumns = ncol)
  if(!is.null(widths)) {
    widths <- rep(widths, length.out=ncol)
    for(i in 1:ncol){
      # set column-widths
      tcol <- res$Columns(i)
      tcol[["Width"]] <- .CentimetersToPoints(widths[i])
    }
  }
  if(!is.null(heights)) {
    heights <- rep(heights, length.out=nrow)
    for(i in 1:nrow){
      # set row heights
      tcol <- res$Rows(i)
      tcol[["Height"]] <- .CentimetersToPoints(heights[i])
    }
  }
  if(!is.null(main)){
    # insert caption
    sel <- wrd$Selection() # "Abbildung"
    sel$InsertCaption(Label=wdConst$wdCaptionTable, Title=main)
    sel$TypeParagraph()
  }
  invisible(res)
}
# Build a descriptive sentence (English or German) for a two-group comparison
# of a numeric variable x split by factor g: group sizes, percentages, means
# and a Welch t-test of the group difference (significance level fixed at
# 0.05). Returns the phrase as a single character string.
#   glabels : labels for the groups (default: levels(g))
#   xname   : variable name used in the text (default: deparsed x)
#   unit    : measurement unit appended to the means
#   lang    : "engl" gives English, anything else German
# NOTE(review): assumes g has exactly two levels — additional levels would
# silently be ignored in the text; TODO confirm with callers.
Phrase <- function(x, g, glabels=NULL, xname=NULL, unit=NULL, lang="engl") {
  if(is.null(xname))
    xname <- deparse(substitute(x))
  if(is.null(glabels))
    glabels <- levels(g)
  if(is.null(unit))
    unit <- ""
  if(lang=="engl"){
    txt1 <- "The collective consists of a total of %s elements. Of these, %s are %s (%s, mean %s %s %s) and %s %s (%s, mean %s %s %s).\n"
    txt2 <- "The difference is significant (t-test, p = %s) and is %s %s [%s, %s] (95%s CI)."
    txt3 <- "The difference is not significant.\n"
  } else {
    txt1 <- "Das Kollektiv besteht aus insgesamt %s Elementen. Davon sind %s %s (%s, mittleres %s %s %s) und %s %s (%s, mittleres %s %s %s).\n"
    txt2 <- "Der Unterschied ist signifikant (t-test, p = %s) und betraegt %s %s [%s, %s] (95%s-CI).\n"
    txt3 <- "Der Unterschied ist nicht signifikant.\n"
  }
  lst <- split(x, g)
  # rename the first two groups to x and y for the t-test calls below
  names(lst) <- c("x","y")
  n <- tapply(x, g, length)
  meanage <- tapply(x, g, mean)
  # fill the template; argument order must match the %s placeholders in txt1
  txt <- gettextf(txt1
                  , Format(sum(n), digits=0, big.mark="'")
                  , Format(n[1], digits=0, big.mark="'")
                  , glabels[1]
                  , Format(n[1]/sum(n), digits=1, fmt="%")
                  , xname
                  , round(meanage[1], 1)
                  , unit
                  , Format(n[2], digits=0, big.mark="'")
                  , glabels[2]
                  , Format(n[2]/sum(n), digits=1, fmt="%")
                  , xname
                  , round(meanage[2],1)
                  , unit
  )
  r.t <- t.test(lst$x, lst$y)
  if(r.t$p.value < 0.05){
    # significant: append the mean difference with its confidence interval
    md <- round(MeanDiffCI(lst$x, lst$y), 1)
    txt <- paste(txt, gettextf(txt2, format.pval(r.t$p.value), md[1], unit, md[2], md[3], "%"), sep="" )
  } else {
    txt <- paste(txt, txt3, sep="")
  }
  # pasting "" uses collapse character, so get rid of multiple spaces here
  gsub(" )", ")", gsub(" +", " ", txt))
}
###
# ## Word Table - experimental code
#
# WrdTable <- function(tab, main = NULL, wrd = DescToolsOptions("lastWord"), row.names = FALSE, ...){
# UseMethod("WrdTable")
#
# }
#
#
# WrdTable.Freq <- function(tab, main = NULL, wrd = DescToolsOptions("lastWord"), row.names = FALSE, ...){
#
# tab[,c(3,5)] <- sapply(round(tab[,c(3,5)], 3), Format, digits=3)
# res <- WrdTable.default(tab=tab, wrd=wrd)
#
# if(!is.null(main)){
# # insert caption
# sel <- wrd$Selection() # "Abbildung"
# sel$InsertCaption(Label=wdConst$wdCaptionTable, Title=main)
# sel$TypeParagraph()
# }
#
# invisible(res)
#
# }
#
# WrdTable.ftable <- function(tab, main = NULL, wrd = DescToolsOptions("lastWord"), row.names = FALSE, ...) {
# tab <- FixToTable(capture.output(tab))
# NextMethod()
# }
#
#
# WrdTable.default <- function (tab, font = NULL, align=NULL, autofit = TRUE, main = NULL,
# wrd = DescToolsOptions("lastWord"), row.names=FALSE,
# ...) {
#
# dim1 <- ncol(tab)
# dim2 <- nrow(tab)
# if(row.names) dim1 <- dim1 + 1
#
# # wdConst ist ein R-Objekt (Liste mit 2755 Objekten!!!)
#
# write.table(tab, file = "clipboard", sep = "\t", quote = FALSE, row.names=row.names)
#
# myRange <- wrd[["Selection"]][["Range"]]
# bm <- wrd[["ActiveDocument"]][["Bookmarks"]]$Add("PasteHere", myRange)
# myRange$Paste()
#
# if(row.names) wrd[["Selection"]]$TypeText("\t")
#
# myRange[["Start"]] <- bm[["Range"]][["Start"]]
# myRange$Select()
# bm$Delete()
# wrd[["Selection"]]$ConvertToTable(Separator = wdConst$wdSeparateByTabs,
# NumColumns = dim1,
# NumRows = dim2,
# AutoFitBehavior = wdConst$wdAutoFitFixed)
#
# wrdTable <- wrd[["Selection"]][["Tables"]]$Item(1)
# # http://www.thedoctools.com/downloads/DocTools_List_Of_Built-in_Style_English_Danish_German_French.pdf
# wrdTable[["Style"]] <- -115 # "Tabelle Klassisch 1"
# wrdSel <- wrd[["Selection"]]
#
#
# # align the columns
# if(is.null(align))
# align <- c("l", rep(x = "r", ncol(tab)-1))
# else
# align <- rep(align, length.out=ncol(tab))
#
# align[align=="l"] <- wdConst$wdAlignParagraphLeft
# align[align=="c"] <- wdConst$wdAlignParagraphCenter
# align[align=="r"] <- wdConst$wdAlignParagraphRight
#
# for(i in seq_along(align)){
# wrdTable$Columns(i)$Select()
# wrd[["Selection"]][["ParagraphFormat"]][["Alignment"]] <- align[i]
# }
#
# if(!is.null(font)){
# wrdTable$Select()
# WrdFont(wrd) <- font
# }
#
# if(autofit)
# wrdTable$Columns()$AutoFit()
#
# # Cursor aus der Tabelle auf die letzte Postition im Dokument setzten
# # Selection.GoTo What:=wdGoToPercent, Which:=wdGoToLast
# wrd[["Selection"]]$GoTo(What = wdConst$wdGoToPercent, Which= wdConst$wdGoToLast)
#
# if(!is.null(main)){
# # insert caption
# sel <- wrd$Selection() # "Abbildung"
# sel$InsertCaption(Label=wdConst$wdCaptionTable, Title=main)
# sel$TypeParagraph()
#
# }
#
# invisible(wrdTable)
#
# }
#
# WrdTable <- function(tab, wrd){
# ### http://home.wanadoo.nl/john.hendrickx/statres/other/PasteAsTable.html
# write.table(tab, file="clipboard", sep="\t", quote=FALSE)
# myRange <- wrd[["Selection"]][["Range"]]
# bm <- wrd[["ActiveDocument"]][["Bookmarks"]]$Add("PasteHere", myRange)
# myRange$Paste()
# wrd[["Selection"]]$TypeText("\t")
# myRange[["Start"]] <- bm[["Range"]][["Start"]]
# myRange$Select()
# bm$Delete()
# wrd[["Selection"]]$ConvertToTable(Separator=wdConst$wdSeparateByTabs, NumColumns=4,
# NumRows=9, AutoFitBehavior=wdConst$wdAutoFitFixed)
# wrdTable <- wrd[["Selection"]][["Tables"]]$Item(1)
# wrdTable[["Style"]] <- "Tabelle Klassisch 1"
# wrdSel <- wrd[["Selection"]]
# wrdSel[["ParagraphFormat"]][["Alignment"]] <- wdConst$wdAlignParagraphRight
# #left align the first column
# wrdTable[["Columns"]]$Item(1)$Select()
# wrd[["Selection"]][["ParagraphFormat"]][["Alignment"]] <- wdConst$wdAlignParagraphLeft
# ### wtab[["ApplyStyleHeadingRows"]] <- TRUE
# ### wtab[["ApplyStyleLastRow"]] <- FALSE
# ### wtab[["ApplyStyleFirstColumn"]] <- TRUE
# ### wtab[["ApplyStyleLastColumn"]] <- FALSE
# ### wtab[["ApplyStyleRowBands"]] <- TRUE
# ### wtab[["ApplyStyleColumnBands"]] <- FALSE
# ### With Selection.Tables(1)
# #### If .Style <> "Tabellenraster" Then
# ### .Style = "Tabellenraster"
# ### End If
# ### wrd[["Selection"]]$ConvertToTable( Separator=wdConst$wdSeparateByTabs, AutoFit=TRUE, Format=wdConst$wdTableFormatSimple1,
# ### ApplyBorders=TRUE, ApplyShading=TRUE, ApplyFont=TRUE,
# ### ApplyColor=TRUE, ApplyHeadingRows=TRUE, ApplyLastRow=FALSE,
# ### ApplyFirstColumn=TRUE, ApplyLastColumn=FALSE)
# ### wrd[["Selection"]][["Tables"]]$Item(1)$Select()
# #wrd[["Selection"]][["ParagraphFormat"]][["Alignment"]] <- wdConst$wdAlignParagraphRight
# ### ### left align the first column
# ### wrd[["Selection"]][["Columns"]]$Item(1)$Select()
# ### wrd[["Selection"]][["ParagraphFormat"]][["Alignment"]] <- wdConst$wdAlignParagraphLeft
# ### wrd[["Selection"]][["ParagraphFormat"]][["Alignment"]] <- wdConst$wdAlignParagraphRight
# }
# require ( xtable )
# data ( tli )
# fm1 <- aov ( tlimth ~ sex + ethnicty + grade + disadvg , data = tli )
# fm1.table <- print ( xtable (fm1), type ="html")
# Tabellen-Studie via HTML FileExport
# WrdInsTable <- function( tab, wrd ){
# htmtab <- print(xtable(tab), type ="html")
# ### Let's create a summary file and insert it
# ### get a tempfile:
# fn <- paste(tempfile(pattern = "file", tmpdir = tempdir()), ".txt", sep="")
# write(htmtab, file=fn)
# wrd[["Selection"]]$InsertFile(fn)
# wrd[["ActiveDocument"]][["Tables"]]$Item(
# wrd[["ActiveDocument"]][["Tables"]][["Count"]] )[["Style"]] <- "Tabelle Klassisch 1"
# }
# WrdInsTable( fm1, wrd=wrd )
# data(d.pizza)
# txt <- Desc( temperature ~ driver, data=d.pizza )
# WrdInsTable( txt, wrd=wrd )
# WrdPlot(PlotDescNumFact( temperature ~ driver, data=d.pizza, newwin=T )
# , wrd=wrd, width=17, crop=c(0,0,60,0))
###
## Excel functions ====
# Start a new Excel application instance (Windows only, via RDCOMClient) and
# add an empty workbook. Returns the COM handle to the Excel application
# invisibly; NULL (with a warning) when RDCOMClient is not available.
GetNewXL <- function( visible = TRUE ) {
  if (requireNamespace("RDCOMClient", quietly = FALSE)) {
    # Starts the Excel with xl as handle
    hwnd <- RDCOMClient::COMCreate("Excel.Application")
    if( visible == TRUE ) hwnd[["Visible"]] <- TRUE
    # Create a new workbook
    # Fix: the COM method must actually be invoked — in RDCOMClient the
    # bare `$Add` only fetches the method as a function and never created
    # a workbook (cf. `owb$Add()` / `$Add(template, ...)` elsewhere here).
    newwb <- hwnd[["Workbooks"]]$Add()
  } else {
    if(Sys.info()["sysname"] == "Windows")
      warning("RDCOMClient is not available. To install it use: install.packages('RDCOMClient', repos = 'http://www.stats.ox.ac.uk/pub/RWin/')")
    else
      warning(gettextf("RDCOMClient is unfortunately not available for %s systems (Windows-only).", Sys.info()["sysname"]))
    hwnd <- NULL
  }
  invisible(hwnd)
}
# Get a handle to a running Excel instance (Windows only, via RDCOMClient)
# and store it in DescToolsOptions("lastXL"). RDCOMClient has no dedicated
# "get instance" call, so COMCreate(..., existing=TRUE) is used instead.
# Returns the handle invisibly; NULL (with a warning) if none is found or
# RDCOMClient is unavailable.
GetCurrXL <- function() {
  # stopifnot(require(RDCOMClient))
  if (requireNamespace("RDCOMClient", quietly = FALSE)) {
    # try to get a handle to a running XL instance
    # there's no "get"-function in RDCOMClient, so just create a new here..
    hwnd <- RDCOMClient::COMCreate("Excel.Application", existing=TRUE)
    if(is.null(hwnd)) warning("No running Excel application found!")
    # options(lastXL = hwnd)
    DescToolsOptions(lastXL = hwnd)
  } else {
    if(Sys.info()["sysname"] == "Windows")
      warning("RDCOMClient is not available. To install it use: install.packages('RDCOMClient', repos = 'http://www.stats.ox.ac.uk/pub/RWin/')")
    else
      warning(gettextf("RDCOMClient is unfortunately not available for %s systems (Windows-only).", Sys.info()["sysname"]))
    hwnd <- NULL
  }
  invisible(hwnd)
}
# View an object x in a new Excel instance; with missing x an empty new
# workbook (saved as csv) is opened. The data is transferred via a temporary
# csv file whose path is returned invisibly.
#   x         : data.frame-like object; an ftable is reformatted first
#   col.names, row.names : write column / row names?
#   na        : string used for NA values
# Fix: use inherits(x, "ftable") instead of class(x) == "ftable" — the
# equality comparison misbehaves for objects with more than one class
# (length > 1 condition / only first class compared).
XLView <- function (x, col.names = TRUE, row.names = FALSE, na = "") {
  # define some XL constants
  xlToRight <- -4161
  fn <- paste(tempfile(pattern = "file", tmpdir = tempdir()),
              ".csv", sep = "")
  xl <- GetNewXL()
  owb <- xl[["Workbooks"]]
  if(!missing(x)){
    if(inherits(x, "ftable")){
      x <- FixToTable(capture.output(x), sep = " ", header = FALSE)
      col.names <- FALSE
    }
    write.table(x, file = fn, sep = ";", col.names = col.names,
                qmethod = "double", row.names = row.names, na=na)
    ob <- owb$Open(fn)
    # if row.names are saved there's the first cell in the first line missing
    # I don't actually see, how to correct this besides inserting a cell in XL
    if(row.names) xl$Cells(1, 1)$Insert(Shift=xlToRight)
    xl[["Cells"]][["EntireColumn"]]$AutoFit()
  } else {
    owb$Add()
    awb <- xl[["ActiveWorkbook"]]
    # delete sheets(2,3) without asking, if it's ok
    xl[["DisplayAlerts"]] <- FALSE
    xl$Sheets(c(2,3))$Delete()
    xl[["DisplayAlerts"]] <- TRUE
    awb$SaveAs( Filename=fn, FileFormat=6 )
  }
  invisible(fn)
}
XLGetRange <- function (file = NULL, sheet = NULL, range = NULL, as.data.frame = TRUE,
                        header = FALSE, stringsAsFactors = FALSE, echo = FALSE, datecols = NA) {

  # Read one or more cell ranges from an Excel sheet via RDCOMClient.
  #
  # file   : path to a workbook; if NULL the currently running Excel
  #          instance with its active sheet/selection is used
  # sheet  : sheet name or index (defaults to 1 when a file is given)
  # range  : character vector of A1-style addresses; defaults to the current
  #          selection (or its current region when only one cell is selected)
  # as.data.frame : convert each read range into a data.frame?
  # header : treat the first row of each range as column names?
  # stringsAsFactors : passed through to data.frame()
  # echo   : print the reproducing call after reading?
  # datecols : names or indices of columns to convert from Excel serial
  #          dates to Date
  #
  # Returns a single data.frame (one range) or a named list (several
  # ranges), with a reproducing call stored in attribute "call".

  # convert an A1-style address ("B3:D10") into a list of c(row, col) pairs
  A1ToZ1S1 <- function(x){
    # all 16384 valid Excel column labels: A..Z, AA..ZZ, AAA..XFD
    xlcol <- c( LETTERS
                , sort(c(outer(LETTERS, LETTERS, paste, sep="" )))
                , sort(c(outer(LETTERS, c(outer(LETTERS, LETTERS, paste, sep="" )), paste, sep="")))
    )[1:16384]
    z1s1 <- function(x) {
      # split the address into its alphabetic (column) and numeric (row) part
      colnr <- match( regmatches(x, regexec("^[[:alpha:]]+", x)), xlcol)
      rownr <- as.numeric(regmatches(x, regexec("[[:digit:]]+$", x)))
      return(c(rownr, colnr))
    }
    lapply(unlist(strsplit(toupper(x),":")), z1s1)
  }

  # main function *******************************

  # to do: 30.8.2015
  # we could / should check for a running XL instance here...
  # ans <- RDCOMClient::getCOMInstance("Excel.Application", force = FALSE, silent = TRUE)
  # if (is.null(ans) || is.character(ans)) print("not there")

  if(is.null(file)){
    xl <- GetCurrXL()
    ws <- xl$ActiveSheet()
    if(is.null(range)) {
      # if there is a selection in XL then use it, if only one cell selected use currentregion
      sel <- xl$Selection()
      if(sel$Cells()$Count() == 1 ){
        range <- xl$ActiveCell()$CurrentRegion()$Address(FALSE, FALSE)
      } else {
        range <- sapply(1:sel$Areas()$Count(), function(i) sel$Areas()[[i]]$Address(FALSE, FALSE) )
        # old: this did not work on some XL versions with more than 28 selected areas
        # range <- xl$Selection()$Address(FALSE, FALSE)
        # range <- unlist(strsplit(range, ";"))
        # there might be more than 1 single region, split by ;
        # (this might be a problem for other locales)
      }
    }
  } else {
    xl <- GetNewXL()
    wb <- xl[["Workbooks"]]$Open(file)
    # set defaults for sheet and range here
    if(is.null(sheet))
      sheet <- 1
    if(is.null(range))
      range <- xl$Cells(1,1)$CurrentRegion()$Address(FALSE, FALSE)
    ws <- wb$Sheets(sheet)$select()
  }

  # fetch the raw cell values of every requested range
  lst <- list()
  # for(i in 1:length(range)){ # John Chambers prefers seq_along: (why actually?)
  for(i in seq_along(range)){
    zs <- A1ToZ1S1(range[i])
    rr <- xl$Range(xl$Cells(zs[[1]][1], zs[[1]][2]), xl$Cells(zs[[2]][1], zs[[2]][2]) )
    lst[[i]] <- rr[["Value2"]]
    names(lst)[i] <- range[i]
  }

  # implement na.strings:
  #   if(!identical(na.strings, NA)){
  #     for(s in na.strings){
  #      lst[[i]] <- replace(lst[[i]], list = na.strings, values = NA)
  #     }
  #   }

  # replace NULL values by NAs, as NULLs are evil while coercing to data.frame!
  if(as.data.frame){
    # for(i in 1:length(lst)){  # original
    for(i in seq_along(lst)){
      # for(j in 1:length(lst[[i]])){
      for(j in seq_along(lst[[i]])){
        lst[[i]][[j]][unlist(lapply(lst[[i]][[j]], is.null))] <- NA
      }
      xnames <- unlist(lapply(lst[[i]], "[", 1))  # define the names in case header = TRUE
      if(header) lst[[i]] <- lapply(lst[[i]], "[", -1)  # delete the first row
      lst[[i]] <- do.call(data.frame, c(lapply(lst[[i]][], unlist), stringsAsFactors = stringsAsFactors))
      if(header){
        names(lst[[i]]) <- xnames
      } else {
        names(lst[[i]]) <- paste("X", 1:ncol(lst[[i]]), sep="")
      }
    }

    # convert date columns to date
    if(!identical(datecols, NA)){
      # apply to all selections
      for(i in seq_along(lst)){
        # switch to colindex if given as text
        if(!is.numeric(datecols) && header)
          datecols <- which(names(lst[[i]]) %in% datecols)
        for(j in datecols)
          lst[[i]][,j] <- as.Date(XLDateToPOSIXct(lst[[i]][,j]))
      }
    }
  }

  # just return a single object (for instance data.frame) if only one range was supplied
  if(length(lst)==1) lst <- lst[[1]]

  # opt <- options(useFancyQuotes=FALSE); on.exit(options(opt))
  # store a call that would reproduce this read (used by echo and for audit)
  attr(lst,"call") <- gettextf("XLGetRange(file = %s, sheet = %s,
     range = c(%s),
     as.data.frame = %s, header = %s, stringsAsFactors = %s)",
                               gsub("\\\\", "\\\\\\\\",
                                    shQuote(paste(xl$ActiveWorkbook()$Path(),
                                                  xl$ActiveWorkbook()$Name(), sep="\\"))),
                               shQuote(xl$ActiveSheet()$Name()),
                               # gettextf(paste(dQuote(names(lst)), collapse=",")),
                               gettextf(paste(shQuote(range), collapse=",")),
                               as.data.frame, header, stringsAsFactors)

  if(!is.null(file)) xl$Quit()  # only quit, if a new XL-instance was created before

  if(echo)
    cat(attr(lst,"call"))

  return(lst)
}
# XLGetWorkbook <- function (file) {
#
# xlLastCell <- 11
#
# xl <- GetNewXL()
# wb <- xl[["Workbooks"]]$Open(file)
#
# lst <- list()
# for( i in 1:wb[["Sheets"]][["Count"]]){
# ws <- wb[["Sheets", i]]
# ws[["Range", "A1"]][["Select"]]
# rngLast <- xl[["ActiveCell"]][["SpecialCells", xlLastCell]][["Address"]]
# lst[[i]] <- ws[["Range", paste("A1",rngLast, sep=":")]][["Value2"]]
# }
#
# xl$Quit()
# return(lst)
#
# }
# New in 0.99.18:
XLGetWorkbook <- function (file, compactareas = TRUE) {

  # Read all non-empty sheets of an Excel workbook into a list, one element
  # per sheet (named by the sheet name). Each sheet yields its contiguous
  # areas of constants/formulas; with compactareas = TRUE these areas are
  # cbind-ed into single matrices. The workbook is closed without saving.

  # a sheet counts as empty if its used range is a single cell with no value
  IsEmptySheet <- function(sheet)
    sheet$UsedRange()$Rows()$Count() == 1 &
    sheet$UsedRange()$columns()$Count() == 1 &
    is.null(sheet$cells(1,1)$Value())

  # glue the columns of all elements of a list together
  CompactArea <- function(lst)
    do.call(cbind, lapply(lst, cbind))

  # Excel constants for SpecialCells()
  xlCellTypeConstants <- 2
  xlCellTypeFormulas <- -4123

  xl <- GetNewXL()
  wb <- xl[["Workbooks"]]$Open(file)

  lst <- list()
  for (i in 1:wb$Sheets()$Count()) {
    if(!IsEmptySheet(sheet=xl$Sheets(i))) {
      # has.formula is TRUE, when all cells contain formula, FALSE when no cell contains a formula
      # and NULL else, thus: !identical(FALSE) for having some or all
      if(!identical(xl$Sheets(i)$UsedRange()$HasFormula(), FALSE))
        areas <- xl$union(
          xl$Sheets(i)$UsedRange()$SpecialCells(xlCellTypeConstants),
          xl$Sheets(i)$UsedRange()$SpecialCells(xlCellTypeFormulas))$areas()
      else
        areas <- xl$Sheets(i)$UsedRange()$SpecialCells(xlCellTypeConstants)$areas()

      # collect the raw values of every area of this sheet
      alst <- list()
      for ( j in 1:areas$count())
        alst[[j]] <- areas[[j]]$Value2()

      lst[[xl$Sheets(i)$name()]] <- alst
    }
  }

  if(compactareas)
    lst <- lapply(lst, function(x) lapply(x, CompactArea))

  # close without saving
  wb$Close(FALSE)
  xl$Quit()

  return(lst)
}
XLKill <- function(){
  # Forcibly terminate all running Excel processes via the Windows task
  # killer. Windows only: shell() is not available on other platforms.
  # Excel would only quit, when all workbooks are closed before, someone said.
  # http://stackoverflow.com/questions/15697282/excel-application-not-quitting-after-calling-quit
  # We experience, that it would not even then quit, when there's no workbook loaded at all.
  # maybe gc() would help
  # so killing the task is "ultima ratio"...
  shell('taskkill /F /IM EXCEL.EXE')
}
# Convert Excel serial date numbers to POSIXct.
# Excel counts days since 1899-12-30 by default, or since 1904-01-01 when
# the workbook uses the 1904 date system (xl1904 = TRUE); see
# https://support.microsoft.com/en-us/kb/214330
XLDateToPOSIXct <- function (x, tz = "GMT", xl1904 = FALSE) {
  day0 <- if (xl1904) "1904-01-01" else "1899-12-30"
  secs_per_day <- 86400
  as.POSIXct(x * secs_per_day, origin = day0, tz = tz)
}
###
## PowerPoint functions ====
# Launch a new PowerPoint instance containing one blank slide and remember
# the handle in the DescTools options ("lastPP"). Returns the handle
# invisibly, or NULL with a warning when RDCOMClient is unavailable.
GetNewPP <- function (visible = TRUE, template = "Normal") {

  if (!requireNamespace("RDCOMClient", quietly = FALSE)) {
    if(Sys.info()["sysname"] == "Windows")
      warning("RDCOMClient is not available. To install it use: install.packages('RDCOMClient', repos = 'http://www.stats.ox.ac.uk/pub/RWin/')")
    else
      warning(gettextf("RDCOMClient is unfortunately not available for %s systems (Windows-only).", Sys.info()["sysname"]))
    return(invisible(NULL))
  }

  # PowerPoint layout constant for an empty slide
  ppLayoutBlank <- 12

  hwnd <- RDCOMClient::COMCreate("PowerPoint.Application")
  if (visible == TRUE) { hwnd[["Visible"]] <- TRUE }

  # create a presentation and start it off with one blank slide
  newpres <- hwnd[["Presentations"]]$Add(TRUE)
  newpres[["Slides"]]$Add(1, ppLayoutBlank)

  DescToolsOptions(lastPP = hwnd)

  invisible(hwnd)
}
# Attach to an already running PowerPoint instance (via
# COMCreate(..., existing = TRUE), since RDCOMClient offers no "get" call)
# and store the handle in the DescTools options ("lastPP"). Returns the
# handle invisibly; NULL with a warning when RDCOMClient is unavailable.
GetCurrPP <- function() {

  if (!requireNamespace("RDCOMClient", quietly = FALSE)) {
    if(Sys.info()["sysname"] == "Windows")
      warning("RDCOMClient is not available. To install it use: install.packages('RDCOMClient', repos = 'http://www.stats.ox.ac.uk/pub/RWin/')")
    else
      warning(gettextf("RDCOMClient is unfortunately not available for %s systems (Windows-only).", Sys.info()["sysname"]))
    return(invisible(NULL))
  }

  hwnd <- RDCOMClient::COMCreate("PowerPoint.Application", existing=TRUE)
  if(is.null(hwnd)) warning("No running PowerPoint application found!")

  # remember the handle for subsequent Pp* helpers
  DescToolsOptions(lastPP = hwnd)

  invisible(hwnd)
}
PpAddSlide <- function(pos = NULL, pp = DescToolsOptions("lastPP")){
  # Insert a new slide into the active presentation of the PowerPoint
  # instance 'pp' and select it.
  # pos : position of the new slide; NULL (default) appends at the end.
  # The new slide reuses the custom layout of the first slide.
  slides <- pp[["ActivePresentation"]][["Slides"]]
  if(is.null(pos)) pos <- slides$Count()+1
  slides$AddSlide(pos, slides$Item(1)[["CustomLayout"]])$Select()
  invisible()
}
PpText <- function (txt, x=1, y=1, height=50, width=100, fontname = "Calibri", fontsize = 18, bold = FALSE,
                    italic = FALSE, col = "black", bg = "white", hasFrame = TRUE, pp = DescToolsOptions("lastPP")) {

  # Place a text box (a rectangle shape) on the current PowerPoint slide.
  #
  # txt       : the text; non-character input is captured via .CaptOut()
  # x, y      : position of the upper left corner
  # height, width : size of the shape
  # fontname, fontsize, bold, italic, col : font properties of the text
  # bg        : fill colour of the shape
  # hasFrame  : should a border line be drawn around the shape?
  # pp        : handle to the PowerPoint instance (default: last one used)
  #
  # Returns the shape handle invisibly.

  msoShapeRectangle <- 1

  # use is.character() instead of comparing class(txt) != "character";
  # class() may return a vector of length > 1, which breaks the if() condition
  if (!is.character(txt))
    txt <- .CaptOut(txt)

  # slide <- pp[["ActivePresentation"]][["Slides"]]$Item(1)
  slide <- pp$ActiveWindow()$View()$Slide()

  shape <- slide[["Shapes"]]$AddShape(msoShapeRectangle, x, y, x + width, y+height)

  textbox <- shape[["TextFrame"]]
  textbox[["TextRange"]][["Text"]] <- txt

  # apply the requested font properties
  tbfont <- textbox[["TextRange"]][["Font"]]
  tbfont[["Name"]] <- fontname
  tbfont[["Size"]] <- fontsize
  tbfont[["Bold"]] <- bold
  tbfont[["Italic"]] <- italic
  tbfont[["Color"]] <- RgbToLong(ColToRgb(col))

  # inner margins of the text frame
  textbox[["MarginBottom"]] <- 10
  textbox[["MarginLeft"]] <- 10
  textbox[["MarginRight"]] <- 10
  textbox[["MarginTop"]] <- 10

  # background fill colour
  shp <- shape[["Fill"]][["ForeColor"]]
  shp[["RGB"]] <- RgbToLong(ColToRgb(bg))

  # border visibility
  shp <- shape[["Line"]]
  shp[["Visible"]] <- hasFrame

  invisible(shape)
}
PpPlot <- function( type="png", crop=c(0,0,0,0),
                    picscale=100, x=1, y=1, height=NA, width=NA, res=200, dfact=1.6, pp = DescToolsOptions("lastPP") ){

  # Copy the current R plot device to the active PowerPoint slide, via a
  # temporary bitmap file that is inserted as picture and then deleted.
  #
  # type     : bitmap device used for the intermediate file (e.g. "png")
  # crop     : crop margins in cm, in the order bottom, left, top, right
  # picscale : scaling of the picture in percent (only used when neither
  #            height nor width is supplied -- but see NOTE below)
  # x, y     : position of the upper left corner
  # height, width : target size in cm; a missing one is derived from the
  #            plot's aspect ratio (par("pin"))
  # res      : resolution passed to the bitmap device
  # dfact    : enlargement factor for the intermediate bitmap
  # pp       : handle to the PowerPoint instance
  #
  # Returns the picture shape handle invisibly.

  # height, width in cm!
  # scale will be overidden, if height/width defined
  # Example: PpPlot(picscale=30)
  #          PpPlot(width=8)

  .CentimetersToPoints <- function(x) x * 28.35
  .PointsToCentimeters <- function(x) x / 28.35

  # http://msdn.microsoft.com/en-us/library/bb214076(v=office.12).aspx

  # handle missing height or width values
  if (is.na(width) ){
    if (is.na(height)) {
      width <- 14
      height <- par("pin")[2] / par("pin")[1] * width
    } else {
      width <- par("pin")[1] / par("pin")[2] * height
    }
  } else {
    if (is.na(height) ){
      height <- par("pin")[2] / par("pin")[1] * width
    }
  }

  # get a [type] tempfilename:
  fn <- paste( tempfile(pattern = "file", tmpdir = tempdir()), ".", type, sep="" )
  # this is a problem for RStudio....
  # savePlot( fn, type=type )

  # png(fn, width=width, height=height, units="cm", res=300 )
  # replicate the current device into the bitmap file
  dev.copy(eval(parse(text=type)), fn, width=width*dfact, height=height*dfact, res=res, units="cm")
  d <- dev.off()

  # slide <- pp[["ActivePresentation"]][["Slides"]]$Item(1)
  slide <- pp$ActiveWindow()$View()$Slide()
  pic <- slide[["Shapes"]]$AddPicture(fn, FALSE, TRUE, x, y)

  picfrmt <- pic[["PictureFormat"]]
  picfrmt[["CropBottom"]] <- .CentimetersToPoints(crop[1])
  picfrmt[["CropLeft"]] <- .CentimetersToPoints(crop[2])
  picfrmt[["CropTop"]] <- .CentimetersToPoints(crop[3])
  picfrmt[["CropRight"]] <- .CentimetersToPoints(crop[4])

  # NOTE(review): the block above always fills in height and width, so this
  # branch (and hence picscale) appears unreachable -- confirm intended logic
  if( is.na(height) & is.na(width) ){
    # or use the ScaleHeight/ScaleWidth attributes:
    msoTrue <- -1
    msoFalse <- 0
    pic$ScaleHeight(picscale/100, msoTrue)
    pic$ScaleWidth(picscale/100, msoTrue)

  } else {
    # Set new height:
    if( is.na(width) )  width <- height / .PointsToCentimeters( pic[["Height"]] ) * .PointsToCentimeters( pic[["Width"]] )
    if( is.na(height) ) height <- width / .PointsToCentimeters( pic[["Width"]] ) * .PointsToCentimeters( pic[["Height"]] )
    pic[["Height"]] <- .CentimetersToPoints(height)
    pic[["Width"]] <- .CentimetersToPoints(width)
  }

  if( file.exists(fn) ) { file.remove(fn) }

  invisible( pic )
}
# Download a course dataset by name with read.table(). If 'name' contains
# no 3-character file extension, ".txt" is appended; the default source is
# http://www.signorell.net/hwz/datasets/. Further arguments are passed on
# to read.table().
CourseData <- function(name, url=NULL, header=TRUE, sep=";", ...){

  has_ext <- length(grep(pattern = "\\..{3}", x = name)) > 0
  if (!has_ext)
    name <- paste(name, ".txt", sep = "")

  if (is.null(url))
    url <- "http://www.signorell.net/hwz/datasets/"

  # build the full file url and fetch the table
  read.table(file = gettextf(paste(url, "%s", sep = ""), name),
             header = header, sep = sep, ...)
}
###
## Entwicklungs-Ideen ====
# With ActiveDocument.Bookmarks
# .Add Range:=Selection.Range, Name:="start"
# .DefaultSorting = wdSortByName
# .ShowHidden = False
# End With
# Selection.TypeText Text:="Hier kommt mein Text"
# Selection.TypeParagraph
# Selection.TypeText Text:="und auf weiteren Zeilen"
# Selection.TypeParagraph
# With ActiveDocument.Bookmarks
# .Add Range:=Selection.Range, Name:="stop"
# .DefaultSorting = wdSortByName
# .ShowHidden = False
# End With
# Selection.GoTo What:=wdGoToBookmark, Name:="start"
# Selection.GoTo What:=wdGoToBookmark, Name:="stop"
# With ActiveDocument.Bookmarks
# .DefaultSorting = wdSortByName
# .ShowHidden = False
# End With
# Selection.MoveLeft Unit:=wdWord, Count:=2, Extend:=wdExtend
# Selection.HomeKey Unit:=wdStory, Extend:=wdExtend
# Selection.Font.Name = "Arial Black"
# Selection.EndKey Unit:=wdStory
# Selection.GoTo What:=wdGoToBookmark, Name:="stop"
# Selection.Find.ClearFormatting
# With Selection.Find
# .Text = "0."
# .Replacement.Text = " ."
# .Forward = True
# .Wrap = wdFindContinue
# .Format = False
# .MatchCase = False
# .MatchWholeWord = False
# .MatchWildcards = False
# .MatchSoundsLike = False
# .MatchAllWordForms = False
# End With
# ActiveDocument.Bookmarks("start").Delete
# With ActiveDocument.Bookmarks
# .DefaultSorting = wdSortByName
# .ShowHidden = False
# End With
# End Sub
# wdSortByName =0
# wdGoToBookmark = -1
# wdFindContinue = 1
# wdStory = 6
# Bivariate Darstellungen gute uebersicht
# pairs( lapply( lapply( c( d.set[,-1], list()), "as.numeric" ), "jitter" ), col=rgb(0,0,0,0.2) )
# Gruppenweise Mittelwerte fuer den ganzen Recordset
# wrdInsertText( "Mittelwerte zusammengefasst\n\n" )
# wrdInsertSummary(
# signif( cbind(
# t(as.data.frame( lapply( d.frm, tapply, grp, "mean", na.rm=T )))
# , tot=mean(d.frm, na.rm=T)
# ), 3)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gg_scalelocation.R
\name{gg_scalelocation}
\alias{gg_scalelocation}
\title{Plot scale-location (also called spread-location plot) in ggplot.}
\usage{
gg_scalelocation(fitted.lm, method = "loess", scale.factor = 1,
se = FALSE)
}
\arguments{
\item{fitted.lm}{a fitted linear model (i.e. lm, glm) that contains fitted regression}
\item{method}{smoothing method of fitted line on scale-location plot.
eg. "lm", "glm", "gam", "loess", "rlm". See \url{http://docs.ggplot2.org/current/geom_smooth.html}
for more details.}
\item{scale.factor}{numeric; scales the point size and linewidth to allow customized viewing. Defaults to 1.}
\item{se}{logical; determines whether a standard error (confidence) band should be drawn around the fitted line}
}
\value{
A ggplot object that contains scale-location graph
}
\description{
Plot scale-location (also called spread-location plot) in ggplot.
}
\examples{
library(MASS)
data(Cars93)
cars_lm <- lm(Price ~ Passengers + Length + RPM, data = Cars93)
gg_scalelocation(cars_lm)
}
|
/man/gg_scalelocation.Rd
|
no_license
|
alienzj/lindia
|
R
| false
| true
| 1,056
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gg_scalelocation.R
\name{gg_scalelocation}
\alias{gg_scalelocation}
\title{Plot scale-location (also called spread-location plot) in ggplot.}
\usage{
gg_scalelocation(fitted.lm, method = "loess", scale.factor = 1,
se = FALSE)
}
\arguments{
\item{fitted.lm}{a fitted linear model (i.e. lm, glm) that contains fitted regression}
\item{method}{smoothing method of fitted line on scale-location plot.
eg. "lm", "glm", "gam", "loess", "rlm". See \url{http://docs.ggplot2.org/current/geom_smooth.html}
for more details.}
\item{scale.factor}{numeric; scales the point size and linewidth to allow customized viewing. Defaults to 1.}
\item{se}{logical; determines whether a standard error (confidence) band should be drawn around the fitted line}
}
\value{
A ggplot object that contains scale-location graph
}
\description{
Plot scale-location (also called spread-location plot) in ggplot.
}
\examples{
library(MASS)
data(Cars93)
cars_lm <- lm(Price ~ Passengers + Length + RPM, data = Cars93)
gg_scalelocation(cars_lm)
}
|
anolis.data <- read.csv("anolis.data.csv", header=TRUE)
|
/dataSources/anolis.data.R
|
no_license
|
ghthomas/motmot
|
R
| false
| false
| 56
|
r
|
anolis.data <- read.csv("anolis.data.csv", header=TRUE)
|
# Hierarchical Clustering
# Load the dataset
data("iris")
dataset <- as.data.frame(iris)

# Delete Species column: cluster on the four numeric measurements only
dataset <- dataset[-5]

# Fit hierarchical clustering once (Ward linkage on euclidean distances).
# The original script computed the identical hclust() twice -- once for the
# dendrogram and once for cutting; the single tree is reused for both.
hc = hclust(d = dist(dataset, method = 'euclidean'), method = 'ward.D')

dendrogram = hc
plot(dendrogram,
     main = paste('Dendrogram'),
     xlab = 'Customers',
     ylab = 'Euclidean distances')

# Cut the tree into 3 clusters and plot the assignments
y_hc = cutree(hc, 3)
plot(y_hc)
|
/R/Clustering/Hierarcical Clustering.R
|
no_license
|
ferianrian/Trainee
|
R
| false
| false
| 482
|
r
|
# Hierarchical Clustering
# Load the dataset
data("iris")
dataset <- as.data.frame(iris)

# Delete Species column: cluster on the four numeric measurements only
dataset <- dataset[-5]

# Fit hierarchical clustering once (Ward linkage on euclidean distances).
# The original script computed the identical hclust() twice -- once for the
# dendrogram and once for cutting; the single tree is reused for both.
hc = hclust(d = dist(dataset, method = 'euclidean'), method = 'ward.D')

dendrogram = hc
plot(dendrogram,
     main = paste('Dendrogram'),
     xlab = 'Customers',
     ylab = 'Euclidean distances')

# Cut the tree into 3 clusters and plot the assignments
y_hc = cutree(hc, 3)
plot(y_hc)
|
# Logistic regression on the german credit data with stepwise variable
# selection, a 0.35 probability cutoff and ROC evaluation.
# Fixes relative to the original script:
#  * set.seed() now precedes createDataPartition(), so the split is reproducible
#  * prediction()/performance() are called via their ROCR namespace, which
#    the script previously relied on implicitly (see the old inline comment)
#  * the dimnames of the confusion tables were swapped: 'default' is the
#    observed outcome, 'pred_default' the prediction
#  * seq(0.1, by=0.1) had no endpoint (an error); the intended cutoff grid
#    0.1..0.9 is spelled out
german_credit <- read.csv("C:/Users/asif/Downloads/german_credit.csv",header = T)
str(german_credit)
summary(german_credit$installment_as_income_perc)

# the response and the categorical/ordinal predictors must be factors
german_credit$default<-as.factor(german_credit$default)
str(german_credit)
summary(german_credit$credit_history)
# installment_as_income perc,present_res_since,credit_this_bank,job as factors
german_credit$installment_as_income_perc<-as.factor(german_credit$installment_as_income_perc)
german_credit$present_res_since<-as.factor(german_credit$present_res_since)
german_credit$credits_this_bank<-as.factor(german_credit$credits_this_bank)
german_credit$job<-as.factor(german_credit$job)
german_credit$people_under_maintenance<-as.factor(german_credit$people_under_maintenance)
str(german_credit)

library(caret)
library(lattice)
library(ggplot2)

# 75/25 train/test split; seed set first so the partition is reproducible
set.seed(123)
ind<-createDataPartition(german_credit$default,p=0.75,list=F)
training_default<-german_credit[ind,]
testing_default<-german_credit[-ind,]

# full and null model as bounds for the stepwise search
default_model<-glm(default~.,data=training_default,family = "binomial")
default_null<-glm(default~1,data=training_default,family = "binomial")
summary(default_model)

# run stepwise regression
step(default_null,direction = "forward",scope=list(lower=default_null,upper=default_model))
step(default_null,direction="backward",scope=list(lower=default_null,upper=default_model))

# model with the variables retained by the stepwise search
bin_model<-glm(formula = default ~ account_check_status + credit_history +
                 duration_in_month + housing + purpose + foreign_worker +
                 present_emp_since + personal_status_sex + installment_as_income_perc +
                 credit_amount + other_installment_plans + age, family = "binomial",
               data = training_default)

training_default$pred_prob<-predict(bin_model,type="response")
head(training_default)

# ROC analysis (ROCR): probabilities vs observed class
pred<-ROCR::prediction(training_default$pred_prob,training_default$default)
# classify with a 0.35 probability cutoff
training_default$pred_default<-ifelse(training_default$pred_prob>0.35,"1","0")
table(predicted=training_default$pred_default,actual=training_default$default)
perf<-ROCR::performance(pred,"tpr","fpr")
plot(perf,colorize=T,print.cutoffs.at=seq(0.1,0.9,by=0.1))

(410+152)/750 # accuracy
152/(152+115) # sensitivity
410/(410+73) # specificity

confusionMatrix(table(predicted=training_default$pred_default,actual=training_default$default))
# Naive Bayes- mutually exclusive
|
/NaiveBayes.R
|
no_license
|
prathmesh2998/R-program
|
R
| false
| false
| 2,314
|
r
|
# Logistic regression on the german credit data with stepwise variable
# selection, a 0.35 probability cutoff and ROC evaluation.
# Fixes relative to the original script:
#  * set.seed() now precedes createDataPartition(), so the split is reproducible
#  * prediction()/performance() are called via their ROCR namespace, which
#    the script previously relied on implicitly (see the old inline comment)
#  * the dimnames of the confusion tables were swapped: 'default' is the
#    observed outcome, 'pred_default' the prediction
#  * seq(0.1, by=0.1) had no endpoint (an error); the intended cutoff grid
#    0.1..0.9 is spelled out
german_credit <- read.csv("C:/Users/asif/Downloads/german_credit.csv",header = T)
str(german_credit)
summary(german_credit$installment_as_income_perc)

# the response and the categorical/ordinal predictors must be factors
german_credit$default<-as.factor(german_credit$default)
str(german_credit)
summary(german_credit$credit_history)
# installment_as_income perc,present_res_since,credit_this_bank,job as factors
german_credit$installment_as_income_perc<-as.factor(german_credit$installment_as_income_perc)
german_credit$present_res_since<-as.factor(german_credit$present_res_since)
german_credit$credits_this_bank<-as.factor(german_credit$credits_this_bank)
german_credit$job<-as.factor(german_credit$job)
german_credit$people_under_maintenance<-as.factor(german_credit$people_under_maintenance)
str(german_credit)

library(caret)
library(lattice)
library(ggplot2)

# 75/25 train/test split; seed set first so the partition is reproducible
set.seed(123)
ind<-createDataPartition(german_credit$default,p=0.75,list=F)
training_default<-german_credit[ind,]
testing_default<-german_credit[-ind,]

# full and null model as bounds for the stepwise search
default_model<-glm(default~.,data=training_default,family = "binomial")
default_null<-glm(default~1,data=training_default,family = "binomial")
summary(default_model)

# run stepwise regression
step(default_null,direction = "forward",scope=list(lower=default_null,upper=default_model))
step(default_null,direction="backward",scope=list(lower=default_null,upper=default_model))

# model with the variables retained by the stepwise search
bin_model<-glm(formula = default ~ account_check_status + credit_history +
                 duration_in_month + housing + purpose + foreign_worker +
                 present_emp_since + personal_status_sex + installment_as_income_perc +
                 credit_amount + other_installment_plans + age, family = "binomial",
               data = training_default)

training_default$pred_prob<-predict(bin_model,type="response")
head(training_default)

# ROC analysis (ROCR): probabilities vs observed class
pred<-ROCR::prediction(training_default$pred_prob,training_default$default)
# classify with a 0.35 probability cutoff
training_default$pred_default<-ifelse(training_default$pred_prob>0.35,"1","0")
table(predicted=training_default$pred_default,actual=training_default$default)
perf<-ROCR::performance(pred,"tpr","fpr")
plot(perf,colorize=T,print.cutoffs.at=seq(0.1,0.9,by=0.1))

(410+152)/750 # accuracy
152/(152+115) # sensitivity
410/(410+73) # specificity

confusionMatrix(table(predicted=training_default$pred_default,actual=training_default$default))
# Naive Bayes- mutually exclusive
|
#' Retrieve the global preferences of the currently logged in user
#'
#' @export
#' @param parse (logical) If \code{TRUE} (the default), attempt to parse
#'   the response into a data.frame.
#' @template curl
#' @return A data.frame when parsing succeeds, otherwise a list.
#' @examples \dontrun{
#' prefs()
#' }
prefs <- function(parse = TRUE, ...) {
  # fetch the raw response and hand it straight to the parser
  asp_parse(asp_GET("current_global_preferences", list(), ...), parse)
}
|
/R/prefs.R
|
no_license
|
sckott/aspacer
|
R
| false
| false
| 387
|
r
|
#' Retrieve the global preferences of the currently logged in user
#'
#' @export
#' @param parse (logical) If \code{TRUE} (the default), attempt to parse
#'   the response into a data.frame.
#' @template curl
#' @return A data.frame when parsing succeeds, otherwise a list.
#' @examples \dontrun{
#' prefs()
#' }
prefs <- function(parse = TRUE, ...) {
  # fetch the raw response and hand it straight to the parser
  asp_parse(asp_GET("current_global_preferences", list(), ...), parse)
}
|
# FizzBuzz: print the numbers 1 to 100, substituting "Fizz" for multiples
# of 3, "Buzz" for multiples of 5 and "FizzBuzz" for multiples of both
# (i.e. multiples of 15).
for (n in 1:100) {
  word <- if (n %% 15 == 0) "FizzBuzz" else if (n %% 3 == 0) "Fizz" else if (n %% 5 == 0) "Buzz" else n
  print(word)
}
|
/WID W9 Homework extra2.R
|
no_license
|
Faybeee/Session-9-Homework
|
R
| false
| false
| 350
|
r
|
# FizzBuzz: print the numbers 1 to 100, substituting "Fizz" for multiples
# of 3, "Buzz" for multiples of 5 and "FizzBuzz" for multiples of both
# (i.e. multiples of 15).
for (n in 1:100) {
  word <- if (n %% 15 == 0) "FizzBuzz" else if (n %% 3 == 0) "Fizz" else if (n %% 5 == 0) "Buzz" else n
  print(word)
}
|
# Run this script to generate all the country PDF reports for Investment Climate (FCV) only
# List of countries is based on intersection of TCdata360 country list and Harmonized FCV 2017 list (from WBG IC-FCS team)
##################################

# setwd() to handle images and other files
# NOTE(review): hard-coded user path -- adjust before running on another machine
setwd('/Users/mrpso/Documents/GitHub/reportGenerator360/')
# source('global_utils.R') # data and functions needed
source('helper_functions.R') # charts and table functions needed
# source('templates/FCV_charts.R') # run preprocessing code in FCV_charts.R

# Create the data reports --------------------------------------
# read the FCV iso3 country codes (one per line, no header -> column V1)
fcv_coulist <- read.csv('templates/FCV_iso3_countrylist.csv', header=FALSE)
include <- fcv_coulist$V1

# generate one "FCV" report per matching country name
# (presumably 'countries' and '.reportGenerator' come from helper_functions.R -- confirm)
for (couName in filter(countries, (iso3 %in% include))$name) {
  .reportGenerator(couName, "FCV")
}
|
/Report_Generator_FCVonly.R
|
no_license
|
asRodelgo/reportGenerator360
|
R
| false
| false
| 817
|
r
|
# Run this script to generate all the country PDF reports for Investment Climate (FCV) only
# List of countries is based on intersection of TCdata360 country list and Harmonized FCV 2017 list (from WBG IC-FCS team)
##################################

# setwd() to handle images and other files
# NOTE(review): hard-coded user path -- adjust before running on another machine
setwd('/Users/mrpso/Documents/GitHub/reportGenerator360/')
# source('global_utils.R') # data and functions needed
source('helper_functions.R') # charts and table functions needed
# source('templates/FCV_charts.R') # run preprocessing code in FCV_charts.R

# Create the data reports --------------------------------------
# read the FCV iso3 country codes (one per line, no header -> column V1)
fcv_coulist <- read.csv('templates/FCV_iso3_countrylist.csv', header=FALSE)
include <- fcv_coulist$V1

# generate one "FCV" report per matching country name
# (presumably 'countries' and '.reportGenerator' come from helper_functions.R -- confirm)
for (couName in filter(countries, (iso3 %in% include))$name) {
  .reportGenerator(couName, "FCV")
}
|
# ____________________________________________________________________________
# Server ####

library(shiny)
library(plotly)
library(magrittr)
library(shinyjs)
library(stringr)
library(RColorBrewer)
library(DT)
library(shinyBS)
library(shinycssloaders)
library(STDAP)
library(shinyalert)
library(shinyWidgets)
library(waiter)

# Shiny server entry point. The individual analysis steps live in separate
# module files under modules/, sourced into this server function's
# environment (local = T) so they share session/input/output.
shinyServer(function(session, input, output) {

  waiter_hide() # hide the waiter

  # lazily read the KEGG species table shipped with the app
  kegg_species <- reactive({
    readRDS("www/Species/kegg_species.rds")
  })

  # touch the reactive once so the rds gets loaded
  observe({ kegg_species() })

  source("modules/1-server-get-start.R", local = T)
  source("modules/2-server-condition.R", local = T)
  source("modules/3-server-pca.R", local = T)
  source("modules/4-server-hierarchical-cluster.R", local = T)
  source("modules/5-server-sample-distance.R", local = T)
  source("modules/6-server-sample-correlation.R", local = T)
  source("modules/7-server-differential-analysis.R", local = T)
  source("modules/8-server-degs-patterns.R", local = T)
  source("modules/9-server-expression-visualization.R", local = T)
  source("modules/10-server-wgcna-prepare-data.R", local = T)
  source("modules/11-server-wgcna-detect-module.R", local = T)
  source("modules/12-server-wgcna-module-trait.R", local = T)
  source("modules/13-server-clusterProfiler.R", local = T)
  source("modules/14-server-gProfiler.R", local = T)

  ## ............................................................................
  ## Neighborhood browser ####

  ## ............................................................................
  ## Map chart ####
})
|
/inst/shiny/myApp/server.R
|
permissive
|
XPL1986/QRseq
|
R
| false
| false
| 1,754
|
r
|
# ____________________________________________________________________________
# Server ####

library(shiny)
library(plotly)
library(magrittr)
library(shinyjs)
library(stringr)
library(RColorBrewer)
library(DT)
library(shinyBS)
library(shinycssloaders)
library(STDAP)
library(shinyalert)
library(shinyWidgets)
library(waiter)

# Shiny server entry point. The individual analysis steps live in separate
# module files under modules/, sourced into this server function's
# environment (local = T) so they share session/input/output.
shinyServer(function(session, input, output) {

  waiter_hide() # hide the waiter

  # lazily read the KEGG species table shipped with the app
  kegg_species <- reactive({
    readRDS("www/Species/kegg_species.rds")
  })

  # touch the reactive once so the rds gets loaded
  observe({ kegg_species() })

  source("modules/1-server-get-start.R", local = T)
  source("modules/2-server-condition.R", local = T)
  source("modules/3-server-pca.R", local = T)
  source("modules/4-server-hierarchical-cluster.R", local = T)
  source("modules/5-server-sample-distance.R", local = T)
  source("modules/6-server-sample-correlation.R", local = T)
  source("modules/7-server-differential-analysis.R", local = T)
  source("modules/8-server-degs-patterns.R", local = T)
  source("modules/9-server-expression-visualization.R", local = T)
  source("modules/10-server-wgcna-prepare-data.R", local = T)
  source("modules/11-server-wgcna-detect-module.R", local = T)
  source("modules/12-server-wgcna-module-trait.R", local = T)
  source("modules/13-server-clusterProfiler.R", local = T)
  source("modules/14-server-gProfiler.R", local = T)

  ## ............................................................................
  ## Neighborhood browser ####

  ## ............................................................................
  ## Map chart ####
})
|
library(SSrat)
### Name: example1.rat
### Title: Example 1 of rating data that can be processed further to obtain
### social status determinations
### Aliases: example1.rat
### Keywords: datasets

### ** Examples

# attach the example rating dataset shipped with SSrat to the workspace
data(example1.rat)
|
/data/genthat_extracted_code/SSrat/examples/example1.rat.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 242
|
r
|
library(SSrat)
### Name: example1.rat
### Title: Example 1 of rating data that can be processed further to obtain
### social status determinations
### Aliases: example1.rat
### Keywords: datasets

### ** Examples

# attach the example rating dataset shipped with SSrat to the workspace
data(example1.rat)
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../../h2o-runit.R')

# Check the H2O model-metric accessors (h2o.mse, h2o.auc, ...) on a binomial
# GLM trained with and without a validation_frame. Every accessor obeys the
# same three invariants, so the ten near-identical per-metric blocks of the
# original are factored into a single helper:
#   * valid = TRUE on a model without validation frame must warn
#   * accessor(model.valid) equals accessor(model)   (training metrics)
#   * accessor(model.valid, valid = TRUE) must differ from the training value
test.glm.bin.accessors <- function(conn) {
  Log.info("Making glm with and without validation_frame...")
  pros.hex <- h2o.uploadFile(conn, locate("smalldata/prostate/prostate.csv.zip"))
  # encode the categorical columns as factors
  pros.hex[,2] <- as.factor(pros.hex[,2])
  pros.hex[,4] <- as.factor(pros.hex[,4])
  pros.hex[,5] <- as.factor(pros.hex[,5])
  pros.hex[,6] <- as.factor(pros.hex[,6])
  pros.hex[,9] <- as.factor(pros.hex[,9])

  # random ~80/20 split into train and test frames
  p.sid <- h2o.runif(pros.hex)
  pros.train <- h2o.assign(pros.hex[p.sid > .2, ], "pros.train")
  pros.test <- h2o.assign(pros.hex[p.sid <= .2, ], "pros.test")

  pros.glm <- h2o.glm(x = 3:9, y = 2, training_frame = pros.train, family = "binomial")
  pros.glm.valid <- h2o.glm(x = 3:9, y = 2, training_frame = pros.train,
                            validation_frame = pros.test, family = "binomial")

  # run the standard accessor checks for one metric function 'fun';
  # 'label' reproduces the original log messages ("MSE...", "AUC...", ...)
  checkAccessor <- function(label, fun) {
    Log.info(paste(label, "...", sep = ""))
    basic <- fun(pros.glm)
    print(basic)
    expect_warning(fun(pros.glm, valid = TRUE))
    valid.F <- fun(pros.glm.valid)
    valid.T <- fun(pros.glm.valid, valid = TRUE)
    print(valid.T)
    expect_equal(basic, valid.F)               # basic should equal valid with valid = FALSE
    expect_error(expect_equal(basic, valid.T)) # ...but differ from the validation metric
  }

  checkAccessor("MSE", h2o.mse)
  checkAccessor("R^2", h2o.r2)
  checkAccessor("LogLoss", h2o.logloss)
  checkAccessor("AUC", h2o.auc)
  checkAccessor("Gini", h2o.giniCoef)
  checkAccessor("Null Deviance", h2o.null_deviance)
  checkAccessor("Residual Deviance", h2o.residual_deviance)
  checkAccessor("AIC", h2o.aic)
  checkAccessor("Degrees of Freedom", h2o.residual_dof)
  checkAccessor("Null Degrees of Freedom", h2o.null_dof)

  Log.info("Variable Importance...")
  print(h2o.varimp(pros.glm))

  testEnd()
}

doTest("Testing model accessors for GLM", test.glm.bin.accessors)
|
/h2o-r/tests/testdir_algos/glm/runit_NOPASS_GLM_accessors_binomial.R
|
permissive
|
dts3/h2o-3
|
R
| false
| false
| 5,140
|
r
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../../h2o-runit.R')
# Checks that model-metric accessors behave consistently for GLM binomial
# models trained with and without a validation_frame:
#   * accessor(model)               -> training metric
#   * accessor(model, valid = TRUE) -> warns when the model has no validation frame
#   * the training metric equals accessor(validated model, valid = FALSE)
#     and differs from accessor(validated model, valid = TRUE).
test.glm.bin.accessors <- function(conn) {
  Log.info("Making glm with and without validation_frame...")
  pros.hex <- h2o.uploadFile(conn, locate("smalldata/prostate/prostate.csv.zip"))
  # Coerce the categorical columns (response + categorical predictors) to factors.
  for (col in c(2, 4, 5, 6, 9)) {
    pros.hex[, col] <- as.factor(pros.hex[, col])
  }
  # Random ~80/20 train/test split.
  p.sid <- h2o.runif(pros.hex)
  pros.train <- h2o.assign(pros.hex[p.sid > .2, ], "pros.train")
  pros.test <- h2o.assign(pros.hex[p.sid <= .2, ], "pros.test")
  pros.glm <- h2o.glm(x = 3:9, y = 2, training_frame = pros.train, family = "binomial")
  pros.glm.valid <- h2o.glm(x = 3:9, y = 2, training_frame = pros.train,
                            validation_frame = pros.test, family = "binomial")

  # One uniform check per accessor; factored out to avoid ten copies of the
  # same five assertions.
  check_accessor <- function(label, accessor) {
    Log.info(paste0(label, "..."))
    metric.basic <- accessor(pros.glm)
    print(metric.basic)
    expect_warning(accessor(pros.glm, valid = TRUE))  # no validation frame -> warn
    metric.valid.F <- accessor(pros.glm.valid)
    metric.valid.T <- accessor(pros.glm.valid, valid = TRUE)
    print(metric.valid.T)
    expect_equal(metric.basic, metric.valid.F)  # basic should equal valid with valid = FALSE
    expect_error(expect_equal(metric.basic, metric.valid.T))
  }

  check_accessor("MSE", h2o.mse)
  check_accessor("R^2", h2o.r2)
  check_accessor("LogLoss", h2o.logloss)
  check_accessor("AUC", h2o.auc)
  check_accessor("Gini", h2o.giniCoef)
  check_accessor("Null Deviance", h2o.null_deviance)
  check_accessor("Residual Deviance", h2o.residual_deviance)
  check_accessor("AIC", h2o.aic)
  check_accessor("Degrees of Freedom", h2o.residual_dof)
  check_accessor("Null Degrees of Freedom", h2o.null_dof)

  Log.info("Variable Importance...")
  print(h2o.varimp(pros.glm))
  testEnd()
}
doTest("Testing model accessors for GLM", test.glm.bin.accessors)
|
## Fetch building electricity-usage data from the Korean public-data API
## (data.go.kr, BldEngyService/getBeElctyUsgInfo).
library(XML)
library(httr)  # provides GET() / content(); was missing in the original
api.url <- "http://apis.data.go.kr/1611000/BldEngyService/getBeElctyUsgInfo?"  # public-data API endpoint
service.key <- "8PZnRzZb4yXsXJQVBDX74xuf8kHhF4cmY5XnEO9apteNWtahGwpA9%2FjrthHB0tX7GBlm9zN1A%2F0rKCx3wGe27g%3D%3D"  # service key
## Query parameters for selecting a specific record; not all are required.
## (Original had a trailing comma after each assignment, which is an R syntax
## error -- removed.)
## NOTE(review): vector(mode=..., length=n) creates n empty strings, so the
## URL below is built from empty parameter values (and is vectorised to
## length > 1); these placeholders must be filled in before the request can work.
rnum <- vector(mode="numeric", length=8)            # row number
useYm <- vector(mode="character", length=6)         # usage year-month (required)
platPlc <- vector(mode="character", length=500)     # lot address (required)
newPlatPlc <- vector(mode="character", length=400)  # road-name address
sigunguCd <- vector(mode="character", length=5)     # district (sigungu) code
bjdongCd <- vector(mode="character", length=5)      # legal-dong code (required)
platGbCd <- vector(mode="character", length=1)      # lot-type code (required)
bun <- vector(mode="character", length=4)           # main lot number ("bun")
ji <- vector(mode="character", length=4)            # sub lot number ("ji")
naRoadCd <- vector(mode="character", length=4)      # new-address road code
naUgrndCd <- vector(mode="character", length=12)    # new-address ground/underground code
naMainBun <- vector(mode="character", length=5)     # new-address main number
naSubBun <- vector(mode="character", length=5)      # new-address sub number
useQty <- vector(mode="character", length=22)       # usage amount
final.url <- paste0(api.url, "sigunguCd=", sigunguCd, "&bjdongCd=", bjdongCd,
                    "&bun=", bun, "&ji=", ji, "&ServiceKey=", service.key)
req <- GET(final.url)
building.energy.data <- content(req, as="parsed", type="application/json", encoding = "utf-8")
## TODO: all parameters are currently entered by hand; this should be linked
## to the administrative legal-dong code system.
|
/building-energy.R
|
no_license
|
youngji-cho/energy-finance
|
R
| false
| false
| 1,626
|
r
|
## Fetch building electricity-usage data from the Korean public-data API
## (data.go.kr, BldEngyService/getBeElctyUsgInfo).
library(XML)
library(httr)  # provides GET() / content(); was missing in the original
api.url <- "http://apis.data.go.kr/1611000/BldEngyService/getBeElctyUsgInfo?"  # public-data API endpoint
service.key <- "8PZnRzZb4yXsXJQVBDX74xuf8kHhF4cmY5XnEO9apteNWtahGwpA9%2FjrthHB0tX7GBlm9zN1A%2F0rKCx3wGe27g%3D%3D"  # service key
## Query parameters for selecting a specific record; not all are required.
## (Original had a trailing comma after each assignment, which is an R syntax
## error -- removed.)
## NOTE(review): vector(mode=..., length=n) creates n empty strings, so the
## URL below is built from empty parameter values (and is vectorised to
## length > 1); these placeholders must be filled in before the request can work.
rnum <- vector(mode="numeric", length=8)            # row number
useYm <- vector(mode="character", length=6)         # usage year-month (required)
platPlc <- vector(mode="character", length=500)     # lot address (required)
newPlatPlc <- vector(mode="character", length=400)  # road-name address
sigunguCd <- vector(mode="character", length=5)     # district (sigungu) code
bjdongCd <- vector(mode="character", length=5)      # legal-dong code (required)
platGbCd <- vector(mode="character", length=1)      # lot-type code (required)
bun <- vector(mode="character", length=4)           # main lot number ("bun")
ji <- vector(mode="character", length=4)            # sub lot number ("ji")
naRoadCd <- vector(mode="character", length=4)      # new-address road code
naUgrndCd <- vector(mode="character", length=12)    # new-address ground/underground code
naMainBun <- vector(mode="character", length=5)     # new-address main number
naSubBun <- vector(mode="character", length=5)      # new-address sub number
useQty <- vector(mode="character", length=22)       # usage amount
final.url <- paste0(api.url, "sigunguCd=", sigunguCd, "&bjdongCd=", bjdongCd,
                    "&bun=", bun, "&ji=", ji, "&ServiceKey=", service.key)
req <- GET(final.url)
building.energy.data <- content(req, as="parsed", type="application/json", encoding = "utf-8")
## TODO: all parameters are currently entered by hand; this should be linked
## to the administrative legal-dong code system.
|
# Supplementary Figure 2: boxplots of a proportion (x5) per method (x3),
# faceted by x6, with dotted per-group median reference lines.
library(ggpubr)
library(dplyr)
#### read data ####
load("SFig2.RData")  # provides `dat` (columns x3 = method, x4, x5, x6 = facet)
#### pplot ####
# Per-(method, facet) medians used for the dotted reference lines.
# NOTE(review): medians are computed on x4 while the boxplots show x5 --
# confirm this mismatch is intentional.
med_dat <- dat %>% group_by(x3,x6) %>% summarise(med = median(x4))
# Boxplots without outlier points, y-axis clipped to [0.25, 0.4].
p <- ggplot(dat, aes(x=x3, y=x5, group=x3)) + geom_boxplot(aes(color=x3),outlier.shape = NA) +theme_classic(base_size=10) +
ylab("Proportion") + xlab("Method") + theme(legend.position="none") + ylim(0.25,0.4) +
geom_hline(data= med_dat, aes( yintercept=med, col=x3),linetype="dotted" ) + facet_grid(.~x6)
ggsave("CC_prop.png", p, dpi=500)
|
/paper/Figures/SFig2.R
|
permissive
|
Sandyyy123/PGS-LMM
|
R
| false
| false
| 502
|
r
|
# Supplementary Figure 2: boxplots of a proportion (x5) per method (x3),
# faceted by x6, with dotted per-group median reference lines.
library(ggpubr)
library(dplyr)
#### read data ####
load("SFig2.RData")  # provides `dat` (columns x3 = method, x4, x5, x6 = facet)
#### pplot ####
# Per-(method, facet) medians used for the dotted reference lines.
# NOTE(review): medians are computed on x4 while the boxplots show x5 --
# confirm this mismatch is intentional.
med_dat <- dat %>% group_by(x3,x6) %>% summarise(med = median(x4))
# Boxplots without outlier points, y-axis clipped to [0.25, 0.4].
p <- ggplot(dat, aes(x=x3, y=x5, group=x3)) + geom_boxplot(aes(color=x3),outlier.shape = NA) +theme_classic(base_size=10) +
ylab("Proportion") + xlab("Method") + theme(legend.position="none") + ylim(0.25,0.4) +
geom_hline(data= med_dat, aes( yintercept=med, col=x3),linetype="dotted" ) + facet_grid(.~x6)
ggsave("CC_prop.png", p, dpi=500)
|
#' Save API credentials for later use
#'
#' This function caches the application key in the package-internal
#' `auth_cache` environment (under the name `KEY`) so other functions can
#' reuse it without the user re-entering it.
#' @param app_key application key
#' @examples
#' # since no checking is performed (not to waste API calls)
#' # it falls on the user to save correct information
#' save_walmart_credentials("APP_KEY")
#' @export
save_walmart_credentials <- function(app_key) {
  # An empty string is ignored: the cached key (if any) is left untouched.
  if (app_key != "") {
    assign("KEY", app_key, envir = auth_cache)
  }
}
|
/R/client.R
|
permissive
|
EmilHvitfeldt/walmartAPI
|
R
| false
| false
| 479
|
r
|
#' Save API credentials for later use
#'
#' This function caches the application key in the package-internal
#' `auth_cache` environment (under the name `KEY`) so other functions can
#' reuse it without the user re-entering it.
#' @param app_key application key
#' @examples
#' # since no checking is performed (not to waste API calls)
#' # it falls on the user to save correct information
#' save_walmart_credentials("APP_KEY")
#' @export
save_walmart_credentials <- function(app_key) {
  # An empty string is ignored: the cached key (if any) is left untouched.
  if (app_key != "") {
    assign("KEY", app_key, envir = auth_cache)
  }
}
|
## Dimensionality-reduction benchmark: embed a digits/image dataset with
## several methods (PCA, kPCA, Isomap, MDS, LLE, LTSA, UMAP, NMF), cluster
## each embedding with Mclust (G = 10), and compare the clusterings to the
## true labels via NMI / ARI.
## NOTE(review): exploratory script -- each data-loading section below
## overwrites X.train / Class.train, so only the section executed last
## feeds the embedding steps.
library(FactoMineR)
library(dimRed)
library(reshape2)
library(ggplot2)
library(tm)
library(stringr)
library(NMIcode)
library(LICORS)
library(readr)
library(keras)
library(mclust)

## ---- Pen-digits ----
pen.tra = read.table("/Users/jzk/Documents/M2/reducDimold/penDigitss/pendigits.tra", sep = ",")
pen.tes = read.table("/Users/jzk/Documents/M2/reducDimold/penDigitss/pendigits.tes", sep = ",")
pen = rbind(pen.tra, pen.tes)
dim(pen.tra)
X.train = pen.tra[, -17]      # column 17 holds the digit label
Class.train = pen.tra[, 17]
X.test = pen.tes[, -17]
Class.test = pen.tes[, 17]

## ---- MNIST (via tensorflow) ----
library(tensorflow)
datasets <- tf$contrib$learn$datasets
mnist <- datasets$mnist$read_data_sets("MNIST-data", one_hot = FALSE)
dim(mnist$train$images)
X.train = mnist$test$images
Class.train = as.vector(mnist$test$labels)
X.train = mnist$train$images           # overrides the test split just above
Class.train = as.vector(mnist$train$labels)

## ---- USPS (HDF5) ----
source("http://bioconductor.org/biocLite.R")
biocLite("rhdf5")
library(rhdf5)
usps = h5read("/Users/jzk/Documents/M2/reducDim/usps.h5", "/train")
usps_tr = usps$data
usps_class = usps$target
X.train = t(usps_tr)
Class.train = as.vector(usps_class)

## ---- Fashion-MNIST (CSV) ----
X.train = read_csv("/Users/jzk/Documents/M2/reducDimold/fashionmnist/fashion-mnist_test.csv", col_types = cols(.default = "i"))
Class.train = X.train$label
head(X.train)
X.train = X.train[, -1]                # drop the label column
X.train = as.matrix(X.train)
dim(X.train)

######### PCA
library(aricode)     # NMI() / ARI()
library(MLmetrics)
library(caret)

## Shared evaluation used by all embedding sections below (replaces eight
## copy-pasted loops). Clusters the embedding with Mclust (G = 10) `reps`
## times and collects NMI / ARI against the true labels; the original's
## cbind() + as.vector() accumulation is equivalent to c().
eval_embedding <- function(red, labels, reps = 10) {
  nmi <- c()
  ari <- c()
  for (i in seq_len(reps)) {
    cl <- Mclust(red, G = 10)
    nmi <- c(nmi, NMI(cl$classification, labels))
    ari <- c(ari, ARI(cl$classification, labels))
  }
  list(nmi = nmi, ari = ari)
}

PCA = FactoMineR::PCA(X.train)
barplot(PCA$eig[, 1], main = "Eigenvalues", names.arg = 1:nrow(PCA$eig))
summary(PCA)
scores <- eval_embedding(PCA$ind$coord, Class.train)
NMIPCA = scores$nmi; mean(NMIPCA)
ARIPCA = scores$ari; mean(ARIPCA)
boxplot(NMIPCA)
boxplot(ARIPCA)
write.csv(NMIPCA, "/Users/jzk/Documents/M2/projet/NMI/NMIPCAFMNIST.csv")
write.csv(ARIPCA, "/Users/jzk/Documents/M2/projet/ARI/ARIPCAFMNIST.csv")

#### kPCA
library(kernlab)
## NOTE(review): the original computed three alternative kernel PCAs and
## kept only the last; the discarded calls are not re-executed here.
KPCA <- kpca(~., data = X.train, kernel = "rbfdot", kpar = list(sigma = 0.2), features = 2)
slot(KPCA, "xmatrix")
## kmeans (unlike Mclust) is randomly initialised, so repeats differ.
nmi <- c()
ari <- c()
for (i in 1:10) {
  cl <- kmeans(slot(KPCA, "xmatrix")[, c(1:2)], 10)
  nmi <- c(nmi, NMI(cl$cluster, Class.train))
  ari <- c(ari, ARI(cl$cluster, Class.train))
}
NMIKPCA = nmi
ARIKPCA = ari
boxplot(NMIKPCA)
boxplot(ARIKPCA)

#### Isomap
ISO <- embed(X.train, "Isomap", .mute = NULL, knn = 15, ndim = 5)
plot(ISO, type = "2vars")
scores <- eval_embedding(ISO@data@data, Class.train)
NMIISO = scores$nmi
ARIISO = scores$ari
boxplot(NMIISO)
boxplot(ARIISO)
write.csv(NMIISO, "/Users/jzk/Documents/M2/projet/NMI/NMIISOFMNIST.csv")
write.csv(ARIISO, "/Users/jzk/Documents/M2/projet/ARI/ARIISOFMNIST.csv")

## MDS
library(MASS)
d <- dist(X.train, method = "euclidean")  # euclidean distances between the rows
fit <- isoMDS(d, k = 7)                   # k is the target dimension
fit                                       # view results
red = fit$points
## Precomputed embedding + labels (override the isoMDS result above):
red = read_csv("/Users/jzk/Documents/M2/projet/projet/MDS.csv"); red = as.matrix(red)
Class.train = read_csv("/Users/jzk/Documents/M2/projet/projet/label.csv")
Class.train = Class.train$`7.000000000000000000e+00`
scores <- eval_embedding(red, Class.train)
NMIMDS = scores$nmi
ARIMDS = scores$ari
boxplot(NMIMDS)
boxplot(ARIMDS)
write.csv(NMIMDS, "/Users/jzk/Documents/M2/projet/NMI/NMIMDSFMNIST.csv")
write.csv(ARIMDS, "/Users/jzk/Documents/M2/projet/ARI/ARIMDSFMNIST.csv")
NMIMDS

### LLE
library(lle)
red <- lle(X.train, m = 2, k = 10, reg = 2, ss = FALSE, id = TRUE, v = 0.9)
red <- red$Y  # keep only the embedding from the lle() result list
## Precomputed embedding (overrides the lle() result above). BUG FIX: the
## original took `$Y` AFTER as.matrix(read_csv(...)), which errors on a
## matrix -- the loaded CSV already *is* the embedding.
red = read_csv("/Users/jzk/Documents/M2/projet/projet/LLE.csv"); red = as.matrix(red)
scores <- eval_embedding(red, Class.train)
NMILLE = scores$nmi
ARILLE = scores$ari
boxplot(NMILLE)
boxplot(ARILLE)
write.csv(NMILLE, "/Users/jzk/Documents/M2/projet/NMI/NMILLEFMNIST.csv")
write.csv(ARILLE, "/Users/jzk/Documents/M2/projet/ARI/ARILLEFMNIST.csv")

### LTSA
library(Rdimtools)
red = do.ltsa(X.train, ndim = 5, type = c("proportion", 0.1))
red = red$Y
## Precomputed embedding + labels (override the do.ltsa result above):
red = read_csv("/Users/jzk/Documents/M2/projet/projet/LTSA.csv")
Class.train = read_csv("/Users/jzk/Documents/M2/projet/projet/label.csv")
Class.train = Class.train$`7.000000000000000000e+00`
red = as.matrix(red)
scores <- eval_embedding(red, Class.train)
NMILTSA = scores$nmi  # renamed from the original's misleading reuse of NMILLE
ARILTSA = scores$ari
boxplot(NMILTSA); mean(NMILTSA)
boxplot(ARILTSA); mean(ARILTSA)
write.csv(NMILTSA, "/Users/jzk/Documents/M2/projet/NMI/NMILTSAFMNIST.csv")
write.csv(ARILTSA, "/Users/jzk/Documents/M2/projet/ARI/ARILTSAFMNIST.csv")

### UMAP (embedding precomputed externally)
red = read_csv("/Users/jzk/Documents/M2/projet/projet/UMAP.csv")
Class.train = read_csv("/Users/jzk/Documents/M2/projet/projet/label.csv")
Class.train = Class.train$`7.000000000000000000e+00`
red = as.matrix(red)
scores <- eval_embedding(red, Class.train)
NMIUMAP = scores$nmi  # renamed from the original's misleading reuse of NMILLE
ARIUMAP = scores$ari
boxplot(NMIUMAP); mean(NMIUMAP)
boxplot(ARIUMAP); mean(ARIUMAP)
write.csv(NMIUMAP, "/Users/jzk/Documents/M2/projet/NMI/NMIUMAPFMNIST.csv")
write.csv(ARIUMAP, "/Users/jzk/Documents/M2/projet/ARI/ARIUMAPFMNIST.csv")

#### NMF
library(Matrix)
library(NMF)
library(tidytext)
library(slam)
library(dplyr)
library(SnowballC)
library(skmeans)
library(textir)
library(stm)
library(factoextra)
library(foreach)
library(doParallel)
library(fastICA)
library(wordcloud)
library(topicmodels)
data_used.tfidf = X.train
## Uniform weight matrix (all ones) for weighted ls-nmf.
weight = Matrix(rep(1, dim(data_used.tfidf)[1] * dim(data_used.tfidf)[2]), nrow = dim(data_used.tfidf)[1]); dim(weight)
res = nmf(X.train, 10, method = "ls-nmf", .options = "vt", seed = 'nndsvd', weight = as.matrix(weight))
res.coef <- coef(res)   # H: factor-by-feature matrix
res.bas <- basis(res)   # W: sample-by-factor matrix, used as the embedding
heatmap(res.bas)
scores <- eval_embedding(res.bas, Class.train)
NMINMF = scores$nmi  # renamed from the original's misleading reuse of NMILLE
ARINMF = scores$ari
boxplot(NMINMF)
boxplot(ARINMF)
write.csv(NMINMF, "/Users/jzk/Documents/M2/projet/NMI/NMINMFMNIST.csv")
write.csv(ARINMF, "/Users/jzk/Documents/M2/projet/ARI/ARINMFMNIST.csv")

#### Load the saved NMI results
NMIAE <- read_csv("~/Documents/M2/projet/NMI/AE_NMI-2.csv")
NMIAE = NMIAE$x
## Hardcoded override of the CSV values (kept from the original):
NMIAE = c(0.619148480401683, 0.622058475409507, 0.599357598184059, 0.611669946777276, 0.612980116108482,
          0.6194605223113, 0.611809445804786, 0.609646195077058, 0.620292913556716, 0.629093044390368)
NMINMF = read_csv("~/Documents/M2/projet/NMI/NMF_NMI.csv")
NMINMF = NMINMF$x
NMIAELLE = read_csv("/Users/jzk/Downloads/DAELEE2_NMI.csv")
NMIAELLE = NMIAELLE$x
## Hardcoded override (kept from the original):
NMIAELLE = c(0.613604966649526, 0.625035309545582, 0.641735829193394, 0.638362630665628, 0.633768885678127,
             0.64301349189263, 0.633658811165764, 0.609628723016092, 0.636312615366251, 0.613706282593635)
NMINMF = read_csv("/Users/jzk/Documents/M2/projet/NMI/NMINMFMNIST.csv")
NMINMF = NMINMF$x
NMIPCA = read_csv("/Users/jzk/Documents/M2/projet/NMI/NMIPCAFMNIST.csv")
NMIPCA = NMIPCA$x
NMIKPCA = read_csv("/Users/jzk/Documents/M2/projet/NMI/KERNALPCA_NMI.csv")
NMIKPCA = NMIKPCA$x
NMIMDS = read_csv("/Users/jzk/Documents/M2/projet/NMI/NMIMDSFMNIST.csv")
NMIMDS = NMIMDS$x
NMILTSA = read_csv("/Users/jzk/Documents/M2/projet/NMI/NMILTSAFMNIST.csv")
NMILTSA = NMILTSA$x
NMIISO = read_csv("/Users/jzk/Documents/M2/projet/NMI/NMIISOFMNIST.csv")
NMIISO = NMIISO$x
NMILLE = read_csv("/Users/jzk/Documents/M2/projet/NMI/NMILLEFMNIST.csv")
NMILLE = NMILLE$x
NMIUMAP = read_csv("/Users/jzk/Documents/M2/projet/NMI/NMIUMAPFMNIST.csv")
NMIUMAP = NMIUMAP$x
boxplot(NMIPCA, NMINMF, NMIMDS, NMIISO, NMILLE, NMILTSA, NMIAE, NMIUMAP, NMIAELLE,
        names = c("PCA", "NMF", "MDS", "ISOMAP", "LLE", "LTSA", "AE", "UMAP", "DeepDr"))

#### Load the saved ARI results
ARIAE <- read_csv("~/Documents/M2/projet/NMI/AE_ARI-2.csv")
ARIAE = ARIAE$x
## Hardcoded override (kept from the original):
ARIAE = c(0.471868884412653, 0.473620546320627, 0.454919443867452, 0.459938250291911, 0.461576024312166,
          0.45192332803295, 0.460523771935872, 0.48255091040172, 0.471828078063006, 0.500065401234325)
ARIAELLE = read_csv("/Users/jzk/Downloads/DAELLE2_ARI.csv")
ARIAELLE = ARIAELLE$x
## Hardcoded override (kept from the original):
ARIAELLE = c(0.464421652293046, 0.482780377909208, 0.519405790093007, 0.51715003262984, 0.491345880509513,
             0.515244485708418, 0.507963014939459, 0.466197618532168, 0.51884289802953, 0.464945734338518)
ARINMF = read_csv("/Users/jzk/Documents/M2/projet/ARI/ARINMFMNIST.csv")
ARINMF = ARINMF$x
ARIPCA = read_csv("/Users/jzk/Documents/M2/projet/ARI/ARIPCAFMNIST.csv")
ARIPCA = ARIPCA$x
ARIMDS = read_csv("/Users/jzk/Documents/M2/projet/ARI/ARIMDSFMNIST.csv")
ARIMDS = ARIMDS$x
ARILTSA = read_csv("/Users/jzk/Documents/M2/projet/ARI/ARILTSAFMNIST.csv")
ARILTSA = ARILTSA$x
ARIISO = read_csv("/Users/jzk/Documents/M2/projet/ARI/ARIISOFMNIST.csv")
ARIISO = ARIISO$x
ARILLE = read_csv("/Users/jzk/Documents/M2/projet/ARI/ARILLEFMNIST.csv")
ARILLE = ARILLE$x
ARIUMAP = read_csv("/Users/jzk/Documents/M2/projet/ARI/ARIUMAPFMNIST.csv")
ARIUMAP = ARIUMAP$x
boxplot(ARIPCA, ARINMF, ARIMDS, ARIISO, ARILLE, ARILTSA, ARIAE, ARIUMAP, ARIAELLE,
        names = c("PCA", "NMF", "MDS", "ISOMAP", "LLE", "LTSA", "AE", "UMAP", "DeepDr"))

## Paired one-sided t-tests: is DeepDr (AE+LLE) significantly better?
t.test(NMIAELLE, NMIPCA, paired = TRUE, alternative = "greater")
t.test(NMIAELLE, NMINMF, paired = TRUE, alternative = "greater")
t.test(NMIAELLE, NMIMDS, paired = TRUE, alternative = "greater")
t.test(NMIAELLE, NMIISO, paired = TRUE, alternative = "greater")
t.test(NMIAELLE, NMILLE, paired = TRUE, alternative = "greater")
t.test(NMIAELLE, NMILTSA, paired = TRUE, alternative = "greater")
t.test(NMIAELLE, NMIAE, paired = TRUE, alternative = "greater")
## NOTE(review): unpaired here while all others are paired -- confirm intended.
t.test(NMIAELLE, NMIUMAP, paired = FALSE, alternative = "greater")
|
/.ipynb_checkpoints/ProjetReducDim-checkpoint.r
|
no_license
|
yannistannier/deepdr-dae-with-lle
|
R
| false
| false
| 10,627
|
r
|
## Dimensionality-reduction benchmark: embed a digits/image dataset with
## several methods (PCA, kPCA, Isomap, MDS, LLE, LTSA, UMAP, NMF), cluster
## each embedding with Mclust (G = 10), and compare the clusterings to the
## true labels via NMI / ARI.
## NOTE(review): exploratory script -- each data-loading section below
## overwrites X.train / Class.train, so only the section executed last
## feeds the embedding steps.
library(FactoMineR)
library(dimRed)
library(reshape2)
library(ggplot2)
library(tm)
library(stringr)
library(NMIcode)
library(LICORS)
library(readr)
library(keras)
library(mclust)

## ---- Pen-digits ----
pen.tra = read.table("/Users/jzk/Documents/M2/reducDimold/penDigitss/pendigits.tra", sep = ",")
pen.tes = read.table("/Users/jzk/Documents/M2/reducDimold/penDigitss/pendigits.tes", sep = ",")
pen = rbind(pen.tra, pen.tes)
dim(pen.tra)
X.train = pen.tra[, -17]      # column 17 holds the digit label
Class.train = pen.tra[, 17]
X.test = pen.tes[, -17]
Class.test = pen.tes[, 17]

## ---- MNIST (via tensorflow) ----
library(tensorflow)
datasets <- tf$contrib$learn$datasets
mnist <- datasets$mnist$read_data_sets("MNIST-data", one_hot = FALSE)
dim(mnist$train$images)
X.train = mnist$test$images
Class.train = as.vector(mnist$test$labels)
X.train = mnist$train$images           # overrides the test split just above
Class.train = as.vector(mnist$train$labels)

## ---- USPS (HDF5) ----
source("http://bioconductor.org/biocLite.R")
biocLite("rhdf5")
library(rhdf5)
usps = h5read("/Users/jzk/Documents/M2/reducDim/usps.h5", "/train")
usps_tr = usps$data
usps_class = usps$target
X.train = t(usps_tr)
Class.train = as.vector(usps_class)

## ---- Fashion-MNIST (CSV) ----
X.train = read_csv("/Users/jzk/Documents/M2/reducDimold/fashionmnist/fashion-mnist_test.csv", col_types = cols(.default = "i"))
Class.train = X.train$label
head(X.train)
X.train = X.train[, -1]                # drop the label column
X.train = as.matrix(X.train)
dim(X.train)

######### PCA
library(aricode)     # NMI() / ARI()
library(MLmetrics)
library(caret)

## Shared evaluation used by all embedding sections below (replaces eight
## copy-pasted loops). Clusters the embedding with Mclust (G = 10) `reps`
## times and collects NMI / ARI against the true labels; the original's
## cbind() + as.vector() accumulation is equivalent to c().
eval_embedding <- function(red, labels, reps = 10) {
  nmi <- c()
  ari <- c()
  for (i in seq_len(reps)) {
    cl <- Mclust(red, G = 10)
    nmi <- c(nmi, NMI(cl$classification, labels))
    ari <- c(ari, ARI(cl$classification, labels))
  }
  list(nmi = nmi, ari = ari)
}

PCA = FactoMineR::PCA(X.train)
barplot(PCA$eig[, 1], main = "Eigenvalues", names.arg = 1:nrow(PCA$eig))
summary(PCA)
scores <- eval_embedding(PCA$ind$coord, Class.train)
NMIPCA = scores$nmi; mean(NMIPCA)
ARIPCA = scores$ari; mean(ARIPCA)
boxplot(NMIPCA)
boxplot(ARIPCA)
write.csv(NMIPCA, "/Users/jzk/Documents/M2/projet/NMI/NMIPCAFMNIST.csv")
write.csv(ARIPCA, "/Users/jzk/Documents/M2/projet/ARI/ARIPCAFMNIST.csv")

#### kPCA
library(kernlab)
## NOTE(review): the original computed three alternative kernel PCAs and
## kept only the last; the discarded calls are not re-executed here.
KPCA <- kpca(~., data = X.train, kernel = "rbfdot", kpar = list(sigma = 0.2), features = 2)
slot(KPCA, "xmatrix")
## kmeans (unlike Mclust) is randomly initialised, so repeats differ.
nmi <- c()
ari <- c()
for (i in 1:10) {
  cl <- kmeans(slot(KPCA, "xmatrix")[, c(1:2)], 10)
  nmi <- c(nmi, NMI(cl$cluster, Class.train))
  ari <- c(ari, ARI(cl$cluster, Class.train))
}
NMIKPCA = nmi
ARIKPCA = ari
boxplot(NMIKPCA)
boxplot(ARIKPCA)

#### Isomap
ISO <- embed(X.train, "Isomap", .mute = NULL, knn = 15, ndim = 5)
plot(ISO, type = "2vars")
scores <- eval_embedding(ISO@data@data, Class.train)
NMIISO = scores$nmi
ARIISO = scores$ari
boxplot(NMIISO)
boxplot(ARIISO)
write.csv(NMIISO, "/Users/jzk/Documents/M2/projet/NMI/NMIISOFMNIST.csv")
write.csv(ARIISO, "/Users/jzk/Documents/M2/projet/ARI/ARIISOFMNIST.csv")

## MDS
library(MASS)
d <- dist(X.train, method = "euclidean")  # euclidean distances between the rows
fit <- isoMDS(d, k = 7)                   # k is the target dimension
fit                                       # view results
red = fit$points
## Precomputed embedding + labels (override the isoMDS result above):
red = read_csv("/Users/jzk/Documents/M2/projet/projet/MDS.csv"); red = as.matrix(red)
Class.train = read_csv("/Users/jzk/Documents/M2/projet/projet/label.csv")
Class.train = Class.train$`7.000000000000000000e+00`
scores <- eval_embedding(red, Class.train)
NMIMDS = scores$nmi
ARIMDS = scores$ari
boxplot(NMIMDS)
boxplot(ARIMDS)
write.csv(NMIMDS, "/Users/jzk/Documents/M2/projet/NMI/NMIMDSFMNIST.csv")
write.csv(ARIMDS, "/Users/jzk/Documents/M2/projet/ARI/ARIMDSFMNIST.csv")
NMIMDS

### LLE
library(lle)
red <- lle(X.train, m = 2, k = 10, reg = 2, ss = FALSE, id = TRUE, v = 0.9)
red <- red$Y  # keep only the embedding from the lle() result list
## Precomputed embedding (overrides the lle() result above). BUG FIX: the
## original took `$Y` AFTER as.matrix(read_csv(...)), which errors on a
## matrix -- the loaded CSV already *is* the embedding.
red = read_csv("/Users/jzk/Documents/M2/projet/projet/LLE.csv"); red = as.matrix(red)
scores <- eval_embedding(red, Class.train)
NMILLE = scores$nmi
ARILLE = scores$ari
boxplot(NMILLE)
boxplot(ARILLE)
write.csv(NMILLE, "/Users/jzk/Documents/M2/projet/NMI/NMILLEFMNIST.csv")
write.csv(ARILLE, "/Users/jzk/Documents/M2/projet/ARI/ARILLEFMNIST.csv")

### LTSA
library(Rdimtools)
red = do.ltsa(X.train, ndim = 5, type = c("proportion", 0.1))
red = red$Y
## Precomputed embedding + labels (override the do.ltsa result above):
red = read_csv("/Users/jzk/Documents/M2/projet/projet/LTSA.csv")
Class.train = read_csv("/Users/jzk/Documents/M2/projet/projet/label.csv")
Class.train = Class.train$`7.000000000000000000e+00`
red = as.matrix(red)
scores <- eval_embedding(red, Class.train)
NMILTSA = scores$nmi  # renamed from the original's misleading reuse of NMILLE
ARILTSA = scores$ari
boxplot(NMILTSA); mean(NMILTSA)
boxplot(ARILTSA); mean(ARILTSA)
write.csv(NMILTSA, "/Users/jzk/Documents/M2/projet/NMI/NMILTSAFMNIST.csv")
write.csv(ARILTSA, "/Users/jzk/Documents/M2/projet/ARI/ARILTSAFMNIST.csv")

### UMAP (embedding precomputed externally)
red = read_csv("/Users/jzk/Documents/M2/projet/projet/UMAP.csv")
Class.train = read_csv("/Users/jzk/Documents/M2/projet/projet/label.csv")
Class.train = Class.train$`7.000000000000000000e+00`
red = as.matrix(red)
scores <- eval_embedding(red, Class.train)
NMIUMAP = scores$nmi  # renamed from the original's misleading reuse of NMILLE
ARIUMAP = scores$ari
boxplot(NMIUMAP); mean(NMIUMAP)
boxplot(ARIUMAP); mean(ARIUMAP)
write.csv(NMIUMAP, "/Users/jzk/Documents/M2/projet/NMI/NMIUMAPFMNIST.csv")
write.csv(ARIUMAP, "/Users/jzk/Documents/M2/projet/ARI/ARIUMAPFMNIST.csv")

#### NMF
library(Matrix)
library(NMF)
library(tidytext)
library(slam)
library(dplyr)
library(SnowballC)
library(skmeans)
library(textir)
library(stm)
library(factoextra)
library(foreach)
library(doParallel)
library(fastICA)
library(wordcloud)
library(topicmodels)
data_used.tfidf = X.train
## Uniform weight matrix (all ones) for weighted ls-nmf.
weight = Matrix(rep(1, dim(data_used.tfidf)[1] * dim(data_used.tfidf)[2]), nrow = dim(data_used.tfidf)[1]); dim(weight)
res = nmf(X.train, 10, method = "ls-nmf", .options = "vt", seed = 'nndsvd', weight = as.matrix(weight))
res.coef <- coef(res)   # H: factor-by-feature matrix
res.bas <- basis(res)   # W: sample-by-factor matrix, used as the embedding
heatmap(res.bas)
scores <- eval_embedding(res.bas, Class.train)
NMINMF = scores$nmi  # renamed from the original's misleading reuse of NMILLE
ARINMF = scores$ari
boxplot(NMINMF)
boxplot(ARINMF)
write.csv(NMINMF, "/Users/jzk/Documents/M2/projet/NMI/NMINMFMNIST.csv")
write.csv(ARINMF, "/Users/jzk/Documents/M2/projet/ARI/ARINMFMNIST.csv")

#### Load the saved NMI results
NMIAE <- read_csv("~/Documents/M2/projet/NMI/AE_NMI-2.csv")
NMIAE = NMIAE$x
## Hardcoded override of the CSV values (kept from the original):
NMIAE = c(0.619148480401683, 0.622058475409507, 0.599357598184059, 0.611669946777276, 0.612980116108482,
          0.6194605223113, 0.611809445804786, 0.609646195077058, 0.620292913556716, 0.629093044390368)
NMINMF = read_csv("~/Documents/M2/projet/NMI/NMF_NMI.csv")
NMINMF = NMINMF$x
NMIAELLE = read_csv("/Users/jzk/Downloads/DAELEE2_NMI.csv")
NMIAELLE = NMIAELLE$x
## Hardcoded override (kept from the original):
NMIAELLE = c(0.613604966649526, 0.625035309545582, 0.641735829193394, 0.638362630665628, 0.633768885678127,
             0.64301349189263, 0.633658811165764, 0.609628723016092, 0.636312615366251, 0.613706282593635)
NMINMF = read_csv("/Users/jzk/Documents/M2/projet/NMI/NMINMFMNIST.csv")
NMINMF = NMINMF$x
NMIPCA = read_csv("/Users/jzk/Documents/M2/projet/NMI/NMIPCAFMNIST.csv")
NMIPCA = NMIPCA$x
NMIKPCA = read_csv("/Users/jzk/Documents/M2/projet/NMI/KERNALPCA_NMI.csv")
NMIKPCA = NMIKPCA$x
NMIMDS = read_csv("/Users/jzk/Documents/M2/projet/NMI/NMIMDSFMNIST.csv")
NMIMDS = NMIMDS$x
NMILTSA = read_csv("/Users/jzk/Documents/M2/projet/NMI/NMILTSAFMNIST.csv")
NMILTSA = NMILTSA$x
NMIISO = read_csv("/Users/jzk/Documents/M2/projet/NMI/NMIISOFMNIST.csv")
NMIISO = NMIISO$x
NMILLE = read_csv("/Users/jzk/Documents/M2/projet/NMI/NMILLEFMNIST.csv")
NMILLE = NMILLE$x
NMIUMAP = read_csv("/Users/jzk/Documents/M2/projet/NMI/NMIUMAPFMNIST.csv")
NMIUMAP = NMIUMAP$x
boxplot(NMIPCA, NMINMF, NMIMDS, NMIISO, NMILLE, NMILTSA, NMIAE, NMIUMAP, NMIAELLE,
        names = c("PCA", "NMF", "MDS", "ISOMAP", "LLE", "LTSA", "AE", "UMAP", "DeepDr"))

#### Load the saved ARI results
ARIAE <- read_csv("~/Documents/M2/projet/NMI/AE_ARI-2.csv")
ARIAE = ARIAE$x
## Hardcoded override (kept from the original):
ARIAE = c(0.471868884412653, 0.473620546320627, 0.454919443867452, 0.459938250291911, 0.461576024312166,
          0.45192332803295, 0.460523771935872, 0.48255091040172, 0.471828078063006, 0.500065401234325)
ARIAELLE = read_csv("/Users/jzk/Downloads/DAELLE2_ARI.csv")
ARIAELLE = ARIAELLE$x
## Hardcoded override (kept from the original):
ARIAELLE = c(0.464421652293046, 0.482780377909208, 0.519405790093007, 0.51715003262984, 0.491345880509513,
             0.515244485708418, 0.507963014939459, 0.466197618532168, 0.51884289802953, 0.464945734338518)
ARINMF = read_csv("/Users/jzk/Documents/M2/projet/ARI/ARINMFMNIST.csv")
ARINMF = ARINMF$x
ARIPCA = read_csv("/Users/jzk/Documents/M2/projet/ARI/ARIPCAFMNIST.csv")
ARIPCA = ARIPCA$x
ARIMDS = read_csv("/Users/jzk/Documents/M2/projet/ARI/ARIMDSFMNIST.csv")
ARIMDS = ARIMDS$x
ARILTSA = read_csv("/Users/jzk/Documents/M2/projet/ARI/ARILTSAFMNIST.csv")
ARILTSA = ARILTSA$x
ARIISO = read_csv("/Users/jzk/Documents/M2/projet/ARI/ARIISOFMNIST.csv")
ARIISO = ARIISO$x
ARILLE = read_csv("/Users/jzk/Documents/M2/projet/ARI/ARILLEFMNIST.csv")
ARILLE = ARILLE$x
ARIUMAP = read_csv("/Users/jzk/Documents/M2/projet/ARI/ARIUMAPFMNIST.csv")
ARIUMAP = ARIUMAP$x
boxplot(ARIPCA, ARINMF, ARIMDS, ARIISO, ARILLE, ARILTSA, ARIAE, ARIUMAP, ARIAELLE,
        names = c("PCA", "NMF", "MDS", "ISOMAP", "LLE", "LTSA", "AE", "UMAP", "DeepDr"))

## Paired one-sided t-tests: is DeepDr (AE+LLE) significantly better?
t.test(NMIAELLE, NMIPCA, paired = TRUE, alternative = "greater")
t.test(NMIAELLE, NMINMF, paired = TRUE, alternative = "greater")
t.test(NMIAELLE, NMIMDS, paired = TRUE, alternative = "greater")
t.test(NMIAELLE, NMIISO, paired = TRUE, alternative = "greater")
t.test(NMIAELLE, NMILLE, paired = TRUE, alternative = "greater")
t.test(NMIAELLE, NMILTSA, paired = TRUE, alternative = "greater")
t.test(NMIAELLE, NMIAE, paired = TRUE, alternative = "greater")
## NOTE(review): unpaired here while all others are paired -- confirm intended.
t.test(NMIAELLE, NMIUMAP, paired = FALSE, alternative = "greater")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find_pattern.R
\name{find_pattern}
\alias{find_pattern}
\alias{is_in_file}
\title{Find a pattern in files from a directory}
\usage{
find_pattern(pattern, where = here(), full_names = FALSE)
is_in_file(pattern, file)
}
\arguments{
\item{pattern}{the pattern to find. Can be regex}
\item{where}{the path to the directory where you want to search}
\item{full_names}{a logical value as in \code{\link[base]{list.files}}. If TRUE, the directory path is prepended to the file names to give a relative file path. If FALSE, the file names (rather than paths) are returned.}
\item{file}{the path to the file where you want to search}
}
\value{
a vector with all the files where the pattern was found for \code{find_pattern} or a logical value for \code{is_in_file}
}
\description{
\code{find_pattern} searches for a pattern in all files from a directory and \code{is_in_file} searches
for a pattern in one file.
}
\examples{
find_pattern(pattern = "usethis::", where = system.file(package = "benutils"))
\dontrun{
# if you are in a R project you can just specify the pattern
find_pattern("my_pattern")
}
}
|
/man/find_pattern.Rd
|
permissive
|
BenjaminLouis/benutils
|
R
| false
| true
| 1,172
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find_pattern.R
\name{find_pattern}
\alias{find_pattern}
\alias{is_in_file}
\title{Find a pattern in files from a directory}
\usage{
find_pattern(pattern, where = here(), full_names = FALSE)
is_in_file(pattern, file)
}
\arguments{
\item{pattern}{the pattern to find. Can be regex}
\item{where}{the path to the directory where you want to search}
\item{full_names}{a logical value as in \code{\link[base]{list.files}}. If TRUE, the directory path is prepended to the file names to give a relative file path. If FALSE, the file names (rather than paths) are returned.}
\item{file}{the path to the file where you want to search}
}
\value{
a vector with all the files where the pattern was found for \code{find_pattern} or a logical value for \code{is_in_file}
}
\description{
\code{find_pattern} searches for a pattern in all files from a directory and \code{is_in_file} searches
for a pattern in one file.
}
\examples{
find_pattern(pattern = "usethis::", where = system.file(package = "benutils"))
\dontrun{
# if you are in a R project you can just specify the pattern
find_pattern("my_pattern")
}
}
|
# Tidy Tuesday: lines of code in CRAN packages (data set of 2019-11-12).
library(tidyverse)
library(extrafont)
library(ggthemr)

# Apply the "chalk" ggthemr theme and register the available system fonts.
ggthemr(palette = "chalk", type = "outer")
fonts()

# Per-language line counts for every CRAN package.
cran_code <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-11-12/loc_cran_packages.csv")

# Total lines of code per package; keep only the ten largest.
lines_code <- cran_code %>%
  group_by(pkg_name) %>%
  summarise(lines = sum(code)) %>%
  arrange(desc(lines)) %>%
  head(10)

# Horizontal bar chart with packages ordered by total lines of code.
lines_code %>%
  ggplot(aes(fct_reorder(pkg_name, lines), lines)) +
  geom_col() +
  labs(title = "Top 10 Most Heavily Coded R Packages",
       x = NULL,
       y = "Lines of Code") +
  theme(legend.position = "none", text = element_text(family = "Impact")) +
  coord_flip()
|
/Tidy Tuesday #8 - CRAN/cran.R
|
no_license
|
skybett/Tidy-Tuesdays
|
R
| false
| false
| 664
|
r
|
# Tidy Tuesday: lines of code in CRAN packages (data set of 2019-11-12).
library(tidyverse)
library(extrafont)
library(ggthemr)

# Apply the "chalk" ggthemr theme and register the available system fonts.
ggthemr(palette = "chalk", type = "outer")
fonts()

# Per-language line counts for every CRAN package.
cran_code <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-11-12/loc_cran_packages.csv")

# Total lines of code per package; keep only the ten largest.
lines_code <- cran_code %>%
  group_by(pkg_name) %>%
  summarise(lines = sum(code)) %>%
  arrange(desc(lines)) %>%
  head(10)

# Horizontal bar chart with packages ordered by total lines of code.
lines_code %>%
  ggplot(aes(fct_reorder(pkg_name, lines), lines)) +
  geom_col() +
  labs(title = "Top 10 Most Heavily Coded R Packages",
       x = NULL,
       y = "Lines of Code") +
  theme(legend.position = "none", text = element_text(family = "Impact")) +
  coord_flip()
|
#' Homogeneization of GNSS series
#'
#' fits a segmentation in the mean model, taking into account a functional part and a heterogeneous variance (default is monthly)
#'
#' @param Data a data frame, with size [n x 2], containing the signal (e.g. the daily GPS-ERAI series for GNSS) and the dates (in format yyyy-mm-dd of type "calendar time" (class POSIXct))
#' @param lyear the length of the year in the signal. Default is 365.25
#' @param lmin the minimum length of the segments. Default is 1
#' @param Kmax the maximal number of segments (must be lower than n). Default is 30
#' @param selection.K a name indicating the model selection criterion to select the number of segments K (\code{mBIC}, \code{Lav}, \code{BM_BJ} or \code{BM_slope}). \code{"none"} indicates that no selection is claimed and the procedure considers \code{Kmax} segments or \code{Kmax}-1 changes. If \code{selection.K="All"}, the results for the four possible criteria are given. Default is \code{"mBIC"}
#' @param S the threshold used in the Lav's criterion. Default is 0.75
#' @param f a boolean indicating if the functional part is taking into account in the model. Default is TRUE and note that if \code{f=FALSE}, only a segmentation is performed
#' @param selection.f a boolean indicating if a selection on the functions of the Fourier decomposition of order 4 is performed. Default is FALSE
#' @param threshold a numeric value lower than 1 used for the selection of the functions of the Fourier decomposition of order 4. Default is 0.001
#' @param tol the stopping rule for the iterative procedure. Default is 1e-4
#'
#' @return A file containing
#' \itemize{
#' \item \code{K} that corresponds to the selected number of segments or \code{K}-1 corresponds to the number of changes. If \code{selection.K="none"}, the number of segments is \code{Kmax}.
#' \item \code{seg} that corresponds to the estimation of the segmentation parameters (the begin and the end positions of each segment with the estimated mean).
#' \item \code{funct} that corresponds to the estimation of the functional part. If \code{f==FALSE}, \code{funct} is FALSE
#' \item \code{coeff} that corresponds to the estimation of the coefficients of the Fourier decomposition. The vector contains 8 coefficients if \code{selection.f=FALSE} or as many coefficients as the number of selected functions if \code{selection.f=TRUE}. If \code{f==FALSE}, \code{coeff} is FALSE
#' \item \code{variances} that corresponds to the estimated variances of each fixed interval
#' \item \code{SSR} that corresponds to the Residuals Sum of Squares for k=1,...,\code{Kmax}. If \code{selection.K="none"}, it contains only the SSR for \code{Kmax} segments
#' \item \code{Tot} is a list. Each component contains all the results k segments (k=1,...,\code{Kmax}). If \code{selection.K="none"}, \code{Tot} is NA
#' }
#' If \code{selection.K="All"}, the outputs \code{K}, \code{seg}, \code{funct} and \code{coeff} are each a list containing the corresponding results obtained for the four model selection criteria
#'
#' @details
#' The function performs homogeneization of GNSS series. The considered model is such that: (1) the average is composed of a piecewise function (changes in the mean) with a functional part and (2) the variance is heterogeneous on fixed intervals. By default the latter intervals are the months.
#' The inference procedure consists in two steps. First, the number of segments is fixed to \code{Kmax} and the parameters are estimated using the maximum likelihood procedure using the following procedure: first the variances are robustly estimated and then the segmentation and the functional parts are iteratively estimated. Then the number of segments is chosen using model selection criteria. The possible criteria are \code{mBIC} the modified BIC criterion REFEREF, \code{Lav} the criterion proposed by REFEF, \code{BM_BJ} and \code{BM_slope} the criteria proposed by REFEF where the penalty constant is calibrated using the Biggest Jump and the slope respectively REFERF.
#' \itemize{
#' \item The data is a data frame with 2 columns: $signal is the signal to be homogeneized (a daily series) and $date is the date. The date will be in format yyyy-mm-dd of type "calendar time" (class POSIXct).
#' \item The function part is estimated using a Fourier decomposition of order 4 with \code{selection.f=FALSE}. \code{selection.f=TRUE} consists in selecting the significative functions of the Fourier decomposition of order 4 (for which p.values are lower than \code{threshold})
#' \item If \code{selection.K="none"}, the procedure is performed with \code{Kmax} segments.
#' \item Missing data in the signal are accepted.
#' }
#'
#' @examples
#' data(Data)
#' lyear=365.25
#' Kmax=10
#' lmin=1
#' result=GNSSseg(Data,lyear,Kmax=Kmax,selection.K="none")
#' plot_GNSS(Data,result$seg,result$funct)
#' @export
GNSSseg=function(Data,lyear=365.25,lmin=1,Kmax=30,selection.K="BM_BJ",S=0.75,f=TRUE,selection.f=FALSE,threshold=0.001,tol=1e-4){
  ## Homogeneization of a daily GNSS series: segmentation in the mean with an
  ## optional functional (periodic) part and monthly heteroscedastic
  ## variances. See the roxygen header for the meaning of the arguments and
  ## of the returned list (K, seg, funct, coeff, variances, SSR, Tot).
  ## NOTE: if the input checks below fail, a message is printed and the
  ## function returns NULL.
  result = list()
  Data.X = c()
  cond1=TRUE
  cond2=TRUE
  #For NA: keep only the observed (non-missing) part of the signal
  present.data = which(!is.na(Data$signal))
  Data.X = Data[present.data,]
  n.Data=length(Data$signal)
  n.X=length(Data.X$signal)
  Kseq=1:Kmax
  #The conditions to be fulfilled
  # inherits() is the robust class test (the class vector of a POSIXct
  # object is c("POSIXct","POSIXt"))
  if (!inherits(Data$date,"POSIXct")){
    cond1=FALSE
    cat("date must be in format yyyy-mm-dd of type GMT in class POSIXct/POSIXt")
  }
  if (Kmax >n.X) {
    cond2=FALSE
    # bug fix: the message previously referenced the undefined variable
    # n.present; the length of the series without NA is n.X
    cat("The maximal number of segments Kmax", Kmax," needs to be lower than the length of the series without NA that is " ,n.X,"\n")
  }
  if ((cond1==TRUE) & (cond2==TRUE)){
    # Month/year factors used for the monthly variance estimation
    Data.X$year=as.factor(format(Data.X$date,format='%Y'))
    Data.X$month=as.factor(format(Data.X$date,format='%m'))
    Data.X$month = droplevels(Data.X$month)
    Data.X$year = droplevels(Data.X$year)
    #Used function for NA: map a result obtained on the NA-free series back
    #onto the original time index (begin/end positions and functional part)
    add_NA=function(res,present.data,n.Data,segf){
      res.with.NA=list()
      #Segmentation
      Tmu.temp=res$Tmu
      Tmu.temp$begin=present.data[Tmu.temp$begin]
      Tmu.temp$end=present.data[Tmu.temp$end]
      Tmu.temp$end[length(Tmu.temp$end)]=n.Data
      #Function
      if (segf==TRUE){
        f.temp=rep(NA,n.Data)
        f.temp[present.data]=res$f
        res.with.NA$f=f.temp
      } else {f.temp=FALSE}
      res.with.NA$Tmu=Tmu.temp
      return(res.with.NA)
    }
    #Estimation of the monthly variances (robust estimator)
    sigma.est.month=RobEstiMonthlyVariance(Data.X)
    var.est.month=sigma.est.month^2
    if (f==TRUE){
      #Option for estimating f: with or without selection of the Fourier
      #functions of the order-4 decomposition
      Used.function=c()
      if (selection.f==TRUE){
        Used.function='Seg_funct_selbK'
      } else{
        Used.function='Seg_funct_totK'
      }
      if (selection.K=="none"){
        # no model selection: run once with Kmax segments.
        # do.call() is a safer equivalent of the former eval(parse(...))
        res.segfunct=do.call(Used.function,list(Data.X,var.est.month,Kmax,lmin,lyear,threshold,tol))
        res.segfunct.with.NA= add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu=res.segfunct.with.NA$Tmu
        funct=res.segfunct.with.NA$f
        Kh=Kmax
        res.LoopK=NA
        coeff=res.segfunct$coeff
        SSwg=res.segfunct$SSwg
      }
      if (selection.K=="Lav"){
        # Lavielle's criterion with threshold S
        res.LoopK=Loop.K.procedure(Data.X,var.est.month,lyear,lmin,Kmax,Used.function,threshold,tol)
        res=sapply(res.LoopK,function(e) {
          return(c(SSwg =e$SSwg, LogLg = e$LogLg))
        })
        SSwg=res[1,]
        Kh=MLcriterion(SSwg, Kseq,S)
        res.segfunct=res.LoopK[[Kh]]
        res.segfunct.with.NA<- add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu=res.segfunct.with.NA$Tmu
        funct=res.segfunct.with.NA$f
        coeff=res.segfunct$coeff
      }
      if (selection.K=="BM_BJ"){
        # Birge-Massart penalty, constant calibrated by the Biggest Jump
        res.LoopK=Loop.K.procedure(Data.X,var.est.month,lyear,lmin,Kmax,Used.function,threshold,tol)
        res=sapply(res.LoopK,function(e) {
          return(c(SSwg =e$SSwg, LogLg = e$LogLg))
        })
        SSwg=res[1,]
        pen=5*Kseq+2*Kseq*log(n.X/Kseq)
        Kh=BMcriterion(SSwg,pen)
        res.segfunct=res.LoopK[[Kh]]
        res.segfunct.with.NA<- add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu=res.segfunct.with.NA$Tmu
        funct=res.segfunct.with.NA$f
        coeff=res.segfunct$coeff
      }
      if (selection.K=="BM_slope"){
        # Birge-Massart penalty, constant calibrated by the slope heuristic
        res.LoopK=Loop.K.procedure(Data.X,var.est.month,lyear,lmin,Kmax,Used.function,threshold,tol)
        res=sapply(res.LoopK,function(e) {
          return(c(SSwg =e$SSwg, LogLg = e$LogLg))
        })
        SSwg=res[1,]
        pen=5*Kseq+2*Kseq*log(n.X/Kseq)
        DataForCa=data.frame(model=paste("K=",Kseq),pen=pen,complexity=Kseq,contrast=SSwg)
        Kh=Kseq[which(capushe::DDSE(DataForCa)@model==DataForCa$model)]
        res.segfunct=res.LoopK[[Kh]]
        res.segfunct.with.NA<- add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu=res.segfunct.with.NA$Tmu
        funct=res.segfunct.with.NA$f
        coeff=res.segfunct$coeff
      }
      if (selection.K=="mBIC"){
        # modified BIC criterion
        res.LoopK=Loop.K.procedure(Data.X,var.est.month,lyear,lmin,Kmax,Used.function,threshold,tol)
        res=sapply(res.LoopK,function(e) {
          return(c(SSwg =e$SSwg, LogLg = e$LogLg))
        })
        SSwg=res[1,]
        LogLg=res[2,]
        Kh=mBICcriterion(SSwg,LogLg,n.X,Kseq)$Kh
        res.segfunct=res.LoopK[[Kh]]
        res.segfunct.with.NA<- add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu=res.segfunct.with.NA$Tmu
        funct=res.segfunct.with.NA$f
        coeff=res.segfunct$coeff
      }
      if (selection.K=="All"){
        # apply the four criteria; outputs become lists keyed by criterion
        res.LoopK=Loop.K.procedure(Data.X,var.est.month,lyear,lmin,Kmax,Used.function,threshold,tol)
        res=sapply(res.LoopK,function(e) {
          return(c(SSwg =e$SSwg, LogLg = e$LogLg))
        })
        SSwg=res[1,]
        LogLg=res[2,]
        pen=5*Kseq+2*Kseq*log(n.X/Kseq)
        Tmu=list()
        Kh=list()
        funct=list()
        coeff=list()
        #1=mBIC
        Kh$mBIC=mBICcriterion(SSwg,LogLg,n.X,Kseq)$Kh
        res.segfunct=c()
        res.segfunct=res.LoopK[[Kh$mBIC]]
        res.segfunct.mBIC= add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu$mBIC=res.segfunct.mBIC$Tmu
        funct$mBIC=res.segfunct.mBIC$f
        coeff$mBIC=res.segfunct$coeff
        #2=ML (Lavielle)
        Kh$Lav=MLcriterion(SSwg, Kseq,S)
        res.segfunct=c()
        res.segfunct=res.LoopK[[Kh$Lav]]
        res.segfunct.ML= add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu$Lav=res.segfunct.ML$Tmu
        funct$Lav=res.segfunct.ML$f
        coeff$Lav=res.segfunct$coeff
        #3=BM_BJ
        Kh$BM_BJ=BMcriterion(SSwg,pen)
        res.segfunct=c()
        res.segfunct=res.LoopK[[Kh$BM_BJ]]
        res.segfunct.BM_BJ= add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu$BM_BJ=res.segfunct.BM_BJ$Tmu
        funct$BM_BJ=res.segfunct.BM_BJ$f
        coeff$BM_BJ=res.segfunct$coeff
        #4=BM_slope
        DataForCa=data.frame(model=paste("K=",Kseq),pen=pen,complexity=Kseq,contrast=SSwg)
        Kh$BM_slope=Kseq[which(capushe::DDSE(DataForCa)@model==DataForCa$model)]
        res.segfunct=c()
        res.segfunct=res.LoopK[[Kh$BM_slope]]
        res.segfunct.BM_slope= add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu$BM_slope=res.segfunct.BM_slope$Tmu
        funct$BM_slope=res.segfunct.BM_slope$f
        coeff$BM_slope=res.segfunct$coeff
      }
    } else {
      # f==FALSE: pure segmentation (no functional part) with monthly
      # variances
      funct=FALSE
      coeff=FALSE
      var.est.t=var.est.month[as.numeric(Data.X$month)]
      res.seg=SegMonthlyVarianceK(Data.X,Kmax,lmin,var.est.t)
      SSwg=res.seg$SSwg
      pen=5*Kseq+2*Kseq*log(n.X/Kseq)
      res.LoopK=res.seg$res.LoopK
      if (selection.K=="none"){
        res.seg.with.NA<- add_NA(res.seg,present.data,n.Data,segf=FALSE)
        Tmu=res.seg.with.NA$Tmu
        Kh=Kmax
      }
      if (selection.K=="Lav"){
        Kh=MLcriterion(SSwg, Kseq,S)
        res.seg.sol=c()
        res.seg.sol=SegMonthlyVarianceK(Data.X,Kh,lmin,var.est.t)
        res.seg.with.NA<- add_NA(res.seg.sol,present.data,n.Data,segf=FALSE)
        Tmu=res.seg.with.NA$Tmu
      }
      if (selection.K=="BM_BJ"){
        Kh=BMcriterion(SSwg,pen)
        res.seg.sol=c()
        res.seg.sol=SegMonthlyVarianceK(Data.X,Kh,lmin,var.est.t)
        res.seg.with.NA<- add_NA(res.seg.sol,present.data,n.Data,segf=FALSE)
        Tmu=res.seg.with.NA$Tmu
      }
      if (selection.K=="BM_slope"){
        DataForCa=data.frame(model=paste("K=",Kseq),pen=pen,complexity=Kseq,contrast=SSwg)
        Kh=Kseq[which(capushe::DDSE(DataForCa)@model==DataForCa$model)]
        res.seg.sol=c()
        res.seg.sol=SegMonthlyVarianceK(Data.X,Kh,lmin,var.est.t)
        res.seg.with.NA<- add_NA(res.seg.sol,present.data,n.Data,segf=FALSE)
        Tmu=res.seg.with.NA$Tmu
      }
      if (selection.K=="mBIC"){
        Kh=mBICcriterion(SSwg,res.seg$LogLg,n.X,Kseq)$Kh
        res.seg.sol=c()
        res.seg.sol=SegMonthlyVarianceK(Data.X,Kh,lmin,var.est.t)
        res.seg.with.NA<- add_NA(res.seg.sol,present.data,n.Data,segf=FALSE)
        Tmu=res.seg.with.NA$Tmu
      }
      if (selection.K=="All"){
        # apply the four criteria, re-using a previous fit whenever two
        # criteria select the same number of segments
        Tmu=list()
        Kh=list()
        #1=mBIC
        Kh.mBIC=mBICcriterion(SSwg,res.seg$LogLg,n.X,Kseq)$Kh
        res.seg.sol=c()
        res.seg.sol=SegMonthlyVarianceK(Data.X,Kh.mBIC,lmin,var.est.t)
        res.seg.mBIC<- add_NA(res.seg.sol,present.data,n.Data,segf=FALSE)
        Tmu$mBIC=res.seg.mBIC$Tmu
        Kh$mBIC=Kh.mBIC
        #2=ML (Lavielle)
        Kh.ML=MLcriterion(SSwg, Kseq,S)
        Kh$Lav=Kh.ML
        if (Kh.ML==Kh.mBIC){
          res.seg.ML=res.seg.mBIC
        } else{
          res.seg.sol=c()
          res.seg.sol=SegMonthlyVarianceK(Data.X,Kh.ML,lmin,var.est.t)
          res.seg.ML<- add_NA(res.seg.sol,present.data,n.Data,segf=FALSE)
        }
        Tmu$Lav=res.seg.ML$Tmu
        #3=BM_BJ
        Kh.BM_BJ=BMcriterion(SSwg,pen)
        Kh$BM_BJ=Kh.BM_BJ
        if ((Kh.BM_BJ==Kh.mBIC)) {
          res.seg.BM_BJ=res.seg.mBIC
        } else if ((Kh.BM_BJ==Kh.ML)) {
          res.seg.BM_BJ=res.seg.ML
        } else {
          res.seg.sol=c()
          res.seg.sol=SegMonthlyVarianceK(Data.X,Kh.BM_BJ,lmin,var.est.t)
          res.seg.BM_BJ<- add_NA(res.seg.sol,present.data,n.Data,segf=FALSE)
        }
        Tmu$BM_BJ=res.seg.BM_BJ$Tmu
        #4=BM_slope
        DataForCa=data.frame(model=paste("K=",Kseq),pen=pen,complexity=Kseq,contrast=SSwg)
        Kh.BM_slope=Kseq[which(capushe::DDSE(DataForCa)@model==DataForCa$model)]
        Kh$BM_slope=Kh.BM_slope
        if ((Kh.BM_slope==Kh.mBIC)) {
          res.seg.BM_slope=res.seg.mBIC
        } else if ((Kh.BM_slope==Kh.ML)) {
          res.seg.BM_slope=res.seg.ML
        } else if ((Kh.BM_slope==Kh.BM_BJ)) {
          res.seg.BM_slope=res.seg.BM_BJ
        } else {
          res.seg.sol=c()
          res.seg.sol=SegMonthlyVarianceK(Data.X,Kh.BM_slope,lmin,var.est.t)
          res.seg.BM_slope= add_NA(res.seg.sol,present.data,n.Data,segf=FALSE)
        }
        Tmu$BM_slope=res.seg.BM_slope$Tmu
      }
    }
    #Obtained segmentation
    result$K=Kh
    result$seg=Tmu
    result$funct=funct
    result$coeff=coeff
    #Global results
    result$variances=var.est.month
    result$SSR=SSwg
    result$Tot=res.LoopK
    return(result)
  }
}
|
/R/GNSSseg.R
|
no_license
|
arq16/GNSSseg
|
R
| false
| false
| 16,073
|
r
|
#' Homogeneization of GNSS series
#'
#' fits a segmentation in the mean model, taking into account a functional part and a heterogeneous variance (default is monthly)
#'
#' @param Data a data frame, with size [n x 2], containing the signal (e.g. the daily GPS-ERAI series for GNSS) and the dates (in format yyyy-mm-dd of type "calendar time" (class POSIXct))
#' @param lyear the length of the year in the signal. Default is 365.25
#' @param lmin the minimum length of the segments. Default is 1
#' @param Kmax the maximal number of segments (must be lower than n). Default is 30
#' @param selection.K a name indicating the model selection criterion to select the number of segments K (\code{mBIC}, \code{Lav}, \code{BM_BJ} or \code{BM_slope}). \code{"none"} indicates that no selection is claimed and the procedure considers \code{Kmax} segments or \code{Kmax}-1 changes. If \code{selection.K="All"}, the results for the four possible criteria are given. Default is \code{"mBIC"}
#' @param S the threshold used in the Lav's criterion. Default is 0.75
#' @param f a boolean indicating if the functional part is taking into account in the model. Default is TRUE and note that if \code{f=FALSE}, only a segmentation is performed
#' @param selection.f a boolean indicating if a selection on the functions of the Fourier decomposition of order 4 is performed. Default is FALSE
#' @param threshold a numeric value lower than 1 used for the selection of the functions of the Fourier decomposition of order 4. Default is 0.001
#' @param tol the stopping rule for the iterative procedure. Default is 1e-4
#'
#' @return A file containing
#' \itemize{
#' \item \code{K} that corresponds to the selected number of segments or \code{K}-1 corresponds to the number of changes. If \code{selection.K="none"}, the number of segments is \code{Kmax}.
#' \item \code{seg} that corresponds to the estimation of the segmentation parameters (the begin and the end positions of each segment with the estimated mean).
#' \item \code{funct} that corresponds to the estimation of the functional part. If \code{f==FALSE}, \code{funct} is FALSE
#' \item \code{coeff} that corresponds to the estimation of the coefficients of the Fourier decomposition. The vector contains 8 coefficients if \code{selection.f=FALSE} or as many coefficients as the number of selected functions if \code{selection.f=TRUE}. If \code{f==FALSE}, \code{coeff} is FALSE
#' \item \code{variances} that corresponds to the estimated variances of each fixed interval
#' \item \code{SSR} that corresponds to the Residuals Sum of Squares for k=1,...,\code{Kmax}. If \code{selection.K="none"}, it contains only the SSR for \code{Kmax} segments
#' \item \code{Tot} is a list. Each component contains all the results k segments (k=1,...,\code{Kmax}). If \code{selection.K="none"}, \code{Tot} is NA
#' }
#' If \code{selection.K="All"}, the outputs \code{K}, \code{seg}, \code{funct} and \code{coeff} are each a list containing the corresponding results obtained for the four model selection criteria
#'
#' @details
#' The function performs homogeneization of GNSS series. The considered model is such that: (1) the average is composed of a piecewise function (changes in the mean) with a functional part and (2) the variance is heterogeneous on fixed intervals. By default the latter intervals are the months.
#' The inference procedure consists in two steps. First, the number of segments is fixed to \code{Kmax} and the parameters are estimated using the maximum likelihood procedure using the following procedure: first the variances are robustly estimated and then the segmentation and the functional parts are iteratively estimated. Then the number of segments is chosen using model selection criteria. The possible criteria are \code{mBIC} the modified BIC criterion REFEREF, \code{Lav} the criterion proposed by REFEF, \code{BM_BJ} and \code{BM_slope} the criteria proposed by REFEF where the penalty constant is calibrated using the Biggest Jump and the slope respectively REFERF.
#' \itemize{
#' \item The data is a data frame with 2 columns: $signal is the signal to be homogeneized (a daily series) and $date is the date. The date will be in format yyyy-mm-dd of type "calendar time" (class POSIXct).
#' \item The function part is estimated using a Fourier decomposition of order 4 with \code{selection.f=FALSE}. \code{selection.f=TRUE} consists in selecting the significative functions of the Fourier decomposition of order 4 (for which p.values are lower than \code{threshold})
#' \item If \code{selection.K="none"}, the procedure is performed with \code{Kmax} segments.
#' \item Missing data in the signal are accepted.
#' }
#'
#' @examples
#' data(Data)
#' lyear=365.25
#' Kmax=10
#' lmin=1
#' result=GNSSseg(Data,lyear,Kmax=Kmax,selection.K="none")
#' plot_GNSS(Data,result$seg,result$funct)
#' @export
GNSSseg=function(Data,lyear=365.25,lmin=1,Kmax=30,selection.K="BM_BJ",S=0.75,f=TRUE,selection.f=FALSE,threshold=0.001,tol=1e-4){
  ## Homogeneization of a daily GNSS series: segmentation in the mean with an
  ## optional functional (periodic) part and monthly heteroscedastic
  ## variances. See the roxygen header for the meaning of the arguments and
  ## of the returned list (K, seg, funct, coeff, variances, SSR, Tot).
  ## NOTE: if the input checks below fail, a message is printed and the
  ## function returns NULL.
  result = list()
  Data.X = c()
  cond1=TRUE
  cond2=TRUE
  #For NA: keep only the observed (non-missing) part of the signal
  present.data = which(!is.na(Data$signal))
  Data.X = Data[present.data,]
  n.Data=length(Data$signal)
  n.X=length(Data.X$signal)
  Kseq=1:Kmax
  #The conditions to be fulfilled
  # inherits() is the robust class test (the class vector of a POSIXct
  # object is c("POSIXct","POSIXt"))
  if (!inherits(Data$date,"POSIXct")){
    cond1=FALSE
    cat("date must be in format yyyy-mm-dd of type GMT in class POSIXct/POSIXt")
  }
  if (Kmax >n.X) {
    cond2=FALSE
    # bug fix: the message previously referenced the undefined variable
    # n.present; the length of the series without NA is n.X
    cat("The maximal number of segments Kmax", Kmax," needs to be lower than the length of the series without NA that is " ,n.X,"\n")
  }
  if ((cond1==TRUE) & (cond2==TRUE)){
    # Month/year factors used for the monthly variance estimation
    Data.X$year=as.factor(format(Data.X$date,format='%Y'))
    Data.X$month=as.factor(format(Data.X$date,format='%m'))
    Data.X$month = droplevels(Data.X$month)
    Data.X$year = droplevels(Data.X$year)
    #Used function for NA: map a result obtained on the NA-free series back
    #onto the original time index (begin/end positions and functional part)
    add_NA=function(res,present.data,n.Data,segf){
      res.with.NA=list()
      #Segmentation
      Tmu.temp=res$Tmu
      Tmu.temp$begin=present.data[Tmu.temp$begin]
      Tmu.temp$end=present.data[Tmu.temp$end]
      Tmu.temp$end[length(Tmu.temp$end)]=n.Data
      #Function
      if (segf==TRUE){
        f.temp=rep(NA,n.Data)
        f.temp[present.data]=res$f
        res.with.NA$f=f.temp
      } else {f.temp=FALSE}
      res.with.NA$Tmu=Tmu.temp
      return(res.with.NA)
    }
    #Estimation of the monthly variances (robust estimator)
    sigma.est.month=RobEstiMonthlyVariance(Data.X)
    var.est.month=sigma.est.month^2
    if (f==TRUE){
      #Option for estimating f: with or without selection of the Fourier
      #functions of the order-4 decomposition
      Used.function=c()
      if (selection.f==TRUE){
        Used.function='Seg_funct_selbK'
      } else{
        Used.function='Seg_funct_totK'
      }
      if (selection.K=="none"){
        # no model selection: run once with Kmax segments.
        # do.call() is a safer equivalent of the former eval(parse(...))
        res.segfunct=do.call(Used.function,list(Data.X,var.est.month,Kmax,lmin,lyear,threshold,tol))
        res.segfunct.with.NA= add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu=res.segfunct.with.NA$Tmu
        funct=res.segfunct.with.NA$f
        Kh=Kmax
        res.LoopK=NA
        coeff=res.segfunct$coeff
        SSwg=res.segfunct$SSwg
      }
      if (selection.K=="Lav"){
        # Lavielle's criterion with threshold S
        res.LoopK=Loop.K.procedure(Data.X,var.est.month,lyear,lmin,Kmax,Used.function,threshold,tol)
        res=sapply(res.LoopK,function(e) {
          return(c(SSwg =e$SSwg, LogLg = e$LogLg))
        })
        SSwg=res[1,]
        Kh=MLcriterion(SSwg, Kseq,S)
        res.segfunct=res.LoopK[[Kh]]
        res.segfunct.with.NA<- add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu=res.segfunct.with.NA$Tmu
        funct=res.segfunct.with.NA$f
        coeff=res.segfunct$coeff
      }
      if (selection.K=="BM_BJ"){
        # Birge-Massart penalty, constant calibrated by the Biggest Jump
        res.LoopK=Loop.K.procedure(Data.X,var.est.month,lyear,lmin,Kmax,Used.function,threshold,tol)
        res=sapply(res.LoopK,function(e) {
          return(c(SSwg =e$SSwg, LogLg = e$LogLg))
        })
        SSwg=res[1,]
        pen=5*Kseq+2*Kseq*log(n.X/Kseq)
        Kh=BMcriterion(SSwg,pen)
        res.segfunct=res.LoopK[[Kh]]
        res.segfunct.with.NA<- add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu=res.segfunct.with.NA$Tmu
        funct=res.segfunct.with.NA$f
        coeff=res.segfunct$coeff
      }
      if (selection.K=="BM_slope"){
        # Birge-Massart penalty, constant calibrated by the slope heuristic
        res.LoopK=Loop.K.procedure(Data.X,var.est.month,lyear,lmin,Kmax,Used.function,threshold,tol)
        res=sapply(res.LoopK,function(e) {
          return(c(SSwg =e$SSwg, LogLg = e$LogLg))
        })
        SSwg=res[1,]
        pen=5*Kseq+2*Kseq*log(n.X/Kseq)
        DataForCa=data.frame(model=paste("K=",Kseq),pen=pen,complexity=Kseq,contrast=SSwg)
        Kh=Kseq[which(capushe::DDSE(DataForCa)@model==DataForCa$model)]
        res.segfunct=res.LoopK[[Kh]]
        res.segfunct.with.NA<- add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu=res.segfunct.with.NA$Tmu
        funct=res.segfunct.with.NA$f
        coeff=res.segfunct$coeff
      }
      if (selection.K=="mBIC"){
        # modified BIC criterion
        res.LoopK=Loop.K.procedure(Data.X,var.est.month,lyear,lmin,Kmax,Used.function,threshold,tol)
        res=sapply(res.LoopK,function(e) {
          return(c(SSwg =e$SSwg, LogLg = e$LogLg))
        })
        SSwg=res[1,]
        LogLg=res[2,]
        Kh=mBICcriterion(SSwg,LogLg,n.X,Kseq)$Kh
        res.segfunct=res.LoopK[[Kh]]
        res.segfunct.with.NA<- add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu=res.segfunct.with.NA$Tmu
        funct=res.segfunct.with.NA$f
        coeff=res.segfunct$coeff
      }
      if (selection.K=="All"){
        # apply the four criteria; outputs become lists keyed by criterion
        res.LoopK=Loop.K.procedure(Data.X,var.est.month,lyear,lmin,Kmax,Used.function,threshold,tol)
        res=sapply(res.LoopK,function(e) {
          return(c(SSwg =e$SSwg, LogLg = e$LogLg))
        })
        SSwg=res[1,]
        LogLg=res[2,]
        pen=5*Kseq+2*Kseq*log(n.X/Kseq)
        Tmu=list()
        Kh=list()
        funct=list()
        coeff=list()
        #1=mBIC
        Kh$mBIC=mBICcriterion(SSwg,LogLg,n.X,Kseq)$Kh
        res.segfunct=c()
        res.segfunct=res.LoopK[[Kh$mBIC]]
        res.segfunct.mBIC= add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu$mBIC=res.segfunct.mBIC$Tmu
        funct$mBIC=res.segfunct.mBIC$f
        coeff$mBIC=res.segfunct$coeff
        #2=ML (Lavielle)
        Kh$Lav=MLcriterion(SSwg, Kseq,S)
        res.segfunct=c()
        res.segfunct=res.LoopK[[Kh$Lav]]
        res.segfunct.ML= add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu$Lav=res.segfunct.ML$Tmu
        funct$Lav=res.segfunct.ML$f
        coeff$Lav=res.segfunct$coeff
        #3=BM_BJ
        Kh$BM_BJ=BMcriterion(SSwg,pen)
        res.segfunct=c()
        res.segfunct=res.LoopK[[Kh$BM_BJ]]
        res.segfunct.BM_BJ= add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu$BM_BJ=res.segfunct.BM_BJ$Tmu
        funct$BM_BJ=res.segfunct.BM_BJ$f
        coeff$BM_BJ=res.segfunct$coeff
        #4=BM_slope
        DataForCa=data.frame(model=paste("K=",Kseq),pen=pen,complexity=Kseq,contrast=SSwg)
        Kh$BM_slope=Kseq[which(capushe::DDSE(DataForCa)@model==DataForCa$model)]
        res.segfunct=c()
        res.segfunct=res.LoopK[[Kh$BM_slope]]
        res.segfunct.BM_slope= add_NA(res.segfunct,present.data,n.Data,segf=TRUE)
        Tmu$BM_slope=res.segfunct.BM_slope$Tmu
        funct$BM_slope=res.segfunct.BM_slope$f
        coeff$BM_slope=res.segfunct$coeff
      }
    } else {
      # f==FALSE: pure segmentation (no functional part) with monthly
      # variances
      funct=FALSE
      coeff=FALSE
      var.est.t=var.est.month[as.numeric(Data.X$month)]
      res.seg=SegMonthlyVarianceK(Data.X,Kmax,lmin,var.est.t)
      SSwg=res.seg$SSwg
      pen=5*Kseq+2*Kseq*log(n.X/Kseq)
      res.LoopK=res.seg$res.LoopK
      if (selection.K=="none"){
        res.seg.with.NA<- add_NA(res.seg,present.data,n.Data,segf=FALSE)
        Tmu=res.seg.with.NA$Tmu
        Kh=Kmax
      }
      if (selection.K=="Lav"){
        Kh=MLcriterion(SSwg, Kseq,S)
        res.seg.sol=c()
        res.seg.sol=SegMonthlyVarianceK(Data.X,Kh,lmin,var.est.t)
        res.seg.with.NA<- add_NA(res.seg.sol,present.data,n.Data,segf=FALSE)
        Tmu=res.seg.with.NA$Tmu
      }
      if (selection.K=="BM_BJ"){
        Kh=BMcriterion(SSwg,pen)
        res.seg.sol=c()
        res.seg.sol=SegMonthlyVarianceK(Data.X,Kh,lmin,var.est.t)
        res.seg.with.NA<- add_NA(res.seg.sol,present.data,n.Data,segf=FALSE)
        Tmu=res.seg.with.NA$Tmu
      }
      if (selection.K=="BM_slope"){
        DataForCa=data.frame(model=paste("K=",Kseq),pen=pen,complexity=Kseq,contrast=SSwg)
        Kh=Kseq[which(capushe::DDSE(DataForCa)@model==DataForCa$model)]
        res.seg.sol=c()
        res.seg.sol=SegMonthlyVarianceK(Data.X,Kh,lmin,var.est.t)
        res.seg.with.NA<- add_NA(res.seg.sol,present.data,n.Data,segf=FALSE)
        Tmu=res.seg.with.NA$Tmu
      }
      if (selection.K=="mBIC"){
        Kh=mBICcriterion(SSwg,res.seg$LogLg,n.X,Kseq)$Kh
        res.seg.sol=c()
        res.seg.sol=SegMonthlyVarianceK(Data.X,Kh,lmin,var.est.t)
        res.seg.with.NA<- add_NA(res.seg.sol,present.data,n.Data,segf=FALSE)
        Tmu=res.seg.with.NA$Tmu
      }
      if (selection.K=="All"){
        # apply the four criteria, re-using a previous fit whenever two
        # criteria select the same number of segments
        Tmu=list()
        Kh=list()
        #1=mBIC
        Kh.mBIC=mBICcriterion(SSwg,res.seg$LogLg,n.X,Kseq)$Kh
        res.seg.sol=c()
        res.seg.sol=SegMonthlyVarianceK(Data.X,Kh.mBIC,lmin,var.est.t)
        res.seg.mBIC<- add_NA(res.seg.sol,present.data,n.Data,segf=FALSE)
        Tmu$mBIC=res.seg.mBIC$Tmu
        Kh$mBIC=Kh.mBIC
        #2=ML (Lavielle)
        Kh.ML=MLcriterion(SSwg, Kseq,S)
        Kh$Lav=Kh.ML
        if (Kh.ML==Kh.mBIC){
          res.seg.ML=res.seg.mBIC
        } else{
          res.seg.sol=c()
          res.seg.sol=SegMonthlyVarianceK(Data.X,Kh.ML,lmin,var.est.t)
          res.seg.ML<- add_NA(res.seg.sol,present.data,n.Data,segf=FALSE)
        }
        Tmu$Lav=res.seg.ML$Tmu
        #3=BM_BJ
        Kh.BM_BJ=BMcriterion(SSwg,pen)
        Kh$BM_BJ=Kh.BM_BJ
        if ((Kh.BM_BJ==Kh.mBIC)) {
          res.seg.BM_BJ=res.seg.mBIC
        } else if ((Kh.BM_BJ==Kh.ML)) {
          res.seg.BM_BJ=res.seg.ML
        } else {
          res.seg.sol=c()
          res.seg.sol=SegMonthlyVarianceK(Data.X,Kh.BM_BJ,lmin,var.est.t)
          res.seg.BM_BJ<- add_NA(res.seg.sol,present.data,n.Data,segf=FALSE)
        }
        Tmu$BM_BJ=res.seg.BM_BJ$Tmu
        #4=BM_slope
        DataForCa=data.frame(model=paste("K=",Kseq),pen=pen,complexity=Kseq,contrast=SSwg)
        Kh.BM_slope=Kseq[which(capushe::DDSE(DataForCa)@model==DataForCa$model)]
        Kh$BM_slope=Kh.BM_slope
        if ((Kh.BM_slope==Kh.mBIC)) {
          res.seg.BM_slope=res.seg.mBIC
        } else if ((Kh.BM_slope==Kh.ML)) {
          res.seg.BM_slope=res.seg.ML
        } else if ((Kh.BM_slope==Kh.BM_BJ)) {
          res.seg.BM_slope=res.seg.BM_BJ
        } else {
          res.seg.sol=c()
          res.seg.sol=SegMonthlyVarianceK(Data.X,Kh.BM_slope,lmin,var.est.t)
          res.seg.BM_slope= add_NA(res.seg.sol,present.data,n.Data,segf=FALSE)
        }
        Tmu$BM_slope=res.seg.BM_slope$Tmu
      }
    }
    #Obtained segmentation
    result$K=Kh
    result$seg=Tmu
    result$funct=funct
    result$coeff=coeff
    #Global results
    result$variances=var.est.month
    result$SSR=SSwg
    result$Tot=res.LoopK
    return(result)
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_urls.R
\name{get_urls}
\alias{get_urls}
\title{Retrieve urls on google search}
\usage{
get_urls(search, how_many = 10)
}
\arguments{
\item{search}{A search string}
\item{how_many}{How many urls do you want to retrieve}
}
\description{
This function provides a simple crawler to retrieve urls from
google search
}
\examples{
get_urls("machine learning", 10)
}
|
/man/get_urls.Rd
|
permissive
|
samuelmacedo83/google.search.crawler
|
R
| false
| true
| 445
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_urls.R
\name{get_urls}
\alias{get_urls}
\title{Retrieve urls on google search}
\usage{
get_urls(search, how_many = 10)
}
\arguments{
\item{search}{A search string}
\item{how_many}{How many urls do you want to retrieve}
}
\description{
This function provides a simple crawler to retrieve urls from
google search
}
\examples{
get_urls("machine learning", 10)
}
|
# Exploratory analysis of COVID-19 daily case growth in Belarus compared with
# other European countries (JHU CSSE and ECDC data loaded from local .Rda files).
library(tidyverse)
library(lubridate)
library(scales)
# Loads pre-built data frames `covid.ECDC` and `covid` into the global environment.
load(file = "E:/R/COVID-19/covid.ECDC.Rda")
load(file = "E:/R/COVID-19/covid2.Rda")
# Exploratory check: Belarus against the EU background
# Country-level population lookup (ISO3 -> population) taken from the ECDC data.
ECDC2 <- covid.ECDC%>%
ungroup()%>%
select(ISO3, population)%>%
unique()
# Per-country daily new cases per 100k plus a 7-day right-aligned rolling mean,
# restricted to Europe and countries with at least 250k inhabitants.
a <- covid%>%
#filter(Państwo=="Białoruś"|Państwo=="Szwajcaria"|`Blok Ekonomiczny`=="Unia Europejska")%>%
filter(Kontynenty=="Europa")%>%
left_join(ECDC2, by="ISO3")%>%
filter(population>=250000)%>%
mutate(zach.100 = nowe.zachorowania*100000/population)%>%
group_by(Państwo)%>%
mutate(srednia= zoo::rollmean(zach.100, k=7, fill=NA, align="right"))%>%
# keep only days after the 0.1-cases-per-100k threshold was crossed
filter(srednia>0.1)%>%
# id = day number since crossing the threshold (per country)
mutate(id=row_number())%>%
# flag used to colour Belarus differently from the rest ("tak" = yes)
mutate(by=if_else(Państwo=="Białoruś", paste("tak"), paste("nie")))
# Latest rolling-mean value for Belarus (horizontal reference line in the plots).
linia1 <-filter(a, Państwo=="Białoruś", id==max(id))%>%
ungroup()%>%
select(srednia)%>%
pull()
# Number of days Belarus has spent above the threshold.
id2 <- a %>%
filter(Państwo=="Białoruś", id==max(id))%>%
ungroup()%>%
select(id)%>%
pull()
# Facet ordering: countries sorted by their rolling mean on Belarus' latest day id.
kolejnosc <- a %>%
filter(id==id2)%>%
arrange(desc(srednia))%>%
select(Państwo)
# Calendar date of Belarus' most recent observation (used in legend labels).
data.by <- a %>%
filter(Państwo=="Białoruś", id==max(id))%>%
ungroup()%>%
select(data)%>%
pull()
a$Państwo <- ordered(a$Państwo, levels = kolejnosc$Państwo)
# Faceted small multiples: 7-day rolling mean (line) and raw daily values
# (orange points) per 100k, by days since crossing the threshold; Belarus in red.
#png("bialorus.png", units="in", width=9, height=9, res=600)
ggplot(filter(a), aes(x=id, y=srednia, color=by))+
geom_point(aes(x=id, y=zach.100), color="orange")+
geom_path(size=1.5, alpha=0.8, show.legend = F) +
facet_wrap(~Państwo, ncol=8)+
# x axis clipped to the length of the Belarus series
coord_cartesian(xlim=c(0,sum(a$Państwo=="Białoruś")-1), ylim=c(0,17))+
scale_color_manual(values = c("tak"="red", "nie"="blue"))+
# dashed reference line at Belarus' current level (linia1)
geom_hline(aes(yintercept = linia1, linetype=""), color="red4")+
labs(x="liczba dni od przekroczenia 0,1 zakażenia na 100 tys. mieszkańców",
y="dzienny przyrost",
color="",
title = "Liczba dziennych zakażeń na 100 tys. mieszkańców",
#subtitle = paste( "stan na", format(as.Date(UA1$data), "%d/%m/%Y")),
caption = "Źródło: Center for Systems Science and Engineering at Johns Hopkins University")+
scale_linetype_manual(name = "", values = "longdash", labels = paste("poziom przyrostu zakażeń na Białorusi\nstan na ",format(data.by,"%d %B %Y")))+
theme_bw()+
theme(legend.position = "top", plot.caption = element_text( size = 8), plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5),
plot.background = element_rect(colour = "grey", size = 0.5), strip.background = element_rect(fill="grey90"))
#dev.off()
# Belarus only
# Keep just the Belarus rows; note this overwrites `a` for the rest of this section.
a <- a %>%
filter(Państwo=="Białoruś")
png("bialorus8 maja.png", units="in", width=7, height=7, res=600)
ggplot(filter(a), aes(x=data, y=srednia))+
geom_point(aes(x=data, y=zach.100), color="orange")+
geom_path(size=1.5, alpha=0.8, show.legend = F, color="blue") +
# loess smoother over the rolling mean (no confidence band)
geom_smooth(span=0.4, size=1, se=F)+
#facet_wrap(~Państwo, ncol=8)+
#coord_cartesian(xlim=c(0,sum(a$Państwo=="Białoruś")-1), ylim=c(0,17))+
#scale_color_manual(values = c("tak"="red", "nie"="blue"))+
#geom_hline(aes(yintercept = linia1, linetype=""), color="red4")+
labs(x="",
y="dzienny przyrost nowych przypadków na 100 tys. mieszkańców",
color="",
title = "Dynamika przyrostu nowych zakażeń na Białorusi",
#subtitle = paste( "stan na", format(as.Date(UA1$data), "%d/%m/%Y")),
caption = "Źródło: Center for Systems Science and Engineering at Johns Hopkins University")+
#scale_linetype_manual(name = "", values = "longdash", labels = paste("poziom przyrostu zakażeń na Białorusi\nstan na ",format(data.by,"%d %B %Y")))+
theme_bw()+
theme(legend.position = "top", plot.caption = element_text( size = 8), plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5),
plot.background = element_rect(colour = "grey", size = 0.5), strip.background = element_rect(fill="grey90"))
dev.off()
## Same plot, but with the date on the x axis
# Rebuild `a` for Belarus, Switzerland and the EU; keep country-level rows only.
a <- covid%>%
filter(Państwo=="Białoruś"|Państwo=="Szwajcaria"|`Blok Ekonomiczny`=="Unia Europejska")%>%
# country-level rows have an empty region prefix in `indeks`
separate(indeks, into = "region", sep="_")%>%
filter(region=="")%>%
left_join(ECDC2, by="ISO3")%>%
mutate(zach.100 = nowe.zachorowania*100000/population)%>%
group_by(Państwo)%>%
mutate(srednia= zoo::rollmean(zach.100, k=7, fill=NA, align="right"))%>%
# NOTE(review): 'srednia != is.na(srednia)' drops NA rows only incidentally (and
# also drops exact zeros); the intent was probably filter(!is.na(srednia)) — confirm.
filter(srednia!=is.na(srednia))
# NOTE(review): this rebuilt `a` has no `by` column, so aes(color=by) below will
# fail unless `by` survives from an earlier run — verify before reuse.
ggplot(filter(a), aes(x=data, y=srednia, color=by))+
geom_path(size=1.5, alpha=0.8) +
facet_wrap(~Państwo, ncol=5, scales="free_y")+
#coord_cartesian(ylim=c(0,30))+
#geom_hline(yintercept = linia1), linetype="dashed", color="chocolate3")+
labs(x="liczba dni od przekroczenia 0,1 zakażenia na 100 tys. mieszkańców",
y="dzienny przyrost",
color="",
title = "Liczba dziennych zakażeń na 100 tys. mieszkańców",
#subtitle = paste( "stan na", format(as.Date(UA1$data), "%d/%m/%Y")),
caption = "Źródło: Center for Systems Science and Engineering at Johns Hopkins University")+
theme_bw()+
theme(legend.position = "none", plot.caption = element_text( size = 8), plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5),
plot.background = element_rect(colour = "grey", size = 0.5))
############################################################################################################################################
# Belarus with ECDC data (same analysis rebuilt on the ECDC case counts)
a <- covid.ECDC%>%
filter(Kontynenty=="Europa")%>%
#filter(population>=250000)%>%
mutate(zach.100 = cases*100000/population)%>%
group_by(Państwo)%>%
mutate(srednia= zoo::rollmean(zach.100, k=7, fill=NA, align="right"))%>%
filter(srednia>0.1)%>%
mutate(id=row_number())%>%
mutate(by=if_else(Państwo=="Białoruś", paste("tak"), paste("nie")))
# Latest Belarus rolling-mean level (horizontal reference line).
linia1 <-filter(a, Państwo=="Białoruś", id==max(id))%>%
ungroup()%>%
select(srednia)%>%
pull()
# Facet order: countries ranked by their own peak rolling mean.
kolejnosc <- a %>%
filter(srednia==max(srednia))%>%
arrange(desc(srednia))%>%
select(Państwo)%>%
unique()
# Date of Belarus' most recent observation (legend label).
data.by <- a %>%
filter(Państwo=="Białoruś", id==max(id))%>%
ungroup()%>%
select(data)%>%
pull()
a$Państwo <- ordered(a$Państwo, levels = kolejnosc$Państwo)
png("bialorus.ECDC.3maj.png", units="in", width=12, height=8, res=600)
ggplot(filter(a), aes(x=data, y=srednia, color=by))+
geom_path(size=1.5, alpha=0.8, show.legend=F) +
# NOTE(review): 'scales=' is an empty argument here; drop it or give it a value.
facet_wrap(~Państwo, ncol=8, scales=)+
scale_color_manual(values = c("tak"="red", "nie"="blue"))+
#coord_cartesian(ylim=c(0,30))+
geom_hline(aes(yintercept = linia1, linetype=""), color="red4")+
labs(x="",
y="dzienny przyrost nowych przypadkóW",
color="",
title = "Liczba dziennych zakażeń na 100 tys. mieszkańców",
#subtitle = paste( "stan na", format(as.Date(UA1$data), "%d/%m/%Y")),
caption = "Źródło: European Centre for Disease Prevention and Control")+
scale_linetype_manual(name = "", values = "longdash", labels = paste("poziom przyrostu zakażeń na Białorusi\nstan na ",format(data.by,"%d %B %Y")))+
scale_x_date(date_breaks = "1 month",labels = date_format("%b"))+
theme_bw()+
theme(legend.position = "top", plot.caption = element_text( size = 8), plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5),
plot.background = element_rect(colour = "grey", size = 0.5))
dev.off()
# The same ECDC plot, but indexed by day number (id) instead of date
ggplot(filter(a), aes(x=id, y=srednia, color=by))+
geom_path(size=1.5, alpha=0.8, show.legend=F) +
facet_wrap(~Państwo, ncol=8, scales=)+
scale_color_manual(values = c("tak"="red", "nie"="blue"))+
#coord_cartesian(ylim=c(0,30))+
geom_hline(aes(yintercept = linia1, linetype=""), color="red4")+
labs(x="liczba dni od przekroczenia poziomu 0,1 zakażenia na 100 tys. mieszkańców",
y="dzienny przyrost nowych przypadkóW",
color="",
title = "Liczba dziennych zakażeń na 100 tys. mieszkańców",
#subtitle = paste( "stan na", format(as.Date(UA1$data), "%d/%m/%Y")),
caption = "Źródło: European Centre for Disease Prevention and Control")+
scale_linetype_manual(name = "", values = "longdash", labels = paste("poziom przyrostu zakażeń na Białorusi\nstan na ",format(data.by,"%d %B %Y")))+
theme_bw()+
theme(legend.position = "top", plot.caption = element_text( size = 8), plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5),
plot.background = element_rect(colour = "grey", size = 0.5))
## Scatterplot with Belarus only
a <- covid.ECDC%>%
filter(Państwo=="Białoruś")%>%
mutate(zach.100 = cases*100000/population)%>%
mutate(srednia= zoo::rollmean(zach.100, k=7, fill=NA, align="right"))%>%
# 7-day rolling mean of the absolute daily case counts
mutate(srednia.nowe= zoo::rollmean(cases, k=7, fill=NA, align="right"))%>%
filter(srednia>0.1)
# Raw daily cases (points), loess trend with CI (geom_smooth) and the 7-day
# rolling mean (orange path), zoomed to April/early May 2020.
ggplot(filter(a), aes(x=data, y=cases))+
geom_point(aes(size="dzienne nowe zakażenia"), color="blue", alpha=0.4) +
#scale_color_manual(values = c("nowe zakażenia"="blue4", "średnia"="red4" ))+
geom_smooth(aes(color="średnia"), size=1.5, se=T, span=0.4, level=0.95)+
coord_cartesian(xlim = c(ymd("2020-04-01"), ymd("2020-05-07")))+
geom_path(aes(x=data, y=srednia.nowe), color="orange", size=2)+
#facet_wrap(~Państwo, ncol=7, scales="free_y")+
#geom_hline(aes(yintercept = linia1, linetype=""), color="red4")+
#geom_vline(aes(xintercept = linia2, linetype=" "),color= "red4", show.legend = F)+
labs(x="ilość dni od przekroczenia 0,1 zakażenia na 100 tys. mieszkańców",
y="dzienna ilość nowych przypadków",
color="",
size= "",
title = "Liczba dziennych zakażeń na 100 tys. mieszkańców w państwach europejskich",
subtitle = paste("Stan na", format(max(a$data), "%d/%m/%Y"), ". Oś y indywidualna dla każdego państwa."),
caption = "Źródło: European Centre for Disease Prevention and Control")+
#scale_linetype_manual(name = c("", " "), values = c("solid", "longdash"), labels = c(paste("poziom przyrostu zakażeń w Polsce\nstan na ",format(data.pl,"%d %B %Y") ),
#"ilość dni od przekroczenia poziomu 0,1 zakażenia \nna 100 tys. mieszkancow w Polsce"))+
theme_bw()+
theme(legend.position = "top", plot.caption = element_text( size = 8), plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5),
plot.background = element_rect(colour = "grey", size = 0.5))
# Standard deviation for Belarus ------------------------------------
library(scales)
# Coefficient of variation of daily cases over the last 14 days, Belarus vs
# Poland; a suspiciously low CV would suggest smoothed/managed reporting.
bialorus <- covid.ECDC %>%
filter(Państwo=="Białoruś")%>%
filter(data>max(data-14))
sd(bialorus$cases)/mean(bialorus$cases)
PL <- covid.ECDC %>%
filter(Państwo=="Polska")%>%
filter(data>max(data-14))
(sd(PL$cases)/mean(PL$cases))*100
# Comparison of different countries
# European countries (>1M population) averaging more than 0.9 cases/100k
# over the last 7 days.
kraje <- covid.ECDC %>%
filter(data>max(data-7))%>%
filter(population>1e6)%>%
mutate(zach.100 = cases*100000/population)%>%
filter(Kontynenty=="Europa")%>%
group_by(Państwo)%>%
summarise(
mean.zach.100 = mean(zach.100)
)%>%
filter(mean.zach.100>0.9)%>%
select(Państwo)%>%
unique()%>%
ungroup()%>%
pull()
# Per-country coefficient of variation of daily cases over the last 7 days.
odchylenie <- covid.ECDC %>%
filter(data>max(data-7))%>%
mutate(zach.100 = cases*100000/population)%>%
filter(Kontynenty=="Europa")%>%
filter(Państwo %in%kraje)%>%
group_by(Państwo)%>%
filter(population>1e6)%>%
summarise(
srednia=mean(cases),
odchylenie = sd(cases)
)%>%
mutate(odchylenie.proc = odchylenie/srednia)%>%
arrange(odchylenie.proc)%>%
filter(odchylenie.proc>0)
# Horizontal bar chart of the CV per country.
ggplot(odchylenie, aes(x=reorder(Państwo, odchylenie.proc), y=odchylenie.proc))+
geom_col()+
coord_flip()+
scale_y_continuous(labels = percent)
library(plotly)
# NOTE(review): `p1` is never assigned in this script (the ggplot above is not
# stored); assign it first, e.g. p1 <- ggplot(...), or ggplotly(p1) will fail.
ggplotly(p1)
# Comparison of countries at the epidemic peak
odchylenie.szczyt <- covid.ECDC %>%
filter(Kontynenty=="Europa")%>%
filter(population>1e6)%>%
# NOTE(review): the rolling mean is computed before group_by(), so the 7-day
# window can bleed across country boundaries — confirm this is intended.
mutate(srednia.ruchoma = zoo::rollmean(cases, k=7, fill=NA, align="right"))%>%
group_by(Państwo)%>%
# TRUE on the day(s) the 7-day rolling mean peaks for that country
mutate(test = srednia.ruchoma==max(srednia.ruchoma, na.rm = T))%>%
mutate(id=row_number())
# Rolling 7-day CV (sd/mean, via the `roll` package) over the last 30 days.
a <- odchylenie.szczyt %>%
select(data, cases, Państwo, srednia.ruchoma, test, id)%>%
mutate(srednia.sd = roll::roll_sd(cases,7))%>%
mutate(odchylenie.proc = srednia.sd/srednia.ruchoma)%>%
filter(id>max(id)-30)%>%
filter(odchylenie.proc>0)
ggplot(a, aes(x=data, y=odchylenie.proc))+
geom_path()+
facet_wrap(~Państwo)
# Attempt to compare countries over the 7-day window starting at the peak
odchylenie.szczyt2 <- covid.ECDC %>%
filter(Kontynenty=="Europa")%>%
filter(population>1e6)%>%
# NOTE(review): rolling mean computed before group_by() — windows can bleed
# across country boundaries; consider moving this mutate() after group_by().
mutate(srednia.ruchoma = zoo::rollmean(cases, k=7, fill=NA, align="right"))%>%
group_by(Państwo)%>%
mutate(test = srednia.ruchoma==max(srednia.ruchoma, na.rm = T))%>%
mutate(id=row_number())%>%
# data2 = date of the peak (NA elsewhere); NOTE(review): if_else() with NULL as
# the 'false' branch errors in dplyr — likely meant as.Date(NA); confirm.
mutate(data2 = if_else(test==TRUE, data, NULL))%>%
# keep the 7 days starting at the peak date
filter(data>=max(data2, na.rm = T)&data<max(data2, na.rm = T)+7)%>%
summarise(
srednia=mean(cases),
odchylenie = sd(cases)
)%>%
mutate(odchylenie.proc = odchylenie/srednia)%>%
arrange(odchylenie.proc)
# Horizontal bar chart of the peak-week CV per country.
ggplot(odchylenie.szczyt2, aes(x=reorder(Państwo, odchylenie.proc), y=odchylenie.proc))+
geom_col()+
coord_flip()+
scale_y_continuous(labels = percent)+
theme_bw()
|
/bialorus.R
|
no_license
|
slawomirmatuszak/COVID-19
|
R
| false
| false
| 13,133
|
r
|
library(tidyverse)
library(lubridate)
library(scales)
load(file = "E:/R/COVID-19/covid.ECDC.Rda")
load(file = "E:/R/COVID-19/covid2.Rda")
# test Białoruś na tle UE
ECDC2 <- covid.ECDC%>%
ungroup()%>%
select(ISO3, population)%>%
unique()
a <- covid%>%
#filter(Państwo=="Białoruś"|Państwo=="Szwajcaria"|`Blok Ekonomiczny`=="Unia Europejska")%>%
filter(Kontynenty=="Europa")%>%
left_join(ECDC2, by="ISO3")%>%
filter(population>=250000)%>%
mutate(zach.100 = nowe.zachorowania*100000/population)%>%
group_by(Państwo)%>%
mutate(srednia= zoo::rollmean(zach.100, k=7, fill=NA, align="right"))%>%
filter(srednia>0.1)%>%
mutate(id=row_number())%>%
mutate(by=if_else(Państwo=="Białoruś", paste("tak"), paste("nie")))
linia1 <-filter(a, Państwo=="Białoruś", id==max(id))%>%
ungroup()%>%
select(srednia)%>%
pull()
id2 <- a %>%
filter(Państwo=="Białoruś", id==max(id))%>%
ungroup()%>%
select(id)%>%
pull()
kolejnosc <- a %>%
filter(id==id2)%>%
arrange(desc(srednia))%>%
select(Państwo)
data.by <- a %>%
filter(Państwo=="Białoruś", id==max(id))%>%
ungroup()%>%
select(data)%>%
pull()
a$Państwo <- ordered(a$Państwo, levels = kolejnosc$Państwo)
#png("bialorus.png", units="in", width=9, height=9, res=600)
ggplot(filter(a), aes(x=id, y=srednia, color=by))+
geom_point(aes(x=id, y=zach.100), color="orange")+
geom_path(size=1.5, alpha=0.8, show.legend = F) +
facet_wrap(~Państwo, ncol=8)+
coord_cartesian(xlim=c(0,sum(a$Państwo=="Białoruś")-1), ylim=c(0,17))+
scale_color_manual(values = c("tak"="red", "nie"="blue"))+
geom_hline(aes(yintercept = linia1, linetype=""), color="red4")+
labs(x="liczba dni od przekroczenia 0,1 zakażenia na 100 tys. mieszkańców",
y="dzienny przyrost",
color="",
title = "Liczba dziennych zakażeń na 100 tys. mieszkańców",
#subtitle = paste( "stan na", format(as.Date(UA1$data), "%d/%m/%Y")),
caption = "Źródło: Center for Systems Science and Engineering at Johns Hopkins University")+
scale_linetype_manual(name = "", values = "longdash", labels = paste("poziom przyrostu zakażeń na Białorusi\nstan na ",format(data.by,"%d %B %Y")))+
theme_bw()+
theme(legend.position = "top", plot.caption = element_text( size = 8), plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5),
plot.background = element_rect(colour = "grey", size = 0.5), strip.background = element_rect(fill="grey90"))
#dev.off()
#sama białoruś
a <- a %>%
filter(Państwo=="Białoruś")
png("bialorus8 maja.png", units="in", width=7, height=7, res=600)
ggplot(filter(a), aes(x=data, y=srednia))+
geom_point(aes(x=data, y=zach.100), color="orange")+
geom_path(size=1.5, alpha=0.8, show.legend = F, color="blue") +
geom_smooth(span=0.4, size=1, se=F)+
#facet_wrap(~Państwo, ncol=8)+
#coord_cartesian(xlim=c(0,sum(a$Państwo=="Białoruś")-1), ylim=c(0,17))+
#scale_color_manual(values = c("tak"="red", "nie"="blue"))+
#geom_hline(aes(yintercept = linia1, linetype=""), color="red4")+
labs(x="",
y="dzienny przyrost nowych przypadków na 100 tys. mieszkańców",
color="",
title = "Dynamika przyrostu nowych zakażeń na Białorusi",
#subtitle = paste( "stan na", format(as.Date(UA1$data), "%d/%m/%Y")),
caption = "Źródło: Center for Systems Science and Engineering at Johns Hopkins University")+
#scale_linetype_manual(name = "", values = "longdash", labels = paste("poziom przyrostu zakażeń na Białorusi\nstan na ",format(data.by,"%d %B %Y")))+
theme_bw()+
theme(legend.position = "top", plot.caption = element_text( size = 8), plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5),
plot.background = element_rect(colour = "grey", size = 0.5), strip.background = element_rect(fill="grey90"))
dev.off()
## to samo, tylko z datą na osi x
a <- covid%>%
filter(Państwo=="Białoruś"|Państwo=="Szwajcaria"|`Blok Ekonomiczny`=="Unia Europejska")%>%
separate(indeks, into = "region", sep="_")%>%
filter(region=="")%>%
left_join(ECDC2, by="ISO3")%>%
mutate(zach.100 = nowe.zachorowania*100000/population)%>%
group_by(Państwo)%>%
mutate(srednia= zoo::rollmean(zach.100, k=7, fill=NA, align="right"))%>%
filter(srednia!=is.na(srednia))
ggplot(filter(a), aes(x=data, y=srednia, color=by))+
geom_path(size=1.5, alpha=0.8) +
facet_wrap(~Państwo, ncol=5, scales="free_y")+
#coord_cartesian(ylim=c(0,30))+
#geom_hline(yintercept = linia1), linetype="dashed", color="chocolate3")+
labs(x="liczba dni od przekroczenia 0,1 zakażenia na 100 tys. mieszkańców",
y="dzienny przyrost",
color="",
title = "Liczba dziennych zakażeń na 100 tys. mieszkańców",
#subtitle = paste( "stan na", format(as.Date(UA1$data), "%d/%m/%Y")),
caption = "Źródło: Center for Systems Science and Engineering at Johns Hopkins University")+
theme_bw()+
theme(legend.position = "none", plot.caption = element_text( size = 8), plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5),
plot.background = element_rect(colour = "grey", size = 0.5))
############################################################################################################################################
# Białoruś i dane ECDC
a <- covid.ECDC%>%
filter(Kontynenty=="Europa")%>%
#filter(population>=250000)%>%
mutate(zach.100 = cases*100000/population)%>%
group_by(Państwo)%>%
mutate(srednia= zoo::rollmean(zach.100, k=7, fill=NA, align="right"))%>%
filter(srednia>0.1)%>%
mutate(id=row_number())%>%
mutate(by=if_else(Państwo=="Białoruś", paste("tak"), paste("nie")))
linia1 <-filter(a, Państwo=="Białoruś", id==max(id))%>%
ungroup()%>%
select(srednia)%>%
pull()
kolejnosc <- a %>%
filter(srednia==max(srednia))%>%
arrange(desc(srednia))%>%
select(Państwo)%>%
unique()
data.by <- a %>%
filter(Państwo=="Białoruś", id==max(id))%>%
ungroup()%>%
select(data)%>%
pull()
a$Państwo <- ordered(a$Państwo, levels = kolejnosc$Państwo)
png("bialorus.ECDC.3maj.png", units="in", width=12, height=8, res=600)
ggplot(filter(a), aes(x=data, y=srednia, color=by))+
geom_path(size=1.5, alpha=0.8, show.legend=F) +
facet_wrap(~Państwo, ncol=8, scales=)+
scale_color_manual(values = c("tak"="red", "nie"="blue"))+
#coord_cartesian(ylim=c(0,30))+
geom_hline(aes(yintercept = linia1, linetype=""), color="red4")+
labs(x="",
y="dzienny przyrost nowych przypadkóW",
color="",
title = "Liczba dziennych zakażeń na 100 tys. mieszkańców",
#subtitle = paste( "stan na", format(as.Date(UA1$data), "%d/%m/%Y")),
caption = "Źródło: European Centre for Disease Prevention and Control")+
scale_linetype_manual(name = "", values = "longdash", labels = paste("poziom przyrostu zakażeń na Białorusi\nstan na ",format(data.by,"%d %B %Y")))+
scale_x_date(date_breaks = "1 month",labels = date_format("%b"))+
theme_bw()+
theme(legend.position = "top", plot.caption = element_text( size = 8), plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5),
plot.background = element_rect(colour = "grey", size = 0.5))
dev.off()
# to samo z id
ggplot(filter(a), aes(x=id, y=srednia, color=by))+
geom_path(size=1.5, alpha=0.8, show.legend=F) +
facet_wrap(~Państwo, ncol=8, scales=)+
scale_color_manual(values = c("tak"="red", "nie"="blue"))+
#coord_cartesian(ylim=c(0,30))+
geom_hline(aes(yintercept = linia1, linetype=""), color="red4")+
labs(x="liczba dni od przekroczenia poziomu 0,1 zakażenia na 100 tys. mieszkańców",
y="dzienny przyrost nowych przypadkóW",
color="",
title = "Liczba dziennych zakażeń na 100 tys. mieszkańców",
#subtitle = paste( "stan na", format(as.Date(UA1$data), "%d/%m/%Y")),
caption = "Źródło: European Centre for Disease Prevention and Control")+
scale_linetype_manual(name = "", values = "longdash", labels = paste("poziom przyrostu zakażeń na Białorusi\nstan na ",format(data.by,"%d %B %Y")))+
theme_bw()+
theme(legend.position = "top", plot.caption = element_text( size = 8), plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5),
plot.background = element_rect(colour = "grey", size = 0.5))
## scatterplot z samą białorusią
a <- covid.ECDC%>%
filter(Państwo=="Białoruś")%>%
mutate(zach.100 = cases*100000/population)%>%
mutate(srednia= zoo::rollmean(zach.100, k=7, fill=NA, align="right"))%>%
mutate(srednia.nowe= zoo::rollmean(cases, k=7, fill=NA, align="right"))%>%
filter(srednia>0.1)
ggplot(filter(a), aes(x=data, y=cases))+
geom_point(aes(size="dzienne nowe zakażenia"), color="blue", alpha=0.4) +
#scale_color_manual(values = c("nowe zakażenia"="blue4", "średnia"="red4" ))+
geom_smooth(aes(color="średnia"), size=1.5, se=T, span=0.4, level=0.95)+
coord_cartesian(xlim = c(ymd("2020-04-01"), ymd("2020-05-07")))+
geom_path(aes(x=data, y=srednia.nowe), color="orange", size=2)+
#facet_wrap(~Państwo, ncol=7, scales="free_y")+
#geom_hline(aes(yintercept = linia1, linetype=""), color="red4")+
#geom_vline(aes(xintercept = linia2, linetype=" "),color= "red4", show.legend = F)+
labs(x="ilość dni od przekroczenia 0,1 zakażenia na 100 tys. mieszkańców",
y="dzienna ilość nowych przypadków",
color="",
size= "",
title = "Liczba dziennych zakażeń na 100 tys. mieszkańców w państwach europejskich",
subtitle = paste("Stan na", format(max(a$data), "%d/%m/%Y"), ". Oś y indywidualna dla każdego państwa."),
caption = "Źródło: European Centre for Disease Prevention and Control")+
#scale_linetype_manual(name = c("", " "), values = c("solid", "longdash"), labels = c(paste("poziom przyrostu zakażeń w Polsce\nstan na ",format(data.pl,"%d %B %Y") ),
#"ilość dni od przekroczenia poziomu 0,1 zakażenia \nna 100 tys. mieszkancow w Polsce"))+
theme_bw()+
theme(legend.position = "top", plot.caption = element_text( size = 8), plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5),
plot.background = element_rect(colour = "grey", size = 0.5))
# odchylenie standardowe dla Białorusi ------------------------------------
library(scales)
bialorus <- covid.ECDC %>%
filter(Państwo=="Białoruś")%>%
filter(data>max(data-14))
sd(bialorus$cases)/mean(bialorus$cases)
PL <- covid.ECDC %>%
filter(Państwo=="Polska")%>%
filter(data>max(data-14))
(sd(PL$cases)/mean(PL$cases))*100
#porównanie roznych państw
kraje <- covid.ECDC %>%
filter(data>max(data-7))%>%
filter(population>1e6)%>%
mutate(zach.100 = cases*100000/population)%>%
filter(Kontynenty=="Europa")%>%
group_by(Państwo)%>%
summarise(
mean.zach.100 = mean(zach.100)
)%>%
filter(mean.zach.100>0.9)%>%
select(Państwo)%>%
unique()%>%
ungroup()%>%
pull()
odchylenie <- covid.ECDC %>%
filter(data>max(data-7))%>%
mutate(zach.100 = cases*100000/population)%>%
filter(Kontynenty=="Europa")%>%
filter(Państwo %in%kraje)%>%
group_by(Państwo)%>%
filter(population>1e6)%>%
summarise(
srednia=mean(cases),
odchylenie = sd(cases)
)%>%
mutate(odchylenie.proc = odchylenie/srednia)%>%
arrange(odchylenie.proc)%>%
filter(odchylenie.proc>0)
ggplot(odchylenie, aes(x=reorder(Państwo, odchylenie.proc), y=odchylenie.proc))+
geom_col()+
coord_flip()+
scale_y_continuous(labels = percent)
library(plotly)
ggplotly(p1)
#porównanie roznych państw w szczycie epidemii
odchylenie.szczyt <- covid.ECDC %>%
filter(Kontynenty=="Europa")%>%
filter(population>1e6)%>%
mutate(srednia.ruchoma = zoo::rollmean(cases, k=7, fill=NA, align="right"))%>%
group_by(Państwo)%>%
mutate(test = srednia.ruchoma==max(srednia.ruchoma, na.rm = T))%>%
mutate(id=row_number())
a <- odchylenie.szczyt %>%
select(data, cases, Państwo, srednia.ruchoma, test, id)%>%
mutate(srednia.sd = roll::roll_sd(cases,7))%>%
mutate(odchylenie.proc = srednia.sd/srednia.ruchoma)%>%
filter(id>max(id)-30)%>%
filter(odchylenie.proc>0)
ggplot(a, aes(x=data, y=odchylenie.proc))+
geom_path()+
facet_wrap(~Państwo)
# próba porównania państw w szczycie 7 dni
odchylenie.szczyt2 <- covid.ECDC %>%
filter(Kontynenty=="Europa")%>%
filter(population>1e6)%>%
mutate(srednia.ruchoma = zoo::rollmean(cases, k=7, fill=NA, align="right"))%>%
group_by(Państwo)%>%
mutate(test = srednia.ruchoma==max(srednia.ruchoma, na.rm = T))%>%
mutate(id=row_number())%>%
mutate(data2 = if_else(test==TRUE, data, NULL))%>%
filter(data>=max(data2, na.rm = T)&data<max(data2, na.rm = T)+7)%>%
summarise(
srednia=mean(cases),
odchylenie = sd(cases)
)%>%
mutate(odchylenie.proc = odchylenie/srednia)%>%
arrange(odchylenie.proc)
ggplot(odchylenie.szczyt2, aes(x=reorder(Państwo, odchylenie.proc), y=odchylenie.proc))+
geom_col()+
coord_flip()+
scale_y_continuous(labels = percent)+
theme_bw()
|
# Set workspace directory and bring in datasets + libraries.
# Exploratory visualisation of mussel abundance vs. specific stream power for
# South Umpqua survey sites; reads three local CSVs and draws scatter/box plots.
setwd("/Users/williamjohnson/Desktop/Laura/Hallett_Lab/Repositories/thesis-mussels/site_DATAexplore")
library(tidyverse)
# as_tibble() replaces the deprecated as.tibble() alias.
abundance <- as_tibble(read.csv("laurancy.csv", header = TRUE))
streampwr <- as_tibble(read.csv("streamPWR.csv", header = TRUE))
dist <- as_tibble(read.csv("SUMPpnts_distance_area.csv", header = TRUE))
#select only needed columns from streampwr
streampwr2 <- streampwr %>%
select(site_id, av_SLPE_gradient, av_acw, Sstrpwr_2yr, Sstrpwr_5perc )
#### VISUALIZATION OF RIVER DISTANCE (KM) VS STREAM PWR (2 YR) for the South Umpqua
# Join abundance and stream pwr datasets on site_id (need to bring in site id variable)
AbunPwr <- inner_join(abundance, streampwr2, by = "site_id")
# Join AbunPwr and distance datasets by obs_id
AbunPwrDist <- inner_join(AbunPwr, dist, by = "obs_id")
AbunPwrDist <- AbunPwrDist %>%
filter(!usgs_gage %in% c("Riddle"))
# Visualize river distance (km) by stream pwr (2 yr)
ggplot(AbunPwrDist, aes(riv_dist_km, Sstrpwr_2yr, color = usgs_gage)) + geom_jitter()
###### FOLLOWING CODE ONLY APPLIES TO VISUALIZATIONS INVOLVING ABUNDANCES
#filter out nancy's obs that are repeats @ k bar ranch, wiegle rd, and coffee cr
abundance2 <- abundance %>%
filter(!obs_id %in% c("MAFA_CoffeCr1", "MAFA_KBarRanch", "MAFA_WiegleRd"))%>%
# filter out shell records
filter(!obs_type == "shell")
#Join abundance and streampwr datasets
abunpwr <- abundance2 %>%
inner_join(streampwr2, by = "site_id")
# Do initial visualization with streampwr2 and laurancy:
# log abundance vs 2-yr stream power, with a pooled linear fit.
ggplot(abunpwr, aes(Sstrpwr_2yr, log(total_count), color = usgs_gage)) + geom_jitter() +
stat_summary(fun.data=mean_cl_normal) +
geom_smooth(method='lm', formula= y~x, aes(group=1))
# No way to get around the fact that 90,000 mussels at TIL03 really obscures the graph... see what it looks like
# with this value removed
abunpwr %>%
filter(!obs_id == "TIL0301") %>%
ggplot(aes(Sstrpwr_2yr, log(total_count), color = usgs_gage)) + geom_jitter()
#Now do the same thing with 5 perc flow Sstrpwr
ggplot(abunpwr, aes(Sstrpwr_5perc, log(total_count), color= usgs_gage)) + geom_jitter() +
stat_summary(fun.data=mean_cl_normal) +
geom_smooth(method='lm', formula= y~x, aes(group=1))
# No way to get around the fact that 90,000 mussels at TIL03 really obscures the graph... see what it looks like
# with this value removed
abunpwr %>%
filter(!obs_id == "TIL0301") %>%
ggplot(aes(Sstrpwr_5perc, log(total_count), color = usgs_gage)) + geom_jitter()
# my points seem off... too many observations at log(x) value 6.... see what log(total_count) looks like
####### July 6th Box Plot of Values
streampwr <- as_tibble(read.csv("stats_streamPWR.csv", header = TRUE))
streampwr2 <- streampwr %>%
mutate(abundance2 = log(abundance)) %>%
dplyr::select(obs_id, site_id, abundance, abundance2, Sstrpwr_10yr) %>%
mutate(species = "M. falcata")
# FIX: colour by abundance2 (log abundance) — `total_count` is not a column of
# streampwr2 after the select() above, so aes(color = total_count) could not be
# resolved; the gradient code below (mid <- mean(streampwr2$abundance2)) and the
# commented "log(Mussel Abundance)" legend title confirm the intent.
# NOTE(review): geom_jitter() inherits no x/y aesthetics from ggplot() here —
# this layer likely needs aes(x = species, y = Sstrpwr_10yr); confirm.
boxplot <- ggplot(streampwr2) + geom_boxplot(aes(y = Sstrpwr_10yr, color = abundance2)) + #outlier.shape = NA) +
geom_jitter(width = 0.2) + ylab("10 YR Specific Stream Power (watts/m^2)") + theme_classic() #+
#scale_color_gradientn(name = "log(Mussel Abundance)" +
#theme(legend.title = element_text(size=rel(1.15), hjust=0.5, face="bold"))
mid <- mean(streampwr2$abundance2)
# Diverging gradient centred on the mean log-abundance...
boxplot <- boxplot + scale_color_gradient2(midpoint = mid, low = "red", mid = "blue", high = "green")
# ...then overridden by a rainbow gradient (the last colour scale wins).
boxplot <- boxplot + scale_color_gradientn(colours = rainbow(5))
|
/site_DATAexplore/StreamPWR.R
|
no_license
|
ljohnso8/thesis-mussels
|
R
| false
| false
| 3,487
|
r
|
# Set workspace directory and bring in datasets + libraries
setwd("/Users/williamjohnson/Desktop/Laura/Hallett_Lab/Repositories/thesis-mussels/site_DATAexplore")
library(tidyverse)
abundance <- as.tibble(read.csv("laurancy.csv", header = TRUE))
streampwr <- as.tibble(read.csv("streamPWR.csv", header = TRUE))
dist <- as.tibble(read.csv("SUMPpnts_distance_area.csv", header = TRUE))
#select only needed columns from streampwr
streampwr2 <- streampwr %>%
select(site_id, av_SLPE_gradient, av_acw, Sstrpwr_2yr, Sstrpwr_5perc )
#### VISUALIZATION OF RIVER DISTANCE (KM) VS STREAM PWR (2 YR) for the South Umpqua
# Join abundance and stream pwr datasets on site_id (need to bring in site id variable)
AbunPwr <- inner_join(abundance, streampwr2, by = "site_id")
# Join AbunPwr and distance datasets by obs_id
AbunPwrDist <- inner_join(AbunPwr, dist, by = "obs_id")
AbunPwrDist <- AbunPwrDist %>%
filter(!usgs_gage %in% c("Riddle"))
# Visualize river distance (km) by stream pwr (2 yr)
ggplot(AbunPwrDist, aes(riv_dist_km, Sstrpwr_2yr, color = usgs_gage)) + geom_jitter()
###### FOLLOWING CODE ONLY APPLIES TO VISUALIZATIONS INVOLVING ABUNDANCES
# Exploratory plots relating mussel abundance to specific stream power.
# Expects `abundance` and `streampwr2` data frames to already exist in the session.
#filter out nancy's obs that are repeats @ k bar ranch, wiegle rd, and coffee cr
abundance2 <- abundance %>%
  filter(!obs_id %in% c("MAFA_CoffeCr1", "MAFA_KBarRanch", "MAFA_WiegleRd"))%>%
  # filter out shell records
  filter(!obs_type == "shell")
#Join abundance and streampwr datasets
# inner_join keeps only sites present in BOTH tables.
abunpwr <- abundance2 %>%
  inner_join(streampwr2, by = "site_id")
# Do initial visualization with streampwr2 and laurancy
# 2-yr specific stream power vs log abundance, with a pooled linear fit.
ggplot(abunpwr, aes(Sstrpwr_2yr, log(total_count), color = usgs_gage)) + geom_jitter() +
  stat_summary(fun.data=mean_cl_normal) +
  geom_smooth(method='lm', formula= y~x, aes(group=1))
# No way to get around the fact that 90,000 mussels at TIL03 really obscures the graph... see what it looks like
# with this value removed
abunpwr %>%
  filter(!obs_id == "TIL0301") %>%
  ggplot(aes(Sstrpwr_2yr, log(total_count), color = usgs_gage)) + geom_jitter()
#Now do the same thing with 5 perc flow Sstrpwr
ggplot(abunpwr, aes(Sstrpwr_5perc, log(total_count), color= usgs_gage)) + geom_jitter() +
  stat_summary(fun.data=mean_cl_normal) +
  geom_smooth(method='lm', formula= y~x, aes(group=1))
# No way to get around the fact that 90,000 mussels at TIL03 really obscures the graph... see what it looks like
# with this value removed
abunpwr %>%
  filter(!obs_id == "TIL0301") %>%
  ggplot(aes(Sstrpwr_5perc, log(total_count), color = usgs_gage)) + geom_jitter()
# my points seem off... too many observations at log(x) value 6.... see what log(total_count) looks like
####### July 6th Box Plot of Values
# NOTE(review): as.tibble() is deprecated in favor of as_tibble() — confirm tibble version.
streampwr <- as.tibble(read.csv("stats_streamPWR.csv", header = TRUE))
streampwr2 <- streampwr %>%
  mutate(abundance2 = log(abundance)) %>%
  dplyr::select(obs_id, site_id, abundance, abundance2, Sstrpwr_10yr) %>%
  mutate(species = "M. falcata")
# NOTE(review): `total_count` is not among the columns selected into streampwr2
# above — presumably `abundance` or `abundance2` was intended; verify.
boxplot <- ggplot(streampwr2) + geom_boxplot(aes(y = Sstrpwr_10yr, color = total_count)) + #outlier.shape = NA) +
  geom_jitter(width = 0.2) + ylab("10 YR Specific Stream Power (watts/m^2)") + theme_classic() #+
#scale_color_gradientn(name = "log(Mussel Abundance)" +
#theme(legend.title = element_text(size=rel(1.15), hjust=0.5, face="bold"))
mid <- mean(streampwr2$abundance2)
# NOTE(review): the second scale_color_* call replaces the first (ggplot keeps
# only the last color scale added) — the gradient2 line has no effect.
boxplot <- boxplot + scale_color_gradient2(midpoint = mid, low = "red", mid = "blue", high = "green")
boxplot <- boxplot + scale_color_gradientn(colours = rainbow(5))
|
# Discrete-time logistic (Ricker) population growth simulation.
pop <- 100            # initial population size
K <- 1000             # carrying capacity
r <- 0.05             # intrinsic growth rate
n.steps <- 150        # number of time steps to record
# Preallocate the history vector instead of growing it with c() inside
# the loop (avoids repeated reallocation).
pop.hist <- numeric(n.steps)
for (i in seq_len(n.steps)) {
  pop.hist[i] <- pop
  # Ricker map: N(t+1) = N(t) * exp(r * (1 - N(t)/K))
  pop <- pop * exp(r * (1 - pop / K))
}
plot(pop.hist)
|
/chem160homework7/pop2.R
|
no_license
|
nhukim35/chem160homework7
|
R
| false
| false
| 122
|
r
|
# Discrete-time logistic (Ricker) population growth, accumulated functionally.
pop <- 100   # initial population size
K <- 1000    # carrying capacity
r <- 0.05    # intrinsic growth rate
# One growth step of the Ricker map; second argument is the (ignored) step index.
step.once <- function(n, ignored) n * exp(r * (1 - n / K))
# Accumulate the trajectory: element k+1 is one growth step applied to element k.
pop.hist <- Reduce(step.once, seq_len(149), init = pop, accumulate = TRUE)
# Advance `pop` past the last recorded value, as the original loop did.
pop <- step.once(pop.hist[150], NULL)
plot(pop.hist)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ses_operations.R
\name{ses_update_configuration_set_tracking_options}
\alias{ses_update_configuration_set_tracking_options}
\title{Modifies an association between a configuration set and a custom domain
for open and click event tracking}
\usage{
ses_update_configuration_set_tracking_options(ConfigurationSetName,
TrackingOptions)
}
\arguments{
\item{ConfigurationSetName}{[required] The name of the configuration set for which you want to update the
custom tracking domain.}
\item{TrackingOptions}{[required]}
}
\description{
Modifies an association between a configuration set and a custom domain
for open and click event tracking.
}
\details{
By default, images and links used for tracking open and click events are
hosted on domains operated by Amazon SES. You can configure a subdomain
of your own to handle these events. For information about using custom
domains, see the \href{https://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html}{Amazon SES Developer Guide}.
}
\section{Request syntax}{
\preformatted{svc$update_configuration_set_tracking_options(
ConfigurationSetName = "string",
TrackingOptions = list(
CustomRedirectDomain = "string"
)
)
}
}
\keyword{internal}
|
/cran/paws.customer.engagement/man/ses_update_configuration_set_tracking_options.Rd
|
permissive
|
johnnytommy/paws
|
R
| false
| true
| 1,307
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ses_operations.R
\name{ses_update_configuration_set_tracking_options}
\alias{ses_update_configuration_set_tracking_options}
\title{Modifies an association between a configuration set and a custom domain
for open and click event tracking}
\usage{
ses_update_configuration_set_tracking_options(ConfigurationSetName,
TrackingOptions)
}
\arguments{
\item{ConfigurationSetName}{[required] The name of the configuration set for which you want to update the
custom tracking domain.}
\item{TrackingOptions}{[required]}
}
\description{
Modifies an association between a configuration set and a custom domain
for open and click event tracking.
}
\details{
By default, images and links used for tracking open and click events are
hosted on domains operated by Amazon SES. You can configure a subdomain
of your own to handle these events. For information about using custom
domains, see the \href{https://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html}{Amazon SES Developer Guide}.
}
\section{Request syntax}{
\preformatted{svc$update_configuration_set_tracking_options(
ConfigurationSetName = "string",
TrackingOptions = list(
CustomRedirectDomain = "string"
)
)
}
}
\keyword{internal}
|
#' @title Query GDC data
#' @description
#' Uses the GDC API to search for files; it searches for both controlled and
#' open-access data.
#' For the legacy data arguments project, data.category, platform and/or file.extension should be used.
#' Please, see the vignette for a table with the possibilities.
#' @param project A list of valid projects (see list with TCGAbiolinks:::getGDCprojects()$project_id)
#' @param data.category A valid data category (see list with TCGAbiolinks:::getProjectSummary(project))
#' @param data.type A data type to filter the files to download
#' @param sample.type A sample type to filter the files to download
#' @param barcode A list of barcodes to filter the files to download
#' @param legacy Search in the legacy repository
#' @param data.format Data format filter ("VCF", "TXT", "BAM","SVS","BCR XML","BCR SSF XML",
#' "TSV", "BCR Auxiliary XML", "BCR OMF XML", "BCR Biotab", "MAF", "BCR PPS XML", "XLSX")
#' @param file.type To be used in the legacy database for some platforms,
#' to define which file types to be used.
#' @param workflow.type GDC workflow type
#' @param experimental.strategy Filter to experimental strategy. Harmonized: WXS, RNA-Seq, miRNA-Seq, Genotyping Array.
#' Legacy: WXS, RNA-Seq, miRNA-Seq, Genotyping Array,
#' DNA-Seq, Methylation array, Protein expression array, WXS,CGH array, VALIDATION, Gene expression array,WGS,
#' MSI-Mono-Dinucleotide Assay, miRNA expression array, Mixed strategies, AMPLICON, Exon array,
#' Total RNA-Seq, Capillary sequencing, Bisulfite-Seq
#' @param access Filter by access type. Possible values: controlled, open
#' @param platform Example:
#' \tabular{ll}{
#'CGH- 1x1M_G4447A \tab IlluminaGA_RNASeqV2 \cr
#'AgilentG4502A_07 \tab IlluminaGA_mRNA_DGE \cr
#'Human1MDuo \tab HumanMethylation450 \cr
#'HG-CGH-415K_G4124A \tab IlluminaGA_miRNASeq \cr
#'HumanHap550 \tab IlluminaHiSeq_miRNASeq \cr
#'ABI \tab H-miRNA_8x15K \cr
#'HG-CGH-244A \tab SOLiD_DNASeq \cr
#'IlluminaDNAMethylation_OMA003_CPI \tab IlluminaGA_DNASeq_automated \cr
#'IlluminaDNAMethylation_OMA002_CPI \tab HG-U133_Plus_2 \cr
#'HuEx- 1_0-st-v2 \tab Mixed_DNASeq \cr
#'H-miRNA_8x15Kv2 \tab IlluminaGA_DNASeq_curated \cr
#'MDA_RPPA_Core \tab IlluminaHiSeq_TotalRNASeqV2 \cr
#'HT_HG-U133A \tab IlluminaHiSeq_DNASeq_automated \cr
#'diagnostic_images \tab microsat_i \cr
#'IlluminaHiSeq_RNASeq \tab SOLiD_DNASeq_curated \cr
#'IlluminaHiSeq_DNASeqC \tab Mixed_DNASeq_curated \cr
#'IlluminaGA_RNASeq \tab IlluminaGA_DNASeq_Cont_automated \cr
#'IlluminaGA_DNASeq \tab IlluminaHiSeq_WGBS \cr
#'pathology_reports \tab IlluminaHiSeq_DNASeq_Cont_automated\cr
#'Genome_Wide_SNP_6 \tab bio \cr
#'tissue_images \tab Mixed_DNASeq_automated \cr
#'HumanMethylation27 \tab Mixed_DNASeq_Cont_curated \cr
#'IlluminaHiSeq_RNASeqV2 \tab Mixed_DNASeq_Cont
#'}
#' @export
#' @examples
#' query <- GDCquery(project = "TCGA-ACC",
#' data.category = "Copy Number Variation",
#' data.type = "Copy Number Segment")
#' \dontrun{
#' query <- GDCquery(project = "TARGET-AML",
#' data.category = "Transcriptome Profiling",
#' data.type = "miRNA Expression Quantification",
#' workflow.type = "BCGSC miRNA Profiling",
#' barcode = c("TARGET-20-PARUDL-03A-01R","TARGET-20-PASRRB-03A-01R"))
#' query <- GDCquery(project = "TARGET-AML",
#' data.category = "Transcriptome Profiling",
#' data.type = "Gene Expression Quantification",
#' workflow.type = "HTSeq - Counts",
#' barcode = c("TARGET-20-PADZCG-04A-01R","TARGET-20-PARJCR-09A-01R"))
#' query <- GDCquery(project = "TCGA-ACC",
#' data.category = "Copy Number Variation",
#' data.type = "Masked Copy Number Segment",
#' sample.type = c("Primary solid Tumor"))
#' query.met <- GDCquery(project = c("TCGA-GBM","TCGA-LGG"),
#' legacy = TRUE,
#' data.category = "DNA methylation",
#' platform = "Illumina Human Methylation 450")
#' query <- GDCquery(project = "TCGA-ACC",
#' data.category = "Copy number variation",
#' legacy = TRUE,
#' file.type = "hg19.seg",
#' barcode = c("TCGA-OR-A5LR-01A-11D-A29H-01"))
#' }
#' @return A data frame with the results and the parameters used
#' @importFrom jsonlite fromJSON
#' @importFrom knitr kable
#' @importFrom httr timeout
#' @importFrom dplyr pull
GDCquery <- function(project,
                     data.category,
                     data.type,
                     workflow.type,
                     legacy = FALSE,
                     access,
                     platform,
                     file.type,
                     barcode,
                     data.format,
                     experimental.strategy,
                     sample.type){
    # Fail fast if the GDC API is not reachable.
    isServeOK()
    # Normalize missing/FALSE optional arguments to NA so the filtering code
    # below can test all of them uniformly with is.na().
    suppressWarnings({
        # prepare output
        if(missing(sample.type)) {
            sample.type <- NA
        } else if(all(sample.type == FALSE)) {
            sample.type <- NA
        }
        if(missing(data.type)) {
            data.type <- NA
        } else if(data.type == FALSE) {
            data.type <- NA
        }
        if(missing(barcode)) {
            barcode <- NA
        } else if(length(barcode) == 1) {
            if(barcode == FALSE) barcode <- NA
        }
        if(missing(platform)) {
            platform <- NA
        } else if(platform == FALSE) {
            platform <- NA
        }
        if(missing(file.type)) {
            file.type <- NA
        } else if(file.type == FALSE) {
            file.type <- NA
        }
        if(missing(workflow.type)) {
            workflow.type <- NA
        } else if(workflow.type == FALSE) {
            workflow.type <- NA
        }
        if(missing(experimental.strategy)) {
            experimental.strategy <- NA
        } else if(experimental.strategy == FALSE) {
            experimental.strategy <- NA
        }
        if(missing(access)) {
            access <- NA
        } else if(access == FALSE) {
            access <- NA
        }
        if(missing(data.format)) {
            data.format <- NA
        } else if(data.format == FALSE) {
            data.format <- NA
        }
    })
    print.header("GDCquery: Searching in GDC database","section")
    message("Genome of reference: ",ifelse(legacy,"hg19","hg38"))
    # Check arguments
    checkProjectInput(project)
    checkDataCategoriesInput(project, data.category, legacy)
    if(!is.na(data.type)) checkDataTypeInput(legacy = legacy, data.type = data.type)
    if(!any(is.na(sample.type))) checkBarcodeDefinition(sample.type)
    # Query the API once per project and row-bind the per-project hits.
    results <- NULL
    print.header("Accessing GDC. This might take a while...","subsection")
    for(proj in project){
        url <- getGDCquery(project = proj,
                           data.category = data.category,
                           data.type = data.type,
                           legacy = legacy,
                           workflow.type = workflow.type,
                           platform = platform,
                           file.type = file.type,
                           files.access = access,
                           experimental.strategy = experimental.strategy,
                           sample.type = sample.type)
        message("ooo Project: ", proj)
        json <- tryCatch(
            getURL(url,fromJSON,timeout(600),simplifyDataFrame = TRUE),
            error = function(e) {
                message(paste("Error: ", e, sep = " "))
                message("We will retry to access GDC!")
                fromJSON(content(getURL(url,GET,timeout(600)), as = "text", encoding = "UTF-8"), simplifyDataFrame = TRUE)
            }
        )
        # If the server-side filters matched nothing, retry without the
        # workflow.type/platform filters; those fields are filtered locally below.
        if(json$data$pagination$count == 0) {
            url <- getGDCquery(project = proj,
                               data.category = data.category,
                               data.type = data.type,
                               legacy = legacy,
                               workflow.type = NA,
                               platform = NA,
                               file.type = file.type,
                               experimental.strategy = experimental.strategy,
                               files.access = access,
                               sample.type = sample.type)
            json <- tryCatch(
                getURL(url,fromJSON,timeout(600),simplifyDataFrame = TRUE),
                error = function(e) {
                    message(paste("Error: ", e, sep = " "))
                    message("We will retry to access GDC!")
                    fromJSON(content(getURL(url,GET,timeout(600)), as = "text", encoding = "UTF-8"), simplifyDataFrame = TRUE)
                }
            )
        }
        json$data$hits$acl <- NULL
        json$data$hits$project <- proj
        # Flatten the nested archive/analysis/center data frames into prefixed
        # columns so the per-project hits can be row-bound into one table.
        if("archive" %in% colnames(json$data$hits)){
            if(is.data.frame(json$data$hits$archive)){
                archive <- json$data$hits$archive
                colnames(archive)[1:ncol(archive)] <- paste0("archive_", colnames(archive)[1:ncol(archive)])
                json$data$hits$archive <- NULL
                json$data$hits <- cbind(json$data$hits, archive)
            }
        }
        if("analysis" %in% colnames(json$data$hits)){
            if(is.data.frame(json$data$hits$analysis)){
                analysis <- json$data$hits$analysis
                colnames(analysis)[2:ncol(analysis)] <- paste0("analysis_", colnames(analysis)[2:ncol(analysis)])
                json$data$hits$analysis <- NULL
                json$data$hits <- cbind(json$data$hits, analysis)
            }
        }
        if("center" %in% colnames(json$data$hits)){
            if(is.data.frame(json$data$hits$center)){
                center <- json$data$hits$center
                colnames(center)[2:ncol(center)] <- paste0("center_", colnames(center)[2:ncol(center)])
                json$data$hits$center <- NULL
                json$data$hits <- cbind(json$data$hits, center)
            }
        }
        results <- plyr::rbind.fill(as.data.frame(results),as.data.frame(json$data$hits))
    }
    if(ncol(results) == 1) {
        message("Sorry! There is no result for your query. Please check in GDC the data available or if there is no error in your query.")
        return (NULL)
    }
    print.header("Filtering results","subsection")
    if(!any(is.na(platform))){
        if(!(all(platform %in% results$platform))){
            stop("Please set a valid platform argument from the list below:\n => ", paste(unique(results$platform), collapse = "\n => "))
        }
        message("ooo By platform")
        results <- results[tolower(results$platform) %in% tolower(platform),]
    }
    # Filter by access
    if(!is.na(access)) {
        message("ooo By access")
        results <- results[grepl(access,results$access,ignore.case = TRUE),]
    }
    # Filter by experimental strategy
    if(!is.na(experimental.strategy)) {
        if(all(tolower(experimental.strategy) %in% tolower(results$experimental_strategy))) {
            message("ooo By experimental.strategy")
            results <- results[tolower(results$experimental_strategy) %in% tolower(experimental.strategy),]
        } else {
            message(paste0("The argument experimental_strategy does not match any of the results.\nPossible values:",
                           paste(unique(results$experimental_strategy),collapse = "\n=>")))
        }
    }
    # Filter by data format
    if(!is.na(data.format)) {
        if(all(tolower(data.format) %in% tolower(results$data_format))) {
            message("ooo By data.format")
            results <- results[tolower(results$data_format) %in% tolower(data.format),]
        } else {
            message(paste0("The argument data.format does not match any of the results.\nPossible values:",
                           paste(unique(results$data_format),collapse = "\n=>")))
        }
    }
    # Filter by data.type
    if(!is.na(data.type)) {
        if(!(tolower(data.type) %in% tolower(results$data_type))) {
            stop("Please set a valid data.type argument from the list below:\n => ", paste(unique(results$data_type), collapse = "\n => "))
        }
        message("ooo By data.type")
        results <- results[tolower(results$data_type) %in% tolower(data.type),]
    }
    # Filter by workflow.type
    if(!is.na(workflow.type)) {
        if(!(workflow.type %in% results$analysis_workflow_type)) {
            stop("Please set a valid workflow.type argument from the list below:\n => ", paste(unique(results$analysis_workflow_type), collapse = "\n => "))
        }
        message("ooo By workflow.type")
        results <- results[results$analysis_workflow_type %in% workflow.type,]
    }
    # Filter by file.type: map user-friendly aliases to regular expressions
    # matched against the file names (legacy archive naming conventions).
    if(!is.na(file.type)){
        message("ooo By file.type")
        pat <- file.type
        invert <- FALSE
        if(file.type == "normalized_results") pat <- "normalized_results"
        if(file.type == "results") pat <- "[^normalized_]results"
        if(file.type == "nocnv_hg18" | file.type == "nocnv_hg18.seg") pat <- "nocnv_hg18"
        if(file.type == "cnv_hg18" | file.type == "hg18.seg") pat <- "[^nocnv_]hg18.seg"
        if(file.type == "nocnv_hg19" | file.type == "nocnv_hg19.seg") pat <- "nocnv_hg19"
        if(file.type == "cnv_hg19" | file.type == "hg19.seg") pat <- "[^nocnv_]hg19.seg"
        if(file.type == "mirna") {
            pat <- "hg19.*mirna"
            invert <- TRUE
        }
        # if(file.type == "hg19.mirna") pat <- "hg19.mirna"
        # if(file.type == "hg19.mirbase20.mirna") pat <- "hg19.mirbase20.mirna"
        if(file.type == "hg19.isoform") pat <- "hg19.*isoform"
        if(file.type == "isoform") {
            pat <- "hg19.*isoform"
            invert <- TRUE
        }
        idx <- grep(pat,results$file_name,invert = invert)
        if(length(idx) == 0) {
            print(knitr::kable(sort(results$file_name)[1:10],col.names = "Files"))
            stop("We were not able to filter using this file type. Examples of available files are above. Please check the vignette for possible entries")
        }
        results <- results[idx,]
    }
    # get barcode of the samples
    # 1) Normally for each sample we will have only single information
    # however the mutation call uses both normal and tumor which are both
    # reported by the API
    if(!data.category %in% c("Clinical",
                             "Copy Number Variation",
                             "Biospecimen",
                             "Other",
                             "Simple Nucleotide Variation",
                             "Simple nucleotide variation")){
        # we also need to deal with pooled samples (mixed from different patients)
        # example CPT0000870008
        if("portions" %in% (results$cases[[1]]$samples[[1]] %>% names)) {
            aux <- plyr::laply(results$cases,
                               function(x) {
                                   summarize(x$samples[[1]],
                                             submitter_id = paste(submitter_id,collapse = ";"),
                                             is_ffpe = any(is_ffpe),
                                             sample_type = paste(sample_type,collapse = ";"),
                                             aliquot.submiter.id = x$samples[[1]]$portions[[1]]$analytes[[1]]$aliquots[[1]]$submitter_id)
                               }) %>% as.data.frame
        } else {
            aux <- plyr::laply(results$cases,
                               function(x) {
                                   summarize(x$samples[[1]],
                                             submitter_id = paste(submitter_id,collapse = ";"),
                                             is_ffpe = any(is_ffpe),
                                             sample_type = paste(sample_type,collapse = ";"))
                               }) %>% as.data.frame
        }
        results$sample_type <- aux$sample_type %>% as.character()
        results$is_ffpe <- aux$is_ffpe %>% as.logical
        # ORGANOID-PANCREATIC does not have aliquots
        if("aliquot.submiter.id" %in% colnames(aux)){
            results$cases <- aux$aliquot.submiter.id %>% as.character()
            results$sample.submitter_id <- aux$submitter_id %>% as.character()
        } else{
            results$cases <- aux$submitter_id %>% as.character()
            results$sample.submitter_id <- aux$submitter_id %>% as.character()
        }
    } else if(data.category %in% c("Clinical")){
        # Clinical has another structure
        aux <- plyr::laply(results$cases,
                           function(x) {
                               unlist(x,recursive = T)[c("submitter_id")]
                           }) %>% as.data.frame
        results$cases <- aux %>% dplyr::pull(1) %>% as.character()
    } else if(data.category %in% c("Biospecimen")){
        # Biospecimen has another structure
        aux <- plyr::laply(results$cases,
                           function(x) {
                               paste(x$submitter_id,collapse = ",")
                           })
        results$cases <- aux
    } else if(data.category == "Other"){
        # Auxiliary test files do not have case information linked to them;
        # extract the barcodes from the file names instead.
        results$cases <- str_extract_all(results$file_name,"TCGA-[:alnum:]{2}-[:alnum:]{4}") %>% unlist
    } else if(data.category %in% c( "Copy Number Variation","Simple nucleotide variation")){
        aux <- plyr::laply(results$cases,
                           function(x) {
                               lapply(x$samples,FUN = function(y) unlist(y,recursive = T)[c("portions.analytes.aliquots.submitter_id")]) %>%
                                   unlist %>%
                                   na.omit %>%
                                   paste(collapse = ",")
                           }) %>% as.data.frame %>% pull(1) %>% as.character()
        results$cases <- aux
    } else if(data.category == "Simple Nucleotide Variation"){
        if(data.type %in% "Masked Somatic Mutation"){
            # MAF files are one single file for all samples
            aux <- plyr::laply(results$cases[[1]]$samples,
                               function(x) {
                                   unlist(x,recursive = T)[c("portions.analytes.aliquots.submitter_id","sample_type1","sample_type2","is_ffpe1","is_ffpe2")]
                               }) %>% as.data.frame
            results$cases <- aux$portions.analytes.aliquots.submitter_id %>% as.character() %>% paste(collapse = ",")
            if(!is.na(sample.type)) sample.type <- NA # ensure no filtering will be applied
        } else {
            # Tumor/normal pairs: both aliquot barcodes and sample types are kept
            aux <- plyr::laply(results$cases,
                               function(x) {
                                   unlist(x$samples[[1]],recursive = T)[c("portions.analytes.aliquots.submitter_id","sample_type1","sample_type2","is_ffpe1","is_ffpe2")]
                               }) %>% as.data.frame
            results$sample_type1 <- aux$sample_type1 %>% as.character()
            results$sample_type2 <- aux$sample_type2 %>% as.character()
            results$is_ffpe1 <- aux$is_ffpe1 %>% as.logical
            results$is_ffpe2 <- aux$is_ffpe2 %>% as.logical
            results$cases <- aux$portions.analytes.aliquots.submitter_id %>% as.character()
            if(!is.na(sample.type)) sample.type <- NA # ensure no filtering will be applied
        }
    }
    # Filter by barcode
    if(!any(is.na(barcode))) {
        message("ooo By barcode")
        idx <- unique(unlist(sapply(barcode, function(x) grep(x, results$cases,ignore.case = TRUE))))
        if(length(idx) == 0) {
            print(knitr::kable(results$cases,col.names = "Available barcodes"))
            stop("None of the barcodes were matched. Available barcodes are above")
        }
        results <- results[idx,]
    }
    # Filter by sample.type
    if(!any(is.na(sample.type))) {
        if(!any(tolower(results$sample_type) %in% tolower(sample.type))) {
            aux <- as.data.frame(table(results$sample_type))
            aux <- aux[aux$Freq > 0,]
            print(kable(aux,row.names = FALSE,col.names = c("sample.type","Number of samples")))
            stop("Please set a valid sample.type argument from the list above.")
        }
        message("ooo By sample.type")
        results <- results[tolower(results$sample_type) %in% tolower(sample.type),]
    }
    # some how there are duplicated files in GDC we should remove them
    # Example of problematic query
    # query.exp <- GDCquery(project = "TCGA-BRCA",
    #                       legacy = TRUE,
    #                       data.category = "Gene expression",
    #                       data.type = "Gene expression quantification",
    #                       platform = "Illumina HiSeq",
    #                       file.type = "results",
    #                       experimental_strategy = "RNA-Seq",
    #                       sample.type = c("Primary solid Tumor","Solid Tissue Normal"))
    #
    print.header("Checking data","subsection")
    message("ooo Check if there are duplicated cases")
    if(any(duplicated(results$cases))) {
        message("Warning: There are more than one file for the same case. Please verify query results. You can use the command View(getResults(query)) in rstudio")
    }
    message("ooo Check if there results for the query")
    if(nrow(results) == 0) stop("Sorry, no results were found for this query")
    print.header("Preparing output","section")
    # The query object packs the results plus every argument used, so that
    # GDCdownload/GDCprepare can reproduce the search.
    ret <- data.frame(results = I(list(results)),
                      project = I(list(project)),
                      data.category = data.category,
                      data.type = data.type,
                      legacy = legacy,
                      access = I(list(access)),
                      experimental.strategy = I(list(experimental.strategy)),
                      file.type = file.type,
                      platform = I(list(platform)),
                      sample.type = I(list(sample.type)),
                      barcode = I(list(barcode)),
                      workflow.type = workflow.type)
    return(ret)
}
getGDCquery <- function(project, data.category, data.type, legacy, workflow.type,platform,file.type,files.access,sample.type,experimental.strategy){
    # Build the GDC API search URL for a single project; NA arguments are
    # skipped. Returns the complete URL string — the caller performs the request.
    # Get manifest using the API
    baseURL <- ifelse(legacy,"https://api.gdc.cancer.gov/legacy/files/?","https://api.gdc.cancer.gov/files/?")
    options.pretty <- "pretty=true"
    # Which related entities to expand depends on the data category:
    # protein expression (legacy) needs archive fields, clinical/biospecimen
    # only the case level, everything else the full aliquot hierarchy.
    if(data.category == "Protein expression" & legacy) {
        options.expand <- "fields=archive.revision,archive.file_name,md5sum,state,data_category,file_id,platform,file_name,file_size,md5sum,submitter_id,data_type&expand=cases.samples.portions,cases.project,center,analysis"
    } else if(data.category %in% c("Clinical","Biospecimen")) {
        options.expand <- "expand=cases,cases.project,center,analysis"
    } else {
        options.expand <- "expand=cases.samples.portions.analytes.aliquots,cases.project,center,analysis,cases.samples"
    }
    # Request exactly as many entries as the project has files (no paging).
    option.size <- paste0("size=",getNbFiles(project,data.category,legacy))
    option.format <- paste0("format=JSON")
    # The filters parameter is a URL-encoded JSON "and" clause; each helper
    # call below appends one "in" sub-clause via addFilter().
    options.filter <- paste0("filters=",
                             URLencode('{"op":"and","content":['), # Start json request
                             URLencode('{"op":"in","content":{"field":"cases.project.project_id","value":["'),
                             project,
                             URLencode('"]}}'))
    if(!is.na(experimental.strategy)) options.filter <- paste0(options.filter,addFilter("files.experimental_strategy", experimental.strategy))
    if(!is.na(data.category)) options.filter <- paste0(options.filter,addFilter("files.data_category", data.category))
    if(!is.na(data.type)) options.filter <- paste0(options.filter,addFilter("files.data_type", data.type))
    if(!is.na(workflow.type)) options.filter <- paste0(options.filter,addFilter("files.analysis.workflow_type", workflow.type))
    if(!any(is.na(platform))) options.filter <- paste0(options.filter,addFilter("files.platform", platform))
    # Legacy file-type aliases are matched through the files.tags field.
    if(!any(is.na(file.type))) {
        if(file.type == "results" & legacy) options.filter <- paste0(options.filter,addFilter("files.tags", "unnormalized"))
        if(file.type == "normalized_results" & legacy) options.filter <- paste0(options.filter,addFilter("files.tags", "normalized"))
        if(file.type == "nocnv_hg19.seg" & legacy) options.filter <- paste0(options.filter,addFilter("files.tags", "nocnv"))
        if(file.type == "hg19.isoform" & legacy) options.filter <- paste0(options.filter,addFilter("files.tags", "hg19"))
    }
    if(!any(is.na(files.access))) {
        options.filter <- paste0(options.filter,addFilter("files.access", files.access))
    }
    # Map the legacy sample-type spellings to the harmonized GDC names
    # before filtering server-side.
    if(!any(is.na(sample.type))) {
        if("Primary solid Tumor" %in% sample.type) sample.type[sample.type == "Primary solid Tumor"] <- "Primary Tumor"
        if("Recurrent Solid Tumor" %in% sample.type) sample.type[sample.type == "Recurrent Solid Tumor"] <- "Recurrent Tumor"
        options.filter <- paste0(options.filter,addFilter("cases.samples.sample_type", sample.type))
    }
    # Close json request
    options.filter <- paste0(options.filter, URLencode(']}'))
    url <- paste0(baseURL,paste(options.pretty,
                                options.expand,
                                option.size,
                                options.filter,
                                option.format,
                                sep = "&"))
    return(url)
}
addFilter <- function(field, values){
    # Build one URL-encoded GDC "in" filter clause for the given field and
    # value(s), ready to be appended inside an existing "and" filter list.
    value.list <- paste0(values, collapse = '","')
    pieces <- c(',{"op":"in","content":{"field":"',
                field,
                '","value":["',
                value.list,
                '"]}}')
    # URL-encode each fragment individually, exactly as the API expects.
    paste(vapply(pieces, URLencode, character(1)), collapse = "")
}
expandBarcodeInfo <- function(barcode){
    # Split TCGA/TARGET barcodes into their components (patient, sample,
    # tissue code, ...) and merge in the human-readable tissue definitions.
    # NOTE(review): if `barcode` contains neither TCGA nor TARGET entries,
    # `ret` is never assigned and the final return() errors — confirm all
    # callers pass one of the two formats.
    # NOTE(review): the TARGET branch builds an S4Vectors DataFrame while the
    # TCGA branch builds a base data.frame; if both patterns occur in the same
    # input the TCGA result silently overwrites the TARGET one.
    if(any(grepl("TARGET",barcode))) {
        ret <- DataFrame(barcode = barcode,
                         code = substr(barcode, 8, 9),
                         case.unique.id = substr(barcode, 11, 16),
                         tissue.code = substr(barcode, 18, 19),
                         nucleic.acid.code = substr(barcode, 24, 24))
        ret <- merge(ret,getBarcodeDefinition(), by = "tissue.code", sort = FALSE, all.x = TRUE)
        # merge() reorders rows; restore the original barcode order.
        ret <- ret[match(barcode,ret$barcode),]
    }
    if(any(grepl("TCGA",barcode))) {
        ret <- data.frame(barcode = barcode,
                          patient = substr(barcode, 1, 12),
                          sample = substr(barcode, 1, 16),
                          tissue.code = substr(barcode, 14, 15))
        ret <- merge(ret,getBarcodeDefinition(), by = "tissue.code", sort = FALSE, all.x = TRUE)
        # merge() reorders rows; restore the original barcode order.
        ret <- ret[match(barcode,ret$barcode),]
    }
    return(ret)
}
getBarcodeDefinition <- function(type = "TCGA"){
    # Lookup table mapping the two-digit tissue code embedded in a barcode
    # to its definition.
    # type: "TCGA" returns a base data.frame with the short letter codes;
    #       any other value returns the TARGET table (S4Vectors DataFrame,
    #       no short letter codes).
    if(type == "TCGA"){
        tissue.code <- c('01','02','03','04','05','06','07','08','09','10','11',
                         '12','13','14','20','40','50','60','61')
        shortLetterCode <- c("TP","TR","TB","TRBM","TAP","TM","TAM","THOC",
                             "TBM","NB","NT","NBC","NEBV","NBM","CELLC","TRB",
                             "CELL","XP","XCL")
        tissue.definition <- c("Primary Tumor",
                               "Recurrent Tumor",
                               "Primary Blood Derived Cancer - Peripheral Blood",
                               "Recurrent Blood Derived Cancer - Bone Marrow",
                               "Additional - New Primary",
                               "Metastatic",
                               "Additional Metastatic",
                               "Human Tumor Original Cells",
                               "Primary Blood Derived Cancer - Bone Marrow",
                               "Blood Derived Normal",
                               "Solid Tissue Normal",
                               "Buccal Cell Normal",
                               "EBV Immortalized Normal",
                               "Bone Marrow Normal",
                               "Control Analyte",
                               "Recurrent Blood Derived Cancer - Peripheral Blood",
                               "Cell Lines",
                               "Primary Xenograft Tissue",
                               "Cell Line Derived Xenograft Tissue")
        aux <- data.frame(tissue.code = tissue.code,shortLetterCode,tissue.definition)
    } else {
        # TARGET uses a wider set of tissue codes than TCGA.
        tissue.code <- c('01','02','03','04','05','06','07','08','09','10','11',
                         '12','13','14','15','16','17','20','40','41','42','50','60','61','99')
        tissue.definition <- c("Primary solid Tumor", # 01
                               "Recurrent Solid Tumor", # 02
                               "Primary Blood Derived Cancer - Peripheral Blood", # 03
                               "Recurrent Blood Derived Cancer - Bone Marrow", # 04
                               "Additional - New Primary", # 05
                               "Metastatic", # 06
                               "Additional Metastatic", # 07
                               "Tissue disease-specific post-adjuvant therapy", # 08
                               "Primary Blood Derived Cancer - Bone Marrow", # 09
                               "Blood Derived Normal", # 10
                               "Solid Tissue Normal", # 11
                               "Buccal Cell Normal", # 12
                               "EBV Immortalized Normal", # 13
                               "Bone Marrow Normal", # 14
                               "Fibroblasts from Bone Marrow Normal", # 15
                               "Mononuclear Cells from Bone Marrow Normal", # 16
                               "Lymphatic Tissue Normal (including centroblasts)", # 17
                               "Control Analyte", # 20
                               "Recurrent Blood Derived Cancer - Peripheral Blood", # 40
                               "Blood Derived Cancer- Bone Marrow, Post-treatment", # 41
                               "Blood Derived Cancer- Peripheral Blood, Post-treatment", # 42
                               "Cell line from patient tumor", # 50
                               "Xenograft from patient not grown as intermediate on plastic tissue culture dish", # 60
                               "Xenograft grown in mice from established cell lines", #61
                               "Granulocytes after a Ficoll separation") # 99
        # NOTE(review): this branch returns an S4Vectors DataFrame while the
        # TCGA branch returns a base data.frame — confirm callers handle both.
        aux <- DataFrame(tissue.code = tissue.code,tissue.definition)
    }
    return(aux)
}
#' @title Retrieve open access maf files from GDC server
#' @description
#' GDCquery_Maf uses the following guide to download maf files
#' https://gdc-docs.nci.nih.gov/Data/Release_Notes/Data_Release_Notes/
#' @param pipelines Four separate variant calling pipelines are implemented for GDC data harmonization.
#' Options: muse, varscan2, somaticsniper, mutect2. For more information:
#' https://gdc-docs.nci.nih.gov/Data/Bioinformatics_Pipelines/DNA_Seq_Variant_Calling_Pipeline/
#' @param tumor a valid tumor
#' @param save.csv Write maf file into a csv document
#' @param directory Directory/Folder where the data will downloaded. Default: GDCdata
#' @export
#' @importFrom data.table fread
#' @import readr stringr
#' @importFrom downloader download
#' @importFrom R.utils gunzip
#' @importFrom tools md5sum
#' @examples
#' \dontrun{
#' acc.muse.maf <- GDCquery_Maf("ACC", pipelines = "muse")
#' acc.varscan2.maf <- GDCquery_Maf("ACC", pipelines = "varscan2")
#' acc.somaticsniper.maf <- GDCquery_Maf("ACC", pipelines = "somaticsniper")
#' acc.mutect.maf <- GDCquery_Maf("ACC", pipelines = "mutect2")
#' }
#' @return A data frame with the maf file information
GDCquery_Maf <- function(tumor,
                         save.csv = FALSE,
                         directory = "GDCdata",
                         pipelines = NULL){
    # Download and prepare the open-access MAF file of one TCGA tumor for one
    # of the four GDC variant-calling pipelines.
    # tumor:     TCGA tumor abbreviation, e.g. "ACC" (queried as "TCGA-ACC").
    # save.csv:  also write the MAF as a csv file inside `directory`.
    # directory: folder where the data will be downloaded. Default: GDCdata.
    # pipelines: one of muse, varscan2, somaticsniper, mutect2.
    # Returns the MAF content as a data frame; stops on an invalid pipeline
    # or when no MAF file matches the workflow.
    if(is.null(pipelines)) stop("Please select the pipeline argument (muse, varscan2, somaticsniper, mutect2)")
    # Map the pipeline name to the GDC workflow type. All four names are
    # matched case-insensitively (previously only varscan/mutect were).
    if(grepl("varscan", pipelines, ignore.case = TRUE)) {
        workflow.type <- "VarScan2 Variant Aggregation and Masking"
    } else if(grepl("muse", pipelines, ignore.case = TRUE)) {
        workflow.type <- "MuSE Variant Aggregation and Masking"
    } else if(grepl("somaticsniper", pipelines, ignore.case = TRUE)) {
        workflow.type <- "SomaticSniper Variant Aggregation and Masking"
    } else if(grepl("mutect", pipelines, ignore.case = TRUE)) {
        workflow.type <- "MuTect2 Variant Aggregation and Masking"
    } else {
        stop("Please select the pipeline argument (muse, varscan2, somaticsniper, mutect2)")
    }
    # Info to user
    message("============================================================================")
    message(" For more information about MAF data please read the following GDC manual and web pages:")
    message(" GDC manual: https://gdc-docs.nci.nih.gov/Data/PDF/Data_UG.pdf")
    message(" https://gdc-docs.nci.nih.gov/Data/Bioinformatics_Pipelines/DNA_Seq_Variant_Calling_Pipeline/")
    message(" https://gdc.cancer.gov/about-gdc/variant-calling-gdc")
    message("============================================================================")
    query <- GDCquery(paste0("TCGA-",tumor),
                      data.category = "Simple Nucleotide Variation",
                      data.type = "Masked Somatic Mutation",
                      workflow.type = workflow.type,
                      access = "open")
    if(nrow(query$results[[1]]) == 0) stop("No MAF file found for this type of workflow")
    # Try the API download first, fall back to the GDC client, and as a last
    # resort fetch the file straight from the manifest.
    maf <- tryCatch({
        tryCatch({
            GDCdownload(query, directory = directory, method = "api")
        }, error = function(e) {
            GDCdownload(query, directory = directory, method = "client")
        })
        maf <- GDCprepare(query, directory = directory)
        maf
    }, error = function(e) {
        manifest <- getManifest(query)
        GDCdownload.aux( "https://api.gdc.cancer.gov/data/", manifest, manifest$filename, ".")
        maf <- readSimpleNucleotideVariationMaf(file.path(manifest$id,manifest$filename))
        maf
    })
    if(save.csv) {
        # Reuse the downloaded file name, swapping the .gz suffix for .csv.
        fout <- file.path(directory,gsub("\\.gz", "\\.csv",getResults(query)$file_name))
        write_csv(maf, fout)
        message(paste0("File created: ", fout))
    }
    return(maf)
}
#' @title Retrieve open access mc3 MAF file from GDC server
#' @description
#' Download data from https://gdc.cancer.gov/about-data/publications/mc3-2017
#' https://gdc-docs.nci.nih.gov/Data/Release_Notes/Data_Release_Notes/
#' @examples
#' \dontrun{
#' maf <- getMC3MAF()
#' }
#' @return A data frame with the MAF file information from https://gdc.cancer.gov/about-data/publications/mc3-2017
#' @export
getMC3MAF <- function(){
    # Download (if needed), read, and annotate the public MC3 MAF release.
    # Adds a project_id column by matching each Tumor_Sample_Barcode's patient
    # part against every TCGA project. Returns the MAF as a data frame.
    # Local file name and GDC UUID of the MC3 v0.2.8 public release.
    fout <- "mc3.v0.2.8.PUBLIC.maf.gz"
    fpath <- "https://api.gdc.cancer.gov/data/1c8cfe5f-e52d-41ba-94da-f15ea1337efc"
    # Binary transfer mode is required on Windows to avoid corrupting the gzip.
    if(is.windows()) mode <- "wb" else mode <- "w"
    message(rep("-",100))
    message("o Starting to download Public MAF from GDC")
    message("o More information at: https://gdc.cancer.gov/about-data/publications/mc3-2017")
    message("o Please, cite: Cell Systems. Volume 6 Issue 3: p271-281.e7, 28 March 2018 10.1016/j.cels.2018.03.002")
    # Skip the download when the uncompressed file is already present.
    if(!file.exists(gsub("\\.gz", "", fout))){
        download(fpath, fout, mode = mode)
        message("o Uncompressing file")
        gunzip(fout, remove = FALSE)
    }
    message("o Reading MAF")
    maf <- readr::read_tsv(gsub("\\.gz", "", fout),progress = TRUE, col_types = readr::cols())
    message("o Adding project_id information")
    # Build a patient-barcode -> project lookup covering all TCGA projects.
    project <- grep("TCGA",sort(getGDCprojects()$project_id),value = TRUE)
    df <- plyr::adply(project,
                      .margins = 1,
                      .fun = function(proj) {
                          samples <- getSubmitterID(proj)
                          return(data.frame(proj,samples))
                      }
    )
    maf$project_id <- df$proj[match(substr(maf$Tumor_Sample_Barcode,1,12),df$samples)] %>% as.character
    message(rep("-",100))
    # Fix: the original version never returned the MAF, despite the
    # documented @return, so callers received the result of message().
    return(maf)
}
#' @title Query gene counts of TCGA and GTEx data from the Recount2 project
#' @description
#' TCGArecount2_query queries and downloads data produced by the Recount2 project. User can specify which project and which tissue to query
#' @param project is a string denoting which project the user wants. Options are "tcga" and "gtex"
#' @param tissue a vector of tissue(s) to download. Options are "adipose tissue", "adrenal gland", "bladder","blood", "blood vessel", "bone marrow", "brain", "breast","cervix uteri", "colon", "esophagus", "fallopian tube","heart", "kidney", "liver", "lung", "muscle", "nerve", "ovary","pancreas", "pituitary", "prostate", "salivary gland", "skin", "small intestine", "spleen", "stomach", "testis", "thyroid", "uterus", "vagina"
#' @export
#' @examples
#' \dontrun{
#' brain.rec<-TCGAquery_recount2(project = "gtex", tissue = "brain")
#' }
#' @return List with $subtypes attribute as a dataframe with barcodes, samples, subtypes, and colors. The $filtered attribute is returned as filtered samples with no subtype info
TCGAquery_recount2 <- function(project, tissue = c()){
    # Download Recount2 RangedSummarizedExperiment objects for the requested
    # project ("gtex" or "tcga") and tissue(s).
    # @param project "gtex" or "tcga" (case-insensitive)
    # @param tissue  character vector of tissue names; spaces are accepted and
    #                converted to underscores (e.g. "adipose tissue")
    # @return named list of rse_gene objects, one entry per requested tissue
    tissuesGTEx <- c(
        "adipose_tissue",
        "adrenal_gland",
        "bladder",
        "blood",
        "blood_vessel",
        "bone_marrow",
        "brain",
        "breast",
        "cervix_uteri",
        "colon",
        "esophagus",
        "fallopian_tube",
        "heart",
        "kidney",
        "liver",
        "lung",
        "muscle",
        "nerve",
        "ovary",
        "pancreas",
        "pituitary",
        "prostate",
        "salivary_gland",
        "skin",
        "small_intestine",
        "spleen",
        "stomach",
        "testis",
        "thyroid",
        "uterus",
        "vagina"
    )
    tissuesTCGA <- c(
        "adrenal_gland",
        "bile_duct",
        "bladder",
        "bone_marrow",
        "brain",
        "breast",
        "cervix",
        "colorectal",
        "esophagus",
        "eye",
        "head_and_neck",
        "kidney",
        "liver",
        "lung",
        "lymph_nodes",
        "ovary",
        "pancreas",
        "pleura",
        "prostate",
        "skin",
        "soft_tissue",
        "stomach",
        "testis",
        "thymus",
        "thyroid",
        "uterus")
    # BUG FIX: normalize each element separately. The original collapsed the
    # whole vector into one string (c("brain","lung") -> "brain_lung"), and the
    # loop below tested/used `tissue` instead of the loop variable `t_i`, so
    # multi-tissue requests were broken.
    tissue <- gsub(" ", "_", tissue)
    Res <- list()
    if(tolower(project) == "gtex"){
        for(t_i in tissue){
            if(t_i %in% tissuesGTEx){
                con <- paste0("http://duffel.rail.bio/recount/v2/SRP012682/rse_gene_", t_i, ".Rdata")
                message(paste0("downloading Range Summarized Experiment for: ", t_i))
                # loads the object `rse_gene` into this frame
                load(url(con))
                Res[[paste0(project,"_", t_i)]] <- rse_gene
            }
            else stop(paste0(t_i, " is not an available tissue on Recount2"))
        }
        return(Res)
    }
    else if(tolower(project) == "tcga"){
        for(t_i in tissue){
            if(t_i %in% tissuesTCGA){
                con <- paste0("http://duffel.rail.bio/recount/v2/TCGA/rse_gene_", t_i, ".Rdata")
                message(paste0("downloading Range Summarized Experiment for: ", t_i))
                load(url(con))
                Res[[paste0(project,"_", t_i)]] <- rse_gene
            }
            else stop(paste0(t_i, " is not an available tissue on Recount2"))
        }
        return(Res)
    }
    else stop(paste0(project, " is not a valid project"))
}
#' @title Retrieve open access ATAC-seq files from GDC server
#' @description
#' Retrieve open access ATAC-seq files from GDC server
#' https://gdc.cancer.gov/about-data/publications/ATACseq-AWG
#' Manifest available at: https://gdc.cancer.gov/files/public/file/ATACseq-AWG_Open_GDC-Manifest.txt
#' @param tumor a valid tumor
#' @param file.type Write maf file into a csv document
#' @export
#' @examples
#' \dontrun{
#' query <- GDCquery_ATAC_seq(file.type = "txt")
#' GDCdownload(query)
#' query <- GDCquery_ATAC_seq(file.type = "bigWigs")
#' GDCdownload(query)
#' }
#' @return A data frame with the maf file information
GDCquery_ATAC_seq <- function(tumor = NULL,
                              file.type = NULL) {
    # Query the open-access ATAC-seq manifest published by the GDC
    # (https://gdc.cancer.gov/about-data/publications/ATACseq-AWG) and return
    # a query object compatible with GDCdownload.
    # @param tumor     optional pattern matched (case-insensitively) against file names
    # @param file.type optional pattern matched (case-insensitively) against file names
    isServeOK()
    manifest <- readr::read_tsv("https://gdc.cancer.gov/files/public/file/ATACseq-AWG_Open_GDC-Manifest.txt")
    # Subset the manifest by the optional filename filters
    if(!is.null(tumor)) {
        manifest <- manifest[grep(tumor, manifest$filename, ignore.case = TRUE),]
    }
    if(!is.null(file.type)) {
        manifest <- manifest[grep(file.type, manifest$filename, ignore.case = TRUE),]
    }
    # Rename columns and add the constant metadata GDCdownload expects
    colnames(manifest) <- c("file_id", "file_name", "md5sum", "file_size")
    manifest$state <- "released"
    manifest$data_type <- "ATAC-seq"
    manifest$data_category <- "ATAC-seq"
    manifest$project <- "ATAC-seq"
    # Wrap everything in the one-row query data frame used across the package
    data.frame(results = I(list(manifest)),
               tumor = I(list(tumor)),
               project = I(list("ATAC-seq")),
               data.type = I(list("ATAC-seq")),
               data.category = I(list("ATAC-seq")),
               legacy = I(list(FALSE)))
}
|
/R/query.R
|
no_license
|
romagnolid/TCGAbiolinks
|
R
| false
| false
| 41,955
|
r
|
#' @title Query GDC data
#' @description
#' Uses GDC API to search for search, it searches for both controlled and
#' open-access data.
#' For GDC data arguments project, data.category, data.type and workflow.type should be used
#' For the legacy data arguments project, data.category, platform and/or file.extension should be used.
#' Please, see the vignette for a table with the possibilities.
#' @param project A list of valid project (see list with TCGAbiolinks:::getGDCprojects()$project_id)]
#' @param data.category A valid project (see list with TCGAbiolinks:::getProjectSummary(project))
#' @param data.type A data type to filter the files to download
#' @param sample.type A sample type to filter the files to download
#' @param barcode A list of barcodes to filter the files to download
#' @param legacy Search in the legacy repository
#' @param data.format Data format filter ("VCF", "TXT", "BAM","SVS","BCR XML","BCR SSF XML",
#' "TSV", "BCR Auxiliary XML", "BCR OMF XML", "BCR Biotab", "MAF", "BCR PPS XML", "XLSX")
#' @param file.type To be used in the legacy database for some platforms,
#' to define which file types to be used.
#' @param workflow.type GDC workflow type
#' @param experimental.strategy Filter to experimental strategy. Harmonized: WXS, RNA-Seq, miRNA-Seq, Genotyping Array.
#' Legacy: WXS, RNA-Seq, miRNA-Seq, Genotyping Array,
#' DNA-Seq, Methylation array, Protein expression array, WXS,CGH array, VALIDATION, Gene expression array,WGS,
#' MSI-Mono-Dinucleotide Assay, miRNA expression array, Mixed strategies, AMPLICON, Exon array,
#' Total RNA-Seq, Capillary sequencing, Bisulfite-Seq
#' @param access Filter by access type. Possible values: controlled, open
#' @param platform Example:
#' \tabular{ll}{
#'CGH- 1x1M_G4447A \tab IlluminaGA_RNASeqV2 \cr
#'AgilentG4502A_07 \tab IlluminaGA_mRNA_DGE \cr
#'Human1MDuo \tab HumanMethylation450 \cr
#'HG-CGH-415K_G4124A \tab IlluminaGA_miRNASeq \cr
#'HumanHap550 \tab IlluminaHiSeq_miRNASeq \cr
#'ABI \tab H-miRNA_8x15K \cr
#'HG-CGH-244A \tab SOLiD_DNASeq \cr
#'IlluminaDNAMethylation_OMA003_CPI \tab IlluminaGA_DNASeq_automated \cr
#'IlluminaDNAMethylation_OMA002_CPI \tab HG-U133_Plus_2 \cr
#'HuEx- 1_0-st-v2 \tab Mixed_DNASeq \cr
#'H-miRNA_8x15Kv2 \tab IlluminaGA_DNASeq_curated \cr
#'MDA_RPPA_Core \tab IlluminaHiSeq_TotalRNASeqV2 \cr
#'HT_HG-U133A \tab IlluminaHiSeq_DNASeq_automated \cr
#'diagnostic_images \tab microsat_i \cr
#'IlluminaHiSeq_RNASeq \tab SOLiD_DNASeq_curated \cr
#'IlluminaHiSeq_DNASeqC \tab Mixed_DNASeq_curated \cr
#'IlluminaGA_RNASeq \tab IlluminaGA_DNASeq_Cont_automated \cr
#'IlluminaGA_DNASeq \tab IlluminaHiSeq_WGBS \cr
#'pathology_reports \tab IlluminaHiSeq_DNASeq_Cont_automated\cr
#'Genome_Wide_SNP_6 \tab bio \cr
#'tissue_images \tab Mixed_DNASeq_automated \cr
#'HumanMethylation27 \tab Mixed_DNASeq_Cont_curated \cr
#'IlluminaHiSeq_RNASeqV2 \tab Mixed_DNASeq_Cont
#'}
#' @export
#' @examples
#' query <- GDCquery(project = "TCGA-ACC",
#' data.category = "Copy Number Variation",
#' data.type = "Copy Number Segment")
#' \dontrun{
#' query <- GDCquery(project = "TARGET-AML",
#' data.category = "Transcriptome Profiling",
#' data.type = "miRNA Expression Quantification",
#' workflow.type = "BCGSC miRNA Profiling",
#' barcode = c("TARGET-20-PARUDL-03A-01R","TARGET-20-PASRRB-03A-01R"))
#' query <- GDCquery(project = "TARGET-AML",
#' data.category = "Transcriptome Profiling",
#' data.type = "Gene Expression Quantification",
#' workflow.type = "HTSeq - Counts",
#' barcode = c("TARGET-20-PADZCG-04A-01R","TARGET-20-PARJCR-09A-01R"))
#' query <- GDCquery(project = "TCGA-ACC",
#' data.category = "Copy Number Variation",
#' data.type = "Masked Copy Number Segment",
#' sample.type = c("Primary solid Tumor"))
#' query.met <- GDCquery(project = c("TCGA-GBM","TCGA-LGG"),
#' legacy = TRUE,
#' data.category = "DNA methylation",
#' platform = "Illumina Human Methylation 450")
#' query <- GDCquery(project = "TCGA-ACC",
#' data.category = "Copy number variation",
#' legacy = TRUE,
#' file.type = "hg19.seg",
#' barcode = c("TCGA-OR-A5LR-01A-11D-A29H-01"))
#' }
#' @return A data frame with the results and the parameters used
#' @importFrom jsonlite fromJSON
#' @importFrom knitr kable
#' @importFrom httr timeout
#' @importFrom dplyr pull
GDCquery <- function(project,
                     data.category,
                     data.type,
                     workflow.type,
                     legacy = FALSE,
                     access,
                     platform,
                     file.type,
                     barcode,
                     data.format,
                     experimental.strategy,
                     sample.type){
    # Search the GDC API (harmonized or legacy archive) for files matching the
    # given filters and return a one-row data frame describing the query; the
    # matched files live in the list-column `results`.
    isServeOK()
    suppressWarnings({
        # Normalize every optional argument to NA so the filtering code below
        # can test them uniformly with is.na(); FALSE is treated as "not set".
        if(missing(sample.type)) {
            sample.type <- NA
        } else if(all(sample.type == FALSE)) {
            sample.type <- NA
        }
        if(missing(data.type)) {
            data.type <- NA
        } else if(data.type == FALSE) {
            data.type <- NA
        }
        if(missing(barcode)) {
            barcode <- NA
        } else if(length(barcode) == 1) {
            if(barcode == FALSE) barcode <- NA
        }
        if(missing(platform)) {
            platform <- NA
        } else if(platform == FALSE) {
            platform <- NA
        }
        if(missing(file.type)) {
            file.type <- NA
        } else if(file.type == FALSE) {
            file.type <- NA
        }
        if(missing(workflow.type)) {
            workflow.type <- NA
        } else if(workflow.type == FALSE) {
            workflow.type <- NA
        }
        if(missing(experimental.strategy)) {
            experimental.strategy <- NA
        } else if(experimental.strategy == FALSE) {
            experimental.strategy <- NA
        }
        if(missing(access)) {
            access <- NA
        } else if(access == FALSE) {
            access <- NA
        }
        if(missing(data.format)) {
            data.format <- NA
        } else if(data.format == FALSE) {
            data.format <- NA
        }
    })
    print.header("GDCquery: Searching in GDC database","section")
    message("Genome of reference: ",ifelse(legacy,"hg19","hg38"))
    # Validate user input before hitting the API
    checkProjectInput(project)
    checkDataCategoriesInput(project, data.category, legacy)
    if(!is.na(data.type)) checkDataTypeInput(legacy = legacy, data.type = data.type)
    if(!any(is.na(sample.type))) checkBarcodeDefinition(sample.type)
    results <- NULL
    print.header("Accessing GDC. This might take a while...","subsection")
    # Query the API once per project and accumulate the hits in `results`
    for(proj in project){
        url <- getGDCquery(project = proj,
                           data.category = data.category,
                           data.type = data.type,
                           legacy = legacy,
                           workflow.type = workflow.type,
                           platform = platform,
                           file.type = file.type,
                           files.access = access,
                           experimental.strategy = experimental.strategy,
                           sample.type = sample.type)
        message("ooo Project: ", proj)
        json <- tryCatch(
            getURL(url,fromJSON,timeout(600),simplifyDataFrame = TRUE),
            error = function(e) {
                message(paste("Error: ", e, sep = " "))
                message("We will retry to access GDC!")
                fromJSON(content(getURL(url,GET,timeout(600)), as = "text", encoding = "UTF-8"), simplifyDataFrame = TRUE)
            }
        )
        # If nothing matched, retry once without the server-side workflow and
        # platform restrictions (those are re-applied client-side below)
        if(json$data$pagination$count == 0) {
            url <- getGDCquery(project = proj,
                               data.category = data.category,
                               data.type = data.type,
                               legacy = legacy,
                               workflow.type = NA,
                               platform = NA,
                               file.type = file.type,
                               experimental.strategy = experimental.strategy,
                               files.access = access,
                               sample.type = sample.type)
            json <- tryCatch(
                getURL(url,fromJSON,timeout(600),simplifyDataFrame = TRUE),
                error = function(e) {
                    message(paste("Error: ", e, sep = " "))
                    message("We will retry to access GDC!")
                    fromJSON(content(getURL(url,GET,timeout(600)), as = "text", encoding = "UTF-8"), simplifyDataFrame = TRUE)
                }
            )
        }
        json$data$hits$acl <- NULL
        json$data$hits$project <- proj
        # Flatten the nested archive/analysis/center data frames returned by
        # the API into prefixed top-level columns
        if("archive" %in% colnames(json$data$hits)){
            if(is.data.frame(json$data$hits$archive)){
                archive <- json$data$hits$archive
                colnames(archive)[1:ncol(archive)] <- paste0("archive_", colnames(archive)[1:ncol(archive)])
                json$data$hits$archive <- NULL
                json$data$hits <- cbind(json$data$hits, archive)
            }
        }
        if("analysis" %in% colnames(json$data$hits)){
            if(is.data.frame(json$data$hits$analysis)){
                analysis <- json$data$hits$analysis
                colnames(analysis)[2:ncol(analysis)] <- paste0("analysis_", colnames(analysis)[2:ncol(analysis)])
                json$data$hits$analysis <- NULL
                json$data$hits <- cbind(json$data$hits, analysis)
            }
        }
        if("center" %in% colnames(json$data$hits)){
            if(is.data.frame(json$data$hits$center)){
                center <- json$data$hits$center
                colnames(center)[2:ncol(center)] <- paste0("center_", colnames(center)[2:ncol(center)])
                json$data$hits$center <- NULL
                json$data$hits <- cbind(json$data$hits, center)
            }
        }
        results <- plyr::rbind.fill(as.data.frame(results),as.data.frame(json$data$hits))
    }
    # Only the `project` column means no files were returned at all
    if(ncol(results) == 1) {
        message("Sorry! There is no result for your query. Please check in GDC the data available or if there is no error in your query.")
        return (NULL)
    }
    print.header("Filtering results","subsection")
    # Client-side filtering of the accumulated hits
    if(!any(is.na(platform))){
        if(!(all(platform %in% results$platform))){
            stop("Please set a valid platform argument from the list below:\n => ", paste(unique(results$platform), collapse = "\n => "))
        }
        message("ooo By platform")
        results <- results[tolower(results$platform) %in% tolower(platform),]
    }
    # Filter by access
    if(!is.na(access)) {
        message("ooo By access")
        results <- results[grepl(access,results$access,ignore.case = TRUE),]
    }
    # Filter by experimental strategy
    if(!is.na(experimental.strategy)) {
        if(all(tolower(experimental.strategy) %in% tolower(results$experimental_strategy))) {
            message("ooo By experimental.strategy")
            results <- results[tolower(results$experimental_strategy) %in% tolower(experimental.strategy),]
        } else {
            message(paste0("The argument experimental_strategy does not match any of the results.\nPossible values:",
                           paste(unique(results$experimental_strategy),collapse = "\n=>")))
        }
    }
    # Filter by data format
    if(!is.na(data.format)) {
        if(all(tolower(data.format) %in% tolower(results$data_format))) {
            message("ooo By data.format")
            results <- results[tolower(results$data_format) %in% tolower(data.format),]
        } else {
            # BUG FIX: the original message referred to experimental_strategy
            # (copy-paste from the branch above); it reports data.format values.
            message(paste0("The argument data.format does not match any of the results.\nPossible values:",
                           paste(unique(results$data_format),collapse = "\n=>")))
        }
    }
    # Filter by data.type
    if(!is.na(data.type)) {
        if(!(tolower(data.type) %in% tolower(results$data_type))) {
            stop("Please set a valid data.type argument from the list below:\n => ", paste(unique(results$data_type), collapse = "\n => "))
        }
        message("ooo By data.type")
        results <- results[tolower(results$data_type) %in% tolower(data.type),]
    }
    # Filter by workflow.type
    if(!is.na(workflow.type)) {
        if(!(workflow.type %in% results$analysis_workflow_type)) {
            stop("Please set a valid workflow.type argument from the list below:\n => ", paste(unique(results$analysis_workflow_type), collapse = "\n => "))
        }
        message("ooo By workflow.type")
        results <- results[results$analysis_workflow_type %in% workflow.type,]
    }
    # Filter by file.type: translate the user-facing keyword into a filename
    # regex ([^...] patterns exclude the "normalized"/"nocnv" variants)
    if(!is.na(file.type)){
        message("ooo By file.type")
        pat <- file.type
        invert <- FALSE
        if(file.type == "normalized_results") pat <- "normalized_results"
        if(file.type == "results") pat <- "[^normalized_]results"
        if(file.type == "nocnv_hg18" | file.type == "nocnv_hg18.seg") pat <- "nocnv_hg18"
        if(file.type == "cnv_hg18" | file.type == "hg18.seg") pat <- "[^nocnv_]hg18.seg"
        if(file.type == "nocnv_hg19" | file.type == "nocnv_hg19.seg") pat <- "nocnv_hg19"
        if(file.type == "cnv_hg19" | file.type == "hg19.seg") pat <- "[^nocnv_]hg19.seg"
        if(file.type == "mirna") {
            pat <- "hg19.*mirna"
            invert <- TRUE
        }
        if(file.type == "hg19.isoform") pat <- "hg19.*isoform"
        if(file.type == "isoform") {
            pat <- "hg19.*isoform"
            invert <- TRUE
        }
        idx <- grep(pat,results$file_name,invert = invert)
        if(length(idx) == 0) {
            print(knitr::kable(sort(results$file_name)[1:10],col.names = "Files"))
            stop("We were not able to filter using this file type. Examples of available files are above. Please check the vignette for possible entries")
        }
        results <- results[idx,]
    }
    # Get barcodes of the samples.
    # 1) Normally each sample carries a single case, but mutation calls use
    #    both normal and tumor samples, and both are reported by the API,
    #    so each data category needs its own unpacking strategy.
    if(!data.category %in% c("Clinical",
                             "Copy Number Variation",
                             "Biospecimen",
                             "Other",
                             "Simple Nucleotide Variation",
                             "Simple nucleotide variation")){
        # We also need to deal with pooled samples (mixed from different
        # patients), e.g. CPT0000870008
        if("portions" %in% (results$cases[[1]]$samples[[1]] %>% names)) {
            aux <- plyr::laply(results$cases,
                               function(x) {
                                   summarize(x$samples[[1]],
                                             submitter_id = paste(submitter_id,collapse = ";"),
                                             is_ffpe = any(is_ffpe),
                                             sample_type = paste(sample_type,collapse = ";"),
                                             aliquot.submiter.id = x$samples[[1]]$portions[[1]]$analytes[[1]]$aliquots[[1]]$submitter_id)
                               }) %>% as.data.frame
        } else {
            aux <- plyr::laply(results$cases,
                               function(x) {
                                   summarize(x$samples[[1]],
                                             submitter_id = paste(submitter_id,collapse = ";"),
                                             is_ffpe = any(is_ffpe),
                                             sample_type = paste(sample_type,collapse = ";"))
                               }) %>% as.data.frame
        }
        results$sample_type <- aux$sample_type %>% as.character()
        results$is_ffpe <- aux$is_ffpe %>% as.logical
        # ORGANOID-PANCREATIC does not have aliquots
        if("aliquot.submiter.id" %in% colnames(aux)){
            results$cases <- aux$aliquot.submiter.id %>% as.character()
            results$sample.submitter_id <- aux$submitter_id %>% as.character()
        } else{
            results$cases <- aux$submitter_id %>% as.character()
            results$sample.submitter_id <- aux$submitter_id %>% as.character()
        }
    } else if(data.category %in% c("Clinical")){
        # Clinical has another structure
        aux <- plyr::laply(results$cases,
                           function(x) {
                               unlist(x,recursive = T)[c("submitter_id")]
                           }) %>% as.data.frame
        results$cases <- aux %>% dplyr::pull(1) %>% as.character()
    } else if(data.category %in% c("Biospecimen")){
        # Biospecimen has another structure
        aux <- plyr::laply(results$cases,
                           function(x) {
                               paste(x$submitter_id,collapse = ",")
                           })
        results$cases <- aux
    } else if(data.category == "Other"){
        # Auxiliary test files have no case info linked to them;
        # recover the patient barcode from the file names
        results$cases <- str_extract_all(results$file_name,"TCGA-[:alnum:]{2}-[:alnum:]{4}") %>% unlist
    } else if(data.category %in% c( "Copy Number Variation","Simple nucleotide variation")){
        aux <- plyr::laply(results$cases,
                           function(x) {
                               lapply(x$samples,FUN = function(y) unlist(y,recursive = T)[c("portions.analytes.aliquots.submitter_id")]) %>%
                                   unlist %>%
                                   na.omit %>%
                                   paste(collapse = ",")
                           }) %>% as.data.frame %>% pull(1) %>% as.character()
        results$cases <- aux
    } else if(data.category == "Simple Nucleotide Variation"){
        if(data.type %in% "Masked Somatic Mutation"){
            # MAF files are one single file for all samples
            aux <- plyr::laply(results$cases[[1]]$samples,
                               function(x) {
                                   unlist(x,recursive = T)[c("portions.analytes.aliquots.submitter_id","sample_type1","sample_type2","is_ffpe1","is_ffpe2")]
                               }) %>% as.data.frame
            results$cases <- aux$portions.analytes.aliquots.submitter_id %>% as.character() %>% paste(collapse = ",")
            if(!is.na(sample.type)) sample.type <- NA # ensure no filtering will be applied
        } else {
            # Each hit carries a tumor/normal pair of samples
            aux <- plyr::laply(results$cases,
                               function(x) {
                                   unlist(x$samples[[1]],recursive = T)[c("portions.analytes.aliquots.submitter_id","sample_type1","sample_type2","is_ffpe1","is_ffpe2")]
                               }) %>% as.data.frame
            results$sample_type1 <- aux$sample_type1 %>% as.character()
            results$sample_type2 <- aux$sample_type2 %>% as.character()
            results$is_ffpe1 <- aux$is_ffpe1 %>% as.logical
            results$is_ffpe2 <- aux$is_ffpe2 %>% as.logical
            results$cases <- aux$portions.analytes.aliquots.submitter_id %>% as.character()
            if(!is.na(sample.type)) sample.type <- NA # ensure no filtering will be applied
        }
    }
    # Filter by barcode
    if(!any(is.na(barcode))) {
        message("ooo By barcode")
        idx <- unique(unlist(sapply(barcode, function(x) grep(x, results$cases,ignore.case = TRUE))))
        if(length(idx) == 0) {
            print(knitr::kable(results$cases,col.names = "Available barcodes"))
            stop("None of the barcodes were matched. Available barcodes are above")
        }
        results <- results[idx,]
    }
    # Filter by sample.type
    if(!any(is.na(sample.type))) {
        if(!any(tolower(results$sample_type) %in% tolower(sample.type))) {
            aux <- as.data.frame(table(results$sample_type))
            aux <- aux[aux$Freq > 0,]
            print(kable(aux,row.names = FALSE,col.names = c("sample.type","Number of samples")))
            stop("Please set a valid sample.type argument from the list above.")
        }
        message("ooo By sample.type")
        results <- results[tolower(results$sample_type) %in% tolower(sample.type),]
    }
    # GDC sometimes returns duplicated files for the same case; warn the user
    # so they can inspect getResults(query) themselves.
    print.header("Checking data","subsection")
    message("ooo Check if there are duplicated cases")
    if(any(duplicated(results$cases))) {
        message("Warning: There are more than one file for the same case. Please verify query results. You can use the command View(getResults(query)) in rstudio")
    }
    message("ooo Check if there results for the query")
    if(nrow(results) == 0) stop("Sorry, no results were found for this query")
    print.header("Preparing output","section")
    # One-row data frame: list-columns keep vector-valued arguments intact
    ret <- data.frame(results = I(list(results)),
                      project = I(list(project)),
                      data.category = data.category,
                      data.type = data.type,
                      legacy = legacy,
                      access = I(list(access)),
                      experimental.strategy = I(list(experimental.strategy)),
                      file.type = file.type,
                      platform = I(list(platform)),
                      sample.type = I(list(sample.type)),
                      barcode = I(list(barcode)),
                      workflow.type = workflow.type)
    return(ret)
}
# Assemble the GDC search-API URL for one project, given the (possibly NA)
# filter arguments. Returns the full URL as a single string.
getGDCquery <- function(project, data.category, data.type, legacy, workflow.type,platform,file.type,files.access,sample.type,experimental.strategy){
    # Legacy archive and harmonized data live under different endpoints
    endpoint <- ifelse(legacy,"https://api.gdc.cancer.gov/legacy/files/?","https://api.gdc.cancer.gov/files/?")
    opt.pretty <- "pretty=true"
    # Which nested entities to expand depends on the data category
    if(data.category == "Protein expression" & legacy) {
        opt.expand <- "fields=archive.revision,archive.file_name,md5sum,state,data_category,file_id,platform,file_name,file_size,md5sum,submitter_id,data_type&expand=cases.samples.portions,cases.project,center,analysis"
    } else if(data.category %in% c("Clinical","Biospecimen")) {
        opt.expand <- "expand=cases,cases.project,center,analysis"
    } else {
        opt.expand <- "expand=cases.samples.portions.analytes.aliquots,cases.project,center,analysis,cases.samples"
    }
    # Request exactly as many hits as the project has files
    opt.size <- paste0("size=",getNbFiles(project,data.category,legacy))
    opt.format <- "format=JSON"
    # Open the JSON filter with the mandatory project clause ...
    opt.filter <- paste0("filters=",
                         URLencode('{"op":"and","content":['),
                         URLencode('{"op":"in","content":{"field":"cases.project.project_id","value":["'),
                         project,
                         URLencode('"]}}'))
    # ... then append one "in" clause per non-NA argument
    if(!is.na(experimental.strategy)) opt.filter <- paste0(opt.filter,addFilter("files.experimental_strategy", experimental.strategy))
    if(!is.na(data.category)) opt.filter <- paste0(opt.filter,addFilter("files.data_category", data.category))
    if(!is.na(data.type)) opt.filter <- paste0(opt.filter,addFilter("files.data_type", data.type))
    if(!is.na(workflow.type)) opt.filter <- paste0(opt.filter,addFilter("files.analysis.workflow_type", workflow.type))
    if(!any(is.na(platform))) opt.filter <- paste0(opt.filter,addFilter("files.platform", platform))
    if(!any(is.na(file.type))) {
        # In the legacy archive file types are distinguished by tags
        if(file.type == "results" & legacy) opt.filter <- paste0(opt.filter,addFilter("files.tags", "unnormalized"))
        if(file.type == "normalized_results" & legacy) opt.filter <- paste0(opt.filter,addFilter("files.tags", "normalized"))
        if(file.type == "nocnv_hg19.seg" & legacy) opt.filter <- paste0(opt.filter,addFilter("files.tags", "nocnv"))
        if(file.type == "hg19.isoform" & legacy) opt.filter <- paste0(opt.filter,addFilter("files.tags", "hg19"))
    }
    if(!any(is.na(files.access))) {
        opt.filter <- paste0(opt.filter,addFilter("files.access", files.access))
    }
    if(!any(is.na(sample.type))) {
        # GDC uses slightly different names for these two sample types
        if("Primary solid Tumor" %in% sample.type) sample.type[sample.type == "Primary solid Tumor"] <- "Primary Tumor"
        if("Recurrent Solid Tumor" %in% sample.type) sample.type[sample.type == "Recurrent Solid Tumor"] <- "Recurrent Tumor"
        opt.filter <- paste0(opt.filter,addFilter("cases.samples.sample_type", sample.type))
    }
    # Close the JSON filter and join all options into the final URL
    opt.filter <- paste0(opt.filter, URLencode(']}'))
    paste0(endpoint,paste(opt.pretty,
                          opt.expand,
                          opt.size,
                          opt.filter,
                          opt.format,
                          sep = "&"))
}
# Build one URL-encoded {"op":"in"} clause for the GDC filter string,
# matching `field` against any of `values`. The leading comma chains the
# clause onto the "and" filter opened by getGDCquery().
addFilter <- function(field, values){
    paste0(URLencode(',{"op":"in","content":{"field":"'),
           URLencode(field),
           URLencode('","value":["'),
           URLencode(paste(values, collapse = '","')),
           URLencode('"]}}'))
}
# Expand TCGA/TARGET sample barcodes into their components and attach the
# human-readable tissue definition via getBarcodeDefinition().
# @param barcode character vector of TCGA-* or TARGET-* barcodes
# @return a DataFrame (TARGET branch) or data.frame (TCGA branch) with one row
#         per input barcode, preserving input order. If the vector mixes both
#         barcode styles, the TCGA branch result overwrites the TARGET one.
expandBarcodeInfo <- function(barcode){
    if(any(grepl("TARGET",barcode))) {
        # Fixed-position slices of the TARGET barcode layout
        ret <- DataFrame(barcode = barcode,
                         code = substr(barcode, 8, 9),
                         case.unique.id = substr(barcode, 11, 16),
                         tissue.code = substr(barcode, 18, 19),
                         nucleic.acid.code = substr(barcode, 24, 24))
        # NOTE(review): this merges against getBarcodeDefinition()'s default
        # TCGA table; TARGET-only tissue codes (e.g. 15-17, 41, 42, 99) are
        # absent there and produce NA definitions, and shared codes (50, 60,
        # 61) get TCGA wording. Presumably getBarcodeDefinition("TARGET") was
        # intended — confirm downstream column expectations before changing.
        ret <- merge(ret,getBarcodeDefinition(), by = "tissue.code", sort = FALSE, all.x = TRUE)
        # merge() reorders rows; restore the input order
        ret <- ret[match(barcode,ret$barcode),]
    }
    if(any(grepl("TCGA",barcode))) {
        # Fixed-position slices of the TCGA barcode layout
        ret <- data.frame(barcode = barcode,
                          patient = substr(barcode, 1, 12),
                          sample = substr(barcode, 1, 16),
                          tissue.code = substr(barcode, 14, 15))
        ret <- merge(ret,getBarcodeDefinition(), by = "tissue.code", sort = FALSE, all.x = TRUE)
        # merge() reorders rows; restore the input order
        ret <- ret[match(barcode,ret$barcode),]
    }
    return(ret)
}
# Lookup table mapping two-digit tissue codes to their definitions.
# @param type "TCGA" (default) or anything else for the TARGET table.
# @return for TCGA: a data.frame with tissue.code, shortLetterCode and
#         tissue.definition; otherwise: a DataFrame with tissue.code and
#         tissue.definition (the TARGET table has no short letter codes).
getBarcodeDefinition <- function(type = "TCGA"){
    if(type == "TCGA"){
        codes <- c('01','02','03','04','05','06','07','08','09','10','11',
                   '12','13','14','20','40','50','60','61')
        letters <- c("TP","TR","TB","TRBM","TAP","TM","TAM","THOC",
                     "TBM","NB","NT","NBC","NEBV","NBM","CELLC","TRB",
                     "CELL","XP","XCL")
        definitions <- c("Primary Tumor",
                         "Recurrent Tumor",
                         "Primary Blood Derived Cancer - Peripheral Blood",
                         "Recurrent Blood Derived Cancer - Bone Marrow",
                         "Additional - New Primary",
                         "Metastatic",
                         "Additional Metastatic",
                         "Human Tumor Original Cells",
                         "Primary Blood Derived Cancer - Bone Marrow",
                         "Blood Derived Normal",
                         "Solid Tissue Normal",
                         "Buccal Cell Normal",
                         "EBV Immortalized Normal",
                         "Bone Marrow Normal",
                         "Control Analyte",
                         "Recurrent Blood Derived Cancer - Peripheral Blood",
                         "Cell Lines",
                         "Primary Xenograft Tissue",
                         "Cell Line Derived Xenograft Tissue")
        tab <- data.frame(tissue.code = codes,
                          shortLetterCode = letters,
                          tissue.definition = definitions)
    } else {
        codes <- c('01','02','03','04','05','06','07','08','09','10','11',
                   '12','13','14','15','16','17','20','40','41','42','50','60','61','99')
        definitions <- c("Primary solid Tumor", # 01
                         "Recurrent Solid Tumor", # 02
                         "Primary Blood Derived Cancer - Peripheral Blood", # 03
                         "Recurrent Blood Derived Cancer - Bone Marrow", # 04
                         "Additional - New Primary", # 05
                         "Metastatic", # 06
                         "Additional Metastatic", # 07
                         "Tissue disease-specific post-adjuvant therapy", # 08
                         "Primary Blood Derived Cancer - Bone Marrow", # 09
                         "Blood Derived Normal", # 10
                         "Solid Tissue Normal", # 11
                         "Buccal Cell Normal", # 12
                         "EBV Immortalized Normal", # 13
                         "Bone Marrow Normal", # 14
                         "Fibroblasts from Bone Marrow Normal", # 15
                         "Mononuclear Cells from Bone Marrow Normal", # 16
                         "Lymphatic Tissue Normal (including centroblasts)", # 17
                         "Control Analyte", # 20
                         "Recurrent Blood Derived Cancer - Peripheral Blood", # 40
                         "Blood Derived Cancer- Bone Marrow, Post-treatment", # 41
                         "Blood Derived Cancer- Peripheral Blood, Post-treatment", # 42
                         "Cell line from patient tumor", # 50
                         "Xenograft from patient not grown as intermediate on plastic tissue culture dish", # 60
                         "Xenograft grown in mice from established cell lines", #61
                         "Granulocytes after a Ficoll separation") # 99
        tab <- DataFrame(tissue.code = codes, tissue.definition = definitions)
    }
    return(tab)
}
#' @title Retrieve open access maf files from GDC server
#' @description
#' GDCquery_Maf uses the following guide to download maf files
#' https://gdc-docs.nci.nih.gov/Data/Release_Notes/Data_Release_Notes/
#' @param pipelines Four separate variant calling pipelines are implemented for GDC data harmonization.
#' Options: muse, varscan2, somaticsniper, mutect2. For more information:
#' https://gdc-docs.nci.nih.gov/Data/Bioinformatics_Pipelines/DNA_Seq_Variant_Calling_Pipeline/
#' @param tumor a valid tumor
#' @param save.csv Write maf file into a csv document
#' @param directory Directory/Folder where the data will downloaded. Default: GDCdata
#' @export
#' @importFrom data.table fread
#' @import readr stringr
#' @importFrom downloader download
#' @importFrom R.utils gunzip
#' @importFrom tools md5sum
#' @examples
#' \dontrun{
#' acc.muse.maf <- GDCquery_Maf("ACC", pipelines = "muse")
#' acc.varscan2.maf <- GDCquery_Maf("ACC", pipelines = "varscan2")
#' acc.somaticsniper.maf <- GDCquery_Maf("ACC", pipelines = "somaticsniper")
#' acc.mutect.maf <- GDCquery_Maf("ACC", pipelines = "mutect2")
#' }
#' @return A data frame with the maf file information
GDCquery_Maf <- function(tumor,
                         save.csv = FALSE,
                         directory = "GDCdata",
                         pipelines = NULL){
    # Download and read the open-access Masked Somatic Mutation MAF for one
    # TCGA tumor, produced by the requested variant-calling pipeline.
    # @param tumor     TCGA tumor abbreviation (e.g. "ACC")
    # @param save.csv  also write the MAF to a csv file
    # @param directory download folder (default "GDCdata")
    # @param pipelines one of muse, varscan2, somaticsniper, mutect2
    if(is.null(pipelines)) stop("Please select the pipeline argument (muse, varscan2, somaticsniper, mutect2)")
    # Map the pipeline nickname onto the GDC workflow name
    workflow.type <- if(grepl("varscan",pipelines, ignore.case = TRUE)) {
        "VarScan2 Variant Aggregation and Masking"
    } else if(pipelines == "muse") {
        "MuSE Variant Aggregation and Masking"
    } else if(pipelines == "somaticsniper") {
        "SomaticSniper Variant Aggregation and Masking"
    } else if(grepl("mutect",pipelines, ignore.case = TRUE)) {
        "MuTect2 Variant Aggregation and Masking"
    } else {
        stop("Please select the pipeline argument (muse, varscan2, somaticsniper, mutect2)")
    }
    # Point the user at the relevant GDC documentation
    message("============================================================================")
    message(" For more information about MAF data please read the following GDC manual and web pages:")
    message(" GDC manual: https://gdc-docs.nci.nih.gov/Data/PDF/Data_UG.pdf")
    message(" https://gdc-docs.nci.nih.gov/Data/Bioinformatics_Pipelines/DNA_Seq_Variant_Calling_Pipeline/")
    message(" https://gdc.cancer.gov/about-gdc/variant-calling-gdc")
    message("============================================================================")
    query <- GDCquery(paste0("TCGA-",tumor),
                      data.category = "Simple Nucleotide Variation",
                      data.type = "Masked Somatic Mutation",
                      workflow.type = workflow.type,
                      access = "open")
    if(nrow(query$results[[1]]) == 0) stop("No MAF file found for this type of workflow")
    # Try the API downloader first, then the GDC client, and as a last resort
    # fetch the file directly from the manifest.
    maf <- tryCatch({
        tryCatch({
            GDCdownload(query, directory = directory, method = "api")
        }, error = function(e) {
            GDCdownload(query, directory = directory, method = "client")
        })
        GDCprepare(query, directory = directory)
    }, error = function(e) {
        manifest <- getManifest(query)
        GDCdownload.aux( "https://api.gdc.cancer.gov/data/", manifest, manifest$filename, ".")
        readSimpleNucleotideVariationMaf(file.path(manifest$id,manifest$filename))
    })
    # Optionally persist the MAF as csv next to the downloaded file
    if(save.csv) {
        fout <- file.path(directory,gsub("\\.gz", "\\.csv",getResults(query)$file_name))
        write_csv(maf, fout)
        message(paste0("File created: ", fout))
    }
    return(maf)
}
#' @title Retrieve open access mc3 MAF file from GDC server
#' @description
#' Download data from https://gdc.cancer.gov/about-data/publications/mc3-2017
#' https://gdc-docs.nci.nih.gov/Data/Release_Notes/Data_Release_Notes/
#' @examples
#' \dontrun{
#' maf <- getMC3MAF()
#' }
#' @return A data frame with the MAF file information from https://gdc.cancer.gov/about-data/publications/mc3-2017
#' @export
getMC3MAF <- function(){
    # Download (if needed), read and annotate the open-access MC3 public MAF
    # from https://gdc.cancer.gov/about-data/publications/mc3-2017
    # @return a data frame (tibble) with the MAF content plus a project_id column
    fout <- "mc3.v0.2.8.PUBLIC.maf.gz"
    fpath <- "https://api.gdc.cancer.gov/data/1c8cfe5f-e52d-41ba-94da-f15ea1337efc"
    # Windows requires binary mode to download a gzip file intact
    if(is.windows()) mode <- "wb" else mode <- "w"
    message(rep("-",100))
    message("o Starting to download Public MAF from GDC")
    message("o More information at: https://gdc.cancer.gov/about-data/publications/mc3-2017")
    message("o Please, cite: Cell Systems. Volume 6 Issue 3: p271-281.e7, 28 March 2018 10.1016/j.cels.2018.03.002")
    # Skip download/uncompress if the uncompressed MAF is already present
    if(!file.exists(gsub("\\.gz", "", fout))){
        download(fpath, fout, mode = mode)
        message("o Uncompressing file")
        gunzip(fout, remove = FALSE)
    }
    message("o Reading MAF")
    maf <- readr::read_tsv(gsub("\\.gz", "", fout),progress = TRUE, col_types = readr::cols())
    message("o Adding project_id information")
    # Map each sample (first 12 chars of Tumor_Sample_Barcode = patient barcode)
    # to its TCGA project via the GDC submitter IDs
    project <- grep("TCGA",sort(getGDCprojects()$project_id),value = TRUE)
    df <- plyr::adply(project,
                      .margins = 1,
                      .fun = function(proj) {
                          samples <- getSubmitterID(proj)
                          return(data.frame(proj,samples))
                      }
    )
    maf$project_id <- df$proj[match(substr(maf$Tumor_Sample_Barcode,1,12),df$samples)] %>% as.character
    message(rep("-",100))
    # BUG FIX: the original ended with message(), silently returning NULL;
    # the documented contract is to return the MAF data frame.
    return(maf)
}
#' @title Query gene counts of TCGA and GTEx data from the Recount2 project
#' @description
#' TCGArecount2_query queries and downloads data produced by the Recount2 project. User can specify which project and which tissue to query
#' @param project is a string denoting which project the user wants. Options are "tcga" and "gtex"
#' @param tissue a vector of tissue(s) to download. Options are "adipose tissue", "adrenal gland", "bladder", "blood", "blood vessel", "bone marrow", "brain", "breast", "cervix uteri", "colon", "esophagus", "fallopian tube", "heart", "kidney", "liver", "lung", "muscle", "nerve", "ovary", "pancreas", "pituitary", "prostate", "salivary gland", "skin", "small intestine", "spleen", "stomach", "testis", "thyroid", "uterus", "vagina"
#' @export
#' @examples
#' \dontrun{
#' brain.rec<-TCGAquery_recount2(project = "gtex", tissue = "brain")
#' }
#' @return A list with one RangedSummarizedExperiment (the `rse_gene` object
#' downloaded from Recount2) per requested tissue, named "<project>_<tissue>"
TCGAquery_recount2 <- function(project, tissue = c()){
  # Tissues available on the Recount2 server, per project
  tissuesGTEx <- c(
    "adipose_tissue",
    "adrenal_gland",
    "bladder",
    "blood",
    "blood_vessel",
    "bone_marrow",
    "brain",
    "breast",
    "cervix_uteri",
    "colon",
    "esophagus",
    "fallopian_tube",
    "heart",
    "kidney",
    "liver",
    "lung",
    "muscle",
    "nerve",
    "ovary",
    "pancreas",
    "pituitary",
    "prostate",
    "salivary_gland",
    "skin",
    "small_intestine",
    "spleen",
    "stomach",
    "testis",
    "thyroid",
    "uterus",
    "vagina"
  )
  tissuesTCGA <- c(
    "adrenal_gland",
    "bile_duct",
    "bladder",
    "bone_marrow",
    "brain",
    "breast",
    "cervix",
    "colorectal",
    "esophagus",
    "eye",
    "head_and_neck",
    "kidney",
    "liver",
    "lung",
    "lymph_nodes",
    "ovary",
    "pancreas",
    "pleura",
    "prostate",
    "skin",
    "soft_tissue",
    "stomach",
    "testis",
    "thymus",
    "thyroid",
    "uterus")
  # Normalize tissue names: spaces -> underscores ("adipose tissue" ->
  # "adipose_tissue"). gsub() is vectorized, so a vector of tissues keeps
  # one element per tissue. (The previous paste(collapse = "_") merged
  # multiple tissues into one invalid name, breaking multi-tissue queries.)
  tissue <- gsub(" ", "_", tissue)
  # Select the tissue whitelist and download URL for the requested project
  proj <- tolower(project)
  if(proj == "gtex"){
    valid_tissues <- tissuesGTEx
    base_url <- "http://duffel.rail.bio/recount/v2/SRP012682/rse_gene_"
  } else if(proj == "tcga"){
    valid_tissues <- tissuesTCGA
    base_url <- "http://duffel.rail.bio/recount/v2/TCGA/rse_gene_"
  } else stop(paste0(project, " is not a valid project"))
  Res <- list()
  for(t_i in tissue){
    # Validate and download each requested tissue individually.
    # (The previous code tested and pasted the whole `tissue` object
    # instead of the loop variable `t_i`.)
    if(!(t_i %in% valid_tissues))
      stop(paste0(t_i, " is not an available tissue on Recount2"))
    con <- paste0(base_url, t_i, ".Rdata")
    message(paste0("downloading Range Summarized Experiment for: ", t_i))
    # load() brings an object named `rse_gene` into this frame
    load(url(con))
    Res[[paste0(project, "_", t_i)]] <- rse_gene
  }
  return(Res)
}
#' @title Retrieve open access ATAC-seq files from GDC server
#' @description
#' Retrieve open access ATAC-seq files from GDC server
#' https://gdc.cancer.gov/about-data/publications/ATACseq-AWG
#' Manifest available at: https://gdc.cancer.gov/files/public/file/ATACseq-AWG_Open_GDC-Manifest.txt
#' @param tumor a valid tumor type; if supplied, manifest entries are kept
#' only when the file name contains this string (case-insensitive)
#' @param file.type a file type (e.g. "txt", "bigWigs"); if supplied, manifest
#' entries are kept only when the file name contains this string
#' (case-insensitive)
#' @export
#' @examples
#' \dontrun{
#' query <- GDCquery_ATAC_seq(file.type = "txt")
#' GDCdownload(query)
#' query <- GDCquery_ATAC_seq(file.type = "bigWigs")
#' GDCdownload(query)
#' }
#' @return A data frame with the ATAC-seq manifest file information
GDCquery_ATAC_seq <- function(tumor = NULL,
                              file.type = NULL) {
  isServeOK()
  # Read the open-access ATAC-seq manifest published by the GDC
  results <- readr::read_tsv("https://gdc.cancer.gov/files/public/file/ATACseq-AWG_Open_GDC-Manifest.txt")
  # Optional case-insensitive substring filters on the manifest file name
  # (TRUE instead of T: T is an ordinary binding and can be reassigned)
  if(!is.null(tumor)) results <- results[grep(tumor, results$filename, ignore.case = TRUE), ]
  if(!is.null(file.type)) results <- results[grep(file.type, results$filename, ignore.case = TRUE), ]
  # Standardize manifest column names and add GDC-style metadata columns
  colnames(results) <- c("file_id", "file_name", "md5sum", "file_size")
  results$state <- "released"
  results$data_type <- "ATAC-seq"
  results$data_category <- "ATAC-seq"
  results$project <- "ATAC-seq"
  # Wrap everything in the one-row list-column structure consumed by
  # GDCdownload() (see examples)
  ret <- data.frame(results = I(list(results)),
                    tumor = I(list(tumor)),
                    project = I(list("ATAC-seq")),
                    data.type = I(list("ATAC-seq")),
                    data.category = I(list("ATAC-seq")),
                    legacy = I(list(FALSE)))
  return(ret)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/custom_fields.R
\name{update_field}
\alias{update_field}
\title{Update custom field}
\usage{
update_field(id, body = list(name = "New name"), ...)
}
\arguments{
\item{id}{Board ID}
\item{body}{Named list with additional parameters}
\item{...}{Additional arguments passed to \code{\link{put_model}}}
}
\description{
Update custom field definition.
}
\seealso{
\code{\link{put_model}}
}
|
/man/update_field.Rd
|
no_license
|
amirmahmoodv/trelloR
|
R
| false
| true
| 465
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/custom_fields.R
\name{update_field}
\alias{update_field}
\title{Update custom field}
\usage{
update_field(id, body = list(name = "New name"), ...)
}
\arguments{
\item{id}{Board ID}
\item{body}{Named list with additional parameters}
\item{...}{Additional arguments passed to \code{\link{put_model}}}
}
\description{
Update custom field definition.
}
\seealso{
\code{\link{put_model}}
}
|
#' Interpolate new positions within a spatiotemporal path data
#'
#' Interpolate new positions within a spatiotemporal path data set
#' (e.g., detections of tagged fish) at regularly-spaced time intervals
#' using linear or non-linear interpolation.
#'
#' @param det An object of class \code{glatos_detections} or data frame
#' containing spatiotemporal data with at least 4 columns containing
#' 'animal_id', 'detection_timestamp_utc', 'deploy_long', and
#' 'deploy_lat' columns.
#'
#' @param start_time specify the first time bin for interpolated data.
#' If not supplied, default is first timestamp in the input data
#' set. Must be a character string that can be coerced to
#' 'POSIXct' or an object of class 'POSIXct'. If character string
#' is supplied, timezone is automatically set to UTC.
#'
#' @param out_class Return results as a data.table or tibble. Default
#' returns results as data.frame. Accepts `data.table` or `tibble`.
#'
#' @param int_time_stamp The time step size (in seconds) of interpolated
#' positions. Default is 86400 (one day).
#'
#' @param trans An optional transition matrix with the "cost" of
#' moving across each cell within the map extent. Must be of class
#' \code{TransitionLayer}. A transition layer may be
#' created from a polygon shapefile using \link{make_transition}.
#'
#' @param lnl_thresh A numeric threshold for determining if linear or
#' non-linear interpolation shortest path will be used.
#'
#' @param show_progress Logical. Progress bar and status messages will be
#' shown if TRUE (default) and not shown if FALSE.
#'
#' @details Non-linear interpolation uses the \code{gdistance} package
#' to find the shortest pathway between two locations (i.e.,
#' receivers) that avoid 'impossible' movements (e.g., over land for
#' fish). The shortest non-linear path between two locations is
#' calculated using a transition matrix layer that represents the
#' 'cost' of an animal moving between adjacent grid cells. The
#' transition matrix layer (see \link{gdistance}) is created from
#' a polygon shapefile using \link{make_transition} or from a
#' \code{RasterLayer} object using \link[gdistance]{transition}. In
#' \code{make_transition}, each cell in the output transition layer
#' is coded as water (1) or land (0) to represent possible (1) and
#' impossible (0) movement paths.
#'
#' @details Linear interpolation is used for all points when
#' \code{trans} is not supplied. When \code{trans} is supplied,
#' then interpolation method is determined for each pair of
#' sequential observed detections. For example, linear interpolation
#' will be used if the two geographical positions are exactly the
#' same and when the ratio (linear distance:non-linear distance)
#' between two positions is less than \code{lnl_thresh}. Non-linear
#' interpolation will be used when ratio is greater than
#' \code{lnl_thresh}. When the ratio of linear distance to
#' non-linear distance is greater than \code{lnl_thresh}, then the
#' distance of the non-linear path needed to avoid land is greater
#' than the linear path that crosses land. \code{lnl_thresh} can be
#' used to control whether non-linear or linear interpolation is
#' used for all points. For example, non-linear interpolation will
#' be used for all points when \code{lnl_thresh} > 1 and linear
#' interpolation will be used for all points when \code{lnl_thresh}
#' = 0.
#'
#' @return A dataframe with animal_id, bin_timestamp,
#' latitude, longitude, and record_type.
#'
#'
#' @author Todd Hayden, Tom Binder, Chris Holbrook
#'
#' @examples
#'
#' #--------------------------------------------------
#' # EXAMPLE #1 - simple interpolate among lakes
#'
#' library(sp) #for loading greatLakesPoly because spatial object
#'
#' # get polygon of the Great Lakes
#' data(greatLakesPoly) #glatos example data; a SpatialPolygonsDataFrame
#' plot(greatLakesPoly, xlim = c(-92, -76))
#'
#' # make sample detections data frame
#' pos <- data.frame(
#' animal_id=1,
#' deploy_long=c(-87,-82.5, -78),
#' deploy_lat=c(44, 44.5, 43.5),
#' detection_timestamp_utc=as.POSIXct(c("2000-01-01 00:00",
#' "2000-02-01 00:00", "2000-03-01 00:00"), tz = "UTC"))
#'
#' #add to plot
#' points(deploy_lat ~ deploy_long, data = pos, pch = 20, cex = 2, col = 'red')
#'
#' # interpolate path using linear method
#' path1 <- interpolate_path(pos)
#' nrow(path1) #now 61 points
#' sum(path1$record_type == "interpolated") #58 interpolated points
#'
#' #add linear path to plot
#' points(latitude ~ longitude, data = path1, pch = 20, cex = 0.8, col = 'blue')
#'
#' # load a transition matrix of Great Lakes
#' # NOTE: This is a LOW RESOLUTION TransitionLayer suitable only for
#' # coarse/large scale interpolation only. Most realistic uses
#' # will need to create a TransitionLayer; see ?make_transition.
#' data(greatLakesTrLayer) #glatos example data; a TransitionLayer
#'
#' # interpolate path using non-linear method (requires 'trans')
#' path2 <- interpolate_path(pos, trans = greatLakesTrLayer)
#'
#' # add non-linear path to plot
#' points(latitude ~ longitude, data = path2, pch = 20, cex = 1,
#' col = 'green')
#'
#' # can also force linear-interpolation with lnlThresh = 0
#' path3 <- interpolate_path(pos, trans = greatLakesTrLayer, lnl_thresh = 0)
#'
#' # add new linear path to plot
#' points(latitude ~ longitude, data = path3, pch = 20, cex = 1,
#' col = 'magenta')
#'
#' #--------------------------------------------------
#' # EXAMPLE #2 - walleye in western Lake Erie
#' \dontrun{
#'
#' library(sp) #for loading greatLakesPoly
#' library(raster) #for raster manipulation (e.g., crop)
#'
#' # get example walleye detection data
#' det_file <- system.file("extdata", "walleye_detections.csv",
#' package = "glatos")
#' det <- read_glatos_detections(det_file)
#'
#' # take a look
#' head(det)
#'
#' # extract one fish and subset date
#' det <- det[det$animal_id == 22 &
#' det$detection_timestamp_utc > as.POSIXct("2012-04-08") &
#' det$detection_timestamp_utc < as.POSIXct("2013-04-15") , ]
#'
#' # get polygon of the Great Lakes
#' data(greatLakesPoly) #glatos example data; a SpatialPolygonsDataFrame
#'
#' # crop polygon to western Lake Erie
#' maumee <- crop(greatLakesPoly, extent(-83.7, -82.5, 41.3, 42.4))
#' plot(maumee, col = "grey")
#' points(deploy_lat ~ deploy_long, data = det, pch = 20, col = "red",
#' xlim = c(-83.7, -80))
#'
#' #make transition layer object
#' # Note: make_transition is used here; see ?make_transition for
#' # guidance on building transition layers for real applications
#' tran <- make_transition(maumee, res = c(0.1, 0.1))
#'
#' plot(tran$rast, xlim = c(-83.7, -82.0), ylim = c(41.3, 42.7))
#' plot(maumee, add = TRUE)
#'
#' # not high enough resolution- bump up resolution
#' tran1 <- make_transition(maumee, res = c(0.001, 0.001))
#'
#' # plot to check resolution- much better
#' plot(tran1$rast, xlim = c(-83.7, -82.0), ylim = c(41.3, 42.7))
#' plot(maumee, add = TRUE)
#'
#'
#' # add fish detections to make sure they are "on the map"
#' # plot unique values only for simplicity
#' foo <- unique(det[, c("deploy_lat", "deploy_long")])
#' points(foo$deploy_long, foo$deploy_lat, pch = 20, col = "red")
#'
#' # call with "transition matrix" (non-linear interpolation), other options
#' # note that it is quite a bit slower than linear interpolation
#' pos2 <- interpolate_path(det, trans = tran1$transition, out_class = "data.table")
#'
#' plot(maumee, col = "grey")
#' points(latitude ~ longitude, data = pos2, pch=20, col='red', cex=0.5)
#'
#' }
#'
#' @export
interpolate_path <- function(det, trans = NULL, start_time = NULL,
                             int_time_stamp = 86400, lnl_thresh = 0.9,
                             out_class = NULL, show_progress = TRUE){
  # Workflow: validate inputs, bin detections on a regular time grid, then
  # interpolate positions linearly and/or along least-cost (non-linear)
  # paths, and return detections + interpolated positions.

  # stop if out_class is not NULL, data.table, or tibble
  if(!is.null(out_class)){
    if(!(out_class %in% c("data.table", "tibble"))) {
      stop('out_class is not a "data.table" or "tibble"')
    }
  }

  # check to see that trans is a transition layer or transition stack
  # (&& rather than &: this is a scalar control-flow condition)
  if(!is.null(trans) &&
     inherits(trans, c("TransitionLayer", "TransitionStack")) == FALSE){
    stop(paste0("Supplied object for 'trans' argument is not class ",
                "TransitionLayer or TransitionStack."),
         call. = FALSE)
  }

  # check start_time; default to the earliest detection
  if(is.null(start_time)){
    start_time <- min(det$detection_timestamp_utc)
  }
  if(is.na(start_time) & length(start_time) > 0){
    stop("start_time cannot be coerced to 'POSIXct' or 'POSIXt' class")
  }
  if(is.character(start_time)){
    start_time <- as.POSIXct(start_time, tz = "UTC")
  }

  # make sure start_time < largest timestamp in dataset
  if(start_time > max(det$detection_timestamp_utc)){
    stop("start_time is larger than last detection. No data to interpolate!",
         call. = FALSE)
  }

  # make copy of detections for function
  dtc <- data.table::as.data.table(det)

  # subset only columns for function and rows >= start_time:
  dtc <- dtc[detection_timestamp_utc >= start_time, c("animal_id",
                                                      "detection_timestamp_utc",
                                                      "deploy_lat",
                                                      "deploy_long")]
  dtc[, record_type := "detection"]

  # count number of rows- single observations are not interpolated
  dtc[, num_rows := nrow(.SD), by = animal_id]

  # Sort detections by transmitter id and then by detection timestamp
  data.table::setkey(dtc, animal_id, detection_timestamp_utc)

  # save original dataset to combine with interpolated data in the end
  det <- data.table::copy(dtc)
  data.table::setnames(det, c("animal_id", "bin_stamp", "i_lat", "i_lon",
                              "record_type", "num_rows"))

  # remove any fish with only one detection
  dtc <- dtc[num_rows != 1]

  # error if only fish with one observation.
  if (nrow(dtc) == 0) stop("must have two observations to interpolate")

  # regular time grid from start_time to the last detection
  t_seq <- seq(start_time, max(dtc$detection_timestamp_utc),
               int_time_stamp)

  # bin data by time interval and add bin to dtc
  dtc[, bin := t_seq[findInterval(detection_timestamp_utc, t_seq)] ]

  # make all combinations of animals and detection bins
  dtc <- dtc[data.table::CJ(bin = t_seq, animal_id = unique(animal_id)),
             on = c("bin", "animal_id")]

  data.table::setkey(dtc, animal_id, bin, detection_timestamp_utc)

  # if only need to do linear interpolation:
  # (|| rather than |: scalar control-flow condition)
  if(is.null(trans) || lnl_thresh == 0){
    dtc[, bin_stamp := detection_timestamp_utc][is.na(detection_timestamp_utc),
                                                bin_stamp := bin]
    dtc[, i_lat := approx(detection_timestamp_utc, deploy_lat,
                          xout = bin_stamp)$y, by = animal_id]
    dtc[, i_lon := approx(detection_timestamp_utc, deploy_long,
                          xout = bin_stamp)$y, by = animal_id]
    dtc[is.na(deploy_long), record_type := "interpolated"]
    dtc <- dtc[, c("animal_id", "bin_stamp", "i_lat", "i_lon", "record_type")]
    # single-detection animals are passed through without interpolation
    det <- det[num_rows == 1, c("animal_id", "bin_stamp", "i_lat", "i_lon",
                                "record_type")]
    out <- data.table::rbindlist(list(dtc, det), use.names = TRUE)
    data.table::setkey(out, animal_id, bin_stamp)
    out[, bin_stamp := t_seq[findInterval(bin_stamp, t_seq)] ]
    out <- na.omit(out, cols = "i_lat")
    data.table::setnames(out, c("animal_id", "bin_timestamp", "latitude",
                                "longitude", "record_type"))
    out <- unique(out)
    out <- data.table::setorder(out, animal_id, bin_timestamp, -record_type)

    # default (out_class = NULL): return a data.frame
    if(is.null(out_class)){
      out <- as.data.frame(out)
      return(out)
    }
    # out_class == "tibble": return a tibble
    if(out_class == "tibble"){
      out <- tibble::as_tibble(out)
      return(out)
    }
    # out_class == "data.table": return the data.table as-is
    return(out)
  }

  # routine for combined nln and ln interpolation
  # identify start and end rows for observations before and after NA
  ends <- dtc[!is.na(deploy_lat), .(start = .I[-nrow(.SD)], end = .I[-1]),
              by = animal_id][end - start > 1]

  # identify observations that are both start and ends
  dups <- c(ends$start, ends$end)[ends[, duplicated(c(start, end))]]

  # create and append duplicate rows for observations
  # that are both start and end.
  # This is so each observation can be in only one group
  # identifies rows and duplicate rows that need duplicated
  dtc[, c("rep", "num") := list(1L, 1:.N)][dups, rep := 2L]
  dtc <- dtc[rep(num, rep)]
  dtc[, rep := NULL]
  dtc[, num := NULL]

  # recalculate first and last rows- no duplicate rows this time...
  new_ends <- dtc[!is.na(deploy_lat), .(start = .I[-nrow(.SD)], end = .I[-1]),
                  by = animal_id][end - start > 1]

  # create row index
  dtc[, start_dtc := 1:.N]

  # extract rows that need interpolated (non-equi join on the row index;
  # i.start tags each row with the group it belongs to)
  dtc <- dtc[new_ends, .(animal_id = x.animal_id,
                         detection_timestamp_utc = x.detection_timestamp_utc,
                         deploy_lat = x.deploy_lat, deploy_long = x.deploy_long,
                         record_type = x.record_type, num_rows = x.num_rows,
                         bin = x.bin, i.start = start),
             on = .(start_dtc >= start, start_dtc <= end)]

  # calculate great circle distance between coords
  dtc[, gcd := geosphere::distHaversine(as.matrix(
    .SD[1, c("deploy_long", "deploy_lat")]),
    as.matrix(.SD[.N, c("deploy_long", "deploy_lat")])), by = i.start]

  # calculate least cost (non-linear) distance between points
  message("Calculating least-cost (non-linear) distances... (step 1 of 3)")
  grpn <- data.table::uniqueN(dtc$i.start)
  if(show_progress) pb <- txtProgressBar(min = 0, max = grpn, style = 3)
  dtc[, lcd := {if(show_progress) setTxtProgressBar(pb, value = .GRP);
    gdistance::costDistance(trans, fromCoords = as.matrix(
      .SD[1, c("deploy_long", "deploy_lat")]),
      toCoords = as.matrix(.SD[.N, c("deploy_long", "deploy_lat")]))},
    by = i.start]

  # calculate ratio of gcd:lcd
  dtc[, crit := gcd / lcd]

  # create keys for lookup (coordinates/timestamp of the group's end point)
  dtc[!is.na(detection_timestamp_utc),
      t_lat := data.table::shift(deploy_lat, type = "lead"), by = i.start]
  dtc[!is.na(detection_timestamp_utc),
      t_lon := data.table::shift(deploy_long, type = "lead"), by = i.start]
  dtc[!is.na(detection_timestamp_utc),
      t_timestamp := data.table::shift(detection_timestamp_utc, type = "lead"),
      by = i.start]

  # extract rows that need non-linear interpolation
  # based on gcd:lcd distance
  nln <- dtc[crit < lnl_thresh]

  # infinite least-cost distance means a coordinate could not be reached
  # within the transition layer (on land or outside its extent)
  land_chk <- dtc[is.infinite(lcd)][!is.na(deploy_lat),
                                    c("deploy_lat", "deploy_long")]

  # stop execution and display offending receivers if any receivers are on land.
  capture <- function(x) paste(capture.output(print(x)), collapse = "\n")
  if (nrow(land_chk) > 0) {stop("Some coordinates are on land or beyond extent.
 Interpolation impossible! Check receiver locations or extents of transition
 layer:\n", capture(data.table::as.data.table(land_chk)), call. = FALSE)
  }

  # extract data for linear interpolation
  # (vectorized | is required here: this is a per-row data.table filter)
  ln <- dtc[crit >= lnl_thresh | is.nan(crit)]
  if (nrow(ln) == 0){
    ln <- data.table::data.table(animal_id = character(), i_lat = numeric(),
                                 i_lon = numeric(),
                                 bin_stamp = as.POSIXct(character()),
                                 record_type = character())
  } else {
    message("Starting linear interpolation... (step 2 of 3)")
    # linear interpolation
    grpn <- data.table::uniqueN(ln$i.start)
    if(show_progress) pb <- txtProgressBar(min = 0, max = grpn, style = 3)
    ln[, bin_stamp := detection_timestamp_utc][is.na(detection_timestamp_utc),
                                               bin_stamp := bin]
    ln[, i_lat := {if(show_progress) setTxtProgressBar(pb, .GRP);
      tmp = .SD[c(1, .N),
                c("detection_timestamp_utc", "deploy_lat")];
      approx(c(tmp$detection_timestamp_utc),
             c(tmp$deploy_lat),
             xout = c(bin_stamp))$y}, by = i.start]
    ln[, i_lon := {tmp = .SD[c(1, .N),
                             c("detection_timestamp_utc", "deploy_long")];
      approx(c(tmp$detection_timestamp_utc),
             c(tmp$deploy_long),
             xout = c(bin_stamp))$y},
      by = i.start]
    ln[is.na(deploy_long), record_type := "interpolated"]
  }

  # extract records to lookup
  nln_small <- nln[!is.na(detection_timestamp_utc)][!is.na(t_lat)]

  if(nrow(nln_small) == 0){
    nln <- data.table::data.table(animal_id = character(), i_lat = numeric(),
                                  i_lon = numeric(),
                                  bin_stamp = as.POSIXct(character()),
                                  record_type = character())
  } else {
    # nln interpolation
    # create lookup table of unique start/end coordinate pairs
    data.table::setkey(nln_small, deploy_lat, deploy_long, t_lat, t_lon)
    lookup <- unique(nln_small[, .(deploy_lat, deploy_long, t_lat, t_lon),
                               allow.cartesian = TRUE])

    message("\nStarting non-linear interpolation... (step 3 of 3)")
    grpn <- nrow(lookup)
    if(show_progress) pb <- txtProgressBar(min = 0, max = grpn, style = 3)

    # calculate non-linear interpolation for all unique movements in lookup
    lookup[, coord := {if(show_progress) setTxtProgressBar(pb, value = .GRP);
      sp::coordinates(
        gdistance::shortestPath(trans, as.matrix(
          .SD[1, c("deploy_long", "deploy_lat")]), as.matrix(
            .SD[1, c("t_lon", "t_lat")]), output = "SpatialLines"))},
      by = 1:nrow(lookup)]

    message("\nFinalizing results.")
    lookup[, grp := 1:.N]

    # extract interpolated points from coordinate lists...
    res <- lookup[, .(nln_longitude = lookup$coord[[.I]][, 1],
                      nln_latitude = lookup$coord[[.I]][, 2]), by = grp]

    # set keys, join interpolation and original data
    data.table::setkey(lookup, grp)
    data.table::setkey(res, grp)
    lookup <- lookup[res]
    lookup[, coord := NULL]

    # added first/last rows, number sequence for groups
    lookup[lookup[, .I[1], by = grp]$V1, nln_longitude := deploy_long]
    lookup[lookup[, .I[.N], by = grp]$V1, nln_longitude := t_lon]
    lookup[lookup[, .I[1], by = grp]$V1, nln_latitude := deploy_lat]
    lookup[lookup[, .I[.N], by = grp]$V1, nln_latitude := t_lat]
    lookup[, seq_count := 1:.N, by = grp]

    # lookup interpolated values for original dataset
    data.table::setkey(lookup, deploy_lat, deploy_long, t_lat, t_lon)
    nln_small <- lookup[nln_small, allow.cartesian = TRUE]
    data.table::setkey(nln_small, i.start, seq_count)

    # add timeseries for interpolating nln movements
    nln_small[nln_small[, .I[1], by = i.start]$V1,
              i_time := detection_timestamp_utc]
    nln_small[nln_small[, .I[.N], by = i.start]$V1, i_time := t_timestamp]

    # cumulative distance along each non-linear path (used to spread
    # timestamps proportionally along the path)
    nln_small[, latitude_lead := data.table::shift(nln_latitude, type = "lag", fill = NA), by = i.start]
    nln_small[, longitude_lead := data.table::shift(nln_longitude, type = "lag", fill = NA), by = i.start]
    nln_small[, cumdist := geosphere::distGeo(.SD[, c("nln_longitude", "nln_latitude")],
                                              .SD[, c("longitude_lead", "latitude_lead")]), by = i.start]
    nln_small[is.na(cumdist), cumdist := 0]
    nln_small[, cumdist := cumsum(cumdist), by = i.start]
    nln_small[, latitude_lead := NULL][, longitude_lead := NULL]

    # interpolate missing timestamps for interpolated coordinates
    nln_small[, i_time := as.POSIXct(approx(cumdist, i_time, xout = cumdist)$y,
                                     origin = "1970-01-01 00:00:00",
                                     tz = attr(nln_small$i_time, "tzone")),
              by = i.start]

    # create timestamp vector to interpolate on.
    nln[, bin_stamp := detection_timestamp_utc]
    nln[is.na(detection_timestamp_utc), bin_stamp := bin]
    nln[, grp := i.start]

    # interpolate timestamps
    data.table::setkey(nln_small, i.start)
    data.table::setkey(nln, i.start)
    nln[, i_lat := {tmp = nln_small[.(.SD[1, "i.start"]),
                                    c("i_time", "nln_latitude")];
      approx(tmp$i_time, tmp$nln_latitude,
             xout = bin_stamp)$y}, by = grp]
    nln[, i_lon := {tmp = nln_small[.(.SD[1, "i.start"]),
                                    c("i_time", "nln_longitude")];
      approx(tmp$i_time, tmp$nln_longitude,
             xout = bin_stamp)$y}, by = grp]
    nln[is.na(deploy_long), record_type := "interpolated"]
  }

  # combine interpolated (linear + non-linear) positions with the
  # original detections into a single data.table
  out <- data.table::rbindlist(list(
    ln[record_type == "interpolated",
       c("animal_id", "bin_stamp", "i_lat", "i_lon", "record_type")],
    nln[record_type == "interpolated",
        c("animal_id", "bin_stamp", "i_lat", "i_lon", "record_type")],
    det[, c("animal_id", "bin_stamp", "i_lat", "i_lon", "record_type")]),
    use.names = TRUE)

  data.table::setkey(out, animal_id, bin_stamp)
  out[, bin_stamp := t_seq[findInterval(bin_stamp, t_seq)] ]
  data.table::setnames(out, c("animal_id", "bin_timestamp", "latitude",
                              "longitude", "record_type"))
  out <- na.omit(out, cols = "latitude")
  out <- unique(out)
  data.table::setorder(out, animal_id, bin_timestamp, -record_type)

  # default (out_class = NULL): return a data.frame
  if(is.null(out_class)){
    out <- as.data.frame(out)
    return(out)
  }
  # out_class == "tibble": return a tibble
  if(out_class == "tibble"){
    out <- tibble::as_tibble(out)
    return(out)
  }
  # out_class == "data.table": return the data.table as-is
  return(out)
}
|
/R/vis-interpolate_path.r
|
no_license
|
jsta/glatos
|
R
| false
| false
| 22,942
|
r
|
#' Interpolate new positions within a spatiotemporal path data
#'
#' Interpolate new positions within a spatiotemporal path data set
#' (e.g., detections of tagged fish) at regularly-spaced time intervals
#' using linear or non-linear interpolation.
#'
#' @param det An object of class \code{glatos_detections} or data frame
#' containing spatiotemporal data with at least 4 columns containing
#' 'animal_id', 'detection_timestamp_utc', 'deploy_long', and
#' 'deploy_lat' columns.
#'
#' @param start_time specify the first time bin for interpolated data.
#' If not supplied, default is first timestamp in the input data
#' set. Must be a character string that can be coerced to
#' 'POSIXct' or an object of class 'POSIXct'. If character string
#' is supplied, timezone is automatically set to UTC.
#'
#' @param out_class Return results as a data.table or tibble. Default
#' returns results as data.frame. Accepts `data.table` or `tibble`.
#'
#' @param int_time_stamp The time step size (in seconds) of interpolated
#' positions. Default is 86400 (one day).
#'
#' @param trans An optional transition matrix with the "cost" of
#' moving across each cell within the map extent. Must be of class
#' \code{TransitionLayer}. A transition layer may be
#' created from a polygon shapefile using \link{make_transition}.
#'
#' @param lnl_thresh A numeric threshold for determining if linear or
#' non-linear interpolation shortest path will be used.
#'
#' @param show_progress Logical. Progress bar and status messages will be
#' shown if TRUE (default) and not shown if FALSE.
#'
#' @details Non-linear interpolation uses the \code{gdistance} package
#' to find the shortest pathway between two locations (i.e.,
#' receivers) that avoid 'impossible' movements (e.g., over land for
#' fish). The shortest non-linear path between two locations is
#' calculated using a transition matrix layer that represents the
#' 'cost' of an animal moving between adjacent grid cells. The
#' transition matrix layer (see \link{gdistance}) is created from
#' a polygon shapefile using \link{make_transition} or from a
#' \code{RasterLayer} object using \link[gdistance]{transition}. In
#' \code{make_transition}, each cell in the output transition layer
#' is coded as water (1) or land (0) to represent possible (1) and
#' impossible (0) movement paths.
#'
#' @details Linear interpolation is used for all points when
#' \code{trans} is not supplied. When \code{trans} is supplied,
#' then interpolation method is determined for each pair of
#' sequential observed detections. For example, linear interpolation
#' will be used if the two geographical positions are exactly the
#' same and when the ratio (linear distance:non-linear distance)
#' between two positions is less than \code{lnl_thresh}. Non-linear
#' interpolation will be used when ratio is greater than
#' \code{lnl_thresh}. When the ratio of linear distance to
#' non-linear distance is greater than \code{lnl_thresh}, then the
#' distance of the non-linear path needed to avoid land is greater
#' than the linear path that crosses land. \code{lnl_thresh} can be
#' used to control whether non-linear or linear interpolation is
#' used for all points. For example, non-linear interpolation will
#' be used for all points when \code{lnl_thresh} > 1 and linear
#' interpolation will be used for all points when \code{lnl_thresh}
#' = 0.
#'
#' @return A dataframe with animal_id, bin_timestamp,
#' latitude, longitude, and record_type.
#'
#'
#' @author Todd Hayden, Tom Binder, Chris Holbrook
#'
#' @examples
#'
#' #--------------------------------------------------
#' # EXAMPLE #1 - simple interpolate among lakes
#'
#' library(sp) #for loading greatLakesPoly because spatial object
#'
#' # get polygon of the Great Lakes
#' data(greatLakesPoly) #glatos example data; a SpatialPolygonsDataFrame
#' plot(greatLakesPoly, xlim = c(-92, -76))
#'
#' # make sample detections data frame
#' pos <- data.frame(
#' animal_id=1,
#' deploy_long=c(-87,-82.5, -78),
#' deploy_lat=c(44, 44.5, 43.5),
#' detection_timestamp_utc=as.POSIXct(c("2000-01-01 00:00",
#' "2000-02-01 00:00", "2000-03-01 00:00"), tz = "UTC"))
#'
#' #add to plot
#' points(deploy_lat ~ deploy_long, data = pos, pch = 20, cex = 2, col = 'red')
#'
#' # interpolate path using linear method
#' path1 <- interpolate_path(pos)
#' nrow(path1) #now 61 points
#' sum(path1$record_type == "interpolated") #58 interpolated points
#'
#' #add linear path to plot
#' points(latitude ~ longitude, data = path1, pch = 20, cex = 0.8, col = 'blue')
#'
#' # load a transition matrix of Great Lakes
#' # NOTE: This is a LOW RESOLUTION TransitionLayer suitable only for
#' # coarse/large scale interpolation only. Most realistic uses
#' # will need to create a TransitionLayer; see ?make_transition.
#' data(greatLakesTrLayer) #glatos example data; a TransitionLayer
#'
#' # interpolate path using non-linear method (requires 'trans')
#' path2 <- interpolate_path(pos, trans = greatLakesTrLayer)
#'
#' # add non-linear path to plot
#' points(latitude ~ longitude, data = path2, pch = 20, cex = 1,
#' col = 'green')
#'
#' # can also force linear-interpolation with lnlThresh = 0
#' path3 <- interpolate_path(pos, trans = greatLakesTrLayer, lnl_thresh = 0)
#'
#' # add new linear path to plot
#' points(latitude ~ longitude, data = path3, pch = 20, cex = 1,
#' col = 'magenta')
#'
#' #--------------------------------------------------
#' # EXAMPLE #2 - walleye in western Lake Erie
#' \dontrun{
#'
#' library(sp) #for loading greatLakesPoly
#' library(raster) #for raster manipulation (e.g., crop)
#'
#' # get example walleye detection data
#' det_file <- system.file("extdata", "walleye_detections.csv",
#' package = "glatos")
#' det <- read_glatos_detections(det_file)
#'
#' # take a look
#' head(det)
#'
#' # extract one fish and subset date
#' det <- det[det$animal_id == 22 &
#' det$detection_timestamp_utc > as.POSIXct("2012-04-08") &
#' det$detection_timestamp_utc < as.POSIXct("2013-04-15") , ]
#'
#' # get polygon of the Great Lakes
#' data(greatLakesPoly) #glatos example data; a SpatialPolygonsDataFrame
#'
#' # crop polygon to western Lake Erie
#' maumee <- crop(greatLakesPoly, extent(-83.7, -82.5, 41.3, 42.4))
#' plot(maumee, col = "grey")
#' points(deploy_lat ~ deploy_long, data = det, pch = 20, col = "red",
#' xlim = c(-83.7, -80))
#'
#' #make transition layer object
#' # Note: make_transition is used here; see ?make_transition for
#' # guidance on building transition layers for real applications
#' tran <- make_transition(maumee, res = c(0.1, 0.1))
#'
#' plot(tran$rast, xlim = c(-83.7, -82.0), ylim = c(41.3, 42.7))
#' plot(maumee, add = TRUE)
#'
#' # not high enough resolution- bump up resolution
#' tran1 <- make_transition(maumee, res = c(0.001, 0.001))
#'
#' # plot to check resolution- much better
#' plot(tran1$rast, xlim = c(-83.7, -82.0), ylim = c(41.3, 42.7))
#' plot(maumee, add = TRUE)
#'
#'
#' # add fish detections to make sure they are "on the map"
#' # plot unique values only for simplicity
#' foo <- unique(det[, c("deploy_lat", "deploy_long")])
#' points(foo$deploy_long, foo$deploy_lat, pch = 20, col = "red")
#'
#' # call with "transition matrix" (non-linear interpolation), other options
#' # note that it is quite a bit slower than linear interpolation
#' pos2 <- interpolate_path(det, trans = tran1$transition, out_class = "data.table")
#'
#' plot(maumee, col = "grey")
#' points(latitude ~ longitude, data = pos2, pch=20, col='red', cex=0.5)
#'
#' }
#'
#' @export
interpolate_path <- function(det, trans = NULL, start_time = NULL,
                             int_time_stamp = 86400, lnl_thresh = 0.9,
                             out_class = NULL, show_progress = TRUE){

  # stop if out_class is not NULL, data.table, or tibble
  if(!is.null(out_class)){
    if( !(out_class %in% c("data.table", "tibble"))) {stop('out_class is not a "data.table" or "tibble"')}}

  # check to see that trans is a transition layer or transition stack
  if(!is.null(trans) &
     inherits(trans, c("TransitionLayer", "TransitionStack")) == FALSE){
    stop(paste0("Supplied object for 'trans' argument is not class ",
                "TransitionLayer or TransitionStack."),
         call. = FALSE)
  }

  # check start_time; default is the earliest detection in det
  if(is.null(start_time)){
    start_time <- min(det$detection_timestamp_utc)
  }
  if(is.na(start_time) & length(start_time) > 0){
    stop("start_time cannot be coerced to 'POSIXct' or 'POSIXt' class")
  }
  if(is.character(start_time)){
    start_time <- as.POSIXct(start_time, tz = "UTC")
  }

  # make sure start_time < largest timestamp in dataset
  if(start_time > max(det$detection_timestamp_utc)){
    stop("start_time is larger than last detection. No data to interpolate!", call. = FALSE)}

  # make copy of detections for function
  dtc <- data.table::as.data.table(det)

  # subset only columns for function and rows >= start_time:
  dtc <- dtc[detection_timestamp_utc >= start_time, c("animal_id",
                                                      "detection_timestamp_utc",
                                                      "deploy_lat",
                                                      "deploy_long")]

  dtc[, record_type := "detection"]

  # count number of rows- single observations are not interpolated
  dtc[, num_rows := nrow(.SD), by = animal_id]

  # Sort detections by transmitter id and then by detection timestamp
  data.table::setkey(dtc, animal_id, detection_timestamp_utc)

  # save original dataset to combine with interpolated data in the end
  det <- data.table::copy(dtc)
  data.table::setnames(det, c("animal_id", "bin_stamp", "i_lat", "i_lon",
                              "record_type", "num_rows"))

  # remove any fish with only one detection
  dtc <- dtc[num_rows != 1]

  # error if only fish with one observation.
  if (nrow(dtc) == 0) stop("must have two observations to interpolate")

  # regular sequence of time bins spanning start_time .. last detection
  t_seq <- seq(start_time, max(dtc$detection_timestamp_utc),
               int_time_stamp)

  # bin data by time interval and add bin to dtc
  dtc[, bin := t_seq[findInterval(detection_timestamp_utc, t_seq)] ]

  # make all combinations of animals and detection bins
  dtc <- dtc[data.table::CJ(bin = t_seq, animal_id = unique(animal_id)),
             on = c("bin", "animal_id")]

  data.table::setkey(dtc, animal_id, bin, detection_timestamp_utc)

  # if only need to do linear interpolation (no transition object, or
  # the linear/non-linear threshold disables the non-linear path):
  if(is.null(trans) | lnl_thresh == 0){
    dtc[, bin_stamp := detection_timestamp_utc][is.na(detection_timestamp_utc),
                                                bin_stamp := bin]
    dtc[, i_lat := approx(detection_timestamp_utc, deploy_lat,
                          xout = bin_stamp)$y, by = animal_id]
    dtc[, i_lon := approx(detection_timestamp_utc, deploy_long,
                          xout = bin_stamp)$y, by = animal_id]
    dtc[is.na(deploy_long), record_type := "interpolated"]
    dtc <- dtc[, c("animal_id", "bin_stamp", "i_lat", "i_lon", "record_type")]
    det <- det[num_rows == 1, c("animal_id", "bin_stamp", "i_lat", "i_lon",
                                "record_type")]
    out <- data.table::rbindlist(list(dtc, det), use.names = TRUE)
    data.table::setkey(out, animal_id, bin_stamp)
    out[, bin_stamp := t_seq[findInterval(bin_stamp, t_seq)] ]
    out <- na.omit(out, cols = "i_lat")
    data.table::setnames(out, c("animal_id", "bin_timestamp", "latitude",
                                "longitude", "record_type"))
    out <- unique(out)
    out <- data.table::setorder(out, animal_id, bin_timestamp, -record_type)
    # default (out_class = NULL): return a plain data.frame
    if(is.null(out_class)){ out <- as.data.frame(out)
      return(out)
    }
    # if out_class == "tibble", then return tibble object
    if(out_class == "tibble"){ out <- tibble::as_tibble(out)
      return(out)}
    # otherwise out_class == "data.table": return the data.table
    return(out)
  }

  # routine for combined nln and ln interpolation
  # identify start and end rows for observations before and after NA
  ends <- dtc[!is.na(deploy_lat), .(start = .I[-nrow(.SD)], end = .I[-1]),
              by = animal_id][end - start > 1]

  # identify observations that are both start and ends
  dups <- c(ends$start, ends$end)[ ends[, duplicated(c(start, end))]]

  # create and append duplicate rows for observations
  # that are both start and end.
  # This is so each observation can be in only one group
  # identifies rows and duplicate rows that need duplicated
  dtc[, c("rep", "num") := list(1L, 1:.N)][dups, rep := 2L]
  dtc <- dtc[rep(num, rep)]
  dtc[, rep := NULL]
  dtc[, num := NULL]

  # recalculate first and last rows- no duplicate rows this time...
  new_ends <- dtc[!is.na(deploy_lat), .(start = .I[-nrow(.SD)], end = .I[-1]),
                  by = animal_id][end - start > 1]

  # create row index
  dtc[, start_dtc := 1:.N]

  # extract rows that need interpolated (non-equi join on row index;
  # i.start labels each gap between consecutive detections)
  dtc <- dtc[new_ends, .(animal_id = x.animal_id,
                         detection_timestamp_utc = x.detection_timestamp_utc,
                         deploy_lat = x.deploy_lat, deploy_long = x.deploy_long,
                         record_type = x.record_type, num_rows = x.num_rows,
                         bin = x.bin, i.start = start),
             on = .(start_dtc >= start, start_dtc <= end)]

  # calculate great circle distance between coords
  dtc[, gcd := geosphere::distHaversine(as.matrix(
    .SD[1, c("deploy_long", "deploy_lat")]),
    as.matrix(.SD[.N, c("deploy_long", "deploy_lat")])), by = i.start]

  # calculate least cost (non-linear) distance between points
  message("Calculating least-cost (non-linear) distances... (step 1 of 3)")
  grpn = data.table::uniqueN(dtc$i.start)
  if(show_progress) pb <- txtProgressBar(min = 0, max = grpn, style = 3)
  dtc[, lcd := {if(show_progress) setTxtProgressBar(pb, value = .GRP);
    gdistance::costDistance(trans, fromCoords = as.matrix(
      .SD[1, c("deploy_long", "deploy_lat")]),
      toCoords = as.matrix(.SD[.N, c("deploy_long", "deploy_lat")]))},
    by = i.start]

  # calculate ratio of gcd:lcd
  dtc[, crit := gcd / lcd]

  # create keys for lookup
  dtc[!is.na(detection_timestamp_utc),
      t_lat := data.table::shift(deploy_lat, type = "lead"), by = i.start]
  dtc[!is.na(detection_timestamp_utc),
      t_lon := data.table::shift(deploy_long, type = "lead"), by = i.start]
  dtc[!is.na(detection_timestamp_utc),
      t_timestamp := data.table::shift(detection_timestamp_utc, type = "lead"),
      by = i.start]

  # extract rows that need non-linear interpolation
  # based on gcd:lcd distance
  nln <- dtc[crit < lnl_thresh ]

  land_chk <- dtc[is.infinite(lcd)][!is.na(deploy_lat),
                                    c("deploy_lat", "deploy_long")]

  # stop execution and display offending receivers if any receivers are on land.
  capture <- function(x)paste(capture.output(print(x)), collapse = "\n")
  if (nrow(land_chk) > 0) {stop("Some coordinates are on land or beyond extent.
   Interpolation impossible! Check receiver locations or extents of transition
   layer:\n", capture(data.table::as.data.table(land_chk)), call. = FALSE)
  }

  # extract data for linear interpolation
  # check to make sure that all points to be interpolated
  # are within the transition layer is needed before any interpolation.
  ln <- dtc[crit >= lnl_thresh | is.nan(crit) ]

  if (nrow(ln) == 0){
    # empty placeholder so rbindlist below still has consistent columns
    ln <- data.table::data.table(animal_id = character(), i_lat = numeric(),
                                 i_lon = numeric(),
                                 bin_stamp = as.POSIXct(character()),
                                 record_type = character())
  } else {

    message("Starting linear interpolation... (step 2 of 3)")
    # linear interpolation
    grpn = data.table::uniqueN(ln$i.start)
    if(show_progress) pb <- txtProgressBar(min = 0, max = grpn, style = 3)
    ln[, bin_stamp := detection_timestamp_utc][is.na(detection_timestamp_utc),
                                               bin_stamp := bin]
    ln[, i_lat := {if(show_progress) setTxtProgressBar(pb, .GRP);
      tmp = .SD[c(1, .N),
                c("detection_timestamp_utc", "deploy_lat")];
      approx(c(tmp$detection_timestamp_utc),
             c(tmp$deploy_lat),
             xout = c(bin_stamp))$y}, by = i.start]
    ln[, i_lon := {tmp = .SD[c(1, .N),
                             c("detection_timestamp_utc", "deploy_long")];
      approx(c(tmp$detection_timestamp_utc),
             c(tmp$deploy_long),
             xout = c(bin_stamp))$y},
      by = i.start]
    ln[is.na(deploy_long), record_type := "interpolated"]
  }

  # extract records to lookup
  nln_small <- nln[ !is.na(detection_timestamp_utc)][!is.na(t_lat)]

  if(nrow(nln_small) == 0){
    # empty placeholder so rbindlist below still has consistent columns
    nln <- data.table::data.table(animal_id = character(), i_lat = numeric(),
                                  i_lon = numeric(),
                                  bin_stamp = as.POSIXct(character()),
                                  record_type = character())
  } else {
    # nln interpolation
    # create lookup table of unique start/end coordinate pairs so each
    # least-cost path is computed only once
    data.table::setkey(nln_small, deploy_lat, deploy_long, t_lat, t_lon)
    lookup <- unique(nln_small[, .(deploy_lat, deploy_long, t_lat, t_lon),
                               allow.cartesian = TRUE])

    message("\nStarting non-linear interpolation... (step 3 of 3)")
    grpn <- nrow(lookup)
    if(show_progress) pb <- txtProgressBar(min = 0, max = grpn, style = 3)
    # calculate non-linear interpolation for all unique movements in lookup
    lookup[, coord := { if(show_progress) setTxtProgressBar(pb, value = .GRP);
      sp::coordinates(
        gdistance::shortestPath(trans, as.matrix(
          .SD[1, c("deploy_long", "deploy_lat")]), as.matrix(
            .SD[1, c("t_lon", "t_lat")]), output = "SpatialLines"))},
      by = 1:nrow(lookup)]

    message("\nFinalizing results.")

    lookup[, grp := 1:.N]

    # extract interpolated points from coordinate lists...
    res <- lookup[, .(nln_longitude = lookup$coord[[.I]][, 1],
                      nln_latitude = lookup$coord[[.I]][, 2]), by = grp]

    # set keys, join interpolation and original data
    data.table::setkey(lookup, grp)
    data.table::setkey(res, grp)
    lookup <- lookup[res]
    lookup[, coord := NULL]

    # added first/last rows, number sequence for groups
    lookup[lookup[, .I[1], by = grp]$V1, nln_longitude := deploy_long]
    lookup[lookup[, .I[.N], by = grp]$V1, nln_longitude := t_lon]
    lookup[lookup[, .I[1], by = grp]$V1, nln_latitude := deploy_lat]
    lookup[lookup[, .I[.N], by = grp]$V1, nln_latitude := t_lat]
    lookup[,seq_count := 1:.N, by = grp]

    # lookup interpolated values for original dataset
    data.table::setkey(lookup, deploy_lat, deploy_long, t_lat, t_lon)
    nln_small <- lookup[nln_small, allow.cartesian = TRUE]
    data.table::setkey(nln_small, i.start, seq_count)

    # add timeseries for interpolating nln movements
    nln_small[nln_small[, .I[1], by = i.start]$V1,
              i_time := detection_timestamp_utc]
    nln_small[nln_small[, .I[.N], by = i.start]$V1, i_time := t_timestamp]

    # cumulative distance along each least-cost path (used as the
    # interpolation axis for timestamps below)
    nln_small[, latitude_lead := data.table::shift(nln_latitude, type = "lag", fill = NA), by = i.start]
    nln_small[, longitude_lead := data.table::shift(nln_longitude, type = "lag", fill = NA), by = i.start]
    nln_small[, cumdist := geosphere::distGeo(.SD[, c("nln_longitude", "nln_latitude")],
                                              .SD[,c("longitude_lead", "latitude_lead")]), by = i.start]
    nln_small[is.na(cumdist), cumdist := 0]
    nln_small[, cumdist := cumsum(cumdist), by = i.start]
    nln_small[, latitude_lead := NULL][, longitude_lead := NULL]

    # interpolate missing timestamps for interpolated coordinates
    nln_small[, i_time := as.POSIXct(approx(cumdist, i_time, xout = cumdist)$y,
                                     origin = "1970-01-01 00:00:00",
                                     tz = attr(nln_small$i_time, "tzone")),
              by = i.start]

    # create timestamp vector to interpolate on.
    nln[, bin_stamp := detection_timestamp_utc]
    nln[is.na(detection_timestamp_utc), bin_stamp := bin]
    nln[, grp := i.start]

    # interpolate timestamps
    data.table::setkey(nln_small, i.start)
    data.table::setkey(nln, i.start)
    nln[, i_lat := {tmp = nln_small[.(.SD[1, "i.start"]),
                                    c("i_time", "nln_latitude")];
      approx(tmp$i_time, tmp$nln_latitude,
             xout = bin_stamp)$y}, by = grp]
    nln[, i_lon := {tmp = nln_small[.(.SD[1, "i.start"]),
                                    c("i_time", "nln_longitude")];
      approx(tmp$i_time, tmp$nln_longitude,
             xout = bin_stamp)$y}, by = grp]
    nln[is.na(deploy_long), record_type := "interpolated"]
  }

  # combine into a single data.table
  out <- data.table::rbindlist(list(ln[record_type == "interpolated",
                                       c("animal_id", "bin_stamp", "i_lat", "i_lon", "record_type")],
                                    nln[record_type == "interpolated",
                                        c("animal_id", "bin_stamp", "i_lat", "i_lon",
                                          "record_type")],
                                    det[, c("animal_id", "bin_stamp", "i_lat", "i_lon",
                                            "record_type")]), use.names = TRUE)

  # (removed a dead line here: `out[, !c("animal_id")]` returned a copy
  # that was never assigned, so it had no effect)
  data.table::setkey(out, animal_id, bin_stamp)
  out[, bin_stamp := t_seq[findInterval(bin_stamp, t_seq)] ]
  data.table::setnames(out, c("animal_id", "bin_timestamp", "latitude",
                              "longitude", "record_type"))
  out <- na.omit(out, cols = "latitude")
  out <- unique(out)
  data.table::setorder(out, animal_id, bin_timestamp, -record_type)
  # default (out_class = NULL): return a plain data.frame
  if(is.null(out_class)){ out <- as.data.frame(out)
    return(out)
  }
  # if out_class == "tibble", then return tibble object
  if(out_class == "tibble"){ out <- tibble::as_tibble(out)
    return(out)}
  # otherwise out_class == "data.table": return the data.table
  return(out)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot.timeresolved}
\alias{plot.timeresolved}
\alias{plot.PHdata}
\title{Plot a time resolved mass spectrometry signal}
\usage{
\method{plot}{timeresolved}(x, label, mass, ...)
\method{plot}{PHdata}(x, label, mass, ...)
}
\arguments{
\item{x}{an object of class \code{\link{timeresolved}} or
\code{\link{PHdata}}}
\item{label}{a string with the name of the run}
\item{mass}{a string indicating the isotope of interest}
\item{...}{optional parameters}
}
\description{
Plots the raw signal of a given isotope against time.
}
\examples{
samplefile <- system.file("Samples.csv",package="ArArRedux")
masses <- c("Ar37","Ar38","Ar39","Ar40","Ar36")
mMC <- loaddata(samplefile,masses)
plot(mMC,"MD2-1a","Ar40")
mPH <- loaddata(samplefile,masses,PH=TRUE)
plot(mPH,"MD2-1a","Ar40")
}
|
/man/plot.Rd
|
no_license
|
pvermees/ArArRedux
|
R
| false
| true
| 869
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot.timeresolved}
\alias{plot.timeresolved}
\alias{plot.PHdata}
\title{Plot a time resolved mass spectrometry signal}
\usage{
\method{plot}{timeresolved}(x, label, mass, ...)
\method{plot}{PHdata}(x, label, mass, ...)
}
\arguments{
\item{x}{an object of class \code{\link{timeresolved}} or
\code{\link{PHdata}}}
\item{label}{a string with the name of the run}
\item{mass}{a string indicating the isotope of interest}
\item{...}{optional parameters}
}
\description{
Plots the raw signal of a given isotope against time.
}
\examples{
samplefile <- system.file("Samples.csv",package="ArArRedux")
masses <- c("Ar37","Ar38","Ar39","Ar40","Ar36")
mMC <- loaddata(samplefile,masses)
plot(mMC,"MD2-1a","Ar40")
mPH <- loaddata(samplefile,masses,PH=TRUE)
plot(mPH,"MD2-1a","Ar40")
}
|
## plot1.R -- Exploratory Data Analysis, Project 1
## David Saint Ruby, September 5, 2014
##
## Reads the household power consumption data, restricts it to the two
## days 2007-02-01 .. 2007-02-02, and writes a red histogram of
## Global_active_power to plot1.png.

## tokens that the raw file uses to mark missing values
na_tokens <- c("?")

## load the raw file; as.is = TRUE keeps Date/Time as character so they
## can be parsed explicitly below
power_data <- read.table("household_power_consumption.txt",
                         header = TRUE, sep = ";",
                         na.strings = na_tokens, as.is = TRUE)

## combine the Date and Time columns into a single timestamp column
power_data <- transform(power_data,
                        Time = strptime(paste(Date, Time),
                                        format = "%d/%m/%Y %H:%M:%S"))

## parse the Date column
power_data$Date <- as.Date(power_data$Date, format = "%d/%m/%Y")

## keep only the two days of interest
power_data <- subset(power_data, Date >= "2007-02-01" & Date <= "2007-02-02")

## open the PNG graphics device
png(file = "plot1.png", bg = "white")

## draw the histogram
hist(power_data$Global_active_power,
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency",
     col = "red")

## close the device
dev.off()

## tidy up the workspace
rm(power_data, na_tokens)
|
/plot1.R
|
no_license
|
davidsaintruby/ExData_Plotting1
|
R
| false
| false
| 1,133
|
r
|
## plot1.R -- Exploratory Data Analysis, Project 1
## David Saint Ruby, September 5, 2014
##
## Reads the household power consumption data, restricts it to the two
## days 2007-02-01 .. 2007-02-02, and writes a red histogram of
## Global_active_power to plot1.png.

## tokens that the raw file uses to mark missing values
na_tokens <- c("?")

## load the raw file; as.is = TRUE keeps Date/Time as character so they
## can be parsed explicitly below
power_data <- read.table("household_power_consumption.txt",
                         header = TRUE, sep = ";",
                         na.strings = na_tokens, as.is = TRUE)

## combine the Date and Time columns into a single timestamp column
power_data <- transform(power_data,
                        Time = strptime(paste(Date, Time),
                                        format = "%d/%m/%Y %H:%M:%S"))

## parse the Date column
power_data$Date <- as.Date(power_data$Date, format = "%d/%m/%Y")

## keep only the two days of interest
power_data <- subset(power_data, Date >= "2007-02-01" & Date <= "2007-02-02")

## open the PNG graphics device
png(file = "plot1.png", bg = "white")

## draw the histogram
hist(power_data$Global_active_power,
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency",
     col = "red")

## close the device
dev.off()

## tidy up the workspace
rm(power_data, na_tokens)
|
#########################################
#This function computes                 #
#the log of a function proportional to  #
#the posterior distribution             #
#########################################
# Arguments:
#   parms  - parameter vector: [1] Beta, [2] Q, [3] G; when indep = TRUE,
#            [4] and [5] are the two variance parameters; otherwise
#            [4], [5], [6] fill a 2x2 covariance matrix W (see below).
#   indep  - TRUE: independent inverse-gamma priors on the two variances;
#            FALSE: joint inverse-Wishart prior on W.
#   indBeta, indQ, indG - integer 1..9 selecting the prior family:
#            1 uniform, 2 gamma, 3 exponential, 4 normal, 5 t,
#            6 Weibull, 7 chi-square, 8 Cauchy, 9 lognormal,
#            with hyperparameters (aBeta,bBeta), (aQ,bQ), (aG,bG).
#   S, v   - inverse-Wishart scale matrix and degrees of freedom.
#   tauN_sh/tauN_sc, tauF_sh/tauF_sc - shape/scale of the inverse-gamma
#            priors used when indep = TRUE.
#   Y, times, VN, VF, n - data, passed through to loglik().
# Returns the log of (likelihood x priors), up to an additive constant.
logpost <- function(parms, indep, Y, times, VN, VF, n,
         indBeta, aBeta, bBeta, indQ, aQ, bQ,
         indG, aG, bG, S, v,
         tauN_sh, tauN_sc, tauF_sh, tauF_sc){
   # log prior density for Beta (family chosen by indBeta; see mapping above)
   lpBeta <- switch(indBeta, dunif(parms[1], aBeta, bBeta, log=TRUE), dgamma(parms[1], aBeta, bBeta, log=TRUE),
            dexp(parms[1], aBeta, log=TRUE), dnorm(parms[1], aBeta, bBeta, log=TRUE),
            dt(parms[1], aBeta, bBeta, log=TRUE), dweibull(parms[1], aBeta, bBeta, log=TRUE),
            dchisq(parms[1], aBeta, bBeta, log=TRUE), dcauchy(parms[1], aBeta, bBeta, log=TRUE),
            dlnorm(parms[1], aBeta, bBeta,log=TRUE))

   # log prior density for Q (family chosen by indQ)
   lpQ <- switch(indQ, dunif(parms[2], aQ, bQ, log=TRUE), dgamma(parms[2], aQ, bQ, log=TRUE),
          dexp(parms[2], aQ, log=TRUE), dnorm(parms[2], aQ, bQ, log=TRUE),
          dt(parms[2], aQ, bQ, log=TRUE), dweibull(parms[2], aQ, bQ, log=TRUE),
          dchisq(parms[2], aQ, bQ, log=TRUE), dcauchy(parms[2], aQ, bQ, log=TRUE),
          dlnorm(parms[2], aQ, bQ, log=TRUE))

   # log prior density for G (family chosen by indG)
   lpG <- switch(indG, dunif(parms[3], aG, bG, log=TRUE), dgamma(parms[3], aG, bG, log=TRUE),
          dexp(parms[3], aG, log=TRUE), dnorm(parms[3], aG, bG, log=TRUE),
          dt(parms[3], aG, bG, log=TRUE), dweibull(parms[3], aG, bG, log=TRUE),
          dchisq(parms[3], aG, bG, log=TRUE), dcauchy(parms[3], aG, bG, log=TRUE),
          dlnorm(parms[3], aG, bG, log=TRUE))

   if(indep){
      # independent inverse-gamma log-priors on parms[4] and parms[5],
      # written out analytically (shape tau*_sh, scale tau*_sc)
      lptauN <- tauN_sh * log(tauN_sc) - lgamma(tauN_sh) - (tauN_sh + 1) * log(parms[4]) - (tauN_sc/parms[4])
      lptauF <- tauF_sh * log(tauF_sc) - lgamma(tauF_sh) - (tauF_sh + 1) * log(parms[5]) - (tauF_sc/parms[5])
      lpvar <- lptauN + lptauF
   }
   else{
      # joint inverse-Wishart log-prior on the symmetric 2x2 matrix W
      # built from parms[4] (variance), parms[5] (variance), parms[6] (covariance)
      W <- matrix(c(parms[4],parms[6],parms[6],parms[5]), 2, 2)
      lpvar <- logdiwish(W, v, S)
   }

   # log posterior (up to a constant): log-likelihood plus all log-priors
   lp <- loglik(parms, indep, Y, times, VN, VF, n) + lpBeta + lpQ + lpG + lpvar

   return(lp)
}
|
/B2Z/R/logpost.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 2,397
|
r
|
#########################################
#This function computes                 #
#the log of a function proportional to  #
#the posterior distribution             #
#########################################
# Arguments:
#   parms  - parameter vector: [1] Beta, [2] Q, [3] G; when indep = TRUE,
#            [4] and [5] are the two variance parameters; otherwise
#            [4], [5], [6] fill a 2x2 covariance matrix W (see below).
#   indep  - TRUE: independent inverse-gamma priors on the two variances;
#            FALSE: joint inverse-Wishart prior on W.
#   indBeta, indQ, indG - integer 1..9 selecting the prior family:
#            1 uniform, 2 gamma, 3 exponential, 4 normal, 5 t,
#            6 Weibull, 7 chi-square, 8 Cauchy, 9 lognormal,
#            with hyperparameters (aBeta,bBeta), (aQ,bQ), (aG,bG).
#   S, v   - inverse-Wishart scale matrix and degrees of freedom.
#   tauN_sh/tauN_sc, tauF_sh/tauF_sc - shape/scale of the inverse-gamma
#            priors used when indep = TRUE.
#   Y, times, VN, VF, n - data, passed through to loglik().
# Returns the log of (likelihood x priors), up to an additive constant.
logpost <- function(parms, indep, Y, times, VN, VF, n,
         indBeta, aBeta, bBeta, indQ, aQ, bQ,
         indG, aG, bG, S, v,
         tauN_sh, tauN_sc, tauF_sh, tauF_sc){
   # log prior density for Beta (family chosen by indBeta; see mapping above)
   lpBeta <- switch(indBeta, dunif(parms[1], aBeta, bBeta, log=TRUE), dgamma(parms[1], aBeta, bBeta, log=TRUE),
            dexp(parms[1], aBeta, log=TRUE), dnorm(parms[1], aBeta, bBeta, log=TRUE),
            dt(parms[1], aBeta, bBeta, log=TRUE), dweibull(parms[1], aBeta, bBeta, log=TRUE),
            dchisq(parms[1], aBeta, bBeta, log=TRUE), dcauchy(parms[1], aBeta, bBeta, log=TRUE),
            dlnorm(parms[1], aBeta, bBeta,log=TRUE))

   # log prior density for Q (family chosen by indQ)
   lpQ <- switch(indQ, dunif(parms[2], aQ, bQ, log=TRUE), dgamma(parms[2], aQ, bQ, log=TRUE),
          dexp(parms[2], aQ, log=TRUE), dnorm(parms[2], aQ, bQ, log=TRUE),
          dt(parms[2], aQ, bQ, log=TRUE), dweibull(parms[2], aQ, bQ, log=TRUE),
          dchisq(parms[2], aQ, bQ, log=TRUE), dcauchy(parms[2], aQ, bQ, log=TRUE),
          dlnorm(parms[2], aQ, bQ, log=TRUE))

   # log prior density for G (family chosen by indG)
   lpG <- switch(indG, dunif(parms[3], aG, bG, log=TRUE), dgamma(parms[3], aG, bG, log=TRUE),
          dexp(parms[3], aG, log=TRUE), dnorm(parms[3], aG, bG, log=TRUE),
          dt(parms[3], aG, bG, log=TRUE), dweibull(parms[3], aG, bG, log=TRUE),
          dchisq(parms[3], aG, bG, log=TRUE), dcauchy(parms[3], aG, bG, log=TRUE),
          dlnorm(parms[3], aG, bG, log=TRUE))

   if(indep){
      # independent inverse-gamma log-priors on parms[4] and parms[5],
      # written out analytically (shape tau*_sh, scale tau*_sc)
      lptauN <- tauN_sh * log(tauN_sc) - lgamma(tauN_sh) - (tauN_sh + 1) * log(parms[4]) - (tauN_sc/parms[4])
      lptauF <- tauF_sh * log(tauF_sc) - lgamma(tauF_sh) - (tauF_sh + 1) * log(parms[5]) - (tauF_sc/parms[5])
      lpvar <- lptauN + lptauF
   }
   else{
      # joint inverse-Wishart log-prior on the symmetric 2x2 matrix W
      # built from parms[4] (variance), parms[5] (variance), parms[6] (covariance)
      W <- matrix(c(parms[4],parms[6],parms[6],parms[5]), 2, 2)
      lpvar <- logdiwish(W, v, S)
   }

   # log posterior (up to a constant): log-likelihood plus all log-priors
   lp <- loglik(parms, indep, Y, times, VN, VF, n) + lpBeta + lpQ + lpG + lpvar

   return(lp)
}
|
## cachematrix.R: functions that cache the inverse of a matrix so
## repeated solves of the same (unchanged) matrix are served from cache.

## makeCacheMatrix: wrap a matrix in an object (a list of closures) that
## stores the matrix plus a cached copy of its inverse.
## Exposed functions: set, get, setmatrix, getmatrix.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until setmatrix() is called
  list(
    set = function(y) {
      # replace the stored matrix and drop any stale cached inverse
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setmatrix = function(matrix) inv <<- matrix,
    getmatrix = function() inv
  )
}
## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix, computing it with solve() on the first call and
## serving the cached copy on subsequent calls.
cacheSolve <- function(x, ...) {
  cached <- x$getmatrix()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setmatrix(inv)
  inv
  ## Return a matrix that is the inverse of 'x'
}
|
/cachematrix.R
|
no_license
|
sfpacman/datasciencecoursera
|
R
| false
| false
| 731
|
r
|
## cachematrix.R: functions that cache the inverse of a matrix so
## repeated solves of the same (unchanged) matrix are served from cache.

## makeCacheMatrix: wrap a matrix in an object (a list of closures) that
## stores the matrix plus a cached copy of its inverse.
## Exposed functions: set, get, setmatrix, getmatrix.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until setmatrix() is called
  list(
    set = function(y) {
      # replace the stored matrix and drop any stale cached inverse
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setmatrix = function(matrix) inv <<- matrix,
    getmatrix = function() inv
  )
}
## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix, computing it with solve() on the first call and
## serving the cached copy on subsequent calls.
cacheSolve <- function(x, ...) {
  cached <- x$getmatrix()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setmatrix(inv)
  inv
  ## Return a matrix that is the inverse of 'x'
}
|
# Integrate samples together.
# NOTE: add.cell.ids must line up element-for-element with the merged
# objects (x first, then each element of y in order) so every cell
# barcode is prefixed with its true sample of origin.
gastric_N.big.normalized<-merge(P3_N1.s, y=c(P3_N2.s, P5_N1.s, P5_N2.s), add.cell.ids=c("P3_N1", "P3_N2", "P5_N1", "P5_N2"), project = "Normal", merge.data = TRUE)
# fixed: second cell id was "P3_N2" but the second object is P3_P2.s
gastric_P.big.normalized<-merge(P3_P1.s, y=c(P3_P2.s, P5_P1.s, P5_P2.s), add.cell.ids=c("P3_P1", "P3_P2", "P5_P1", "P5_P2"), project = "PARI", merge.data = TRUE)
# fixed: ids now match the tumor objects P3_T1.s/P3_T2.s/P3_T3.s/P5_T2.s
# (previous ids "P5_T3"/"P5_T1" mislabeled the last two samples)
gastric_T.big.normalized<-merge(P3_T1.s, y=c(P3_T2.s, P3_T3.s, P5_T2.s), add.cell.ids=c("P3_T1", "P3_T2", "P3_T3", "P5_T2"), project = "Tumor", merge.data = TRUE)
# tag each merged object with its condition (stored as metadata "stim")
gastric_N.big.normalized$stim <- "Normal"
gastric_P.big.normalized$stim <- "P"
gastric_T.big.normalized$stim <- "Tumor"
#merge the three conditions into a single object
gastric_P3P5<-merge(gastric_N.big.normalized, y=c(gastric_P.big.normalized, gastric_T.big.normalized), project = "GASTRIC12", merge.data = TRUE)
# quality control: mitochondrial fraction and feature/count distributions
gastric_P3P5[["percent.mt"]]<-PercentageFeatureSet(gastric_P3P5, pattern = "^MT-")
VlnPlot(gastric_P3P5, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"))
plot1<-FeatureScatter(gastric_P3P5, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2<-FeatureScatter(gastric_P3P5, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
plot1 + plot2
#normalize data
gastric_P3P5<-NormalizeData(gastric_P3P5, normalization.method = "LogNormalize", scale.factor = 50000)
#feature selection
gastric_P3P5<-FindVariableFeatures(gastric_P3P5)
top10<-head(VariableFeatures(gastric_P3P5),10)
#scaling the data
all.genes<-rownames(gastric_P3P5)
gastric_P3P5<-ScaleData(gastric_P3P5, features = all.genes)
#dim reduction--PCA
gastric_P3P5<-RunPCA(gastric_P3P5, npcs = 100, ndims.print = 1:5, nfeatures.print = 5)
# NOTE: ScoreJackStraw()/JackStrawPlot() require JackStraw() to have been
# run first; uncomment the next line before using them.
#gastric_P3P5<-JackStraw(gastric_P3P5, num.replicate = 1000)
gastric_P3P5<-ScoreJackStraw(gastric_P3P5, dims =1:100)
JackStrawPlot(gastric_P3P5, dims = 1:100)
ElbowPlot(gastric_P3P5, ndims = 100)  # fixed typo: was ElowPlot()
DimHeatmap(gastric_P3P5, dims = c(1:3, 50:60), cells = 500, balanced = TRUE)
#clustering on the first 75 PCs
gastric_P3P5<-FindNeighbors(gastric_P3P5, reduction = "pca", dims = 1:75)
gastric_P3P5<-FindClusters(gastric_P3P5, resolution = 0.4)
head(Idents(gastric_P3P5),5)
#Visualization(UMAP)
gastric_P3P5<-RunUMAP(gastric_P3P5,dims = 1:75)
DimPlot(gastric_P3P5, reduction = "umap", label = TRUE, group.by = "stim")
#Visualization(t-SNE)
gastric_P3P5<-RunTSNE(gastric_P3P5, dims = 1:75, nthreads = 4, max_iter =2000, check_duplicates=FALSE)
#Visualization(t-SNE vs. UMAP)
library(ggplot2)
p1<-DimPlot(gastric_P3P5, reduction = "tsne", label = TRUE) + ggtitle(label="t-SNE")
p2<-DimPlot(gastric_P3P5, reduction = "umap", label = TRUE) + ggtitle(label="UMAP")
p1<-AugmentPlot(plot = p1)
p2<-AugmentPlot(plot = p2)
(p1 + p2) & NoLegend()
#Visualization(batch effect)
library(cowplot)
DimPlot(gastric_P3P5, reduction = "umap", label = TRUE)
# fixed: group.by was passed the Seurat object itself; it must name a
# metadata column (here "stim", presumably to color by condition -- confirm)
p1<-DimPlot(gastric_P3P5, reduction = "umap", group.by = "stim")
p2<-DimPlot(gastric_P3P5, reduction = "umap", label = TRUE)
plot_grid(p1,p2)
|
/scRNAscript/merge_samples.R
|
no_license
|
pyanne2000/GC-analysis
|
R
| false
| false
| 2,903
|
r
|
# Integrate samples together.
# NOTE: add.cell.ids must line up element-for-element with the merged
# objects (x first, then each element of y in order) so every cell
# barcode is prefixed with its true sample of origin.
gastric_N.big.normalized<-merge(P3_N1.s, y=c(P3_N2.s, P5_N1.s, P5_N2.s), add.cell.ids=c("P3_N1", "P3_N2", "P5_N1", "P5_N2"), project = "Normal", merge.data = TRUE)
# fixed: second cell id was "P3_N2" but the second object is P3_P2.s
gastric_P.big.normalized<-merge(P3_P1.s, y=c(P3_P2.s, P5_P1.s, P5_P2.s), add.cell.ids=c("P3_P1", "P3_P2", "P5_P1", "P5_P2"), project = "PARI", merge.data = TRUE)
# fixed: ids now match the tumor objects P3_T1.s/P3_T2.s/P3_T3.s/P5_T2.s
# (previous ids "P5_T3"/"P5_T1" mislabeled the last two samples)
gastric_T.big.normalized<-merge(P3_T1.s, y=c(P3_T2.s, P3_T3.s, P5_T2.s), add.cell.ids=c("P3_T1", "P3_T2", "P3_T3", "P5_T2"), project = "Tumor", merge.data = TRUE)
# tag each merged object with its condition (stored as metadata "stim")
gastric_N.big.normalized$stim <- "Normal"
gastric_P.big.normalized$stim <- "P"
gastric_T.big.normalized$stim <- "Tumor"
#merge the three conditions into a single object
gastric_P3P5<-merge(gastric_N.big.normalized, y=c(gastric_P.big.normalized, gastric_T.big.normalized), project = "GASTRIC12", merge.data = TRUE)
# quality control: mitochondrial fraction and feature/count distributions
gastric_P3P5[["percent.mt"]]<-PercentageFeatureSet(gastric_P3P5, pattern = "^MT-")
VlnPlot(gastric_P3P5, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"))
plot1<-FeatureScatter(gastric_P3P5, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2<-FeatureScatter(gastric_P3P5, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
plot1 + plot2
#normalize data
gastric_P3P5<-NormalizeData(gastric_P3P5, normalization.method = "LogNormalize", scale.factor = 50000)
#feature selection
gastric_P3P5<-FindVariableFeatures(gastric_P3P5)
top10<-head(VariableFeatures(gastric_P3P5),10)
#scaling the data
all.genes<-rownames(gastric_P3P5)
gastric_P3P5<-ScaleData(gastric_P3P5, features = all.genes)
#dim reduction--PCA
gastric_P3P5<-RunPCA(gastric_P3P5, npcs = 100, ndims.print = 1:5, nfeatures.print = 5)
# NOTE: ScoreJackStraw()/JackStrawPlot() require JackStraw() to have been
# run first; uncomment the next line before using them.
#gastric_P3P5<-JackStraw(gastric_P3P5, num.replicate = 1000)
gastric_P3P5<-ScoreJackStraw(gastric_P3P5, dims =1:100)
JackStrawPlot(gastric_P3P5, dims = 1:100)
ElbowPlot(gastric_P3P5, ndims = 100)  # fixed typo: was ElowPlot()
DimHeatmap(gastric_P3P5, dims = c(1:3, 50:60), cells = 500, balanced = TRUE)
#clustering on the first 75 PCs
gastric_P3P5<-FindNeighbors(gastric_P3P5, reduction = "pca", dims = 1:75)
gastric_P3P5<-FindClusters(gastric_P3P5, resolution = 0.4)
head(Idents(gastric_P3P5),5)
#Visualization(UMAP)
gastric_P3P5<-RunUMAP(gastric_P3P5,dims = 1:75)
DimPlot(gastric_P3P5, reduction = "umap", label = TRUE, group.by = "stim")
#Visualization(t-SNE)
gastric_P3P5<-RunTSNE(gastric_P3P5, dims = 1:75, nthreads = 4, max_iter =2000, check_duplicates=FALSE)
#Visualization(t-SNE vs. UMAP)
library(ggplot2)
p1<-DimPlot(gastric_P3P5, reduction = "tsne", label = TRUE) + ggtitle(label="t-SNE")
p2<-DimPlot(gastric_P3P5, reduction = "umap", label = TRUE) + ggtitle(label="UMAP")
p1<-AugmentPlot(plot = p1)
p2<-AugmentPlot(plot = p2)
(p1 + p2) & NoLegend()
#Visualization(batch effect)
library(cowplot)
DimPlot(gastric_P3P5, reduction = "umap", label = TRUE)
# fixed: group.by was passed the Seurat object itself; it must name a
# metadata column (here "stim", presumably to color by condition -- confirm)
p1<-DimPlot(gastric_P3P5, reduction = "umap", group.by = "stim")
p2<-DimPlot(gastric_P3P5, reduction = "umap", label = TRUE)
plot_grid(p1,p2)
|
testlist <- list(a = 0L, b = 0L, x = c(134744072L, 134744072L, 144678815L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, 134744072L, 134744072L, 134744072L, 134744072L, 134744072L, 134744072L, 134744072L, 134221320L, 134744072L, 134744072L, 134744072L, 134744064L, 0L, 218103807L, -16777216L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610131952-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 516
|
r
|
testlist <- list(a = 0L, b = 0L, x = c(134744072L, 134744072L, 144678815L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, -1616928865L, 134744072L, 134744072L, 134744072L, 134744072L, 134744072L, 134744072L, 134744072L, 134221320L, 134744072L, 134744072L, 134744072L, 134744064L, 0L, 218103807L, -16777216L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/owner.R
\name{owner}
\alias{owner}
\alias{owner<-}
\alias{owner,character-method}
\alias{owner,SsimLibrary-method}
\alias{owner,Project-method}
\alias{owner,Scenario-method}
\alias{owner,Folder-method}
\alias{owner<-,character-method}
\alias{owner<-,SsimObject-method}
\alias{owner<-,Folder-method}
\title{Owner of a SsimLibrary, Project, Scenario, or Folder}
\usage{
owner(ssimObject)
owner(ssimObject) <- value
\S4method{owner}{character}(ssimObject)
\S4method{owner}{SsimLibrary}(ssimObject)
\S4method{owner}{Project}(ssimObject)
\S4method{owner}{Scenario}(ssimObject)
\S4method{owner}{Folder}(ssimObject)
\S4method{owner}{character}(ssimObject) <- value
\S4method{owner}{SsimObject}(ssimObject) <- value
\S4method{owner}{Folder}(ssimObject) <- value
}
\arguments{
\item{ssimObject}{\code{\link{Session}}, \code{\link{Project}},
\code{\link{Scenario}}, \code{\link{SsimLibrary}}, or \code{\link{Folder}} object}
\item{value}{character string of the new owner}
}
\value{
A character string: the owner of the SsimObject.
}
\description{
Retrieves or sets the owner of a \code{\link{SsimLibrary}},
\code{\link{Project}}, \code{\link{Scenario}}, or \code{\link{Folder}}.
}
\examples{
\dontrun{
# Specify file path and name of new SsimLibrary
myLibraryName <- file.path(tempdir(), "testlib")
# Set up a SyncroSim Session, SsimLibrary, Project, and Scenario
mySession <- session()
myLibrary <- ssimLibrary(name = myLibraryName, session = mySession)
myProject <- project(myLibrary, project = "Definitions")
myScenario <- scenario(myProject, scenario = "My Scenario")
# Retrieve the owner of an SsimObject
owner(myLibrary)
owner(myProject)
owner(myScenario)
# Set the owner of a SyncroSim Scenario
owner(myScenario) <- "Apex RMS"
}
}
|
/man/owner.Rd
|
permissive
|
syncrosim/rsyncrosim
|
R
| false
| true
| 1,797
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/owner.R
\name{owner}
\alias{owner}
\alias{owner<-}
\alias{owner,character-method}
\alias{owner,SsimLibrary-method}
\alias{owner,Project-method}
\alias{owner,Scenario-method}
\alias{owner,Folder-method}
\alias{owner<-,character-method}
\alias{owner<-,SsimObject-method}
\alias{owner<-,Folder-method}
\title{Owner of a SsimLibrary, Project, Scenario, or Folder}
\usage{
owner(ssimObject)
owner(ssimObject) <- value
\S4method{owner}{character}(ssimObject)
\S4method{owner}{SsimLibrary}(ssimObject)
\S4method{owner}{Project}(ssimObject)
\S4method{owner}{Scenario}(ssimObject)
\S4method{owner}{Folder}(ssimObject)
\S4method{owner}{character}(ssimObject) <- value
\S4method{owner}{SsimObject}(ssimObject) <- value
\S4method{owner}{Folder}(ssimObject) <- value
}
\arguments{
\item{ssimObject}{\code{\link{Session}}, \code{\link{Project}},
\code{\link{SsimLibrary}}, or \code{\link{Folder}} object}
\item{value}{character string of the new owner}
}
\value{
A character string: the owner of the SsimObject.
}
\description{
Retrieves or sets the owner of a \code{\link{SsimLibrary}},
\code{\link{Project}}, \code{\link{Scenario}}, or \code{\link{Folder}}.
}
\examples{
\dontrun{
# Specify file path and name of new SsimLibrary
myLibraryName <- file.path(tempdir(), "testlib")
# Set up a SyncroSim Session, SsimLibrary, Project, and Scenario
mySession <- session()
myLibrary <- ssimLibrary(name = myLibraryName, session = mySession)
myProject <- project(myLibrary, project = "Definitions")
myScenario <- scenario(myProject, scenario = "My Scenario")
# Retrieve the owner of an SsimObject
owner(myLibrary)
owner(myProject)
owner(myScenario)
# Set the owner of a SyncroSim Scenario
owner(myScenario) <- "Apex RMS"
}
}
|
# Download and unpack the UCI "Individual household electric power
# consumption" dataset.
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "household_power_consumption.zip")
unzip("household_power_consumption.zip")
library(sqldf)
# Read only the rows for 1-2 February 2007, filtering via SQL so the
# full file never needs to be loaded unfiltered into memory.
x<-read.csv.sql("household_power_consumption.txt", sql="select * from file where Date in ('1/2/2007','2/2/2007')", sep = ";", colClasses = c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric"))
# Plot 1: histogram of Global Active Power, written to plot1.png.
png(filename="plot1.png")
hist(x$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()
|
/plot1.R
|
no_license
|
sebastianovide/ExData_Plotting1
|
R
| false
| false
| 589
|
r
|
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "household_power_consumption.zip")
unzip("household_power_consumption.zip")
library(sqldf)
x<-read.csv.sql("household_power_consumption.txt", sql="select * from file where Date in ('1/2/2007','2/2/2007')", sep = ";", colClasses = c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric"))
png(filename="plot1.png")
hist(x$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()
|
library(fMultivar)
### Name: utils-adapt
### Title: Integrator for multivariate distributions
### Aliases: adapt
### Keywords: math
### ** Examples
## No test:
## Check that dnorm2d is normalized:
# Normal Density:
# Wrap dnorm2d so the integrand takes a single coordinate vector x = (x1, x2),
# as required by cubature's integrators.
density <- function(x) dnorm2d(x=x[1], y = x[2])
# Calling Cubature:
# Integrate over a box wide enough to approximate (-Inf, Inf)^2;
# the result should be ~1 if dnorm2d is a proper density.
BIG <- c(99, 99)
cubature::adaptIntegrate(f=density, lowerLimit=-BIG, upperLimit=BIG)
cubature::adaptIntegrate(f=density, low=-BIG, upp=BIG, tol=1e-7)
# Using the Wrapper:
# fMultivar's adapt() forwards to cubature with a simplified interface.
adapt(lower=-BIG, upper=BIG, functn=density)
adapt(lower=-BIG, upper=BIG, functn=density, tol=1e-7)$integral
## End(No test)
|
/data/genthat_extracted_code/fMultivar/examples/utils-adapt.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 632
|
r
|
library(fMultivar)
### Name: utils-adapt
### Title: Integrator for multivariate distributions
### Aliases: adapt
### Keywords: math
### ** Examples
## No test:
## Check that dnorm2d is normalized:
# Normal Density:
density <- function(x) dnorm2d(x=x[1], y = x[2])
# Calling Cubature:
BIG <- c(99, 99)
cubature::adaptIntegrate(f=density, lowerLimit=-BIG, upperLimit=BIG)
cubature::adaptIntegrate(f=density, low=-BIG, upp=BIG, tol=1e-7)
# Using the Wrapper:
adapt(lower=-BIG, upper=BIG, functn=density)
adapt(lower=-BIG, upper=BIG, functn=density, tol=1e-7)$integral
## End(No test)
|
# W knitrze jakos bezsensu ustawia sie filled.contour (legenda zajmuje 50% wykresu!)
# tutaj generuje te obrazki recznie.
# Open a PDF graphics device for plot `name` under contours/ (the
# directory must already exist). Caller is responsible for dev.off().
pdfFnc = function(name)
{
  # Bug fix: pdf() must be opened BEFORE par(); in the original order the
  # margin settings applied to the previously active device and the new
  # PDF kept the default margins.
  pdf(sprintf("contours/%s.pdf", name), pointsize = 16)
  par(mar = c(2, 2, 2, 2))
}
# Euclidean depth contours for a simulated correlated bivariate normal sample.
pdfFnc("e1")
x = mvrnorm(200,c(0,0), cbind(c(1,0.8),c(0.8,1)))
depthContour(x, method = "Euclidean", points = TRUE)
dev.off()
# Euclidean depth contours for infant mortality vs. measles immunization
# (first column of each dataset -- presumably year 1990, per variable name).
pdfFnc("e2")
data(inf.mort,maesles.imm)
data1990=na.omit(cbind(inf.mort[,1],maesles.imm[,1]))
depthContour(data1990, method = "Euclidean", points = TRUE)
dev.off()
# Mahalanobis depth contours for the same two datasets.
pdfFnc("m1")
x = mvrnorm(200,c(0,0), cbind(c(1,0.8),c(0.8,1)))
depthContour(x, method = "Mahalanobis", points = TRUE)
dev.off()
pdfFnc("m2")
data(inf.mort,maesles.imm)
data1990=na.omit(cbind(inf.mort[,1],maesles.imm[,1]))
depthContour(data1990, method = "Mahalanobis", points = TRUE)
dev.off()
# borrowed from https://github.com/hadley/bigvis/blob/master/R/challenge.r
rchallenge <- function(n) {
  # Sample n values from the bigvis "challenge" mixture: roughly one third
  # come from a heavy-tailed t-based spike centred at 15 and floored at
  # zero; the rest come from a gamma(2, 1/3) slope component.
  n_spike <- rbinom(1, n, 1 / 3)
  spike_part <- pmax(2 * rt(n_spike, df = 2) + 15, 0)
  slope_part <- rgamma(n - n_spike, 2, 1 / 3)
  c(spike_part, slope_part)
}
# Tukey depth: contour and perspective plots of a bimodal "challenge"
# sample (seeded so both plots use the same data).
pdfFnc("t1")
set.seed(123)
x = cbind(rchallenge(120),rchallenge(120))
depthContour(x, method = "Tukey", points = TRUE)
dev.off()
pdfFnc("t2")
depthPersp(x, method = "Tukey")
dev.off()
|
/contoursPlots.R
|
no_license
|
zzawadz/DepthProc_PAZUR2014
|
R
| false
| false
| 1,282
|
r
|
# W knitrze jakos bezsensu ustawia sie filled.contour (legenda zajmuje 50% wykresu!)
# tutaj generuje te obrazki recznie.
pdfFnc = function(name)
{
par(mar = c(2,2,2,2))
pdf(sprintf("contours/%s.pdf",name), pointsize = 16)
}
pdfFnc("e1")
x = mvrnorm(200,c(0,0), cbind(c(1,0.8),c(0.8,1)))
depthContour(x, method = "Euclidean", points = TRUE)
dev.off()
pdfFnc("e2")
data(inf.mort,maesles.imm)
data1990=na.omit(cbind(inf.mort[,1],maesles.imm[,1]))
depthContour(data1990, method = "Euclidean", points = TRUE)
dev.off()
pdfFnc("m1")
x = mvrnorm(200,c(0,0), cbind(c(1,0.8),c(0.8,1)))
depthContour(x, method = "Mahalanobis", points = TRUE)
dev.off()
pdfFnc("m2")
data(inf.mort,maesles.imm)
data1990=na.omit(cbind(inf.mort[,1],maesles.imm[,1]))
depthContour(data1990, method = "Mahalanobis", points = TRUE)
dev.off()
# borrowed from https://github.com/hadley/bigvis/blob/master/R/challenge.r
rchallenge <- function(n) {
nt <- rbinom(1, n, 1 / 3)
ngamma <- n - nt
spike <- 2 * rt(nt, df = 2) + 15
spike[spike < 0] <- 0
slope <- rgamma(ngamma, 2, 1/3)
c(spike, slope)
}
pdfFnc("t1")
set.seed(123)
x = cbind(rchallenge(120),rchallenge(120))
depthContour(x, method = "Tukey", points = TRUE)
dev.off()
pdfFnc("t2")
depthPersp(x, method = "Tukey")
dev.off()
|
# title: "Responding to analysis and communication: Data science the R way"
# subtitle: "DataTeka"
# author: "Tatjana Kecojevic"
# date: "26 April 2018"
# **Tip**💡:
# - When start working on a new R code/R Project in [RStudio IDE](https://support.rstudio.com/hc/en-us/sections/200107586-Using-the-RStudio-IDE) use
# ***File -> New Project***
# This way your working directory would be set up when you start a new project and it will save all your files in it. Next time you open your project it would set project's directory as a working directory... It would help you with so much [more](https://support.rstudio.com/hc/en-us/articles/200526207-Using-Projects).
# ---
## Dataset
# **gapminder** dataset available from **gapminder** package.
# For each of 142 countries, the package provides values for life expectancy, GDP per capita, and population, every five years, from 1952 to 2007.
DT::datatable(head(gapminder::gapminder, 4))
##Gapminder Data
gapminder::gapminder[1:3,]
install.packages("dplyr", repos = "http://cran.us.r-project.org")
install.packages("ggplot2", repos = "http://cran.us.r-project.org")
install.packages("gapminder", repos = "http://cran.us.r-project.org")
## 1st look at the data: <span style="color:blue">`dim()`</span> & <span style="color:blue">`head()`</span>
library(gapminder)
dim(gapminder)
head(gapminder, n=10)
##Examine the structure of the data: <span style="color:blue">`str()`</span>
str(gapminder)
##Do it in a tidy way: glimpse()
library(dplyr)
glimpse(gapminder)
##Select your variables
#1) that ends with letter `p`
#2) starts with letter `o`. Try to do this selection using base R.
##Solutions:
# gm_pop_gdp <- select()
# head(gm_pop_gdp)
# gm_cc <- select()
# head()
# gm_cc <- gapminder[]
##Create new variables of existing variables: <span style="color:blue">`mutate()`</span>
# gapminder2 <- mutate()
# head(gapminder2)
## Filter your data:
# Use `gapminder2` `df` to filter:
# 1) only European countries and save it as `gapmEU`
# 2) only European countries from 2000 onward and save it as `gapmEU21c`
# 3) rows where the life expectancy is greater than 80
#
# Don't forget to **use `==` instead of `=`**! and
# Don't forget the quotes ** `""` **
---
##Solutions:
# gapmEU <- filter(gapminder2, )
# head(gapmEU)
# gapmEU21c <- filter(gapminder2, )
# head(gapmEU21c)
# filter(gapminder2, lifeExp > 80)
## Arranging your data
# 1) Arrange countries in `gapmEU21c` `df` by life expectancy in ascending and descending order.
# 2) Using `gapminder df`
# - Find the records with the smallest population
# - Find the records with the largest life expectancy.
# ---
## Solution 1):
# gapmEU21c_h2l <- arrange(gapmEU21c, )
# head()
# gapmEU21c_l2h <- arrange(gapmEU21c, )
# head()
# ---
## Solution 2):
# arrange()
# arrange()
# ---
##Solution: Summarise your data
# summarise(gapminder, max_lifeExp = , max_gdpPercap = )
# summarise()
#**Do you know what this code does?**
gapminder_pipe <- gapminder %>%
filter(continent == "Europe" & year == 2007) %>%
mutate(pop_e6 = pop / 1000000)
plot(gapminder_pipe$pop_e6, gapminder_pipe$lifeExp, cex = 0.5, col = "red")
# Can we make it look better? 😁
## ggplot()
# 1. "Initialise" a plot with `ggplot()`
# 2. Add layers with `geom_` functions
library(ggplot2)
ggplot(gapminder_pipe, aes(x = pop_e6, y = lifeExp)) +
geom_point(col ="red")
# ggplot() gallery
ggplot(data = gapminder, mapping = aes(x = lifeExp), binwidth = 10) +
geom_histogram()
#
ggplot(data = gapminder, mapping = aes(x = lifeExp)) +
geom_density()
#
ggplot(data = gapminder, mapping = aes(x = continent, color = continent)) +
geom_bar()
#
ggplot(data = gapminder, mapping = aes(x = continent, fill = continent)) +
geom_bar()
##Confer with your neighbours:
m1 <- lm(gapminder_pipe$lifeExp ~ gapminder_pipe$pop_e6)
summary(m1)
## Your turn!
# Use gapminder data.
# **Does the life expectancy depend upon the GDP per capita?**
# 1) Have a glance at the data. (tip: `sample_n(df, n)`)
# 2) Produce a scatter plot: what does it tell you?
# 3) Fit a regression model: is there a relationship? How strong is it?
# Is the relationship linear? What conclusion(s) can you draw?
# 4) What are the other questions you could ask; could you provide the answers to them?
## Possible Solution: code Q1; sample
# sample_n()
## Possible Solution: code Q2; Plot the data;
# ggplot(gapminder, aes(x = gdpPercap, y = lifeExp)) +
# geom_point(alpha = 0.2, shape = 21, fill = "blue", colour="black", size = 5) +
# geom_smooth(method = "lm", se = F, col = "maroon3") +
# geom_smooth(method = "loess", se = F, col = "limegreen")
## Possible Solution: code Q3; simple regression model
# my.model <- lm()
# summary(my.model)
## Adding layers to your ggplot()
ggplot(gapminder, aes(x = gdpPercap, y = lifeExp, col = "red")) +
geom_point(alpha = 0.2, shape = 21, fill = "blue", colour="black", size = 5) +
geom_smooth(method = "lm", se = F, col = "maroon3") +
geom_smooth(method = "loess", se = F, col = "limegreen") +
labs (title= "Life Exp. vs. Population Size",
x = "population", y = "Life Exp.") +
theme(legend.position = "none",
panel.border = element_rect(fill = NA,
colour = "black",
size = .75),
plot.title=element_text(hjust=0.5)) +
geom_text(x = 80000, y = 125, label = "regression line", col = "maroon3") +
geom_text(x = 90000, y = 75, label = "smooth line", col = "limegreen")
## **There is a challenge:**
# - `dplyr`'s `group_by()` function enables you to group your data. It allows you to create a separate df that splits the original df by a variable.
# - `boxplot()` function produces boxplot(s) of the given (grouped) values.
# Knowing about `group_by()` and `boxplot()` function, could you compute the median life expectancy for year 2007 by continent and visualise your result?
## Possible Solution:
# gapminder %>%
## Possible Solution:
# visualise the information code
# ggplot(gapminder, aes(x = continent, y = lifeExp)) +
# geom_boxplot(outlier.colour = "hotpink") +
# geom_jitter(position = position_jitter(width = 0.1, height = 0), alpha = .2) +
# labs (title= "Life Exp. vs. Continent",
# x = "Continent", y = "Life Exp.") +
# theme(legend.position = "none",
# panel.border = element_rect(fill = NA,
# colour = "black",
# size = .75),
# plot.title=element_text(hjust=0.5))
##Let's do Elain's Dance!!! 😃🎵🎶
|
/Slides_Script_NoAnswers.R
|
no_license
|
TanjaKec/RWorkshop_xaringan
|
R
| false
| false
| 6,555
|
r
|
# title: "Responding to analysis and communication: Data science the R way"
# subtitle: "DataTeka"
# author: "Tatjana Kecojevic"
# date: "26 April 2018"
# **Tip**💡:
# - When start working on a new R code/R Project in [RStudio IDE](https://support.rstudio.com/hc/en-us/sections/200107586-Using-the-RStudio-IDE) use
# ***File -> New Project***
# This way your working directory would be set up when you start a new project and it will save all your files in it. Next time you open your project it would set project's directory as a working directory... It would help you with so much [more](https://support.rstudio.com/hc/en-us/articles/200526207-Using-Projects).
# ---
## Dataset
# **gapminder** dataset available from **gapminder** package.
# For each of 142 countries, the package provides values for life expectancy, GDP per capita, and population, every five years, from 1952 to 2007.
DT::datatable(head(gapminder::gapminder, 4))
##Gapminder Data
gapminder::gapminder[1:3,]
install.packages("dplyr", repos = "http://cran.us.r-project.org")
install.packages("ggplot2", repos = "http://cran.us.r-project.org")
install.packages("gapminder", repos = "http://cran.us.r-project.org")
## 1st look at the data: <span style="color:blue">`dim()`</span> & <span style="color:blue">`head()`</span>
library(gapminder)
dim(gapminder)
head(gapminder, n=10)
##Examine the structure of the data: <span style="color:blue">`str()`</span>
str(gapminder)
##Do it in a tidy way: glimpse()
library(dplyr)
glimpse(gapminder)
##Select your variables
#1) that ends with letter `p`
#2) starts with letter `o`. Try to do this selection using base R.
##Solutions:
# gm_pop_gdp <- select()
# head(gm_pop_gdp)
# gm_cc <- select()
# head()
# gm_cc <- gapminder[]
##Create new variables of existing variables: <span style="color:blue">`mutate()`</span>
# gapminder2 <- mutate()
# head(gapminder2)
## Filter your data:
# Use `gapminder2` `df` to filter:
# 1) only Europian countries and save it as `gapmEU`
# 2) only Europian countries from 2000 onward and save it as `gapmEU21c`
# 3) rows where the life expectancy is greater than 80
#
# Don't forget to **use `==` instead of `=`**! and
# Don't forget the quotes ** `""` **
---
##Solutions:
# gapmEU <- filter(gapminder2, )
# head(gapmEU)
# gapmEU21c <- filter(gapminder2, )
# head(gapmEU21c)
# filter(gapminder2, lifeExp > 80)
## Arranging your data
# 1) Arrange countries in `gapmEU21c` `df` by life expectancy in ascending and descending order.
# 2) Using `gapminder df`
# - Find the records with the smallest population
# - Find the records with the largest life expectancy.
# ---
## Solution 1):
# gapmEU21c_h2l <- arrange(gapmEU21c, )
# head()
# gapmEU21c_l2h <- arrange(gapmEU21c, )
# head()
# ---
## Solution 2):
# arrange()
# arrange()
# ---
##Solution: Summarise your data
# summarise(gapminder, max_lifeExp = , max_gdpPercap = )
# summarise()
#**Do you know what this code does?**
gapminder_pipe <- gapminder %>%
filter(continent == "Europe" & year == 2007) %>%
mutate(pop_e6 = pop / 1000000)
plot(gapminder_pipe$pop_e6, gapminder_pipe$lifeExp, cex = 0.5, col = "red")
# Can we make it look better? 😁
## ggplot()
# 1. "Initialise" a plot with `ggplot()`
# 2. Add layers with `geom_` functions
library(ggplot2)
ggplot(gapminder_pipe, aes(x = pop_e6, y = lifeExp)) +
geom_point(col ="red")
# ggplot() gallery
ggplot(data = gapminder, mapping = aes(x = lifeExp), binwidth = 10) +
geom_histogram()
#
ggplot(data = gapminder, mapping = aes(x = lifeExp)) +
geom_density()
#
ggplot(data = gapminder, mapping = aes(x = continent, color = continent)) +
geom_bar()
#
ggplot(data = gapminder, mapping = aes(x = continent, fill = continent)) +
geom_bar()
##Confer with your neighbours:
m1 <- lm(gapminder_pipe$lifeExp ~ gapminder_pipe$pop_e6)
summary(m1)
## Your turn!
# Use gapminder data.
# **Does the life expectancy depend upon the GDP per capita?**
# 1) Have a glance at the data. (tip: `sample_n(df, n)`)
# 2) Produce a scattep plot: what does it tell you?
# 3) Fit a regression model: is there a relationship? How strong is it?
# Is the relationship linear? What conclusion(s) can you draw?
# 4) What are the other questions you could ask; could you provide the answers to them?
## Possible Solution: code Q1; sample
# sample_n()
## Possible Solution: code Q2; Plot the data;
# ggplot(gapminder, aes(x = gdpPercap, y = lifeExp)) +
# geom_point(alpha = 0.2, shape = 21, fill = "blue", colour="black", size = 5) +
# geom_smooth(method = "lm", se = F, col = "maroon3") +
# geom_smooth(method = "loess", se = F, col = "limegreen")
## Possible Solution: code Q3; simple regression model
# my.model <- lm()
# summary(my.model)
## Adding layers to your ggplot()
ggplot(gapminder, aes(x = gdpPercap, y = lifeExp, col = "red")) +
geom_point(alpha = 0.2, shape = 21, fill = "blue", colour="black", size = 5) +
geom_smooth(method = "lm", se = F, col = "maroon3") +
geom_smooth(method = "loess", se = F, col = "limegreen") +
labs (title= "Life Exp. vs. Population Size",
x = "population", y = "Life Exp.") +
theme(legend.position = "none",
panel.border = element_rect(fill = NA,
colour = "black",
size = .75),
plot.title=element_text(hjust=0.5)) +
geom_text(x = 80000, y = 125, label = "regression line", col = "maroon3") +
geom_text(x = 90000, y = 75, label = "smooth line", col = "limegreen")
## **There is a challenge:**
# - `dplyr`'s `group_by()` function enables you to group your data. It allows you to create a separate df that splits the original df by a variable.
# - `boxplot()` function produces boxplot(s) of the given (grouped) values.
# Knowing about `group_by()` and `boxplot()` function, coud you compute the median life expectancy for year 2007 by continent and visualise your result?
## Possible Solution:
# gapminder %>%
## Possible Solution:
# visualise the information code
# ggplot(gapminder, aes(x = continent, y = lifeExp)) +
# geom_boxplot(outlier.colour = "hotpink") +
# geom_jitter(position = position_jitter(width = 0.1, height = 0), alpha = .2) +
# labs (title= "Life Exp. vs. Continent",
# x = "Continent", y = "Life Exp.") +
# theme(legend.position = "none",
# panel.border = element_rect(fill = NA,
# colour = "black",
# size = .75),
# plot.title=element_text(hjust=0.5))
##Let's do Elain's Dance!!! 😃🎵🎶
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen-namespace-docs.R,
% R/gen-namespace-examples.R
\name{torch_repeat_interleave}
\alias{torch_repeat_interleave}
\title{Repeat_interleave}
\arguments{
\item{input}{(Tensor) the input tensor.}
\item{repeats}{(Tensor or int) The number of repetitions for each element. repeats is broadcasted to fit the shape of the given axis.}
\item{dim}{(int, optional) The dimension along which to repeat values. By default, use the flattened input array, and return a flat output array.}
}
\description{
Repeat_interleave
}
\section{repeat_interleave(input, repeats, dim=None) -> Tensor }{
Repeat elements of a tensor.
}
\section{Warning}{
\preformatted{This is different from `torch_Tensor.repeat` but similar to ``numpy.repeat``.
}
}
\section{repeat_interleave(repeats) -> Tensor }{
If the \code{repeats} is \verb{tensor([n1, n2, n3, ...])}, then the output will be
\verb{tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])} where \code{0} appears \code{n1} times,
\code{1} appears \code{n2} times, \code{2} appears \code{n3} times, etc.
}
\examples{
\dontrun{
x = torch_tensor(c(1, 2, 3))
x$repeat_interleave(2)
y = torch_tensor(matrix(c(1, 2, 3, 4), ncol = 2, byrow=TRUE))
torch_repeat_interleave(y, 2)
torch_repeat_interleave(y, 3, dim=1)
torch_repeat_interleave(y, torch_tensor(c(1, 2)), dim=0)
}
}
|
/man/torch_repeat_interleave.Rd
|
permissive
|
qykong/torch
|
R
| false
| true
| 1,397
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen-namespace-docs.R,
% R/gen-namespace-examples.R
\name{torch_repeat_interleave}
\alias{torch_repeat_interleave}
\title{Repeat_interleave}
\arguments{
\item{input}{(Tensor) the input tensor.}
\item{repeats}{(Tensor or int) The number of repetitions for each element. repeats is broadcasted to fit the shape of the given axis.}
\item{dim}{(int, optional) The dimension along which to repeat values. By default, use the flattened input array, and return a flat output array.}
}
\description{
Repeat_interleave
}
\section{repeat_interleave(input, repeats, dim=None) -> Tensor }{
Repeat elements of a tensor.
}
\section{Warning}{
\preformatted{This is different from `torch_Tensor.repeat` but similar to ``numpy.repeat``.
}
}
\section{repeat_interleave(repeats) -> Tensor }{
If the \code{repeats} is \verb{tensor([n1, n2, n3, ...])}, then the output will be
\verb{tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])} where \code{0} appears \code{n1} times,
\code{1} appears \code{n2} times, \code{2} appears \code{n3} times, etc.
}
\examples{
\dontrun{
x = torch_tensor(c(1, 2, 3))
x$repeat_interleave(2)
y = torch_tensor(matrix(c(1, 2, 3, 4), ncol = 2, byrow=TRUE))
torch_repeat_interleave(y, 2)
torch_repeat_interleave(y, 3, dim=1)
torch_repeat_interleave(y, torch_tensor(c(1, 2)), dim=0)
}
}
|
#load libraries
library(quantreg)
library(glmnet)
library(magrittr)
library(purrr)
#load data
#data.half <- readRDS()
#full.data <- readRDS("/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/fulldata_091620.RData")
half.data <- readRDS("/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/500_data_10052020.RData")
#adaptive lasso function with two-way CV for selecting both lambda and nu/gamma
adalasso.sim.fnct <- function(data) {
        # Fit an adaptive lasso to one simulated dataset, cross-validating
        # two tuning parameters: lambda (inner CV via cv.glmnet) and the
        # adaptive-weight exponent nu/gamma (outer grid search, keeping the
        # nu whose model attains the smallest CV prediction error). Ridge
        # regression coefficients supply the adaptive weights.
        #
        # `data` is one simulation cell: list with X (predictor matrix),
        # Y (response), conditions (n, p, eta.x, eta.y, g, h, seed) and
        # seeds (column "seed.2" seeds the ridge fit).
        # Returns a list with per-nu MPEs, the seeds used, the winning model
        # and an `important` summary (diagnostics, coefs, weights, info row).
        #create simulation tracker
        tracker <- as.vector(unlist(data$conditions))
        #print tracker of status
        cat("n = " , tracker[1] , " , p = " , tracker[2] ,
            " , eta.x = " , tracker[3] , " , eta.y = " , tracker[4] ,
            " , g = " , tracker[5] , " , h = " , tracker[6] ,
            ";\n")
        #load X, Y, and p
        X <- data$X
        Y <- data$Y
        p <- data$conditions$p
        seed.ridge <- data$seeds[ , "seed.2"]
        set.seed(seed.ridge)
        #ridge coefs for weighting (alpha = 0 => ridge); [-1] drops the intercept
        lambda.try <- exp(seq(log(0.01) , log(1400) , length.out = 100))
        ridge.model <- cv.glmnet(x = X , y = Y , lambda = lambda.try , alpha = 0)
        lambda.ridge.opt <- ridge.model$lambda.min
        best.ridge.coefs <- predict(ridge.model , type = "coefficients" ,
                                    s = lambda.ridge.opt)[-1]
        ##grid of nu/gamma values to try (log-spaced on [0.01, 10])
        nu.try <- exp(seq(log(0.01) , log(10) , length.out = 100))
        #seed.pre.nu <- data$seeds[ , "seed.3"]
        #set.seed(seed.pre.nu)
        #seed.nu <- sample(rnorm(n = 1000000000) , size = length(nu.try) , replace = FALSE)
        ##initialize list of best adalasso results from each nu/gamma
        adalasso.nu.cv <- list()
        # Refit the adaptive lasso once per candidate nu, recording the model
        # and its CV metrics so the minimizing nu can be picked afterwards.
        for(i in 1:length(nu.try)) {
                #seed <- seed.nu[i]
                #set.seed(seed)
                #single adaptive lasso run with ridge weighting and nu = nu.try[i]
                adalasso.model <- cv.glmnet(X , Y , family = "gaussian" ,
                                            lambda = lambda.try ,
                                            penalty.factor = 1 / abs(best.ridge.coefs)^nu.try[i])
                lambda.adalasso.opt <- adalasso.model$lambda.min
                best.adalasso.coefs <- predict(adalasso.model , type = "coefficients" ,
                                               s = lambda.adalasso.opt)[-1]
                # NOTE(review): fpr/fnr below assume predictors 1:4 are the true
                # signal and 5:p are noise -- confirm against the data generator.
                adalasso.nu.cv[[i]] <- list(model = list(full.model = adalasso.model ,
                                                         lambda = lambda.adalasso.opt ,
                                                         coefs = best.adalasso.coefs) ,
                                            metrics_and_info = list(model.seed.ridge = seed.ridge ,
                                                                    #model.seed.prenu = seed.pre.nu ,
                                                                    #model.seed.nu = seed ,
                                                                    ridge.coefs = best.ridge.coefs ,
                                                                    weights = 1 / abs(best.ridge.coefs)^nu.try[i] ,
                                                                    nu = nu.try[i] ,
                                                                    lambda = lambda.adalasso.opt ,
                                                                    coefs = best.adalasso.coefs ,
                                                                    mpe = adalasso.model$cvm[which(adalasso.model$lambda == lambda.adalasso.opt)] ,
                                                                    mpe.sd = adalasso.model$cvsd[which(adalasso.model$lambda == lambda.adalasso.opt)] ,
                                                                    fpr = length(which(best.adalasso.coefs[c(5:p)] != 0)) / length(best.adalasso.coefs[c(5:p)]) ,
                                                                    fnr = length(which(best.adalasso.coefs[c(1:4)] == 0)) / length(best.adalasso.coefs[1:4])))
        }
        #find minimizing nu/gamma
        adalasso.nu.cv.mpe <- numeric()
        adalasso.seeds.ridge <- numeric()
        #adalasso.seeds.prenu <- numeric()
        #adalasso.seeds.nu <- numeric()
        for(i in 1:length(adalasso.nu.cv)) {
                adalasso.nu.cv.mpe[i] <- adalasso.nu.cv[[i]]$metrics_and_info$mpe
                adalasso.seeds.ridge[i] <- adalasso.nu.cv[[i]]$metrics_and_info$model.seed.ridge
                #adalasso.seeds.prenu[i] <- adalasso.nu.cv[[i]]$metrics_and_info$model.seed.prenu
                #adalasso.seeds.nu[i] <- adalasso.nu.cv[[i]]$metrics_and_info$model.seed.nu
        }
        #return(adalasso.nu.cv[[which.min(adalasso.nu.cv.mpe)]])
        #store BEST adalasso result plus all seeds
        ###below is used to check that seeds are regenerated properly and not uniform
        return(list(mpes = adalasso.nu.cv.mpe ,
                    seeds.ridge = adalasso.seeds.ridge ,
                    #seeds.prenu = adalasso.seeds.prenu ,
                    #seeds.nu = adalasso.seeds.nu ,
                    model = adalasso.nu.cv[[which.min(adalasso.nu.cv.mpe)]] ,
                    important = list(diagnostics = data.frame(cbind(data.seed = tracker[7] ,
                                                                    model.seed.ridge = adalasso.nu.cv[[which.min(adalasso.nu.cv.mpe)]]$metrics_and_info$model.seed.ridge)) ,
                                     #model.seed.prenu = adalasso.nu.cv[[which.min(adalasso.nu.cv.mpe)]]$metrics_and_info$model.seed.prenu ,
                                     #model.seed.nu = adalasso.nu.cv[[which.min(adalasso.nu.cv.mpe)]]$metrics_and_info$model.seed.nu)) ,
                                     coefs = adalasso.nu.cv[[which.min(adalasso.nu.cv.mpe)]]$metrics_and_info$coefs ,
                                     weights = adalasso.nu.cv[[which.min(adalasso.nu.cv.mpe)]]$metrics_and_info$weights ,
                                     info = data.frame(cbind(n = tracker[1] ,
                                                             p = tracker[2] ,
                                                             eta.x = tracker[3] ,
                                                             eta.y = tracker[4] ,
                                                             g = tracker[5] ,
                                                             h = tracker[6] ,
                                                             data.seed = tracker[7] ,
                                                             model.seed.ridge = adalasso.nu.cv[[which.min(adalasso.nu.cv.mpe)]]$metrics_and_info$model.seed.ridge ,
                                                             #model.seed.prenu = adalasso.nu.cv[[which.min(adalasso.nu.cv.mpe)]]$metrics_and_info$model.seed.prenu ,
                                                             #model.seed.nu = adalasso.nu.cv[[which.min(adalasso.nu.cv.mpe)]]$metrics_and_info$model.seed.nu ,
                                                             lambda = adalasso.nu.cv[[which.min(adalasso.nu.cv.mpe)]]$metrics_and_info$lambda ,
                                                             nu = adalasso.nu.cv[[which.min(adalasso.nu.cv.mpe)]]$metrics_and_info$nu ,
                                                             mpe = adalasso.nu.cv[[which.min(adalasso.nu.cv.mpe)]]$metrics_and_info$mpe ,
                                                             mpe.sd = adalasso.nu.cv[[which.min(adalasso.nu.cv.mpe)]]$metrics_and_info$mpe.sd ,
                                                             fpr = adalasso.nu.cv[[which.min(adalasso.nu.cv.mpe)]]$metrics_and_info$fpr ,
                                                             fnr = adalasso.nu.cv[[which.min(adalasso.nu.cv.mpe)]]$metrics_and_info$fnr
                                                             )
                                                       )
                                     )
                          )
               )
}
#run across full dataset
#run across full dataset
adalasso.half <- half.data %>%
map(safely(adalasso.sim.fnct))
saveRDS(adalasso.half , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Full_results/adalasso_500.RData")
{#dealing with error/result from map(safely())
#create empty lists for error + result
#adalasso.error <- list()
#adalasso.result <- list()
#adalasso.final <- list()
#split data into separate error and result lists
#for(i in 1:length(adalasso.half)) {
#iteration tracker
# cat("i = " , i , "\n")
#fill error list
# adalasso.error[[i]] <- list(error = adalasso.half[[i]]$error ,
# condition = as.data.frame(unlist(testing10.data[[i]]$condition) ,
# n = n , p = p ,
# eta.x = eta.x , eta.y = eta.y ,
# g = g , h = h , seed = seed))
#fill in results if results aren't NULL from safely()
# adalasso.result[[i]] <- adalasso.half[[i]]$result
#fill final list
# if(!is.null(adalasso.half[[i]]$result)) {
# adalasso.final[[i]] <- adalasso.half[[i]]$result$important
# } else {
# adalasso.final[[i]] <- adalasso.error[[i]]
# }
#}
#combine diagnostics
#diagnostics <- data.frame(matrix(ncol = 4 , nrow = length(full.data)))
#colnames(diagnostics) <- c("data.seed" , "model.seed.ridge" , "model.seed.prenu" , "model.seed.nu")
#for(i in 1:length(adalasso.final)) {
# diagnostics[i , "data.seed"] <- adalasso.final[[i]]$diagnostics$data.seed
# diagnostics[i , "model.seed.ridge"] <- adalasso.final[[i]]$diagnostics$model.seed.ridge
# diagnostics[i , "model.seed.prenu"] <- adalasso.final[[i]]$diagnostics$model.seed.prenu
# diagnostics[i , "model.seed.nu"] <- adalasso.final[[i]]$diagnostics$model.seed.nu
#}
#save files
#saveRDS(adalasso.result , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Model_Storage/adalasso_result_DEBUG.RData")
#saveRDS(adalasso.error , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Error_Storage/adalasso_error_DEBUG.RData")
#saveRDS(adalasso.final , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/MainResults_Storage/adalasso_resultmain_DEBUG.RData")
#saveRDS(diagnostics , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Diagnostics_Storage/adalasso_diagnostics_DEBUG.RData")
}
|
/Model_Application/Full_Run/AdaLasso_500.R
|
no_license
|
multach87/Dissertation
|
R
| false
| false
| 10,691
|
r
|
#load libraries
library(quantreg)
library(glmnet)
library(magrittr)
library(purrr)
#load data
#data.half <- readRDS()
#full.data <- readRDS("/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/fulldata_091620.RData")
half.data <- readRDS("/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/500_data_10052020.RData")
#adaptive lasso function with two-way CV for selecting both lambda and nu/gamma
#
# Fits an adaptive lasso to one simulated dataset:
#   1. ridge regression (cv.glmnet, alpha = 0) supplies coefficient estimates
#      used to build adaptive weights 1 / |beta_ridge|^nu;
#   2. for each nu on a log grid, a weighted lasso is tuned over lambda by CV;
#   3. the (nu, lambda) pair with the smallest CV mean prediction error wins.
#
# `data` is one simulation replicate: a list with $X, $Y, $conditions
# (n, p, eta.x, eta.y, g, h, data seed) and $seeds (column "seed.2" holds the
# model seed for the ridge step).
#
# Returns a list of the per-nu MPEs, the seeds used, the winning fit, and an
# `important` sub-list (diagnostics, coefficients, weights, summary info row).
#
# Improvement over the original: the winning index
# which.min(adalasso.nu.cv.mpe) was recomputed ~15 times while assembling the
# return value; it is now computed once and reused, and the extraction loops
# are replaced with vapply.
adalasso.sim.fnct <- function(data) {
  #create simulation tracker
  tracker <- as.vector(unlist(data$conditions))
  #print tracker of status
  cat("n = " , tracker[1] , " , p = " , tracker[2] ,
      " , eta.x = " , tracker[3] , " , eta.y = " , tracker[4] ,
      " , g = " , tracker[5] , " , h = " , tracker[6] ,
      ";\n")
  #load X, Y, and p
  X <- data$X
  Y <- data$Y
  p <- data$conditions$p
  seed.ridge <- data$seeds[ , "seed.2"]
  set.seed(seed.ridge)
  #ridge coefs for weighting
  lambda.try <- exp(seq(log(0.01) , log(1400) , length.out = 100))
  ridge.model <- cv.glmnet(x = X , y = Y , lambda = lambda.try , alpha = 0)
  lambda.ridge.opt <- ridge.model$lambda.min
  best.ridge.coefs <- predict(ridge.model , type = "coefficients" ,
                              s = lambda.ridge.opt)[-1]
  ##grid of nu/gamma values to try
  nu.try <- exp(seq(log(0.01) , log(10) , length.out = 100))
  ##fit one CV-tuned adaptive lasso per candidate nu/gamma
  adalasso.nu.cv <- vector("list" , length(nu.try))
  for(i in seq_along(nu.try)) {
    adalasso.model <- cv.glmnet(X , Y , family = "gaussian" ,
                                lambda = lambda.try ,
                                penalty.factor = 1 / abs(best.ridge.coefs)^nu.try[i])
    lambda.adalasso.opt <- adalasso.model$lambda.min
    best.adalasso.coefs <- predict(adalasso.model , type = "coefficients" ,
                                   s = lambda.adalasso.opt)[-1]
    #fpr/fnr assume the first 4 predictors are the true signal variables and
    #columns 5:p are noise (simulation design) -- TODO confirm against the
    #data-generation script
    adalasso.nu.cv[[i]] <- list(model = list(full.model = adalasso.model ,
                                             lambda = lambda.adalasso.opt ,
                                             coefs = best.adalasso.coefs) ,
                                metrics_and_info = list(model.seed.ridge = seed.ridge ,
                                                        ridge.coefs = best.ridge.coefs ,
                                                        weights = 1 / abs(best.ridge.coefs)^nu.try[i] ,
                                                        nu = nu.try[i] ,
                                                        lambda = lambda.adalasso.opt ,
                                                        coefs = best.adalasso.coefs ,
                                                        mpe = adalasso.model$cvm[which(adalasso.model$lambda == lambda.adalasso.opt)] ,
                                                        mpe.sd = adalasso.model$cvsd[which(adalasso.model$lambda == lambda.adalasso.opt)] ,
                                                        fpr = length(which(best.adalasso.coefs[c(5:p)] != 0)) / length(best.adalasso.coefs[c(5:p)]) ,
                                                        fnr = length(which(best.adalasso.coefs[c(1:4)] == 0)) / length(best.adalasso.coefs[1:4])))
  }
  #extract per-nu CV error and the ridge seed used for each fit
  adalasso.nu.cv.mpe <- vapply(adalasso.nu.cv ,
                               function(fit) fit$metrics_and_info$mpe ,
                               numeric(1))
  adalasso.seeds.ridge <- vapply(adalasso.nu.cv ,
                                 function(fit) fit$metrics_and_info$model.seed.ridge ,
                                 numeric(1))
  #find minimizing nu/gamma once; reuse everywhere below
  best.idx <- which.min(adalasso.nu.cv.mpe)
  best.fit <- adalasso.nu.cv[[best.idx]]
  best.info <- best.fit$metrics_and_info
  #store BEST adalasso result plus all seeds
  ###mpes/seeds are returned in full to check that seeds are regenerated
  ###properly and not uniform across the nu grid
  return(list(mpes = adalasso.nu.cv.mpe ,
              seeds.ridge = adalasso.seeds.ridge ,
              model = best.fit ,
              important = list(diagnostics = data.frame(cbind(data.seed = tracker[7] ,
                                                              model.seed.ridge = best.info$model.seed.ridge)) ,
                               coefs = best.info$coefs ,
                               weights = best.info$weights ,
                               info = data.frame(cbind(n = tracker[1] ,
                                                       p = tracker[2] ,
                                                       eta.x = tracker[3] ,
                                                       eta.y = tracker[4] ,
                                                       g = tracker[5] ,
                                                       h = tracker[6] ,
                                                       data.seed = tracker[7] ,
                                                       model.seed.ridge = best.info$model.seed.ridge ,
                                                       lambda = best.info$lambda ,
                                                       nu = best.info$nu ,
                                                       mpe = best.info$mpe ,
                                                       mpe.sd = best.info$mpe.sd ,
                                                       fpr = best.info$fpr ,
                                                       fnr = best.info$fnr
                                                       )
                                                 )
                               )
              )
         )
}
# Run the adaptive-lasso simulation across every replicate in the half sample.
# safely() captures per-replicate errors into $error instead of aborting the
# whole batch, so one bad dataset cannot sink hours of computation.
adalasso.half <- map(half.data , safely(adalasso.sim.fnct))
saveRDS(adalasso.half , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Full_results/adalasso_500.RData")
{# Post-processing scaffolding for splitting map(safely()) output into error /
 # result / final lists and collecting seed diagnostics. Currently disabled;
 # retained verbatim for a later run.
#create empty lists for error + result
#adalasso.error <- list()
#adalasso.result <- list()
#adalasso.final <- list()
#split data into separate error and result lists
#for(i in 1:length(adalasso.half)) {
#iteration tracker
# cat("i = " , i , "\n")
#fill error list
# adalasso.error[[i]] <- list(error = adalasso.half[[i]]$error ,
# condition = as.data.frame(unlist(testing10.data[[i]]$condition) ,
# n = n , p = p ,
# eta.x = eta.x , eta.y = eta.y ,
# g = g , h = h , seed = seed))
#fill in results if results aren't NULL from safely()
# adalasso.result[[i]] <- adalasso.half[[i]]$result
#fill final list
# if(!is.null(adalasso.half[[i]]$result)) {
# adalasso.final[[i]] <- adalasso.half[[i]]$result$important
# } else {
# adalasso.final[[i]] <- adalasso.error[[i]]
# }
#}
#combine diagnostics
#diagnostics <- data.frame(matrix(ncol = 4 , nrow = length(full.data)))
#colnames(diagnostics) <- c("data.seed" , "model.seed.ridge" , "model.seed.prenu" , "model.seed.nu")
#for(i in 1:length(adalasso.final)) {
# diagnostics[i , "data.seed"] <- adalasso.final[[i]]$diagnostics$data.seed
# diagnostics[i , "model.seed.ridge"] <- adalasso.final[[i]]$diagnostics$model.seed.ridge
# diagnostics[i , "model.seed.prenu"] <- adalasso.final[[i]]$diagnostics$model.seed.prenu
# diagnostics[i , "model.seed.nu"] <- adalasso.final[[i]]$diagnostics$model.seed.nu
#}
#save files
#saveRDS(adalasso.result , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Model_Storage/adalasso_result_DEBUG.RData")
#saveRDS(adalasso.error , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Error_Storage/adalasso_error_DEBUG.RData")
#saveRDS(adalasso.final , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/MainResults_Storage/adalasso_resultmain_DEBUG.RData")
#saveRDS(diagnostics , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Diagnostics_Storage/adalasso_diagnostics_DEBUG.RData")
}
|
# Bootstrap dependency management: install pacman on first run, then use it to
# install-and-attach every runtime dependency in one call.
# Fix: the original listed `fs` twice in p_load; the duplicate is removed.
if (!require("pacman")) install.packages("pacman")
pacman::p_load(shinydashboard, plotly, fs, dplyr, stringr, lubridate)
source("helper.R")
source("config.R")
# read dataframes and ALL resulting model objects as stored from model training as training data foundation
load("models/models.Rda")
# read metadata from model evaluation/selection, incl. the reference to highest performing ones
df_model_results <- read.csv("models.csv")
# Forecast closing prices for `ticker` over `no_days` days using the model
# named by `target_model`, training on the most recent `no_back_months`
# months of history. The sentinel ticker "DEFAULT" falls back to AMZN + LM.
# Returns the combined actuals + predictions frame from predict_on_xts().
predict_data <- function(ticker="", target_model, no_days=6, no_back_months=20){
if (ticker == "DEFAULT"){
ticker <- "AMZN"
target_model <- "LM"
}
history <- load_data(ticker)
window_start <- now() %m-% months(no_back_months)
# xts range key "YYYYMMDD/" means "from this date onwards"
from_key <- paste0(year(window_start),
str_pad(month(window_start), 2, "left", "0"),
str_pad(day(window_start), 2, "left", "0"), "/")
windowed <- history[from_key]
model_input <- prepare_xts(extend_xts(windowed, no_days))
predict_on_xts(in_stock_data=model_input, no_days=no_days, ticker=ticker, model_type=target_model)
}
# Load the training data foundation for a ticker as a plain data frame.
# NOTE(review): original marks this as a placeholder -- the loaded series is a
# stand-in to be replaced by the actual data captured at model-training time.
get_data_foundation <- function(ticker=""){
convert_xts_to_df(load_data(ticker), ticker)
}
# func to return meta data on ticker from model training
# Returns the row(s) of df_model_results matching `ticker`, or NULL for an
# empty ticker.
# Bug fix: the original guard used a bare `return` (no parentheses), which in
# R does NOT exit the function -- it merely evaluates to the `return` function
# object and falls through to the lookup. `return(NULL)` actually
# short-circuits with an empty result.
get_stored_model_results <- function(ticker=""){
if (ticker == ""){
return(NULL)
}
res <- df_model_results[df_model_results$Ticker == ticker,]
res
}
# default data: pre-compute the fallback forecast (AMZN + LM) at startup.
# Also serves as a module-level cache that the dashboard chart later
# refreshes via `<<-` inside the server.
df_pred_data <- predict_data("DEFAULT")
# ramp up the server: wires all dashboard and model-evaluation outputs.
server <- function(input, output) {
set.seed(122)
# NOTE(review): histdata is never referenced below -- looks like leftover
# from the shinydashboard template; confirm before removing.
histdata <- rnorm(500)
# Reactive forecast: re-runs predict_data() whenever the ticker, model
# choice, forecast dates, or history window change.
get_pred_data <- reactive({
ticker <- input$predictTicker
modelVar <- input$modelTypeSelect
dates <- input$forecastingDates
prev_hist_months <- input$displayLastMonth
# get data by ticker and load models --> execute predictions
# "AUTO" delegates model choice to the best model stored for the ticker.
if (input$modelTypeSelect == "AUTO"){
sel_model_results <- get_stored_model_results(input$predictTicker)
target_model_type <- sel_model_results$ModelType
}else{
target_model_type <- input$modelTypeSelect
}
# Forecast horizon = inclusive day count of the selected date range.
start_date <- input$forecastingDates[1]
end_date <- input$forecastingDates[2]
no_days <- as.numeric(as.Date(as.character(end_date), format="%Y-%m-%d")-as.Date(as.character(start_date), format="%Y-%m-%d")) + 1
predict_data(ticker, target_model_type, no_days=no_days, no_back_months=prev_hist_months)
})
##TAB: dashboard
# Line chart of actuals vs. forecast; also refreshes the module-level
# df_pred_data cache (note the `<<-`).
output$predictChartLy <- renderPlotly({
df_pred_data <<- get_pred_data()
# separate act/preds into two traces for formatting reasons
d_a <- df_pred_data[df_pred_data$DataType == 'actuals',]
d_p <- df_pred_data[df_pred_data$DataType == 'prediction',]
p <- plot_ly(d_a, x = ~Date, y = ~Close, name = 'Actuals', type = 'scatter', mode = 'lines', source = "subset") %>%
add_trace(data = d_p, y = ~Close, name = 'Prediction', mode = 'lines', line = list(color = 'rgb(205, 12, 24)', width = 2, dash = 'dash'))
p
})
# Value box: currently selected ticker symbol.
output$predictTickerSelected <- renderValueBox({
valueBox(
paste0(input$predictTicker), "STOCK SELECTED", icon = icon("list"),
color = "blue"
)
})
# Value box: row counts of actual vs. predicted days in the current data.
output$predictTickerDataAvailable <- renderValueBox({
d <- get_pred_data()
valueBox(
paste0(length(d[d$DataType == 'actuals',1]), "/", length(d[d$DataType == 'prediction',1]), " DAYS"), "ACT. / PRED.", icon = icon("database"),
color = "blue"
)
})
# Value box: which model type is in effect (resolving "AUTO" to the stored
# best model for the ticker).
output$predictTickerModelSelected <- renderValueBox({
# NOTE(review): this lookup is repeated inside the AUTO branch below --
# redundant but harmless.
sel_model_results <- get_stored_model_results(input$predictTicker)
if(input$modelTypeSelect == "AUTO"){
# get the one which is stored as best performing model for ticker
sel_model_results <- get_stored_model_results(input$predictTicker)
display <- sel_model_results$ModelType
}else{
sel_model_results <- input$modelTypeSelect
display <- sel_model_results
}
valueBox(
paste0(display), "MODEL", icon = icon("microchip"),
color = "blue"
)
})
# Value box: stored accuracy metric for the ticker's trained model.
output$predictTickerModel <- renderValueBox({
sel_model_results <- get_stored_model_results(input$predictTicker)
valueBox(
paste0(format_accuracy(type=sel_model_results$ModelAccuracyMetricFormat, sel_model_results$ModelAccuracy)), sel_model_results$ModelAccuracyMetric, icon = icon("balance-scale"),
color = "blue"
)
})
# Data table of the forecast, newest date first, with formatted prices.
output$outPredTable <- renderDataTable({
dates <- input$forecastingDates
prev_hist_months <- input$displayLastMonth
out <- get_pred_data() %>%
mutate(
Date = as.Date(Date),
ClosingPrice = scales::dollar_format(negative_parens = TRUE)(Close),
ModelType = input$modelTypeSelect,
Ticker = input$predictTicker
) %>%
select("Date", "ClosingPrice", "DataType", "ModelType", "Ticker") %>% arrange(desc(Date))
out
})
# CSV export of the raw (unformatted) forecast data.
output$downloadData <- downloadHandler(
filename = function() {
paste('data-stock-pred-', Sys.Date(), '.csv', sep='')
},
content = function(file) {
write.csv(get_pred_data(), file, row.names = FALSE)
}
)
## TAB: model evaluation
# Placeholder copy for the approach description.
output$txt_gen_approach <- renderText({"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean vitae leo in tellus imperdiet posuere. In fringilla neque faucibus velit vulputate, venenatis congue dolor gravida. Quisque posuere viverra cursus. Duis sapien metus, dapibus et tristique non, egestas eget dui. Ut et ante tortor. Aliquam erat volutpat. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Etiam felis quam, ullamcorper a rutrum id, tristique a tortor. Duis sem turpis, interdum in euismod at, ornare vel massa. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Donec viverra rhoncus aliquet. Praesent arcu turpis, volutpat nec risus in, cursus vehicula urna. Proin in tristique libero. Nam eleifend metus a fermentum ornare. Donec mauris mauris, sagittis nec leo tincidunt, pretium venenatis turpis. Morbi mattis ultricies purus, vitae scelerisque justo vulputate eu. Aliquam bibendum tellus sed lacinia dictum. Aliquam erat volutpat. Phasellus faucibus pretium nunc dictum condimentum. Duis congue mattis nisi volutpat tincidunt. Mauris tincidunt purus non lacus fermentum tempor. Sed bibendum vitae urna vitae porttitor. Suspendisse erat ipsum, viverra a tincidunt ut, venenatis eget orci. Mauris id ante eget massa iaculis varius a euismod velit. Pellentesque in felis quis odio rhoncus fermentum sed vitae turpis. Etiam et suscipit lorem, non posuere purus. Nullam semper eleifend metus ut consequat. Cras auctor mi sapien, at consequat lacus semper ut. Curabitur ornare convallis dui vitae vehicula. Sed congue quam eu consectetur accumsan. Curabitur non auctor magna."})
# Chart of the training data foundation for the evaluation ticker.
output$modEvalChart <- renderPlotly({
ticker <- input$modEvalTicker
df_pred_data <- get_data_foundation(ticker)
p <- plot_ly(df_pred_data, x = ~Date, y = ~Close, name = 'Actuals', type = 'scatter', mode = 'lines', source = "subset")
p
})
# NOTE(review): `ticker` is assigned but unused here; summary() runs on the
# module-level df_pred_data cache, not the evaluation ticker -- confirm
# whether that is intended.
output$txt_mod_data_summary <- renderText({
ticker <- input$modEvalTicker
summary(df_pred_data)
})
# Embed the per-ticker evaluation report PDF in an iframe.
output$pdfviewer <- renderText({
url <- paste0("Stock_Prediction_", input$modEvalTicker, "--Updated.pdf")
return(paste('<iframe style="height:600px; width:100%" src="', url, '"></iframe>', sep = ""))
})
}
|
/server.R
|
no_license
|
justusfowl/ddmr
|
R
| false
| false
| 7,889
|
r
|
if (!require("pacman")) install.packages("pacman")
pacman::p_load(shinydashboard, plotly, fs, dplyr, stringr, lubridate, fs)
source("helper.R")
source("config.R")
# read dataframes and ALL resulting model objects as stored from model training as training data foundation
load("models/models.Rda")
# read metadata from model evaluation/selection, incl. the reference to highest performing ones
df_model_results <- read.csv("models.csv")
# func to predict closing prices for given timeframe
# no_days -> number of days for which the prediction should be undertaken
predict_data <- function(ticker="", target_model, no_days=6, no_back_months=20){
if (ticker == "DEFAULT"){
ticker <- "AMZN"
target_model <- "LM"
}
data <- load_data(ticker)
now_date <- now()
s_date <- now_date %m-% months(no_back_months)
start_date <- paste(year(s_date),str_pad(month(s_date), 2, "left", "0"),str_pad(day(s_date), 2, "left", "0"), "/",sep="")
data <- data[start_date]
raw_data <- extend_xts(data, no_days)
df_data_model <- prepare_xts(raw_data)
df_data_model_pred <- predict_on_xts(in_stock_data=df_data_model, no_days=no_days, ticker=ticker, model_type=target_model)
df_data_model_pred
}
# func to load the data foundation that existed for training
get_data_foundation <- function(ticker=""){
# !!!! dummy function / dummy data being created --> later replaced by actual data from model training !!!
out <- load_data(ticker)
out <- convert_xts_to_df(out, ticker)
out
}
# func to return meta data on ticker from model training
get_stored_model_results <- function(ticker=""){
if (ticker == ""){
return
}
res <- df_model_results[df_model_results$Ticker == ticker,]
res
}
# default data
df_pred_data <- predict_data("DEFAULT")
# ramp up the server
server <- function(input, output) {
set.seed(122)
histdata <- rnorm(500)
get_pred_data <- reactive({
ticker <- input$predictTicker
modelVar <- input$modelTypeSelect
dates <- input$forecastingDates
prev_hist_months <- input$displayLastMonth
# get data by ticker and load models --> execute predictions
if (input$modelTypeSelect == "AUTO"){
sel_model_results <- get_stored_model_results(input$predictTicker)
target_model_type <- sel_model_results$ModelType
}else{
target_model_type <- input$modelTypeSelect
}
start_date <- input$forecastingDates[1]
end_date <- input$forecastingDates[2]
no_days <- as.numeric(as.Date(as.character(end_date), format="%Y-%m-%d")-as.Date(as.character(start_date), format="%Y-%m-%d")) + 1
predict_data(ticker, target_model_type, no_days=no_days, no_back_months=prev_hist_months)
})
##TAB: dashboard
output$predictChartLy <- renderPlotly({
df_pred_data <<- get_pred_data()
# separate act/preds into two traces for formatting reasons
d_a <- df_pred_data[df_pred_data$DataType == 'actuals',]
d_p <- df_pred_data[df_pred_data$DataType == 'prediction',]
p <- plot_ly(d_a, x = ~Date, y = ~Close, name = 'Actuals', type = 'scatter', mode = 'lines', source = "subset") %>%
add_trace(data = d_p, y = ~Close, name = 'Prediction', mode = 'lines', line = list(color = 'rgb(205, 12, 24)', width = 2, dash = 'dash'))
p
})
output$predictTickerSelected <- renderValueBox({
valueBox(
paste0(input$predictTicker), "STOCK SELECTED", icon = icon("list"),
color = "blue"
)
})
output$predictTickerDataAvailable <- renderValueBox({
d <- get_pred_data()
valueBox(
paste0(length(d[d$DataType == 'actuals',1]), "/", length(d[d$DataType == 'prediction',1]), " DAYS"), "ACT. / PRED.", icon = icon("database"),
color = "blue"
)
})
output$predictTickerModelSelected <- renderValueBox({
sel_model_results <- get_stored_model_results(input$predictTicker)
if(input$modelTypeSelect == "AUTO"){
# get the one which is stored as best performing model for ticker
sel_model_results <- get_stored_model_results(input$predictTicker)
display <- sel_model_results$ModelType
}else{
sel_model_results <- input$modelTypeSelect
display <- sel_model_results
}
valueBox(
paste0(display), "MODEL", icon = icon("microchip"),
color = "blue"
)
})
output$predictTickerModel <- renderValueBox({
sel_model_results <- get_stored_model_results(input$predictTicker)
valueBox(
paste0(format_accuracy(type=sel_model_results$ModelAccuracyMetricFormat, sel_model_results$ModelAccuracy)), sel_model_results$ModelAccuracyMetric, icon = icon("balance-scale"),
color = "blue"
)
})
output$outPredTable <- renderDataTable({
dates <- input$forecastingDates
prev_hist_months <- input$displayLastMonth
out <- get_pred_data() %>%
mutate(
Date = as.Date(Date),
ClosingPrice = scales::dollar_format(negative_parens = TRUE)(Close),
ModelType = input$modelTypeSelect,
Ticker = input$predictTicker
) %>%
select("Date", "ClosingPrice", "DataType", "ModelType", "Ticker") %>% arrange(desc(Date))
out
})
output$downloadData <- downloadHandler(
filename = function() {
paste('data-stock-pred-', Sys.Date(), '.csv', sep='')
},
content = function(file) {
write.csv(get_pred_data(), file, row.names = FALSE)
}
)
## TAB: model evaluation
output$txt_gen_approach <- renderText({"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean vitae leo in tellus imperdiet posuere. In fringilla neque faucibus velit vulputate, venenatis congue dolor gravida. Quisque posuere viverra cursus. Duis sapien metus, dapibus et tristique non, egestas eget dui. Ut et ante tortor. Aliquam erat volutpat. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Etiam felis quam, ullamcorper a rutrum id, tristique a tortor. Duis sem turpis, interdum in euismod at, ornare vel massa. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Donec viverra rhoncus aliquet. Praesent arcu turpis, volutpat nec risus in, cursus vehicula urna. Proin in tristique libero. Nam eleifend metus a fermentum ornare. Donec mauris mauris, sagittis nec leo tincidunt, pretium venenatis turpis. Morbi mattis ultricies purus, vitae scelerisque justo vulputate eu. Aliquam bibendum tellus sed lacinia dictum. Aliquam erat volutpat. Phasellus faucibus pretium nunc dictum condimentum. Duis congue mattis nisi volutpat tincidunt. Mauris tincidunt purus non lacus fermentum tempor. Sed bibendum vitae urna vitae porttitor. Suspendisse erat ipsum, viverra a tincidunt ut, venenatis eget orci. Mauris id ante eget massa iaculis varius a euismod velit. Pellentesque in felis quis odio rhoncus fermentum sed vitae turpis. Etiam et suscipit lorem, non posuere purus. Nullam semper eleifend metus ut consequat. Cras auctor mi sapien, at consequat lacus semper ut. Curabitur ornare convallis dui vitae vehicula. Sed congue quam eu consectetur accumsan. Curabitur non auctor magna."})
output$modEvalChart <- renderPlotly({
ticker <- input$modEvalTicker
df_pred_data <- get_data_foundation(ticker)
p <- plot_ly(df_pred_data, x = ~Date, y = ~Close, name = 'Actuals', type = 'scatter', mode = 'lines', source = "subset")
p
})
output$txt_mod_data_summary <- renderText({
ticker <- input$modEvalTicker
summary(df_pred_data)
})
output$pdfviewer <- renderText({
url <- paste0("Stock_Prediction_", input$modEvalTicker, "--Updated.pdf")
return(paste('<iframe style="height:600px; width:100%" src="', url, '"></iframe>', sep = ""))
})
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power_eeg_bands.R
\name{power_eeg_bands}
\alias{power_eeg_bands}
\title{Get power values for EEG bands}
\usage{
power_eeg_bands(
eeg_signal,
sampling_frequency = 125,
max_frequency = 32,
num_sec_w = 5,
aggreg_level = 6
)
}
\arguments{
\item{eeg_signal}{EEG signal expressed in micro-Volts}
\item{sampling_frequency}{Sampling frequency of the EEG signal. This is
typically equal to 125Hz. Default value is 125.}
\item{max_frequency}{The maximum frequency for which the spectrum is being
calculated. Default value is 32.}
\item{num_sec_w}{number of seconds in a time window used to
obtain the Fourier coefficients. Typically, this number is 5}
\item{aggreg_level}{number of 5 second intervals used to aggregate
power. Typically, this number is 6 to ensure a 30 second
interval window (standard in EEG analysis)}
}
\value{
List containing the aggregated power values for each EEG band
}
\description{
Calculate power values for
each of the EEG bands:
Delta < 4
Theta >= 4 and < 8
Alpha >= 8 and < 14
Beta >= 14 and < 32
Gamma >= 32 and < 50
}
|
/man/power_eeg_bands.Rd
|
no_license
|
adigherman/EEGSpectralAnalysis
|
R
| false
| true
| 1,159
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power_eeg_bands.R
\name{power_eeg_bands}
\alias{power_eeg_bands}
\title{Get power values for EEG bands}
\usage{
power_eeg_bands(
eeg_signal,
sampling_frequency = 125,
max_frequency = 32,
num_sec_w = 5,
aggreg_level = 6
)
}
\arguments{
\item{eeg_signal}{EEG signal expressed in micro-Volts}
\item{sampling_frequency}{Sampling frequency of the EEG signal. This is
typically equal to 125Hz. Default value is 125.}
\item{max_frequency}{The maximum frequency for which the spectrum is being
calculated. Default value is 32.}
\item{num_sec_w}{number of seconds in a time window used to
obtain the Fourier coefficients. Typically, this number is 5}
\item{aggreg_level}{number of 5 second intervals used to aggregate
power. Typically, this number is 6 to ensure a 30 second
interval window (standard in EEG analysis)}
}
\value{
List containing the aggregated power values for each EEG band
}
\description{
Calculate power values for
each of the EEG bands:
Delta < 4
Theta >=4 and < 8
Alpha >= 8 and < 14
Beta >= 14 and < 32
Gamma >= 32 and < 50
}
|
library(glmnet)
# Fit an elastic-net model (alpha = 0.05) on the ReliefF-selected
# haematopoietic feature set and append the fitted path summary to the
# model log file.
# Fixes: (1) the result was bound to `glm`, masking stats::glm -- renamed;
# (2) `head=T` relied on partial argument matching and the fragile `T`
# shorthand -- spelled out as header=TRUE; (3) the sink() is now closed even
# if printing fails, so later console output is not silently swallowed.
mydata <- read.table("./TrainingSet/ReliefF/haematopoietic.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])  # predictors start at column 4
y <- as.matrix(mydata[, 1])               # response is the first column
set.seed(123)                             # reproducible CV folds
en_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.05,
                    family = "gaussian", standardize = FALSE)
sink('./Model/EN/ReliefF/haematopoietic/haematopoietic_022.txt', append = TRUE)
tryCatch(print(en_fit$glmnet.fit), finally = sink())
|
/Model/EN/ReliefF/haematopoietic/haematopoietic_022.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 377
|
r
|
library(glmnet)
mydata = read.table("./TrainingSet/ReliefF/haematopoietic.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.05,family="gaussian",standardize=FALSE)
sink('./Model/EN/ReliefF/haematopoietic/haematopoietic_022.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# Ensure the expected data files exist locally. If any listed file is
# missing, fetch the archive from `url` (unless `zip_file` is already
# cached) and unzip it into `output_dir`. Returns NULL invisibly when
# nothing needs doing.
get_data <- function(url, zip_file, data_files, output_dir="./data")
{
  if (!file.exists(output_dir))
    dir.create(output_dir)
  # file.exists() is vectorised: one call checks every expected file.
  file_missing <- any(!file.exists(data_files))
  if (file_missing)
  {
    if (!file.exists(zip_file))
    {
      print(paste("Downloading ", url))
      download.file(url, zip_file, method="curl")
    }
    print(paste("Unzipping ", zip_file))
    unzip(zip_file, exdir=output_dir)
  }
}
|
/get_data.R
|
no_license
|
cdated/JHUCleaningData
|
R
| false
| false
| 500
|
r
|
get_data <- function(url, zip_file, data_files, output_dir="./data")
{
if (!file.exists(output_dir))
dir.create(output_dir)
file_missing = FALSE
for (data_file in data_files)
{
if (!file.exists(data_file))
file_missing = TRUE
}
if (file_missing)
{
if (!file.exists(zip_file))
{
print(paste("Downloading ", url))
download.file(url, zip_file, method="curl")
}
print(paste("Unzipping ", zip_file))
unzip(zip_file, exdir=output_dir)
}
}
|
#' Value and Circulation of Currency
#'
#' This dataset contains, for the smaller bill denominations, the value of the bill and the total value in circulation. The source for these data is \emph{The World Almanac and Book of Facts 2014}.
#'
#' @format A data frame with 5 rows and 3 variables:
#' \describe{
#' \item{BillValue}{denomination}
#' \item{TotalCirculation}{total currency in circulation in U.S. dollars}
#' \item{NumberCirculation}{total number of bills in circulation}
#' }
"Currency"
|
/R/data-Currency.R
|
no_license
|
cran/sur
|
R
| false
| false
| 504
|
r
|
#' Value and Circulation of Currency
#'
#' This dataset contains, for the smaller bill denominations, the value of the bill and the total value in circulation. The source for these data is \emph{The World Almanac and Book of Facts 2014}.
#'
#' @format A data frame with 5 rows and 3 variables:
#' \describe{
#' \item{BillValue}{denomination}
#' \item{TotalCirculation}{total currency in circulation in U.S. dollars}
#' \item{NumberCirculation}{total number of bills in circulation}
#' }
"Currency"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1ModelEvaluationSliceSlice}
\alias{GoogleCloudAiplatformV1ModelEvaluationSliceSlice}
\title{GoogleCloudAiplatformV1ModelEvaluationSliceSlice Object}
\usage{
GoogleCloudAiplatformV1ModelEvaluationSliceSlice()
}
\value{
GoogleCloudAiplatformV1ModelEvaluationSliceSlice object
}
\description{
GoogleCloudAiplatformV1ModelEvaluationSliceSlice Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Definition of a slice.
}
\concept{GoogleCloudAiplatformV1ModelEvaluationSliceSlice functions}
|
/googleaiplatformv1.auto/man/GoogleCloudAiplatformV1ModelEvaluationSliceSlice.Rd
|
no_license
|
justinjm/autoGoogleAPI
|
R
| false
| true
| 647
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1ModelEvaluationSliceSlice}
\alias{GoogleCloudAiplatformV1ModelEvaluationSliceSlice}
\title{GoogleCloudAiplatformV1ModelEvaluationSliceSlice Object}
\usage{
GoogleCloudAiplatformV1ModelEvaluationSliceSlice()
}
\value{
GoogleCloudAiplatformV1ModelEvaluationSliceSlice object
}
\description{
GoogleCloudAiplatformV1ModelEvaluationSliceSlice Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Definition of a slice.
}
\concept{GoogleCloudAiplatformV1ModelEvaluationSliceSlice functions}
|
# Rank hospitals by `outcome` (ties broken alphabetically by the hospital
# name in column 2) and return the name at position `num`.
helper <- function(data, outcome, num){
  ranked <- order(outcome, data[[2]])
  data[[2]][ranked[num]]
}
# For every state, find the hospital at the given rank on 30-day mortality
# for `outcome` ("heart attack", "heart failure", or "pneumonia").
# `num` is a 1-based rank, or "best" / "worst". Returns a data frame with
# one row per state: (hospital, state); hospital is NA when the requested
# rank exceeds the number of hospitals with data in that state.
#
# Improvements over the original: the three copy-pasted per-outcome branches
# are collapsed into a single path driven by an outcome -> column lookup,
# and the outcome is validated before the (expensive) CSV read.
rankall <- function(outcome, num = "best") {
  ## Read outcome data
  ## Check that state and outcome are valid
  ## For each state, find the hospital of the given rank
  ## Return a data frame with the hospital names and the
  ## (abbreviated) state name
  # 30-day mortality column in the CSV for each supported outcome
  outcome_col <- c("heart attack"  = 11,
                   "heart failure" = 17,
                   "pneumonia"     = 23)
  if (!outcome %in% names(outcome_col)) {
    stop("invalid outcome")
  }
  data <- read.csv(file = "./data/outcome-of-care-measures.csv",
                   colClasses = "character")
  col <- outcome_col[[outcome]]
  state_arr <- sort(unique(data$State))
  hospital <- rep("", length(state_arr))
  for (i in seq_along(state_arr)) {
    goal <- data[data$State == state_arr[i], ]  # hospitals in this state
    rates <- as.numeric(goal[, col])            # "Not Available" -> NA (warning expected)
    len <- sum(!is.na(rates))                   # hospitals actually ranked
    if (num == "best") {
      hospital[i] <- helper(goal, rates, 1)
    } else if (num == "worst") {
      # order() places NA rates last, so index `len` is the worst ranked one
      hospital[i] <- helper(goal, rates, len)
    } else if (num > len) {
      hospital[i] <- NA                         # rank beyond available data
    } else {
      hospital[i] <- helper(goal, rates, num)
    }
  }
  data.frame(hospital = hospital, state = state_arr)
}
|
/coursera/compdata-004/Week3/rankall.R
|
no_license
|
wz125/course
|
R
| false
| false
| 2,868
|
r
|
helper <- function(data, outcome, num){
hospital <- data[, 2][order(outcome, data[, 2])[num]]
hospital
}
rankall <- function(outcome, num = "best") {
  ## Rank hospitals in every state by 30-day mortality for one outcome.
  ##
  ## outcome : one of "heart attack", "heart failure", "pneumonia"
  ## num     : "best", "worst", or a numeric rank within each state
  ##
  ## Returns a data frame with columns `hospital` and `state`, one row per
  ## state; `hospital` is NA when the requested rank exceeds that state's
  ## number of ranked hospitals.
  data <- read.csv(file="./data/outcome-of-care-measures.csv", colClasses = "character")
  # Column of the 30-day death-rate measure for each supported outcome;
  # replaces the original triplicated if/else branches per outcome.
  outcome.col <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if(!outcome %in% names(outcome.col)){
    stop("invalid outcome")
  }
  state_arr <- sort(unique(data$State))
  hospital <- rep("", length(state_arr))
  col <- outcome.col[[outcome]]
  for(i in seq_along(state_arr)){
    goal <- data[data$State == state_arr[i], ]  # rows for this state
    rate <- as.numeric(goal[, col])             # "Not Available" becomes NA (with a warning, as before)
    len <- sum(!is.na(rate))                    # number of rankable hospitals in this state
    if(num == "best"){
      hospital[i] <- helper(goal, rate, 1)
    } else if(num == "worst"){
      hospital[i] <- helper(goal, rate, len)
    } else if(num > len){
      hospital[i] <- NA
    } else{
      hospital[i] <- helper(goal, rate, num)
    }
  }
  data.frame(hospital = hospital, state = state_arr)
}
|
#modified 7/25/21 to report factor scores so that we can use biplot on the exensions.\
"fa.extension" <-
function(Roe,fo,correct=TRUE) {
cl <- match.call()
omega <-FALSE
if(!is.null(class(fo)[2])) {if(inherits(fo,"fa")) {
if(!is.null(fo$Phi)) {Phi <- fo$Phi} else {Phi <- NULL}
fl <- fo$loadings
fs <- fo$Structure
} else {if (inherits(fo,"omega")) { #switched to inherits December 20, 2019
omega <- TRUE
w <- fo$stats$weights
fl <- fo$schmid$sl
Phi <- NULL
fl <- fl[,1:(dim(fl)[2]-3)]
nfactors <- dim(fl)[2]
fe <- t(t(w) %*% Roe)
foblique <- fo$schmid$oblique
feoblique <- t( Roe) %*% foblique %*% (solve(t(foblique)%*% (foblique)))
feoblique <- feoblique %*% solve(fo$schmid$phi)
}
}
}
#Roe is Horn's Re R1 is Phi Pc is pattern of original = fl
# Pe = Re Pc solve (Pc'Pc) solve Phi
if(!omega) fe <- t( Roe) %*% fl %*% (solve(t(fl)%*% (fl))) #should we include Phi?
if(!is.null(Phi)) fe <- fe %*% solve(Phi) #horn equation 26
if(!correct) {#the Gorsuch case -- not actually-- read Gorsuch again
# d <- diag(t(fl) %*% fo$weight) #this is probably wrong
d <- sqrt(diag(t(fl) %*% fo$weight)) #a correction of sorts for reliability
fe <- (fe * d)
}
colnames(fe) <- colnames(fl)
rownames(fe) <- colnames(Roe)
if(!is.null(Phi)) {resid <- Roe - fl %*% Phi %*% t(fe)} else {resid <- Roe - fl %*% t(fe)} #fixed to actually give residual (1/30/18)
result <- list(loadings = fe,Phi=Phi,resid=resid,Call=cl)
if(!omega) {result <- list(loadings = fe,Phi=Phi,resid=resid,Call=cl)} else {result <- list(loadings = fe,oblique= feoblique,Phi=Phi,resid=resid,Call=cl)}
class(result) <- c("psych","extension")
return(result)
}
#written April 5, 2011
#revised August 15, 2011 to avoid using the weights matrix except in the omega case
#created December 8, 2012 to allow for extension and goodness of fits of total model
#modified 31/5/14 to allow for omega extension as well
#modified 04-09/16 to pass the Structure matrix as well
#Added the cors and correct parameters to pass to fa 1/3/21
"fa.extend" <-
function(r,nfactors=1,ov=NULL,ev=NULL,n.obs = NA, np.obs=NULL,correct=TRUE,rotate="oblimin",SMC=TRUE,warnings=TRUE, fm="minres",alpha=.1, omega=FALSE,cor="cor",use="pairwise",cor.correct=.5,weight=NULL,smooth=TRUE, ...) {
cl <- match.call()
if(is.numeric(ev)) ev <- colnames(r)[ev] #in case we are selecting variables
if(is.numeric(ov)) ov <- colnames(r)[ov]
nv <- c(ov,ev)
if(nrow(r) > ncol(r)){ #the case of a data matrix
#first find the correlations
n.obs <- nrow(r)
np.obs.r <- pairwiseCount(r)[nv,nv]
np.obs <- np.obs.r[ov,ov]
data <- r #if we want to find factor scores
# r <- cor(r,use='pairwise')
switch(cor,
cor = {r <- cor(r,use=use) }, #does not include the weight option from fa
cov = {r <- cov(r,use=use)
covar <- TRUE},
wtd = { r <- cor.wt(r,w=weight)$r},
spearman = {r <- cor(r,use=use,method="spearman")},
kendall = {r <- cor(r,use=use,method="kendall")},
tet = {r <- tetrachoric(r,correct=cor.correct,weight=weight)$rho},
poly = {r <- polychoric(r,correct=cor.correct,weight=weight)$rho},
tetrachoric = {r <- tetrachoric(r,correct=cor.correct,weight=weight)$rho},
polychoric = {r <- polychoric(r,correct=cor.correct,weight=weight)$rho},
mixed = {r <- mixedCor(r,use=use,correct=cor.correct)$rho}
)
if(omega) {fo <- omega(r[ov,ov],nfactors=nfactors,rotate=rotate,SMC=SMC,warnings=warnings,fm=fm,alpha=alpha,...)} else {
fo <- fa(r[ov,ov],nfactors=nfactors,rotate=rotate,SMC=SMC,warnings=warnings,fm=fm,cor=cor,alpha=alpha,smooth=smooth,...)}
} else { #the case of a correlation matrix
data <- NULL
R <- r[ov,ov]
np.obs.r <- np.obs
if(omega) {fo <- omega(R,nfactors=nfactors,n.obs=n.obs,rotate=rotate,SMC=SMC,warnings=warnings,fm=fm,cor=cor,alpha=alpha,np.obs=np.obs[ov,ov],...)} else {
fo <- fa(R,nfactors=nfactors,n.obs=n.obs,rotate=rotate,SMC=SMC,warnings=warnings,fm=fm,cor=cor, correct=correct,alpha=alpha,np.obs=np.obs[ov,ov],smooth=smooth,...)}
}
Roe <- r[ov,ev,drop=FALSE]
fe <- fa.extension(Roe,fo,correct=correct)
if(omega) fo$loadings <- fo$schmid$sl[,1:(ncol(fo$schmid$sl)-3)]
foe <- rbind(fo$loadings,fe$loadings)
if(omega) oblique <- rbind(fo$schmid$oblique,fe$oblique)
if(is.na(n.obs) && !is.null(np.obs)) n.obs <- max(as.vector(np.obs))
result <- factor.stats(r[nv,nv],foe,fo$Phi,n.obs,np.obs.r,alpha=alpha,smooth=smooth)
if(omega) result$schmid$sl <- foe
result$rotation <- rotate
result$loadings <- foe
if(nfactors > 1) {if(is.null(fo$Phi)) {h2 <- rowSums(foe^2)} else {h2 <- diag(foe %*% fo$Phi %*% t(foe)) }} else {h2 <-foe^2}
result$communality <- h2
result$fm <- fm #remember what kind of analysis we did
result$fo=fo
if(!is.null(data)) result$scores <- factor.scores(data[,ov],fo)
if(omega) {result$schmid$sl <- foe
result$schmid$gloading <- fo$schmid$gloading
result$schmid$oblique <- oblique
}
if(is.null(fo$Phi)) {result$Structure <- foe } else { result$Structure <- foe %*% fo$Phi}
result$fe=fe
result$resid=fe$resid
result$Phi=fo$Phi
result$fn="fa"
result$Call=cl
class(result) <- c("psych","extend")
return(result)
}
#adapted from fa.diagram but treats the extension variables as y variables
#draw the standard fa.diagram for the original variables and then regressions to the fe variables
#basically for the case of extension to criterion variables with lower path strengths
#offers a bit more control in the e.cut and e.simple options
|
/R/fa.extension.R
|
no_license
|
cran/psych
|
R
| false
| false
| 5,858
|
r
|
#modified 7/25/21 to report factor scores so that we can use biplot on the exensions.\
"fa.extension" <-
function(Roe,fo,correct=TRUE) {
cl <- match.call()
omega <-FALSE
if(!is.null(class(fo)[2])) {if(inherits(fo,"fa")) {
if(!is.null(fo$Phi)) {Phi <- fo$Phi} else {Phi <- NULL}
fl <- fo$loadings
fs <- fo$Structure
} else {if (inherits(fo,"omega")) { #switched to inherits December 20, 2019
omega <- TRUE
w <- fo$stats$weights
fl <- fo$schmid$sl
Phi <- NULL
fl <- fl[,1:(dim(fl)[2]-3)]
nfactors <- dim(fl)[2]
fe <- t(t(w) %*% Roe)
foblique <- fo$schmid$oblique
feoblique <- t( Roe) %*% foblique %*% (solve(t(foblique)%*% (foblique)))
feoblique <- feoblique %*% solve(fo$schmid$phi)
}
}
}
#Roe is Horn's Re R1 is Phi Pc is pattern of original = fl
# Pe = Re Pc solve (Pc'Pc) solve Phi
if(!omega) fe <- t( Roe) %*% fl %*% (solve(t(fl)%*% (fl))) #should we include Phi?
if(!is.null(Phi)) fe <- fe %*% solve(Phi) #horn equation 26
if(!correct) {#the Gorsuch case -- not actually-- read Gorsuch again
# d <- diag(t(fl) %*% fo$weight) #this is probably wrong
d <- sqrt(diag(t(fl) %*% fo$weight)) #a correction of sorts for reliability
fe <- (fe * d)
}
colnames(fe) <- colnames(fl)
rownames(fe) <- colnames(Roe)
if(!is.null(Phi)) {resid <- Roe - fl %*% Phi %*% t(fe)} else {resid <- Roe - fl %*% t(fe)} #fixed to actually give residual (1/30/18)
result <- list(loadings = fe,Phi=Phi,resid=resid,Call=cl)
if(!omega) {result <- list(loadings = fe,Phi=Phi,resid=resid,Call=cl)} else {result <- list(loadings = fe,oblique= feoblique,Phi=Phi,resid=resid,Call=cl)}
class(result) <- c("psych","extension")
return(result)
}
#written April 5, 2011
#revised August 15, 2011 to avoid using the weights matrix except in the omega case
#created December 8, 2012 to allow for extension and goodness of fits of total model
#modified 31/5/14 to allow for omega extension as well
#modified 04-09/16 to pass the Structure matrix as well
#Added the cors and correct parameters to pass to fa 1/3/21
"fa.extend" <-
function(r,nfactors=1,ov=NULL,ev=NULL,n.obs = NA, np.obs=NULL,correct=TRUE,rotate="oblimin",SMC=TRUE,warnings=TRUE, fm="minres",alpha=.1, omega=FALSE,cor="cor",use="pairwise",cor.correct=.5,weight=NULL,smooth=TRUE, ...) {
cl <- match.call()
if(is.numeric(ev)) ev <- colnames(r)[ev] #in case we are selecting variables
if(is.numeric(ov)) ov <- colnames(r)[ov]
nv <- c(ov,ev)
if(nrow(r) > ncol(r)){ #the case of a data matrix
#first find the correlations
n.obs <- nrow(r)
np.obs.r <- pairwiseCount(r)[nv,nv]
np.obs <- np.obs.r[ov,ov]
data <- r #if we want to find factor scores
# r <- cor(r,use='pairwise')
switch(cor,
cor = {r <- cor(r,use=use) }, #does not include the weight option from fa
cov = {r <- cov(r,use=use)
covar <- TRUE},
wtd = { r <- cor.wt(r,w=weight)$r},
spearman = {r <- cor(r,use=use,method="spearman")},
kendall = {r <- cor(r,use=use,method="kendall")},
tet = {r <- tetrachoric(r,correct=cor.correct,weight=weight)$rho},
poly = {r <- polychoric(r,correct=cor.correct,weight=weight)$rho},
tetrachoric = {r <- tetrachoric(r,correct=cor.correct,weight=weight)$rho},
polychoric = {r <- polychoric(r,correct=cor.correct,weight=weight)$rho},
mixed = {r <- mixedCor(r,use=use,correct=cor.correct)$rho}
)
if(omega) {fo <- omega(r[ov,ov],nfactors=nfactors,rotate=rotate,SMC=SMC,warnings=warnings,fm=fm,alpha=alpha,...)} else {
fo <- fa(r[ov,ov],nfactors=nfactors,rotate=rotate,SMC=SMC,warnings=warnings,fm=fm,cor=cor,alpha=alpha,smooth=smooth,...)}
} else { #the case of a correlation matrix
data <- NULL
R <- r[ov,ov]
np.obs.r <- np.obs
if(omega) {fo <- omega(R,nfactors=nfactors,n.obs=n.obs,rotate=rotate,SMC=SMC,warnings=warnings,fm=fm,cor=cor,alpha=alpha,np.obs=np.obs[ov,ov],...)} else {
fo <- fa(R,nfactors=nfactors,n.obs=n.obs,rotate=rotate,SMC=SMC,warnings=warnings,fm=fm,cor=cor, correct=correct,alpha=alpha,np.obs=np.obs[ov,ov],smooth=smooth,...)}
}
Roe <- r[ov,ev,drop=FALSE]
fe <- fa.extension(Roe,fo,correct=correct)
if(omega) fo$loadings <- fo$schmid$sl[,1:(ncol(fo$schmid$sl)-3)]
foe <- rbind(fo$loadings,fe$loadings)
if(omega) oblique <- rbind(fo$schmid$oblique,fe$oblique)
if(is.na(n.obs) && !is.null(np.obs)) n.obs <- max(as.vector(np.obs))
result <- factor.stats(r[nv,nv],foe,fo$Phi,n.obs,np.obs.r,alpha=alpha,smooth=smooth)
if(omega) result$schmid$sl <- foe
result$rotation <- rotate
result$loadings <- foe
if(nfactors > 1) {if(is.null(fo$Phi)) {h2 <- rowSums(foe^2)} else {h2 <- diag(foe %*% fo$Phi %*% t(foe)) }} else {h2 <-foe^2}
result$communality <- h2
result$fm <- fm #remember what kind of analysis we did
result$fo=fo
if(!is.null(data)) result$scores <- factor.scores(data[,ov],fo)
if(omega) {result$schmid$sl <- foe
result$schmid$gloading <- fo$schmid$gloading
result$schmid$oblique <- oblique
}
if(is.null(fo$Phi)) {result$Structure <- foe } else { result$Structure <- foe %*% fo$Phi}
result$fe=fe
result$resid=fe$resid
result$Phi=fo$Phi
result$fn="fa"
result$Call=cl
class(result) <- c("psych","extend")
return(result)
}
#adapted from fa.diagram but treats the extension variables as y variables
#draw the standard fa.diagram for the original variables and then regressions to the fe variables
#basically for the case of extension to criterion variables with lower path strengths
#offers a bit more control in the e.cut and e.simple options
|
# Numerical summary (min/quartiles/mean/sd/n) of the null distribution of
# the hand-width statistic (mosaic::favstats).
favstats(~ hand_width, data = Hand.null)
# Estimated p-value: proportion of null statistics at or below the
# observed value of -6.756.
prop(~ (hand_width <= -6.756), data = Hand.null)
|
/inst/snippets/Exploration10.4.6.R
|
no_license
|
rpruim/ISIwithR
|
R
| false
| false
| 91
|
r
|
# Numerical summary (min/quartiles/mean/sd/n) of the null distribution of
# the hand-width statistic (mosaic::favstats).
favstats(~ hand_width, data = Hand.null)
# Estimated p-value: proportion of null statistics at or below the
# observed value of -6.756.
prop(~ (hand_width <= -6.756), data = Hand.null)
|
#' Uji Varians 1 atau 2 Populasi
#'
#' Fungsi digunakan untuk menguji varians baik dari satu ataupun dua populasi
#'
#'
#' @param varsampel varians dari sampel (untuk 1 populasi langsung input nilai, untuk 2 populasi gunakan syntax c(), contoh: c(varsampel1, varsampel2))
#' @param nsampel jumlah sampel (untuk 1 populasi langsung input nilai, untuk 2 populasi gunakan syntax c(), contoh: c(nsampel1, nsampel2))
#' @param varpop0 input varians populasi uji untuk uji 1 sampel, apabila pada parameter termuat, maka akan menghasilkan uji varians 1 populasi
#' @param h1 hipotesis alternatif (HA/H1), pilih = "two.sided", "right.sided", atau "left.sided". default = "two.sided"
#' @param alpha taraf signifikansi yang diinginkan
#' @return Uji varians
#' @export
variance.test <- function( varsampel = c(NA,NA), nsampel = c(NA,NA),
                           varpop0 = NA, h1 = "two.sided",
                           alpha = 0.05){
  # Variance test for one or two populations (results printed via cat()).
  #
  # varsampel : sample variance(s); a scalar for one population or
  #             c(var1, var2) for two populations.
  # nsampel   : sample size(s), same convention as varsampel.
  # varpop0   : hypothesised population variance.  When supplied together
  #             with a single sample, a one-population chi-square test is
  #             performed; otherwise a two-population F (Fisher) test.
  # h1        : alternative hypothesis -- "two.sided", "right.sided" or
  #             "left.sided".
  # alpha     : significance level.
  if (!is.na(varpop0) & is.na(varsampel[2]) & is.na(nsampel[2])) {
    # ---- one population: chi-square statistic (n-1)*s^2 / sigma0^2 ----
    khi <- (nsampel[1]-1)*varsampel[1]/varpop0
    v <- nsampel[1] - 1
    p.val <- pchisq(khi, df = v, lower.tail = F)
    cat("\n\n Chi-square One Variance Test by yoursunshine \n",
        "\n\n",
        "Chi-square hitung : ", round(khi,4), "\n",
        "Degree of freedom : ", v, "\n",
        "Alpha : ", alpha, "\n",
        "p-value (P[H0]) : ", round(p.val,4), "\n\n")
    if (h1 == "two.sided") {
      # two-sided: reject when the statistic lies outside both chi-square tails
      khi.tab.bwh <- qchisq(alpha/2, df = v)
      khi.tab.ats <- qchisq(alpha/2, df = v, lower.tail = F)
      cat(" Bottom critical : ", round(khi.tab.bwh,4), "\n",
          "Upper critical : ", round(khi.tab.ats,4), "\n")
      if (khi < khi.tab.bwh | khi > khi.tab.ats) {
        cat(" Chi-square hitung tidak di antara dua critical \n\n")
        cat(" Keputusan : Tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%, \n belum cukup bukti bahwa varians populasi sama dengan ", varpop0)
      } else {
        cat(" Chi-square hitung ada di antara dua critical \n\n")
        cat(" Keputusan : Gagal tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n cukup bukti bahwa varians populasi sama dengan ", varpop0)
      }
    } else if (h1 == "right.sided") {
      khi.tab <- qchisq(alpha, df = v, lower.tail = F)
      cat(" Critical right.sided : ", round(khi.tab,4), "\n")
      if (khi > khi.tab) {
        cat(" Chi-square hitung > Chi-square tabel \n\n")
        cat(" Keputusan : Tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%, \n belum cukup bukti bahwa varians populasi lebih dari ", varpop0)
      } else {
        cat(" Chi-square hitung < Chi-square tabel \n\n")
        cat(" Keputusan : Gagal tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n cukup bukti bahwa varians populasi lebih dari ", varpop0)
      }
    } else if (h1 == "left.sided") {
      khi.tab <- qchisq(alpha, df = v)
      cat(" Critical left.sided : ", round(khi.tab,4), "\n")
      if (khi < khi.tab) {
        cat(" Chi-square hitung < Chi-square tabel \n\n")
        cat(" Keputusan : Tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n belum cukup bukti bahwa varians populasi kurang dari ", varpop0)
      } else {
        cat(" Chi-square hitung > Chi-square tabel \n\n")
        # (removed dead assignment `result <- "Gagal tolak H0"` -- never used)
        cat(" Keputusan : Gagal tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n cukup bukti bahwa varians populasi kurang dari ", varpop0)
      }
    } else cat("\n Masukkan nilai h1 yang benar")
  } else {
    # ---- two populations: F = s1^2 / s2^2 with (n1-1, n2-1) df ----
    s1 <- varsampel[1]
    s2 <- varsampel[2]
    v1 <- nsampel[1] - 1
    v2 <- nsampel[2] - 1
    f <- s1/s2
    cat("\n\n Fisher Two Variances Test by yoursunshine \n",
        "\n\n",
        "F hitung : ", round(f,4), "\n",
        "Degree of freedom 1: ", v1, "\n",
        "Degree of freedom 2: ", v2, "\n",
        "Alpha : ", alpha, "\n\n")
    if (h1 == "two.sided") {
      # lower critical value via upper tail at 1-alpha/2 (equivalent to qf(alpha/2))
      f.tab.bwh <- qf(1 - alpha/2, df1 = v1, df2 = v2, lower.tail = F)
      f.tab.ats <- qf(alpha/2, df1 = v1, df2 = v2, lower.tail = F)
      cat(" Bottom critical : ", round(f.tab.bwh,4), "\n",
          "Upper critical : ", round(f.tab.ats,4), "\n")
      if (f < f.tab.bwh | f > f.tab.ats) {
        cat(" F hitung hitung tidak di antara dua critical \n\n")
        cat(" Keputusan : Tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n
 belum cukup bukti bahwa var.pop.1 = var.pop.2")
      } else {
        cat(" F hitung ada di antara dua critical \n\n")
        cat(" Keputusan : Gagal tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n cukup bukti bahwa var.pop.1 = var.pop.2")
      }
    } else if (h1 == "right.sided") {
      f.tab <- qf(alpha, df1 = v1, df2 = v2, lower.tail = F)
      cat(" Critical right.sided : ", round(f.tab,4), "\n")
      if (f > f.tab) {
        cat(" F hitung > F tabel \n\n")
        cat(" Keputusan : Tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n belum cukup bukti bahwa var.pop.1 <= var.pop.2")
      } else {
        cat(" F hitung < Ftabel \n\n")
        cat(" Keputusan : Gagal tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n cukup bukti bahwa var.pop.1 <= var.pop.2")
      }
    } else if (h1 == "left.sided") {
      f.tab <- qf(1 - alpha, df1 = v1, df2 = v2, lower.tail = F)
      cat(" Critical left.sided : ", round(f.tab,4), "\n")
      if (f < f.tab) {
        cat(" F hitung < F tabel \n\n")
        cat(" Keputusan : Tolak H0 \n",
            " Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n belum cukup bukti bahwa var.pop.1 >= var.pop.2")
      } else {
        cat(" F hitung > F tabel \n\n")
        cat(" Keputusan : Gagal tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n cukup bukti bahwa var.pop.1 >= var.pop.2")
      }
    } else
      cat("\nMasukkan nilai h1 yang benar")
  }
}
|
/R/variance-test.R
|
no_license
|
yoursunshineR/statitest
|
R
| false
| false
| 6,194
|
r
|
#' Uji Varians 1 atau 2 Populasi
#'
#' Fungsi digunakan untuk menguji varians baik dari satu ataupun dua populasi
#'
#'
#' @param varsampel varians dari sampel (untuk 1 populasi langsung input nilai, untuk 2 populasi gunakan syntax c(), contoh: c(varsampel1, varsampel2))
#' @param nsampel jumlah sampel (untuk 1 populasi langsung input nilai, untuk 2 populasi gunakan syntax c(), contoh: c(nsampel1, nsampel2))
#' @param varpop0 input varians populasi uji untuk uji 1 sampel, apabila pada parameter termuat, maka akan menghasilkan uji varians 1 populasi
#' @param h1 hipotesis alternatif (HA/H1), pilih = "two.sided", "right.sided", atau "left.sided". default = "two.sided"
#' @param alpha taraf signifikansi yang diinginkan
#' @return Uji varians
#' @export
variance.test <- function( varsampel = c(NA,NA), nsampel = c(NA,NA),
                           varpop0 = NA, h1 = "two.sided",
                           alpha = 0.05){
  # Variance test for one or two populations (results printed via cat()).
  #
  # varsampel : sample variance(s); a scalar for one population or
  #             c(var1, var2) for two populations.
  # nsampel   : sample size(s), same convention as varsampel.
  # varpop0   : hypothesised population variance.  When supplied together
  #             with a single sample, a one-population chi-square test is
  #             performed; otherwise a two-population F (Fisher) test.
  # h1        : alternative hypothesis -- "two.sided", "right.sided" or
  #             "left.sided".
  # alpha     : significance level.
  if (!is.na(varpop0) & is.na(varsampel[2]) & is.na(nsampel[2])) {
    # ---- one population: chi-square statistic (n-1)*s^2 / sigma0^2 ----
    khi <- (nsampel[1]-1)*varsampel[1]/varpop0
    v <- nsampel[1] - 1
    p.val <- pchisq(khi, df = v, lower.tail = F)
    cat("\n\n Chi-square One Variance Test by yoursunshine \n",
        "\n\n",
        "Chi-square hitung : ", round(khi,4), "\n",
        "Degree of freedom : ", v, "\n",
        "Alpha : ", alpha, "\n",
        "p-value (P[H0]) : ", round(p.val,4), "\n\n")
    if (h1 == "two.sided") {
      # two-sided: reject when the statistic lies outside both chi-square tails
      khi.tab.bwh <- qchisq(alpha/2, df = v)
      khi.tab.ats <- qchisq(alpha/2, df = v, lower.tail = F)
      cat(" Bottom critical : ", round(khi.tab.bwh,4), "\n",
          "Upper critical : ", round(khi.tab.ats,4), "\n")
      if (khi < khi.tab.bwh | khi > khi.tab.ats) {
        cat(" Chi-square hitung tidak di antara dua critical \n\n")
        cat(" Keputusan : Tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%, \n belum cukup bukti bahwa varians populasi sama dengan ", varpop0)
      } else {
        cat(" Chi-square hitung ada di antara dua critical \n\n")
        cat(" Keputusan : Gagal tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n cukup bukti bahwa varians populasi sama dengan ", varpop0)
      }
    } else if (h1 == "right.sided") {
      khi.tab <- qchisq(alpha, df = v, lower.tail = F)
      cat(" Critical right.sided : ", round(khi.tab,4), "\n")
      if (khi > khi.tab) {
        cat(" Chi-square hitung > Chi-square tabel \n\n")
        cat(" Keputusan : Tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%, \n belum cukup bukti bahwa varians populasi lebih dari ", varpop0)
      } else {
        cat(" Chi-square hitung < Chi-square tabel \n\n")
        cat(" Keputusan : Gagal tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n cukup bukti bahwa varians populasi lebih dari ", varpop0)
      }
    } else if (h1 == "left.sided") {
      khi.tab <- qchisq(alpha, df = v)
      cat(" Critical left.sided : ", round(khi.tab,4), "\n")
      if (khi < khi.tab) {
        cat(" Chi-square hitung < Chi-square tabel \n\n")
        cat(" Keputusan : Tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n belum cukup bukti bahwa varians populasi kurang dari ", varpop0)
      } else {
        cat(" Chi-square hitung > Chi-square tabel \n\n")
        # (removed dead assignment `result <- "Gagal tolak H0"` -- never used)
        cat(" Keputusan : Gagal tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n cukup bukti bahwa varians populasi kurang dari ", varpop0)
      }
    } else cat("\n Masukkan nilai h1 yang benar")
  } else {
    # ---- two populations: F = s1^2 / s2^2 with (n1-1, n2-1) df ----
    s1 <- varsampel[1]
    s2 <- varsampel[2]
    v1 <- nsampel[1] - 1
    v2 <- nsampel[2] - 1
    f <- s1/s2
    cat("\n\n Fisher Two Variances Test by yoursunshine \n",
        "\n\n",
        "F hitung : ", round(f,4), "\n",
        "Degree of freedom 1: ", v1, "\n",
        "Degree of freedom 2: ", v2, "\n",
        "Alpha : ", alpha, "\n\n")
    if (h1 == "two.sided") {
      # lower critical value via upper tail at 1-alpha/2 (equivalent to qf(alpha/2))
      f.tab.bwh <- qf(1 - alpha/2, df1 = v1, df2 = v2, lower.tail = F)
      f.tab.ats <- qf(alpha/2, df1 = v1, df2 = v2, lower.tail = F)
      cat(" Bottom critical : ", round(f.tab.bwh,4), "\n",
          "Upper critical : ", round(f.tab.ats,4), "\n")
      if (f < f.tab.bwh | f > f.tab.ats) {
        cat(" F hitung hitung tidak di antara dua critical \n\n")
        cat(" Keputusan : Tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n
 belum cukup bukti bahwa var.pop.1 = var.pop.2")
      } else {
        cat(" F hitung ada di antara dua critical \n\n")
        cat(" Keputusan : Gagal tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n cukup bukti bahwa var.pop.1 = var.pop.2")
      }
    } else if (h1 == "right.sided") {
      f.tab <- qf(alpha, df1 = v1, df2 = v2, lower.tail = F)
      cat(" Critical right.sided : ", round(f.tab,4), "\n")
      if (f > f.tab) {
        cat(" F hitung > F tabel \n\n")
        cat(" Keputusan : Tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n belum cukup bukti bahwa var.pop.1 <= var.pop.2")
      } else {
        cat(" F hitung < Ftabel \n\n")
        cat(" Keputusan : Gagal tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n cukup bukti bahwa var.pop.1 <= var.pop.2")
      }
    } else if (h1 == "left.sided") {
      f.tab <- qf(1 - alpha, df1 = v1, df2 = v2, lower.tail = F)
      cat(" Critical left.sided : ", round(f.tab,4), "\n")
      if (f < f.tab) {
        cat(" F hitung < F tabel \n\n")
        cat(" Keputusan : Tolak H0 \n",
            " Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n belum cukup bukti bahwa var.pop.1 >= var.pop.2")
      } else {
        cat(" F hitung > F tabel \n\n")
        cat(" Keputusan : Gagal tolak H0 \n",
            "Dengan tingkat kepercayaan ", (1-alpha)*100, "%,\n cukup bukti bahwa var.pop.1 >= var.pop.2")
      }
    } else
      cat("\nMasukkan nilai h1 yang benar")
  }
}
|
# Harvest recent tweets from three US health-agency Twitter accounts and
# convert the twitteR status objects into data frames for downstream
# text analysis / plotting.
# NOTE(review): userTimeline() requires prior Twitter API authentication
# (setup_twitter_oauth()) -- presumably done elsewhere; confirm before running.
library(ggplot2)
library(twitteR)
library(stringr)
library(wordcloud)
# harvest tweets from each user
epa_tweets = userTimeline("EPAgov", n=500)
nih_tweets = userTimeline("NIHforHealth", n=500)
cdc_tweets = userTimeline("CDCgov", n=500)
# dump tweets information into data frames
epa_df = twListToDF(epa_tweets)
nih_df = twListToDF(nih_tweets)
cdc_df = twListToDF(cdc_tweets)
|
/twitteR.R
|
no_license
|
rtremeaud/code-r
|
R
| false
| false
| 376
|
r
|
# Harvest recent tweets from three US health-agency Twitter accounts and
# convert the twitteR status objects into data frames for downstream
# text analysis / plotting.
# NOTE(review): userTimeline() requires prior Twitter API authentication
# (setup_twitter_oauth()) -- presumably done elsewhere; confirm before running.
library(ggplot2)
library(twitteR)
library(stringr)
library(wordcloud)
# harvest tweets from each user
epa_tweets = userTimeline("EPAgov", n=500)
nih_tweets = userTimeline("NIHforHealth", n=500)
cdc_tweets = userTimeline("CDCgov", n=500)
# dump tweets information into data frames
epa_df = twListToDF(epa_tweets)
nih_df = twListToDF(nih_tweets)
cdc_df = twListToDF(cdc_tweets)
|
#! /usr/bin/env Rscript
## Extract background ADT signal from empty droplets
# using empty droplets from GEX libraries
# subtract background estimated from a 2-component mixture model
# Pipeline: (1) parse per-sample input paths, (2) identify "background"
# droplets (low GEX counts, not in the QC'd-cell whitelist), (3) log-CPM
# normalise their ADT counts and fit a 2-component GMM per protein,
# (4) subtract the lower GMM mean from the ADT log-CPM of kept cells.
# ------- arg parsing ----------
library(optparse)
parser <- OptionParser()
parser <- add_option(parser, c("-x", "--matrixlist"), type="character",
                     help="A set of comma-separated paths to the raw matrix.mtx.gz")
parser <- add_option(parser, c("-f", "--featurelist"), type="character",
                     help="A set of comma-separated paths to the feature information")
parser <- add_option(parser, c("-b", "--barcodeslist"), type="character",
                     help="Path to .txt containing barcodes of called cells")
parser <- add_option(parser, c("-w", "--whitelists"), type="character",
                     help="Path to .txt file containing QC'd cells")
parser <- add_option(parser, c("-o", "--output"), type="character",
                     help="Prefix for output files denoised data and combined SCE object")
parser <- add_option(parser, c("-p", "--plots"), type="character",
                     help="Path to directory for plotting")
opt <- parse_args(parser)
library(Matrix)
library(mclust)
library(ggplot2)
#library(ggsci)
library(ggthemes)
library(reshape2)
# read in cell barcodes, features and counts matrix
# read in barcode whitelist to exclude QC-passed cells
# this should be a comma-separated list of matrices
barcode.list <- unlist(strsplit(opt$barcodeslist, split=",", fixed=TRUE))
# Derive sample names from the barcode file paths (4th-from-last path
# component, stripped of the "_cells.txt" suffix and any "_CITE" tag).
samp.names <- lapply(barcode.list, FUN=function(P) gsub(unlist(lapply(strsplit(P, fixed=TRUE, split="/"),
                                                                     FUN=function(sP) paste0(sP[length(sP)-3]))), pattern="_cells\\.txt", replacement=""))
samp.names <- gsub(samp.names, pattern="_CITE", replacement="")
samp.names <- as.factor(unlist(samp.names))
print(samp.names)
all.barcodes.list <- lapply(barcode.list, FUN=function(FB) read.table(FB, stringsAsFactors=FALSE, header=FALSE)[,1])
# to keep a consistent ordering, this needs to become a factor - it also needs to be learnt from the filenames
# rather than strictly relying on the input order
names(all.barcodes.list) <- samp.names
# Read in raw counts
message("Reading in raw counts matrices")
fldr.list <- unlist(strsplit(opt$matrixlist, split=",", fixed=TRUE))
matrix.list <- lapply(fldr.list, FUN=function(FM) readMM(FM))
names(matrix.list) <- samp.names
# Read in feature info
message("Reading in feature information")
feature.list <- unlist(strsplit(opt$featurelist, split=",", fixed=TRUE))
all.feature.list <- lapply(feature.list, FUN=function(FX) read.table(FX, stringsAsFactors=FALSE, header=FALSE, sep="\t"))
names(all.feature.list) <- samp.names
# Attach the droplet barcodes as column names of each counts matrix.
for(x in seq_along(levels(samp.names))){
    samp.x <- levels(samp.names)[x]
    x.mat <- matrix.list[[samp.x]]
    colnames(x.mat) <- all.barcodes.list[[samp.x]]
    matrix.list[[samp.x]] <- x.mat
}
rm(list=c("x.mat"))
gc()
# read in whitelist barcodes
white.list <- unlist(strsplit(opt$whitelists, fixed=TRUE, split=","))
keep.cell.list <- lapply(white.list, FUN=function(FW) read.table(FW, stringsAsFactors=FALSE, header=FALSE)[, 1])
names(keep.cell.list) <- samp.names
print(lapply(keep.cell.list, length))
print(names(keep.cell.list))
message(paste0("Found ", length(unlist(keep.cell.list)), " good cell barcodes to exclude"))
# Background droplets: 10 < total GEX counts < 100 and not in the whitelist.
non.zero.list <- list()
for(x in seq_along(levels(samp.names))){
    samp.x <- levels(samp.names)[x]
    x.non <- colSums(matrix.list[[samp.x]][all.feature.list[[samp.x]]$V3 == "Gene Expression", ]) > 10 &
        colSums(matrix.list[[samp.x]][all.feature.list[[samp.x]]$V3 == "Gene Expression", ]) < 100
    x.bg.bcs <- setdiff(all.barcodes.list[[samp.x]][x.non], keep.cell.list[[samp.x]])
    non.zero.list[[samp.x]] <- x.bg.bcs
    print(length(x.bg.bcs))
}
message(paste0("Extracted ", length(unlist(non.zero.list)), " background barcodes"))
# extract the ADT counts for these background barcodes and log CPM normalize
adtcpm.bg.list <- list()
for(x in seq_along(levels(samp.names))){
    samp.x <- levels(samp.names)[x]
    message(paste("Extracting ADT counts and performing log CPM normalisation for sample: ", samp.x))
    x.adt <- matrix.list[[samp.x]][all.feature.list[[samp.x]]$V3 == "Antibody Capture", non.zero.list[[samp.x]]]
    # log10 CPM with +1 pseudocounts on both numerator and library size
    x.adt <- apply(x.adt, 2, FUN=function(X) log10((X+1)/((sum(X)+1)/(1e6+1))))
    adtcpm.bg.list[[samp.x]] <- x.adt
    print(dim(x.adt))
    pdf(paste(opt$plots, paste0(samp.x, "-ADT_logCPM_hist-badcells.pdf"), sep="/"), height=3.95, width=5.15, useDingbats=FALSE)
    hist(apply(x.adt, 2, mean), 100, main="ADT logCPM distribution for empty/bad cells")
    dev.off()
    x.adt <- as.data.frame(x.adt)
    x.adt$ADT <- all.feature.list[[samp.x]]$V2[all.feature.list[[samp.x]]$V3 == "Antibody Capture"]
    # write out these matrices for later use
    x.bgofile <- gzfile(paste0(opt$output, "/EmptyDroplets_", samp.x, "_bgCPM.txt.gz"), "w")
    write.table(x.adt, file=x.bgofile, sep="\t", quote=FALSE, row.names=FALSE)
    close(x.bgofile)
    x.counts <- as.data.frame(as.matrix(matrix.list[[samp.x]][all.feature.list[[samp.x]]$V3 == "Antibody Capture", non.zero.list[[samp.x]]]))
    x.counts$ADT <- all.feature.list[[samp.x]]$V2[all.feature.list[[samp.x]]$V3 == "Antibody Capture"]
    x.countofile <- gzfile(paste0(opt$output, "/EmptyDroplets_", samp.x, "_counts.txt.gz"), "w")
    write.table(x.counts, file=x.countofile, sep="\t", quote=FALSE, row.names=FALSE)
    close(x.countofile)
    # silence gc() chatter while freeing the per-sample intermediates
    sink(file="/dev/null")
    rm(list=c("x.counts", "x.adt"))
    gc()
    sink(file=NULL)
}
# fit a mixture model to these backgrounds
#######################################
#### Fitting a GMM to each protein ####
#######################################
message("Fitting a 2-component gaussian mixture model to each protein")
gmm.list <- list()
for(x in seq_along(levels(samp.names))){
    x.samp <- levels(samp.names)[x]
    # One 2-component GMM per protein (row) over background log-CPM values.
    x.mclust <- apply(adtcpm.bg.list[[x.samp]], 1, function(P) {
        g = mclust::Mclust(P, G=2, warn=FALSE , verbose=FALSE)
        return(g$parameters$mean)})
    x.means <- do.call(rbind.data.frame, list(t(x.mclust)))
    colnames(x.means) <- paste0("Mean", 1:2)
    gmm.list[[x.samp]] <- x.means
    print(dim(x.means))
    # why is this model so bad at finding the 3 components?!
    # do I need to fit a separate model to each protein, rather than each cell??
    x.plot <- ggplot(melt(x.means), aes(x=value, fill=variable)) +
        geom_histogram(bins=100) +
        scale_fill_colorblind() +
        facet_wrap(~variable, ncol=1)
    # NOTE(review): filename uses `samp.x`, but this loop's variable is
    # `x.samp` -- `samp.x` is stale from the previous loop, so every sample's
    # plot is presumably written to the same file. Confirm and fix upstream.
    ggsave(x.plot, filename=paste0(opt$plots, "/", samp.x, "-ADT_logCPM_hist-badcells.pdf"),
           height=4.95, width=4.95, useDingbats=FALSE)
}
message("Extracting ADT libraries to keep")
## The white list gives us the cell barcodes to keep
keep.adtcpm.list <- list()
for(x in seq_along(levels(samp.names))){
    samp.x <- levels(samp.names)[x]
    print(length(keep.cell.list[[samp.x]]))
    x.keep.mtx <- matrix.list[[samp.x]][all.feature.list[[samp.x]]$V3 == "Antibody Capture", keep.cell.list[[samp.x]]]
    x.keep.mtx <- apply(x.keep.mtx, 2, FUN=function(X) log10((X+1)/((sum(X)+1)/(1e6+1))))
    keep.adtcpm.list[[samp.x]] <- x.keep.mtx
}
message(paste0("Retained ", sum(unlist(lapply(keep.adtcpm.list, ncol))), " barcodes"))
message("Removing background signal per-protein")
for(x in seq_along(levels(samp.names))){
    x.samp <- levels(samp.names)[x]
    # Subtract the first (lower) GMM component mean per protein from each cell.
    x.bgshift <- apply(keep.adtcpm.list[[x.samp]], 2,
                       FUN=function(X) X - gmm.list[[x.samp]][, 1])
    x.bgshift <- as.data.frame(x.bgshift)
    colnames(x.bgshift) <- paste0(x.samp, "_", colnames(x.bgshift))
    print(dim(x.bgshift))
    n.prots <- nrow(x.bgshift)
    pdf(paste0(opt$plots, "/", x.samp, "_logCPM_BgShift-histogram.pdf"),
        height=18.95, width=12.95, useDingbats=FALSE)
    par(mfrow=c(28, 7), mai=c(0, 0, 0, 0))
    for(i in seq_along(1:n.prots)){
        # NOTE(review): indexes row `x` (the outer sample index) on every
        # iteration -- presumably `x.bgshift[i, ]` was intended so each panel
        # shows a different protein. Confirm and fix upstream.
        hist(unlist(x.bgshift[x, ]), xlab="", ylab="", main="", breaks=100)
    }
    dev.off()
    x.bgshift$ADT <- all.feature.list[[x.samp]]$V2[all.feature.list[[x.samp]]$V3 == "Antibody Capture"]
    x.ofile <- gzfile(paste0(opt$output, "/Covid_ADT_", x.samp, "_bgCPM.txt.gz"), "w")
    print(dim(x.bgshift))
    write.table(x.bgshift, file=x.ofile, quote=FALSE, row.names=FALSE, sep="\t")
    close(x.ofile)
}
|
/src/bgshift_cpm.R
|
no_license
|
MarioniLab/CovidPBMC
|
R
| false
| false
| 8,363
|
r
|
#! /usr/bin/env Rscript
## Extract background ADT signal from empty droplets
# using empty droplets from GEX libraries
# subtract background estimated from a 2-component mixture model
# ------- arg parsing ----------
library(optparse)
parser <- OptionParser()
parser <- add_option(parser, c("-x", "--matrixlist"), type="character",
help="A set of comma-separated paths to the raw matrix.mtx.gz")
parser <- add_option(parser, c("-f", "--featurelist"), type="character",
help="A set of comma-separated paths to the feature information")
parser <- add_option(parser, c("-b", "--barcodeslist"), type="character",
help="Path to .txt containing barcodes of called cells")
parser <- add_option(parser, c("-w", "--whitelists"), type="character",
help="Path to .txt file containing QC'd cells")
parser <- add_option(parser, c("-o", "--output"), type="character",
help="Prefix for output files denoised data and combined SCE object")
parser <- add_option(parser, c("-p", "--plots"), type="character",
help="Path to directory for plotting")
opt <- parse_args(parser)
library(Matrix)
library(mclust)
library(ggplot2)
#library(ggsci)
library(ggthemes)
library(reshape2)
# read in cell barcodes, features and counts matrix
# read in barcode whitelist to exclude QC-passed cells
# this should be a comma-separated list of matrices
barcode.list <- unlist(strsplit(opt$barcodeslist, split=",", fixed=TRUE))
samp.names <- lapply(barcode.list, FUN=function(P) gsub(unlist(lapply(strsplit(P, fixed=TRUE, split="/"),
FUN=function(sP) paste0(sP[length(sP)-3]))), pattern="_cells\\.txt", replacement=""))
samp.names <- gsub(samp.names, pattern="_CITE", replacement="")
samp.names <- as.factor(unlist(samp.names))
print(samp.names)
all.barcodes.list <- lapply(barcode.list, FUN=function(FB) read.table(FB, stringsAsFactors=FALSE, header=FALSE)[,1])
# to keep a consistent ordering, this needs to become a factor - it also needs to be learnt from the filenames
# rather than strictly relying on the input order
names(all.barcodes.list) <- samp.names
# Read in raw counts
message("Reading in raw counts matrices")
fldr.list <- unlist(strsplit(opt$matrixlist, split=",", fixed=TRUE))
matrix.list <- lapply(fldr.list, FUN=function(FM) readMM(FM))
names(matrix.list) <- samp.names
# Read in feature info
message("Reading in feature information")
feature.list <- unlist(strsplit(opt$featurelist, split=",", fixed=TRUE))
all.feature.list <- lapply(feature.list, FUN=function(FX) read.table(FX, stringsAsFactors=FALSE, header=FALSE, sep="\t"))
names(all.feature.list) <- samp.names
for(x in seq_along(levels(samp.names))){
samp.x <- levels(samp.names)[x]
x.mat <- matrix.list[[samp.x]]
colnames(x.mat) <- all.barcodes.list[[samp.x]]
matrix.list[[samp.x]] <- x.mat
}
rm(list=c("x.mat"))
gc()
# read in whitelist barcodes
white.list <- unlist(strsplit(opt$whitelists, fixed=TRUE, split=","))
keep.cell.list <- lapply(white.list, FUN=function(FW) read.table(FW, stringsAsFactors=FALSE, header=FALSE)[, 1])
names(keep.cell.list) <- samp.names
print(lapply(keep.cell.list, length))
print(names(keep.cell.list))
message(paste0("Found ", length(unlist(keep.cell.list)), " good cell barcodes to exclude"))
non.zero.list <- list()
for(x in seq_along(levels(samp.names))){
samp.x <- levels(samp.names)[x]
x.non <- colSums(matrix.list[[samp.x]][all.feature.list[[samp.x]]$V3 == "Gene Expression", ]) > 10 &
colSums(matrix.list[[samp.x]][all.feature.list[[samp.x]]$V3 == "Gene Expression", ]) < 100
x.bg.bcs <- setdiff(all.barcodes.list[[samp.x]][x.non], keep.cell.list[[samp.x]])
non.zero.list[[samp.x]] <- x.bg.bcs
print(length(x.bg.bcs))
}
message(paste0("Extracted ", length(unlist(non.zero.list)), " background barcodes"))
# extract the ADT counts for these background barcodes and log CPM normalize
adtcpm.bg.list <- list()
for(x in seq_along(levels(samp.names))){
samp.x <- levels(samp.names)[x]
message(paste("Extracting ADT counts and performing log CPM normalisation for sample: ", samp.x))
x.adt <- matrix.list[[samp.x]][all.feature.list[[samp.x]]$V3 == "Antibody Capture", non.zero.list[[samp.x]]]
x.adt <- apply(x.adt, 2, FUN=function(X) log10((X+1)/((sum(X)+1)/(1e6+1))))
adtcpm.bg.list[[samp.x]] <- x.adt
print(dim(x.adt))
pdf(paste(opt$plots, paste0(samp.x, "-ADT_logCPM_hist-badcells.pdf"), sep="/"), height=3.95, width=5.15, useDingbats=FALSE)
hist(apply(x.adt, 2, mean), 100, main="ADT logCPM distribution for empty/bad cells")
dev.off()
x.adt <- as.data.frame(x.adt)
x.adt$ADT <- all.feature.list[[samp.x]]$V2[all.feature.list[[samp.x]]$V3 == "Antibody Capture"]
# write out these matrices for later use
x.bgofile <- gzfile(paste0(opt$output, "/EmptyDroplets_", samp.x, "_bgCPM.txt.gz"), "w")
write.table(x.adt, file=x.bgofile, sep="\t", quote=FALSE, row.names=FALSE)
close(x.bgofile)
x.counts <- as.data.frame(as.matrix(matrix.list[[samp.x]][all.feature.list[[samp.x]]$V3 == "Antibody Capture", non.zero.list[[samp.x]]]))
x.counts$ADT <- all.feature.list[[samp.x]]$V2[all.feature.list[[samp.x]]$V3 == "Antibody Capture"]
x.countofile <- gzfile(paste0(opt$output, "/EmptyDroplets_", samp.x, "_counts.txt.gz"), "w")
write.table(x.counts, file=x.countofile, sep="\t", quote=FALSE, row.names=FALSE)
close(x.countofile)
sink(file="/dev/null")
rm(list=c("x.counts", "x.adt"))
gc()
sink(file=NULL)
}
# fit a mixture model to these backgrounds
#######################################
#### Fitting a GMM to each protein ####
#######################################
message("Fitting a 2-component gaussian mixture model to each protein")
gmm.list <- list()
for(x in seq_along(levels(samp.names))){
x.samp <- levels(samp.names)[x]
x.mclust <- apply(adtcpm.bg.list[[x.samp]], 1, function(P) {
g = mclust::Mclust(P, G=2, warn=FALSE , verbose=FALSE)
return(g$parameters$mean)})
x.means <- do.call(rbind.data.frame, list(t(x.mclust)))
colnames(x.means) <- paste0("Mean", 1:2)
gmm.list[[x.samp]] <- x.means
print(dim(x.means))
# why is this model so bad at finding the 3 components?!
# do I need to fit a separate model to each protein, rather than each cell??
x.plot <- ggplot(melt(x.means), aes(x=value, fill=variable)) +
geom_histogram(bins=100) +
scale_fill_colorblind() +
facet_wrap(~variable, ncol=1)
ggsave(x.plot, filename=paste0(opt$plots, "/", samp.x, "-ADT_logCPM_hist-badcells.pdf"),
height=4.95, width=4.95, useDingbats=FALSE)
}
message("Extracting ADT libraries to keep")
## The white list gives us the cell barcodes to keep
keep.adtcpm.list <- list()
for(x in seq_along(levels(samp.names))){
samp.x <- levels(samp.names)[x]
print(length(keep.cell.list[[samp.x]]))
x.keep.mtx <- matrix.list[[samp.x]][all.feature.list[[samp.x]]$V3 == "Antibody Capture", keep.cell.list[[samp.x]]]
x.keep.mtx <- apply(x.keep.mtx, 2, FUN=function(X) log10((X+1)/((sum(X)+1)/(1e6+1))))
keep.adtcpm.list[[samp.x]] <- x.keep.mtx
}
message(paste0("Retained ", sum(unlist(lapply(keep.adtcpm.list, ncol))), " barcodes"))
message("Removing background signal per-protein")
for(x in seq_along(levels(samp.names))){
x.samp <- levels(samp.names)[x]
x.bgshift <- apply(keep.adtcpm.list[[x.samp]], 2,
FUN=function(X) X - gmm.list[[x.samp]][, 1])
x.bgshift <- as.data.frame(x.bgshift)
colnames(x.bgshift) <- paste0(x.samp, "_", colnames(x.bgshift))
print(dim(x.bgshift))
n.prots <- nrow(x.bgshift)
pdf(paste0(opt$plots, "/", x.samp, "_logCPM_BgShift-histogram.pdf"),
height=18.95, width=12.95, useDingbats=FALSE)
par(mfrow=c(28, 7), mai=c(0, 0, 0, 0))
for(i in seq_along(1:n.prots)){
hist(unlist(x.bgshift[x, ]), xlab="", ylab="", main="", breaks=100)
}
dev.off()
x.bgshift$ADT <- all.feature.list[[x.samp]]$V2[all.feature.list[[x.samp]]$V3 == "Antibody Capture"]
x.ofile <- gzfile(paste0(opt$output, "/Covid_ADT_", x.samp, "_bgCPM.txt.gz"), "w")
print(dim(x.bgshift))
write.table(x.bgshift, file=x.ofile, quote=FALSE, row.names=FALSE, sep="\t")
close(x.ofile)
}
|
# Install required packages -----------------------------------------------
install.packages("forecast")
install.packages("fpp")
install.packages("ggplot2")
# load those packages to the current session ------------------------------
library(ggplot2)
library(forecast)
library(fpp) # get a dataset to work with from 'fpp' - datasets from forecasting principles and practice
View(elecequip)
# Explore elecequip dataset -----------------------------------------------
?elecequip
plot(elecequip)
head(elecequip)
class(elecequip) # ts class
elecequip
View(elecequip)
# Decompose time series with STL ------------------------------------------
# Time-Series Components
fit <- stl(elecequip, s.window=5) #seasonal decomposition
plot(fit) # show the components
autoplot(fit) # plot with ggplot2
# Plot elecequip time series ----------------------------------------------
# Raw series in grey with the STL trend component overlaid in red.
# (Removed unresolved git merge-conflict markers that had been committed
# here; both sides of the conflict were empty.)
plot(elecequip, col='gray',
     main="Electrical Equipment Manufacturing",
     ylab='New orders index', xlab="")
lines(fit$time.series[,2], col="red", ylab="Trend")
# Apply Exponential Smoothing to Oil Data ---------------------------------
plot(oil)
?oil
# Exponential smoothing models - fitted with different alpha (smoothing) and h (forecast-horizon) parameter settings
fit1 <- ses(oil, alpha=0.2, initial="simple", h=3) # alpha=smppthing parameter h=periods
fit2 <- ses(oil, alpha=0.6, initial="simple", h=3)
fit3 <- ses(oil, h=3)
fit4 <- ses(oil, alpha=0.6, initial="simple", h=1)
# Plot the model fit (training data)
plot(fit1, plot.conf=FALSE, ylab="Oil (millions of tonnes)",
xlab="Year", main="", fcol="white", type="o")
lines(fitted(fit1), col="blue", type="o")
lines(fitted(fit2), col="red", type="o")
lines(fitted(fit3), col="green", type="o")
lines(fitted(fit4), col="yellow", type="o")
# PLot the forecast
plot(fit1, plot.conf=FALSE, ylab="Oil (millions of tonnes)",
xlab="Year", main="", fcol="white", type="o")
lines(fit1$mean, col="blue", type="o")
lines(fit2$mean, col="red", type="o")
lines(fit3$mean, col="green", type="o")
# Holt Winters
aust <- window(austourists, start=2005)
plot(aust)
fit1 <- hw(aust, seasonal="additive")
fit2 <- hw(aust, seasonal="multiplicative")
plot(fit2, ylab="International visitnor night in Australia (millions)",
plot.conf=FALSE, type="o", fcol="white", xlab="Year")
lines(fitted(fit1), col="red", lty=2)
lines(fitted(fit2), col="green", lty=2)
#add the forecasts
lines(fit1$mean, type="o", col="red")
lines(fit2$mean, type="o", col="green")
# Monthly anti-diabetic drug sales in Australia from 1992 to 2008 ---------
?a10
# Seasonal Differencing
plot(a10)
plot(log(a10)) # variance (as well as the scale) reduces for the log values
# for exponential models, differencing ones gives you a striaght line, differencing twice, straight
plot(stl(a10, s.window=12))
plot(stl(log(a10), s.window=12))
plot(diff(log(a10), s.window=12))
WWWusage
diff(WWWusage)
# ARIMA model on diabetes dataset -----------------------------------------
# ARIMA Model
?WWWusage
tsdisplay(diff(WWWusage), main="")
fit <- Arima(WWWusage, order=c(3,1,1))
summary(fit)
plot(forecast(fit))
# now using auto method for selecting order
fit1 <- auto.arima(WWWusage)
plot(forecast(fit1))
summary(fit1)
# for a10 dataset
fit2 <- auto.arima(a10)
plot(forecast(fit2))
summary(fit2)
# Evaluate forecast models using Australian beer dataset ------------------
# Evaluating the models
beer2 <- window(ausbeer, start=1992, end=2006-.1)
beerfit1 <- meanf(beer2, h=11)
beerfit2 <- rwf(beer2, h=11)
beerfit3 <- snaive(beer2, h=11)
beerfit4 <- auto.arima(beer2)
# Mean-forecast as the base plot; title typo fixed ("quaterly").
plot(beerfit1, plot.conf=FALSE,
     main="forecasts for quarterly beer production")
lines(beerfit2$mean, col="red")
lines(beerfit3$mean, col="green")
plot(forecast(beerfit4), col="grey")
lines(ausbeer)
|
/6. labs1/Week 8/Time_Series_Lab.R
|
no_license
|
wendy-wong/WENDY_DATA_PROJECT
|
R
| false
| false
| 3,818
|
r
|
# Install required packages -----------------------------------------------
install.packages("forecast")
install.packages("fpp")
install.packages("ggplot2")
# load those packages to the current session ------------------------------
library(ggplot2)
library(forecast)
library(fpp) # get a dataset to work with from 'fpp' - datasets from forecasting principles and practice
View(elecequip)
# Explore elecequip dataset -----------------------------------------------
?elecequip
plot(elecequip)
head(elecequip)
class(elecequip) # ts class
elecequip
View(elecequip)
# Decompose time series with STL ------------------------------------------
# Time-Series Components
fit <- stl(elecequip, s.window=5) #seasonal decomposition
plot(fit) # show the components
autoplot(fit) # plot with ggplot2
# Plot elecequip time series ----------------------------------------------
# Raw series in grey with the STL trend component overlaid in red.
# (Removed unresolved git merge-conflict markers that had been committed
# here; both sides of the conflict were empty.)
plot(elecequip, col='gray',
     main="Electrical Equipment Manufacturing",
     ylab='New orders index', xlab="")
lines(fit$time.series[,2], col="red", ylab="Trend")
# Apply Exponential Smoothing to Oil Data ---------------------------------
plot(oil)
?oil
# Exponential smoothing models - fitted with different alpha (smoothing) and h (forecast-horizon) parameter settings
fit1 <- ses(oil, alpha=0.2, initial="simple", h=3) # alpha=smppthing parameter h=periods
fit2 <- ses(oil, alpha=0.6, initial="simple", h=3)
fit3 <- ses(oil, h=3)
fit4 <- ses(oil, alpha=0.6, initial="simple", h=1)
# Plot the model fit (training data)
plot(fit1, plot.conf=FALSE, ylab="Oil (millions of tonnes)",
xlab="Year", main="", fcol="white", type="o")
lines(fitted(fit1), col="blue", type="o")
lines(fitted(fit2), col="red", type="o")
lines(fitted(fit3), col="green", type="o")
lines(fitted(fit4), col="yellow", type="o")
# PLot the forecast
plot(fit1, plot.conf=FALSE, ylab="Oil (millions of tonnes)",
xlab="Year", main="", fcol="white", type="o")
lines(fit1$mean, col="blue", type="o")
lines(fit2$mean, col="red", type="o")
lines(fit3$mean, col="green", type="o")
# Holt Winters
aust <- window(austourists, start=2005)
plot(aust)
fit1 <- hw(aust, seasonal="additive")
fit2 <- hw(aust, seasonal="multiplicative")
plot(fit2, ylab="International visitnor night in Australia (millions)",
plot.conf=FALSE, type="o", fcol="white", xlab="Year")
lines(fitted(fit1), col="red", lty=2)
lines(fitted(fit2), col="green", lty=2)
#add the forecasts
lines(fit1$mean, type="o", col="red")
lines(fit2$mean, type="o", col="green")
# Monthly anti-diabetic drug sales in Australia from 1992 to 2008 ---------
?a10
# Seasonal Differencing
plot(a10)
plot(log(a10)) # variance (as well as the scale) reduces for the log values
# for exponential models, differencing ones gives you a striaght line, differencing twice, straight
plot(stl(a10, s.window=12))
plot(stl(log(a10), s.window=12))
plot(diff(log(a10), s.window=12))
WWWusage
diff(WWWusage)
# ARIMA model on diabetes dataset -----------------------------------------
# ARIMA Model
?WWWusage
tsdisplay(diff(WWWusage), main="")
fit <- Arima(WWWusage, order=c(3,1,1))
summary(fit)
plot(forecast(fit))
# now using auto method for selecting order
fit1 <- auto.arima(WWWusage)
plot(forecast(fit1))
summary(fit1)
# for a10 dataset
fit2 <- auto.arima(a10)
plot(forecast(fit2))
summary(fit2)
# Evaluate forecast models using Australian beer dataset ------------------
# Evaluating the models
beer2 <- window(ausbeer, start=1992, end=2006-.1)
beerfit1 <- meanf(beer2, h=11)
beerfit2 <- rwf(beer2, h=11)
beerfit3 <- snaive(beer2, h=11)
beerfit4 <- auto.arima(beer2)
# Mean-forecast as the base plot; title typo fixed ("quaterly").
plot(beerfit1, plot.conf=FALSE,
     main="forecasts for quarterly beer production")
lines(beerfit2$mean, col="red")
lines(beerfit3$mean, col="green")
plot(forecast(beerfit4), col="grey")
lines(ausbeer)
|
## Test code for Normalization + transformation
## Builds a small synthetic data set (three random columns plus one constant
## "site" column) and checks that standardize_all()/destandardize_all()
## round-trip the data.
x <- rnorm(1000)
y <- rnorm(1000)
z <- rnorm(1000)
site <- rep_len(0.69, 1000)
test.data <- data.frame(x,y,z, site)
# gen_config()/clean_up_*() manage the package-level state the
# (de)standardization routines read from.
gen_config()
test_that("Check normalization, denormalization",{
std.data <- standardize_all(test.data)
# The constant site column should pass through standardization unchanged.
expect_true(all(std.data$site == 0.69))
## Check Denormalization
normal.data <- destandardize_all(std.data)
clean_up_stats()
clean_up_config()
# NOTE(review): only the first three columns are compared -- presumably the
# zero-variance site column is excluded by the round trip; confirm.
expect_equivalent(normal.data, test.data[1:3])
})
|
/tests/testthat/test_standardize.R
|
no_license
|
NSAPH/airpred
|
R
| false
| false
| 479
|
r
|
## Test code for Normalization + transformation
## Builds a small synthetic data set (three random columns plus one constant
## "site" column) and checks that standardize_all()/destandardize_all()
## round-trip the data.
x <- rnorm(1000)
y <- rnorm(1000)
z <- rnorm(1000)
site <- rep_len(0.69, 1000)
test.data <- data.frame(x,y,z, site)
# gen_config()/clean_up_*() manage the package-level state the
# (de)standardization routines read from.
gen_config()
test_that("Check normalization, denormalization",{
std.data <- standardize_all(test.data)
# The constant site column should pass through standardization unchanged.
expect_true(all(std.data$site == 0.69))
## Check Denormalization
normal.data <- destandardize_all(std.data)
clean_up_stats()
clean_up_config()
# NOTE(review): only the first three columns are compared -- presumably the
# zero-variance site column is excluded by the round trip; confirm.
expect_equivalent(normal.data, test.data[1:3])
})
|
library(dplyr)
library(rnaturalearth)
library(sf)
library(sp)
library(raster)
library(rgdal)
library(RStoolbox)
select = dplyr::select
#----Making spatial extent----
# making a function for coordinates() w/in a pipe
# `coordinates(x) <- ~long+lat` (sp) promotes a data.frame with long/lat
# columns to a spatial points object, but the replacement form mutates in
# place; this wrapper returns the promoted object so it can be used
# mid-pipeline.
coordinates_iP = function(spdf){
coordinates(spdf) = ~long+lat
return(spdf)
}
df = expand.grid(data.frame(lat = c(-60, 40), long = c(-125,-30)))
spdf = coordinates_iP(df)
#----Creating study raster----
# getting extent shapefile
names_iP = function(spolydf, newLayerName) {
  # Pipe-friendly renaming helper: `names<-` is a replacement form, so this
  # wrapper applies the new layer name(s) and hands the object back for use
  # mid-pipeline.
  stats::setNames(spolydf, newLayerName)
}
# rasterize a shapefile with a new resolution
# Builds a template raster matching the shapefile's extent at the requested
# resolution, then burns the shapefile into it; returns the rasterized layer.
rasterize2 = function(shapefile, resolution) {
# Make empty raster (initial 500x500 grid; re-dimensioned by res() below)
r = raster(ncol=500, nrow=500)
# set extent to shapefile
extent(r) = extent(shapefile)
# set desired resolution
res(r) = resolution
# assign random values to pixels
# NOTE(review): these placeholder values appear to be overwritten by
# rasterize() below -- confirm whether this fill is actually required.
r[] = runif(n = ncell(r), min=0, max=1)
# rasterize the shapefile
r_shp = rasterize(shapefile, r)
return(r_shp)
}
# get world polys, crop, dissolve, rename, rasterize
master = ne_countries(type = 'countries', scale = 'small') %>%
crop(spdf) %>%
aggregate() %>%
as('SpatialPolygonsDataFrame') %>%
names_iP(.,'studyArea') %>%
#writeOGR(dsn=stdyshp_dir, layer='studyArea', driver='ESRI Shapefile') %>%
rasterize2(., 0.1)
#----Raster coregistering----
# canopy height
m.canopy_height = "data/GIS/canopyHeight/Simard_Pinto_3DGlobalVeg_JGR.tif" %>%
raster() %>%
projectRaster(from = ., to = master, method="bilinear")
# human density
m.human_density_15 = "data\\GIS\\humanDensity\\gpw-v4-population-density-rev11_2015_30_sec_tif\\gpw_v4_population_density_rev11_2015_30_sec.tif" %>%
raster() %>%
projectRaster(from = ., to = master, method="bilinear")
# human footprint
m.human_footprint = "data\\GIS\\humanFootprint\\wildareas-v3-2009-human-footprint-geotiff\\wildareas-v3-2009-human-footprint.tif" %>%
raster() %>%
projectRaster(from = ., to = master, method="bilinear")
# elevation
m.srtm = "data/GIS/srtm/srtm.tif" %>%
raster() %>%
projectRaster(from = ., to = master, method="ngb")
|
/CompileGIS/RasterCoRegister.R
|
permissive
|
GatesDupont/Jaguars
|
R
| false
| false
| 2,067
|
r
|
library(dplyr)
library(rnaturalearth)
library(sf)
library(sp)
library(raster)
library(rgdal)
library(RStoolbox)
select = dplyr::select
#----Making spatial extent----
# making a function for coordinates() w/in a pipe
# `coordinates(x) <- ~long+lat` (sp) promotes a data.frame with long/lat
# columns to a spatial points object, but the replacement form mutates in
# place; this wrapper returns the promoted object so it can be used
# mid-pipeline.
coordinates_iP = function(spdf){
coordinates(spdf) = ~long+lat
return(spdf)
}
df = expand.grid(data.frame(lat = c(-60, 40), long = c(-125,-30)))
spdf = coordinates_iP(df)
#----Creating study raster----
# getting extent shapefile
names_iP = function(spolydf, newLayerName) {
  # Pipe-friendly renaming helper: `names<-` is a replacement form, so this
  # wrapper applies the new layer name(s) and hands the object back for use
  # mid-pipeline.
  stats::setNames(spolydf, newLayerName)
}
# rasterize a shapefile with a new resolution
# Builds a template raster matching the shapefile's extent at the requested
# resolution, then burns the shapefile into it; returns the rasterized layer.
rasterize2 = function(shapefile, resolution) {
# Make empty raster (initial 500x500 grid; re-dimensioned by res() below)
r = raster(ncol=500, nrow=500)
# set extent to shapefile
extent(r) = extent(shapefile)
# set desired resolution
res(r) = resolution
# assign random values to pixels
# NOTE(review): these placeholder values appear to be overwritten by
# rasterize() below -- confirm whether this fill is actually required.
r[] = runif(n = ncell(r), min=0, max=1)
# rasterize the shapefile
r_shp = rasterize(shapefile, r)
return(r_shp)
}
# get world polys, crop, dissolve, rename, rasterize
master = ne_countries(type = 'countries', scale = 'small') %>%
crop(spdf) %>%
aggregate() %>%
as('SpatialPolygonsDataFrame') %>%
names_iP(.,'studyArea') %>%
#writeOGR(dsn=stdyshp_dir, layer='studyArea', driver='ESRI Shapefile') %>%
rasterize2(., 0.1)
#----Raster coregistering----
# canopy height
m.canopy_height = "data/GIS/canopyHeight/Simard_Pinto_3DGlobalVeg_JGR.tif" %>%
raster() %>%
projectRaster(from = ., to = master, method="bilinear")
# human density
m.human_density_15 = "data\\GIS\\humanDensity\\gpw-v4-population-density-rev11_2015_30_sec_tif\\gpw_v4_population_density_rev11_2015_30_sec.tif" %>%
raster() %>%
projectRaster(from = ., to = master, method="bilinear")
# human footprint
m.human_footprint = "data\\GIS\\humanFootprint\\wildareas-v3-2009-human-footprint-geotiff\\wildareas-v3-2009-human-footprint.tif" %>%
raster() %>%
projectRaster(from = ., to = master, method="bilinear")
# elevation
m.srtm = "data/GIS/srtm/srtm.tif" %>%
raster() %>%
projectRaster(from = ., to = master, method="ngb")
|
pacman::p_load(tidyverse, magrittr, data.table, janitor, readxl)
input =
"Legislatives 2022/resultats-par-niveau-burvot-t1-france-entiere.xlsx" %>%
readxl::read_excel()
input %<>% janitor::clean_names()
input %<>% rowid_to_column()
CLINNE <- function(data) {
  # Relabel one extracted (row id, candidate, votes) column triple with the
  # canonical names used downstream.
  stats::setNames(data, c("rowid", "candidat", "voix"))
}
input %>% names
Scores =
map(.x = 8*0:21, .f = ~input[,c(1, 27 + ., 28 + .)]) %>%
map(.f = CLINNE) %>%
rbindlist() %>% drop_na()
Bureaux = input[, 1:8]
Abstention =
input %>% select(rowid, abstentions, blancs, nuls) %>%
pivot_longer(cols = -rowid,
names_to = "candidat",
values_to = "voix")
output_T1 =
bind_rows(Scores, Abstention) %>%
inner_join(x = Bureaux, by = "rowid") %>%
drop_na(candidat, voix)
output_T1 %<>%
group_by(rowid) %>%
mutate(score = voix/sum(voix)) %>%
ungroup()
rm(input, Scores, Abstention)
gc()
|
/Legislatives 2022/Fetch_data_bdv.R
|
no_license
|
Reinaldodos/Elections
|
R
| false
| false
| 907
|
r
|
pacman::p_load(tidyverse, magrittr, data.table, janitor, readxl)
input =
"Legislatives 2022/resultats-par-niveau-burvot-t1-france-entiere.xlsx" %>%
readxl::read_excel()
input %<>% janitor::clean_names()
input %<>% rowid_to_column()
CLINNE <- function(data) {
  # Relabel one extracted (row id, candidate, votes) column triple with the
  # canonical names used downstream.
  stats::setNames(data, c("rowid", "candidat", "voix"))
}
input %>% names
Scores =
map(.x = 8*0:21, .f = ~input[,c(1, 27 + ., 28 + .)]) %>%
map(.f = CLINNE) %>%
rbindlist() %>% drop_na()
Bureaux = input[, 1:8]
Abstention =
input %>% select(rowid, abstentions, blancs, nuls) %>%
pivot_longer(cols = -rowid,
names_to = "candidat",
values_to = "voix")
output_T1 =
bind_rows(Scores, Abstention) %>%
inner_join(x = Bureaux, by = "rowid") %>%
drop_na(candidat, voix)
output_T1 %<>%
group_by(rowid) %>%
mutate(score = voix/sum(voix)) %>%
ungroup()
rm(input, Scores, Abstention)
gc()
|
#Загрузите данные в датафрейм. Адрес: github https://raw???путь_к_файлу_найдите_сами???/data/gmp.dat
gmp <- read.table("https://raw.githubusercontent.com/SergeyMirvoda/MD-DA-2018/master/data/gmp.dat", skip = 1)
names(gmp) <- c("ID", "MSA", "gmp", "pcgmp")
gmp$pop <- gmp$gmp/gmp$pcgmp
# Функция, высчитывающая коэффициент alpha для модели Y=y0*N^alpha (источник статьи https://arxiv.org/pdf/1102.4101.pdf)
# Работает на основе критерия наименьших квадратов
# Входные параметры
# a - примерная оценка, коэффициент, который требуется более точно установить
# y0 - коэффициент y0 для заданной модели
# response - влияемый компонент
# predictor - влияющий компонент
# maximum.iterations - ограничение, чтобы избежать зацикливания функции
# deriv - вычисляемая производная
# deriv.step - шаг дифференцирования
# step.scale - шаг приближения
# stopping.deriv - позволяет выйти из цикла, когда deriv становится меньше этого параметра
# Выходные параметры:
# $a - полученный коэффициент
# $iterations - количество выполненных итераций
# $converged - был ли произведен выход из цикла или было достигнуто максимальное количество шагов в цикле
# Estimate the scaling exponent `a` of the model Y = y0 * N^a
# (cf. https://arxiv.org/pdf/1102.4101.pdf) by gradient descent on the
# mean squared error, using a forward finite-difference derivative.
#
# Arguments:
#   a                  -- initial guess for the exponent
#   y0                 -- fixed prefactor of the power-law model
#   response           -- observed response values (Y)
#   predictor          -- predictor values (N)
#   maximum.iterations -- iteration cap to avoid an endless descent
#   deriv.step         -- step size for the finite-difference derivative
#   step.scale         -- gradient-descent learning rate
#   stopping.deriv     -- stop once |derivative| falls below this value
# Returns a list with:
#   $a          -- the estimated exponent
#   $iterations -- number of iterations performed
#   $converged  -- TRUE if it stopped before hitting maximum.iterations
estimate.scaling.exponent <- function(a, y0=6611, response=gmp$pcgmp,
                                      predictor = gmp$pop, maximum.iterations=100, deriv.step = 1/100,
                                      step.scale = 1e-12, stopping.deriv = 1/100) {
  # Mean squared error of the power-law model at exponent `a`.
  mse <- function(a) { mean((response - y0*predictor^a)^2) }
  for (iteration in 1:maximum.iterations) {
    # Forward finite-difference approximation of d(MSE)/da.
    deriv <- (mse(a+deriv.step) - mse(a))/deriv.step
    a <- a - step.scale*deriv
    # IDIOM FIX: use the `break` keyword, not the `break()` call form.
    if (stopping.deriv >= abs(deriv) ) { break }
  }
  # Guard kept from the original author: convergence within 10 iterations is
  # treated as suspicious (it happens only for wildly wrong starting values),
  # so fail loudly rather than return a bogus estimate.
  stopifnot(iteration > 10)
  fit <- list(a=a,iterations=iteration,
              converged=(iteration < maximum.iterations))
  return(fit)
}
#Пример вызова с начальным занчением a
a <- estimate.scaling.exponent(0.15)
#С помошью полученного коэффициента постройте кривую (функция curve) зависимости
curve(6611*x^a$a, xlab = "Население, человек", ylab = "Доход на душу населения, $ на душу населения в год", from = min(gmp$pop), to=max(gmp$pop))
#Удалите точку из набора исходных данных случайным образом, как изменилось статистическая оценка коэффициента a?
rnd <- runif(1 ,min = 1, max = max(gmp$ID))
gmp.onedel <- gmp
gmp.onedel <- gmp.onedel[-rnd,]
b <- estimate.scaling.exponent(0.15, response = gmp.onedel$pcgmp, predictor = gmp.onedel$pop)
b$a - a$a
# Коэффициент поменялся не сильно, уменьшился на 0.00005 при случайном значении rnd = 40
#Запустите оценку несколько раз с разных стартовых точек. Как изменилось значение a?
estimate.scaling.exponent(1) # -4701782057 за 1 итерацию
estimate.scaling.exponent(0.5) # -990.2312 за 2 итерации
estimate.scaling.exponent(0.3) # -2.850634 за 2 итерации
estimate.scaling.exponent(0.25, maximum.iterations = 10000) # 0.1211533 за 5607 итераций
estimate.scaling.exponent(0.2) # 0.1211533 за 70 итераций
estimate.scaling.exponent(0.1211533) # 0.1211533 за 28 итерации
estimate.scaling.exponent(0.12) # 0.1211533 за 54 итерации
estimate.scaling.exponent(0.10) # 0.1211533 за 61 итерацию
estimate.scaling.exponent(0.05) # 0.1211533 за 69 итераций
estimate.scaling.exponent(0) # 0.1211533 за 78 итераций
estimate.scaling.exponent(-0.1, maximum.iterations = 1000) # 0.1211533 за 117 итераций
estimate.scaling.exponent(-0.5, maximum.iterations = 10000) # 0.1211533 за 7459 итераций
estimate.scaling.exponent(-1, maximum.iterations = 1000000) # -0.9705958 за 1000000 итераций
estimate.scaling.exponent(-10) # -10 за 1 итерацию
# Таким образом, чем ближе передан начальный коэффициент а к итоговому, тем меньшее количество итераций требуется на его расчет.
# При передаче слишком большого значения функция выдает некорректный результат и производит 1-2 итерации
# для избежания ошибок в функцию была добавлена строка stopifnot(iteration > 10), которая выдаст ошибку при некорректной работе
|
/classwork4/gmp.R
|
no_license
|
normall777/MyDataAccessMethods
|
R
| false
| false
| 5,406
|
r
|
#Загрузите данные в датафрейм. Адрес: github https://raw???путь_к_файлу_найдите_сами???/data/gmp.dat
gmp <- read.table("https://raw.githubusercontent.com/SergeyMirvoda/MD-DA-2018/master/data/gmp.dat", skip = 1)
names(gmp) <- c("ID", "MSA", "gmp", "pcgmp")
gmp$pop <- gmp$gmp/gmp$pcgmp
# Функция, высчитывающая коэффициент alpha для модели Y=y0*N^alpha (источник статьи https://arxiv.org/pdf/1102.4101.pdf)
# Работает на основе критерия наименьших квадратов
# Входные параметры
# a - примерная оценка, коэффициент, который требуется более точно установить
# y0 - коэффициент y0 для заданной модели
# response - влияемый компонент
# predictor - влияющий компонент
# maximum.iterations - ограничение, чтобы избежать зацикливания функции
# deriv - вычисляемая производная
# deriv.step - шаг дифференцирования
# step.scale - шаг приближения
# stopping.deriv - позволяет выйти из цикла, когда deriv становится меньше этого параметра
# Выходные параметры:
# $a - полученный коэффициент
# $iterations - количество выполненных итераций
# $converged - был ли произведен выход из цикла или было достигнуто максимальное количество шагов в цикле
# Estimate the scaling exponent `a` of the model Y = y0 * N^a
# (cf. https://arxiv.org/pdf/1102.4101.pdf) by gradient descent on the
# mean squared error, using a forward finite-difference derivative.
#
# Arguments:
#   a                  -- initial guess for the exponent
#   y0                 -- fixed prefactor of the power-law model
#   response           -- observed response values (Y)
#   predictor          -- predictor values (N)
#   maximum.iterations -- iteration cap to avoid an endless descent
#   deriv.step         -- step size for the finite-difference derivative
#   step.scale         -- gradient-descent learning rate
#   stopping.deriv     -- stop once |derivative| falls below this value
# Returns a list with:
#   $a          -- the estimated exponent
#   $iterations -- number of iterations performed
#   $converged  -- TRUE if it stopped before hitting maximum.iterations
estimate.scaling.exponent <- function(a, y0=6611, response=gmp$pcgmp,
                                      predictor = gmp$pop, maximum.iterations=100, deriv.step = 1/100,
                                      step.scale = 1e-12, stopping.deriv = 1/100) {
  # Mean squared error of the power-law model at exponent `a`.
  mse <- function(a) { mean((response - y0*predictor^a)^2) }
  for (iteration in 1:maximum.iterations) {
    # Forward finite-difference approximation of d(MSE)/da.
    deriv <- (mse(a+deriv.step) - mse(a))/deriv.step
    a <- a - step.scale*deriv
    # IDIOM FIX: use the `break` keyword, not the `break()` call form.
    if (stopping.deriv >= abs(deriv) ) { break }
  }
  # Guard kept from the original author: convergence within 10 iterations is
  # treated as suspicious (it happens only for wildly wrong starting values),
  # so fail loudly rather than return a bogus estimate.
  stopifnot(iteration > 10)
  fit <- list(a=a,iterations=iteration,
              converged=(iteration < maximum.iterations))
  return(fit)
}
#Пример вызова с начальным занчением a
a <- estimate.scaling.exponent(0.15)
#С помошью полученного коэффициента постройте кривую (функция curve) зависимости
curve(6611*x^a$a, xlab = "Население, человек", ylab = "Доход на душу населения, $ на душу населения в год", from = min(gmp$pop), to=max(gmp$pop))
#Удалите точку из набора исходных данных случайным образом, как изменилось статистическая оценка коэффициента a?
rnd <- runif(1 ,min = 1, max = max(gmp$ID))
gmp.onedel <- gmp
gmp.onedel <- gmp.onedel[-rnd,]
b <- estimate.scaling.exponent(0.15, response = gmp.onedel$pcgmp, predictor = gmp.onedel$pop)
b$a - a$a
# Коэффициент поменялся не сильно, уменьшился на 0.00005 при случайном значении rnd = 40
#Запустите оценку несколько раз с разных стартовых точек. Как изменилось значение a?
estimate.scaling.exponent(1) # -4701782057 за 1 итерацию
estimate.scaling.exponent(0.5) # -990.2312 за 2 итерации
estimate.scaling.exponent(0.3) # -2.850634 за 2 итерации
estimate.scaling.exponent(0.25, maximum.iterations = 10000) # 0.1211533 за 5607 итераций
estimate.scaling.exponent(0.2) # 0.1211533 за 70 итераций
estimate.scaling.exponent(0.1211533) # 0.1211533 за 28 итерации
estimate.scaling.exponent(0.12) # 0.1211533 за 54 итерации
estimate.scaling.exponent(0.10) # 0.1211533 за 61 итерацию
estimate.scaling.exponent(0.05) # 0.1211533 за 69 итераций
estimate.scaling.exponent(0) # 0.1211533 за 78 итераций
estimate.scaling.exponent(-0.1, maximum.iterations = 1000) # 0.1211533 за 117 итераций
estimate.scaling.exponent(-0.5, maximum.iterations = 10000) # 0.1211533 за 7459 итераций
estimate.scaling.exponent(-1, maximum.iterations = 1000000) # -0.9705958 за 1000000 итераций
estimate.scaling.exponent(-10) # -10 за 1 итерацию
# Таким образом, чем ближе передан начальный коэффициент а к итоговому, тем меньшее количество итераций требуется на его расчет.
# При передаче слишком большого значения функция выдает некорректный результат и производит 1-2 итерации
# для избежания ошибок в функцию была добавлена строка stopifnot(iteration > 10), которая выдаст ошибку при некорректной работе
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualizer.R
\name{clade.anno}
\alias{clade.anno}
\title{clade.anno}
\usage{
clade.anno(gtree, anno.data, alpha = 0.2, anno.depth = 3, anno.x = 10,
anno.y = 40)
}
\arguments{
\item{gtree}{a ggtree object}
\item{anno.data}{a 2 column data.frame of annotation information. It has columns of clade name and color used for highlighting.}
\item{alpha}{alpha parameter for shading}
\item{anno.depth}{more specific clades will be shown on the side}
\item{anno.x}{x position of annotations}
\item{anno.y}{y position of annotations}
}
\value{
a ggtree object
}
\description{
annotate a ggtree plot to highlight certain clades
}
\author{
Chenhao Li, Guangchuang Yu, Chenghao Zhu
}
|
/man/clade.anno.Rd
|
no_license
|
zhuchcn/microbiomeViz
|
R
| false
| true
| 756
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualizer.R
\name{clade.anno}
\alias{clade.anno}
\title{clade.anno}
\usage{
clade.anno(gtree, anno.data, alpha = 0.2, anno.depth = 3, anno.x = 10,
anno.y = 40)
}
\arguments{
\item{gtree}{a ggtree object}
\item{anno.data}{a 2 column data.frame of annotation information. It has columns of clade name and color used for highlighting.}
\item{alpha}{alpha parameter for shading}
\item{anno.depth}{more specific clades will be shown on the side}
\item{anno.x}{x position of annotations}
\item{anno.y}{y position of annotations}
}
\value{
a ggtree object
}
\description{
annotate a ggtree plot to highlight certain clades
}
\author{
Chenhao Li, Guangchuang Yu, Chenghao Zhu
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.