content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#############################################
#############################################
### ###
### TEMPLATE FOR SDM WORKFLOW ###
### ###
### This script file is a template for ###
### the species distribution modelling ###
### workflow as part of the bushfire 2019 ###
### - 2020 response analysis. ###
### ###
### For each actual species workflow we ###
### should copy this template into a new ###
### file. Then edit the workflow to suit ###
### the species being analysed. ###
### ###
### Species-specific workflow files are ###
### to be saved as: ###
### ###
### "species_name_workflow.R" ###
### ###
### Species-specific workflow files are ###
### to be saved in the appropriate ###
### folder: ###
### ###
### "scripts/workflows" ###
### ###
#############################################
#############################################
########################
### WORKFLOW DETAILS ###
########################
## Species: Eulamprus heatwolei
## Guild: Reptiles
## Region:
## Analyst:
## Reviewer:
## SDM Required: Y/N
## Used existing SDM: Y/N
## Built SDM: Y/N
## Data available: PO/PA
## Type of SDM: PresBG/PresAbs/Hybrid
## Number of presence records:
## Number of background points:
## Type of background points:
## Date completed:
## Any other comments:
species <- "Eulamprus heatwolei"
guild <- "Reptiles"
date_cutoff <- "1990-01-01"
uncertainty_cutoff <- 1000
#####################
### Load Packages ###
#####################
.libPaths("/home/davidpw/R/lib/3.6")
library(bushfireSOS)
#########################
### Load Species Data ###
#########################
# Load appropriate species data.
# Comment out unused methods instead of deleting them in case more
# data becomes available at a later date
## Presence background data
# spp_data <- bushfireSOS::load_pres_bg_data_AUS(species = species,
# region = c("VIC", "NSW", "QLD", "SA", "NT", "WA", "TAS"),
# save.map = FALSE,
# map.directory = "outputs_1990/data_outputs",
# email = "dbrizuela@student.unimelb.edu.au",
# dir.NSW = "bushfireResponse_data/spp_data_raw",
# dir.QLD = "bushfireResponse_data/spp_data_raw",
# dir.WA = "bushfireResponse_data/spp_data_raw",
# dir.SA = "bushfireResponse_data/spp_data_raw",
# dir.VIC = "bushfireResponse_data/spp_data_raw",
# file.VIC = "bushfireResponse_data/VBA_data_inverts_plants_updated_verts_0209202/original_spp_list",
# file.SA = "bushfireResponse_data/spp_data_raw/BIODATAREQUESTS_table_UniMelbourne.xlsx",
# file.BirdLife = "bushfireResponse_data/spp_data_raw/BirdLife/BirdLife_data.csv",
# date.cutoff = date_cutoff,
# uncertainty.cutoff = uncertainty_cutoff)
#
# # spp_data$map
#
# region <- bushfireSOS::species_data_get_state_character(spp_data$data)
#
# ## Presence absence data
#
# # spp_data <- bushfireSOS::load_pres_abs_data(species,
# # region)
#
# ## Preliminary presence records check
# ## If <20 can end workflow here
#
# nrow(spp_data$data)
#
# saveRDS(spp_data,
# sprintf("bushfireResponse_data/outputs_1990/spp_data_tmp/spp_data_%s.rds",
# gsub(" ", "_", species)))
# Load the previously prepared species data (saved by the commented-out
# data-loading block above); spaces in the species name become underscores
# in the file name.
spp_data <- readRDS(sprintf("bushfireResponse_data/outputs_1990/spp_data_tmp/spp_data_%s.rds",
gsub(" ", "_", species)))
# States/territories covered by the records (per the helper's name --
# see bushfireSOS documentation to confirm).
region <- bushfireSOS::species_data_get_state_character(spp_data$data)
###############################
### Load Environmental Data ###
###############################
# Load appropriate environmental raster data
# Load the environmental raster stack for the relevant states only.
env_data <- bushfireSOS::load_env_data(stack_dir = "bushfireResponse_data/spatial_layers/raster_tiles",
region = region)
#########################
### Background Points ###
#########################
# Generate our background points
# (bias_layer appears to be a distance-to-roads raster used to mimic
# survey-effort bias -- confirm in bushfireSOS documentation)
spp_data <- bushfireSOS::background_points(species = species,
spp_data = spp_data,
guild = guild,
region = region,
background_group = "vertebrates_1990",
bias_layer = "bushfireResponse_data/spatial_layers/aus_road_distance_250_aa.tif",
sample_min = 100000)
## Check that there are >= 20 presences (1s) and an appropriate number of
## background points (1000 * number of states with data for target group,
## or 10,000 for random)
table(spp_data$data$Value)
#######################
### Data Extraction ###
#######################
# Attach environmental covariate values to every presence/background point.
spp_data <- bushfireSOS::env_data_extraction(spp_data = spp_data,
env_data = env_data)
# bushfireSOS::map_sp_data(spp_data,
# only_presences = TRUE)
# Persist the extracted data set so modelling can be re-run without
# repeating the extraction.
saveRDS(spp_data,
sprintf("bushfireResponse_data/outputs_1990/spp_data/spp_data_%s.rds",
gsub(" ", "_", species)))
#####################
### SDM Required? ###
#####################
# Do we have >=20 presence records?
# Y/N
# Can we fit an SDM for this species?
# Y/N
# If no, how should we create an output for Zonation?
#########################
### Use Existing SDM? ###
#########################
# Can we use an existing SDM for this species?
# Y/N
# If yes, how should we ensure it's suitable for our purposes?
if(nrow(spp_data$data[spp_data$data$Value == 1, ]) >= 20){
print("At least 20 presence records")
# feature_options <- c("default",
# "lqp",
# "lq",
# "l")
#
########################
### Model Evaluation ###
########################
# Perform appropriate model checking
# Ensure features is set identical to that of the above full model
# If Boyce Index returns NAs then re-run the cross-validation with
# one fewer fold i.e. 5 > 4 > 3 > 2 > 1
# for(feat in feature_options){
#
# features <- feat
#
# Cross-validate the presence/background model.
# Bug fix: tryCatch() matches handlers by condition CLASS, so the handler
# must be named "error". The original "err =" argument never matched
# anything, so a failing cross-validation aborted the whole script instead
# of returning NULL and reaching the "Model evaluation failed" branch.
model_eval <- tryCatch(
expr = bushfireSOS::cross_validate(spp_data = spp_data,
type = "po",
k = 5,
# parallel = FALSE,
filepath = sprintf("bushfireResponse_data/outputs_1990/model/MaxEnt_outputs_CV/%s",
gsub(" ", "_", species))),
error = function(err) NULL)
# if(!is.null(model_eval)){
# break()
# }
#
# }
if(!is.null(model_eval)){
print("Model evaluation complete, fitting full model")
saveRDS(model_eval,
sprintf("bushfireResponse_data/outputs_1990/model_eval/model_eval_%s.rds",
gsub(" ", "_", species)))
#####################
### Model Fitting ###
#####################
# Fit an appropriate model type
# Comment out unused methods instead of deleting them in case more
# data becomes available at a later date
## Presence only
## Features should equal "default" on first attempt. Can reduce
## to "lqp", "lq", or "l" if model is too complex to fit
model <- bushfireSOS::fit_pres_bg_model(spp_data = spp_data,
tuneParam = TRUE,
k = 5,
filepath = sprintf("bushfireResponse_data/outputs_1990/model/MaxEnt_outputs/%s",
gsub(" ", "_", species)))
saveRDS(model,
sprintf("bushfireResponse_data/outputs_1990/model/model_%s.rds",
gsub(" ", "_", species)))
## Presence absence model
# model <- bushfireSOS::fit_pres_abs_model()
## Hybrid model
# model <- bushfireSOS::fit_hybrid_model()
########################
### Model Prediction ###
########################
# Perform appropriate prediction
prediction <- bushfireSOS::model_prediction(model = model,
env_data = env_data,
mask = "bushfireResponse_data/spatial_layers/NIAFED_v20200428",
parallel = FALSE)
raster::writeRaster(prediction,
sprintf("bushfireResponse_data/outputs_1990/predictions/predictions_%s.tif",
gsub(" ", "_", species)),
overwrite = TRUE)
prediction_threshold <- bushfireSOS::predict_threshold(pred_ras = prediction,
threshold = model_eval[3])
raster::writeRaster(prediction_threshold,
sprintf("bushfireResponse_data/outputs_1990/predictions/predictions_%s_threshold.tif",
gsub(" ", "_", species)),
overwrite = TRUE)
# mapview::mapview(prediction)
} else{
print("Model evaluation failed")
}
} else {
print("Less than 20 records, no model fit")
}
#################
### Meta Data ###
#################
# Store meta data relevant to analysis
meta_data <- sessionInfo()
saveRDS(meta_data,
sprintf("bushfireResponse_data/outputs_1990/meta_data/meta_data_%s.rds",
gsub(" ", "_", species)))
| /scripts/workflows_spartan/Eulamprus_heatwolei_workflow_1990.R | no_license | Doi90/bushfireSOS_workflow | R | false | false | 10,631 | r | #############################################
#############################################
### ###
### TEMPLATE FOR SDM WORKFLOW ###
### ###
### This script file is a template for ###
### the species distribution modelling ###
### workflow as part of the bushfire 2019 ###
### - 2020 response analysis. ###
### ###
### For each actual species workflow we ###
### should copy this template into a new ###
### file. Then edit the workflow to suit ###
### the species being analysed. ###
### ###
### Species-specific workflow files are ###
### to be saved as: ###
### ###
### "species_name_workflow.R" ###
### ###
### Species-specific workflow files are ###
### to be saved in the appropriate ###
### folder: ###
### ###
### "scripts/workflows" ###
### ###
#############################################
#############################################
########################
### WORKFLOW DETAILS ###
########################
## Species: Eulamprus heatwolei
## Guild: Reptiles
## Region:
## Analyst:
## Reviewer:
## SDM Required: Y/N
## Used existing SDM: Y/N
## Built SDM: Y/N
## Data available: PO/PA
## Type of SDM: PresBG/PresAbs/Hybrid
## Number of presence records:
## Number of background points:
## Type of background points:
## Date completed:
## Any other comments:
species <- "Eulamprus heatwolei"
guild <- "Reptiles"
date_cutoff <- "1990-01-01"
uncertainty_cutoff <- 1000
#####################
### Load Packages ###
#####################
.libPaths("/home/davidpw/R/lib/3.6")
library(bushfireSOS)
#########################
### Load Species Data ###
#########################
# Load appropriate species data.
# Comment out unused methods instead of deleting them in case more
# data becomes available at a later date
## Presence background data
# spp_data <- bushfireSOS::load_pres_bg_data_AUS(species = species,
# region = c("VIC", "NSW", "QLD", "SA", "NT", "WA", "TAS"),
# save.map = FALSE,
# map.directory = "outputs_1990/data_outputs",
# email = "dbrizuela@student.unimelb.edu.au",
# dir.NSW = "bushfireResponse_data/spp_data_raw",
# dir.QLD = "bushfireResponse_data/spp_data_raw",
# dir.WA = "bushfireResponse_data/spp_data_raw",
# dir.SA = "bushfireResponse_data/spp_data_raw",
# dir.VIC = "bushfireResponse_data/spp_data_raw",
# file.VIC = "bushfireResponse_data/VBA_data_inverts_plants_updated_verts_0209202/original_spp_list",
# file.SA = "bushfireResponse_data/spp_data_raw/BIODATAREQUESTS_table_UniMelbourne.xlsx",
# file.BirdLife = "bushfireResponse_data/spp_data_raw/BirdLife/BirdLife_data.csv",
# date.cutoff = date_cutoff,
# uncertainty.cutoff = uncertainty_cutoff)
#
# # spp_data$map
#
# region <- bushfireSOS::species_data_get_state_character(spp_data$data)
#
# ## Presence absence data
#
# # spp_data <- bushfireSOS::load_pres_abs_data(species,
# # region)
#
# ## Preliminary presence records check
# ## If <20 can end workflow here
#
# nrow(spp_data$data)
#
# saveRDS(spp_data,
# sprintf("bushfireResponse_data/outputs_1990/spp_data_tmp/spp_data_%s.rds",
# gsub(" ", "_", species)))
spp_data <- readRDS(sprintf("bushfireResponse_data/outputs_1990/spp_data_tmp/spp_data_%s.rds",
gsub(" ", "_", species)))
region <- bushfireSOS::species_data_get_state_character(spp_data$data)
###############################
### Load Environmental Data ###
###############################
# Load appropriate environmental raster data
env_data <- bushfireSOS::load_env_data(stack_dir = "bushfireResponse_data/spatial_layers/raster_tiles",
region = region)
#########################
### Background Points ###
#########################
# Generate our background points
spp_data <- bushfireSOS::background_points(species = species,
spp_data = spp_data,
guild = guild,
region = region,
background_group = "vertebrates_1990",
bias_layer = "bushfireResponse_data/spatial_layers/aus_road_distance_250_aa.tif",
sample_min = 100000)
## Check that there are >= 20 presences (1s) and an appropriate number of
## background points (1000 * number of states with data for target group,
## or 10,000 for random)
table(spp_data$data$Value)
#######################
### Data Extraction ###
#######################
spp_data <- bushfireSOS::env_data_extraction(spp_data = spp_data,
env_data = env_data)
# bushfireSOS::map_sp_data(spp_data,
# only_presences = TRUE)
saveRDS(spp_data,
sprintf("bushfireResponse_data/outputs_1990/spp_data/spp_data_%s.rds",
gsub(" ", "_", species)))
#####################
### SDM Required? ###
#####################
# Do we have >=20 presence records?
# Y/N
# Can we fit an SDM for this species?
# Y/N
# If no, how should we create an output for Zonation?
#########################
### Use Existing SDM? ###
#########################
# Can we use an existing SDM for this species?
# Y/N
# If yes, how should we ensure its suitable for our purposes?
if(nrow(spp_data$data[spp_data$data$Value == 1, ]) >= 20){
print("At least 20 presence records")
# feature_options <- c("default",
# "lqp",
# "lq",
# "l")
#
########################
### Model Evaluation ###
########################
# Perform appropriate model checking
# Ensure features is set identical to that of the above full model
# If Boyce Index returns NAs then re-run the cross-validation with
# one fewer fold i.e. 5 > 4 > 3 > 2 > 1
# for(feat in feature_options){
#
# features <- feat
#
# Cross-validate the presence/background model.
# Bug fix: tryCatch() matches handlers by condition CLASS, so the handler
# must be named "error". The original "err =" argument never matched
# anything, so a failing cross-validation aborted the whole script instead
# of returning NULL and reaching the "Model evaluation failed" branch.
model_eval <- tryCatch(
expr = bushfireSOS::cross_validate(spp_data = spp_data,
type = "po",
k = 5,
# parallel = FALSE,
filepath = sprintf("bushfireResponse_data/outputs_1990/model/MaxEnt_outputs_CV/%s",
gsub(" ", "_", species))),
error = function(err) NULL)
# if(!is.null(model_eval)){
# break()
# }
#
# }
if(!is.null(model_eval)){
print("Model evaluation complete, fitting full model")
saveRDS(model_eval,
sprintf("bushfireResponse_data/outputs_1990/model_eval/model_eval_%s.rds",
gsub(" ", "_", species)))
#####################
### Model Fitting ###
#####################
# Fit an appropriate model type
# Comment out unused methods instead of deleting them in case more
# data becomes available at a later date
## Presence only
## Features should equal "default" on first attempt. Can reduce
## to "lqp", "lq", or "l" if model is too complex to fit
model <- bushfireSOS::fit_pres_bg_model(spp_data = spp_data,
tuneParam = TRUE,
k = 5,
filepath = sprintf("bushfireResponse_data/outputs_1990/model/MaxEnt_outputs/%s",
gsub(" ", "_", species)))
saveRDS(model,
sprintf("bushfireResponse_data/outputs_1990/model/model_%s.rds",
gsub(" ", "_", species)))
## Presence absence model
# model <- bushfireSOS::fit_pres_abs_model()
## Hybrid model
# model <- bushfireSOS::fit_hybrid_model()
########################
### Model Prediction ###
########################
# Perform appropriate prediction
prediction <- bushfireSOS::model_prediction(model = model,
env_data = env_data,
mask = "bushfireResponse_data/spatial_layers/NIAFED_v20200428",
parallel = FALSE)
raster::writeRaster(prediction,
sprintf("bushfireResponse_data/outputs_1990/predictions/predictions_%s.tif",
gsub(" ", "_", species)),
overwrite = TRUE)
prediction_threshold <- bushfireSOS::predict_threshold(pred_ras = prediction,
threshold = model_eval[3])
raster::writeRaster(prediction_threshold,
sprintf("bushfireResponse_data/outputs_1990/predictions/predictions_%s_threshold.tif",
gsub(" ", "_", species)),
overwrite = TRUE)
# mapview::mapview(prediction)
} else{
print("Model evaluation failed")
}
} else {
print("Less than 20 records, no model fit")
}
#################
### Meta Data ###
#################
# Store meta data relevant to analysis
meta_data <- sessionInfo()
saveRDS(meta_data,
sprintf("bushfireResponse_data/outputs_1990/meta_data/meta_data_%s.rds",
gsub(" ", "_", species)))
|
## linear SVM parameter estimation using primal-form quadratic programming
## solvers: "solve.QP" (quadprog) or "ipop" (kernlab)
##
## Arguments:
##   formula - model formula: class variable on the LHS, attributes on the RHS
##   data    - training data frame
##   svthres - tolerance for flagging support vectors (margin <= 1 + svthres)
##   inf     - large finite bound standing in for infinity (ipop solver only)
##   solver  - "solve.QP" or "ipop"
## Value: list(model = fitted "par"-class model, sv = support vector indices)
## NOTE(review): relies on helpers defined elsewhere in this package
## (y.var, x.vars, as.num0, rmm, repf.linear, repf.threshold, predict.par).
svm.linear.prim <- function(formula, data, svthres=1e-9, inf=1e3, solver="solve.QP")
{
class <- y.var(formula)
attributes <- x.vars(formula, data)
aind <- names(data) %in% attributes
cvec <- 2*as.num0(data[[class]])-1 # class vector using {-1, 1} labels
amat <- cbind(as.matrix(data[,aind]), intercept=1) # attribute value matrix
# Hard-margin primal QP over (w, intercept). The intercept row/column of the
# quadratic term is zero; nearPD() nudges the matrix to positive definite
# because solve.QP requires a strictly positive-definite Dmat.
# rmm() presumably scales each row of amat by its class label to form the
# margin constraints c_i * (w . x_i) >= 1 -- confirm against its definition.
if (solver=="solve.QP")
args <- list(Dmat=nearPD(rbind(cbind(diag(sum(aind)), 0), 0))$mat,
dvec=rep(0, sum(aind)+1),
Amat=t(rmm(amat, cvec)),
bvec=rep(1, nrow(data)))
else if (solver=="ipop")
# ipop needs explicit finite box/constraint bounds, hence the `inf` limits.
args <- list(c=rep(0, sum(aind)+1),
H=rbind(cbind(diag(sum(aind)), 0), 0),
A=rmm(amat, cvec),
b=rep(1, nrow(data)),
l=rep(-inf, sum(aind)+1),
u=rep(inf, sum(aind)+1),
r=rep(inf, nrow(data)))
else stop("Unknown solver: ", solver)
qp <- do.call(solver, args)
# The two solvers expose the primal solution differently (list vs S4 slot).
w <- if (solver=="solve.QP") qp$solution else if (solver=="ipop") qp@primal
# Support vectors: observations whose functional margin is <= 1 (+ tolerance).
sv <- unname(which(cvec*predict.par(list(repf=repf.linear, w=w),
data[,aind,drop=FALSE])<=1+svthres))
list(model=`class<-`(list(repf=repf.threshold(repf.linear), w=w), "par"), sv=sv)
}
# Usage example, wrapped in if (FALSE) so it never runs when the file is
# sourced; evaluate the calls manually. Relies on the kmdat.* data sets and
# an err() helper defined elsewhere in the package.
if (FALSE)
{
# estimate linear SVM model parameters
svm.p.ls <- svm.linear.prim(c~a1+a2+a3+a4, kmdat.train.ls)
# misclassification error on the training and test sets
err(predict(svm.p.ls$model, kmdat.train.ls[,1:4]), kmdat.train.ls$c)
err(predict(svm.p.ls$model, kmdat.test.ls[,1:4]), kmdat.test.ls$c)
}
| /R/kernel-methods-svm-primal.R | no_license | 42n4/dmr.kernel | R | false | false | 1,660 | r | ## linear SVM parameter estimation using primal-form quadratic programming
## solvers: "solve.QP" or "ipop"
svm.linear.prim <- function(formula, data, svthres=1e-9, inf=1e3, solver="solve.QP")
{
class <- y.var(formula)
attributes <- x.vars(formula, data)
aind <- names(data) %in% attributes
cvec <- 2*as.num0(data[[class]])-1 # class vector using {-1, 1} labels
amat <- cbind(as.matrix(data[,aind]), intercept=1) # attribute value matrix
if (solver=="solve.QP")
args <- list(Dmat=nearPD(rbind(cbind(diag(sum(aind)), 0), 0))$mat,
dvec=rep(0, sum(aind)+1),
Amat=t(rmm(amat, cvec)),
bvec=rep(1, nrow(data)))
else if (solver=="ipop")
args <- list(c=rep(0, sum(aind)+1),
H=rbind(cbind(diag(sum(aind)), 0), 0),
A=rmm(amat, cvec),
b=rep(1, nrow(data)),
l=rep(-inf, sum(aind)+1),
u=rep(inf, sum(aind)+1),
r=rep(inf, nrow(data)))
else stop("Unknown solver: ", solver)
qp <- do.call(solver, args)
w <- if (solver=="solve.QP") qp$solution else if (solver=="ipop") qp@primal
sv <- unname(which(cvec*predict.par(list(repf=repf.linear, w=w),
data[,aind,drop=FALSE])<=1+svthres))
list(model=`class<-`(list(repf=repf.threshold(repf.linear), w=w), "par"), sv=sv)
}
if (FALSE)
{
# estimate linear SVM model parameters
svm.p.ls <- svm.linear.prim(c~a1+a2+a3+a4, kmdat.train.ls)
# misclassification error
err(predict(svm.p.ls$model, kmdat.train.ls[,1:4]), kmdat.train.ls$c)
err(predict(svm.p.ls$model, kmdat.test.ls[,1:4]), kmdat.test.ls$c)
}
|
# Download the DrugBank "all-full-database" release into the staging area
# and pull matching substance records from the ZINC "dball" catalog.
library(curl)
library(BioChemPantry)
library(Zr)
library(plyr)   # load plyr before dplyr so dplyr's verbs mask plyr's
library(dplyr)
schema <- "drugbank"  # was paste0("drugbank"): paste0 on a single string is a no-op
staging_directory <- BioChemPantry::get_staging_directory(schema)
dir.create(paste0(staging_directory, "/dump"), recursive=TRUE)
####### to download from website using curl
### if this doesn't work, download directly from drugbank website
# NOTE(review): "user:pwd" is a placeholder -- supply real DrugBank
# credentials (ideally via environment variables, not source) before running.
dump_fname <- paste0(staging_directory, "/dump/drugbank_compounds.zip")
h <- curl::new_handle()
curl::handle_setopt(handle = h, httpauth = 1, userpwd = "user:pwd")
curl::curl_download(url = "https://go.drugbank.com/releases/5-1-8/downloads/all-full-database",
destfile = dump_fname, quiet = FALSE, handle = h)
unzip(dump_fname, exdir=paste0(staging_directory, "/dump"))
# Fetch ZINC substance records and keep/rename the identifier columns.
drugbank_substances <- Zr::catalog_items(
"dball",
output_fields=c(
"zinc_id",
"supplier_code",
"substance.preferred_name",
"substance.smiles"),
result_batch_size=10000,
verbose=TRUE) %>%   # was verbose=T; T is reassignable, TRUE is not
dplyr::select(
accession=supplier_code,
zinc_id,
zinc_name = substance.preferred_name,
zinc_smiles = substance.smiles)
drugbank_substances %>% readr::write_tsv(
paste0(staging_directory, "/dball_substances.tsv"))
| /0_load_drugbank_xml.R | no_license | saisaitian/BioChemPantry | R | false | false | 1,185 | r | library(curl)
library(BioChemPantry)
library(Zr)
library(plyr)
library(dplyr)
schema <- paste0("drugbank")
staging_directory <- BioChemPantry::get_staging_directory(schema)
dir.create(paste0(staging_directory, "/dump"), recursive=TRUE)
####### to download from website using curl
### if this doesn't work, download directly from drugbank website
dump_fname <- paste0(staging_directory, "/dump/drugbank_compounds.zip")
h <- curl::new_handle()
curl::handle_setopt(handle = h, httpauth = 1, userpwd = "user:pwd")
curl::curl_download(url = "https://go.drugbank.com/releases/5-1-8/downloads/all-full-database",
destfile = dump_fname, quiet = FALSE, handle = h)
unzip(dump_fname, exdir=paste0(staging_directory, "/dump"))
drugbank_substances <- Zr::catalog_items(
"dball",
output_fields=c(
"zinc_id",
"supplier_code",
"substance.preferred_name",
"substance.smiles"),
result_batch_size=10000,
verbose=T) %>%
dplyr::select(
accession=supplier_code,
zinc_id,
zinc_name = substance.preferred_name,
zinc_smiles = substance.smiles)
drugbank_substances %>% readr::write_tsv(
paste0(staging_directory, "/dball_substances.tsv"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.plot.tree.R
\name{xgb.plot.tree}
\alias{xgb.plot.tree}
\title{Plot a boosted tree model}
\usage{
xgb.plot.tree(feature_names = NULL, model = NULL, trees = NULL,
plot_width = NULL, plot_height = NULL, render = TRUE,
show_node_id = FALSE, ...)
}
\arguments{
\item{feature_names}{names of each feature as a \code{character} vector.}
\item{model}{produced by the \code{xgb.train} function.}
\item{trees}{an integer vector of tree indices that should be visualized.
If set to \code{NULL}, all trees of the model are included.
IMPORTANT: the tree index in xgboost model is zero-based
(e.g., use \code{trees = 0:2} for the first 3 trees in a model).}
\item{plot_width}{the width of the diagram in pixels.}
\item{plot_height}{the height of the diagram in pixels.}
\item{render}{a logical flag for whether the graph should be rendered (see Value).}
\item{show_node_id}{a logical flag for whether to show node id's in the graph.}
\item{...}{currently not used.}
}
\value{
When \code{render = TRUE}:
returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}.
Similar to ggplot objects, it needs to be printed to see it when not running from command line.
When \code{render = FALSE}:
silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}.
This could be useful if one wants to modify some of the graph attributes
before rendering the graph with \code{\link[DiagrammeR]{render_graph}}.
}
\description{
Read a tree model text dump and plot the model.
}
\details{
The content of each node is organised that way:
\itemize{
\item Feature name.
\item \code{Cover}: The sum of second order gradient of training data classified to the leaf.
If it is square loss, this simply corresponds to the number of instances seen by a split
or collected by a leaf during training.
The deeper in the tree a node is, the lower this metric will be.
\item \code{Gain} (for split nodes): the information gain metric of a split
(corresponds to the importance of the node in the model).
\item \code{Value} (for leaves): the margin value that the leaf may contribute to prediction.
}
The tree root nodes also indicate the Tree index (0-based).
The "Yes" branches are marked by the "< split_value" label.
The branches that also used for missing values are marked as bold
(as in "carrying extra capacity").
This function uses \href{http://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR.
}
\examples{
data(agaricus.train, package='xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 3,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
# plot all the trees
xgb.plot.tree(model = bst)
# plot only the first tree and display the node ID:
xgb.plot.tree(model = bst, trees = 0, show_node_id = TRUE)
\dontrun{
# Below is an example of how to save this plot to a file.
# Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed.
library(DiagrammeR)
gr <- xgb.plot.tree(model=bst, trees=0:1, render=FALSE)
export_graph(gr, 'tree.pdf', width=1500, height=1900)
export_graph(gr, 'tree.png', width=1500, height=1900)
}
}
| /R-package/man/xgb.plot.tree.Rd | permissive | alipay/ant-xgboost | R | false | true | 3,275 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.plot.tree.R
\name{xgb.plot.tree}
\alias{xgb.plot.tree}
\title{Plot a boosted tree model}
\usage{
xgb.plot.tree(feature_names = NULL, model = NULL, trees = NULL,
plot_width = NULL, plot_height = NULL, render = TRUE,
show_node_id = FALSE, ...)
}
\arguments{
\item{feature_names}{names of each feature as a \code{character} vector.}
\item{model}{produced by the \code{xgb.train} function.}
\item{trees}{an integer vector of tree indices that should be visualized.
If set to \code{NULL}, all trees of the model are included.
IMPORTANT: the tree index in xgboost model is zero-based
(e.g., use \code{trees = 0:2} for the first 3 trees in a model).}
\item{plot_width}{the width of the diagram in pixels.}
\item{plot_height}{the height of the diagram in pixels.}
\item{render}{a logical flag for whether the graph should be rendered (see Value).}
\item{show_node_id}{a logical flag for whether to show node id's in the graph.}
\item{...}{currently not used.}
}
\value{
When \code{render = TRUE}:
returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}.
Similar to ggplot objects, it needs to be printed to see it when not running from command line.
When \code{render = FALSE}:
silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}.
This could be useful if one wants to modify some of the graph attributes
before rendering the graph with \code{\link[DiagrammeR]{render_graph}}.
}
\description{
Read a tree model text dump and plot the model.
}
\details{
The content of each node is organised that way:
\itemize{
\item Feature name.
\item \code{Cover}: The sum of second order gradient of training data classified to the leaf.
If it is square loss, this simply corresponds to the number of instances seen by a split
or collected by a leaf during training.
The deeper in the tree a node is, the lower this metric will be.
\item \code{Gain} (for split nodes): the information gain metric of a split
(corresponds to the importance of the node in the model).
\item \code{Value} (for leafs): the margin value that the leaf may contribute to prediction.
}
The tree root nodes also indicate the Tree index (0-based).
The "Yes" branches are marked by the "< split_value" label.
The branches that also used for missing values are marked as bold
(as in "carrying extra capacity").
This function uses \href{http://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR.
}
\examples{
data(agaricus.train, package='xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 3,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
# plot all the trees
xgb.plot.tree(model = bst)
# plot only the first tree and display the node ID:
xgb.plot.tree(model = bst, trees = 0, show_node_id = TRUE)
\dontrun{
# Below is an example of how to save this plot to a file.
# Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed.
library(DiagrammeR)
gr <- xgb.plot.tree(model=bst, trees=0:1, render=FALSE)
export_graph(gr, 'tree.pdf', width=1500, height=1900)
export_graph(gr, 'tree.png', width=1500, height=1900)
}
}
|
setwd("C:/Users/Mengya/Desktop/Columbia Desk/GR5243/Fall2016-proj4-mz2593")
load('data/lyr.Rdata')
load('output/Sound.Rdata')
install.packages(data.table)
library(data.table)
install.packages("xgboost")
library(xgboost)
X <- read.csv("output/musicdata.csv")
col<-colnames(X)
dim(X)
X <-matrix(unlist(X), nrow=2350, ncol = 16)
rownames(X) <-lyr[,1]
colnames(X) <- col
X <- X[ , 2:16]
dim(X)
y = read.csv("output/Y.csv")
dim(y)
y <- as.numeric(y$topic)
length(y)
dtrain<-xgb.DMatrix(data = X,label = y, missing=NaN)
## Hyper-parameter grid search via 5-fold cross-validation ------------------
best_param <- list()
best_seednumber <- 1234
best_err <- Inf
best_err_index <- 0
# One cell per (max_depth, shrinkage) combination; each cell stores
# list(minimum CV RMSE, boosting round at which it occurred).
cv.result <- data.frame(shk1 = I(list()), shk2 = I(list()), shk3 = I(list()),
shk4 = I(list()), shk5 = I(list()))
# Bug fix: seq(0.1, 0.15, 0.5) has length 1, so shrinkage[e] was NA for
# e > 1 and four of the five grid columns ran xgb.cv with eta = NA.
# The shk1..shk5 result columns imply five learning rates; use an evenly
# spaced grid of five values instead.
shrinkage <- seq(0.1, 0.5, by = 0.1)
for (d in 6:10) {
  for (e in seq_along(shrinkage)) {
    param <- list(max_depth = d,
                  eta = shrinkage[e])
    cv.nround <- 1000
    cv.nfold <- 5
    # Record the seed so the winning CV run can be reproduced exactly.
    seed.number <- sample.int(10000, 1)[[1]]
    set.seed(seed.number)
    cat("d=", d, "e=", e, "\n")
    mdcv <- xgb.cv(data = dtrain, params = param, nthread = 6,
                   nfold = cv.nfold, nrounds = cv.nround,
                   verbose = 0, early.stop.round = 8, maximize = FALSE)
    # NOTE(review): mdcv[, test.rmse.mean] is data.table syntax; this assumes
    # the installed xgboost returns the CV log as a data.table -- confirm the
    # package version, as newer releases return a list with evaluation_log.
    min_err <- min(mdcv[, test.rmse.mean])
    min_err_index <- which.min(mdcv[, test.rmse.mean])
    cv.result[[d - 5, e]] <- list(min_err, min_err_index)
    if (min_err < best_err) {
      best_err <- min_err
      best_err_index <- min_err_index
      best_seednumber <- seed.number
      best_param <- param
    }
  }
}
# Persist the CV grid-search results and the best hyper-parameters found.
save(cv.result,file="C:/Users/Mengya/Desktop/Columbia Desk/GR5243/Fall2016-proj4-mz2593/output/cv_xgb_result.RData")
xgb_best<-data.frame(best_err,best_err_index,best_seednumber,best_param)
save(xgb_best,file="C:/Users/Mengya/Desktop/Columbia Desk/GR5243/Fall2016-proj4-mz2593/output/xgb_best.RData")
# Refit on the full data using the winning round count and the recorded
# seed so the fit matches the cross-validated configuration.
nround = best_err_index
set.seed(best_seednumber)
xgb_fit<- xgb.train(data=dtrain, params=best_param, nrounds=nround)
xgb_fit
save(xgb_fit,file="C:/Users/Mengya/Desktop/Columbia Desk/GR5243/Fall2016-proj4-mz2593/output/xgb_fit.RData")
# Sanity check: rounded predictions never exceed 50 (presumably the number
# of topic labels -- confirm against the label encoding).
all(round(predict(xgb_fit,X, missing=NaN))<=50)
setwd("C:/Users/Mengya/Desktop/Columbia Desk/GR5243/Fall2016-proj4-mz2593")
load('data/lyr.Rdata')
load('output/Sound.Rdata')
install.packages(data.table)
library(data.table)
install.packages("xgboost")
library(xgboost)
X <- read.csv("output/musicdata.csv")
col<-colnames(X)
dim(X)
X <-matrix(unlist(X), nrow=2350, ncol = 16)
rownames(X) <-lyr[,1]
colnames(X) <- col
X <- X[ , 2:16]
dim(X)
y = read.csv("output/Y.csv")
dim(y)
y <- as.numeric(y$topic)
length(y)
dtrain<-xgb.DMatrix(data = X,label = y, missing=NaN)
best_param = list()
best_seednumber = 1234
best_err = Inf
best_err_index = 0
cv.result = data.frame(shk1=I(list()),shk2=I(list()),shk3=I(list()),shk4=I(list()),shk5=I(list()))
# Bug fix: seq(0.1, 0.15, 0.5) has length 1, so shrinkage[e] was NA for
# e > 1; the shk1..shk5 result columns imply five learning rates.
shrinkage <- seq(0.1, 0.5, by = 0.1)
for (d in 6:10) {
for(e in 1:5){
try.maxdph = d
try.eta = shrinkage[e]
param <- list(max_depth = try.maxdph,
eta = try.eta
)
cv.nround = 1000
cv.nfold = 5
seed.number = sample.int(10000, 1)[[1]]
set.seed(seed.number)
cat("d=",d,"e=",e,'\n')
mdcv <- xgb.cv(data=dtrain, params = param, nthread=6,
nfold=cv.nfold, nrounds=cv.nround,
verbose = 0, early.stop.round=8, maximize=FALSE)
min_err = min(mdcv[, test.rmse.mean])
min_err_index = which.min(mdcv[, test.rmse.mean])
cv.result[[d-5,e]] = list(min_err,min_err_index)
if (min_err < best_err) {
best_err = min_err
best_err_index = min_err_index
best_seednumber = seed.number
best_param = param
}
}
}
save(cv.result,file="C:/Users/Mengya/Desktop/Columbia Desk/GR5243/Fall2016-proj4-mz2593/output/cv_xgb_result.RData")
xgb_best<-data.frame(best_err,best_err_index,best_seednumber,best_param)
save(xgb_best,file="C:/Users/Mengya/Desktop/Columbia Desk/GR5243/Fall2016-proj4-mz2593/output/xgb_best.RData")
nround = best_err_index
set.seed(best_seednumber)
xgb_fit<- xgb.train(data=dtrain, params=best_param, nrounds=nround)
xgb_fit
save(xgb_fit,file="C:/Users/Mengya/Desktop/Columbia Desk/GR5243/Fall2016-proj4-mz2593/output/xgb_fit.RData")
all(round(predict(xgb_fit,X, missing=NaN))<=50)
|
library(naivereg)
### Name: naive.gel
### Title: Estimate the parameters with GEL after IV selection
### Aliases: naive.gel
### ** Examples
# GEL estimate after instrument (IV) selection.
# Simulate an ARMA(2,1) series, then regress y on its own first two lags,
# using deeper lags (H) as the candidate instruments.
n = 200
phi<-c(.2,.7)
thet <- 0.2
sd <- .2
set.seed(123)
x <- matrix(arima.sim(n = n, list(order = c(2,0,1), ar = phi, ma = thet, sd = sd)), ncol = 1)
y <- x[7:n]
ym1 <- x[6:(n-1)]
ym2 <- x[5:(n-2)]
# Candidate instrument matrix: lags 3 through 6 of the series.
H <- cbind(x[4:(n-3)], x[3:(n-4)], x[2:(n-5)], x[1:(n-6)])
g <- y ~ ym1 + ym2
x <- H
# tet0 presumably gives starting values for (intercept, ym1, ym2) --
# confirm against the naivereg documentation.
naive.gel(g, cbind(ym1,ym2),x, tet0 =c(0,.3,.6))
| /data/genthat_extracted_code/naivereg/examples/naive.gel.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 513 | r | library(naivereg)
### Name: naive.gel
### Title: Estimete the parameters with gel after IV selecting
### Aliases: naive.gel
### ** Examples
#gel estimate after IV selecting
n = 200
phi<-c(.2,.7)
thet <- 0.2
sd <- .2
set.seed(123)
x <- matrix(arima.sim(n = n, list(order = c(2,0,1), ar = phi, ma = thet, sd = sd)), ncol = 1)
y <- x[7:n]
ym1 <- x[6:(n-1)]
ym2 <- x[5:(n-2)]
H <- cbind(x[4:(n-3)], x[3:(n-4)], x[2:(n-5)], x[1:(n-6)])
g <- y ~ ym1 + ym2
x <- H
naive.gel(g, cbind(ym1,ym2),x, tet0 =c(0,.3,.6))
|
## plot1
# Read the household power consumption data
# rawData <- read.table("household_power_consumption.txt", sep = ";", head = TRUE,
# na.strings = "?", colClasses = c("character","character","numeric", "numeric","numeric",
# "numeric","numeric", "numeric","numeric"))
# head(rawData)
# # Select sample data from the dates 2007-02-01 and 2007-02-02
# Index <- rawData$Date == "1/2/2007" | rawData$Date == "2/2/2007"
# dat1 <- rawData[Index,]
# Another way to read the sample data from the dates 2007-02-01 and 2007-02-02
rm(list = ls())
firstLine <- 66636 # the first line where Date = "1/2/2007"
lastLine <- 69516 # the last line where Date = "2/2/2007"
# Read only the two target days by skipping directly to the relevant rows and
# supplying the column names explicitly.  `header` is now spelled out in full;
# the original relied on partial argument matching via `head = TRUE`.
# NOTE(review): with skip = firstLine and header = TRUE, the first skipped-to
# line is consumed as a header row (then overridden by col.names), so the
# retained window may be shifted by one record -- verify against the raw file.
dat1 <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
na.strings = "?", colClasses = c("character","character","numeric",
"numeric","numeric", "numeric","numeric", "numeric","numeric"),
col.names = c("Date","Time","Global_active_power","Global_reactive_power",
"Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"),
skip = firstLine, nrows = lastLine-firstLine)
# Sanity check: inspect the first and last retained records.
dat1[1,]
dat1[dim(dat1)[1],]
# plot1: the histogram of Global Active Power, written to a 480x480 PNG.
png(filename = "plot1.png", width = 480, height = 480, units = "px")
hist(dat1$Global_active_power, col = "red", main = "Global Active Power",
xlab = "Global Active Power(kilowatts)")
dev.off()
| /plot1.r | no_license | LeanYu/ExData_Plotting1 | R | false | false | 1,393 | r | ## plot1
# Read the household power consumption data
# rawData <- read.table("household_power_consumption.txt", sep = ";", head = TRUE,
# na.strings = "?", colClasses = c("character","character","numeric", "numeric","numeric",
# "numeric","numeric", "numeric","numeric"))
# head(rawData)
# # Select sample data from the dates 2007-02-01 and 2007-02-02
# Index <- rawData$Date == "1/2/2007" | rawData$Date == "2/2/2007"
# dat1 <- rawData[Index,]
# Another way to read the sample data from the dates 2007-02-01 and 2007-02-02
rm(list = ls())
firstLine <- 66636 # the first line where Date = "1/2/2007"
lastLine <- 69516 # the last line where Date = "2/2/2007"
dat1 <- read.table("household_power_consumption.txt", sep = ";", head = TRUE,
na.strings = "?", colClasses = c("character","character","numeric",
"numeric","numeric", "numeric","numeric", "numeric","numeric"),
col.names = c("Date","Time","Global_active_power","Global_reactive_power",
"Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"),
skip = firstLine, nrows = lastLine-firstLine)
# Check
dat1[1,]
dat1[dim(dat1)[1],]
# plot1: the histogram of Global Active Power
png(filename = "plot1.png", width = 480, height = 480, units = "px")
hist(dat1$Global_active_power, col = "red", main = "Global Active Power",
xlab = "Global Active Power(kilowatts)")
dev.off()
|
# Week2 : Lesson 1: Lattice Plotting System
library(lattice)
library(datasets)
# Simple scatterplot
xyplot(Ozone~Wind, data = airquality)
# Convert Month to a factor variable
airquality <- transform(airquality, Month = factor(Month))
xyplot(Ozone~Wind | Month, data = airquality, layout = c(5, 1)) # by month
# Lattice behavior
p <- xyplot(Ozone~Wind, data = airquality)
print(p)
## Panel Functions
set.seed(10)
x <- rnorm(100)
f <- rep(0:1, each = 50)
y <- x + f - f * x + rnorm(100, sd = 0.5)
f <- factor(f, labels, c ("Group 1", "Group 2"))
xyplot(y~x | f, layout = c(2,1)) # plot with 2 panels
# Custom panel function
xyplot(y~x | f, panel = function(x, y, ...){
panel.xyplot(x, y, ...) # 1st call the default panel function for xyplot
panel.abline(h = median(y), lty = 2) # add a horizontal line at the median
})
# Custom panel function
xyplot(y~x | f, panel = function(x, y, ...){
panel.xyplot(x, y, ...) # 1st call the default panel function for xyplot
panel.lmline(x, y, col = 2) # overlay a simple linear regression line
})
| /04_ExploratoryDataAnalysis/code/Lesson1_LatticePlottingSystem_w2.R | no_license | pritraj90/DataScienceR | R | false | false | 1,065 | r | # Week2 : Lesson 1: Lattice Plotting System
library(lattice)
library(datasets)
# Simple scatterplot
xyplot(Ozone~Wind, data = airquality)
# Convert Month to a factor variable
airquality <- transform(airquality, Month = factor(Month))
xyplot(Ozone~Wind | Month, data = airquality, layout = c(5, 1)) # by month
# Lattice behavior
p <- xyplot(Ozone~Wind, data = airquality)
print(p)
## Panel Functions
set.seed(10)
x <- rnorm(100)
f <- rep(0:1, each = 50)
y <- x + f - f * x + rnorm(100, sd = 0.5)
f <- factor(f, labels, c ("Group 1", "Group 2"))
xyplot(y~x | f, layout = c(2,1)) # plot with 2 panels
# Custom panel function
xyplot(y~x | f, panel = function(x, y, ...){
panel.xyplot(x, y, ...) # 1st call the default panel function for xyplot
panel.abline(h = median(y), lty = 2) # add a horizontal line at the median
})
# Custom panel function
xyplot(y~x | f, panel = function(x, y, ...){
panel.xyplot(x, y, ...) # 1st call the default panel function for xyplot
panel.lmline(x, y, col = 2) # overlay a simple linear regression line
})
|
\name{yseassd}
\alias{yseassd}
\title{
Determine multi-year seasonal standard deviations.
}
\description{
The function determines multi-year seasonal standard deviation values from data of a single
CM SAF NetCDF input file.
}
\usage{
yseassd(var, infile, outfile)
}
\arguments{
\item{var}{
Name of NetCDF variable (character).
}
\item{infile}{
Filename of input NetCDF file. This may include the directory (character).
}
\item{outfile}{
Filename of output NetCDF file. This may include the directory (character).
}
}
\value{
A NetCDF file including a timeseries of multi-year seasonal standard deviations is written.
}
\author{
Steffen Kothe
}
\examples{
## Create an example NetCDF file with a similar structure
## as used by CM SAF. The file is created with the ncdf4 package.
## Alternatively example data can be freely downloaded here:
## <https://wui.cmsaf.eu/>
library(ncdf4)
## create some (non-realistic) example data
lon <- seq(5,15,0.5)
lat <- seq(45,55,0.5)
time <- seq(as.Date('2000-01-01'), as.Date('2010-12-31'), 'month')
origin <- as.Date('1983-01-01 00:00:00')
time <- as.numeric(difftime(time,origin,units='hour'))
data <- array(250:350,dim=c(21,21,132))
## create example NetCDF
x <- ncdim_def(name='lon',units='degrees_east',vals=lon)
y <- ncdim_def(name='lat',units='degrees_north',vals=lat)
t <- ncdim_def(name='time',units='hours since 1983-01-01 00:00:00',
vals=time,unlim=TRUE)
var1 <- ncvar_def('SIS','W m-2',list(x,y,t),-1,prec='short')
vars <- list(var1)
ncnew <- nc_create('CMSAF_example_file.nc',vars)
ncvar_put(ncnew,var1,data)
ncatt_put(ncnew,'lon','standard_name','longitude',prec='text')
ncatt_put(ncnew,'lat','standard_name','latitude',prec='text')
nc_close(ncnew)
## Determine the multi-year seasonal standard deviation of the example
## CM SAF NetCDF file and write the output to a new file.
yseassd('SIS','CMSAF_example_file.nc',
'CMSAF_example_file_yseassd.nc')
} | /man/yseassd.Rd | no_license | solhailu/cmsaf | R | false | false | 1,970 | rd | \name{yseassd}
\alias{yseassd}
\title{
Determine multi-year seasonal standard deviations.
}
\description{
The function determines multi-year seasonal standard deviation values from data of a single
CM SAF NetCDF input file.
}
\usage{
yseassd(var, infile, outfile)
}
\arguments{
\item{var}{
Name of NetCDF variable (character).
}
\item{infile}{
Filename of input NetCDF file. This may include the directory (character).
}
\item{outfile}{
Filename of output NetCDF file. This may include the directory (character)..
}
}
\value{
A NetCDF file including a timeseries of multi-year seasonal standard deviations is written.
}
\author{
Steffen Kothe
}
\examples{
## Create an example NetCDF file with a similar structure
## as used by CM SAF. The file is created with the ncdf4 package.
## Alternatively example data can be freely downloaded here:
## <https://wui.cmsaf.eu/>
library(ncdf4)
## create some (non-realistic) example data
lon <- seq(5,15,0.5)
lat <- seq(45,55,0.5)
time <- seq(as.Date('2000-01-01'), as.Date('2010-12-31'), 'month')
origin <- as.Date('1983-01-01 00:00:00')
time <- as.numeric(difftime(time,origin,units='hour'))
data <- array(250:350,dim=c(21,21,132))
## create example NetCDF
x <- ncdim_def(name='lon',units='degrees_east',vals=lon)
y <- ncdim_def(name='lat',units='degrees_north',vals=lat)
t <- ncdim_def(name='time',units='hours since 1983-01-01 00:00:00',
vals=time,unlim=TRUE)
var1 <- ncvar_def('SIS','W m-2',list(x,y,t),-1,prec='short')
vars <- list(var1)
ncnew <- nc_create('CMSAF_example_file.nc',vars)
ncvar_put(ncnew,var1,data)
ncatt_put(ncnew,'lon','standard_name','longitude',prec='text')
ncatt_put(ncnew,'lat','standard_name','latitude',prec='text')
nc_close(ncnew)
## Determine the multi-year seasonal standard deviation of the example
## CM SAF NetCDF file and write the output to a new file.
yseassd('SIS','CMSAF_example_file.nc',
'CMSAF_example_file_yseassd.nc')
} |
#' A package for Chinese text segmentation
#'
#' jiebaR is a package for Chinese text segmentation, keyword extraction
#' and speech tagging. This package provides the data files required by jiebaR.
#' jiebaR supports four segmentation modes: Maximum Probability, Hidden Markov Model,
#' Query Segment and Mix Segment.
#'
#' You can supply a custom dictionary to be merged with the jiebaR default dictionary.
#' jiebaR can also identify new words, but adding your own new words will ensure a higher
#' accuracy.
#'
#' @docType package
#' @name jiebaRD
#' @author Qin Wenfeng <\url{http://qinwenfeng.com}>
#' @references CppJieba \url{https://github.com/aszxqw/cppjieba};
#' @seealso JiebaR \url{https://github.com/qinwf/jiebaR};
#' @examples
#' library("jiebaRD")
NULL
| /jiebaRD/R/jiebaRD-package.r | no_license | ingted/R-Examples | R | false | false | 791 | r | #' A package for Chinese text segmentation
#'
#' jiebaR is a package for Chinese text segmentation, keyword extraction
#' and speech tagging. This package provides the data files required by jiebaR.
#' jiebaR supports four types of segmentation mode: Maximum Probability, Hidden Markov Model,
#' Query Segment and Mix Segment.
#'
#' You can use custom dictionary to be included in the jiebaR default dictionary.
#' jiebaR can also identify new words, but adding your own new words will ensure a higher
#' accuracy.
#'
#' @docType package
#' @name jiebaRD
#' @author Qin Wenfeng <\url{http://qinwenfeng.com}>
#' @references CppJieba \url{https://github.com/aszxqw/cppjieba};
#' @seealso JiebaR \url{https://github.com/qinwf/jiebaR};
#' @examples
#' library("jiebaRD")
NULL
|
# --- Script setup: clean session, load packages, read inputs ----------------
# NOTE(review): rm(list = ls()) and clearing the console (cat("\014") sends
# form-feed, which clears the RStudio console) are interactive conveniences;
# they make the script hostile to being sourced from other code.
rm(list = ls())
# Close any open graphics device left over from a previous run.
if (is.integer(dev.list())) {
dev.off()
}
cat("\014")
set.seed(1)
# Convenience helper: preview the first rows / first five columns.
head2 <- function(x)
head(x)[, 1:5]
# Negated set-membership operator used for filtering below.
`%!in%` <- Negate(`%in%`)
library(readr)
library(dplyr)
library(tidyr)
#library(edgeR)
library(preprocessCore)
print("step 3")
### this is dataset provided by merve of genes that are never expressed, used to filter out genes that we are not interested in
never_exp <-
read.table(
"./never_exp_GMM_dream_comp.txt",
quote = "\"",
comment.char = ""
)
###creating a matching cell_id name
# The seven cell lines with measured drug sensitivity (used throughout below).
cell_id <-
c("ASPC1",
"DU145",
"EFO21",
"NCIH1793",
"HCC1143",
"LNCAPCLONEFGC",
"U87MG")
### CCLE expression
ccle_dat <- read.csv("./data_ccle_RNAseq_DREAMv2_FIXED.csv")
###filtering for overlapping genes: drop never-expressed genes (first column
### X holds the gene identifier before it is renamed).
ccle_dat <- ccle_dat %>% filter(X %!in% never_exp$V1)
colnames(ccle_dat)[1] <- "gene"
ccle_cells <- colnames(ccle_dat)[-1]
### At this point ccle_dat and mean_exp_known have both been put in a form where both can be used, pushed through umap
# Quantile-normalise an expression matrix (rows = genes, columns = samples,
# with a leading `gene` identifier column), then return the sample-by-sample
# Spearman correlation matrix.
spearman_pipe <- function(rna_seq_dat) {
  gene_ids <- rna_seq_dat$gene
  rna_seq_dat$gene <- NULL
  sample_ids <- colnames(rna_seq_dat)
  # normalize.quantiles() drops dimnames, so restore them afterwards.
  normed <- as.data.frame(normalize.quantiles(as.matrix(rna_seq_dat)))
  colnames(normed) <- sample_ids
  rownames(normed) <- gene_ids
  cor(normed, method = "spearman")
}
# Sample-by-sample Spearman correlation matrix across all CCLE cell lines.
ccle_dist <- spearman_pipe(ccle_dat)
###this is sensitivity data of 11 cell lines against 30 drugs; four lines are
###excluded, leaving the seven in cell_id.
sensitivity_pred <-
read.csv("./sens_v2.txt", sep="") %>% filter(X %!in% c("PANC1", "HSTS", "KRJ1", "HF2597"))
# Re-label rows with the canonical cell-line names (same order as cell_id).
rownames(sensitivity_pred) <-
c("ASPC1",
"DU145",
"EFO21",
"NCIH1793",
"HCC1143",
"LNCAPCLONEFGC",
"U87MG")
# The first 30 columns are the drugs to be predicted.
drugs <- colnames(sensitivity_pred)
drugs <- drugs[1:30]
# For every cell line, keep only its correlations to the seven lines with
# measured sensitivity: these correlations become regression features.
ccle_dist_dat <- ccle_dist %>% as.matrix() %>% as.data.frame()
ccle_dist_dat <-
ccle_dist_dat %>% dplyr::select(ASPC1, DU145, EFO21, NCIH1793, HCC1143, LNCAPCLONEFGC, U87MG)
ccle_dist_dat$cell = rownames(ccle_dist_dat)
# Restrict the training rows to the seven measured cell lines themselves.
ccle_dist_dat <-
ccle_dist_dat %>% filter(cell %in% colnames(ccle_dist_dat))
sensitivity_pred$cell = rownames(sensitivity_pred)
# Training table: correlation features joined to measured sensitivities.
lm_perturb_dat <- merge(ccle_dist_dat, sensitivity_pred)
# Empty (zero-column) frame keeping one row per cell line; predictions are
# appended column-by-column in the loop below.
results <-
ccle_dist %>% as.matrix() %>% as.data.frame() %>% dplyr::select()
# Cell lines with measured sensitivities; their correlation values are the
# predictors of each per-drug linear model.
predictor_cells <- c("ASPC1", "DU145", "EFO21", "NCIH1793", "HCC1143",
                     "LNCAPCLONEFGC", "U87MG")
# The prediction frame is identical for every drug, so build it once outside
# the loop instead of rebuilding it on each iteration.
temp <- ccle_dist %>% as.matrix() %>% as.data.frame()
temp <- temp %>% dplyr::select(dplyr::all_of(predictor_cells))
temp$cell = rownames(temp)
# Fit one linear model per drug on the seven measured lines, then project the
# drug's sensitivity onto every CCLE line via its correlation profile.
for (drug in drugs) {
  # Build the per-drug formula by name with reformulate() rather than the
  # fragile/unsafe eval(parse(text = drug)) idiom; the fitted model is the
  # same.
  sensitivity_pred_models <-
    lm(reformulate(predictor_cells, response = drug), data = lm_perturb_dat)
  lm_estimates <- predict(sensitivity_pred_models, temp)
  results[drug] <- lm_estimates
}
# Quick sanity check of the projected sensitivities.
head(results)
# Min-max normalisation: linearly rescale the values of `x` onto [0, 1].
range01 <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Rescale each predicted sensitivity onto [0, 1] before submission.
res <- range01(results)
head(res)
###ordering to the template: match the submission template's column order and
###cell-line row order exactly.
template_final <- read.csv("./template_final.csv")
res$cell_line <- rownames(res)
rownames(res) <- NULL
try_res <- res[colnames(template_final)]
try_res <-
try_res[match(template_final$cell_line, try_res$cell_line), ]
# Write the final submission file (path expected by the scoring harness).
write.csv(try_res, "/output/submission_final.csv", row.names = FALSE)
| /Distance_Projection_spearman_Part3.R | no_license | franklenoir/drug_sensitivity_competition | R | false | false | 3,153 | r | rm(list = ls())
if (is.integer(dev.list())) {
dev.off()
}
cat("\014")
set.seed(1)
head2 <- function(x)
head(x)[, 1:5]
`%!in%` <- Negate(`%in%`)
library(readr)
library(dplyr)
library(tidyr)
#library(edgeR)
library(preprocessCore)
print("step 3")
### this is dataset provided by merve of genes that are never expressed, used to filter out genes that we are not interested in
never_exp <-
read.table(
"./never_exp_GMM_dream_comp.txt",
quote = "\"",
comment.char = ""
)
###creating a matching cell_id name
cell_id <-
c("ASPC1",
"DU145",
"EFO21",
"NCIH1793",
"HCC1143",
"LNCAPCLONEFGC",
"U87MG")
### CCLE expression
ccle_dat <- read.csv("./data_ccle_RNAseq_DREAMv2_FIXED.csv")
###filtering for overlapping genes
ccle_dat <- ccle_dat %>% filter(X %!in% never_exp$V1)
colnames(ccle_dat)[1] <- "gene"
ccle_cells <- colnames(ccle_dat)[-1]
### At this point ccle_dat and mean_exp_known have both been put in a form where both can be used, pushed through umap
spearman_pipe <- function(rna_seq_dat) {
rows <- rna_seq_dat$gene
rna_seq_dat$gene <- NULL
cols <- colnames(rna_seq_dat)
rna_seq_dat <-
normalize.quantiles(as.matrix(rna_seq_dat)) %>% as.data.frame()
colnames(rna_seq_dat) <- cols
rownames(rna_seq_dat) <- rows
dist_df <- cor(rna_seq_dat, method = "spearman")
return(dist_df)
}
ccle_dist <- spearman_pipe(ccle_dat)
###this is sensitivity date of 11 cell lines against 30 drugs.
sensitivity_pred <-
read.csv("./sens_v2.txt", sep="") %>% filter(X %!in% c("PANC1", "HSTS", "KRJ1", "HF2597"))
rownames(sensitivity_pred) <-
c("ASPC1",
"DU145",
"EFO21",
"NCIH1793",
"HCC1143",
"LNCAPCLONEFGC",
"U87MG")
drugs <- colnames(sensitivity_pred)
drugs <- drugs[1:30]
ccle_dist_dat <- ccle_dist %>% as.matrix() %>% as.data.frame()
ccle_dist_dat <-
ccle_dist_dat %>% dplyr::select(ASPC1, DU145, EFO21, NCIH1793, HCC1143, LNCAPCLONEFGC, U87MG)
ccle_dist_dat$cell = rownames(ccle_dist_dat)
ccle_dist_dat <-
ccle_dist_dat %>% filter(cell %in% colnames(ccle_dist_dat))
sensitivity_pred$cell = rownames(sensitivity_pred)
lm_perturb_dat <- merge(ccle_dist_dat, sensitivity_pred)
results <-
ccle_dist %>% as.matrix() %>% as.data.frame() %>% dplyr::select()
for (drug in drugs) {
sensitivity_pred_models <-
lm(
eval(parse(text = drug)) ~ ASPC1 + DU145 + EFO21 + NCIH1793 + HCC1143 + LNCAPCLONEFGC + U87MG,
lm_perturb_dat
)
temp <- ccle_dist %>% as.matrix() %>% as.data.frame()
temp <-
temp %>% dplyr::select(ASPC1, DU145, EFO21, NCIH1793, HCC1143, LNCAPCLONEFGC, U87MG)
temp$cell = rownames(temp)
lm_estimates <- predict(sensitivity_pred_models, temp)
results[drug] <- lm_estimates
}
head(results)
range01 <- function(x) {
(x - min(x)) / (max(x) - min(x))
}
res <- range01(results)
head(res)
###ordering to the template
template_final <- read.csv("./template_final.csv")
res$cell_line <- rownames(res)
rownames(res) <- NULL
try_res <- res[colnames(template_final)]
try_res <-
try_res[match(template_final$cell_line, try_res$cell_line), ]
write.csv(try_res, "/output/submission_final.csv", row.names = FALSE)
|
# Command-line usage: Rscript PctIntr.R <bedfile> <Mb-per-sim> <n-sims>
args = commandArgs(TRUE)
x. = args[1]
MB = as.numeric(args[2])
SIMS = as.numeric(args[3])
print('CHECK POPULATION SIZE SET CORRECTLY')
suppressMessages(library("data.table", quietly = TRUE, verbose = FALSE))
suppressMessages(library("dplyr", quietly = TRUE, verbose = FALSE))
# Compute, per population, the average percentage of the genome that is
# introgressed: total introgressed bp, divided by the number of haplotypes
# in that population, divided by the total simulated sequence (Mb * sims).
# x    : path to a BED-like file of introgressed segments (no header)
# mb   : megabases simulated per simulation
# sims : number of simulations
# Returns a named list with EA / EU / SAS percentages.
# NOTE(review): haplotype counts 1008 / 1006 / 978 are hard-coded (hence the
# warning printed above); the trailing comment mentions 572 individuals,
# which does not match 1008 haplotypes -- confirm the intended sample sizes.
introgression.QC = function(x,mb,sims){
data = fread(x, header = FALSE)
data = rename(data, sim.it.chr = V1, start = V2, stop = V3, chr = V4, pop = V5, sim.it = V6, tag= V7, itr = V8)
# Segment length in bp.
data = mutate(data, diff = stop-start)
##Average % introgression XMb Ysims
EA.pctintr=data[pop=='pop_3', sum(as.numeric(diff))/1008/(as.numeric(mb)*as.numeric(sims))] #XMbxYsimulations for 572 individuals
EU.pctintr=data[pop=='pop_2', sum(as.numeric(diff))/1006/(as.numeric(mb)*as.numeric(sims))]
SAS.pctintr=data[pop=='pop_4', sum(as.numeric(diff))/978/(as.numeric(mb)*as.numeric(sims))]
out.list = list('EA.pctintr'=EA.pctintr, 'EU.pctintr'=EU.pctintr, 'SAS.pctintr'=SAS.pctintr)
return(out.list)
}
Z1 = introgression.QC(x.,MB,SIMS)
print(Z1[1:3])
| /PctIntr.R | no_license | abwolf/general_scripts | R | false | false | 1,071 | r | args = commandArgs(TRUE)
x. = args[1]
MB = as.numeric(args[2])
SIMS = as.numeric(args[3])
print('CHECK POPULATION SIZE SET CORRECTLY')
suppressMessages(library("data.table", quietly = TRUE, verbose = FALSE))
suppressMessages(library("dplyr", quietly = TRUE, verbose = FALSE))
introgression.QC = function(x,mb,sims){
data = fread(x, header = FALSE)
data = rename(data, sim.it.chr = V1, start = V2, stop = V3, chr = V4, pop = V5, sim.it = V6, tag= V7, itr = V8)
data = mutate(data, diff = stop-start)
##Average % introgression XMb Ysims
EA.pctintr=data[pop=='pop_3', sum(as.numeric(diff))/1008/(as.numeric(mb)*as.numeric(sims))] #XMbxYsimulations for 572 individuals
EU.pctintr=data[pop=='pop_2', sum(as.numeric(diff))/1006/(as.numeric(mb)*as.numeric(sims))]
SAS.pctintr=data[pop=='pop_4', sum(as.numeric(diff))/978/(as.numeric(mb)*as.numeric(sims))]
out.list = list('EA.pctintr'=EA.pctintr, 'EU.pctintr'=EU.pctintr, 'SAS.pctintr'=SAS.pctintr)
return(out.list)
}
Z1 = introgression.QC(x.,MB,SIMS)
print(Z1[1:3])
|
# Unroot a phylogenetic tree: parse the Newick file, then collapse the root
# node with ape::unroot() (the result is written back out on the next line).
library(ape)
testtree <- read.tree("8771_2.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8771_2_unrooted.txt") | /codeml_files/newick_trees_processed/8771_2/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("8771_2.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8771_2_unrooted.txt") |
# Scrape eBay search-result listing titles from the page at URL `x`.
# Returns a character vector of titles with the first entry removed (the
# selector also matches a non-listing header node at the top of the page).
listing_extract = function(x) {
  page <- read_html(x)
  titles <- html_text(html_nodes(page, ".s-item__title"))
  titles[-1]
}
| /ebayfunc/R/listingextract.R | no_license | ASheehan98/Ebay-R-Package | R | false | false | 218 | r | listing_extract = function(x) {
initialdata = read_html(x)
listingdata = initialdata%>%
html_nodes(".s-item__title")%>%
html_text()
listingdata
sansfirstentry = listingdata[-c(1)]
sansfirstentry
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{FANG}
\alias{FANG}
\title{Stock prices for the "FANG" stocks.}
\format{
A "tibble" ("tidy" data frame) with 4,032 rows and 8 variables:
\describe{
\item{symbol}{stock ticker symbol}
\item{date}{trade date}
\item{open}{stock price at the open of trading, in USD}
\item{high}{stock price at the highest point during trading, in USD}
\item{low}{stock price at the lowest point during trading, in USD}
\item{close}{stock price at the close of trading, in USD}
\item{volume}{number of shares traded}
\item{adjusted}{stock price at the close of trading adjusted for stock splits, in USD}
}
}
\source{
\url{https://www.investopedia.com/terms/f/fang-stocks-fb-amzn.asp}
}
\usage{
FANG
}
\description{
A dataset containing the daily historical stock prices for the "FANG" tech stocks,
"FB", "AMZN", "NFLX", and "GOOG", spanning from the beginning of
2013 through the end of 2016.
}
\keyword{datasets}
| /man/FANG.Rd | no_license | cran/tidyquant | R | false | true | 1,029 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{FANG}
\alias{FANG}
\title{Stock prices for the "FANG" stocks.}
\format{
A "tibble" ("tidy" data frame) with 4,032 rows and 8 variables:
\describe{
\item{symbol}{stock ticker symbol}
\item{date}{trade date}
\item{open}{stock price at the open of trading, in USD}
\item{high}{stock price at the highest point during trading, in USD}
\item{low}{stock price at the lowest point during trading, in USD}
\item{close}{stock price at the close of trading, in USD}
\item{volume}{number of shares traded}
\item{adjusted}{stock price at the close of trading adjusted for stock splits, in USD}
}
}
\source{
\url{https://www.investopedia.com/terms/f/fang-stocks-fb-amzn.asp}
}
\usage{
FANG
}
\description{
A dataset containing the daily historical stock prices for the "FANG" tech stocks,
"FB", "AMZN", "NFLX", and "GOOG", spanning from the beginning of
2013 through the end of 2016.
}
\keyword{datasets}
|
# Compute the grid layout for an aheatmap-style heatmap and push the top-level
# viewport.  The layout is a 7x6 grid: rows = main title / column dendrogram /
# column annotation / matrix / column names / subtitle / info panel; columns =
# row dendrogram / row annotation / matrix / row names / legend / annotation
# legend.  Returns a list with the overall device width/height (inches), the
# pushed viewport, the minimal cell dimension in bigpts (used by the caller to
# decide whether cell borders are drawn) and the final cell width/height.
#
# rown, coln            : row / column labels, or NULL for none
# nrow, ncol            : matrix dimensions
# cellheight, cellwidth : fixed cell sizes in bigpts; NA = fill the device
# treeheight_col/_row   : dendrogram sizes in bigpts
# legend                : legend break values, or NA for no legend
# main, sub, info       : optional grobs placed above / below the heatmap
# fontsize*             : base / row / column font sizes
#
# NOTE(review): relies on is_NA(), vplayout() and tryViewport() defined
# elsewhere in the package, and the fontsize_col default references `cexCol`,
# which is not an argument -- it must be visible in the calling environment;
# confirm.
lo <- function (rown, coln, nrow, ncol, cellheight = NA, cellwidth = NA
, treeheight_col = 50, treeheight_row = 0, legend, main = NULL, sub = NULL, info = NULL
, fontsize, fontsize_row, fontsize_col = cexCol * fontsize, ...){
#annotation_colors <- annTracks$colors
#row_annotation <- annTracks$annRow
#annotation <- annTracks$annCol
# Height reserved for (rotated) column labels: padding + tallest label.
coln_height <- unit(10, "bigpts")
if(!is.null(coln)){
longest_coln = which.max(nchar(coln))
gp = gpar(fontsize = fontsize_col, ...)
coln_height <- coln_height + unit(1.1, "grobheight", textGrob(coln[longest_coln], rot = 90, gp = gp))
}
# Width reserved for row labels: padding + widest label.
rown_width <- rown_width_min <- unit(10, "bigpts")
if(!is.null(rown)){
longest_rown = which.max(nchar(rown))
gp = gpar(fontsize = fontsize_row, ...)
rown_width <- rown_width_min + unit(1.2, "grobwidth", textGrob(rown[longest_rown], gp = gp))
}
gp = list(fontsize = fontsize, ...)
# Legend position: width is driven by the widest break label (with a floor
# sized for a signed two-decimal number) and the "Scale" title.
if( !is_NA(legend) ){
longest_break = which.max(nchar(as.character(legend)))
longest_break = unit(1.1, "grobwidth", textGrob(as.character(legend)[longest_break], gp = do.call(gpar, gp)))
# minimum fixed width: plan for 2 decimals and a sign
min_lw = unit(1.1, "grobwidth", textGrob("-00.00", gp = do.call(gpar, gp)))
longest_break = max(longest_break, min_lw)
title_length = unit(1.1, "grobwidth", textGrob("Scale", gp = gpar(fontface = "bold", ...)))
legend_width = unit(12, "bigpts") + longest_break * 1.2
legend_width = max(title_length, legend_width)
}
else{
legend_width = unit(0, "bigpts")
}
#.annLegend.dim <- function(annotation, fontsize){
# Width of the corresponding legend
# longest_ann <- unlist(lapply(annotation, names))
# longest_ann <- longest_ann[which.max(nchar(longest_ann))]
# annot_legend_width = unit(1, "grobwidth", textGrob(longest_ann, gp = gpar(fontsize=fontsize, ...))) + unit(10, "bigpts")
# width of the legend title
# annot_legend_title <- names(annotation)[which.max(nchar(names(annotation)))]
# annot_legend_title_width = unit(1, "grobwidth", textGrob(annot_legend_title, gp = gpar(fontface = "bold", fontsize=fontsize, ...)))
#
# total width
# max(annot_legend_width, annot_legend_title_width) + unit(5, "bigpts")
#}
# Column annotations: annotation support is disabled in this build -- the
# computed tracks below are hard-wired to zero size (original code kept
# commented out for reference).
#if( !is_NA(annotation) ){
# Column annotation height
# annot_height = unit(ncol(annotation) * (8 + 2) + 2, "bigpts")
#}
#else{
annot_height = unit(0, "bigpts")
#}
# add a viewport for the row annotations
#if ( !is_NA(row_annotation) ) {
# Row annotation width
# row_annot_width = unit(ncol(row_annotation) * (8 + 2) + 2, "bigpts")
#}
#else {
row_annot_width = unit(0, "bigpts")
#}
# Width of the annotation legend (also disabled -- always zero here).
annot_legend_width <- unit(0, "bigpts")
#annot_legend_width <-
# if( annotation_legend && !is_NA(annotation_colors) ){
# .annLegend.dim(annotation_colors, fontsize)
# }else unit(0, "bigpts")
# Tree height: requested dendrogram sizes plus a small gap.
treeheight_col = unit(treeheight_col, "bigpts") + unit(5, "bigpts")
treeheight_row = unit(treeheight_row, "bigpts") + unit(5, "bigpts")
# main title
main_height <- if(!is.null(main)) unit(1, "grobheight", main) + unit(20, "bigpts") else unit(0, "bigpts")
# sub title
sub_height <- if(!is.null(sub)) unit(1, "grobheight", sub) + unit(10, "bigpts") else unit(0, "bigpts")
# info panel
if( !is.null(info) ){
info_height <- unit(1, "grobheight", info) + unit(20, "bigpts")
info_width <- unit(1, "grobwidth", info) + unit(10, "bigpts")
}else{
info_height <- unit(0, "bigpts")
info_width <- unit(0, "bigpts")
}
# Set cell sizes: if not fixed by the caller, the matrix panel fills whatever
# space remains after all the side tracks are subtracted.
if(is.na(cellwidth)){
matwidth = unit(1, "npc") - rown_width - legend_width - row_annot_width - treeheight_row - annot_legend_width
}
else{
matwidth = unit(cellwidth * ncol, "bigpts")
}
if(is.na(cellheight)){
matheight = unit(1, "npc") - treeheight_col - annot_height - main_height - coln_height - sub_height - info_height
# recompute the cell width depending on the automatic fontsize: row labels
# are shrunk to at most 60% of the derived cell height, which in turn
# changes the label-column width and therefore the matrix width.
if( is.na(cellwidth) && !is.null(rown) ){
cellheight <- convertHeight(unit(1, "grobheight", rectGrob(0,0, matwidth, matheight)), "bigpts", valueOnly = T) / nrow
fontsize_row <- convertUnit(min(unit(fontsize_row, 'points'), unit(0.6*cellheight, 'bigpts')), 'points')
rown_width <- rown_width_min + unit(1.2, "grobwidth", textGrob(rown[longest_rown], gp = gpar(fontsize=fontsize_row, ...)))
matwidth <- unit(1, "npc") - rown_width - legend_width - row_annot_width - treeheight_row - annot_legend_width
}
}
else{
matheight = unit(cellheight * nrow, "bigpts")
}
# HACK:
# - use 6 instead of 5 column for the row_annotation
# - take into account the associated legend's width
# Produce layout(): vplayout(NULL) allocates a unique per-plot name so that
# viewport names from successive heatmaps do not clash.
unique.name <- vplayout(NULL)
lo <- grid.layout(nrow = 7, ncol = 6
, widths = unit.c(treeheight_row, row_annot_width, matwidth, rown_width, legend_width, annot_legend_width)
, heights = unit.c(main_height, treeheight_col, annot_height, matheight, coln_height, sub_height, info_height))
hvp <- viewport( name=paste('aheatmap', unique.name, sep='-'), layout = lo)
pushViewport(hvp)
#grid.show.layout(lo); stop('sas')
# Get cell dimensions by measuring the matrix panel's viewport directly.
vplayout('mat')
cellwidth = convertWidth(unit(1, "npc"), "bigpts", valueOnly = T) / ncol
cellheight = convertHeight(unit(1, "npc"), "bigpts", valueOnly = T) / nrow
upViewport()
# Total device size implied by the layout, in inches.
height <- as.numeric(convertHeight(sum(lo$height), "inches"))
width <- as.numeric(convertWidth(sum(lo$width), "inches"))
# Return minimal cell dimension in bigpts to decide if borders are drawn
mindim = min(cellwidth, cellheight)
return( list(width=width, height=height, vp=hvp, mindim=mindim, cellwidth=cellwidth, cellheight=cellheight) )
}
# vplayout dispatches drawing into named sub-viewports of the heatmap layout.
# The enclosed `graphic.name` records a per-plot unique prefix so viewport
# names do not clash between successive heatmaps on the same device.
#
# BUG FIX: the original assigned the *factory* function itself
# (vplayout <- function () {...}) without ever invoking it, so every call
# such as vplayout(NULL) or vplayout('mat') in lo() above failed with an
# "unused argument" error.  Wrapping the shared state in local() assigns the
# inner dispatcher, which is what the callers expect.
vplayout <- local({
    graphic.name <- NULL
    function(x, y, verbose = getOption('verbose') ){
        # initialize the graph name: vplayout(NULL) is called once per plot
        # and returns the fresh unique prefix.
        if( is.null(x) ){
            graphic.name <<- grid:::vpAutoName()
            return(graphic.name)
        }
        name <- NULL
        if( !is.numeric(x) ){
            name <- paste(graphic.name, x, sep='-')
            # Push a caller-supplied viewport under the namespaced name.
            if( !missing(y) && is(y, 'viewport') ){
                y$name <- name
                return(pushViewport(y))
            }
            # Reuse the viewport if it already exists (tryViewport is defined
            # elsewhere in the package).
            if( !is.null(tryViewport(name, verbose=verbose)) )
                return()
            # Map symbolic panel names to (row, col) positions in the 7x6
            # layout built by lo().
            switch(x
            , main={x<-1; y<-3;}
            , ctree={x<-2; y<-3;}
            , cann={x<-3; y<-3;}
            , rtree={x<-4; y<-1;}
            , rann={x<-4; y<-2;}
            , mat={x<-4; y<-3;}
            , rnam={x<-4; y<-4;}
            , leg={x<-4; y<-5;}
            , aleg={x<-4; y<-6;}
            , cnam={x<-5; y<-3;}
            , sub={x<-6; y<-3;}
            , info={x<-7; y<-3;}
            , stop("aheatmap - invalid viewport name")
            )
        }
        if( verbose ) message("vp - create ", name)
        pushViewport(viewport(layout.pos.row = x, layout.pos.col = y, name=name))
    }
})
| /R/layout.R | no_license | mbootwalla/MethylHose | R | false | false | 6,610 | r | lo <- function (rown, coln, nrow, ncol, cellheight = NA, cellwidth = NA
, treeheight_col = 50, treeheight_row = 0, legend, main = NULL, sub = NULL, info = NULL
, fontsize, fontsize_row, fontsize_col = cexCol * fontsize, ...){
#annotation_colors <- annTracks$colors
#row_annotation <- annTracks$annRow
#annotation <- annTracks$annCol
coln_height <- unit(10, "bigpts")
if(!is.null(coln)){
longest_coln = which.max(nchar(coln))
gp = gpar(fontsize = fontsize_col, ...)
coln_height <- coln_height + unit(1.1, "grobheight", textGrob(coln[longest_coln], rot = 90, gp = gp))
}
rown_width <- rown_width_min <- unit(10, "bigpts")
if(!is.null(rown)){
longest_rown = which.max(nchar(rown))
gp = gpar(fontsize = fontsize_row, ...)
rown_width <- rown_width_min + unit(1.2, "grobwidth", textGrob(rown[longest_rown], gp = gp))
}
gp = list(fontsize = fontsize, ...)
# Legend position
if( !is_NA(legend) ){
longest_break = which.max(nchar(as.character(legend)))
longest_break = unit(1.1, "grobwidth", textGrob(as.character(legend)[longest_break], gp = do.call(gpar, gp)))
# minimum fixed width: plan for 2 decimals and a sign
min_lw = unit(1.1, "grobwidth", textGrob("-00.00", gp = do.call(gpar, gp)))
longest_break = max(longest_break, min_lw)
title_length = unit(1.1, "grobwidth", textGrob("Scale", gp = gpar(fontface = "bold", ...)))
legend_width = unit(12, "bigpts") + longest_break * 1.2
legend_width = max(title_length, legend_width)
}
else{
legend_width = unit(0, "bigpts")
}
#.annLegend.dim <- function(annotation, fontsize){
# Width of the corresponding legend
# longest_ann <- unlist(lapply(annotation, names))
# longest_ann <- longest_ann[which.max(nchar(longest_ann))]
# annot_legend_width = unit(1, "grobwidth", textGrob(longest_ann, gp = gpar(fontsize=fontsize, ...))) + unit(10, "bigpts")
# width of the legend title
# annot_legend_title <- names(annotation)[which.max(nchar(names(annotation)))]
# annot_legend_title_width = unit(1, "grobwidth", textGrob(annot_legend_title, gp = gpar(fontface = "bold", fontsize=fontsize, ...)))
#
# total width
# max(annot_legend_width, annot_legend_title_width) + unit(5, "bigpts")
#}
# Column annotations
#if( !is_NA(annotation) ){
# Column annotation height
# annot_height = unit(ncol(annotation) * (8 + 2) + 2, "bigpts")
#}
#else{
annot_height = unit(0, "bigpts")
#}
# add a viewport for the row annotations
#if ( !is_NA(row_annotation) ) {
# Row annotation width
# row_annot_width = unit(ncol(row_annotation) * (8 + 2) + 2, "bigpts")
#}
#else {
row_annot_width = unit(0, "bigpts")
#}
# Width of the annotation legend
annot_legend_width <- unit(0, "bigpts")
#annot_legend_width <-
# if( annotation_legend && !is_NA(annotation_colors) ){
# .annLegend.dim(annotation_colors, fontsize)
# }else unit(0, "bigpts")
# Tree height
treeheight_col = unit(treeheight_col, "bigpts") + unit(5, "bigpts")
treeheight_row = unit(treeheight_row, "bigpts") + unit(5, "bigpts")
# main title
main_height <- if(!is.null(main)) unit(1, "grobheight", main) + unit(20, "bigpts") else unit(0, "bigpts")
# sub title
sub_height <- if(!is.null(sub)) unit(1, "grobheight", sub) + unit(10, "bigpts") else unit(0, "bigpts")
# info panel
if( !is.null(info) ){
info_height <- unit(1, "grobheight", info) + unit(20, "bigpts")
info_width <- unit(1, "grobwidth", info) + unit(10, "bigpts")
}else{
info_height <- unit(0, "bigpts")
info_width <- unit(0, "bigpts")
}
# Set cell sizes
if(is.na(cellwidth)){
matwidth = unit(1, "npc") - rown_width - legend_width - row_annot_width - treeheight_row - annot_legend_width
}
else{
matwidth = unit(cellwidth * ncol, "bigpts")
}
if(is.na(cellheight)){
matheight = unit(1, "npc") - treeheight_col - annot_height - main_height - coln_height - sub_height - info_height
# recompute the cell width depending on the automatic fontsize
if( is.na(cellwidth) && !is.null(rown) ){
cellheight <- convertHeight(unit(1, "grobheight", rectGrob(0,0, matwidth, matheight)), "bigpts", valueOnly = T) / nrow
fontsize_row <- convertUnit(min(unit(fontsize_row, 'points'), unit(0.6*cellheight, 'bigpts')), 'points')
rown_width <- rown_width_min + unit(1.2, "grobwidth", textGrob(rown[longest_rown], gp = gpar(fontsize=fontsize_row, ...)))
matwidth <- unit(1, "npc") - rown_width - legend_width - row_annot_width - treeheight_row - annot_legend_width
}
}
else{
matheight = unit(cellheight * nrow, "bigpts")
}
# HACK:
# - use 6 instead of 5 column for the row_annotation
# - take into account the associated legend's width
# Produce layout()
unique.name <- vplayout(NULL)
lo <- grid.layout(nrow = 7, ncol = 6
, widths = unit.c(treeheight_row, row_annot_width, matwidth, rown_width, legend_width, annot_legend_width)
, heights = unit.c(main_height, treeheight_col, annot_height, matheight, coln_height, sub_height, info_height))
hvp <- viewport( name=paste('aheatmap', unique.name, sep='-'), layout = lo)
pushViewport(hvp)
#grid.show.layout(lo); stop('sas')
# Get cell dimensions
vplayout('mat')
cellwidth = convertWidth(unit(1, "npc"), "bigpts", valueOnly = T) / ncol
cellheight = convertHeight(unit(1, "npc"), "bigpts", valueOnly = T) / nrow
upViewport()
height <- as.numeric(convertHeight(sum(lo$height), "inches"))
width <- as.numeric(convertWidth(sum(lo$width), "inches"))
# Return minimal cell dimension in bigpts to decide if borders are drawn
mindim = min(cellwidth, cellheight)
return( list(width=width, height=height, vp=hvp, mindim=mindim, cellwidth=cellwidth, cellheight=cellheight) )
}
# Factory producing a closure that creates and navigates the named grid
# viewports of an aheatmap layout. The closure keeps `graphic.name` (a
# per-plot unique prefix) in its enclosing environment via `<<-`.
# NOTE(review): other code calls vplayout(NULL) directly, which only works if
# `vplayout` is replaced by the closure returned here -- confirm how this
# factory is wired up in the full file.
vplayout <- function ()
{
# per-plot unique name, set on the first call with x = NULL
graphic.name <- NULL
# x: NULL to (re)initialise the plot name, a component name such as "mat",
#    or a numeric layout row; y: a viewport to push, or a numeric layout column.
function(x, y, verbose = getOption('verbose') ){
# initialize the graph name
if( is.null(x) ){
graphic.name <<- grid:::vpAutoName()
return(graphic.name)
}
name <- NULL
if( !is.numeric(x) ){
# build a plot-unique viewport name for the requested component
name <- paste(graphic.name, x, sep='-')
if( !missing(y) && is(y, 'viewport') ){
# push the caller-supplied viewport under the unique name
y$name <- name
return(pushViewport(y))
}
# if the named viewport already exists, just navigate to it
if( !is.null(tryViewport(name, verbose=verbose)) )
return()
# map the component name to its layout cell; note that below this
# point x holds the layout ROW and y the layout COLUMN
switch(x
, main={x<-1; y<-3;}
, ctree={x<-2; y<-3;}
, cann={x<-3; y<-3;}
, rtree={x<-4; y<-1;}
, rann={x<-4; y<-2;}
, mat={x<-4; y<-3;}
, rnam={x<-4; y<-4;}
, leg={x<-4; y<-5;}
, aleg={x<-4; y<-6;}
, cnam={x<-5; y<-3;}
, sub={x<-6; y<-3;}
, info={x<-7; y<-3;}
, stop("aheatmap - invalid viewport name")
)
}
if( verbose ) message("vp - create ", name)
# create (and push) a new named viewport at the requested layout cell
pushViewport(viewport(layout.pos.row = x, layout.pos.col = y, name=name))
}
}
|
#clustering sample
# Toy k-means demo: simulate two mark variables for 10 students and
# partition them into two clusters.
# simulate 10 marks from N(6, 2) and N(5, 2), rounded up to integers
marks1 = ceiling(rnorm(10,6,2))
marks2 = ceiling(rnorm(10,5,2))
df = data.frame(marks1,marks2)
head(df)
# partition the 10 points into 2 clusters
km = kmeans(df,centers = 2)
km$cluster
# attach cluster labels and list rows ordered by cluster
df2 = cbind(df, cluster = km$cluster)
df2[order(df2$cluster),]
# NOTE(review): col=2:3 recycles colours by row position, not by cluster,
# so points are NOT coloured by cluster membership; likewise km$cluster
# (length 10) is recycled against the 2 centre rows below.
plot(df$marks1, df$marks2, col=2:3, pch=c(16,17))
points(km$centers, pch=20, cex=2, col=km$cluster)
km$centers
| /clustering-sample.R | no_license | mahamoodsalamalipm1994/analytics1 | R | false | false | 337 | r | #clustering sample
# Toy k-means demo: simulate two mark variables for 10 students and
# partition them into two clusters.
marks1 <- ceiling(rnorm(10, 6, 2))
marks2 <- ceiling(rnorm(10, 5, 2))
df <- data.frame(marks1, marks2)
head(df)
km <- kmeans(df, centers = 2)
km$cluster
# attach cluster labels and list rows grouped by cluster
df2 <- cbind(df, cluster = km$cluster)
df2[order(df2$cluster), ]
# Fix: colour/shape each point by its assigned cluster. The original used
# col = 2:3, which recycled colours by row position rather than cluster.
plot(df$marks1, df$marks2, col = km$cluster + 1, pch = c(16, 17)[km$cluster])
# mark each cluster centre in its cluster's colour
points(km$centers, pch = 20, cex = 2, col = seq_len(nrow(km$centers)) + 1)
km$centers
|
\name{propMissLgthCons}
\alias{propMissLgthCons}
\alias{propMissLgthCons,csDataCons-method}
\docType{methods}
\title{Proportions of empty length classes in an age-length key}
\description{
This method calculates the proportion of empty length classes in an age-length key obtained from a 'csDataCons' object (ca table).
The 'pEmpty' element gives the proportion of missing length classes (LC) per age-length key (alk) within the hl table.
The 'pEmptyExtr' element gives the proportion of extreme missing length classes (among all missing length classes) per age-length key within the hl table.
}
\usage{
propMissLgthCons(object,...)
}
\arguments{
\item{object}{A \emph{csDataCons} object.}
\item{...}{Further arguments.}
}
\author{Mathieu Merzereaud}
\seealso{\code{\link{alkLgthRec}}, \code{\link{viewGapsAlkCons}}, \code{\link[COSTcore]{csDataCons}}
}
\examples{
data(sole)
#subset to "27.7.d" & "27.7.e" areas
csSub <- subset(sole.cs,area\%in\%c("27.7.d","27.7.e"),table="hh")
conSole.cs <- csDataCons(csDataVal(csSub),strIni(spaceStrata="area"))
propMissLgthCons(conSole.cs)
}
\keyword{methods}
| /COSTdbe/man/propMissLgthCons.rd | no_license | BackupTheBerlios/cost-project | R | false | false | 1,052 | rd | \name{propMissLgthCons}
\alias{propMissLgthCons}
\alias{propMissLgthCons,csDataCons-method}
\docType{methods}
\title{Proportions of empty length classes in an age-length key}
\description{
This method calculates the proportion of empty length classes in an age-length key obtained from a 'csDataCons' object (ca table).
The 'pEmpty' element gives the proportion of missing length classes (LC) per age-length key (alk) within the hl table.
The 'pEmptyExtr' element gives the proportion of extreme missing length classes (among all missing length classes) per age-length key within the hl table.
}
\usage{
propMissLgthCons(object,...)
}
\arguments{
\item{object}{A \emph{csDataCons} object.}
\item{...}{Further arguments.}
}
\author{Mathieu Merzereaud}
\seealso{\code{\link{alkLgthRec}}, \code{\link{viewGapsAlkCons}}, \code{\link[COSTcore]{csDataCons}}
}
\examples{
data(sole)
#subset to "27.7.d" & "27.7.e" areas
csSub <- subset(sole.cs,area\%in\%c("27.7.d","27.7.e"),table="hh")
conSole.cs <- csDataCons(csDataVal(csSub),strIni(spaceStrata="area"))
propMissLgthCons(conSole.cs)
}
\keyword{methods}
|
# Simulate SDTM-style lab results (LB domain): each lab test gets a random
# trend (decreasing / stable / increasing) and a value is drawn per visit
# from the test's reference range. Writes ../lb-trend.csv and plots a QC chart.
# NOTE(review): rm(list = ls()) in a script clobbers the caller's workspace.
rm(list = ls())
library(tidyverse)
# fixed seed so the simulated data are reproducible
set.seed(2357)
### Input data
# completed subject visits; SV dates are reused as the lab collection dates
sv <- data.table::fread('../sv.csv') %>%
rename(
LBDT = SVDT,
LBDY = SVDY
) %>%
filter(
SVSTATUS == 'Completed'
)
# lab dictionary; assign each test a trend: ~1/3 down, ~1/3 up, rest stable
labs <- read.csv('../../data-dictionaries/labs.csv', colClasses = 'character') %>% select(-SEX)
labs$rando <- runif(labs %>% nrow)
labs$trend <- case_when(
labs$rando < .333 ~ -1,
labs$rando > .666 ~ 1,
TRUE ~ 0
)
scheduleOfEvents <- read.csv('../../data-dictionaries/schedule-of-events.csv', colClasses = 'character')
### Output data
# NOTE(review): growing `lb` with rbind.fill inside the loop is O(n^2);
# prefer accumulating pieces in a pre-allocated list and binding once.
lb <- NULL
for (i in 1:nrow(sv)) {
visit <- sv[i,]
# cross every lab test with this visit
lb_vis <- merge(labs, visit, all = TRUE)
for (j in 1:nrow(lb_vis)) {
LBSTNRLO <- as.numeric(lb_vis[j,'LBSTNRLO'])
LBSTNRHI <- as.numeric(lb_vis[j,'LBSTNRHI'])
# midpoint and half-width of the test's reference range
mean <- (LBSTNRHI + LBSTNRLO)/2
std <- (LBSTNRHI - LBSTNRLO)/2
trend <- lb_vis[j,'trend']
# NOTE(review): the post-baseline rnorm() calls below omit the sd
# argument (so sd defaults to 1) while the other draws pass std as the
# sd -- confirm whether that is intended.
if (trend == -1)
# decreasing tests: high at baseline, drift below the midpoint after
lb_vis[j,'LBSTRESN'] <- case_when(
lb_vis[j,'VISITNUM'] == 0 ~ max(rnorm(1, mean + std, std), 0),
#lb_vis[j,'VISITNUM'] < 5 ~ max(rnorm(1, mean, std), 0),
TRUE ~ max(rnorm(1, mean - 2*runif(1)*std), 0)
)
else if (trend == 1)
# increasing tests: low at baseline, drift above the midpoint after
lb_vis[j,'LBSTRESN'] <- case_when(
lb_vis[j,'VISITNUM'] == 0 ~ max(rnorm(1, mean - std, std), 0),
#lb_vis[j,'VISITNUM'] < 5 ~ max(rnorm(1, mean, std), 0),
TRUE ~ max(rnorm(1, mean + 2*runif(1)*std), 0)
)
else
# stable tests fluctuate around the reference-range midpoint
lb_vis[j,'LBSTRESN'] <- max(rnorm(1, mean, std), 0)
}
lb <- plyr::rbind.fill(lb, lb_vis)
}
#scheduleOfEvents_labs <- merge(scheduleOfEvents, labs, all = TRUE) %>%
# sample_n(nrow(scheduleOfEvents)*nrow(labs)/10) %>%
# mutate(VISIT_LBTEST = paste(VISIT, LBTEST, sep = '_'))
# order rows and keep the LB-domain columns of interest
lb1 <- lb %>%
#mutate(
# LBSTRESN = ifelse(
# !paste(VISIT, LBTEST, sep = '_') %in% scheduleOfEvents_labs$VISIT_LBTEST,
# LBSTRESN,
# NA
# )
#) %>%
arrange(USUBJID, VISITNUM, LBTEST) %>%
select(USUBJID, VISIT, VISITNUM, LBDT, LBDY, LBCAT, LBTEST, LBSTRESU, LBSTRESN, LBSTNRLO, LBSTNRHI)
# QC summary: mean result per test at each integer (scheduled) visit
lb_summary <- lb1 %>%
filter(VISITNUM %% 1 == 0) %>%
group_by(LBTEST, VISITNUM, VISIT) %>%
summarize(
LBSTRESN = mean(LBSTRESN)
) %>%
ungroup %>%
arrange(LBTEST, VISITNUM)
# one panel per lab test, free y scale so each trend is visible
chart <- lb_summary %>%
ggplot(aes(x = VISITNUM, y = LBSTRESN, group = 1)) +
geom_line() +
facet_wrap(
vars(LBTEST),
scales = 'free_y'
)
chart
### Output data
write.csv(
lb1,
'../lb-trend.csv',
row.names = FALSE,
na = ''
)
| /data/clinical-trials/sdtm/scripts/lb-trend.R | permissive | samussiah/data-library | R | false | false | 2,927 | r | rm(list = ls())
# Simulate SDTM-style lab results (LB domain): each lab test gets a random
# trend (decreasing / stable / increasing) and a value is drawn per visit
# from the test's reference range.
# Inputs : ../sv.csv (subject visits) plus the labs / schedule-of-events
#          data dictionaries.
# Output : ../lb-trend.csv and a QC chart of mean results by visit.
library(tidyverse)
# fixed seed so the simulated data are reproducible
set.seed(2357)

### Input data
# Completed subject visits; SV dates are reused as the lab collection dates.
sv <- data.table::fread('../sv.csv') %>%
  rename(
    LBDT = SVDT,
    LBDY = SVDY
  ) %>%
  filter(
    SVSTATUS == 'Completed'
  )

# Lab dictionary; assign each test a random trend direction:
# ~1/3 decreasing (-1), ~1/3 increasing (1), remainder stable (0).
labs <- read.csv('../../data-dictionaries/labs.csv', colClasses = 'character') %>% select(-SEX)
labs$rando <- runif(nrow(labs))
labs$trend <- case_when(
  labs$rando < .333 ~ -1,
  labs$rando > .666 ~ 1,
  TRUE ~ 0
)

scheduleOfEvents <- read.csv('../../data-dictionaries/schedule-of-events.csv', colClasses = 'character')

### Output data
# Pre-allocate one slot per visit and bind once at the end (growing a data
# frame with rbind.fill inside the loop was O(n^2)).
lb_pieces <- vector("list", nrow(sv))
for (i in seq_len(nrow(sv))) {
  visit <- sv[i,]
  # cross every lab test with this visit
  lb_vis <- merge(labs, visit, all = TRUE)
  for (j in seq_len(nrow(lb_vis))) {
    LBSTNRLO <- as.numeric(lb_vis[j,'LBSTNRLO'])
    LBSTNRHI <- as.numeric(lb_vis[j,'LBSTNRHI'])
    # midpoint and half-width of the test's reference range
    midpoint <- (LBSTNRHI + LBSTNRLO)/2
    halfwidth <- (LBSTNRHI - LBSTNRLO)/2
    trend <- lb_vis[j,'trend']
    # NOTE(review): the post-baseline rnorm() calls below omit the sd
    # argument (so sd defaults to 1) while the other draws use the
    # reference half-width -- confirm whether that is intended.
    if (trend == -1)
      # decreasing tests: high at baseline, drift below the midpoint after
      lb_vis[j,'LBSTRESN'] <- case_when(
        lb_vis[j,'VISITNUM'] == 0 ~ max(rnorm(1, midpoint + halfwidth, halfwidth), 0),
        TRUE ~ max(rnorm(1, midpoint - 2*runif(1)*halfwidth), 0)
      )
    else if (trend == 1)
      # increasing tests: low at baseline, drift above the midpoint after
      lb_vis[j,'LBSTRESN'] <- case_when(
        lb_vis[j,'VISITNUM'] == 0 ~ max(rnorm(1, midpoint - halfwidth, halfwidth), 0),
        TRUE ~ max(rnorm(1, midpoint + 2*runif(1)*halfwidth), 0)
      )
    else
      # stable tests fluctuate around the reference-range midpoint
      lb_vis[j,'LBSTRESN'] <- max(rnorm(1, midpoint, halfwidth), 0)
  }
  lb_pieces[[i]] <- lb_vis
}
lb <- dplyr::bind_rows(lb_pieces)

# Order rows and keep the LB-domain columns of interest.
lb1 <- lb %>%
  arrange(USUBJID, VISITNUM, LBTEST) %>%
  select(USUBJID, VISIT, VISITNUM, LBDT, LBDY, LBCAT, LBTEST, LBSTRESU, LBSTRESN, LBSTNRLO, LBSTNRHI)

# QC summary: mean result per test at each integer (scheduled) visit.
lb_summary <- lb1 %>%
  filter(VISITNUM %% 1 == 0) %>%
  group_by(LBTEST, VISITNUM, VISIT) %>%
  summarize(
    LBSTRESN = mean(LBSTRESN)
  ) %>%
  ungroup %>%
  arrange(LBTEST, VISITNUM)

# One panel per lab test, free y scale so each trend is visible.
chart <- lb_summary %>%
  ggplot(aes(x = VISITNUM, y = LBSTRESN, group = 1)) +
  geom_line() +
  facet_wrap(
    vars(LBTEST),
    scales = 'free_y'
  )
chart

### Output data
write.csv(
  lb1,
  '../lb-trend.csv',
  row.names = FALSE,
  na = ''
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gdal_cliprasterbyextent.R
\name{gdal_cliprasterbyextent}
\alias{gdal_cliprasterbyextent}
\title{QGIS algorithm Clip raster by extent}
\usage{
gdal_cliprasterbyextent(
INPUT = qgisprocess::qgis_default_value(),
PROJWIN = qgisprocess::qgis_default_value(),
OVERCRS = qgisprocess::qgis_default_value(),
NODATA = qgisprocess::qgis_default_value(),
OPTIONS = qgisprocess::qgis_default_value(),
DATA_TYPE = qgisprocess::qgis_default_value(),
EXTRA = qgisprocess::qgis_default_value(),
OUTPUT = qgisprocess::qgis_default_value(),
...,
.complete_output = TRUE
)
}
\arguments{
\item{INPUT}{\code{raster} - Input layer. Path to a raster layer.}
\item{PROJWIN}{\code{extent} - Clipping extent. A comma delimited string of x min, x max, y min, y max. E.g. '4,10,101,105'. Path to a layer. The extent of the layer is used..}
\item{OVERCRS}{\code{boolean} - Override the projection for the output file. 1 for true/yes. 0 for false/no.}
\item{NODATA}{\code{number} - Assign a specified nodata value to output bands. A numeric value.}
\item{OPTIONS}{\code{string} - Additional creation options. String value.}
\item{DATA_TYPE}{\code{enum} of \verb{("Use Input Layer Data Type", "Byte", "Int16", "UInt16", "UInt32", "Int32", "Float32", "Float64", "CInt16", "CInt32", "CFloat32", "CFloat64")} - Output data type. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.}
\item{EXTRA}{\code{string} - Additional command-line parameters. String value.}
\item{OUTPUT}{\code{rasterDestination} - Clipped (extent). Path for new raster layer.}
\item{...}{further parameters passed to \code{qgisprocess::qgis_run_algorithm()}}
\item{.complete_output}{logical specifying if the complete output of \code{qgisprocess::qgis_run_algorithm()} should be used (\code{TRUE}) or only the first (most likely the main) output should be read (\code{FALSE}). Default value is \code{TRUE}.}
}
\description{
QGIS Algorithm provided by GDAL Clip raster by extent (gdal:cliprasterbyextent)
}
\details{
\subsection{Outputs description}{
\itemize{
\item OUTPUT - outputRaster - Clipped
}
}
}
| /man/gdal_cliprasterbyextent.Rd | permissive | VB6Hobbyst7/r_package_qgis | R | false | true | 2,160 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gdal_cliprasterbyextent.R
\name{gdal_cliprasterbyextent}
\alias{gdal_cliprasterbyextent}
\title{QGIS algorithm Clip raster by extent}
\usage{
gdal_cliprasterbyextent(
INPUT = qgisprocess::qgis_default_value(),
PROJWIN = qgisprocess::qgis_default_value(),
OVERCRS = qgisprocess::qgis_default_value(),
NODATA = qgisprocess::qgis_default_value(),
OPTIONS = qgisprocess::qgis_default_value(),
DATA_TYPE = qgisprocess::qgis_default_value(),
EXTRA = qgisprocess::qgis_default_value(),
OUTPUT = qgisprocess::qgis_default_value(),
...,
.complete_output = TRUE
)
}
\arguments{
\item{INPUT}{\code{raster} - Input layer. Path to a raster layer.}
\item{PROJWIN}{\code{extent} - Clipping extent. A comma delimited string of x min, x max, y min, y max. E.g. '4,10,101,105'. Path to a layer. The extent of the layer is used..}
\item{OVERCRS}{\code{boolean} - Override the projection for the output file. 1 for true/yes. 0 for false/no.}
\item{NODATA}{\code{number} - Assign a specified nodata value to output bands. A numeric value.}
\item{OPTIONS}{\code{string} - Additional creation options. String value.}
\item{DATA_TYPE}{\code{enum} of \verb{("Use Input Layer Data Type", "Byte", "Int16", "UInt16", "UInt32", "Int32", "Float32", "Float64", "CInt16", "CInt32", "CFloat32", "CFloat64")} - Output data type. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.}
\item{EXTRA}{\code{string} - Additional command-line parameters. String value.}
\item{OUTPUT}{\code{rasterDestination} - Clipped (extent). Path for new raster layer.}
\item{...}{further parameters passed to \code{qgisprocess::qgis_run_algorithm()}}
\item{.complete_output}{logical specifying if the complete output of \code{qgisprocess::qgis_run_algorithm()} should be used (\code{TRUE}) or only the first (most likely the main) output should be read (\code{FALSE}). Default value is \code{TRUE}.}
}
\description{
QGIS Algorithm provided by GDAL Clip raster by extent (gdal:cliprasterbyextent)
}
\details{
\subsection{Outputs description}{
\itemize{
\item OUTPUT - outputRaster - Clipped
}
}
}
|
#-------------------------------------------------------------------------------
# tcplLoadAsid: Load assay source id and name for the given fields
#-------------------------------------------------------------------------------
#' @rdname assay_funcs
#' @import data.table
#' @export
tcplLoadAsid <- function(fld = NULL, val = NULL, add.fld = NULL) {

  ## Columns to pull back: assay source id and its name.
  sel_cols <- c("assay_source.asid", "assay_source.assay_source_name")

  ## Assemble the SQL for the requested field/value filters.
  qry <- .buildAssayQ(out = sel_cols, tblo = c(6, 4:1),
                      fld = fld, val = val, add.fld = add.fld)

  ## Execute against the configured tcpl database; the trailing [] returns
  ## the data.table so it prints when called at top level.
  tcplQuery(query = qry, db = getOption("TCPL_DB"))[]

}
#------------------------------------------------------------------------------- | /tcpl/R/tcplLoadAsid.R | no_license | ingted/R-Examples | R | false | false | 830 | r | #-------------------------------------------------------------------------------
# tcplLoadAsid: Load assay source id and name for the given fields
#-------------------------------------------------------------------------------
#' @rdname assay_funcs
#' @import data.table
#' @export
tcplLoadAsid <- function(fld = NULL, val = NULL, add.fld = NULL) {
# columns to return: assay source id and assay source name
out <- c("assay_source.asid",
"assay_source.assay_source_name")
# build the SQL query for the requested field/value filters
qstring <- .buildAssayQ(out = out,
tblo = c(6, 4:1),
fld = fld,
val = val,
add.fld = add.fld)
# run the query against the configured tcpl database
dat <- tcplQuery(query = qstring, db = getOption("TCPL_DB"))
# `dat[]` returns the data.table so it prints when called at top level
dat[]
}
#------------------------------------------------------------------------------- |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mfa.R
\name{data_tables}
\alias{data_tables}
\title{data_tables}
\usage{
data_tables(data, sets, center, scale)
}
\arguments{
\item{data}{data set (a data frame or matrix)}
\item{sets}{a list of vectors indicating sets/blocks of variables, can be character vectors with names or numeric vectors with position of variables in the data table}
\item{center}{logical value or numeric vector of length equal to number of active variables; if numeric vector, each variable has corresponding value subtracted from it; if TRUE, subtract column means}
\item{scale}{logical value or numeric vector of length equal to number of active variables}
}
\value{
a list which contains all tables of the dataset
}
\description{
separate data into individual tables, preprocess and store in a list
}
\examples{
wines <- read.csv("https://raw.githubusercontent.com/ucb-stat243/stat243-fall-2016/master/problem-sets/final-project/data/wines.csv", stringsAsFactors = FALSE)
sets <- list(2:7, 8:13, 14:19, 20:24, 25:30, 31:35, 36:39, 40:45, 46:50, 51:54)
scaling_vec <- apply(subset(wines, select = unlist(sets)), 2, function(x) sqrt(sum((x - mean(x))^2)))
tables <- data_tables(wines, sets, TRUE, scaling_vec)
}
| /MFA/man/data_tables.Rd | no_license | BeaGir/MFA---L.Katz-N.Long-X.Zhang-J.Prosky-B.Girard | R | false | true | 1,270 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mfa.R
\name{data_tables}
\alias{data_tables}
\title{data_tables}
\usage{
data_tables(data, sets, center, scale)
}
\arguments{
\item{data}{data set (a data frame or matrix)}
\item{sets}{a list of vectors indicating sets/blocks of variables, can be character vectors with names or numeric vectors with position of variables in the data table}
\item{center}{logical value or numeric vector of length equal to number of active variables; if numeric vector, each variable has corresponding value subtracted from it; if TRUE, subtract column means}
\item{scale}{logical value or numeric vector of length equal to number of active variables}
}
\value{
a list which contains all tables of the dataset
}
\description{
separate data into individual tables, preprocess and store in a list
}
\examples{
wines <- read.csv("https://raw.githubusercontent.com/ucb-stat243/stat243-fall-2016/master/problem-sets/final-project/data/wines.csv", stringsAsFactors = FALSE)
sets <- list(2:7, 8:13, 14:19, 20:24, 25:30, 31:35, 36:39, 40:45, 46:50, 51:54)
scaling_vec <- apply(subset(wines, select = unlist(sets)), 2, function(x) sqrt(sum((x - mean(x))^2)))
tables <- data_tables(wines, sets, TRUE, scaling_vec)
}
|
#******************************* CLEAN DATA & PROCESS VARIABLES ***************************#
# #
# #
# #
#******************************************************************************************#
# Reads the aggregated CMU survey extract, cleans implausible numeric
# responses, derives analysis variables, and writes weekly state/region
# aggregates plus the statistical tests reported in the paper.
# source here library for file management
library(here)
# source global options
source(here("global_options.R"))
# set working directory for data (not public)
# NOTE(review): hard-coded private Dropbox path -- the script only runs on
# machines with access to the restricted microdata.
setwd("~/Dropbox/CMU Survey/National analysis/0 - Aggregated data")
# load data
# loads `d`, the individual-level survey responses (not public)
load("data_all.RData")
# set working directory
setwd(here("0 - Data"))
# ancillary files (zip to state + state codings)
z = read.csv("02_20_uszips.csv") %>% dplyr::select(zip, state_id)
states = read.csv("A3b-coding.csv")
# process data
e = d %>% mutate(
# format date as date
date = as.Date(StartDatetime, format = "%Y-%m-%d"),
# add a unique ID
#id = row_number(.),
# format zip code as numeric
zip = as.numeric(A3),
# person-level fever
# B2 holds the symptom checklist; a leading "1," means fever was selected
fever = substring(B2,1,2)=="1,") %>%
# get rid of columns
dplyr::select(-StartDatetime, -EndDatetime) %>%
# join to zip codes
left_join(z, "zip") %>%
# join to state ID
# state_abbv will now be the state abbreviation
mutate(state_num = as.numeric(A3b)) %>%
left_join(states, c("state_num"="code"))
# filter for date
# restrict to the study window
e = e %>% filter(date>="2020-05-15" & date<="2020-11-22")
dim(e) # supplement
# label questions
# rename the first 42 survey columns to readable names
# NOTE(review): relies on the column order of `d` being fixed -- verify
# against the survey codebook if the extract changes.
names(e)[1:42] = c("HH_fever", "HH_sore_throat", "HH_cough", "HH_sob", "HH_diff_breathing",
"HH_n_sick", "HH_n", "zip_code", "state_rep", "com_n_sick", "symp", "symp_other",
"symp_days", "taken_temp", "cough_mucus", "tested_covid", "hospital",
"conditions", "flu_shot", "work_outside_home", "hcw", "nursing_home",
"travel_outside_state", "avoid_contact", "anxious", "depressed", "worried_seriously_ill",
"work_contacts", "shopping_contacts", "social_contacts", "other_contacts",
"contact_pos", "contact_pos_HH",
"gender", "gender_other", "pregnant", "age", "HH_u18", "HH_18_64", "HH_65_plus", "finances",
"highest_temp")
#### CLEANING ####
# check that numeric variables were either left blank or
# that reasonable values were added
# NOTE(review): `c` shadows base::c() here; calls like c(...) still resolve
# to the function because R skips non-function objects in call position, but
# a clearer name would be safer.
c = e %>%
# check if numeric answers were EITHER left blank
# OR
# a reasonable value was entered
mutate(temp_chk = (is.na(highest_temp) | (highest_temp <= 104 & highest_temp > 97)),
HH_chk = (is.na(HH_n_sick) | HH_n_sick <= 30) & (is.na(HH_n) | HH_n <= 30),
n_sick_chk = (HH_n_sick <= HH_n) | is.na(HH_n_sick) | is.na(HH_n),
#com_chk = is.na(com_n_sick) | com_n_sick <= 100),
work_chk = (is.na(work_contacts) | work_contacts < 100),
social_chk = (is.na(social_contacts) | social_contacts < 100),
shopping_chk = (is.na(shopping_contacts) | shopping_contacts < 100),
other_chk = (is.na(other_contacts) | other_contacts < 100),
all_chk = HH_chk & n_sick_chk & temp_chk & work_chk & social_chk & shopping_chk & other_chk,
week = isoweek(date))
# summarize these
# share of responses passing each plausibility check
# NOTE(review): weighted.mean()'s weights argument is `w`; `weight = weight`
# is absorbed by `...` and ignored, so these are UNWEIGHTED means -- confirm
# whether survey-weighted proportions were intended (elsewhere `w = weight`
# is used correctly).
chk_outcomes = c %>% dplyr::summarize(temp = weighted.mean(temp_chk, weight = weight),
HH = weighted.mean(HH_chk, weight = weight),
work = weighted.mean(work_chk, weight = weight),
social = weighted.mean(social_chk, weight = weight),
shopping = weighted.mean(shopping_chk, weight = weight),
other = weighted.mean(other_chk, weight = weight),
all = weighted.mean(all_chk, weight = weight))
# subset on these
# PROFANE_LANG should hold a regex of profane/political terms (redacted).
# NOTE(review): as published the assignment below is incomplete -- the
# script will not run until the actual pattern string is supplied.
PROFANE_LANG = # We filtered out responses with profane or political language.
# We don't want to upload to GitHub but are happy to share the list
# or the cleaned file to researchers with microdata access
p = c %>% filter(all_chk) %>%
mutate(odd_symp = grepl(PROFANE_LANG,
symp_other, ignore.case = T),
all_symp = symp=="1,2,3,4,5,6,7,8,9,10,11,12,13,14",
all_but_one_symp = symp=="1,2,3,4,5,6,7,8,9,10,11,12,13",
all_and_odd = all_symp & odd_symp,
have_symp1 = grepl("1,2", symp),
have_symp2 = grepl("1,2", symp) & HH_fever==1,
have_symp3 = grepl("1,2", symp) & !is.na(highest_temp),
have_symp4 = grepl("1,2", symp) & (HH_fever==1 | !is.na(highest_temp)),
taste = grepl("13", symp),
symp_num = str_count(symp, ",")+1,
HH_cough_aug = !(HH_cough=="2" | is.na(HH_cough)),
HH_sob_aug = !(HH_sob=="2" | is.na(HH_sob)),
HH_diff_breathing_aug = !(HH_diff_breathing=="2" | is.na(HH_diff_breathing)),
cli = HH_fever=="1" & (HH_cough_aug | HH_sob_aug | HH_diff_breathing_aug),
survey_chg = date>="2020-09-08") %>%
separate(symp, into = paste("var", 1:15, sep = ""), remove = F, sep = "\\,")
# letter code summarising which of the five household symptoms were reported
p$cats = paste(ifelse(p$HH_fever==1, "A", ""), ifelse(p$HH_sore_throat==1, "B", ""), ifelse(p$HH_cough==1, "C", ""), ifelse(p$HH_sob==1, "D", ""), ifelse(p$HH_diff_breathing==1, "E", ""))
# count of the five household symptoms reported
p$total = as.numeric(p$HH_fever==1) + as.numeric(p$HH_sore_throat==1) + as.numeric(p$HH_cough==1) + as.numeric(p$HH_sob==1) + as.numeric(p$HH_diff_breathing==1)
save(p, file = "~/Dropbox/CMU Survey/National analysis/0 - Aggregated data/tempfile.RData")
# free memory held by the large intermediate objects
rm(c, d, z)
gc()
#### NEW VARIABLES ####
# make variables
# Analysis sample: drop suspicious responses (all or all-but-one symptoms
# checked, profane free text), missing state, and Puerto Rico; then build
# contact counts and demographic categories.
q = p %>% filter(!all_symp & !all_but_one_symp & !odd_symp) %>%
filter(!is.na(state_id) & state_id!="PR") %>%
mutate(
# treat item non-response as zero contacts within each domain ...
any_work = ifelse(is.na(work_contacts),0, work_contacts),
any_shopping = ifelse(is.na(shopping_contacts),0, shopping_contacts),
any_social = ifelse(is.na(social_contacts),0, social_contacts),
any_other = ifelse(is.na(other_contacts), 0, other_contacts),
# ... but only count respondents who answered at least one contact item
chk = !is.na(work_contacts) | !is.na(shopping_contacts) | !is.na(social_contacts) | !is.na(other_contacts),
work_mod2 = ifelse(chk, any_work, NA),
social_mod2 = ifelse(chk, any_social, NA),
shopping_mod2 = ifelse(chk, any_shopping, NA),
other_mod2 = ifelse(chk, any_other, NA),
contacts_tot = work_mod2 + shopping_mod2 + social_mod2 + other_mod2,
# TRUE when C13 == 8 or every activity reported in C13 also appears in
# C13a; presumably "no activities, or all activities done masked" --
# confirm against the survey codebook
activities_masked_or_none = ifelse(is.na(C13), NA, C13==8 | (grepl("1", C13)==grepl("1", C13a) &
grepl("2", C13)==grepl("2", C13a) &
grepl("4", C13)==grepl("4", C13a) &
grepl("5", C13)==grepl("5", C13a) &
grepl("6", C13)==grepl("6", C13a))),
have_symp = grepl("1,2", symp),
# age buckets: codes <= 3 map to <45, codes > 5 to 65+ -- TODO confirm
# against the age coding table
age_cat = ifelse(age>5, "65+", "45-64"),
age_cat = ifelse(age<=3, "<45", age_cat),
age_cat = factor(age_cat, levels = c("<45", "45-64", "65+")))
# function to aggregate dataset
# make_vars: given a (grouped) data frame, computes survey-weighted behavior
# and attitude shares, item-missingness rates (weighted and unweighted), and
# cell sizes for each group. Returns one row per group.
# NOTE(review): `date = max(date)` is computed first inside summarize(), so
# later references to `date` in this call see the group maximum, not each
# respondent's date -- confirm the `date<="2020-09-06"` conditions below are
# intended to use the group-level date.
make_vars = function(f){
f = f %>% dplyr::summarize(date = max(date),
num = length(tested_covid),
# avoid others
avoid_peoplev2 = weighted.mean(ifelse(is.na(avoid_contact) & date<="2020-09-06", NA, avoid_contact<3), na.rm = T, w = weight),
avoid_people_num = sum(!is.na(avoid_contact) & date<="2020-09-06"),
# worry
worried_sickv2 = weighted.mean(ifelse(is.na(worried_seriously_ill), NA, worried_seriously_ill==1), na.rm = T, w = weight),
worried_sick_num = sum(!is.na(worried_seriously_ill)),
# activities masked or none
activities_masked_or_nonev2 = weighted.mean(ifelse(is.na(C13) & survey_chg, NA, activities_masked_or_none), w = weight, na.rm = T),
activities_masked_or_none_num = sum(!is.na(C13) & survey_chg),
# contacts
contacts_avg_mod = weighted.mean(contacts_tot, na.rm = T, w = weight),
contacts_num = sum(chk),
# missing data (weighted)
gender_missing = weighted.mean(is.na(gender), w = weight),
age_missing = weighted.mean(is.na(age), w = weight),
avoid_people_missing = weighted.mean(is.na(avoid_contact), w = weight),
contacts_missing = weighted.mean(!chk, w = weight),
worried_missing = weighted.mean(is.na(worried_seriously_ill), w = weight),
activities_missing = weighted.mean(is.na(C13), w = weight),
contacts_missing1 = weighted.mean(is.na(work_contacts), w = weight),
contacts_missing2 = weighted.mean(is.na(social_contacts), w = weight),
contacts_missing3 = weighted.mean(is.na(shopping_contacts), w = weight),
contacts_missing4 = weighted.mean(is.na(other_contacts), w = weight),
# missing data (unweighted)
gender_missingT = mean(is.na(gender)),
age_missingT = mean(is.na(age)),
avoid_people_missingT = mean(is.na(avoid_contact)),
contacts_missingT = mean(!chk),
worried_missingT = mean(is.na(worried_seriously_ill)),
activities_missingT = mean(is.na(C13)),
contacts_missing1T = mean(is.na(work_contacts), na.rm = T),
contacts_missing2T = mean(is.na(social_contacts), na.rm = T),
contacts_missing3T = mean(is.na(shopping_contacts), na.rm = T),
contacts_missing4T = mean(is.na(other_contacts), na.rm = T),
# gender
gender_avg = weighted.mean(gender==2, na.rm = T, w = weight),
gender_avg_total = mean(gender==2, na.rm = T),
# age (share 65+)
# NOTE(review): `age = weighted.mean(age>=6, ...)` overwrites `age` with a
# proportion, so the next line's `age>=6` uses the NEW scalar value and
# `age_total` is always 0 -- likely a sequential-summarize bug; confirm.
age = weighted.mean(age>=6, na.rm = T, w = weight),
age_total = mean(age>=6, na.rm = T),
weight = sum(weight))
return(f)
}
# aggregate by states and regions
# `spring`, `summer`, `fall` (state groupings) are loaded from state_cats.RData
load("state_cats.RData")
tic()
h = make_vars(q %>% group_by(week, state_id)) # state_coded
j = make_vars(q %>% group_by(week, region))
summer = make_vars(q %>% filter(state_id%in%c(spring, summer)) %>%
mutate(summer = state_id %in%summer) %>% group_by(week, summer))
fall = make_vars(q %>% mutate(fall = state_id %in%(fall)) %>% group_by(week, fall))
i = make_vars(q %>% group_by(week))
toc()
# check aggregates
# minimum non-zero cell sizes (reporting/privacy thresholds)
min(h$avoid_people_num[h$avoid_people_num>0])
min(h$worried_sick_num)
min(h$activities_masked_or_none_num[h$activities_masked_or_none_num>0])
min(h$contacts_num)
# save files
save(h, j, i, summer, fall, file = paste("summary_extract", Sys.Date(), ".RData", sep = ""))
# double-check counts
# NOTE(review): `i` (the weekly aggregate saved above) is overwritten here
# with the person-level analysis sample -- intentional reuse, but fragile.
i = p %>% filter(!all_symp & !all_but_one_symp & !odd_symp)
n = sum(h$num)
# estimates for supplement
# records dropped by plausibility checks / filters, and share retained
(nrow(e)-nrow(i))
(nrow(e)-nrow(i))/nrow(e)
i2 = n
(nrow(i)-(i2))
(nrow(i)-(i2))/nrow(e)
(i2)/nrow(e)
#### Statistical Tests ####
load("state_cats.RData")
# Due to large sample size, these are fairly trivial. Effect size is more important.
# Summer-surge vs spring-surge states: difference-in-differences of contact
# avoidance and total contacts between a June baseline week and mid-July.
s = q %>% mutate(summer_val = state_id %in% summer,
pre = (date >= "2020-06-08" & date <= "2020-06-14")) %>%
filter(state_id %in% c(spring, summer)) %>%
filter((date >= "2020-06-08" & date <= "2020-06-14") | (date >= "2020-07-13" & date <= "2020-07-19"))
summary(lm(avoid_contact~summer_val*pre, data = s, weights = weight))
summary(lm(contacts_tot~summer_val*pre, data = s, weights = weight))
# Fall-surge states vs others: September baseline week vs late November.
r = q %>% mutate(fall = state_id %in% fall,
pre = (date >= "2020-09-14" & date <= "2020-09-20")) %>%
filter((date >= "2020-09-14" & date <= "2020-09-20") | (date >= "2020-11-16" & date <= "2020-11-22"))
summary(lm(activities_masked_or_none~pre*fall, data = r, weights = weight))
summary(lm(contacts_tot~pre*fall, data = r, weights = weight))
| /1 - Scripts/1_data_processing.R | no_license | abilinski/COVID19RiskResponse | R | false | false | 12,917 | r | #******************************* CLEAN DATA & PROCESS VARIABLES ***************************#
# #
# #
# #
#******************************************************************************************#
# source here library for file management
library(here)
# source global options
source(here("global_options.R"))
# set working directory for data (not public)
setwd("~/Dropbox/CMU Survey/National analysis/0 - Aggregated data")
# load data
load("data_all.RData")
# set working directory
setwd(here("0 - Data"))
# ancillary files (zip to state + state codings)
z = read.csv("02_20_uszips.csv") %>% dplyr::select(zip, state_id)
states = read.csv("A3b-coding.csv")
# process data
e = d %>% mutate(
# format date as date
date = as.Date(StartDatetime, format = "%Y-%m-%d"),
# add a unique ID
#id = row_number(.),
# format zip code as numeric
zip = as.numeric(A3),
# person-level fever
fever = substring(B2,1,2)=="1,") %>%
# get rid of colums
dplyr::select(-StartDatetime, -EndDatetime) %>%
# join to zip codes
left_join(z, "zip") %>%
# join to state ID
# state_abbv will now be the state abbreviation
mutate(state_num = as.numeric(A3b)) %>%
left_join(states, c("state_num"="code"))
# filter for date
e = e %>% filter(date>="2020-05-15" & date<="2020-11-22")
dim(e) # supplement
# label questions
names(e)[1:42] = c("HH_fever", "HH_sore_throat", "HH_cough", "HH_sob", "HH_diff_breathing",
"HH_n_sick", "HH_n", "zip_code", "state_rep", "com_n_sick", "symp", "symp_other",
"symp_days", "taken_temp", "cough_mucus", "tested_covid", "hospital",
"conditions", "flu_shot", "work_outside_home", "hcw", "nursing_home",
"travel_outside_state", "avoid_contact", "anxious", "depressed", "worried_seriously_ill",
"work_contacts", "shopping_contacts", "social_contacts", "other_contacts",
"contact_pos", "contact_pos_HH",
"gender", "gender_other", "pregnant", "age", "HH_u18", "HH_18_64", "HH_65_plus", "finances",
"highest_temp")
#### CLEANING ####
# Flag each numeric free-text answer as valid when it was EITHER left blank
# OR a plausible value was entered.
# NOTE(review): `c` masks base::c() from here on; the name is reused further
# down (filtering, rm()), so it is kept for compatibility — avoid in new code.
c = e %>%
  mutate(temp_chk = (is.na(highest_temp) | (highest_temp <= 104 & highest_temp > 97)),
         HH_chk = (is.na(HH_n_sick) | HH_n_sick <= 30) & (is.na(HH_n) | HH_n <= 30),
         n_sick_chk = (HH_n_sick <= HH_n) | is.na(HH_n_sick) | is.na(HH_n),
         #com_chk = is.na(com_n_sick) | com_n_sick <= 100),
         work_chk = (is.na(work_contacts) | work_contacts < 100),
         social_chk = (is.na(social_contacts) | social_contacts < 100),
         shopping_chk = (is.na(shopping_contacts) | shopping_contacts < 100),
         other_chk = (is.na(other_contacts) | other_contacts < 100),
         all_chk = HH_chk & n_sick_chk & temp_chk & work_chk & social_chk & shopping_chk & other_chk,
         week = isoweek(date))  # isoweek() from lubridate
# Weighted share of respondents passing each check.
# BUG FIX: weighted.mean()'s weight argument is `w`, not `weight`.  The
# original `weight = weight` matched no formal argument, was swallowed by
# `...`, and silently produced UNWEIGHTED means.
chk_outcomes = c %>% dplyr::summarize(temp = weighted.mean(temp_chk, w = weight),
                                      HH = weighted.mean(HH_chk, w = weight),
                                      work = weighted.mean(work_chk, w = weight),
                                      social = weighted.mean(social_chk, w = weight),
                                      shopping = weighted.mean(shopping_chk, w = weight),
                                      other = weighted.mean(other_chk, w = weight),
                                      all = weighted.mean(all_chk, w = weight))
# subset on these
PROFANE_LANG = # We filtered out responses with profane or political language.
# We don't want to upload to GitHub but are happy to share the list
# or the cleaned file to researchers with microdata access
# NOTE(review): the regex bound to PROFANE_LANG was deliberately redacted, so
# this statement chains into the next assignment (PROFANE_LANG = p = ...) and
# the grepl() below cannot resolve PROFANE_LANG — the script does NOT run as
# published; assign the pattern to PROFANE_LANG before running.
p = c %>% filter(all_chk) %>%
  mutate(odd_symp = grepl(PROFANE_LANG,
                          symp_other, ignore.case = T),
         # flags for implausible "selected (nearly) every symptom" responses
         all_symp = symp=="1,2,3,4,5,6,7,8,9,10,11,12,13,14",
         all_but_one_symp = symp=="1,2,3,4,5,6,7,8,9,10,11,12,13",
         all_and_odd = all_symp & odd_symp,
         # alternative symptom definitions; "1,2" matches symptom codes 1 and
         # 2 selected adjacently (codes appear sorted — TODO confirm)
         have_symp1 = grepl("1,2", symp),
         have_symp2 = grepl("1,2", symp) & HH_fever==1,
         have_symp3 = grepl("1,2", symp) & !is.na(highest_temp),
         have_symp4 = grepl("1,2", symp) & (HH_fever==1 | !is.na(highest_temp)),
         # code 13 — presumably loss of taste/smell, TODO confirm codebook
         taste = grepl("13", symp),
         # number of symptoms = commas + 1
         symp_num = str_count(symp, ",")+1,
         # *_aug: TRUE unless the item is coded "2" or missing — code "2"
         # presumably means "no", confirm against the codebook
         HH_cough_aug = !(HH_cough=="2" | is.na(HH_cough)),
         HH_sob_aug = !(HH_sob=="2" | is.na(HH_sob)),
         HH_diff_breathing_aug = !(HH_diff_breathing=="2" | is.na(HH_diff_breathing)),
         # household COVID-like illness: fever plus any respiratory symptom
         cli = HH_fever=="1" & (HH_cough_aug | HH_sob_aug | HH_diff_breathing_aug),
         # the questionnaire changed on 2020-09-08
         survey_chg = date>="2020-09-08") %>%
  separate(symp, into = paste("var", 1:15, sep = ""), remove = F, sep = "\\,")
# letter string / count over the five household-symptom indicators
# (paste() with default sep = " " leaves spaces between the letters)
p$cats = paste(ifelse(p$HH_fever==1, "A", ""), ifelse(p$HH_sore_throat==1, "B", ""), ifelse(p$HH_cough==1, "C", ""), ifelse(p$HH_sob==1, "D", ""), ifelse(p$HH_diff_breathing==1, "E", ""))
p$total = as.numeric(p$HH_fever==1) + as.numeric(p$HH_sore_throat==1) + as.numeric(p$HH_cough==1) + as.numeric(p$HH_sob==1) + as.numeric(p$HH_diff_breathing==1)
# checkpoint the cleaned person-level file, then free memory
save(p, file = "~/Dropbox/CMU Survey/National analysis/0 - Aggregated data/tempfile.RData")
rm(c, d, z)
gc()
#### NEW VARIABLES ####
# make variables
# Drop implausible responses (all / all-but-one symptoms, profane free text)
# and rows without a usable US state, then derive contact and demographic
# analysis variables.
q = p %>% filter(!all_symp & !all_but_one_symp & !odd_symp) %>%
  filter(!is.na(state_id) & state_id!="PR") %>%
  mutate(
    # contact items: blanks recoded to zero contacts ...
    any_work = ifelse(is.na(work_contacts),0, work_contacts),
    any_shopping = ifelse(is.na(shopping_contacts),0, shopping_contacts),
    any_social = ifelse(is.na(social_contacts),0, social_contacts),
    any_other = ifelse(is.na(other_contacts), 0, other_contacts),
    # ... but only for respondents who answered at least one contact item
    chk = !is.na(work_contacts) | !is.na(shopping_contacts) | !is.na(social_contacts) | !is.na(other_contacts),
    work_mod2 = ifelse(chk, any_work, NA),
    social_mod2 = ifelse(chk, any_social, NA),
    shopping_mod2 = ifelse(chk, any_shopping, NA),
    other_mod2 = ifelse(chk, any_other, NA),
    contacts_tot = work_mod2 + shopping_mod2 + social_mod2 + other_mod2,
    # TRUE when no activities (code 8) or every activity code selected in C13
    # is also selected in C13a; C13/C13a are raw item codes beyond the 42
    # renamed columns — presumably the activities / masked-activities
    # multi-selects, TODO confirm against the survey codebook
    activities_masked_or_none = ifelse(is.na(C13), NA, C13==8 | (grepl("1", C13)==grepl("1", C13a) &
                                                                   grepl("2", C13)==grepl("2", C13a) &
                                                                   grepl("4", C13)==grepl("4", C13a) &
                                                                   grepl("5", C13)==grepl("5", C13a) &
                                                                   grepl("6", C13)==grepl("6", C13a))),
    have_symp = grepl("1,2", symp),
    # age is a coded category: codes > 5 treated as 65+, <= 3 as under 45 —
    # presumably the survey's age brackets, TODO confirm codebook
    age_cat = ifelse(age>5, "65+", "45-64"),
    age_cat = ifelse(age<=3, "<45", age_cat),
    age_cat = factor(age_cat, levels = c("<45", "45-64", "65+")))
# Aggregate respondent-level rows into per-group summary statistics.
#
# Args:
#   f: a grouped data frame (dplyr::group_by already applied by the caller)
#      containing the analysis columns created above plus the survey `weight`.
# Returns: one row per group holding weighted/unweighted means, denominators
#   and missingness shares.
#
# NOTE(review): `date = max(date)` is evaluated first, so later expressions
# that compare `date` (avoid_peoplev2, avoid_people_num, activities_*) see the
# per-group maximum date rather than per-row dates — confirm this week-level
# gating on questionnaire-change dates is intended.
make_vars = function(f){
  f = f %>% dplyr::summarize(date = max(date),
                             num = length(tested_covid),
                             # avoid others (item only asked before 2020-09-06)
                             avoid_peoplev2 = weighted.mean(ifelse(is.na(avoid_contact) & date<="2020-09-06", NA, avoid_contact<3), na.rm = TRUE, w = weight),
                             avoid_people_num = sum(!is.na(avoid_contact) & date<="2020-09-06"),
                             # worry about serious illness
                             worried_sickv2 = weighted.mean(ifelse(is.na(worried_seriously_ill), NA, worried_seriously_ill==1), na.rm = TRUE, w = weight),
                             worried_sick_num = sum(!is.na(worried_seriously_ill)),
                             # activities masked or none (only after the survey change)
                             activities_masked_or_nonev2 = weighted.mean(ifelse(is.na(C13) & survey_chg, NA, activities_masked_or_none), w = weight, na.rm = TRUE),
                             activities_masked_or_none_num = sum(!is.na(C13) & survey_chg),
                             # contacts
                             contacts_avg_mod = weighted.mean(contacts_tot, na.rm = TRUE, w = weight),
                             contacts_num = sum(chk),
                             # missing data (weighted)
                             gender_missing = weighted.mean(is.na(gender), w = weight),
                             age_missing = weighted.mean(is.na(age), w = weight),
                             avoid_people_missing = weighted.mean(is.na(avoid_contact), w = weight),
                             contacts_missing = weighted.mean(!chk, w = weight),
                             worried_missing = weighted.mean(is.na(worried_seriously_ill), w = weight),
                             activities_missing = weighted.mean(is.na(C13), w = weight),
                             contacts_missing1 = weighted.mean(is.na(work_contacts), w = weight),
                             contacts_missing2 = weighted.mean(is.na(social_contacts), w = weight),
                             contacts_missing3 = weighted.mean(is.na(shopping_contacts), w = weight),
                             contacts_missing4 = weighted.mean(is.na(other_contacts), w = weight),
                             # missing data (unweighted)
                             gender_missingT = mean(is.na(gender)),
                             age_missingT = mean(is.na(age)),
                             avoid_people_missingT = mean(is.na(avoid_contact)),
                             contacts_missingT = mean(!chk),
                             worried_missingT = mean(is.na(worried_seriously_ill)),
                             activities_missingT = mean(is.na(C13)),
                             contacts_missing1T = mean(is.na(work_contacts), na.rm = TRUE),
                             contacts_missing2T = mean(is.na(social_contacts), na.rm = TRUE),
                             contacts_missing3T = mean(is.na(shopping_contacts), na.rm = TRUE),
                             contacts_missing4T = mean(is.na(other_contacts), na.rm = TRUE),
                             # gender (share with code 2 — presumably female,
                             # confirm codebook)
                             gender_avg = weighted.mean(gender==2, na.rm = TRUE, w = weight),
                             gender_avg_total = mean(gender==2, na.rm = TRUE),
                             # age (share with codes >= 6 — presumably 65+)
                             # BUG FIX: age_total must be computed BEFORE the
                             # `age` column is overwritten by its weighted
                             # summary below; summarise() evaluates its
                             # expressions sequentially, so the original
                             # compared the summarized scalar (a proportion)
                             # against 6 and always returned 0.
                             age_total = mean(age>=6, na.rm = TRUE),
                             age = weighted.mean(age>=6, na.rm = TRUE, w = weight),
                             # keep `weight` last so all earlier expressions
                             # use the original per-row weights
                             weight = sum(weight))
  return(f)
}
# aggregate by states and regions
# presumably defines the state groupings `spring`, `summer`, `fall` used
# below — TODO confirm contents
load("state_cats.RData")
tic()  # timing via the tictoc package
h = make_vars(q %>% group_by(week, state_id)) # state_coded
j = make_vars(q %>% group_by(week, region))
# NOTE(review): the next two assignments overwrite the `summer` and `fall`
# state vectors with summary data frames; the vectors are re-loaded from
# state_cats.RData before the statistical tests below.
summer = make_vars(q %>% filter(state_id%in%c(spring, summer)) %>%
                     mutate(summer = state_id %in%summer) %>% group_by(week, summer))
fall = make_vars(q %>% mutate(fall = state_id %in%(fall)) %>% group_by(week, fall))
i = make_vars(q %>% group_by(week))
toc()
# check aggregates: smallest (non-zero) cell sizes as a robustness/disclosure check
min(h$avoid_people_num[h$avoid_people_num>0])
min(h$worried_sick_num)
min(h$activities_masked_or_none_num[h$activities_masked_or_none_num>0])
min(h$contacts_num)
# save files (date-stamped extract)
save(h, j, i, summer, fall, file = paste("summary_extract", Sys.Date(), ".RData", sep = ""))
# double-check counts
# NOTE(review): this overwrites the weekly summary `i` saved above
i = p %>% filter(!all_symp & !all_but_one_symp & !odd_symp)
n = sum(h$num)
# estimates for supplement: rows dropped at each filtering stage
(nrow(e)-nrow(i))
(nrow(e)-nrow(i))/nrow(e)
i2 = n
(nrow(i)-(i2))
(nrow(i)-(i2))/nrow(e)
(i2)/nrow(e)
#### Statistical Tests ####
# re-load the state groupings (`spring`, `summer`, `fall`) that were
# overwritten during aggregation above
load("state_cats.RData")
# Due to large sample size, these are fairly trivial. Effect size is more important.
# Difference-in-differences style comparison of summer-group vs spring-group
# states (groupings presumably surge timing — confirm), pre (Jun 8-14) vs
# post (Jul 13-19) weeks, using weighted least squares.
s = q %>% mutate(summer_val = state_id %in% summer,
                 pre = (date >= "2020-06-08" & date <= "2020-06-14")) %>%
  filter(state_id %in% c(spring, summer)) %>%
  filter((date >= "2020-06-08" & date <= "2020-06-14") | (date >= "2020-07-13" & date <= "2020-07-19"))
summary(lm(avoid_contact~summer_val*pre, data = s, weights = weight))
summary(lm(contacts_tot~summer_val*pre, data = s, weights = weight))
# Same design for the fall group: pre (Sep 14-20) vs post (Nov 16-22).
r = q %>% mutate(fall = state_id %in% fall,
                 pre = (date >= "2020-09-14" & date <= "2020-09-20")) %>%
  filter((date >= "2020-09-14" & date <= "2020-09-20") | (date >= "2020-11-16" & date <= "2020-11-22"))
summary(lm(activities_masked_or_none~pre*fall, data = r, weights = weight))
summary(lm(contacts_tot~pre*fall, data = r, weights = weight))
|
\name{doubletCells}
\alias{doubletCells}
\alias{doubletCells,ANY-method}
\alias{doubletCells,SingleCellExperiment-method}
\title{Detect doublet cells}
\description{Identify potential doublet cells based on simulations of putative doublet expression profiles.}
\usage{
\S4method{doubletCells}{ANY}(x, size.factors.norm=NULL, size.factors.content=NULL,
k=50, subset.row=NULL, niters=max(10000, ncol(x)), block=10000,
d=50, approximate=FALSE, irlba.args=list(), force.match=FALSE,
force.k=20, force.ndist=3, BNPARAM=NULL, BPPARAM=SerialParam())
\S4method{doubletCells}{SingleCellExperiment}(x, size.factors.norm=NA, ..., subset.row=NULL,
assay.type="counts", get.spikes=FALSE)
}
\arguments{
\item{x}{
A numeric matrix-like object of count values, where each column corresponds to a cell and each row corresponds to an endogenous gene.
Alternatively, a SingleCellExperiment object containing such a matrix.
}
\item{size.factors.norm}{A numeric vector of size factors for normalization of \code{x} prior to PCA and distance calculations.
If \code{NULL}, defaults to the column sums of \code{x}.
For the SingleCellExperiment method, this may be \code{NA}, in which case \code{sizeFactors(x)} is used instead.}
\item{size.factors.content}{A numeric vector of size factors for RNA content normalization of \code{x} prior to simulating doublets.}
\item{k}{An integer scalar specifying the number of nearest neighbours to use to determine the bandwidth for density calculations.}
\item{subset.row}{See \code{?"\link{scran-gene-selection}"}.}
\item{niters}{An integer scalar specifying how many simulated doublets should be generated.}
\item{block}{An integer scalar controlling the rate of doublet generation, to keep memory usage low.}
\item{d}{An integer scalar specifying the number of components to retain after the PCA.}
\item{approximate}{A logical scalar indicating whether \code{\link[irlba]{irlba}} should be used to perform the initial PCA.}
\item{irlba.args}{A list of arguments to pass to \code{\link[irlba]{irlba}} when \code{approximate=TRUE}.}
\item{force.match}{A logical scalar indicating whether remapping of simulated doublets to original cells should be performed.}
\item{force.k}{An integer scalar specifying the number of neighbours to use for remapping if \code{force.match=TRUE}.}
\item{force.ndist}{A numeric scalar specifying the bandwidth for remapping if \code{force.match=TRUE}.}
\item{BNPARAM}{A \linkS4class{BiocNeighborParam} object specifying the nearest neighbor algorithm.
Defaults to an exact algorithm if \code{NULL}, see \code{?\link{findKNN}} for more details.}
\item{BPPARAM}{A \linkS4class{BiocParallelParam} object specifying whether the neighbour searches should be parallelized.}
\item{...}{Additional arguments to pass to the ANY method.}
\item{assay.type}{A string specifying which assay values to use, e.g., \code{"counts"} or \code{"logcounts"}.}
\item{get.spikes}{See \code{?"\link{scran-gene-selection}"}.}
}
\value{
A numeric vector of doublet scores for each cell in \code{x}.
}
\details{
This function simulates doublets by adding the count vectors for two randomly chosen cells in \code{x}.
For each cell, we compute the density of simulated doublets and compare it to the density of original cells.
Genuine doublets should have a high density of simulated doublets relative to the density of its neighbourhood.
Thus, the doublet score for each cell is defined as the ratio of densities of simulated doublets to the (squared) density of the original cells.
Densities are calculated in low-dimensional space after a PCA on the log-normalized expression matrix of \code{x}.
Simulated doublets are projected into the low-dimensional space using the rotation vectors computed from the original cells.
A tricube kernel is used to compute the density around each cell.
The bandwidth of the kernel is set to the median distance to the \code{k}th nearest neighbour across all cells.
The two size factor arguments have different roles:
\itemize{
\item \code{size.factors.norm} contains the size factors to be used for normalization prior to PCA and distance calculations.
This can be set to ensure that the low-dimensional space is consistent with that in the rest of the analysis.
\item \code{size.factors.content} is much more important, and represents the size factors that preserve RNA content differences.
This is usually computed from spike-in RNA and ensures that the simulated doublets have the correct ratio of contributions from the original cells.
}
It is possible to set both of these arguments, as they will not interfere with each other.
If \code{force.match=TRUE}, simulated doublets will be remapped to the nearest neighbours in the original data.
This is done by taking the (tricube-weighted) average of the PC scores for the \code{force.k} nearest neighbors.
The tricube bandwidth for remapping is chosen by taking the median distance and multiplying it by \code{force.ndist}, to protect against later neighbours that might be outliers.
The aim is to adjust for unknown differences in RNA content that would cause the simulated doublets to be systematically displaced from their true locations.
However, it may also result in spuriously high scores for single cells that happen to be close to a cluster of simulated doublets.
}
\author{
Aaron Lun
}
\examples{
# Mocking up an example.
ngenes <- 100
mu1 <- 2^rexp(ngenes)
mu2 <- 2^rnorm(ngenes)
counts.1 <- matrix(rpois(ngenes*100, mu1), nrow=ngenes)
counts.2 <- matrix(rpois(ngenes*100, mu2), nrow=ngenes)
counts.m <- matrix(rpois(ngenes*20, mu1+mu2), nrow=ngenes)
counts <- cbind(counts.1, counts.2, counts.m)
clusters <- rep(1:3, c(ncol(counts.1), ncol(counts.2), ncol(counts.m)))
# Find potential doublets...
scores <- doubletCells(counts)
boxplot(split(scores, clusters))
}
| /man/doubletCells.Rd | no_license | bioinfonerd-forks/scran | R | false | false | 5,813 | rd | \name{doubletCells}
\alias{doubletCells}
\alias{doubletCells,ANY-method}
\alias{doubletCells,SingleCellExperiment-method}
\title{Detect doublet cells}
\description{Identify potential doublet cells based on simulations of putative doublet expression profiles.}
\usage{
\S4method{doubletCells}{ANY}(x, size.factors.norm=NULL, size.factors.content=NULL,
k=50, subset.row=NULL, niters=max(10000, ncol(x)), block=10000,
d=50, approximate=FALSE, irlba.args=list(), force.match=FALSE,
force.k=20, force.ndist=3, BNPARAM=NULL, BPPARAM=SerialParam())
\S4method{doubletCells}{SingleCellExperiment}(x, size.factors.norm=NA, ..., subset.row=NULL,
assay.type="counts", get.spikes=FALSE)
}
\arguments{
\item{x}{
A numeric matrix-like object of count values, where each column corresponds to a cell and each row corresponds to an endogenous gene.
Alternatively, a SingleCellExperiment object containing such a matrix.
}
\item{size.factors.norm}{A numeric vector of size factors for normalization of \code{x} prior to PCA and distance calculations.
If \code{NULL}, defaults to the column sums of \code{x}.
For the SingleCellExperiment method, this may be \code{NA}, in which case \code{sizeFactors(x)} is used instead.}
\item{size.factors.content}{A numeric vector of size factors for RNA content normalization of \code{x} prior to simulating doublets.}
\item{k}{An integer scalar specifying the number of nearest neighbours to use to determine the bandwidth for density calculations.}
\item{subset.row}{See \code{?"\link{scran-gene-selection}"}.}
\item{niters}{An integer scalar specifying how many simulated doublets should be generated.}
\item{block}{An integer scalar controlling the rate of doublet generation, to keep memory usage low.}
\item{d}{An integer scalar specifying the number of components to retain after the PCA.}
\item{approximate}{A logical scalar indicating whether \code{\link[irlba]{irlba}} should be used to perform the initial PCA.}
\item{irlba.args}{A list of arguments to pass to \code{\link[irlba]{irlba}} when \code{approximate=TRUE}.}
\item{force.match}{A logical scalar indicating whether remapping of simulated doublets to original cells should be performed.}
\item{force.k}{An integer scalar specifying the number of neighbours to use for remapping if \code{force.match=TRUE}.}
\item{force.ndist}{A numeric scalar specifying the bandwidth for remapping if \code{force.match=TRUE}.}
\item{BNPARAM}{A \linkS4class{BiocNeighborParam} object specifying the nearest neighbor algorithm.
Defaults to an exact algorithm if \code{NULL}, see \code{?\link{findKNN}} for more details.}
\item{BPPARAM}{A \linkS4class{BiocParallelParam} object specifying whether the neighbour searches should be parallelized.}
\item{...}{Additional arguments to pass to the ANY method.}
\item{assay.type}{A string specifying which assay values to use, e.g., \code{"counts"} or \code{"logcounts"}.}
\item{get.spikes}{See \code{?"\link{scran-gene-selection}"}.}
}
\value{
A numeric vector of doublet scores for each cell in \code{x}.
}
\details{
This function simulates doublets by adding the count vectors for two randomly chosen cells in \code{x}.
For each cell, we compute the density of simulated doublets and compare it to the density of original cells.
Genuine doublets should have a high density of simulated doublets relative to the density of its neighbourhood.
Thus, the doublet score for each cell is defined as the ratio of densities of simulated doublets to the (squared) density of the original cells.
Densities are calculated in low-dimensional space after a PCA on the log-normalized expression matrix of \code{x}.
Simulated doublets are projected into the low-dimensional space using the rotation vectors computed from the original cells.
A tricube kernel is used to compute the density around each cell.
The bandwidth of the kernel is set to the median distance to the \code{k} nearest neighbour across all cells.
The two size factor arguments have different roles:
\itemize{
\item \code{size.factors.norm} contains the size factors to be used for normalization prior to PCA and distance calculations.
This can be set to ensure that the low-dimensional space is consistent with that in the rest of the analysis.
\item \code{size.factors.content} is much more important, and represents the size factors that preserve RNA content differences.
This is usually computed from spike-in RNA and ensures that the simulated doublets have the correct ratio of contributions from the original cells.
}
It is possible to set both of these arguments, as they will not interfere with each other.
If \code{force.match=TRUE}, simulated doublets will be remapped to the nearest neighbours in the original data.
This is done by taking the (tricube-weighted) average of the PC scores for the \code{force.k} nearest neighbors.
The tricube bandwidth for remapping is chosen by taking the median distance and multiplying it by \code{force.ndist}, to protect against later neighbours that might be outliers.
The aim is to adjust for unknown differences in RNA content that would cause the simulated doublets to be systematically displaced from their true locations.
However, it may also result in spuriously high scores for single cells that happen to be close to a cluster of simulated doublets.
}
\author{
Aaron Lun
}
\examples{
# Mocking up an example.
ngenes <- 100
mu1 <- 2^rexp(ngenes)
mu2 <- 2^rnorm(ngenes)
counts.1 <- matrix(rpois(ngenes*100, mu1), nrow=ngenes)
counts.2 <- matrix(rpois(ngenes*100, mu2), nrow=ngenes)
counts.m <- matrix(rpois(ngenes*20, mu1+mu2), nrow=ngenes)
counts <- cbind(counts.1, counts.2, counts.m)
clusters <- rep(1:3, c(ncol(counts.1), ncol(counts.2), ncol(counts.m)))
# Find potential doublets...
scores <- doubletCells(counts)
boxplot(split(scores, clusters))
}
|
# the formula for protective effect of MVDA
# (presumably mass vaccine/drug administration — the model tracks a drug
# compartment x* and vaccine compartments y*; solved with deSolve below)
library(deSolve)
# weekly time steps over three years
times <- seq(0, 3, by = (1/52))
#MODEL PARAMETERS (rates per year)
parameters <- c(cm = 12, # rate of mass intervention deployment
                ld = 52/2, # rate of loss of drug effect
                ka = 365,#52/2, # 1/(time to full protective effect of vaccine after dose)
                kf = 12/1, # fast rate of loss of vaccine protective effect
                ks = 1/3, # slow rate of loss of vaccine protective effect
                delta = 12/1, # 1/(time to transition between fast and slow rate for loss of vaccine protection)
                pv0 = 0.9 # maximum starting vaccine protection level
                          # NOTE(review): pv0 is not referenced by the ODE
                          # system in this script — TODO confirm before removing
)
inity <- 1  # NOTE(review): unused in this script — TODO confirm
# MODEL INITIAL CONDITIONS
state <- c(x0 = 1, x1=0, y01=1, y02 = 0, yf=0, ys=0)
# Right-hand side of the protection ODE system in the form required by
# deSolve::ode(): given time, the named state vector and the named parameter
# vector, return a one-element list holding the derivatives of
# (x0, x1, y01, y02, yf, ys) in state order.
prot <- function(t, state, parameters) {
  vars <- as.list(c(state, parameters))
  derivs <- with(vars, c(
    -cm * x0,                       # dx0
    cm * x0 - ld * x1,              # dx1
    -cm * y01,                      # dy01
    cm * y01 - ka * y02,            # dy02
    ka * y02 - (kf + delta) * yf,   # dyf
    kf * yf - ks * ys               # dys
  ))
  list(derivs)
}
# run the model (ode() returns a matrix with the time column plus one column
# per state variable)
out <- ode(y = state, times = times, func = prot, parms = parameters)
# drug-derived protection: fraction in the drug-protected compartment x1
pd <- out[,"x1"]
# vaccine-derived protection: fast- plus slow-waning protected fractions
pv <- out[,"yf"]+out[,"ys"]
# overlay both protection curves on a [0, 1] axis (blue = drug, red = vaccine)
plot(times,pd,type="l",col='blue',ylim=c(0,1))
lines(times,pv,type="l",col='red')
| /exploring new MVDA/Lisa_protective_effect_17082017.r | no_license | SaiTheinThanTun/PSA-of-Savannakhet-model | R | false | false | 1,519 | r | # the formula for protective effect of MVDA
library(deSolve)
times <- seq(0, 3, by = (1/52))
#MODEL PARAMETERS
parameters <- c(cm = 12, # rate of mass intervetion deployment
ld = 52/2, # rate of loss of drug effect
ka = 365,#52/2, # 1/(time to full protective effect of vaccine after dose)
kf = 12/1, # fast rate of loss of vaccine protective effect
ks = 1/3, # slow rate of loss of vaccine protective effect
delta = 12/1, # 1/(time to transition between fast and slow rate for loss of vaccine protection)
pv0 = 0.9 # maximum starting vaccine protection level
)
inity <- 1
# MODEL INITIAL CONDITIONS
state <- c(x0 = 1, x1=0, y01=1, y02 = 0, yf=0, ys=0)
# set up a function to solve the equations
prot<-function(t, state, parameters)
{
with(as.list(c(state, parameters)),
{
dx0 <- -cm*x0
dx1 <- cm*x0-ld*x1
dy01 <- -cm*y01
dy02 <- cm*y01-ka*y02
dyf <- ka*y02-(kf+delta)*yf
dys <- kf*yf - ks*ys
# return the rate of change
list(c(dx0, dx1, dy01, dy02, dyf, dys))
}
)
}
# run the model
out <- ode(y = state, times = times, func = prot, parms = parameters)
pd <- out[,"x1"]
pv <- out[,"yf"]+out[,"ys"]
plot(times,pd,type="l",col='blue',ylim=c(0,1))
lines(times,pv,type="l",col='red')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forecastingplots.R
\name{plot_R2_verus_sweep_metric}
\alias{plot_R2_verus_sweep_metric}
\title{Plot R-squared versus mean sweep metric}
\usage{
plot_R2_verus_sweep_metric(df, col_name = "DriverDiversity",
output_filename = "R2_verus_sweep_metric", file_type = "png",
output_dir = NA)
}
\arguments{
\item{df}{dataframe generated by get_summary (filtered), get_cor_summary (filtered) or get_wait_cor_summary (filtered)}
\item{col_name}{name of column containing correlation coefficients, with or without "Cor_" suffix (default "DriverDiversity")}
\item{output_filename}{name of output image file}
\item{file_type}{either "pdf" or "png" (other values default to "pdf")}
\item{output_dir}{folder in which to save the image file; if NA then plots are displayed on screen instead}
}
\value{
a plot object
}
\description{
Plot R-squared versus mean sweep metric
}
\examples{
plot_R2_verus_sweep_metric(wait_cor_summary)
}
| /man/plot_R2_verus_sweep_metric.Rd | no_license | robjohnnoble/demonanalysis | R | false | true | 1,001 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forecastingplots.R
\name{plot_R2_verus_sweep_metric}
\alias{plot_R2_verus_sweep_metric}
\title{Plot R-squared versus mean sweep metric}
\usage{
plot_R2_verus_sweep_metric(df, col_name = "DriverDiversity",
output_filename = "R2_verus_sweep_metric", file_type = "png",
output_dir = NA)
}
\arguments{
\item{df}{dataframe generated by get_summary (filtered), get_cor_summary (filtered) or get_wait_cor_summary (filtered)}
\item{col_name}{name of column containing correlation coefficients, with or without "Cor_" suffix (default "DriverDiversity")}
\item{output_filename}{name of output image file}
\item{file_type}{either "pdf" or "png" (other values default to "pdf")}
\item{output_dir}{folder in which to save the image file; if NA then plots are displayed on screen instead}
}
\value{
a plot object
}
\description{
Plot R-squared versus mean sweep metric
}
\examples{
plot_R2_verus_sweep_metric(wait_cor_summary)
}
|
#Copyright 2011 Heewon Jeon(madjakarta@gmail.com)
#
#This file is part of KoNLP.
#
#KoNLP is free software: you can redistribute it and/or modify it under the
#terms of the GNU General Public License as published by the Free Software
#Foundation, either version 3 of the License, or (at your option) any later
#version.
#KoNLP is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with KoNLP. If not, see <http://www.gnu.org/licenses/>
# additional noun dictionary from Sejong project
#
# This dictionary was extracted from the 21st century Sejong project. It can be merged with the current user dictionary (dic_user.txt), but doing so requires considerably more memory to use KoNLP safely.
#
# @name extra_dic
# @docType data
# @author Heewon Jeon \email{madjakarta@@gmail.com}
# @references \url{www.sejong.or.kr}
# @keywords dictionary
#' reload all Hannanum analyzer dictionary
#'
#' Reloads every dictionary used by the Hannanum analyzer, including the user
#' dictionary.  Call this after editing KoNLP_dic/current/dic_user.txt so the
#' changes take effect.
#'
#' @examples
#' \dontrun{
#' ## This codes can not be run if you don't have encoding system
#' ## which can en/decode Hangul(ex) CP949, EUC-KR, UTF-8).
#' dicpath <- file.path(system.file(package="Sejong"), "dics", "handic.zip")
#' conn <- unz(dicpath, file.path("data","kE","dic_user2.txt"))
#' newdic <- read.csv(conn, sep="\t", header=FALSE, fileEncoding="UTF-8", stringsAsFactors=FALSE)
#' mergeUserDic(newdic)
#' ## backup merged new dictionary
#' backupUsrDic(ask=FALSE)
#' ## restore from backup directory
#' restoreUsrDic(ask=FALSE)
#' ## reloading new dictionary
#' reloadAllDic()}
#' @export
reloadAllDic <- function() {
  # Lazily construct the shared Hannanum Java object on first use.
  if (!exists("HannanumObj", envir = .KoNLPEnv)) {
    hannanum_new <- .jnew("kr/pe/freesearch/jhannanum/comm/HannanumInterface")
    assign("HannanumObj", hannanum_new, .KoNLPEnv)
  }
  hannanum <- get("HannanumObj", envir = .KoNLPEnv)
  .jcall(hannanum, "V", "reloadAllDic")
}
#' reload dictionaries for specific functions
#'
#' Reloads the user dictionary (KoNLP_dic/current/dic_user.txt) for the given
#' analysis functions only, e.g. after the file has been edited by hand.
#'
#' @param whichDics character vector which can be "extractNoun", "SimplePos09", "SimplePos22"
#' @examples
#' \dontrun{
#' reloadUserDic(c("extractNoun", "SimplePos22"))}
#' @export
reloadUserDic <- function(whichDics){
  # Validate input before touching any Java state.
  if(!is.character(whichDics)){
    stop("'whichDics' must be character!")
  }
  if(!exists("HannanumObj", envir=.KoNLPEnv)){
    assign("HannanumObj",.jnew("kr/pe/freesearch/jhannanum/comm/HannanumInterface"),.KoNLPEnv)
  }
  for(dic in whichDics){
    # The Java side returns a negative status code on failure.
    ret <- .jcall(get("HannanumObj",envir=.KoNLPEnv),
                  "I", "reloadUserDic", get("CurrentUserDic", envir=.KoNLPEnv), dic)
    if(ret < 0){
      # BUG FIX: corrected grammar of the failure message ("was" -> "were")
      cat(sprintf("Dictionaries in %s were not reloaded\n", dic))
    }
  }
}
#' tag name converter
#'
#' Converts a POS tag name between the KAIST ("K") and Sejong ("S") tag sets.
#'
#' @param fromTag tag set name to convert from ("K" or "S")
#' @param toTag desired tag set name ("K" or "S"); must differ from fromTag
#' @param tag tag name to search
convertTag <- function(fromTag, toTag, tag) {
  valid_sets <- c("K", "S")
  bad_input <- fromTag == toTag ||
    !(fromTag %in% valid_sets) ||
    !(toTag %in% valid_sets)
  if (bad_input) {
    stop("check input parameter!")
  }
  # The mapping tables are named vectors (e.g. "KtoS", "StoK"); pick the
  # right one by name and index it with the requested tag.
  mapping <- get(paste0(fromTag, "to", toTag))
  mapping[tag]
}
#' use Sejong noun dictionary
#'
#' Retrieve the Sejong noun dictionary and rebuild the analyzer dictionary
#' from it.
#'
#' @param backup if TRUE, back up the current user dictionary first
#' @references \url{http://www.sejong.or.kr/}
#' @export
useSejongDic <- function(backup=TRUE){
  # IDIOM FIX: TRUE/FALSE instead of the reassignable shorthands T/F.
  if(backup){
    backupUsrDic(ask=FALSE)
  }
  buildDictionary(ext_dic="sejong")
}
#' use Insighter dictionary
#'
#' Rebuild the analyzer dictionary from the Insighter dictionary.
#'
#' @param backup if TRUE, back up the current working dictionary first
#'
#' @export
useInsighterDic <- function(backup=TRUE){
  # IDIOM FIX: TRUE/FALSE instead of the reassignable shorthands T/F.
  if(backup){
    backupUsrDic(ask=FALSE)
  }
  buildDictionary(ext_dic="insighter")
}
#' use Woorimalsam dictionary
#'
#' Rebuild the analyzer dictionary from the Woorimalsam dictionary.
#'
#' @param backup if TRUE, back up the current working dictionary first
#'
#' @export
useWoorimalsamDic <- function(backup=TRUE){
  # IDIOM FIX: TRUE/FALSE instead of the reassignable shorthands T/F.
  if(backup){
    backupUsrDic(ask=FALSE)
  }
  buildDictionary(ext_dic="woorimalsam")
}
#' use Insighter and Woorimalsam dictionary
#'
#' Rebuild the analyzer dictionary from both the Woorimalsam and Insighter
#' dictionaries.
#'
#' @param backup if TRUE, back up the current working dictionary first
#'
#' @export
useNIADic <- function(backup=TRUE){
  # IDIOM FIX: TRUE/FALSE instead of the reassignable shorthands T/F.
  if(backup){
    backupUsrDic(ask=FALSE)
  }
  buildDictionary(ext_dic=c("woorimalsam", "insighter"))
}
# Internal helper: replace (or merge) the current user dictionary with a
# dictionary shipped inside the handic zip archive.
#   dicname: "Sejong" (Sejong + system dictionary) or "System" (user dictionary)
#   backup:  if TRUE, back up the current dic_user.txt first
useDic <- function(dicname, backup=TRUE){
  if(dicname == "Sejong"){
    #actually Sejong + System Dictionary
    relpath <- file.path("data","kE","dic_user2.txt")
  }else if(dicname == "System"){
    #actually user dictionary
    relpath <- file.path("data","kE","dic_user.txt")
  }else{
    stop("wrong dictionary name!")
  }
  newdic <- readZipDic(get("SejongDicsZip", envir=.KoNLPEnv), relpath)
  # IDIOM FIX: TRUE/FALSE instead of the reassignable shorthands T/F.
  if(backup){
    backupUsrDic(ask=FALSE)
  }
  # replace (not append to) the current user dictionary
  mergeUserDic(newdic, append=FALSE)
}
#' use system default dictionary
#'
#' Retrieve the system default dictionary to use in KoNLP.
#'
#' @param backup if TRUE, back up the current dictionary first
#' @export
useSystemDic <- function(backup=TRUE){
  #.Deprecated("buildDictionary")
  useDic("System", backup)
}
#' use for backup current dic_user.txt
#'
#' Utility function for backing up the dic_user.txt file to the backup
#' directory before it is overwritten by a dictionary change.
#'
#' @examples
#' \dontrun{
#' ## This codes can not be run if you don't have encoding system
#' ## which can en/decode Hangul(ex) CP949, EUC-KR, UTF-8).
#' dicpath <- file.path(system.file(package="Sejong"), "dics", "handic.zip")
#' conn <- unz(dicpath, file.path("data","kE","dic_user2.txt"))
#' newdic <- read.csv(conn, sep="\t", header=FALSE, fileEncoding="UTF-8", stringsAsFactors=FALSE)
#' mergeUserDic(newdic)
#' ## backup merged new dictionary
#' backupUsrDic(ask=FALSE)
#' ## restore from backup directory
#' restoreUsrDic(ask=FALSE)
#' ## reloading new dictionary
#' reloadAllDic()}
#' @param ask ask to confirm backup
#' @export
backupUsrDic <- function(ask=TRUE){
  UserDic <- get("CurrentUserDic",envir=.KoNLPEnv)
  alteredUserDicPath <- get("backupUserDicPath", .KoNLPEnv)
  response <- "Y"
  if(ask){
    # NOTE(review): only an upper-case "Y" (first character) confirms,
    # despite the conventional "(Y/n)" prompt — kept for compatibility.
    response <- readline("Would you backup your current 'dic_user.txt' file to backup directory? (Y/n): ")
  }
  if(substr(response,1,1) == "Y"){
    ret1 <- TRUE
    if(!file.exists(alteredUserDicPath)){
      ret1 <- dir.create(alteredUserDicPath)
    }
    # IDIOM FIX: TRUE instead of the reassignable shorthand T.
    ret2 <- file.copy(UserDic, alteredUserDicPath, overwrite=TRUE)
    if(ret1 && ret2){
      cat("Backup was just finished!\n")
    }else{
      warning(sprintf("Could not copy %s\n", UserDic))
      # Mark that no valid backup exists so restoreUsrDic() refuses to run.
      # NOTE(review): nothing in this file ever sets CopyedUserDic back to
      # TRUE on success — presumably initialised elsewhere, TODO confirm.
      assign("CopyedUserDic", FALSE, .KoNLPEnv)
    }
  }
}
#' use for restoring backuped dic_user.txt
#'
#' Utility function for restoring the backed-up dic_user.txt file to the
#' current dictionary directory, then reloading all dictionaries.
#'
#' @examples
#' \dontrun{
#' ## This codes can not be run if you don't have encoding system
#' ## which can en/decode Hangul(ex) CP949, EUC-KR, UTF-8).
#' dicpath <- file.path(system.file(package="Sejong"), "dics", "handic.zip")
#' conn <- unz(dicpath, file.path("data","kE","dic_user2.txt"))
#' newdic <- read.csv(conn, sep="\t", header=FALSE, fileEncoding="UTF-8", stringsAsFactors=FALSE)
#' mergeUserDic(newdic)
#' ## backup merged new dictionary
#' backupUsrDic(ask=FALSE)
#' ## restore from backup directory
#' restoreUsrDic(ask=FALSE)
#' ## reloading new dictionary
#' reloadAllDic()}
#' @param ask ask to confirm restore
#' @export
restoreUsrDic <- function(ask=TRUE){
  if(!get("CopyedUserDic", .KoNLPEnv)){
    stop("There is no backuped dic_user.txt!\n")
  }
  if(!file.exists(get("backupUserDic", .KoNLPEnv))){
    stop("There is no backuped dic_user.txt to restore!\n")
  }
  UserDicPath <- get("CurrentUserDicPath",envir=.KoNLPEnv)
  alteredUserDicPath <- get("backupUserDic", .KoNLPEnv)
  response <- "Y"
  if(ask){
    response <- readline("Would you restore your backuped 'dic_user.txt' file to current dictionary directory? (Y/n): ")
  }
  if(substr(response,1,1) == "Y"){
    # IDIOM FIX: TRUE instead of the reassignable shorthand T.
    ret <- file.copy(alteredUserDicPath, UserDicPath, overwrite=TRUE)
    if(ret){
      # BUG FIX: corrected typo in user-facing message ("finidhed")
      cat("finished restoring!\n")
    }else{
      warning(sprintf("Could not copy %s\n", UserDicPath))
    }
  }
  reloadAllDic()
}
#' appending or replacing with new data.frame
#'
#' Appends a new dictionary to the current user dictionary, or replaces the
#' current dictionary with the new one, then reloads all dictionaries.
#'
#' @examples
#' \dontrun{
#' ## This codes can not be run if you don't have encoding system
#' ## which can en/decode Hangul(ex) CP949, EUC-KR, UTF-8).
#' dicpath <- file.path(system.file(package="Sejong"), "dics", "handic.zip")
#' conn <- unz(dicpath, file.path("data","kE","dic_user2.txt"))
#' newdic <- read.csv(conn, sep="\t", header=FALSE, fileEncoding="UTF-8", stringsAsFactors=FALSE)
#' mergeUserDic(newdic)
#' ## backup merged new dictionary
#' backupUsrDic(ask=FALSE)
#' ## restore from backup directory
#' restoreUsrDic(ask=FALSE)
#' ## reloading new dictionary
#' reloadAllDic()}
#' @param newUserDic new user dictionary as a two-column (word, tag) data.frame
#' @param append append (TRUE) or replace (FALSE) the current dictionary
#' @param verbose print the offending tags when validation fails
#' @param ask ask whether to back up the current dictionary first
#' @export
#'@importFrom utils read.csv write.table
mergeUserDic <- function(newUserDic, append=TRUE, verbose=FALSE, ask=FALSE){
  # BUG FIX: use short-circuit || so ncol()/nrow() are never evaluated on a
  # non-data-frame input (ncol(x) != 2 yields logical(0) there, which made
  # the original vectorised `|` test error out instead of giving the
  # intended message).
  if(!is.data.frame(newUserDic) || ncol(newUserDic) != 2 || nrow(newUserDic) == 0){
    stop("check 'newUserDic'.\n")
  }
  # Normalise factor columns (pre-R-4.0 read.csv default) to character.
  # IDIOM FIX: is.factor() instead of class(x) == "factor" (which is a
  # vectorised comparison and misses ordered factors).
  if(is.factor(newUserDic[,2])){
    newUserDic[,2] <- as.character(newUserDic[,2])
  }
  if(is.factor(newUserDic[,1])){
    newUserDic[,1] <- as.character(newUserDic[,1])
  }
  # NOTE: the per-row ASCII check below was disabled because it was too slow.
  #if(all(sapply(newUserDic[,2], is.ascii)) == FALSE){
  #  stop("check 'newUserDic'.\n")
  #}
  response <- "n"
  if(ask){
    response <- readline("Would you backup your current 'dic_user.txt' file to backup directory? (Y/n/c): ")
  }
  if(substr(response,1,1) == "Y"){
    backupUsrDic(ask=FALSE)
  }else if(substr(response,1,1) == "c"){
    # user cancelled
    return()
  }
  # Reject rows whose tag is absent from the supported tag table `tags`.
  errorTags <- Filter(function(x){is.na(tags[x])}, newUserDic[,2])
  if(length(errorTags) > 0){
    if(verbose){
      cat(errorTags,"\n" ,sep="\t")
    }
    stop("Unsupported tag names!\n")
  }
  #combine with current dic_user.txt or replace them all.
  UserDic <- get("CurrentUserDic",envir=.KoNLPEnv)
  oldUserDic <- read.csv(UserDic, sep="\t", header=FALSE, fileEncoding="UTF-8", stringsAsFactors=FALSE, comment.char="")
  newDicEnc <- unique(Encoding(newUserDic[,1]))
  if(length(newDicEnc) > 1){
    stop("check newUserDic encodings!\n")
  }
  # Re-encode to UTF-8 on non-UTF-8 locales so the merged file is uniform.
  localCharset <- localeToCharset()[1]
  if(localCharset != "UTF-8"){
    if(newDicEnc != "UTF-8"){
      newUserDic[,1] <- iconv(newUserDic[,1],from=localCharset, to="UTF-8")
    }
    oldUserDic[,1] <- iconv(oldUserDic[,1], from=localCharset, to="UTF-8")
  }
  names(newUserDic) <- c("word","tag")
  names(oldUserDic) <- c("word","tag")
  if(append){
    newestUserDic <- rbind(oldUserDic, newUserDic)
  }else{
    newestUserDic <- newUserDic
  }
  write.table(newestUserDic, file=UserDic, quote=FALSE, row.names=FALSE, sep="\t", col.names=FALSE, fileEncoding="UTF-8")
  cat(sprintf("%s words were added to dic_user.txt.\n", nrow(newUserDic)))
  reloadAllDic()
}
# Read a two-column dictionary stored inside a zip archive via the Java
# helper class. Returns a character data.frame: words in column 1, tags
# in column 2, with the text marked as UTF-8.
readZipDic <- function(zipPath, dicPath){
  entries <- .jcall("kr/pe/freesearch/KoNLP/KoNLPUtil",
                    "[S", "readZipDic", zipPath, dicPath)
  Encoding(entries) <- "UTF-8"
  as.data.frame(matrix(entries, ncol = 2, byrow = TRUE),
                stringsAsFactors = FALSE)
}
#' summary of dictionaries
#'
#' Show summary, head and tail of the current or backup dictionary.
#'
#' @examples
#' ## show current dictionary's summary, head, tail
#' statDic("current", 10)
#' @param which "current" or "backup" dictionary
#' @param n a single integer. Size for the resulting object to view
#' @return a list with elements \code{summary}, \code{head} and \code{tail}
#' @export
statDic <- function(which="current", n=6){
  if(which == "current"){
    UserDic <- get("CurrentUserDic", envir=.KoNLPEnv)
  }else if(which == "backup"){
    UserDic <- get("backupUserDic", envir=.KoNLPEnv)
  }else{
    stop("No dictionary to summary!")
  }
  if(!file.exists(UserDic)){
    stop("No dictionary to summary!\n Please check dictionary files.")
  }
  UserDicView <- read.csv(UserDic, sep="\t", header=FALSE, fileEncoding="UTF-8",
                          stringsAsFactors=FALSE, comment.char="",
                          colClasses='character')
  # normalize words to UTF-8 on non-UTF-8 locales; localeToCharset() can
  # return NA, which the original comparison did not handle
  localCharset <- localeToCharset()[1]
  if(!is.na(localCharset) && localCharset != "UTF-8"){
    UserDicView[,1] <- iconv(UserDicView[,1], from=localCharset, to="UTF-8")
  }
  names(UserDicView) <- c("word","tag")
  # a factor tag column makes summary() report per-tag counts
  UserDicView[,2] <- as.factor(UserDicView[,2])
  list(summary=summary(UserDicView),
       head=head(UserDicView, n=n),
       tail=tail(UserDicView, n=n))
}
#' buildDictionary
#'
#' @param ext_dic external dictionary name which can be 'woorimalsam', 'insighter', 'sejong'.
#' @param category_dic_nms category dictionary will be used.
#' \itemize{
#' \item general
#' \item chemical
#' \item language
#' \item music
#' \item history
#' \item education
#' \item society in general
#' \item life
#' \item physical
#' \item information and communication
#' \item medicine
#' \item earth
#' \item construction
#' \item veterinary science
#' \item business
#' \item law
#' \item plant
#' \item buddhism
#' \item engineering general
#' \item folk
#' \item administration
#' \item economic
#' \item math
#' \item korean medicine
#' \item military
#' \item literature
#' \item clothes
#' \item religion normal
#' \item animal
#' \item agriculture
#' \item astronomy
#' \item transport
#' \item natural plain
#' \item industry
#' \item medium
#' \item political
#' \item geography
#' \item mining
#' \item hearing
#' \item fishing
#' \item machinery
#' \item catholic
#' \item book title
#' \item named
#' \item electrical and electronic
#' \item pharmacy
#' \item art, music and physical
#' \item useless
#' \item ocean
#' \item forestry
#' \item christian
#' \item craft
#' \item service
#' \item sports
#' \item food
#' \item art
#' \item environment
#' \item video
#' \item natural resources
#' \item industry general
#' \item smoke
#' \item philosophy
#' \item health general
#' \item proper names general
#' \item welfare
#' \item material
#' \item humanities general
#' }
#' @param user_dic \code{data.frame} which include 'word' and 'tag' fields. User can add more user defined terms and tags.
#' @param replace_usr_dic A logical scala. Should user dictionary needs to be replaced with new user defined dictionary or appended.
#' @param verbose will print detail progress. default \code{FALSE}
#'
#' @export
#' @importFrom RSQLite dbConnect dbGetQuery dbWriteTable dbDisconnect SQLite
#' @importFrom devtools install_url
buildDictionary <- function(ext_dic='woorimalsam', category_dic_nms='',
                            user_dic=data.frame(), replace_usr_dic=FALSE,
                            verbose=FALSE){
  # Build and install a new user dictionary from the NIAdic database plus
  # optional category and user-supplied entries, then reload the analyzer.
  # NOTE(review): 'verbose' is accepted for interface compatibility but is
  # not used anywhere in this implementation.
  # Check that the 'NIAdic' data package is installed.
  # This bootstrap can be removed once NIAdic is available on CRAN.
  if (!nzchar(system.file(package = 'NIAdic'))){
    if(all(c('ggplot2', 'data.table', 'scales', 'rmarkdown', 'knitr') %in% rownames(installed.packages()))){
      install_url("https://github.com/haven-jeon/NIADic/releases/download/0.0.1/NIAdic_0.0.1.tar.gz", dependencies=TRUE, build_vignettes=TRUE)
    }else{
      install_url("https://github.com/haven-jeon/NIADic/releases/download/0.0.1/NIAdic_0.0.1.tar.gz", dependencies=TRUE)
    }
    if(!require('NIAdic', character.only = TRUE)) stop("'NIAdic' Package not found")
  }
  han_db_path <- file.path(system.file(package="NIAdic"), "hangul.db")
  conn <- dbConnect(SQLite(), han_db_path)
  # release the DB handle even if an error occurs below
  on.exit(dbDisconnect(conn))
  ext_dic_df <- data.frame()
  for(dic in unique(ext_dic)){
    switch(dic,
           sejong={
             dic_df <- dbGetQuery(conn, "select term, tag, 'sejong' as dic from sejong")
             ext_dic_df <- rbind(ext_dic_df, dic_df)
           },
           insighter={
             dic_df <- dbGetQuery(conn, "select term, tag, 'insighter' as dic from insighter")
             ext_dic_df <- rbind(ext_dic_df, dic_df)
           },
           woorimalsam={
             dic_df <- dbGetQuery(conn, "select term, tag, 'woorimalsam' as dic from woorimalsam where eng_cate = 'general'")
             ext_dic_df <- rbind(ext_dic_df, dic_df)
           },
           {
             # bug fix: report the unknown name itself ('dic'), not the
             # whole 'ext_dic' vector
             stop(sprintf("No %s dictionary!", dic))
           }
    )
  }
  cate_dic_df <- data.frame()
  # only run the category query when a non-empty category name was supplied;
  # the default '' previously triggered a useless "in ('')" query
  if(is.character(category_dic_nms) && any(nzchar(category_dic_nms))){
    cate_dic_df <- dbGetQuery(conn, sprintf("select term, tag, eng_cate as dic from woorimalsam where eng_cate in (%s)",
                                            paste0("'", category_dic_nms, "'", collapse=',')))
  }
  user_dic_tot <- data.frame()
  # user dictionary processing; && short-circuits so ncol()/nrow() are not
  # evaluated on a non-data.frame input
  if(is.data.frame(user_dic) && ncol(user_dic) == 2 && nrow(user_dic) > 0){
    if(is.factor(user_dic[,2])){
      user_dic[,2] <- as.character(user_dic[,2])
    }
    if(is.factor(user_dic[,1])){
      user_dic[,1] <- as.character(user_dic[,1])
    }
    usrDicEnc <- unique(Encoding(user_dic[,1]))
    if(length(usrDicEnc) > 1){
      stop("check user_dic encodings!\n")
    }
    # normalize words to UTF-8 on non-UTF-8 locales; guard against an NA
    # locale (localeToCharset() can return NA)
    localCharset <- localeToCharset()[1]
    if(!is.na(localCharset) && localCharset != "UTF-8" && usrDicEnc != "UTF-8"){
      user_dic[,1] <- iconv(user_dic[,1], from=localCharset, to="UTF-8")
    }
    # check tag validity for the user dictionary
    errorTags <- Filter(function(x){is.na(tags[x])}, user_dic[,2])
    if(length(errorTags) > 0){
      cat(errorTags, "\n", sep="\t")
      stop("Unsupported tag names on user_dic!\n")
    }
    names(user_dic) <- c("term","tag")
    # when replacing, overwrite the existing table instead of appending;
    # append=FALSE alone fails if the table already exists
    dbWriteTable(conn, "user_dic", user_dic, append=!replace_usr_dic,
                 overwrite=replace_usr_dic)
    user_dic_tot <- dbGetQuery(conn, "select *, 'user' as dic from user_dic")
  }
  result_dic <- rbind(ext_dic_df, cate_dic_df, user_dic_tot)
  # re-check tags over the combined dictionary
  errorTags <- Filter(function(x){is.na(tags[x])}, result_dic[,2])
  if(length(errorTags) > 0){
    cat(errorTags, "\n", sep="\t")
    stop("Unsupported tag names on user_dic!\n")
  }
  Encoding(result_dic$term) <- 'UTF-8'
  UserDic <- get("CurrentUserDic", envir=.KoNLPEnv)
  write.table(unique(result_dic[, c('term', 'tag')]), file=UserDic, quote=FALSE,
              row.names=FALSE, sep="\t", col.names=FALSE, fileEncoding="UTF-8")
  cat(sprintf("%s words dictionary was built.\n", nrow(result_dic)))
  reloadAllDic()
}
| /R/manageDic.R | no_license | DataSoccer/KoNLP | R | false | false | 18,716 | r | #Copyright 2011 Heewon Jeon(madjakarta@gmail.com)
#
#This file is part of KoNLP.
#
#KoNLP is free software: you can redistribute it and/or modify it under the
#terms of the GNU General Public License as published by the Free Software
#Foundation, either version 3 of the License, or (at your option) any later
#version.
#KoNLP is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with KoNLP. If not, see <http://www.gnu.org/licenses/>
# additional noun dictionary from Sejong project
#
# This dictionary was extracted from the 21st century Sejong project. The dictionary can be merged with the current user dictionary (dic_user.txt), but it requires a lot more total memory to use safely in KoNLP.
#
# @name extra_dic
# @docType data
# @author Heewon Jeon \email{madjakarta@@gmail.com}
# @references \url{www.sejong.or.kr}
# @keywords dictionary
#' reload all Hannanum analyzer dictionary
#'
#' Mainly, user dictionary reloading for Hannanum Analyzer.
#' If you want to update user dictionary on KoNLP_dic/current/dic_user.txt, need to execute this function after editing dictionary.
#'
#' @examples
#' \dontrun{
#' ## This codes can not be run if you don't have encoding system
#' ## which can en/decode Hangul(ex) CP949, EUC-KR, UTF-8).
#' dicpath <- file.path(system.file(package="Sejong"), "dics", "handic.zip")
#' conn <- unz(dicpath, file.path("data","kE","dic_user2.txt"))
#' newdic <- read.csv(conn, sep="\t", header=FALSE, fileEncoding="UTF-8", stringsAsFactors=FALSE)
#' mergeUserDic(newdic)
#' ## backup merged new dictionary
#' backupUsrDic(ask=FALSE)
#' ## restore from backup directory
#' restoreUsrDic(ask=FALSE)
#' ## reloading new dictionary
#' reloadAllDic()}
#' @export
reloadAllDic <- function(){
  # lazily create the shared Hannanum interface object on first use
  if(!exists("HannanumObj", envir = .KoNLPEnv)){
    hannanum <- .jnew("kr/pe/freesearch/jhannanum/comm/HannanumInterface")
    assign("HannanumObj", hannanum, .KoNLPEnv)
  }
  handle <- get("HannanumObj", envir = .KoNLPEnv)
  .jcall(handle, "V", "reloadAllDic")
}
#' reload dictionaries for specific functions
#'
#' This function for reloading user dictionary for specific functions,
#' after you have updated user dictionary on KoNLP_dic/current/user_dic.txt.
#'
#' @param whichDics character vector which can be "extractNoun", "SimplePos09", "SimplePos22", "SimplePos22"
#' @examples
#' \dontrun{
#' reloadUserDic(c("extractNoun", "SimplePos22"))}
#' @export
reloadUserDic <- function(whichDics){
  # lazily create the shared Hannanum interface object on first use
  if(!exists("HannanumObj", envir = .KoNLPEnv)){
    hannanum <- .jnew("kr/pe/freesearch/jhannanum/comm/HannanumInterface")
    assign("HannanumObj", hannanum, .KoNLPEnv)
  }
  if(!is.character(whichDics)){
    stop("'whichDics' must be character!")
  }
  handle <- get("HannanumObj", envir = .KoNLPEnv)
  userDicPath <- get("CurrentUserDic", envir = .KoNLPEnv)
  for(analyzer in whichDics){
    # a negative return code signals that the reload failed for 'analyzer'
    status <- .jcall(handle, "I", "reloadUserDic", userDicPath, analyzer)
    if(status < 0){
      cat(sprintf("Dictionaries in %s was not reloaded\n", analyzer))
    }
  }
}
#' tag name converter
#'
#' Only supports tag conversion between the KAIST ("K") and Sejong ("S")
#' tag sets.
#'
#' @param fromTag tag set name to convert from ("K" or "S")
#' @param toTag desired tag set name ("K" or "S")
#' @param tag tag name to search
convertTag <- function(fromTag, toTag, tag){
  # both ends must name a supported tag set and they must differ;
  # %in% replaces the original any(c(...) == x) idiom
  valid_sets <- c("K", "S")
  if(fromTag == toTag || !(fromTag %in% valid_sets) || !(toTag %in% valid_sets)){
    stop("check input parameter!")
  }
  # lookup tables are named like "KtoS" / "StoK"
  dicname <- paste(fromTag, "to", toTag, sep="")
  get(dicname)[tag]
}
#' use Sejong noun dictionary
#'
#' Retrieve the Sejong dictionary to use in KoNLP.
#'
#' @param backup will backup current dictionary?
#' @references \url{http://www.sejong.or.kr/}
#' @export
useSejongDic <- function(backup=TRUE){
  # isTRUE() is robust against NA or non-logical input
  if(isTRUE(backup)){
    backupUsrDic(ask=FALSE)
  }
  buildDictionary(ext_dic="sejong")
}
#' use Insighter dictionary
#'
#' @param backup will backup current working dictionary?
#'
#' @export
useInsighterDic <- function(backup=TRUE){
  # isTRUE() is robust against NA or non-logical input
  if(isTRUE(backup)){
    backupUsrDic(ask=FALSE)
  }
  buildDictionary(ext_dic="insighter")
}
#' use Woorimalsam dictionary
#'
#' @param backup will backup current working dictionary?
#'
#' @export
useWoorimalsamDic <- function(backup=TRUE){
  # isTRUE() is robust against NA or non-logical input
  if(isTRUE(backup)){
    backupUsrDic(ask=FALSE)
  }
  buildDictionary(ext_dic="woorimalsam")
}
#' use Insighter and Woorimalsam dictionary
#'
#' @param backup will backup current working dictionary?
#'
#' @export
useNIADic <- function(backup=TRUE){
  # isTRUE() is robust against NA or non-logical input
  if(isTRUE(backup)){
    backupUsrDic(ask=FALSE)
  }
  buildDictionary(ext_dic=c("woorimalsam", "insighter"))
}
# Internal helper to switch the active user dictionary to one of the
# dictionaries shipped inside the Sejong zip archive.
useDic <- function(dicname, backup=TRUE){
  relpath <- switch(dicname,
                    # "Sejong" is actually Sejong + system dictionary
                    Sejong = file.path("data", "kE", "dic_user2.txt"),
                    # "System" is actually the user dictionary
                    System = file.path("data", "kE", "dic_user.txt"),
                    stop("wrong dictionary name!"))
  newdic <- readZipDic(get("SejongDicsZip", envir=.KoNLPEnv), relpath)
  if(backup == TRUE){
    backupUsrDic(ask=FALSE)
  }
  mergeUserDic(newdic, append=FALSE)
}
#' use system default dictionary
#'
#' Retrieve the system default dictionary to use in KoNLP.
#'
#' @param backup will backup current dictionary?
#' @export
useSystemDic <- function(backup=TRUE){
  # delegate to the internal dictionary switcher
  #.Deprecated("buildDictionary")
  useDic("System", backup)
}
#' use for backup current dic_user.txt
#'
#' Utility function for backup dic_user.txt file to backup directory.
#'
#' @examples
#' \dontrun{
#' ## This codes can not be run if you don't have encoding system
#' ## which can en/decode Hangul(ex) CP949, EUC-KR, UTF-8).
#' dicpath <- file.path(system.file(package="Sejong"), "dics", "handic.zip")
#' conn <- unz(dicpath, file.path("data","kE","dic_user2.txt"))
#' newdic <- read.csv(conn, sep="\t", header=FALSE, fileEncoding="UTF-8", stringsAsFactors=FALSE)
#' mergeUserDic(newdic)
#' ## backup merged new dictionary
#' backupUsrDic(ask=FALSE)
#' ## restore from backup directory
#' restoreUsrDic(ask=FALSE)
#' ## reloading new dictionary
#' reloadAllDic()}
#' @param ask ask to confirm backup
#' @export
backupUsrDic <- function(ask=TRUE){
  # Copy the current dic_user.txt into the backup directory, creating the
  # directory on first use.
  UserDic <- get("CurrentUserDic", envir=.KoNLPEnv)
  backupDir <- get("backupUserDicPath", .KoNLPEnv)
  response <- "Y"
  if(ask){
    response <- readline("Would you backup your current 'dic_user.txt' file to backup directory? (Y/n): ")
  }
  if(substr(response, 1, 1) == "Y"){
    created <- TRUE
    if(!file.exists(backupDir)){
      created <- dir.create(backupDir)
    }
    copied <- file.copy(UserDic, backupDir, overwrite=TRUE)
    if(created && copied){
      cat("Backup was just finished!\n")
      # bug fix: record that a usable backup now exists; restoreUsrDic()
      # refuses to run unless CopyedUserDic is TRUE, but the original only
      # ever set it to FALSE on failure
      assign("CopyedUserDic", TRUE, .KoNLPEnv)
    }else{
      warning(sprintf("Could not copy %s\n", UserDic))
      assign("CopyedUserDic", FALSE, .KoNLPEnv)
    }
  }
}
#' use for restoring backuped dic_user.txt
#'
#' Utility function for restoring dic_user.txt file to dictionary directory.
#'
#' @examples
#' \dontrun{
#' ## This codes can not be run if you don't have encoding system
#' ## which can en/decode Hangul(ex) CP949, EUC-KR, UTF-8).
#' dicpath <- file.path(system.file(package="Sejong"), "dics", "handic.zip")
#' conn <- unz(dicpath, file.path("data","kE","dic_user2.txt"))
#' newdic <- read.csv(conn, sep="\t", header=FALSE, fileEncoding="UTF-8", stringsAsFactors=FALSE)
#' mergeUserDic(newdic)
#' ## backup merged new dictionary
#' backupUsrDic(ask=FALSE)
#' ## restore from backup directory
#' restoreUsrDic(ask=FALSE)
#' ## reloading new dictionary
#' reloadAllDic()}
#' @param ask ask to confirm backup
#' @export
restoreUsrDic <- function(ask=TRUE){
  # Restore the backed-up dic_user.txt into the active dictionary
  # directory, then reload all dictionaries.
  if(!get("CopyedUserDic", .KoNLPEnv)){
    stop("There is no backuped dic_user.txt!\n")
  }
  if(!file.exists(get("backupUserDic", .KoNLPEnv))){
    stop("There is no backuped dic_user.txt to restore!\n")
  }
  UserDicPath <- get("CurrentUserDicPath", envir=.KoNLPEnv)
  backupFile <- get("backupUserDic", .KoNLPEnv)
  response <- "Y"
  if(ask){
    response <- readline("Would you restore your backuped 'dic_user.txt' file to current dictionary directory? (Y/n): ")
  }
  if(substr(response, 1, 1) == "Y"){
    copied <- file.copy(backupFile, UserDicPath, overwrite=TRUE)
    if(copied){
      # bug fix: corrected message typo ("finidhed" -> "finished")
      cat("finished restoring!\n")
    }else{
      warning(sprintf("Could not copy %s\n", UserDicPath))
    }
  }
  reloadAllDic()
}
#' appending or replacing with new data.frame
#'
#' Appends a new dictionary to the current user dictionary, or
#' replaces the current user dictionary with the new one.
#'
#' @examples
#' \dontrun{
#' ## This code can not be run if you don't have an encoding system
#' ## which can en/decode Hangul (e.g. CP949, EUC-KR, UTF-8).
#' dicpath <- file.path(system.file(package="Sejong"), "dics", "handic.zip")
#' conn <- unz(dicpath, file.path("data","kE","dic_user2.txt"))
#' newdic <- read.csv(conn, sep="\t", header=FALSE, fileEncoding="UTF-8", stringsAsFactors=FALSE)
#' mergeUserDic(newdic)
#' ## backup merged new dictionary
#' backupUsrDic(ask=FALSE)
#' ## restore from backup directory
#' restoreUsrDic(ask=FALSE)
#' ## reloading new dictionary
#' reloadAllDic()}
#' @param newUserDic new user dictionary as a two-column data.frame (word, tag)
#' @param append if TRUE append to the current dictionary, otherwise replace it
#' @param verbose print the offending tags when unsupported tag names are found
#' @param ask ask whether to back up the current dictionary first
#' @export
#'@importFrom utils read.csv write.table
mergeUserDic <- function(newUserDic, append=TRUE, verbose=FALSE, ask=FALSE){
  # scalar validation: use || (short-circuit) so ncol()/nrow() are not
  # evaluated on a non-data.frame input
  if(!is.data.frame(newUserDic) || ncol(newUserDic) != 2 || nrow(newUserDic) == 0){
    stop("check 'newUserDic'.\n")
  }
  # is.factor() is the robust test; class(x) == "factor" fails for objects
  # carrying more than one class
  if(is.factor(newUserDic[,2])){
    newUserDic[,2] <- as.character(newUserDic[,2])
  }
  if(is.factor(newUserDic[,1])){
    newUserDic[,1] <- as.character(newUserDic[,1])
  }
  response <- "n"
  if(ask){
    response <- readline("Would you backup your current 'dic_user.txt' file to backup directory? (Y/n/c): ")
  }
  if(substr(response,1,1) == "Y"){
    backupUsrDic(ask=FALSE)
  }else if(substr(response,1,1) == "c"){
    # user cancelled: leave everything untouched
    return(invisible(NULL))
  }
  # reject entries whose tag is not a known analyzer tag
  errorTags <- Filter(function(x){is.na(tags[x])}, newUserDic[,2])
  if(length(errorTags) > 0){
    if(verbose){
      cat(errorTags, "\n", sep="\t")
    }
    stop("Unsupported tag names!\n")
  }
  # combine with the current dic_user.txt or replace it entirely
  UserDic <- get("CurrentUserDic", envir=.KoNLPEnv)
  oldUserDic <- read.csv(UserDic, sep="\t", header=FALSE, fileEncoding="UTF-8",
                         stringsAsFactors=FALSE, comment.char="")
  newDicEnc <- unique(Encoding(newUserDic[,1]))
  if(length(newDicEnc) > 1){
    stop("check newUserDic encodings!\n")
  }
  # normalize words to UTF-8 on non-UTF-8 locales; guard against an NA
  # locale, since localeToCharset() can return NA
  localCharset <- localeToCharset()[1]
  if(!is.na(localCharset) && localCharset != "UTF-8"){
    if(newDicEnc != "UTF-8"){
      newUserDic[,1] <- iconv(newUserDic[,1], from=localCharset, to="UTF-8")
    }
    oldUserDic[,1] <- iconv(oldUserDic[,1], from=localCharset, to="UTF-8")
  }
  names(newUserDic) <- c("word","tag")
  names(oldUserDic) <- c("word","tag")
  if(append){
    newestUserDic <- rbind(oldUserDic, newUserDic)
  }else{
    newestUserDic <- newUserDic
  }
  write.table(newestUserDic, file=UserDic, quote=FALSE, row.names=FALSE,
              sep="\t", col.names=FALSE, fileEncoding="UTF-8")
  cat(sprintf("%s words were added to dic_user.txt.\n", nrow(newUserDic)))
  reloadAllDic()
}
# Read a two-column dictionary stored inside a zip archive via the Java
# helper class. Returns a character data.frame: words in column 1, tags
# in column 2, with the text marked as UTF-8.
readZipDic <- function(zipPath, dicPath){
  entries <- .jcall("kr/pe/freesearch/KoNLP/KoNLPUtil",
                    "[S", "readZipDic", zipPath, dicPath)
  Encoding(entries) <- "UTF-8"
  as.data.frame(matrix(entries, ncol = 2, byrow = TRUE),
                stringsAsFactors = FALSE)
}
#' summary of dictionaries
#'
#' Show summary, head and tail of the current or backup dictionary.
#'
#' @examples
#' ## show current dictionary's summary, head, tail
#' statDic("current", 10)
#' @param which "current" or "backup" dictionary
#' @param n a single integer. Size for the resulting object to view
#' @return a list with elements \code{summary}, \code{head} and \code{tail}
#' @export
statDic <- function(which="current", n=6){
  if(which == "current"){
    UserDic <- get("CurrentUserDic", envir=.KoNLPEnv)
  }else if(which == "backup"){
    UserDic <- get("backupUserDic", envir=.KoNLPEnv)
  }else{
    stop("No dictionary to summary!")
  }
  if(!file.exists(UserDic)){
    stop("No dictionary to summary!\n Please check dictionary files.")
  }
  UserDicView <- read.csv(UserDic, sep="\t", header=FALSE, fileEncoding="UTF-8",
                          stringsAsFactors=FALSE, comment.char="",
                          colClasses='character')
  # normalize words to UTF-8 on non-UTF-8 locales; localeToCharset() can
  # return NA, which the original comparison did not handle
  localCharset <- localeToCharset()[1]
  if(!is.na(localCharset) && localCharset != "UTF-8"){
    UserDicView[,1] <- iconv(UserDicView[,1], from=localCharset, to="UTF-8")
  }
  names(UserDicView) <- c("word","tag")
  # a factor tag column makes summary() report per-tag counts
  UserDicView[,2] <- as.factor(UserDicView[,2])
  list(summary=summary(UserDicView),
       head=head(UserDicView, n=n),
       tail=tail(UserDicView, n=n))
}
#' buildDictionary
#'
#' @param ext_dic external dictionary name which can be 'woorimalsam', 'insighter', 'sejong'.
#' @param category_dic_nms category dictionary will be used.
#' \itemize{
#' \item general
#' \item chemical
#' \item language
#' \item music
#' \item history
#' \item education
#' \item society in general
#' \item life
#' \item physical
#' \item information and communication
#' \item medicine
#' \item earth
#' \item construction
#' \item veterinary science
#' \item business
#' \item law
#' \item plant
#' \item buddhism
#' \item engineering general
#' \item folk
#' \item administration
#' \item economic
#' \item math
#' \item korean medicine
#' \item military
#' \item literature
#' \item clothes
#' \item religion normal
#' \item animal
#' \item agriculture
#' \item astronomy
#' \item transport
#' \item natural plain
#' \item industry
#' \item medium
#' \item political
#' \item geography
#' \item mining
#' \item hearing
#' \item fishing
#' \item machinery
#' \item catholic
#' \item book title
#' \item named
#' \item electrical and electronic
#' \item pharmacy
#' \item art, music and physical
#' \item useless
#' \item ocean
#' \item forestry
#' \item christian
#' \item craft
#' \item service
#' \item sports
#' \item food
#' \item art
#' \item environment
#' \item video
#' \item natural resources
#' \item industry general
#' \item smoke
#' \item philosophy
#' \item health general
#' \item proper names general
#' \item welfare
#' \item material
#' \item humanities general
#' }
#' @param user_dic \code{data.frame} which include 'word' and 'tag' fields. User can add more user defined terms and tags.
#' @param replace_usr_dic A logical scala. Should user dictionary needs to be replaced with new user defined dictionary or appended.
#' @param verbose will print detail progress. default \code{FALSE}
#'
#' @export
#' @importFrom RSQLite dbConnect dbGetQuery dbWriteTable dbDisconnect SQLite
#' @importFrom devtools install_url
buildDictionary <- function(ext_dic='woorimalsam', category_dic_nms='',
                            user_dic=data.frame(), replace_usr_dic=FALSE,
                            verbose=FALSE){
  # Build and install a new user dictionary from the NIAdic database plus
  # optional category and user-supplied entries, then reload the analyzer.
  # NOTE(review): 'verbose' is accepted for interface compatibility but is
  # not used anywhere in this implementation.
  # Check that the 'NIAdic' data package is installed.
  # This bootstrap can be removed once NIAdic is available on CRAN.
  if (!nzchar(system.file(package = 'NIAdic'))){
    if(all(c('ggplot2', 'data.table', 'scales', 'rmarkdown', 'knitr') %in% rownames(installed.packages()))){
      install_url("https://github.com/haven-jeon/NIADic/releases/download/0.0.1/NIAdic_0.0.1.tar.gz", dependencies=TRUE, build_vignettes=TRUE)
    }else{
      install_url("https://github.com/haven-jeon/NIADic/releases/download/0.0.1/NIAdic_0.0.1.tar.gz", dependencies=TRUE)
    }
    if(!require('NIAdic', character.only = TRUE)) stop("'NIAdic' Package not found")
  }
  han_db_path <- file.path(system.file(package="NIAdic"), "hangul.db")
  conn <- dbConnect(SQLite(), han_db_path)
  # release the DB handle even if an error occurs below
  on.exit(dbDisconnect(conn))
  ext_dic_df <- data.frame()
  for(dic in unique(ext_dic)){
    switch(dic,
           sejong={
             dic_df <- dbGetQuery(conn, "select term, tag, 'sejong' as dic from sejong")
             ext_dic_df <- rbind(ext_dic_df, dic_df)
           },
           insighter={
             dic_df <- dbGetQuery(conn, "select term, tag, 'insighter' as dic from insighter")
             ext_dic_df <- rbind(ext_dic_df, dic_df)
           },
           woorimalsam={
             dic_df <- dbGetQuery(conn, "select term, tag, 'woorimalsam' as dic from woorimalsam where eng_cate = 'general'")
             ext_dic_df <- rbind(ext_dic_df, dic_df)
           },
           {
             # bug fix: report the unknown name itself ('dic'), not the
             # whole 'ext_dic' vector
             stop(sprintf("No %s dictionary!", dic))
           }
    )
  }
  cate_dic_df <- data.frame()
  # only run the category query when a non-empty category name was supplied;
  # the default '' previously triggered a useless "in ('')" query
  if(is.character(category_dic_nms) && any(nzchar(category_dic_nms))){
    cate_dic_df <- dbGetQuery(conn, sprintf("select term, tag, eng_cate as dic from woorimalsam where eng_cate in (%s)",
                                            paste0("'", category_dic_nms, "'", collapse=',')))
  }
  user_dic_tot <- data.frame()
  # user dictionary processing; && short-circuits so ncol()/nrow() are not
  # evaluated on a non-data.frame input
  if(is.data.frame(user_dic) && ncol(user_dic) == 2 && nrow(user_dic) > 0){
    if(is.factor(user_dic[,2])){
      user_dic[,2] <- as.character(user_dic[,2])
    }
    if(is.factor(user_dic[,1])){
      user_dic[,1] <- as.character(user_dic[,1])
    }
    usrDicEnc <- unique(Encoding(user_dic[,1]))
    if(length(usrDicEnc) > 1){
      stop("check user_dic encodings!\n")
    }
    # normalize words to UTF-8 on non-UTF-8 locales; guard against an NA
    # locale (localeToCharset() can return NA)
    localCharset <- localeToCharset()[1]
    if(!is.na(localCharset) && localCharset != "UTF-8" && usrDicEnc != "UTF-8"){
      user_dic[,1] <- iconv(user_dic[,1], from=localCharset, to="UTF-8")
    }
    # check tag validity for the user dictionary
    errorTags <- Filter(function(x){is.na(tags[x])}, user_dic[,2])
    if(length(errorTags) > 0){
      cat(errorTags, "\n", sep="\t")
      stop("Unsupported tag names on user_dic!\n")
    }
    names(user_dic) <- c("term","tag")
    # when replacing, overwrite the existing table instead of appending;
    # append=FALSE alone fails if the table already exists
    dbWriteTable(conn, "user_dic", user_dic, append=!replace_usr_dic,
                 overwrite=replace_usr_dic)
    user_dic_tot <- dbGetQuery(conn, "select *, 'user' as dic from user_dic")
  }
  result_dic <- rbind(ext_dic_df, cate_dic_df, user_dic_tot)
  # re-check tags over the combined dictionary
  errorTags <- Filter(function(x){is.na(tags[x])}, result_dic[,2])
  if(length(errorTags) > 0){
    cat(errorTags, "\n", sep="\t")
    stop("Unsupported tag names on user_dic!\n")
  }
  Encoding(result_dic$term) <- 'UTF-8'
  UserDic <- get("CurrentUserDic", envir=.KoNLPEnv)
  write.table(unique(result_dic[, c('term', 'tag')]), file=UserDic, quote=FALSE,
              row.names=FALSE, sep="\t", col.names=FALSE, fileEncoding="UTF-8")
  cat(sprintf("%s words dictionary was built.\n", nrow(result_dic)))
  reloadAllDic()
}
|
#' Class tskrrTune
#'
#' The class tskrrTune represents a tuned \code{\link[xnet:tskrr-class]{tskrr}}
#' model, and is the output of the function \code{\link{tune}}. Apart from
#' the model, it contains extra information on the tuning procedure. This is
#' a virtual class only.
#'
#' @slot lambda_grid a list object with the elements \code{k} and possibly
#' \code{g} indicating the tested lambda values for the row kernel \code{K}
#' and - if applicable - the column kernel \code{G}. Both elements have
#' to be numeric.
#' @slot best_loss a numeric value with the loss associated with the
#' best lambdas
#' @slot loss_values a matrix with the loss results from the searched grid.
#' The rows form the X dimension (related to the first lambda), the columns
#' form the Y dimension (related to the second lambda if applicable)
#' @slot loss_function the used loss function
#' @slot exclusion a character value describing the exclusion used
#' @slot replaceby0 a logical value indicating whether or not the cross
#' validation replaced the excluded values by zero
#' @slot onedim a logical value indicating whether the grid search
#' was done in one dimension. For homogeneous networks, this is
#' true by default.
#'
#' @seealso
#' * the function \code{tune} for the tuning itself
#' * the class \code{\link{tskrrTuneHomogeneous}} and
#' \code{tskrrTuneHeterogeneous} for the actual classes.
#' @md
#'
#' @rdname tskrrTune-class
#' @name tskrrTune-class
#' @aliases tskrrTune
#' @exportClass tskrrTune
# S4 class capturing the settings and results of a tskrr tuning run.
# Documented as a virtual base class; see the roxygen block above for
# slot semantics.
setClass("tskrrTune",
         slots = c(
           lambda_grid   = "list",      # tested lambda values (k, optionally g)
           best_loss     = "numeric",   # loss at the best lambda combination
           loss_values   = "matrix",    # loss over the whole searched grid
           loss_function = "function",  # loss function that was used
           exclusion     = "character", # exclusion setting of the CV
           replaceby0    = "logical",   # were excluded values replaced by 0?
           onedim        = "logical"    # was the search one-dimensional?
         ))
# Validity checker for tskrrTune objects: every lambda must be numeric and
# best_loss / exclusion / onedim must be scalars. Returns TRUE when valid,
# otherwise a character description of the problem (setValidity contract).
validTskrrTune <- function(object){
  lgrid <- object@lambda_grid
  # vapply pins the result type; the original sapply could surprise on
  # empty input (the unused 'lossval' local was also dropped)
  if(!all(vapply(lgrid, is.numeric, logical(1))))
    return("lambda_grid should have only numeric elements.")
  if(length(object@best_loss) != 1)
    return("best_loss should be a single value.")
  if(length(object@exclusion) != 1)
    return("exclusion should be a single character value.")
  if(length(object@onedim) != 1)
    return("onedim should be a single logical value.")
  TRUE
}
setValidity("tskrrTune", validTskrrTune)
# Display method for tuned tskrr models: prints the model header followed
# by a section summarising how the tuning was performed.
setMethod("show",
          "tskrrTune",
          function(object){
            # HEADER: model type determines title wording and underline length
            ishomog <- is_homogeneous(object)
            type <- if (ishomog) "homogeneous" else "heterogeneous"
            tl <- if (ishomog) "----------" else "------------"
            cat(paste("Tuned", type, "two-step kernel ridge regression"),
                paste("-----", tl, "--------------------------------", sep = "-"),
                sep = "\n")
            .show_tskrr(object, ishomog)
            # Information on tuning
            excl <- object@exclusion
            if (object@replaceby0) excl <- paste(excl, "(values replaced by 0)")
            if (identical(object@loss_function, loss_mse)) {
              loss_name <- "Mean Squared Error (loss_mse)"
            } else if (identical(object@loss_function, loss_auc)) {
              loss_name <- "Area under curve (loss_auc)"
            } else {
              loss_name <- "custom function by user"
            }
            cat("\nTuning information:\n")
            cat("-------------------\n")
            # bug fix: print 'excl', which carries the "(values replaced by 0)"
            # suffix; the original printed object@exclusion and never used the
            # computed 'excl'
            cat("exclusion setting:", excl, "\n")
            cat("loss value:", object@best_loss, "\n")
            cat("loss function:", loss_name, "\n")
            if (object@onedim && is_heterogeneous(object))
              cat("Grid search done in one dimension.\n")
          })
| /R/Class_tskrrTune.R | no_license | cran/xnet | R | false | false | 3,838 | r | #' Class tskrrTune
#'
#' The class tskrrTune represents a tuned \code{\link[xnet:tskrr-class]{tskrr}}
#' model, and is the output of the function \code{\link{tune}}. Apart from
#' the model, it contains extra information on the tuning procedure. This is
#' a virtual class only.
#'
#' @slot lambda_grid a list object with the elements \code{k} and possibly
#' \code{g} indicating the tested lambda values for the row kernel \code{K}
#' and - if applicable - the column kernel \code{G}. Both elements have
#' to be numeric.
#' @slot best_loss a numeric value with the loss associated with the
#' best lambdas
#' @slot loss_values a matrix with the loss results from the searched grid.
#' The rows form the X dimension (related to the first lambda), the columns
#' form the Y dimension (related to the second lambda if applicable)
#' @slot loss_function the used loss function
#' @slot exclusion a character value describing the exclusion used
#' @slot replaceby0 a logical value indicating whether or not the cross
#' validation replaced the excluded values by zero
#' @slot onedim a logical value indicating whether the grid search
#' was done in one dimension. For homogeneous networks, this is
#' true by default.
#'
#' @seealso
#' * the function \code{tune} for the tuning itself
#' * the class \code{\link{tskrrTuneHomogeneous}} and
#' \code{tskrrTuneHeterogeneous} for the actual classes.
#' @md
#'
#' @rdname tskrrTune-class
#' @name tskrrTune-class
#' @aliases tskrrTune
#' @exportClass tskrrTune
# S4 class capturing the settings and results of a tskrr tuning run.
# Documented as a virtual base class; see the roxygen block above for
# slot semantics.
setClass("tskrrTune",
         slots = c(
           lambda_grid   = "list",      # tested lambda values (k, optionally g)
           best_loss     = "numeric",   # loss at the best lambda combination
           loss_values   = "matrix",    # loss over the whole searched grid
           loss_function = "function",  # loss function that was used
           exclusion     = "character", # exclusion setting of the CV
           replaceby0    = "logical",   # were excluded values replaced by 0?
           onedim        = "logical"    # was the search one-dimensional?
         ))
# Validity checker for tskrrTune objects: every lambda must be numeric and
# best_loss / exclusion / onedim must be scalars. Returns TRUE when valid,
# otherwise a character description of the problem (setValidity contract).
validTskrrTune <- function(object){
  lgrid <- object@lambda_grid
  # vapply pins the result type; the original sapply could surprise on
  # empty input (the unused 'lossval' local was also dropped)
  if(!all(vapply(lgrid, is.numeric, logical(1))))
    return("lambda_grid should have only numeric elements.")
  if(length(object@best_loss) != 1)
    return("best_loss should be a single value.")
  if(length(object@exclusion) != 1)
    return("exclusion should be a single character value.")
  if(length(object@onedim) != 1)
    return("onedim should be a single logical value.")
  TRUE
}
setValidity("tskrrTune", validTskrrTune)
# Display method for tuned tskrr models: prints the model header followed
# by a section summarising how the tuning was performed.
setMethod("show",
          "tskrrTune",
          function(object){
            # HEADER: model type determines title wording and underline length
            ishomog <- is_homogeneous(object)
            type <- if (ishomog) "homogeneous" else "heterogeneous"
            tl <- if (ishomog) "----------" else "------------"
            cat(paste("Tuned", type, "two-step kernel ridge regression"),
                paste("-----", tl, "--------------------------------", sep = "-"),
                sep = "\n")
            .show_tskrr(object, ishomog)
            # Information on tuning
            excl <- object@exclusion
            if (object@replaceby0) excl <- paste(excl, "(values replaced by 0)")
            if (identical(object@loss_function, loss_mse)) {
              loss_name <- "Mean Squared Error (loss_mse)"
            } else if (identical(object@loss_function, loss_auc)) {
              loss_name <- "Area under curve (loss_auc)"
            } else {
              loss_name <- "custom function by user"
            }
            cat("\nTuning information:\n")
            cat("-------------------\n")
            # bug fix: print 'excl', which carries the "(values replaced by 0)"
            # suffix; the original printed object@exclusion and never used the
            # computed 'excl'
            cat("exclusion setting:", excl, "\n")
            cat("loss value:", object@best_loss, "\n")
            cat("loss function:", loss_name, "\n")
            if (object@onedim && is_heterogeneous(object))
              cat("Grid search done in one dimension.\n")
          })
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adexchangebuyer_objects.R
\name{Creative}
\alias{Creative}
\title{Creative Object}
\usage{
Creative(Creative.corrections = NULL, Creative.disapprovalReasons = NULL,
Creative.filteringReasons = NULL,
Creative.filteringReasons.reasons = NULL, Creative.nativeAd = NULL,
Creative.nativeAd.appIcon = NULL, Creative.nativeAd.image = NULL,
Creative.nativeAd.logo = NULL, HTMLSnippet = NULL, accountId = NULL,
advertiserId = NULL, advertiserName = NULL, agencyId = NULL,
apiUploadTimestamp = NULL, attribute = NULL, buyerCreativeId = NULL,
clickThroughUrl = NULL, corrections = NULL, disapprovalReasons = NULL,
filteringReasons = NULL, height = NULL, impressionTrackingUrl = NULL,
nativeAd = NULL, productCategories = NULL, restrictedCategories = NULL,
sensitiveCategories = NULL, status = NULL, vendorType = NULL,
version = NULL, videoURL = NULL, width = NULL)
}
\arguments{
\item{Creative.corrections}{The \link{Creative.corrections} object or list of objects}
\item{Creative.disapprovalReasons}{The \link{Creative.disapprovalReasons} object or list of objects}
\item{Creative.filteringReasons}{The \link{Creative.filteringReasons} object or list of objects}
\item{Creative.filteringReasons.reasons}{The \link{Creative.filteringReasons.reasons} object or list of objects}
\item{Creative.nativeAd}{The \link{Creative.nativeAd} object or list of objects}
\item{Creative.nativeAd.appIcon}{The \link{Creative.nativeAd.appIcon} object or list of objects}
\item{Creative.nativeAd.image}{The \link{Creative.nativeAd.image} object or list of objects}
\item{Creative.nativeAd.logo}{The \link{Creative.nativeAd.logo} object or list of objects}
\item{HTMLSnippet}{The HTML snippet that displays the ad when inserted in the web page}
\item{accountId}{Account id}
\item{advertiserId}{Detected advertiser id, if any}
\item{advertiserName}{The name of the company being advertised in the creative}
\item{agencyId}{The agency id for this creative}
\item{apiUploadTimestamp}{The last upload timestamp of this creative if it was uploaded via API}
\item{attribute}{All attributes for the ads that may be shown from this snippet}
\item{buyerCreativeId}{A buyer-specific id identifying the creative in this ad}
\item{clickThroughUrl}{The set of destination urls for the snippet}
\item{corrections}{Shows any corrections that were applied to this creative}
\item{disapprovalReasons}{The reasons for disapproval, if any}
\item{filteringReasons}{The filtering reasons for the creative}
\item{height}{Ad height}
\item{impressionTrackingUrl}{The set of urls to be called to record an impression}
\item{nativeAd}{If nativeAd is set, HTMLSnippet and videoURL should not be set}
\item{productCategories}{Detected product categories, if any}
\item{restrictedCategories}{All restricted categories for the ads that may be shown from this snippet}
\item{sensitiveCategories}{Detected sensitive categories, if any}
\item{status}{Creative serving status}
\item{vendorType}{All vendor types for the ads that may be shown from this snippet}
\item{version}{The version for this creative}
\item{videoURL}{The url to fetch a video ad}
\item{width}{Ad width}
}
\value{
Creative object
}
\description{
Creative Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A creative and its classification data.
}
\seealso{
Other Creative functions: \code{\link{Creative.corrections}},
\code{\link{Creative.disapprovalReasons}},
\code{\link{Creative.filteringReasons.reasons}},
\code{\link{Creative.filteringReasons}},
\code{\link{Creative.nativeAd.appIcon}},
\code{\link{Creative.nativeAd.image}},
\code{\link{Creative.nativeAd.logo}},
\code{\link{Creative.nativeAd}},
\code{\link{creatives.insert}}
}
| /googleadexchangebuyerv13.auto/man/Creative.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 3,828 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adexchangebuyer_objects.R
\name{Creative}
\alias{Creative}
\title{Creative Object}
\usage{
Creative(Creative.corrections = NULL, Creative.disapprovalReasons = NULL,
Creative.filteringReasons = NULL,
Creative.filteringReasons.reasons = NULL, Creative.nativeAd = NULL,
Creative.nativeAd.appIcon = NULL, Creative.nativeAd.image = NULL,
Creative.nativeAd.logo = NULL, HTMLSnippet = NULL, accountId = NULL,
advertiserId = NULL, advertiserName = NULL, agencyId = NULL,
apiUploadTimestamp = NULL, attribute = NULL, buyerCreativeId = NULL,
clickThroughUrl = NULL, corrections = NULL, disapprovalReasons = NULL,
filteringReasons = NULL, height = NULL, impressionTrackingUrl = NULL,
nativeAd = NULL, productCategories = NULL, restrictedCategories = NULL,
sensitiveCategories = NULL, status = NULL, vendorType = NULL,
version = NULL, videoURL = NULL, width = NULL)
}
\arguments{
\item{Creative.corrections}{The \link{Creative.corrections} object or list of objects}
\item{Creative.disapprovalReasons}{The \link{Creative.disapprovalReasons} object or list of objects}
\item{Creative.filteringReasons}{The \link{Creative.filteringReasons} object or list of objects}
\item{Creative.filteringReasons.reasons}{The \link{Creative.filteringReasons.reasons} object or list of objects}
\item{Creative.nativeAd}{The \link{Creative.nativeAd} object or list of objects}
\item{Creative.nativeAd.appIcon}{The \link{Creative.nativeAd.appIcon} object or list of objects}
\item{Creative.nativeAd.image}{The \link{Creative.nativeAd.image} object or list of objects}
\item{Creative.nativeAd.logo}{The \link{Creative.nativeAd.logo} object or list of objects}
\item{HTMLSnippet}{The HTML snippet that displays the ad when inserted in the web page}
\item{accountId}{Account id}
\item{advertiserId}{Detected advertiser id, if any}
\item{advertiserName}{The name of the company being advertised in the creative}
\item{agencyId}{The agency id for this creative}
\item{apiUploadTimestamp}{The last upload timestamp of this creative if it was uploaded via API}
\item{attribute}{All attributes for the ads that may be shown from this snippet}
\item{buyerCreativeId}{A buyer-specific id identifying the creative in this ad}
\item{clickThroughUrl}{The set of destination urls for the snippet}
\item{corrections}{Shows any corrections that were applied to this creative}
\item{disapprovalReasons}{The reasons for disapproval, if any}
\item{filteringReasons}{The filtering reasons for the creative}
\item{height}{Ad height}
\item{impressionTrackingUrl}{The set of urls to be called to record an impression}
\item{nativeAd}{If nativeAd is set, HTMLSnippet and videoURL should not be set}
\item{productCategories}{Detected product categories, if any}
\item{restrictedCategories}{All restricted categories for the ads that may be shown from this snippet}
\item{sensitiveCategories}{Detected sensitive categories, if any}
\item{status}{Creative serving status}
\item{vendorType}{All vendor types for the ads that may be shown from this snippet}
\item{version}{The version for this creative}
\item{videoURL}{The url to fetch a video ad}
\item{width}{Ad width}
}
\value{
Creative object
}
\description{
Creative Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A creative and its classification data.
}
\seealso{
Other Creative functions: \code{\link{Creative.corrections}},
\code{\link{Creative.disapprovalReasons}},
\code{\link{Creative.filteringReasons.reasons}},
\code{\link{Creative.filteringReasons}},
\code{\link{Creative.nativeAd.appIcon}},
\code{\link{Creative.nativeAd.image}},
\code{\link{Creative.nativeAd.logo}},
\code{\link{Creative.nativeAd}},
\code{\link{creatives.insert}}
}
|
#' Build windows binary package.
#'
#' This function works by bundling the source package, and then uploading it to
#' <https://win-builder.r-project.org/>. Once building is complete you'll
#' receive a link to the built package in the email address listed in the
#' maintainer field. It usually takes around 30 minutes. As a side effect,
#' win-builder also runs `R CMD check` on the package, so `check_win`
#' is also useful to check that your package is ok on windows.
#'
#' @template devtools
#' @inheritParams pkgbuild::build
#' @param manual Should the manual be built?
#' @param email An alternative email to use, default `NULL` uses the package
#'   Maintainer's email.
#' @param quiet If `TRUE`, suppresses output.
#' @param ... Additional arguments passed to [pkgbuild::build()].
#' @family build functions
#' @name check_win
NULL
#' @describeIn check_win Check package on the development version of R.
#' @export
check_win_devel <- function(pkg = ".", args = NULL, manual = TRUE, email = NULL, quiet = FALSE, ...) {
  # Warn (by default) about arguments passed via ... that nothing consumes.
  ellipsis_action <- getOption("devtools.ellipsis_action", rlang::warn)
  check_dots_used(action = ellipsis_action)
  # Thin wrapper: delegate to the shared worker with the R version pinned.
  check_win(pkg = pkg,
            version = "R-devel",
            args = args,
            manual = manual,
            email = email,
            quiet = quiet,
            ...)
}
#' @describeIn check_win Check package on the release version of R.
#' @export
check_win_release <- function(pkg = ".", args = NULL, manual = TRUE, email = NULL, quiet = FALSE, ...) {
  # Warn (by default) about arguments passed via ... that nothing consumes.
  ellipsis_action <- getOption("devtools.ellipsis_action", rlang::warn)
  check_dots_used(action = ellipsis_action)
  # Thin wrapper: delegate to the shared worker with the R version pinned.
  check_win(pkg = pkg,
            version = "R-release",
            args = args,
            manual = manual,
            email = email,
            quiet = quiet,
            ...)
}
#' @describeIn check_win Check package on the previous major release version of R.
#' @export
check_win_oldrelease <- function(pkg = ".", args = NULL, manual = TRUE, email = NULL, quiet = FALSE, ...) {
  # Warn (by default) about arguments passed via ... that nothing consumes.
  ellipsis_action <- getOption("devtools.ellipsis_action", rlang::warn)
  check_dots_used(action = ellipsis_action)
  # Thin wrapper: delegate to the shared worker with the R version pinned.
  check_win(pkg = pkg,
            version = "R-oldrelease",
            args = args,
            manual = manual,
            email = email,
            quiet = quiet,
            ...)
}
# Shared worker behind the check_win_*() wrappers: bundles the source package
# and uploads it to win-builder.r-project.org for building/checking. Results
# are emailed to the package maintainer (or to `email`, if supplied).
check_win <- function(pkg = ".", version = c("R-devel", "R-release", "R-oldrelease"),
                      args = NULL, manual = TRUE, email = NULL, quiet = FALSE, ...) {
  pkg <- as.package(pkg)

  if (!is.null(email)) {
    # Temporarily rewrite the maintainer email in DESCRIPTION; the backup is
    # restored on exit, so the on-disk package is left untouched.
    desc_file <- path(pkg$path, "DESCRIPTION")
    backup <- file_temp()
    file_copy(desc_file, backup)
    on.exit(file_move(backup, desc_file), add = TRUE)

    change_maintainer_email(desc_file, email, call = parent.frame())

    # Re-read the package metadata so the modified maintainer is picked up.
    pkg <- as.package(pkg$path)
  }

  # several.ok = TRUE: the caller may request uploads for multiple R versions.
  version <- match.arg(version, several.ok = TRUE)

  if (!quiet) {
    cli::cli_inform(c(
      "Building windows version of {.pkg {pkg$package}} ({pkg$version})",
      i = "Using {paste(version, collapse = ', ')} with win-builder.r-project.org."
    ))
    email <- maintainer(pkg)$email
    # NOTE(review): this assumes yesno() returns TRUE when the user declines,
    # so answering "no" aborts the upload -- confirm against devtools::yesno().
    if (interactive() && yesno("Email results to {.strong {email}}?")) {
      return(invisible())
    }
  }

  built_path <- pkgbuild::build(pkg$path, tempdir(),
    args = args,
    manual = manual, quiet = quiet, ...
  )
  # Clean up the built tarball when done (runs after the DESCRIPTION restore
  # registered above, since both use add = TRUE).
  on.exit(file_delete(built_path), add = TRUE)

  # One FTP upload per requested R version.
  url <- paste0(
    "ftp://win-builder.r-project.org/", version, "/",
    path_file(built_path)
  )
  lapply(url, upload_ftp, file = built_path)

  if (!quiet) {
    time <- strftime(Sys.time() + 30 * 60, "%I:%M %p")
    email <- maintainer(pkg)$email
    cli::cat_rule(col = "cyan")
    cli::cli_inform(c(
      i = "Check <{.email {email}}> for the results in 15-30 mins (~{time})."
    ))
  }
  invisible()
}
# Rewrite the maintainer's email address in the DESCRIPTION file at `path`.
# Requires the package to declare authors via Authors@R (and not a separate
# Maintainer field), because the address is edited on the "cre" author entry.
change_maintainer_email <- function(path, email, call = parent.frame()) {
  desc <- desc::desc(file = path)

  if (!desc$has_fields("Authors@R")) {
    cli::cli_abort(
      "DESCRIPTION must use {.field Authors@R} field when changing {.arg email}",
      call = call
    )
  }
  if (desc$has_fields("Maintainer")) {
    cli::cli_abort(
      "DESCRIPTION can't use {.field Maintainer} field when changing {.arg email}",
      call = call
    )
  }

  aut <- desc$get_authors()
  roles <- aut$role
  ## Broken person() API, vector for 1 author, list otherwise...
  if (!is.list(roles)) {
    roles <- list(roles)
  }
  # An author is the maintainer when their roles include "cre".
  is_maintainer <- vapply(roles, function(r) all("cre" %in% r), logical(1))
  aut[is_maintainer]$email <- email
  desc$set_authors(aut)

  desc$write()
}
# Stream a local file to `url` over FTP using curl's upload mode.
# The file is fed to the request body in chunks via a read callback.
upload_ftp <- function(file, url, verbose = FALSE) {
  rlang::check_installed("curl")
  stopifnot(file_exists(file))
  stopifnot(is.character(url))

  input <- file(file, open = "rb")
  on.exit(close(input), add = TRUE)

  handle <- curl::new_handle(upload = TRUE, filetime = FALSE)
  curl::handle_setopt(
    handle,
    readfunction = function(n) readBin(input, raw(), n = n),
    verbose = verbose
  )
  curl::curl_fetch_memory(url, handle = handle)
}
| /R/check-win.R | no_license | cran/devtools | R | false | false | 4,696 | r | #' Build windows binary package.
#'
#' This function works by bundling source package, and then uploading to
#' <https://win-builder.r-project.org/>. Once building is complete you'll
#' receive a link to the built package in the email address listed in the
#' maintainer field. It usually takes around 30 minutes. As a side effect,
#' win-build also runs `R CMD check` on the package, so `check_win`
#' is also useful to check that your package is ok on windows.
#'
#' @template devtools
#' @inheritParams pkgbuild::build
#' @param manual Should the manual be built?
#' @param email An alternative email to use, default `NULL` uses the package
#' Maintainer's email.
#' @param quiet If `TRUE`, suppresses output.
#' @param ... Additional arguments passed to [pkgbuild::build()].
#' @family build functions
#' @name check_win
NULL
#' @describeIn check_win Check package on the development version of R.
#' @export
check_win_devel <- function(pkg = ".", args = NULL, manual = TRUE, email = NULL, quiet = FALSE, ...) {
check_dots_used(action = getOption("devtools.ellipsis_action", rlang::warn))
check_win(
pkg = pkg, version = "R-devel", args = args, manual = manual,
email = email, quiet = quiet, ...
)
}
#' @describeIn check_win Check package on the release version of R.
#' @export
check_win_release <- function(pkg = ".", args = NULL, manual = TRUE, email = NULL, quiet = FALSE, ...) {
check_dots_used(action = getOption("devtools.ellipsis_action", rlang::warn))
check_win(
pkg = pkg, version = "R-release", args = args, manual = manual,
email = email, quiet = quiet, ...
)
}
#' @describeIn check_win Check package on the previous major release version of R.
#' @export
check_win_oldrelease <- function(pkg = ".", args = NULL, manual = TRUE, email = NULL, quiet = FALSE, ...) {
check_dots_used(action = getOption("devtools.ellipsis_action", rlang::warn))
check_win(
pkg = pkg, version = "R-oldrelease", args = args, manual = manual,
email = email, quiet = quiet, ...
)
}
check_win <- function(pkg = ".", version = c("R-devel", "R-release", "R-oldrelease"),
args = NULL, manual = TRUE, email = NULL, quiet = FALSE, ...) {
pkg <- as.package(pkg)
if (!is.null(email)) {
desc_file <- path(pkg$path, "DESCRIPTION")
backup <- file_temp()
file_copy(desc_file, backup)
on.exit(file_move(backup, desc_file), add = TRUE)
change_maintainer_email(desc_file, email, call = parent.frame())
pkg <- as.package(pkg$path)
}
version <- match.arg(version, several.ok = TRUE)
if (!quiet) {
cli::cli_inform(c(
"Building windows version of {.pkg {pkg$package}} ({pkg$version})",
i = "Using {paste(version, collapse = ', ')} with win-builder.r-project.org."
))
email <- maintainer(pkg)$email
if (interactive() && yesno("Email results to {.strong {email}}?")) {
return(invisible())
}
}
built_path <- pkgbuild::build(pkg$path, tempdir(),
args = args,
manual = manual, quiet = quiet, ...
)
on.exit(file_delete(built_path), add = TRUE)
url <- paste0(
"ftp://win-builder.r-project.org/", version, "/",
path_file(built_path)
)
lapply(url, upload_ftp, file = built_path)
if (!quiet) {
time <- strftime(Sys.time() + 30 * 60, "%I:%M %p")
email <- maintainer(pkg)$email
cli::cat_rule(col = "cyan")
cli::cli_inform(c(
i = "Check <{.email {email}}> for the results in 15-30 mins (~{time})."
))
}
invisible()
}
change_maintainer_email <- function(path, email, call = parent.frame()) {
desc <- desc::desc(file = path)
if (!desc$has_fields("Authors@R")) {
cli::cli_abort(
"DESCRIPTION must use {.field Authors@R} field when changing {.arg email}",
call = call
)
}
if (desc$has_fields("Maintainer")) {
cli::cli_abort(
"DESCRIPTION can't use {.field Maintainer} field when changing {.arg email}",
call = call
)
}
aut <- desc$get_authors()
roles <- aut$role
## Broken person() API, vector for 1 author, list otherwise...
if (!is.list(roles)) {
roles <- list(roles)
}
is_maintainer <- vapply(roles, function(r) all("cre" %in% r), logical(1))
aut[is_maintainer]$email <- email
desc$set_authors(aut)
desc$write()
}
upload_ftp <- function(file, url, verbose = FALSE) {
rlang::check_installed("curl")
stopifnot(file_exists(file))
stopifnot(is.character(url))
con <- file(file, open = "rb")
on.exit(close(con), add = TRUE)
h <- curl::new_handle(upload = TRUE, filetime = FALSE)
curl::handle_setopt(h, readfunction = function(n) {
readBin(con, raw(), n = n)
}, verbose = verbose)
curl::curl_fetch_memory(url, handle = h)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DataProvider.r
\name{getData}
\alias{getData}
\alias{getData,DataProvider-method}
\title{getData method}
\usage{
getData(object, instrument, phenomenon, dtObs)
\S4method{getData}{DataProvider}(object, instrument, phenomenon, dtObs)
}
\arguments{
\item{object}{a DataProvider object}
\item{instrument}{instrument name}
\item{phenomenon}{phenomenon name}
\item{dtObs}{observation date}
\value{
a vector of value(s)
}
\description{
Extract data from a DataProvider object
}
| /man/getData.Rd | no_license | phenaff/R-Package-fInstrument | R | false | true | 517 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DataProvider.r
\name{getData}
\alias{getData}
\alias{getData,DataProvider-method}
\title{getData method}
\usage{
getData(object, instrument, phenomenon, dtObs)
\S4method{getData}{DataProvider}(object, instrument, phenomenon, dtObs)
}
\arguments{
\item{instrument}{instrument name}
\item{phenomenon}{phenomenon name}
\item{dtObs}{observation date}
}
\value{
a vector of value(s)
}
\description{
Extract data from a DataProvider object
}
|
# Text-mining preprocessing of the first 31 documents in `q`, followed by a
# language-detection example with textcat.
a <- q[1:31, 1]
sample <- Corpus(VectorSource(a), readerControl = list(language = "english"))

# Re-encode to UTF-8 so tolower()/removePunctuation() don't choke on bad bytes.
# Fixed: the original called tm_map() on the undefined object `sample1` and
# discarded the result.
sample <- tm_map(sample, function(x) iconv(enc2utf8(x), sub = "byte"))
sample <- tm_map(sample, tolower)
sample <- tm_map(sample, removePunctuation)
sample <- tm_map(sample, removeNumbers)
sample <- tm_map(sample, removeWords, stopwords('english'))

# Keep a copy of the cleaned (unstemmed) corpus for stem completion below.
# Fixed: `samplecopy` was referenced later but never defined.
samplecopy <- sample
# Fixed: the stemming result was discarded; it is now reassigned.
sample <- tm_map(sample, stemDocument)

dtm <- DocumentTermMatrix(sample)

# NOTE(review): stemCompletion(x, dictionary) expects the stems first and the
# dictionary second; these arguments look swapped -- confirm the intent.
(d <- stemCompletion(samplecopy, c("according")))

# Fixed: the original referenced the undefined object `myCorpus`.
stripWhitespace(sample[[2]])

tdm <- TermDocumentMatrix(sample, control = list(minwordLength = 3))

library("textcat")
# Fixed: the closing parenthesis of c(...) was missing (syntax error).
ExampleText <- c("This is an English sentence.",
                 "Das ist ein deutscher Satz.",
                 "Esta es una frase en espa~nol.")
textcat(ExampleText)
a<-q[1:31,1]
sample<- Corpus(VectorSource(a),readerControl=list(language="english"))
tm_map(sample1, function(x) iconv(enc2utf8(x), sub = "byte"))
sample<-tm_map(sample,tolower)
sample<-tm_map(sample,removePunctuation)
sample<-tm_map(sample,removeNumbers)
sample<-tm_map(sample,removeWords,stopwords('english'))
tm_map(sample,stemDocument)
dtm <- DocumentTermMatrix(sample)
(d <- stemCompletion(samplecopy, c("according")))
stripWhitespace(myCorpus[[2]])
tdm <- TermDocumentMatrix(sample, control=list(minwordLength = 3))
library("textcat")
ExampleText <- c("This is an English sentence.",
"Das ist ein deutscher Satz.",
"Esta es una frase en espa~nol."
textcat(ExampleText) |
library(tidyverse)
library(here)

# The root of the data directory
data_dir <- readLines(here("data_dir.txt"), n = 1)

# Convenience functions, including function datadir() to prepend data directory
# to a relative path
source(here("scripts/convenience_functions.R"))

## Open all the CNN prediction result files
f <- list.files(datadir("cnn_preds"), full.names = TRUE)

# Read one prediction CSV: drop the leading row-index column, record which
# variable the file predicts (its first column name), and standardise that
# column's name to `pred`.
read_pred_file <- function(file) {
  d <- read_csv(file)
  d <- d[, 2:ncol(d)]
  d$varname <- colnames(d)[1]
  rename(d, "pred" = 1)
}

# Read every file and stack them once, instead of growing a data frame with
# bind_rows() inside a loop (which copies the accumulator on each iteration).
# (A redundant pre-computation of a_fuel from the fuel files alone was removed;
# it was overwritten below.)
alldata <- f %>%
  map(read_pred_file) %>%
  bind_rows()

# Fuel transect predictions: one column per predicted fuel variable,
# averaging duplicate predictions per cell.
a_fuel <- alldata %>%
  filter(str_starts(varname, "pred_fuel")) %>%
  pivot_wider(names_from = varname, values_from = pred,
              values_fn = list(pred = mean))

# Plot-level predictions: number repeated observations within a plot so each
# gets its own row after widening.
a_plot <- alldata %>%
  filter(!str_starts(varname, "pred_fuel")) %>%
  select(!cardinaldir) %>%
  group_by(plot_id_std, varname) %>%
  mutate(id = row_number()) %>%
  ungroup() %>%
  pivot_wider(id_cols = c(id, plot_id_std), names_from = varname, values_from = pred)

# Join predictions onto the field data and write the combined tables out.
d_plot <- read_csv(datadir("data_prepped/plotdata_photo_dat.csv"))
plotdat <- left_join(a_plot, d_plot)

d_fuel <- read_csv(datadir("data_prepped/fueltransect_photo_dat.csv"))
fueldat <- left_join(a_fuel, d_fuel)

write_csv(plotdat, datadir("data_w_preds/plotdata.csv"))
write_csv(fueldat, datadir("data_w_preds/fueldata.csv"))
# > names(d)
# [1] "id" "plot_id_std" "pred_cwd_center" "pred_cwd_lowcenter" "pred_litter_center"
# [6] "pred_ntrees_center" "pred_rock_center" "pred_shrub_center" "pred_shrub_lowcenter" "pred_shrubht_center"
# [11] "cardinaldir" "photo_name" "new_filename" "TOS_percent" "TOS_HT_m"
# [16] "BARESOIL" "LITTER" "ROCK" "CWD" "n_trees"
# [21] "natester" "is_valid"
# > names(d)
# [1] "plot_id_std" "cardinaldir" "pred_fuel100h_center" "pred_fuel1h_center"
# [5] "pred_fuel1h_lowcenter" "pred_fuellitter_center" "pred_fuellitter_lowcenter" "photo_name"
# [9] "new_filename" "ct1hr" "ct10hr" "ct100hr"
# [13] "litter" "natester" "is_valid"
# shrub cover
# NOTE(review): this call appears BEFORE plot_fit() is defined below. Running
# the script top-to-bottom will fail here unless plot_fit is already in the
# workspace from a previous run -- consider moving the definition above this call.
plot_fit(title = "Number of trees (plot mean)",
         xvar = "n_trees",
         yvar = "pred_ntrees_center",
         plot_mean = TRUE)
# Scatterplot of observed (xvar) vs CNN-predicted (yvar) values taken from the
# global data frame `plotdat`, annotated with the Pearson correlation.
# The plot is printed to the active device and saved as a PNG named after
# `title` under figures/.
#
# title:     plot title, also used as the output file name
# xvar, yvar: column names (character) in plotdat
# plot_mean: if TRUE, average every column within each plot before plotting
plot_fit <- function(title, xvar, yvar, plot_mean = FALSE) {
  d <- plotdat

  if (plot_mean) {
    d <- d %>%
      group_by(plot_id_std) %>%
      summarize_all(mean)
  }

  # Extract vectors with [[ so cor() returns a plain scalar rather than a
  # 1x1 matrix (d[, xvar] on a tibble stays a data frame).
  r <- cor(d[[xvar]], d[[yvar]])
  r_text <- paste0("r = ", round(r, 2))

  p <- ggplot(d, aes(x = !!as.name(xvar),
                     y = !!as.name(yvar))) +
    geom_point(color = "aquamarine4") +
    annotate("text", label = r_text, x = -Inf, y = Inf, size = 5,
             hjust = -0.2, vjust = 1.5) +
    # geom_abline(a = 0, b = 1) +
    geom_smooth(method = "lm", color = "grey40") +
    labs(x = "Observed", y = "Predicted", title = title) +
    theme_bw(15) +
    theme(panel.grid = element_blank(),
          plot.title = element_text(hjust = 0.5))

  print(p)

  # Write the figure; on.exit guarantees the device is closed even if the
  # second print() errors, so a failed plot can't leak an open PNG device.
  png(datadir(paste0("figures/", title, ".png")), res = 250,
      width = 1000, height = 1000)
  on.exit(dev.off(), add = TRUE)
  print(p)
  invisible(p)
}
| /scripts/evaluate_predictions.R | no_license | youngdjn/fuels-ai | R | false | false | 3,361 | r | library(tidyverse)
library(here)
# The root of the data directory
data_dir = readLines(here("data_dir.txt"), n=1)
# Convenience functions, including function datadir() to prepend data directory to a relative path
source(here("scripts/convenience_functions.R"))
## open all the results
f = list.files(datadir("cnn_preds"),full.names = TRUE)
f_fuel = f[grepl("preds_fuel",f)]
f_plot = f[!grepl("preds_fuel",f)]
a_fuel = map(f_fuel,read_csv)
a_fuel = bind_cols(a_fuel)
alldata = data.frame()
for(i in 1:length(f)) {
file = f[i]
d = read_csv(file)
d = d[,2:ncol(d)]
d$varname = colnames(d)[1]
d = d %>%
rename("pred" = 1)
alldata = bind_rows(alldata,d)
}
a_fuel = alldata %>%
filter(str_starts(varname,"pred_fuel")) %>%
pivot_wider(names_from=varname,values_from=pred, values_fn=list(pred=mean))
a_plot = alldata %>%
filter(!str_starts(varname,"pred_fuel")) %>%
select(!cardinaldir) %>%
group_by(plot_id_std,varname) %>%
mutate(id=row_number()) %>%
ungroup() %>%
pivot_wider(id_cols=c(id,plot_id_std) ,names_from=varname,values_from=pred)
d_plot = read_csv(datadir("data_prepped/plotdata_photo_dat.csv"))
plotdat = left_join(a_plot,d_plot)
d_fuel = read_csv(datadir("data_prepped/fueltransect_photo_dat.csv"))
fueldat = left_join(a_fuel,d_fuel)
write_csv(plotdat,datadir("data_w_preds/plotdata.csv"))
write_csv(fueldat,datadir("data_w_preds/fueldata.csv"))
# > names(d)
# [1] "id" "plot_id_std" "pred_cwd_center" "pred_cwd_lowcenter" "pred_litter_center"
# [6] "pred_ntrees_center" "pred_rock_center" "pred_shrub_center" "pred_shrub_lowcenter" "pred_shrubht_center"
# [11] "cardinaldir" "photo_name" "new_filename" "TOS_percent" "TOS_HT_m"
# [16] "BARESOIL" "LITTER" "ROCK" "CWD" "n_trees"
# [21] "natester" "is_valid"
# > names(d)
# [1] "plot_id_std" "cardinaldir" "pred_fuel100h_center" "pred_fuel1h_center"
# [5] "pred_fuel1h_lowcenter" "pred_fuellitter_center" "pred_fuellitter_lowcenter" "photo_name"
# [9] "new_filename" "ct1hr" "ct10hr" "ct100hr"
# [13] "litter" "natester" "is_valid"
# shrub cover
plot_fit(title = "Number of trees (plot mean)",
xvar = "n_trees",
yvar = "pred_ntrees_center",
plot_mean = TRUE)
plot_fit = function(title,xvar,yvar,plot_mean = FALSE) {
d = plotdat
if(plot_mean) {
d = d %>%
group_by(plot_id_std) %>%
summarize_all(mean)
}
r = cor(d[,xvar],d[,yvar])
r_text = paste0("r = ",r %>% round(2))
p = ggplot(d,aes( x = !!as.name(xvar),
y = !!as.name(yvar))) +
geom_point(color="aquamarine4") +
annotate("text",label=r_text,x=-Inf,y=Inf,size=5, hjust=-0.2,vjust=1.5) +
#geom_abline(a=0,b=1) +
geom_smooth(method="lm", color="grey40") +
labs(x="Observed",y="Predicted", title = title) +
theme_bw(15) +
theme(panel.grid = element_blank(),
plot.title = element_text(hjust = 0.5))
print(p)
png(datadir(paste0("figures/",title,".png")), res=250, width=1000, height=1000)
print(p)
dev.off()
}
|
# Day 4 -- 1 Feb 2019 -- Aadam Rawoot
# Tidy data

library(tidyverse)
library(lubridate)

load("data/SACTN_mangled.RData")
# UTR = underwater temperature recorder

# Fixed: the data argument referenced a misspelled object (`SANCTN1`) and the
# x label used a capitalised argument (`X =`), which labs() silently ignores.
ggplot(data = SACTN1, aes(x = date, y = temp)) +
  geom_line(aes(colour = site, group = paste0(site, src))) +
  labs(x = "date", y = "temperature (°C)") +
  ggtitle("Dates and temperature of various readings using UTR at Port Nolloth")

ggplot(data = SACTN1, aes(x = date, y = temp)) +              # aes() specifies the mapped variables
  geom_line(aes(colour = site, group = paste0(site, src))) +  # paste0() lets you group by multiple variables
  labs(x = "dates", y = "Temperature (°C)", colour = "Site") +
  ggtitle("Dates and temperature of various readings using UTR at Port Nolloth") +
  theme_bw()  # fixed: theme_bw() stood on its own line without `+`, so it was never applied to the plot

# tidyverse reshaping verbs: gather, spread, unite, separate
SACTN2_TIDY <- SACTN2 %>%
  gather(DEA, KZNSB, SAWS, key = "src", value = "temp")  # collapse the three source columns into src/temp

SACTN3_tidy <- SACTN3 %>%
  spread(key = var, value = val)

SACTN4a_tidy <- SACTN4a %>%
  separate(col = index, into = c("site", "src"), sep = "/ ")  # split one column into two

SACTN4b_tidy <- SACTN4b %>%
  unite(year, month, day, col = "date", sep = "-")  # combine year/month/day into a hyphenated date

SACTN4_tidy <- left_join(SACTN4a_tidy, SACTN4b_tidy)  # joins on all shared column names (site, src, date)

# [A.A]
# More comments could be added
# EXplain more
# Script runs complete
| /day 4 Tidy data.R | no_license | AAADDAM/Intro_R_UWC | R | false | false | 1,545 | r | #day4
#1 feb 2019
#Aadam Rawoot
#Tidy data
library(tidyverse)
library(lubridate)
load("data/SACTN_mangled.RData") # loads the SACTN1..SACTN4b exercise objects
#utr- underwater temp reader
# First attempt at the plot. FIX: 'SANCTN1' -> 'SACTN1' (undefined object)
# and 'labs(X =' -> 'labs(x =' (ggplot2 argument names are case-sensitive).
ggplot(data = SACTN1, aes(x = date, y = temp)) +
  geom_line(aes(colour = site, group = paste0(site, src))) +
  labs(x = "date", y = "temperature (°C)") +
  ggtitle("Dates and temperature of various readings using UTR at Port Nolloth")
ggplot(data = SACTN1, aes(x = date, y = temp)) + #aes specifies variables
  geom_line(aes(colour = site, group = paste0(site, src))) + #paste0 lets you group multiple variables
  labs(x = "dates", y = "Temperature (°C)", colour = "Site") +
  ggtitle("Dates and temperature of various readings using UTR at Port Nolloth") + # FIX: '+' was missing, so theme_bw() was never applied
  theme_bw() #correct code- sites by colour groups sites by temp, place and reader
#tidyverse has gather,spread,unite etc functions
SACTN2_TIDY <- SACTN2 %>%
  gather(DEA, KZNSB, SAWS, key = "src", value = "temp") #gather combines many columns into a single variable column
SACTN3_tidy <- SACTN3 %>%
  spread(key = var, value = val) # spread a key/value pair back into columns
SACTN4a_tidy <- SACTN4a %>%
  separate(col = index, into = c("site", "src"), sep = "/ ") #separate one column into 2
SACTN4b_tidy <- SACTN4b %>%
  unite(year, month, day, col = "date", sep = "-") #unite combines year, month, day into a hyphen-separated date
SACTN4_tidy <- left_join(SACTN4a_tidy, SACTN4b_tidy) #left_join joins by all shared columns (site, src, date)
# [A.A]
# More comments could be added
# EXplain more
# Script runs complete
|
## Creates a special "matrix" object that can cache its inverse.
## Returns a list of four closures sharing the environment that holds the
## matrix `x` and its cached inverse `invx`:
##   set(m)      -- replace the matrix and invalidate the cached inverse
##   get()       -- return the current matrix
##   setinv(inv) -- store a computed inverse in the cache
##   getinv()    -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  invx <- NULL
  get <- function() x
  getinv <- function() invx
  setinv <- function(invMatToBeSet) invx <<- invMatToBeSet
  set <- function(newMat) {
    x <<- newMat
    invx <<- NULL  # a new matrix invalidates any previously cached inverse
  }
  list(set = set, get = get, getinv = getinv, setinv = setinv)
}
## Computes the inverse of the special "matrix" returned by makeCacheMatrix
## above. If the inverse has already been calculated (and the matrix has not
## changed), the cached inverse is returned instead of being recomputed.
## FIX: extra arguments in `...` are now forwarded to solve(); the original
## signature accepted `...` but silently dropped it.
cacheSolve <- function(x, ...) {
  invx <- x$getinv()
  if (!is.null(invx)) {
    message("getting cached inverse matrix")
    return(invx)
  }
  newmatrix <- x$get()
  invx <- solve(newmatrix, ...)
  x$setinv(invx)
  invx
  ## Return a matrix that is the inverse of 'x'
}
| /cachematrix.R | no_license | mkronhol/ProgrammingAssignment2 | R | false | false | 882 | r | ##This function creates a "matrix" object that can cache its inverse.
## Builds a cache-aware "matrix" wrapper: a list of accessor closures over a
## shared environment holding the matrix and its lazily cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(value) {
      x <<- value
      cached_inverse <<- NULL  # drop the stale inverse when the matrix changes
    },
    get = function() x,
    getinv = function() cached_inverse,
    setinv = function(value) cached_inverse <<- value
  )
}
## Returns the inverse of the wrapper produced by makeCacheMatrix, reusing a
## previously cached result when one is available; otherwise computes it with
## solve() and stores it back into the cache.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get())
    x$setinv(cached)
    return(cached)
  }
  message("getting cached inverse matrix")
  cached
  ## Return a matrix that is the inverse of 'x'
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alphashape3d.R, R/hxsurf.R
\name{as.mesh3d}
\alias{as.mesh3d}
\alias{as.mesh3d.ashape3d}
\alias{as.mesh3d.hxsurf}
\alias{as.mesh3d.boundingbox}
\title{Convert an object to an rgl mesh3d}
\usage{
\method{as.mesh3d}{ashape3d}(x, tri_to_keep = 2L, ...)
\method{as.mesh3d}{hxsurf}(x, Regions = NULL, material = NULL,
drop = TRUE, ...)
\method{as.mesh3d}{boundingbox}(x, ...)
}
\arguments{
\item{x}{Object to convert to mesh3d}
\item{tri_to_keep}{Which alphashape triangles to keep (expert use only - see
\code{triang} entry in \bold{Value} section of
\code{\link[alphashape3d]{ashape3d}} docs for details.)}
\item{...}{Additional arguments for methods}
\item{Regions}{Character vector or regions to select from \code{hxsurf}
object}
\item{material}{rgl materials such as \code{color}}
\item{drop}{Whether to drop unused vertices (default TRUE)}
}
\value{
a \code{\link[rgl]{mesh3d}} object which can be plotted and
manipulated using \code{\link{rgl}} and \code{nat} packages.
}
\description{
\code{as.mesh3d.ashape3d} converts an
\code{alphashape3d::ashape3d} object into a nat/rgl compatible
\code{mesh3d} surface.
Note that this provides a link to the Rvcg package.
\code{as.mesh3d.boundingbox} converts a nat
\code{\link{boundingbox}} object into an rgl compatible \code{mesh3d}
object.
}
\details{
An \href{https://en.wikipedia.org/wiki/Alpha_shape}{alpha shape} is
a generalisation of a convex hull enclosing a set of points. Unlike a
convex hull, the resultant surface can be partly concave allowing the
surface to more closely follow the set of points.
In this implementation, the parameter alpha is a scale factor with units of
length that defines a spatial domain. When alpha is larger the alpha shape
approaches the convex hull; when alpha is smaller the alpha shape has a
greater number of faces / vertices i.e. it follows the points more closely.
}
\examples{
\donttest{
library(alphashape3d)
kcs20.a=ashape3d(xyzmatrix(kcs20), alpha = 10)
plot(kcs20.a)
# convert to mesh3d
kcs20.mesh=as.mesh3d(kcs20.a)
# check that all points are inside mesh
all(pointsinside(kcs20, kcs20.mesh))
# and show that we can also use the alphashape directly
all(pointsinside(kcs20, kcs20.a))
nclear3d()
wire3d(kcs20.mesh)
plot3d(kcs20, col=type, lwd=2)
}
bb=boundingbox(kcs20)
mbb=as.mesh3d(bb)
\donttest{
plot3d(kcs20)
# simple plot
plot3d(bb)
shade3d(mbb, col='red', alpha=0.3)
}
}
\seealso{
\code{\link[alphashape3d]{ashape3d}}, \code{\link[rgl]{mesh3d}}
\code{\link[rgl]{as.mesh3d}}, \code{\link[rgl]{tmesh3d}},
\code{\link{as.hxsurf}}, \code{\link{read.hxsurf}}
Other hxsurf: \code{\link{as.hxsurf}},
\code{\link{materials}}, \code{\link{plot3d.hxsurf}},
\code{\link{read.hxsurf}}, \code{\link{subset.hxsurf}},
\code{\link{write.hxsurf}}
}
\concept{hxsurf}
| /man/as.mesh3d.Rd | no_license | tomka/nat | R | false | true | 2,879 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alphashape3d.R, R/hxsurf.R
\name{as.mesh3d}
\alias{as.mesh3d}
\alias{as.mesh3d.ashape3d}
\alias{as.mesh3d.hxsurf}
\alias{as.mesh3d.boundingbox}
\title{Convert an object to an rgl mesh3d}
\usage{
\method{as.mesh3d}{ashape3d}(x, tri_to_keep = 2L, ...)
\method{as.mesh3d}{hxsurf}(x, Regions = NULL, material = NULL,
drop = TRUE, ...)
\method{as.mesh3d}{boundingbox}(x, ...)
}
\arguments{
\item{x}{Object to convert to mesh3d}
\item{tri_to_keep}{Which alphashape triangles to keep (expert use only - see
\code{triang} entry in \bold{Value} section of
\code{\link[alphashape3d]{ashape3d}} docs for details.)}
\item{...}{Additional arguments for methods}
\item{Regions}{Character vector or regions to select from \code{hxsurf}
object}
\item{material}{rgl materials such as \code{color}}
\item{drop}{Whether to drop unused vertices (default TRUE)}
}
\value{
a \code{\link[rgl]{mesh3d}} object which can be plotted and
manipulated using \code{\link{rgl}} and \code{nat} packages.
}
\description{
\code{as.mesh3d.ashape3d} converts an
\code{alphashape3d::ashape3d} object into a nat/rgl compatible
\code{mesh3d} surface.
Note that this provides a link to the Rvcg package.
\code{as.mesh3d.boundingbox} converts a nat
\code{\link{boundingbox}} object into an rgl compatible \code{mesh3d}
object.
}
\details{
An \href{https://en.wikipedia.org/wiki/Alpha_shape}{alpha shape} is
a generalisation of a convex hull enclosing a set of points. Unlike a
convex hull, the resultant surface can be partly concave allowing the
surface to more closely follow the set of points.
In this implementation, the parameter alpha is a scale factor with units of
length that defines a spatial domain. When alpha is larger the alpha shape
approaches the convex hull; when alpha is smaller the alpha shape has a
greater number of faces / vertices i.e. it follows the points more closely.
}
\examples{
\donttest{
library(alphashape3d)
kcs20.a=ashape3d(xyzmatrix(kcs20), alpha = 10)
plot(kcs20.a)
# convert to mesh3d
kcs20.mesh=as.mesh3d(kcs20.a)
# check that all points are inside mesh
all(pointsinside(kcs20, kcs20.mesh))
# and show that we can also use the alphashape directly
all(pointsinside(kcs20, kcs20.a))
nclear3d()
wire3d(kcs20.mesh)
plot3d(kcs20, col=type, lwd=2)
}
bb=boundingbox(kcs20)
mbb=as.mesh3d(bb)
\donttest{
plot3d(kcs20)
# simple plot
plot3d(bb)
shade3d(mbb, col='red', alpha=0.3)
}
}
\seealso{
\code{\link[alphashape3d]{ashape3d}}, \code{\link[rgl]{mesh3d}}
\code{\link[rgl]{as.mesh3d}}, \code{\link[rgl]{tmesh3d}},
\code{\link{as.hxsurf}}, \code{\link{read.hxsurf}}
Other hxsurf: \code{\link{as.hxsurf}},
\code{\link{materials}}, \code{\link{plot3d.hxsurf}},
\code{\link{read.hxsurf}}, \code{\link{subset.hxsurf}},
\code{\link{write.hxsurf}}
}
\concept{hxsurf}
|
options(scipen=999,digits = 4)
library(strucchange)
library(rlist)
library(tidyverse)
library(parallel)
library(timeSeries)
library(zoo)
#setwd("/mnt/MyDoc/Dropbox/Research/MonitoringStructureBreaks/code")
# S4 container describing one BSQUID structural-break detection run.
setClass(
  "Bsquid",
  slots = c(
    vec = "numeric",     # observed data series Y
    rho0 = "numeric",    # initial state distribution (length 2: pre-break, post-break)
    rho = "numeric",     # transition probability into the post-break state
    p21 = "numeric",     # transition probability back from the post-break state
    param = "list" ,     # prior grids for the post-break mean mu1 (see commented example below)
    cnFctr = "numeric"   # factor `c` passed to getPistar for the stopping rule
  )
)
## sample class
#bs <- new("Bsquid",
# vec = df1$y,
# rho0 = c(0.5,0.5),
# rho = 0.01,
# p21 = 0,
# param = list(mu1.positive=list(prior=c(0.50,1.50),step=0.01),
# mu1.negative=list(prior=c(-1.50,-0.50),step=0.01)),
# cnFctr = 0.01
# )
# Generic entry point for running the BSQUID analysis on a Bsquid object.
setGeneric(name = "Bsquid.main",
           def = function(theObject)
           {
             standardGeneric("Bsquid.main")
           })
# Main routine: evaluates the filter likelihood over the mu1 prior grid,
# weights each grid point by its posterior probability, and returns
# c(posterior mean of mu1, posterior break probability, stopping threshold pi*).
setMethod("Bsquid.main",
          signature = "Bsquid",
          definition = function(theObject)
          {
            # Fixed quantities shared by every likelihood evaluation
            others <- list(
              Y = theObject@vec,
              rho0 = theObject@rho0,
              rho = theObject@rho,
              p21 = theObject@p21
            )
            # Candidate post-break means drawn from both prior ranges
            pm <- c(seq(theObject@param$mu1.positive$prior[1],theObject@param$mu1.positive$prior[2],by=theObject@param$mu1.positive$step),
                    seq(theObject@param$mu1.negative$prior[1],theObject@param$mu1.negative$prior[2],by=theObject@param$mu1.negative$step))
            # One row per mu1: c(log-likelihood, filtered break probability)
            pp <- t(sapply(pm,getLikelihood, others=others, simplify = TRUE,USE.NAMES = FALSE))
            p.df <- data.frame(mu1=pm,loglk=pp[,1],pi=pp[,2])
            # Posterior weights: softmax of the log-likelihoods, mean-centred
            # before exponentiation to avoid overflow
            p.df$w <- exp(p.df$loglk-mean(p.df$loglk))/(sum(exp(p.df$loglk-mean(p.df$loglk))))
            # NOTE(review): `param.space <- ...` inside the call is an assignment
            # used as an argument; clearer written as a plain named argument.
            pistar <- getPistar(param.space <- p.df[,c(1,4)], c=theObject@cnFctr, rho=theObject@rho)
            return(c(sum(p.df$mu1*p.df$w), sum(p.df$pi*p.df$w), pistar))
          }
)
############################################################################
############################################################################
############################################################################
# Forward (Hamilton) filter for a two-state Gaussian regime model.
# State 1: N(0, 1); state 2: N(mu1, 1).  `others` carries:
#   Y    -- observation vector
#   rho  -- transition probability into state 2
#   rho0 -- initial state distribution (length 2)
#   p21  -- transition probability from state 2 back to state 1
# Returns c(total log-likelihood, filtered P(state 2) after the last point).
# FIX: the original iterated `for (t in 2:length(Y))`, which runs backwards
# (2, 1) and indexes out of bounds when length(Y) == 1; seq_len() fixes this.
getLikelihood = function(mu1 = numeric(), others = list()) {
  obs <- others$Y
  # Per-observation densities under each regime (cols: state 1, state 2)
  dens <- cbind(dnorm(obs, mean = 0, sd = 1), dnorm(obs, mean = mu1, sd = 1))
  # Column-stochastic transition matrix:
  #        from 1    from 2
  # to 1 [ 1-rho      p21   ]
  # to 2 [  rho      1-p21  ]
  trans <- matrix(c(1 - others$rho, others$p21,
                    others$rho, 1 - others$p21),
                  nrow = 2, ncol = 2, byrow = TRUE)
  log_lik <- 0
  state_prob <- others$rho0     # filtered state distribution, seeded at t = 0
  last_pi2 <- NA_real_
  for (t in seq_len(length(obs))) {
    unnormalized <- (trans %*% state_prob) * dens[t, ]
    step_lik <- sum(unnormalized)   # one-step predictive likelihood
    log_lik <- log_lik + log(step_lik)
    state_prob <- unnormalized / step_lik
    last_pi2 <- state_prob[2]
  }
  c(log_lik, last_pi2)
}
# One column of the belief transition kernel: given the current break belief
# `pi`, distribute probability mass over the discretised next-period beliefs.
# T.lst carries the precomputed density table (df), an output template
# (t.tmp) and the break hazard (rho); built by getTranMatrix.
getTranVector <- function(pi, T.lst) {
  rho <- T.lst$rho
  t.tmp <- T.lst$t.tmp
  df <- T.lst$df
  # Updated belief, rounded onto a 1..100 integer grid, at each data point
  num <- df$lr * (pi + rho * (1 - pi))
  like <- round(100 * num / (num + (1 - rho) * (1 - pi)))
  # FIX: the original guard was `like[which(like == 'inf')] <- 1`, which
  # coerces `like` to character ("Inf"/"NaN" != "inf") and can never fire.
  # Map non-finite bins (Inf/NaN from density underflow in lr) to bin 1,
  # matching the original's apparent intent.
  like[!is.finite(like)] <- 1
  df$l <- like
  # Aggregate the sampling probabilities of all data points falling in each bin
  pp <- df %>% group_by(l) %>% summarize(pp1 = sum(P1), pp2 = sum(P2))
  # Mix pre- and post-break sampling distributions by the current belief
  pp$P <- (1 - pi) * (1 - rho) * pp$pp1 + (pi + (1 - pi) * rho) * pp$pp2
  pp <- pp[which(pp$l > 0), ]
  pp$P <- pp$P / sum(pp$P)   # renormalise over the retained bins
  t.tmp[pp$l] <- pp$P
  return(t.tmp)
}
# Build the belief-transition matrix for one candidate post-break mean.
# arg1 = mu1, arg2 = rho (break hazard), arg3 = discretised data space,
# arg4 = discretised belief grid.  NOTE: the names arg2..arg4 are part of
# the interface -- getPistar passes them by name through lapply().
getTranMatrix <- function(arg1,arg2,arg3,arg4){
  mu1 = arg1
  rho = arg2
  smplSp = arg3
  pi.v = arg4
  T.lst <- list(df=data.frame(
    Z1 = dnorm(smplSp,mean=0,sd=1),                                 # pre-break density on the grid
    Z2 = dnorm(smplSp,mean=mu1,sd=1),                               # post-break density on the grid
    P1 = dnorm(smplSp,mean=0,sd=1)/sum(dnorm(smplSp,mean=0,sd=1)),  # normalised grid probabilities
    P2 = dnorm(smplSp,mean=mu1,sd=1)/sum(dnorm(smplSp,mean=mu1,sd=1)),
    lr = dnorm(smplSp,mean=mu1,sd=1)/dnorm(smplSp,mean=0,sd=1) ),   # likelihood ratio post/pre
    t.tmp = vector(mode='numeric',length=length(pi.v)),             # reusable output template
    rho = rho
  )
  return(sapply(pi.v,getTranVector,T.lst))
  #piPrime is a matrix with dimension: size of pi by size of data space
  #the matrix is defined as given pi what is pi_prime at each data point
}
############################################################################
############################################################################
############################################################################
# the optimization function will take the parameter space as an input.
# the value of pi_prime will be calculated for each theta
# then aggregate the pi_prime across the parameter space with the given weight
# there are two important spaces in this function: the data space and the pi space
# Value-iteration solver for the optimal stopping threshold pi*.
# param.space: data.frame with columns mu1 (candidate post-break means) and
#              w (their posterior weights) -- i.e. p.df[, c(1, 4)] upstream.
# c:   per-period cost factor in the continuation value.
# rho: break hazard used when building the belief transition matrix.
# Returns the belief pi* at which stopping first becomes optimal, or NA when
# value iteration fails to converge within 500 sweeps.
getPistar=function(param.space,c,rho){
  ## Discretised data space for the next observation
  lowBound <- -3
  upperBound <- 3
  inc <- 0.01
  smplSp <- seq(from=lowBound, to=upperBound, by=inc)
  ## Discretised belief space (pi = 0 excluded)
  P <- seq(from=0,to=1,by=0.01)
  pi.v <- P[which(P>0)]
  # Posterior-weighted average of the per-mu1 transition matrices.
  # FIX: stored in `trans` instead of `T`, which shadowed the TRUE alias.
  mx.pm <- lapply(param.space$mu1, getTranMatrix,arg2=rho,arg3=smplSp,arg4=pi.v)
  trans <- Reduce("+",Map("*", mx.pm, param.space$w))
  h <- c*trans%*%pi.v+1+(c-1)*pi.v   # value of continuing one more period
  g <- as.matrix(1-pi.v)             # value of stopping now
  Q <- pmin(g,h)
  ep <- 1
  cnt <- 0
  # FIX: scalar loop condition now uses && (was the vectorised &); the
  # no-op `rho <- rho` and unused `r <- cbind(g, h)` were removed.
  while(ep > 0.001 && cnt < 500){
    Q1 <- Q
    cnt <- cnt+1
    h <- trans%*%Q+c*pi.v
    Q <- pmin(g,h)
    ep <- max(abs(Q1-Q),0,na.rm=TRUE)
    diff <- (g-h)^2
    if(cnt < 499){
      piStar <- pi.v[which.min(diff)]   # belief where stop/continue values meet
    }else{
      piStar <- NA                      # did not converge
    }
  }
  return(piStar)
} | /code/Archived/Bsquid.1.0.R | no_license | ethorondor/BSPT | R | false | false | 5,514 | r | options(scipen=999,digits = 4)
library(strucchange)
library(rlist)
library(tidyverse)
library(parallel)
library(timeSeries)
library(zoo)
#setwd("/mnt/MyDoc/Dropbox/Research/MonitoringStructureBreaks/code")
# S4 container describing one BSQUID structural-break detection run.
setClass(
  "Bsquid",
  slots = c(
    vec = "numeric",     # observed data series Y
    rho0 = "numeric",    # initial state distribution (length 2: pre-break, post-break)
    rho = "numeric",     # transition probability into the post-break state
    p21 = "numeric",     # transition probability back from the post-break state
    param = "list" ,     # prior grids for the post-break mean mu1 (see commented example below)
    cnFctr = "numeric"   # factor `c` passed to getPistar for the stopping rule
  )
)
## sample class
#bs <- new("Bsquid",
# vec = df1$y,
# rho0 = c(0.5,0.5),
# rho = 0.01,
# p21 = 0,
# param = list(mu1.positive=list(prior=c(0.50,1.50),step=0.01),
# mu1.negative=list(prior=c(-1.50,-0.50),step=0.01)),
# cnFctr = 0.01
# )
# Generic entry point for running the BSQUID analysis on a Bsquid object.
setGeneric(name = "Bsquid.main",
           def = function(theObject)
           {
             standardGeneric("Bsquid.main")
           })
# Main routine: evaluates the filter likelihood over the mu1 prior grid,
# weights each grid point by its posterior probability, and returns
# c(posterior mean of mu1, posterior break probability, stopping threshold pi*).
setMethod("Bsquid.main",
          signature = "Bsquid",
          definition = function(theObject)
          {
            # Fixed quantities shared by every likelihood evaluation
            others <- list(
              Y = theObject@vec,
              rho0 = theObject@rho0,
              rho = theObject@rho,
              p21 = theObject@p21
            )
            # Candidate post-break means drawn from both prior ranges
            pm <- c(seq(theObject@param$mu1.positive$prior[1],theObject@param$mu1.positive$prior[2],by=theObject@param$mu1.positive$step),
                    seq(theObject@param$mu1.negative$prior[1],theObject@param$mu1.negative$prior[2],by=theObject@param$mu1.negative$step))
            # One row per mu1: c(log-likelihood, filtered break probability)
            pp <- t(sapply(pm,getLikelihood, others=others, simplify = TRUE,USE.NAMES = FALSE))
            p.df <- data.frame(mu1=pm,loglk=pp[,1],pi=pp[,2])
            # Posterior weights: softmax of the log-likelihoods, mean-centred
            # before exponentiation to avoid overflow
            p.df$w <- exp(p.df$loglk-mean(p.df$loglk))/(sum(exp(p.df$loglk-mean(p.df$loglk))))
            # NOTE(review): `param.space <- ...` inside the call is an assignment
            # used as an argument; clearer written as a plain named argument.
            pistar <- getPistar(param.space <- p.df[,c(1,4)], c=theObject@cnFctr, rho=theObject@rho)
            return(c(sum(p.df$mu1*p.df$w), sum(p.df$pi*p.df$w), pistar))
          }
)
############################################################################
############################################################################
############################################################################
# Forward (Hamilton) filter for a two-state Gaussian regime model:
# state 1 ~ N(0, 1), state 2 ~ N(mu1, 1).
# Returns c(total log-likelihood, filtered P(state 2) after the last point).
getLikelihood = function(mu1=numeric(), others=list() ){
  Y = others$Y
  rho = others$rho
  rho0 = others$rho0
  p21 = others$p21
  #initialize result can improve performance
  l <- cbind(dnorm(Y,mean=0,sd=1),dnorm(Y,mean=mu1,sd=1))   # per-state densities
  pi <- matrix(data=0,nrow=length(Y),ncol=2)                # filtered state probabilities
  f <- vector(mode = 'numeric',length=length(Y))            # one-step predictive likelihoods
  # state   1    2
  # p = [ 1-rho, p21
  #       rho,1-p21]
  p <- matrix(c(1-rho,p21 ,rho,1-p21),nrow=2,ncol=2,byrow = TRUE)
  f[1] <- c(1,1)%*%(p%*%rho0*l[1,])
  pi[1,] <- (p%*%rho0*l[1,])/f[1]
  # NOTE(review): 2:length(Y) misbehaves when length(Y) == 1 (iterates 2, 1
  # and indexes out of bounds); use seq_len() if single observations occur.
  for(tmp.c in 2:length(Y)){
    f[tmp.c] <- c(1,1)%*%(p%*%pi[tmp.c-1,]*l[tmp.c,])
    pi[tmp.c,] <- (p%*%pi[tmp.c-1,]*l[tmp.c,])/f[tmp.c]
  }
  return(c(sum(log(f)), pi[length(Y),2]))
}
# One column of the belief transition kernel: given the current break belief
# `pi`, distribute probability mass over the discretised next-period beliefs.
getTranVector <- function(pi,T.lst){
  rho <- T.lst$rho
  t.tmp <- T.lst$t.tmp
  df <- T.lst$df
  # Updated belief, rounded onto a 1..100 integer grid, at each data point
  like <- round(100*(df$lr*(pi+rho*(1-pi)))/(df$lr*(pi+rho*(1-pi))+(1-rho)*(1-pi)))
  # NOTE(review): `like == 'inf'` compares against the *string* "inf" after
  # character coercion ("Inf"/"NaN" != "inf"), so this guard never fires;
  # it should be `like[!is.finite(like)] <- 1`.
  like[which(like=='inf')] <- 1
  df$l <- like
  pp <- (df%>%group_by(l)%>%summarize(pp1=sum(P1),pp2=sum(P2)))
  # Mix pre- and post-break sampling distributions by the current belief
  pp$P <- (1-pi)*(1-rho)*pp$pp1+(pi+(1-pi)*rho)*pp$pp2
  pp <- pp[which(pp$l>0),]
  pp$P <- pp$P/sum(pp$P)   # renormalise over the retained bins
  t.tmp[pp$l] = pp$P
  return(t.tmp)
}
# Build the belief-transition matrix for one candidate post-break mean.
# arg1 = mu1, arg2 = rho (break hazard), arg3 = discretised data space,
# arg4 = discretised belief grid.  NOTE: the names arg2..arg4 are part of
# the interface -- getPistar passes them by name through lapply().
getTranMatrix <- function(arg1,arg2,arg3,arg4){
  mu1 = arg1
  rho = arg2
  smplSp = arg3
  pi.v = arg4
  T.lst <- list(df=data.frame(
    Z1 = dnorm(smplSp,mean=0,sd=1),                                 # pre-break density on the grid
    Z2 = dnorm(smplSp,mean=mu1,sd=1),                               # post-break density on the grid
    P1 = dnorm(smplSp,mean=0,sd=1)/sum(dnorm(smplSp,mean=0,sd=1)),  # normalised grid probabilities
    P2 = dnorm(smplSp,mean=mu1,sd=1)/sum(dnorm(smplSp,mean=mu1,sd=1)),
    lr = dnorm(smplSp,mean=mu1,sd=1)/dnorm(smplSp,mean=0,sd=1) ),   # likelihood ratio post/pre
    t.tmp = vector(mode='numeric',length=length(pi.v)),             # reusable output template
    rho = rho
  )
  return(sapply(pi.v,getTranVector,T.lst))
  #piPrime is a matrix with dimension: size of pi by size of data space
  #the matrix is defined as given pi what is pi_prime at each data point
}
############################################################################
############################################################################
############################################################################
# the optimization function will take the parameter space as an input.
# the value of pi_prime will be calculated for each theta
# then aggregate the pi_prime across the parameter space with the given weight
# there are two important spaces in this function: the data space and the pi space
# Value-iteration solver for the optimal stopping threshold pi*.
# param.space has columns mu1 (candidate means) and w (posterior weights).
# NOTE(review): `T <- Reduce(...)` shadows the TRUE alias; `rho <- rho` is a
# no-op; `r <- cbind(g,h)` is unused; the scalar while condition should use &&.
getPistar=function(param.space,c,rho){
  ##specify data space by specify the upper and lower bound
  #param.space <- p.df[,c(1,4)]
  #c = 0.01
  rho <- rho
  lowBound <- -3
  upperBound <- 3
  inc = 0.01
  smplSp <- seq(from=lowBound, to=upperBound, by=inc)
  P <- seq(from=0,to=1,by=0.01)
  pi.v <- P[which(P>0)]   # belief grid, excluding pi = 0
  # get weighted average of transition matrix across parameter space, weighted with posterior parameter distribution
  mx.pm <- lapply(param.space$mu1, getTranMatrix,arg2=rho,arg3=smplSp,arg4=pi.v)
  T <- Reduce("+",Map("*", mx.pm, param.space$w))
  h <- c*T%*%pi.v+1+(c-1)*pi.v   # value of continuing one more period
  g <- as.matrix(1-pi.v)         # value of stopping now
  r <- cbind(g,h)
  Q <- pmin(g,h)
  ep = 1
  cnt = 0
  while(ep > 0.001 & cnt <500){
    Q1 <- Q
    cnt = cnt+1
    h <- T%*%Q+c*pi.v
    Q <- pmin(g,h)
    ep <- max(abs(Q1-Q),0,na.rm=TRUE)
    diff <- (g-h)^2
    if(cnt < 499){
      piStar <- pi.v[which.min(diff)]   # belief where stop/continue values meet
    }else{
      piStar <- NA   # did not converge within 500 sweeps
    }
  }
  return(piStar)
} |
library(dplyr)
library(mcclust)
# Collapse MCMC clustering label traces (rows = samples) into one hard
# clustering by maximising the posterior expected adjusted Rand index
# (MPEAR) via mcclust::comp.psm / mcclust::maxpear.
compute_mpear_label <- function(label_traces){
  trace_matrix <- as.matrix(label_traces) + 1  # mcclust expects 1-based labels
  posterior_similarity <- comp.psm(trace_matrix)
  maxpear(posterior_similarity)$cl
}
# Replicate ggplot2's default discrete colour scale: n hues equally spaced
# around the HCL colour wheel at luminance 65 and chroma 100.
# FIX: `length` was only a partial match for seq()'s `length.out` argument
# (now spelled out), and seq_len(n) replaces 1:n so n = 0 returns an empty
# palette instead of erroring.
gg_color_hue <- function(n) {
  hues <- seq(15, 375, length.out = n + 1)  # first and last hue coincide (mod 360)
  hcl(h = hues[seq_len(n)], l = 65, c = 100)
}
#' bsxfun {pracma} with single expansion (Matlab style)
#' @param func the function used by bsxfun
#' @param x a matrix
#' @param y a vector recycled (expanded) to the shape of `x`
#' @param expandByRow tie-breaker, applies only when x is a square matrix:
#'   if TRUE, y is treated as one value per column (repeated down each row)
#' @return value of func applied elementwise to x and the expanded y
bsxfun.se <- function(func, x, y, expandByRow=TRUE) {
  # Scalar y needs no expansion at all.
  if(length(y) == 1) return(pracma::arrayfun(func, x, y)) else
    stopifnot(nrow(x) == length(y) || ncol(x) == length(y))
  # Decide along which dimension y has to be replicated.
  expandCol <- nrow(x) == length(y)
  expandRow <- ncol(x) == length(y)
  # For square x both tests succeed; expandByRow breaks the tie.
  if(expandCol & expandRow & expandByRow) expandCol <- FALSE
  if(expandCol & expandRow & !expandByRow) expandRow <- FALSE
  # repeat row (if dim2expand = 1, then length(y) = ncol(x))
  if(expandRow) y.repmat <- matrix(rep(as.numeric(y), each=nrow(x)), nrow=nrow(x))
  # repeat col (if dim2expand = 2, then length(y) = nrow(x))
  if(expandCol) y.repmat <- matrix(rep(as.numeric(y), ncol(x)), ncol=ncol(x))
  pracma::bsxfun(func, x, y.repmat)
}
# matlab style helper
## R equivalent of MATLAB's repmat: tile matrix X in an m-by-n block grid.
repmat <- function(X,m,n){
  n_row <- dim(X)[1]
  n_col <- dim(X)[2]
  # Recycle X column-wise into an n-wide strip, then replicate that strip
  # m times row-wise via the transpose/byrow trick.
  strip <- matrix(X, n_row, n_col * n)
  matrix(t(strip), n_row * m, n_col * n, byrow = TRUE)
}
#' Compute log(sum(exp(x),dim)) while avoiding numerical underflow
#' @param x a matrix
#' @param margin used for apply (1 = per row, 2 = per column)
#' @return log(sum(exp(x),dim)) a matrix of the sample size as x
logsumexp <- function(x, margin=1) {
  if ( ! is.matrix(x) ) {
    x <- as.matrix(x)
  }
  # subtract the largest in each column (standard log-sum-exp shift)
  y <- apply(x, margin, max)
  # bsxfun.se needs the tie broken explicitly when x is square, because the
  # max vector then matches both dimensions.
  if (nrow(x) == ncol(x)) {
    x <- bsxfun.se("-", x, y, expandByRow = F)
  } else {
    x <- bsxfun.se("-", x, y)
  }
  s <- y + log(apply(exp(x), margin, sum))
  # Entries whose shifted sum still underflowed fall back to the max term.
  i <- which(!is.finite(s))
  if(length(i) > 0) s[i] <- y[i]
  s
}
# Soft-assign points x to 1-D clusters with centres `centers` and spread
# parameters `s` using a Gaussian-style kernel -(x-c)^2/(2 s) - log(s);
# returns responsibilities R (rows sum to 1) and their logs (cf. 10.49).
Assign <- function(x, centers, s) {
  n <- length(x)
  k <- length(centers)
  logRho <- array(0, dim= c(n ,k))
  for (ii in 1:k) {
    # Unnormalised log weight of every x under component ii
    logRho[,ii] = bsxfun.se("-", -(x-centers[ii])^2/(2*s[ii]), log(s[ii]))
  }
  # Normalise per data point; the square case needs the explicit tie-break.
  if (n==k) {
    logR <- bsxfun.se("-", logRho, logsumexp(logRho, 1), expandByRow = F) # 10.49
  } else {
    logR <- bsxfun.se("-", logRho, logsumexp(logRho, 1)) # 10.49
  }
  R <- exp(logR)
  return(list(R=R, logR=logR))
}
# Pick the most likely mutation multiplicity (0..max(major_cn)) for each
# variant by maximising a binomial log-likelihood of the observed reads.
# bn/dn: variant/total read counts; ccf: cancer cell fraction;
# epi: sequencing error rate.
GetMultFromCcf <- function(bn, dn, ccf, major_cn, minor_cn, purity, epi = 1e-3) {
  total_cn = major_cn + minor_cn
  # Average copy number per cell in the normal/tumour mixture
  z = (1-purity)*2 + purity*total_cn
  k = max(major_cn)
  multPool <- seq(0, k, by = 1)
  # Expected VAF for every candidate multiplicity (columns 0..k)
  aa = repmat(as.matrix(purity * ccf * (1 - epi) / z), 1, k+1)
  bb = repmat(as.matrix(epi * ( z - purity * (1-ccf) * total_cn)), 1, k+1)
  pp = bsxfun.se("*", aa, multPool) + bb
  # Binomial log-likelihood of bn variant reads out of dn at probability pp
  ll = bsxfun.se("*", log(pp), bn) + bsxfun.se("*", log(1-pp), dn-bn)
  return(multPool[apply(ll, 1, which.max)])
}
# Summarise per-mutation CCF traces into cluster-level statistics.
#   ccfTraces: matrix, one row per mutation, columns = MCMC samples
#   idx:       mutation ids (row order of ccfTraces)
#   label:     cluster assignment per mutation (e.g. the MPEAR labels)
# Clusters holding <= 1% of mutations or a single member get NA summaries;
# their members are then re-assigned to the nearest well-supported cluster.
GetCcfFromLabel <- function(ccfTraces, idx, label) {
  allData <- data.frame(mutation_id = idx, cluster_id = label,
                        ccf = apply(ccfTraces, 1, median, na.rm = T), stringsAsFactors = F)
  idxDf <- data.frame(mutation_id = idx, stringsAsFactors = F)
  tt <- table(allData$cluster_id)
  clusterMean <- vector(mode = "numeric", length = length(tt))
  clusterSd <- clusterMean
  for (ii in seq_along(tt)) {
    dataIdx <- which(allData$cluster_id %in% as.integer(names(tt[ii])))
    # Pooled median over all trace values of the cluster's mutations
    clusterMean[ii] <- if (tt[ii]/sum(tt) > 0.01 &
                           length(dataIdx) > 1
    ) {
      median(c( ccfTraces[dataIdx, ]), na.rm = T )
    } else {NA}
    clusterSd[ii] <- if (tt[ii]/sum(tt) > 0.01 &
                         length(dataIdx) > 1
    ) {
      # Guard against zero spread so the CI stays non-degenerate
      if (sd(c( ccfTraces[dataIdx, ]), na.rm = T) ==0) {
        1e-20
      } else { sd( c( ccfTraces[dataIdx, ]), na.rm = T)}
    } else {NA}
  }
  clusterDf <- data.frame(cluster_id=as.integer(names(tt)),
                          average_ccf = clusterMean,
                          lower_95_ci = clusterMean - 2*clusterSd,
                          upper_95_ci = clusterMean + 2*clusterSd)
  allData <- left_join(allData, clusterDf, by="cluster_id" )
  clusterDf <- filter(clusterDf, !is.na(average_ccf) & !is.na(lower_95_ci))
  ## Reassign low support data
  lowSupportData <- dplyr::filter(allData, is.na(average_ccf) | is.na(lower_95_ci))
  allData <- dplyr::filter(allData, !is.na(average_ccf))
  lowSupportDataFlag <- F
  if (nrow(lowSupportData) > 0) {
    lowSupportDataFlag <- T
    # Soft-assign orphaned mutations to surviving clusters by their CCF
    lowSupportR <- Assign(lowSupportData$ccf, clusterDf$average_ccf,
                          ((clusterDf$upper_95_ci - clusterDf$average_ccf)/2)^2)$R
    lowSupportLabel <- apply(lowSupportR, 1, which.max)
    lowSupportData$cluster_id <- clusterDf$cluster_id[lowSupportLabel]
    lowSupportData$average_ccf <- clusterDf$average_ccf[lowSupportLabel]
    lowSupportData$lower_95_ci <- clusterDf$lower_95_ci[lowSupportLabel]
    lowSupportData$upper_95_ci <- clusterDf$upper_95_ci[lowSupportLabel]
    allData <- rbind(allData, lowSupportData)
    rm(lowSupportR)
  }
  # Renumber clusters 1..K by increasing mean CCF
  allData$cluster_id <- match(allData$average_ccf, sort(unique(allData$average_ccf)))
  return(left_join(idxDf, allData, by = "mutation_id"))
}
# ---- Load upstream workspace and PyClone traces ----
# NOTE(review): "inputTmp/myfile" is assumed to provide ssm, pycloneFolder,
# resultsFolder, burnIn, prefix and cellularity used below -- confirm
# against the script that writes it.
load("inputTmp/myfile")
# Split mutation_id on ":"; components 3 and 4 are used as chr/pos below.
id <- Reduce(rbind, strsplit(as.character(ssm$mutation_id), ":", fixed = T), c())
## write results
if (dir.exists(resultsFolder)) {
  "Folder exists and continue."
} else {
  dir.create(resultsFolder, recursive = T)
}
# load trace and mpear label
traceFile <- dir(paste0(pycloneFolder, "/trace"),
                 pattern = "cellular_prevalence", full.names = T)
paramsTrace <- read.delim(traceFile, stringsAsFactors = F, header = F)
idx <- as.character(paramsTrace[1,])   # first row holds the mutation ids
#burnIn <- 20
#paramsTrace <- as.matrix( paramsTrace[-1:-burnIn, ]) #!!!!!!!!need to be change
# Drop the header row plus the burn-in iterations
paramsTrace <- as.matrix(paramsTrace[-1:-(burnIn+1), ])
class(paramsTrace) <- "numeric"
mpearFile <- paste0(pycloneFolder, "/pyclone_mpear.tsv")
if (file.exists(mpearFile)) {
  # Reuse a previously computed MPEAR clustering
  mpear <- read.delim(paste0(pycloneFolder, "/pyclone_mpear.tsv"), stringsAsFactors = F)
} else {
  labelTrace <- read.delim(paste0(pycloneFolder, "/trace/labels.tsv.bz2"), stringsAsFactors = F)
  #burnIn <- 20
  #mpearLabels <- compute_mpear_label(labelTrace[-1:-burnIn,])
  mpearLabels <- compute_mpear_label(labelTrace[-1:-burnIn,])
  mpear <- data.frame(mutation_id = colnames(labelTrace), cluster_id = mpearLabels)
  write.table(mpear, file = paste0(pycloneFolder, '/pyclone_mpear.tsv'),
              row.names = F, sep = "\t", quote = F)
  rm(labelTrace)
}
# Per-mutation CCF summaries from the traces plus the MPEAR clustering
allData <- GetCcfFromLabel(t(paramsTrace), idx, mpear$cluster_id)
rm(paramsTrace)
# load tsv
sampleTsvFile <- paste0(pycloneFolder, "/pyclone_data.tsv")
sampleTsv <- read.delim(sampleTsvFile, stringsAsFactors = F)
sampleTsv <- mutate(sampleTsv, vaf = var_counts/(ref_counts+var_counts))
allData <- left_join(allData, sampleTsv, by = "mutation_id")
rm(sampleTsv)
mutAssign <- data.frame(mutation_id = ssm$mutation_id, chr = id[,3], pos = id[,4])
allData <- left_join(allData, mutAssign, by = "mutation_id")
allData <- dplyr::filter(allData, !is.na(average_ccf))
rm(mutAssign)
# co-assignments matrix file
#labelFile <- dir(paste0(pycloneFolder, "/trace/"), pattern = "labels", full.names = T)
#labelTrace <- read.delim(labelFile, stringsAsFactors = F)
#labelTrace <- as.matrix(labelTrace[-1:-(burnIn+1), ]) + 1
#labelTrace <- as.matrix(labelTrace[-1:-(burnIn+1), ]) + 1
#psm <- comp.psm(labelTrace)
#fn <- paste0(resultsFolder, "/",
# sampleName, "_coassignment_probabilities.txt")
#write.table(psm, file = fn, sep = "\t", row.names = F, quote = F)
#shellCommand <- paste0("gzip -f ", fn)
#system(shellCommand, intern = TRUE)
#rm(psm)
# index file
#index <- allData[, c("chr", "pos")]
#index$col <- seq_along(allData$mutation_id)
#fn <- paste0(resultsFolder, "/",
# sampleName, "_index.txt")
#write.table(index, file = fn, sep = "\t", row.names = F, quote = F)
#shellCommand <- paste0("gzip -f ", fn)
#system(shellCommand, intern = TRUE)
# cluster certainty file
clusterCertainty <- subset(allData,
                           select = c("chr", "pos", "cluster_id",
                                      "average_ccf", "lower_95_ci", "upper_95_ci"))
clusterCertainty <- rename(clusterCertainty, most_likely_assignment = cluster_id)
# Renumber cluster ids densely as 1..K (ordered by original id)
clusterCertainty$most_likely_assignment <-
  match(clusterCertainty$most_likely_assignment,
        sort(unique(clusterCertainty$most_likely_assignment)))
tmp11 <- clusterCertainty[, c("chr", "pos", "most_likely_assignment","average_ccf")]
tmp11 <- rename(tmp11, cluster = most_likely_assignment)
# Cellular proportion = CCF scaled by sample purity (cellularity)
tmp11 <- mutate(tmp11,average_ccf1=as.numeric(average_ccf) * cellularity)
tmp11 <- rename(tmp11, proportion = average_ccf1)
dir.create(paste0(resultsFolder, "pyclone/mutation_assignments"), recursive = T)
fn <- paste0(resultsFolder, "pyclone/mutation_assignments/",
             prefix, "_mutation_assignments.txt")
write.table(tmp11, file = fn, sep = "\t", row.names = F, quote = F)
#shellCommand <- paste0("gzip -f ", fn)
#system(shellCommand, intern = TRUE)
# multiplicity
allData <- mutate(allData, multiplicity = GetMultFromCcf(bn = ssm$var_counts,
                                                         dn = var_counts + ref_counts,
                                                         ccf = ccf,
                                                         major_cn = major_cn,
                                                         minor_cn = minor_cn,
                                                         purity = cellularity))
mult <- allData[, c("chr", "pos", "multiplicity")]
mult$tumour_copynumber <- allData$major_cn+allData$minor_cn
dir.create(paste0(resultsFolder, "pyclone/multiplicity"), recursive = T)
fn <- paste0(resultsFolder, "pyclone/multiplicity/",
             prefix, "_multiplicity.txt")
write.table(mult, file = fn, sep = "\t", row.names = F, quote = F)
#shellCommand <- paste0("gzip -f ", fn)
#system(shellCommand, intern = TRUE)
# subclonal_structure file
tmp1 <- as.data.frame(table(clusterCertainty$most_likely_assignment), stringsAsFactors = F)
tmp2 <- as.data.frame(table(clusterCertainty$average_ccf), stringsAsFactors = F)
# Pair each cluster id with its mean CCF by matching the cluster sizes.
# NOTE(review): joining on Freq is fragile -- clusters of equal size
# produce duplicate matches, which the xor() pruning below removes.
tmp <- left_join(tmp1, tmp2, by ="Freq")
delx <- duplicated(tmp$Var1.x)
dely <- duplicated(tmp$Var1.y)
dels <- xor(delx,dely)
delI <- which(dels)
if (!length(delI)==0){
  tmp <- tmp[-delI,]
}
tmp <- rename(tmp, cluster = Var1.x, n_ssms = Freq, proportion = Var1.y)
# Scale mean CCF by purity to get the cellular proportion
tmp <- mutate(tmp, proportion = as.numeric(proportion) * cellularity)
dir.create(paste0(resultsFolder, "pyclone/subclonal_structure"), recursive = T)
fn <- paste0(resultsFolder, "pyclone/subclonal_structure/",
             prefix, "_subclonal_structure.txt")
write.table(tmp, file = fn, sep = "\t", row.names = F, quote = F)
#shellCommand <- paste0("gzip -f ", fn)
#system(shellCommand, intern = TRUE)
# save sample summary
#allData$purity = ssm$purity
#fn <- paste0(pycloneFolder, "/", sampleName, "_pyclone_results_table.csv")
#write.csv(allData, file = fn , row.names = F)
# graph summary
#fn = paste0(resultsFolder, "/",
# sampleName, "_pyclone_results_summary.pdf")
#pdf(fn, width=8, height=8)
#myColors <- gg_color_hue(n_distinct(allData$cluster_id))
#par(mfrow=c(2,2))
#plot(allData$ccf, allData$vaf, col = myColors[allData$cluster_id],
# xlab = "cancer cell fraction", ylab = "variant allele frequecy",
# main = "ccf vs vaf (colored by cluster memebership)")
#hist(allData$ccf, density=20, breaks=20, prob=TRUE,
# main = "ccf histogram",
# xlab = "cancer cell fraction")
#clusterSize <- table(allData$average_ccf)/nrow(allData)
#names(clusterSize) <- as.character(format(round(as.numeric(names(clusterSize)), 2), nsmall = 2))
#tmp1 <- as.data.frame(table(allData$average_ccf))
#tmp2 <- as.data.frame(table(allData$cluster_id))
#tmp3 <- left_join(tmp1, tmp2, by ="Freq")
#barplot(clusterSize, las = 2, col = myColors[tmp3$Var1.y], xlab = "cluster mean", ylab="mutation proportions",
# main = "cluster sizes")
#dev.off()
| /Pipeline/pycloneL/postProcess.R | no_license | mrtonks/Winter-MSc-Team-Project | R | false | false | 12,222 | r | library(dplyr)
library(mcclust)
# Collapse MCMC clustering label traces (rows = samples) into one hard
# clustering by maximising the posterior expected adjusted Rand index
# (MPEAR) via mcclust::comp.psm / mcclust::maxpear.
compute_mpear_label <- function(label_traces){
  ltmat <- as.matrix(label_traces)
  ltmat <- ltmat + 1  # mcclust expects 1-based cluster labels
  psm <- comp.psm(ltmat)  # posterior similarity matrix
  mpear <- maxpear(psm)
  mpear_label <- mpear$cl
  return(mpear_label)
}
# Emulate ggplot2's default discrete colour palette: n hues equally spaced
# around the HCL colour wheel at luminance 65 and chroma 100.
gg_color_hue <- function(n) {
  hue_grid <- seq(15, 375, length.out = n + 1)  # endpoints coincide mod 360
  hcl(h = hue_grid[1:n], l = 65, c = 100)
}
#' bsxfun {pracma} with single expansion (Matlab style)
#' @param func the function used by bsxfun
#' @param x a matrix
#' @param y a vector recycled (expanded) to the shape of `x`
#' @param expandByRow tie-breaker, applies only when x is a square matrix:
#'   if TRUE, y is treated as one value per column (repeated down each row)
#' @return value of func applied elementwise to x and the expanded y
bsxfun.se <- function(func, x, y, expandByRow=TRUE) {
  # Scalar y needs no expansion at all.
  if(length(y) == 1) return(pracma::arrayfun(func, x, y)) else
    stopifnot(nrow(x) == length(y) || ncol(x) == length(y))
  # Decide along which dimension y has to be replicated.
  expandCol <- nrow(x) == length(y)
  expandRow <- ncol(x) == length(y)
  # For square x both tests succeed; expandByRow breaks the tie.
  if(expandCol & expandRow & expandByRow) expandCol <- FALSE
  if(expandCol & expandRow & !expandByRow) expandRow <- FALSE
  # repeat row (if dim2expand = 1, then length(y) = ncol(x))
  if(expandRow) y.repmat <- matrix(rep(as.numeric(y), each=nrow(x)), nrow=nrow(x))
  # repeat col (if dim2expand = 2, then length(y) = nrow(x))
  if(expandCol) y.repmat <- matrix(rep(as.numeric(y), ncol(x)), ncol=ncol(x))
  pracma::bsxfun(func, x, y.repmat)
}
# matlab style helper
repmat = function(X,m,n){
  ##R equivalent of repmat (matlab): tile matrix X m times vertically and
  ##n times horizontally (column-recycle then transpose/byrow trick).
  mx = dim(X)[1]
  nx = dim(X)[2]
  matrix(t(matrix(X,mx,nx*n)),mx*m,nx*n,byrow=T)
}
#' Compute log(sum(exp(x),dim)) while avoiding numerical underflow
#' @param x a matrix
#' @param margin used for apply (1 = per row, 2 = per column)
#' @return log(sum(exp(x),dim)) a matrix of the sample size as x
logsumexp <- function(x, margin=1) {
  if ( ! is.matrix(x) ) {
    x <- as.matrix(x)
  }
  # subtract the largest in each column (standard log-sum-exp shift)
  y <- apply(x, margin, max)
  # bsxfun.se needs the tie broken explicitly when x is square, because the
  # max vector then matches both dimensions.
  if (nrow(x) == ncol(x)) {
    x <- bsxfun.se("-", x, y, expandByRow = F)
  } else {
    x <- bsxfun.se("-", x, y)
  }
  s <- y + log(apply(exp(x), margin, sum))
  # Entries whose shifted sum still underflowed fall back to the max term.
  i <- which(!is.finite(s))
  if(length(i) > 0) s[i] <- y[i]
  s
}
# Soft-assign points x to 1-D clusters with centres `centers` and spread
# parameters `s` using a Gaussian-style kernel -(x-c)^2/(2 s) - log(s);
# returns responsibilities R (rows sum to 1) and their logs (cf. 10.49).
Assign <- function(x, centers, s) {
  n <- length(x)
  k <- length(centers)
  logRho <- array(0, dim= c(n ,k))
  for (ii in 1:k) {
    # Unnormalised log weight of every x under component ii
    logRho[,ii] = bsxfun.se("-", -(x-centers[ii])^2/(2*s[ii]), log(s[ii]))
  }
  # Normalise per data point; the square case needs the explicit tie-break.
  if (n==k) {
    logR <- bsxfun.se("-", logRho, logsumexp(logRho, 1), expandByRow = F) # 10.49
  } else {
    logR <- bsxfun.se("-", logRho, logsumexp(logRho, 1)) # 10.49
  }
  R <- exp(logR)
  return(list(R=R, logR=logR))
}
# Pick the most likely mutation multiplicity (0..max(major_cn)) for each
# variant by maximising a binomial log-likelihood of the observed reads.
# bn/dn: variant/total read counts; ccf: cancer cell fraction;
# epi: sequencing error rate.
GetMultFromCcf <- function(bn, dn, ccf, major_cn, minor_cn, purity, epi = 1e-3) {
  total_cn = major_cn + minor_cn
  # Average copy number per cell in the normal/tumour mixture
  z = (1-purity)*2 + purity*total_cn
  k = max(major_cn)
  multPool <- seq(0, k, by = 1)
  # Expected VAF for every candidate multiplicity (columns 0..k)
  aa = repmat(as.matrix(purity * ccf * (1 - epi) / z), 1, k+1)
  bb = repmat(as.matrix(epi * ( z - purity * (1-ccf) * total_cn)), 1, k+1)
  pp = bsxfun.se("*", aa, multPool) + bb
  # Binomial log-likelihood of bn variant reads out of dn at probability pp
  ll = bsxfun.se("*", log(pp), bn) + bsxfun.se("*", log(1-pp), dn-bn)
  return(multPool[apply(ll, 1, which.max)])
}
# Summarise per-mutation CCF traces into cluster-level statistics.
#   ccfTraces: matrix, one row per mutation, columns = MCMC samples
#   idx:       mutation ids (row order of ccfTraces)
#   label:     cluster assignment per mutation (e.g. the MPEAR labels)
# Clusters holding <= 1% of mutations or a single member get NA summaries;
# their members are then re-assigned to the nearest well-supported cluster.
GetCcfFromLabel <- function(ccfTraces, idx, label) {
  allData <- data.frame(mutation_id = idx, cluster_id = label,
                        ccf = apply(ccfTraces, 1, median, na.rm = T), stringsAsFactors = F)
  idxDf <- data.frame(mutation_id = idx, stringsAsFactors = F)
  tt <- table(allData$cluster_id)
  clusterMean <- vector(mode = "numeric", length = length(tt))
  clusterSd <- clusterMean
  for (ii in seq_along(tt)) {
    dataIdx <- which(allData$cluster_id %in% as.integer(names(tt[ii])))
    # Pooled median over all trace values of the cluster's mutations
    clusterMean[ii] <- if (tt[ii]/sum(tt) > 0.01 &
                           length(dataIdx) > 1
    ) {
      median(c( ccfTraces[dataIdx, ]), na.rm = T )
    } else {NA}
    clusterSd[ii] <- if (tt[ii]/sum(tt) > 0.01 &
                         length(dataIdx) > 1
    ) {
      # Guard against zero spread so the CI stays non-degenerate
      if (sd(c( ccfTraces[dataIdx, ]), na.rm = T) ==0) {
        1e-20
      } else { sd( c( ccfTraces[dataIdx, ]), na.rm = T)}
    } else {NA}
  }
  clusterDf <- data.frame(cluster_id=as.integer(names(tt)),
                          average_ccf = clusterMean,
                          lower_95_ci = clusterMean - 2*clusterSd,
                          upper_95_ci = clusterMean + 2*clusterSd)
  allData <- left_join(allData, clusterDf, by="cluster_id" )
  clusterDf <- filter(clusterDf, !is.na(average_ccf) & !is.na(lower_95_ci))
  ## Reassign low support data
  lowSupportData <- dplyr::filter(allData, is.na(average_ccf) | is.na(lower_95_ci))
  allData <- dplyr::filter(allData, !is.na(average_ccf))
  lowSupportDataFlag <- F
  if (nrow(lowSupportData) > 0) {
    lowSupportDataFlag <- T
    # Soft-assign orphaned mutations to surviving clusters by their CCF
    lowSupportR <- Assign(lowSupportData$ccf, clusterDf$average_ccf,
                          ((clusterDf$upper_95_ci - clusterDf$average_ccf)/2)^2)$R
    lowSupportLabel <- apply(lowSupportR, 1, which.max)
    lowSupportData$cluster_id <- clusterDf$cluster_id[lowSupportLabel]
    lowSupportData$average_ccf <- clusterDf$average_ccf[lowSupportLabel]
    lowSupportData$lower_95_ci <- clusterDf$lower_95_ci[lowSupportLabel]
    lowSupportData$upper_95_ci <- clusterDf$upper_95_ci[lowSupportLabel]
    allData <- rbind(allData, lowSupportData)
    rm(lowSupportR)
  }
  # Renumber clusters 1..K by increasing mean CCF
  allData$cluster_id <- match(allData$average_ccf, sort(unique(allData$average_ccf)))
  return(left_join(idxDf, allData, by = "mutation_id"))
}
# ---- Load upstream workspace and PyClone traces ----
# NOTE(review): "inputTmp/myfile" is assumed to provide ssm, pycloneFolder,
# resultsFolder, burnIn, prefix and cellularity used below -- confirm
# against the script that writes it.
load("inputTmp/myfile")
# Split mutation_id on ":"; components 3 and 4 are used as chr/pos below.
id <- Reduce(rbind, strsplit(as.character(ssm$mutation_id), ":", fixed = T), c())
## write results
if (dir.exists(resultsFolder)) {
  "Folder exists and continue."
} else {
  dir.create(resultsFolder, recursive = T)
}
# load trace and mpear label
traceFile <- dir(paste0(pycloneFolder, "/trace"),
                 pattern = "cellular_prevalence", full.names = T)
paramsTrace <- read.delim(traceFile, stringsAsFactors = F, header = F)
idx <- as.character(paramsTrace[1,])   # first row holds the mutation ids
#burnIn <- 20
#paramsTrace <- as.matrix( paramsTrace[-1:-burnIn, ]) #!!!!!!!!need to be change
# Drop the header row plus the burn-in iterations
paramsTrace <- as.matrix(paramsTrace[-1:-(burnIn+1), ])
class(paramsTrace) <- "numeric"
mpearFile <- paste0(pycloneFolder, "/pyclone_mpear.tsv")
if (file.exists(mpearFile)) {
  # Reuse a previously computed MPEAR clustering
  mpear <- read.delim(paste0(pycloneFolder, "/pyclone_mpear.tsv"), stringsAsFactors = F)
} else {
  labelTrace <- read.delim(paste0(pycloneFolder, "/trace/labels.tsv.bz2"), stringsAsFactors = F)
  #burnIn <- 20
  #mpearLabels <- compute_mpear_label(labelTrace[-1:-burnIn,])
  mpearLabels <- compute_mpear_label(labelTrace[-1:-burnIn,])
  mpear <- data.frame(mutation_id = colnames(labelTrace), cluster_id = mpearLabels)
  write.table(mpear, file = paste0(pycloneFolder, '/pyclone_mpear.tsv'),
              row.names = F, sep = "\t", quote = F)
  rm(labelTrace)
}
# Per-mutation CCF summaries from the traces plus the MPEAR clustering
allData <- GetCcfFromLabel(t(paramsTrace), idx, mpear$cluster_id)
rm(paramsTrace)
# load tsv
sampleTsvFile <- paste0(pycloneFolder, "/pyclone_data.tsv")
sampleTsv <- read.delim(sampleTsvFile, stringsAsFactors = F)
sampleTsv <- mutate(sampleTsv, vaf = var_counts/(ref_counts+var_counts))
allData <- left_join(allData, sampleTsv, by = "mutation_id")
rm(sampleTsv)
mutAssign <- data.frame(mutation_id = ssm$mutation_id, chr = id[,3], pos = id[,4])
allData <- left_join(allData, mutAssign, by = "mutation_id")
allData <- dplyr::filter(allData, !is.na(average_ccf))
rm(mutAssign)
# co-assignments matrix file
#labelFile <- dir(paste0(pycloneFolder, "/trace/"), pattern = "labels", full.names = T)
#labelTrace <- read.delim(labelFile, stringsAsFactors = F)
#labelTrace <- as.matrix(labelTrace[-1:-(burnIn+1), ]) + 1
#labelTrace <- as.matrix(labelTrace[-1:-(burnIn+1), ]) + 1
#psm <- comp.psm(labelTrace)
#fn <- paste0(resultsFolder, "/",
# sampleName, "_coassignment_probabilities.txt")
#write.table(psm, file = fn, sep = "\t", row.names = F, quote = F)
#shellCommand <- paste0("gzip -f ", fn)
#system(shellCommand, intern = TRUE)
#rm(psm)
# index file
#index <- allData[, c("chr", "pos")]
#index$col <- seq_along(allData$mutation_id)
#fn <- paste0(resultsFolder, "/",
# sampleName, "_index.txt")
#write.table(index, file = fn, sep = "\t", row.names = F, quote = F)
#shellCommand <- paste0("gzip -f ", fn)
#system(shellCommand, intern = TRUE)
# cluster certainty file
clusterCertainty <- subset(allData,
select = c("chr", "pos", "cluster_id",
"average_ccf", "lower_95_ci", "upper_95_ci"))
clusterCertainty <- rename(clusterCertainty, most_likely_assignment = cluster_id)
clusterCertainty$most_likely_assignment <-
match(clusterCertainty$most_likely_assignment,
sort(unique(clusterCertainty$most_likely_assignment)))
tmp11 <- clusterCertainty[, c("chr", "pos", "most_likely_assignment","average_ccf")]
tmp11 <- rename(tmp11, cluster = most_likely_assignment)
tmp11 <- mutate(tmp11,average_ccf1=as.numeric(average_ccf) * cellularity)
tmp11 <- rename(tmp11, proportion = average_ccf1)
dir.create(paste0(resultsFolder, "pyclone/mutation_assignments"), recursive = T)
fn <- paste0(resultsFolder, "pyclone/mutation_assignments/",
prefix, "_mutation_assignments.txt")
write.table(tmp11, file = fn, sep = "\t", row.names = F, quote = F)
#shellCommand <- paste0("gzip -f ", fn)
#system(shellCommand, intern = TRUE)
# multiplicity
allData <- mutate(allData, multiplicity = GetMultFromCcf(bn = ssm$var_counts,
dn = var_counts + ref_counts,
ccf = ccf,
major_cn = major_cn,
minor_cn = minor_cn,
purity = cellularity))
mult <- allData[, c("chr", "pos", "multiplicity")]
mult$tumour_copynumber <- allData$major_cn+allData$minor_cn
dir.create(paste0(resultsFolder, "pyclone/multiplicity"), recursive = T)
fn <- paste0(resultsFolder, "pyclone/multiplicity/",
prefix, "_multiplicity.txt")
write.table(mult, file = fn, sep = "\t", row.names = F, quote = F)
#shellCommand <- paste0("gzip -f ", fn)
#system(shellCommand, intern = TRUE)
# subclonal_structure file
# Build the per-cluster summary: one row per cluster with its mutation
# count (n_ssms) and its cellular proportion (average CCF * cellularity).
# Count of mutations per cluster id:
tmp1 <- as.data.frame(table(clusterCertainty$most_likely_assignment), stringsAsFactors = F)
# Count of mutations per average-CCF value:
tmp2 <- as.data.frame(table(clusterCertainty$average_ccf), stringsAsFactors = F)
# NOTE(review): cluster ids are paired with CCF values by joining on the
# shared count ("Freq"). This is ambiguous whenever two clusters happen to
# contain the same number of mutations — the duplicated()/xor() cleanup
# below drops the spurious cross matches, but a direct cluster_id -> ccf
# lookup would be more robust. Verify against upstream data assumptions.
tmp <- left_join(tmp1, tmp2, by ="Freq")
delx <- duplicated(tmp$Var1.x)
dely <- duplicated(tmp$Var1.y)
# Rows where exactly one side is a duplicate are artifacts of the
# count-based join and are removed.
dels <- xor(delx,dely)
delI <- which(dels)
if (!length(delI)==0){
tmp <- tmp[-delI,]
}
tmp <- rename(tmp, cluster = Var1.x, n_ssms = Freq, proportion = Var1.y)
# Scale CCF by tumour cellularity to get the cancer-cell proportion.
tmp <- mutate(tmp, proportion = as.numeric(proportion) * cellularity)
dir.create(paste0(resultsFolder, "pyclone/subclonal_structure"), recursive = T)
fn <- paste0(resultsFolder, "pyclone/subclonal_structure/",
prefix, "_subclonal_structure.txt")
write.table(tmp, file = fn, sep = "\t", row.names = F, quote = F)
#shellCommand <- paste0("gzip -f ", fn)
#system(shellCommand, intern = TRUE)
# save sample summary
#allData$purity = ssm$purity
#fn <- paste0(pycloneFolder, "/", sampleName, "_pyclone_results_table.csv")
#write.csv(allData, file = fn , row.names = F)
# graph summary
#fn = paste0(resultsFolder, "/",
# sampleName, "_pyclone_results_summary.pdf")
#pdf(fn, width=8, height=8)
#myColors <- gg_color_hue(n_distinct(allData$cluster_id))
#par(mfrow=c(2,2))
#plot(allData$ccf, allData$vaf, col = myColors[allData$cluster_id],
# xlab = "cancer cell fraction", ylab = "variant allele frequecy",
# main = "ccf vs vaf (colored by cluster memebership)")
#hist(allData$ccf, density=20, breaks=20, prob=TRUE,
# main = "ccf histogram",
# xlab = "cancer cell fraction")
#clusterSize <- table(allData$average_ccf)/nrow(allData)
#names(clusterSize) <- as.character(format(round(as.numeric(names(clusterSize)), 2), nsmall = 2))
#tmp1 <- as.data.frame(table(allData$average_ccf))
#tmp2 <- as.data.frame(table(allData$cluster_id))
#tmp3 <- left_join(tmp1, tmp2, by ="Freq")
#barplot(clusterSize, las = 2, col = myColors[tmp3$Var1.y], xlab = "cluster mean", ylab="mutation proportions",
# main = "cluster sizes")
#dev.off()
|
# Kendall rank correlation coefficient (tau) for two vectors of distinct
# values, computed by counting concordant pairs with a double loop.
#
# Args:
#   x, y: numeric vectors of equal length (>= 2). Values within each
#         vector must be pairwise distinct (ties are not supported).
# Returns:
#   Kendall's tau in [-1, 1], or NA (after printing a diagnostic
#   message) when the input is too short, the lengths differ, or a
#   tie is found.
kendall2 <- function(x, y) {
  n <- length(x)
  # BUG FIX: the original tested n <= 0, but the message (and the loop
  # bounds 1:(n-1)) require at least two elements; for n == 1 the loop
  # indexed past the end of x and crashed on an NA comparison.
  if (n <= 1) {
    print('Wektor x jest pusty lub ma jeden element!')
    return(NA)
  } else if (n != length(y)) {
    print('Wektory sa roznej dlugosci!')
    return(NA)
  } else {
    # Renamed from `c` to avoid shadowing base::c().
    concordant <- 0
    for (i in 1:(n - 1)) {
      for (j in (i + 1):n) {
        # This estimator assumes distinct values; bail out on ties.
        # (Unreachable `break` statements after return() removed.)
        if (x[i] == x[j]) {
          print('wartosci x nie sa rozne!')
          return(NA)
        }
        if (y[i] == y[j]) {
          print('wartosci y nie sa rozne!')
          return(NA)
        }
        # A pair is concordant when both orderings agree.
        if (x[i] > x[j] && y[i] > y[j]) concordant <- concordant + 1
        if (x[i] < x[j] && y[i] < y[j]) concordant <- concordant + 1
      }
    }
    # tau = 2 * C / (n * (n - 1) / 2) - 1, with C concordant pairs out
    # of n*(n-1)/2 total pairs (discordant = total - C, no ties).
    return(2 * concordant / (n * (n - 1) / 2) - 1)
  }
}
| /fuzzedpackages/SimilaR/inst/testdata/data/kendall4.R | no_license | akhikolla/testpackages | R | false | false | 733 | r | kendall2<-function(x,y) {
n <- length(x)
if (n<=0) {
print('Wektor x jest pusty lub ma jeden element!')
return(NA)
} else if(n!=length(y)) {
print('Wektory sa roznej dlugosci!')
return(NA)
} else {
c<-0
for (i in 1:(n-1)) {
for (j in (i+1):n) {
if(x[i]==x[j]) {
print('wartosci x nie sa rozne!')
return(NA)
break
}
if(y[i]==y[j]) {
print('wartosci y nie sa rozne!')
return(NA)
break
}
if(x[i]>x[j] & y[i]>y[j]) c <- c+1
if(x[i]<x[j] & y[i]<y[j]) c <- c+1
}
}
return(2*c/(n*(n-1)/2)-1)
}
}
|
# @file VignetteDataFetch.R
#
# Copyright 2016 Observational Health Data Sciences and Informatics
#
# This file is part of PatientLevelPrediction
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code should be used to fetch the data that is used in the vignettes.
library(SqlRender)
library(DatabaseConnector)
library(PatientLevelPrediction)
setwd("s:/temp")
options(fftempdir = "s:/FFtemp")
# --- Connection settings -------------------------------------------------
# Three alternative database configurations are assigned back to back.
# Because each block overwrites the previous one, only the LAST block
# (dbms = "pdw") is in effect when createConnectionDetails() runs below;
# the "sql server" and "postgresql" blocks are effectively dead code kept
# for convenience when switching environments.
pw <- NULL
dbms <- "sql server"
user <- NULL
server <- "RNDUSRDHIT07.jnj.com"
cdmDatabaseSchema <- "cdm_truven_mdcd.dbo"
resultsDatabaseSchema <- "scratch.dbo"
port <- NULL
dbms <- "postgresql"
server <- "localhost/ohdsi"
user <- "postgres"
# SECURITY NOTE(review): a real-looking password is hard-coded in source
# here; it should be moved to an environment variable or a keyring and
# rotated, since it is committed to version control.
pw <- "F1r3starter"
cdmDatabaseSchema <- "cdm4_sim"
resultsDatabaseSchema <- "scratch"
port <- NULL
pw <- NULL
dbms <- "pdw"
user <- NULL
server <- "JRDUSAPSCTL01"
cdmDatabaseSchema <- "cdm_truven_mdcd_v5.dbo"
resultsDatabaseSchema <- "scratch.dbo"
oracleTempSchema <- NULL
port <- 17001
cdmVersion <- "5"
# Bundle the (last-assigned) settings into a DatabaseConnector handle.
connectionDetails <- DatabaseConnector::createConnectionDetails(dbms = dbms,
server = server,
user = user,
password = pw,
port = port)
sql <- SqlRender::loadRenderTranslateSql("HospitalizationCohorts.sql",
packageName = "PatientLevelPrediction",
dbms = dbms,
cdmDatabaseSchema = cdmDatabaseSchema,
resultsDatabaseSchema = resultsDatabaseSchema,
post_time = 30,
pre_time = 365)
connection <- DatabaseConnector::connect(connectionDetails)
DatabaseConnector::executeSql(connection, sql)
# Check number of subjects per cohort:
sql <- "SELECT cohort_definition_id, COUNT(*) AS count FROM @resultsDatabaseSchema.rehospitalization GROUP BY cohort_definition_id"
sql <- SqlRender::renderSql(sql, resultsDatabaseSchema = resultsDatabaseSchema)$sql
sql <- SqlRender::translateSql(sql, targetDialect = connectionDetails$dbms)$sql
DatabaseConnector::querySql(connection, sql)
dbDisconnect(connection)
covariateSettings <- createCovariateSettings(useCovariateDemographics = TRUE,
useCovariateDemographicsGender = TRUE,
useCovariateDemographicsRace = TRUE,
useCovariateDemographicsEthnicity = TRUE,
useCovariateDemographicsAge = TRUE,
useCovariateDemographicsYear = TRUE,
useCovariateDemographicsMonth = TRUE,
useCovariateConditionOccurrence = TRUE,
useCovariateConditionOccurrence365d = TRUE,
useCovariateConditionOccurrence30d = TRUE,
useCovariateConditionOccurrenceInpt180d = TRUE,
useCovariateConditionEra = TRUE,
useCovariateConditionEraEver = TRUE,
useCovariateConditionEraOverlap = TRUE,
useCovariateConditionGroup = TRUE,
useCovariateConditionGroupMeddra = TRUE,
useCovariateConditionGroupSnomed = TRUE,
useCovariateDrugExposure = TRUE,
useCovariateDrugExposure365d = TRUE,
useCovariateDrugExposure30d = TRUE,
useCovariateDrugEra = TRUE,
useCovariateDrugEra365d = TRUE,
useCovariateDrugEra30d = TRUE,
useCovariateDrugEraOverlap = TRUE,
useCovariateDrugEraEver = TRUE,
useCovariateDrugGroup = TRUE,
useCovariateProcedureOccurrence = TRUE,
useCovariateProcedureOccurrence365d = TRUE,
useCovariateProcedureOccurrence30d = TRUE,
useCovariateProcedureGroup = TRUE,
useCovariateObservation = TRUE,
useCovariateObservation365d = TRUE,
useCovariateObservation30d = TRUE,
useCovariateObservationCount365d = TRUE,
useCovariateMeasurement = TRUE,
useCovariateMeasurement365d = TRUE,
useCovariateMeasurement30d = TRUE,
useCovariateMeasurementCount365d = TRUE,
useCovariateMeasurementBelow = TRUE,
useCovariateMeasurementAbove = TRUE,
useCovariateConceptCounts = TRUE,
useCovariateRiskScores = TRUE,
useCovariateRiskScoresCharlson = TRUE,
useCovariateRiskScoresDCSI = TRUE,
useCovariateRiskScoresCHADS2 = TRUE,
useCovariateRiskScoresCHADS2VASc = TRUE,
useCovariateInteractionYear = FALSE,
useCovariateInteractionMonth = FALSE,
excludedCovariateConceptIds = c(),
includedCovariateConceptIds = c(),
deleteCovariatesSmallCount = 100)
plpData <- getDbPlpData(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
oracleTempSchema = oracleTempSchema,
cohortDatabaseSchema = resultsDatabaseSchema,
cohortTable = "rehospitalization",
cohortIds = 1,
washoutWindow = 183,
useCohortEndDate = TRUE,
windowPersistence = 0,
covariateSettings = covariateSettings,
outcomeDatabaseSchema = resultsDatabaseSchema,
outcomeTable = "rehospitalization",
outcomeIds = 2,
firstOutcomeOnly = FALSE,
cdmVersion = cdmVersion)
savePlpData(plpData, "s:/temp/PlpVignette/plpData")
# plpData <- loadPlpData('s:/temp/PlpVignette/plpData')
means <- computeCovariateMeans(plpData = plpData, outcomeId = 2)
saveRDS(means, "s:/temp/PlpVignette/means.rds")
# plotCovariateDifferenceOfTopVariables(means)
parts <- splitData(plpData, c(0.75, 0.25))
savePlpData(parts[[1]], "s:/temp/PlpVignette/plpData_train")
savePlpData(parts[[2]], "s:/temp/PlpVignette/plpData_test")
# parts <- list(); parts[[1]] <- loadPlpData('s:/temp/PlpVignette/plpData_train'); parts[[2]] <-
# loadPlpData('s:/temp/PlpVignette/plpData_test')
model <- fitPredictiveModel(parts[[1]],
modelType = "logistic",
prior = createPrior("laplace",
exclude = c(0),
useCrossValidation = TRUE),
control = createControl(noiseLevel = "quiet",
cvType = "auto",
startingVariance = 0.001,
tolerance = 1e-07,
cvRepetitions = 10,
seed = 123,
threads = 30))
saveRDS(model, file = "s:/temp/PlpVignette/model.rds")
# model <- readRDS('s:/temp/PlpVignette/model.rds')
prediction <- predictProbabilities(model, parts[[2]])
saveRDS(prediction, file = "s:/temp/PlpVignette/prediction.rds")
# prediction <- readRDS('s:/temp/PlpVignette/prediction.rds')
computeAuc(prediction, parts[[2]])
plotRoc(prediction, parts[[2]])
plotCalibration(prediction, parts[[2]], numberOfStrata = 10)
modelDetails <- getModelDetails(model, parts[[2]])
head(modelDetails)
#### Datafetch for custom covariate builders #####
# Build the settings object for the custom length-of-observation
# covariate builder. The "fun" attribute names the function that the
# PatientLevelPrediction data fetch will call to construct the covariate.
#
# @param useLengthOfObs Should the length-of-observation covariate be built?
# @return A list of class "covariateSettings" with a "fun" attribute.
createLooCovariateSettings <- function(useLengthOfObs = TRUE) {
  structure(
    list(useLengthOfObs = useLengthOfObs),
    fun = "getDbLooCovariateData",
    class = "covariateSettings"
  )
}
# Custom covariate builder: computes a single "length of observation"
# covariate — the number of days between the observation-period start
# and the cohort start — for every subject in the cohort temp table.
# Invoked by the data-fetch machinery via the "fun" attribute set in
# createLooCovariateSettings().
#
# Args:
#   connection:        an open DatabaseConnector connection.
#   oracleTempSchema:  Oracle temp-table schema (accepted for the
#                      builder interface; not used in this function).
#   cdmDatabaseSchema: schema holding the OMOP CDM tables.
#   cdmVersion:        CDM version string (not used in this function).
#   cohortTempTable:   name of the temp table holding the cohort rows.
#   rowIdField:        cohort-table column used as the covariate row_id.
#   covariateSettings: settings list from createLooCovariateSettings().
# Returns:
#   A list of class "covariateData" (covariates ffdf, covariateRef ffdf,
#   metaData), or NULL when useLengthOfObs is FALSE.
getDbLooCovariateData <- function(connection,
oracleTempSchema = NULL,
cdmDatabaseSchema,
cdmVersion = "4",
cohortTempTable = "cohort_person",
rowIdField = "subject_id",
covariateSettings) {
writeLines("Constructing length of observation covariates")
if (covariateSettings$useLengthOfObs == FALSE) {
return(NULL)
}
# Temp table names must start with a '#' in SQL Server, our source dialect:
if (substr(cohortTempTable, 1, 1) != "#") {
cohortTempTable <- paste("#", cohortTempTable, sep = "")
}
# SQL template (SQL Server dialect): days from observation-period start
# to cohort start, restricted to cohorts inside the observation period.
sql <- paste("SELECT @row_id_field AS row_id, 1 AS covariate_id,",
"DATEDIFF(DAY, cohort_start_date, observation_period_start_date)",
"AS covariate_value",
"FROM @cohort_temp_table c",
"INNER JOIN @cdm_database_schema.observation_period op",
"ON op.person_id = c.subject_id",
"WHERE cohort_start_date >= observation_period_start_date",
"AND cohort_start_date <= observation_period_end_date")
# Fill the @parameters, then translate to the connection's dialect:
sql <- SqlRender::renderSql(sql,
cohort_temp_table = cohortTempTable,
row_id_field = rowIdField,
cdm_database_schema = cdmDatabaseSchema)$sql
sql <- SqlRender::translateSql(sql, targetDialect = attr(connection, "dbms"))$sql
# Retrieve the covariate as an ffdf (disk-backed data frame):
covariates <- DatabaseConnector::querySql.ffdf(connection, sql)
# Convert column names to camelCase:
colnames(covariates) <- SqlRender::snakeCaseToCamelCase(colnames(covariates))
# Construct the single-row covariate reference table:
covariateRef <- data.frame(covariateId = 1,
covariateName = "Length of observation",
analysisId = 1,
conceptId = 0)
covariateRef <- ff::as.ffdf(covariateRef)
# Record the generated SQL and the call for provenance.
metaData <- list(sql = sql, call = match.call())
result <- list(covariates = covariates, covariateRef = covariateRef, metaData = metaData)
class(result) <- "covariateData"
return(result)
}
looCovariateSettings <- createLooCovariateSettings(useLengthOfObs = TRUE)
plpData <- getDbPlpData(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
cohortDatabaseSchema = resultsDatabaseSchema,
cohortTable = "mschuemi_stroke",
cohortIds = 1,
useCohortEndDate = TRUE,
windowPersistence = 0,
covariateSettings = looCovariateSettings,
outcomeDatabaseSchema = resultsDatabaseSchema,
outcomeTable = "mschuemi_stroke",
outcomeIds = 2,
firstOutcomeOnly = TRUE,
cdmVersion = cdmVersion)
covariateSettings <- createCovariateSettings(useCovariateDemographics = TRUE,
useCovariateDemographicsGender = TRUE,
useCovariateDemographicsRace = TRUE,
useCovariateDemographicsEthnicity = TRUE,
useCovariateDemographicsAge = TRUE,
useCovariateDemographicsYear = TRUE,
useCovariateDemographicsMonth = TRUE)
looCovariateSettings <- createLooCovariateSettings(useLengthOfObs = TRUE)
covariateSettingsList <- list(covariateSettings, looCovariateSettings)
plpData <- getDbPlpData(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
cohortDatabaseSchema = resultsDatabaseSchema,
cohortTable = "mschuemi_stroke",
cohortIds = 1,
useCohortEndDate = TRUE,
windowPersistence = 0,
covariateSettings = covariateSettingsList,
outcomeDatabaseSchema = resultsDatabaseSchema,
outcomeTable = "mschuemi_stroke",
outcomeIds = 2,
firstOutcomeOnly = TRUE,
cdmVersion = cdmVersion)
covariateSettings <- createHdpsCovariateSettings(useCovariateCohortIdIs1 = FALSE,
useCovariateDemographics = TRUE,
useCovariateDemographicsGender = TRUE,
useCovariateDemographicsRace = TRUE,
useCovariateDemographicsEthnicity = TRUE,
useCovariateDemographicsAge = TRUE,
useCovariateDemographicsYear = TRUE,
useCovariateDemographicsMonth = TRUE,
useCovariateConditionOccurrence = TRUE,
useCovariate3DigitIcd9Inpatient180d = TRUE,
useCovariate3DigitIcd9Inpatient180dMedF = TRUE,
useCovariate3DigitIcd9Inpatient180d75F = TRUE,
useCovariate3DigitIcd9Ambulatory180d = TRUE,
useCovariate3DigitIcd9Ambulatory180dMedF = TRUE,
useCovariate3DigitIcd9Ambulatory180d75F = TRUE,
useCovariateDrugExposure = TRUE,
useCovariateIngredientExposure180d = TRUE,
useCovariateIngredientExposure180dMedF = TRUE,
useCovariateIngredientExposure180d75F = TRUE,
useCovariateProcedureOccurrence = TRUE,
useCovariateProcedureOccurrenceInpatient180d = TRUE,
useCovariateProcedureOccurrenceInpatient180dMedF = TRUE,
useCovariateProcedureOccurrenceInpatient180d75F = TRUE,
useCovariateProcedureOccurrenceAmbulatory180d = TRUE,
useCovariateProcedureOccurrenceAmbulatory180dMedF = TRUE,
useCovariateProcedureOccurrenceAmbulatory180d75F = TRUE,
excludedCovariateConceptIds = c(),
includedCovariateConceptIds = c(),
deleteCovariatesSmallCount = 100)
#### Datafetch for cohort attribute covariate builder #####
library(SqlRender)
library(DatabaseConnector)
library(PatientLevelPrediction)
setwd("s:/temp")
options(fftempdir = "s:/FFtemp")
pw <- NULL
dbms <- "sql server"
user <- NULL
server <- "RNDUSRDHIT07.jnj.com"
cdmDatabaseSchema <- "cdm_truven_mdcd.dbo"
resultsDatabaseSchema <- "scratch.dbo"
port <- NULL
dbms <- "postgresql"
server <- "localhost/ohdsi"
user <- "postgres"
pw <- "F1r3starter"
cdmDatabaseSchema <- "cdm4_sim"
resultsDatabaseSchema <- "scratch"
port <- NULL
pw <- NULL
dbms <- "pdw"
user <- NULL
server <- "JRDUSAPSCTL01"
cdmDatabaseSchema <- "cdm_truven_mdcd_v5.dbo"
cohortDatabaseSchema <- "scratch.dbo"
oracleTempSchema <- NULL
port <- 17001
cdmVersion <- "5"
connectionDetails <- DatabaseConnector::createConnectionDetails(dbms = dbms,
server = server,
user = user,
password = pw,
port = port)
connection <- DatabaseConnector::connect(connectionDetails)
# Build cohorts:
sql <- SqlRender::loadRenderTranslateSql("HospitalizationCohorts.sql",
packageName = "PatientLevelPrediction",
dbms = dbms,
cdmDatabaseSchema = cdmDatabaseSchema,
resultsDatabaseSchema = cohortDatabaseSchema,
post_time = 30,
pre_time = 365)
DatabaseConnector::executeSql(connection, sql)
# Build cohort attributes:
sql <- SqlRender::loadRenderTranslateSql("LengthOfObsCohortAttr.sql",
packageName = "PatientLevelPrediction",
dbms = dbms,
cdm_database_schema = cdmDatabaseSchema,
cohort_database_schema = cohortDatabaseSchema,
cohort_table = "rehospitalization",
cohort_attribute_table = "loo_cohort_attribute",
attribute_definition_table = "loo_attribute_definition",
cohort_definition_ids = c(1, 2))
DatabaseConnector::executeSql(connection, sql)
querySql(connection, "SELECT TOP 100 * FROM scratch.dbo.loo_cohort_attribute")
looCovariateSettings <- createCohortAttrCovariateSettings(attrDatabaseSchema = cohortDatabaseSchema,
cohortAttrTable = "loo_cohort_attribute",
attrDefinitionTable = "loo_attribute_definition",
includeAttrIds = c())
plpData <- getDbPlpData(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = "rehospitalization",
cohortIds = 1,
useCohortEndDate = TRUE,
windowPersistence = 0,
covariateSettings = looCovariateSettings,
outcomeDatabaseSchema = cohortDatabaseSchema,
outcomeTable = "rehospitalization",
outcomeIds = 2,
firstOutcomeOnly = TRUE,
cdmVersion = cdmVersion)
summary(plpData)
plpData$covariates
sql <- "DROP TABLE @cohort_database_schema.rehospitalization"
sql <- SqlRender::renderSql(sql, cohort_database_schema = cohortDatabaseSchema)$sql
sql <- SqlRender::translateSql(sql, targetDialect = attr(connection, "dbms"))$sql
looCovariateSettings <- createCohortAttrCovariateSettings(attrDatabaseSchema = cohortDatabaseSchema,
cohortAttrTable = "loo_cohort_attribute",
attrDefinitionTable = "loo_attribute_definition",
includeAttrIds = c())
covariateSettingsList <- list(looCovariateSettings, looCovariateSettings)
plpData <- getDbPlpData(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = "rehospitalization",
cohortIds = 1,
useCohortEndDate = TRUE,
windowPersistence = 0,
covariateSettings = covariateSettingsList,
outcomeDatabaseSchema = cohortDatabaseSchema,
outcomeTable = "rehospitalization",
outcomeIds = 2,
firstOutcomeOnly = TRUE,
cdmVersion = cdmVersion)
| /extras/VignetteDataFetch.R | permissive | datamandala/PatientLevelPrediction | R | false | false | 22,435 | r | # @file VignetteDataFetch.R
#
# Copyright 2016 Observational Health Data Sciences and Informatics
#
# This file is part of PatientLevelPrediction
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code should be used to fetch the data that is used in the vignettes.
library(SqlRender)
library(DatabaseConnector)
library(PatientLevelPrediction)
setwd("s:/temp")
options(fftempdir = "s:/FFtemp")
pw <- NULL
dbms <- "sql server"
user <- NULL
server <- "RNDUSRDHIT07.jnj.com"
cdmDatabaseSchema <- "cdm_truven_mdcd.dbo"
resultsDatabaseSchema <- "scratch.dbo"
port <- NULL
dbms <- "postgresql"
server <- "localhost/ohdsi"
user <- "postgres"
pw <- "F1r3starter"
cdmDatabaseSchema <- "cdm4_sim"
resultsDatabaseSchema <- "scratch"
port <- NULL
pw <- NULL
dbms <- "pdw"
user <- NULL
server <- "JRDUSAPSCTL01"
cdmDatabaseSchema <- "cdm_truven_mdcd_v5.dbo"
resultsDatabaseSchema <- "scratch.dbo"
oracleTempSchema <- NULL
port <- 17001
cdmVersion <- "5"
connectionDetails <- DatabaseConnector::createConnectionDetails(dbms = dbms,
server = server,
user = user,
password = pw,
port = port)
sql <- SqlRender::loadRenderTranslateSql("HospitalizationCohorts.sql",
packageName = "PatientLevelPrediction",
dbms = dbms,
cdmDatabaseSchema = cdmDatabaseSchema,
resultsDatabaseSchema = resultsDatabaseSchema,
post_time = 30,
pre_time = 365)
connection <- DatabaseConnector::connect(connectionDetails)
DatabaseConnector::executeSql(connection, sql)
# Check number of subjects per cohort:
sql <- "SELECT cohort_definition_id, COUNT(*) AS count FROM @resultsDatabaseSchema.rehospitalization GROUP BY cohort_definition_id"
sql <- SqlRender::renderSql(sql, resultsDatabaseSchema = resultsDatabaseSchema)$sql
sql <- SqlRender::translateSql(sql, targetDialect = connectionDetails$dbms)$sql
DatabaseConnector::querySql(connection, sql)
dbDisconnect(connection)
covariateSettings <- createCovariateSettings(useCovariateDemographics = TRUE,
useCovariateDemographicsGender = TRUE,
useCovariateDemographicsRace = TRUE,
useCovariateDemographicsEthnicity = TRUE,
useCovariateDemographicsAge = TRUE,
useCovariateDemographicsYear = TRUE,
useCovariateDemographicsMonth = TRUE,
useCovariateConditionOccurrence = TRUE,
useCovariateConditionOccurrence365d = TRUE,
useCovariateConditionOccurrence30d = TRUE,
useCovariateConditionOccurrenceInpt180d = TRUE,
useCovariateConditionEra = TRUE,
useCovariateConditionEraEver = TRUE,
useCovariateConditionEraOverlap = TRUE,
useCovariateConditionGroup = TRUE,
useCovariateConditionGroupMeddra = TRUE,
useCovariateConditionGroupSnomed = TRUE,
useCovariateDrugExposure = TRUE,
useCovariateDrugExposure365d = TRUE,
useCovariateDrugExposure30d = TRUE,
useCovariateDrugEra = TRUE,
useCovariateDrugEra365d = TRUE,
useCovariateDrugEra30d = TRUE,
useCovariateDrugEraOverlap = TRUE,
useCovariateDrugEraEver = TRUE,
useCovariateDrugGroup = TRUE,
useCovariateProcedureOccurrence = TRUE,
useCovariateProcedureOccurrence365d = TRUE,
useCovariateProcedureOccurrence30d = TRUE,
useCovariateProcedureGroup = TRUE,
useCovariateObservation = TRUE,
useCovariateObservation365d = TRUE,
useCovariateObservation30d = TRUE,
useCovariateObservationCount365d = TRUE,
useCovariateMeasurement = TRUE,
useCovariateMeasurement365d = TRUE,
useCovariateMeasurement30d = TRUE,
useCovariateMeasurementCount365d = TRUE,
useCovariateMeasurementBelow = TRUE,
useCovariateMeasurementAbove = TRUE,
useCovariateConceptCounts = TRUE,
useCovariateRiskScores = TRUE,
useCovariateRiskScoresCharlson = TRUE,
useCovariateRiskScoresDCSI = TRUE,
useCovariateRiskScoresCHADS2 = TRUE,
useCovariateRiskScoresCHADS2VASc = TRUE,
useCovariateInteractionYear = FALSE,
useCovariateInteractionMonth = FALSE,
excludedCovariateConceptIds = c(),
includedCovariateConceptIds = c(),
deleteCovariatesSmallCount = 100)
plpData <- getDbPlpData(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
oracleTempSchema = oracleTempSchema,
cohortDatabaseSchema = resultsDatabaseSchema,
cohortTable = "rehospitalization",
cohortIds = 1,
washoutWindow = 183,
useCohortEndDate = TRUE,
windowPersistence = 0,
covariateSettings = covariateSettings,
outcomeDatabaseSchema = resultsDatabaseSchema,
outcomeTable = "rehospitalization",
outcomeIds = 2,
firstOutcomeOnly = FALSE,
cdmVersion = cdmVersion)
savePlpData(plpData, "s:/temp/PlpVignette/plpData")
# plpData <- loadPlpData('s:/temp/PlpVignette/plpData')
means <- computeCovariateMeans(plpData = plpData, outcomeId = 2)
saveRDS(means, "s:/temp/PlpVignette/means.rds")
# plotCovariateDifferenceOfTopVariables(means)
parts <- splitData(plpData, c(0.75, 0.25))
savePlpData(parts[[1]], "s:/temp/PlpVignette/plpData_train")
savePlpData(parts[[2]], "s:/temp/PlpVignette/plpData_test")
# parts <- list(); parts[[1]] <- loadPlpData('s:/temp/PlpVignette/plpData_train'); parts[[2]] <-
# loadPlpData('s:/temp/PlpVignette/plpData_test')
model <- fitPredictiveModel(parts[[1]],
modelType = "logistic",
prior = createPrior("laplace",
exclude = c(0),
useCrossValidation = TRUE),
control = createControl(noiseLevel = "quiet",
cvType = "auto",
startingVariance = 0.001,
tolerance = 1e-07,
cvRepetitions = 10,
seed = 123,
threads = 30))
saveRDS(model, file = "s:/temp/PlpVignette/model.rds")
# model <- readRDS('s:/temp/PlpVignette/model.rds')
prediction <- predictProbabilities(model, parts[[2]])
saveRDS(prediction, file = "s:/temp/PlpVignette/prediction.rds")
# prediction <- readRDS('s:/temp/PlpVignette/prediction.rds')
computeAuc(prediction, parts[[2]])
plotRoc(prediction, parts[[2]])
plotCalibration(prediction, parts[[2]], numberOfStrata = 10)
modelDetails <- getModelDetails(model, parts[[2]])
head(modelDetails)
#### Datafetch for custom covariate builders #####
# Settings constructor for the length-of-observation covariate builder.
# The "fun" attribute tells the data-fetch code which builder function
# (getDbLooCovariateData) to call for these settings.
#
# @param useLengthOfObs Build the length-of-observation covariate?
# @return A list of class "covariateSettings" carrying a "fun" attribute.
createLooCovariateSettings <- function(useLengthOfObs = TRUE) {
  settings <- list(useLengthOfObs = useLengthOfObs)
  attr(settings, "fun") <- "getDbLooCovariateData"
  class(settings) <- "covariateSettings"
  settings
}
# Custom covariate builder (duplicate copy of the one above): builds one
# covariate per subject — days between observation-period start and
# cohort start. Dispatched via the "fun" attribute on the settings
# object created by createLooCovariateSettings().
#
# Args:
#   connection:        open DatabaseConnector connection.
#   oracleTempSchema:  Oracle temp-table schema (unused here).
#   cdmDatabaseSchema: OMOP CDM schema name.
#   cdmVersion:        CDM version string (unused here).
#   cohortTempTable:   temp table containing the cohort.
#   rowIdField:        column used as the covariate row_id.
#   covariateSettings: list from createLooCovariateSettings().
# Returns:
#   "covariateData" list (covariates, covariateRef, metaData) or NULL
#   when the covariate is disabled.
getDbLooCovariateData <- function(connection,
oracleTempSchema = NULL,
cdmDatabaseSchema,
cdmVersion = "4",
cohortTempTable = "cohort_person",
rowIdField = "subject_id",
covariateSettings) {
writeLines("Constructing length of observation covariates")
if (covariateSettings$useLengthOfObs == FALSE) {
return(NULL)
}
# Temp table names must start with a '#' in SQL Server, our source dialect:
if (substr(cohortTempTable, 1, 1) != "#") {
cohortTempTable <- paste("#", cohortTempTable, sep = "")
}
# SQL Server-dialect template; parameters are filled by renderSql below.
sql <- paste("SELECT @row_id_field AS row_id, 1 AS covariate_id,",
"DATEDIFF(DAY, cohort_start_date, observation_period_start_date)",
"AS covariate_value",
"FROM @cohort_temp_table c",
"INNER JOIN @cdm_database_schema.observation_period op",
"ON op.person_id = c.subject_id",
"WHERE cohort_start_date >= observation_period_start_date",
"AND cohort_start_date <= observation_period_end_date")
sql <- SqlRender::renderSql(sql,
cohort_temp_table = cohortTempTable,
row_id_field = rowIdField,
cdm_database_schema = cdmDatabaseSchema)$sql
# Translate to the dialect of the live connection:
sql <- SqlRender::translateSql(sql, targetDialect = attr(connection, "dbms"))$sql
# Retrieve the covariate (disk-backed ffdf):
covariates <- DatabaseConnector::querySql.ffdf(connection, sql)
# Convert column names to camelCase:
colnames(covariates) <- SqlRender::snakeCaseToCamelCase(colnames(covariates))
# Construct the one-row covariate reference:
covariateRef <- data.frame(covariateId = 1,
covariateName = "Length of observation",
analysisId = 1,
conceptId = 0)
covariateRef <- ff::as.ffdf(covariateRef)
# Keep the generated SQL and the call for provenance/debugging.
metaData <- list(sql = sql, call = match.call())
result <- list(covariates = covariates, covariateRef = covariateRef, metaData = metaData)
class(result) <- "covariateData"
return(result)
}
looCovariateSettings <- createLooCovariateSettings(useLengthOfObs = TRUE)
plpData <- getDbPlpData(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
cohortDatabaseSchema = resultsDatabaseSchema,
cohortTable = "mschuemi_stroke",
cohortIds = 1,
useCohortEndDate = TRUE,
windowPersistence = 0,
covariateSettings = looCovariateSettings,
outcomeDatabaseSchema = resultsDatabaseSchema,
outcomeTable = "mschuemi_stroke",
outcomeIds = 2,
firstOutcomeOnly = TRUE,
cdmVersion = cdmVersion)
covariateSettings <- createCovariateSettings(useCovariateDemographics = TRUE,
useCovariateDemographicsGender = TRUE,
useCovariateDemographicsRace = TRUE,
useCovariateDemographicsEthnicity = TRUE,
useCovariateDemographicsAge = TRUE,
useCovariateDemographicsYear = TRUE,
useCovariateDemographicsMonth = TRUE)
# --- OHDSI PatientLevelPrediction development script -------------------------
# Extracts patient-level prediction data (plpData) from a CDM database using a
# custom "length of observation" covariate builder, first for a stroke cohort
# and then for a rehospitalization cohort built from packaged SQL.
# NOTE(review): this reads as interactive scratch code -- variables are
# repeatedly overwritten and several statements have no lasting effect
# (see inline notes below).
looCovariateSettings <- createLooCovariateSettings(useLengthOfObs = TRUE)
# `covariateSettings` here refers to the settings object built earlier in the file.
covariateSettingsList <- list(covariateSettings, looCovariateSettings)
plpData <- getDbPlpData(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
cohortDatabaseSchema = resultsDatabaseSchema,
cohortTable = "mschuemi_stroke",
cohortIds = 1,
useCohortEndDate = TRUE,
windowPersistence = 0,
covariateSettings = covariateSettingsList,
outcomeDatabaseSchema = resultsDatabaseSchema,
outcomeTable = "mschuemi_stroke",
outcomeIds = 2,
firstOutcomeOnly = TRUE,
cdmVersion = cdmVersion)
# HDPS-style covariate settings (demographics plus 180-day ICD-9 / drug /
# procedure covariates with median- and 75th-percentile-frequency variants).
# NOTE(review): this overwrites `covariateSettings` but no later statement in
# this chunk uses it -- confirm whether it belongs to a removed code path.
covariateSettings <- createHdpsCovariateSettings(useCovariateCohortIdIs1 = FALSE,
useCovariateDemographics = TRUE,
useCovariateDemographicsGender = TRUE,
useCovariateDemographicsRace = TRUE,
useCovariateDemographicsEthnicity = TRUE,
useCovariateDemographicsAge = TRUE,
useCovariateDemographicsYear = TRUE,
useCovariateDemographicsMonth = TRUE,
useCovariateConditionOccurrence = TRUE,
useCovariate3DigitIcd9Inpatient180d = TRUE,
useCovariate3DigitIcd9Inpatient180dMedF = TRUE,
useCovariate3DigitIcd9Inpatient180d75F = TRUE,
useCovariate3DigitIcd9Ambulatory180d = TRUE,
useCovariate3DigitIcd9Ambulatory180dMedF = TRUE,
useCovariate3DigitIcd9Ambulatory180d75F = TRUE,
useCovariateDrugExposure = TRUE,
useCovariateIngredientExposure180d = TRUE,
useCovariateIngredientExposure180dMedF = TRUE,
useCovariateIngredientExposure180d75F = TRUE,
useCovariateProcedureOccurrence = TRUE,
useCovariateProcedureOccurrenceInpatient180d = TRUE,
useCovariateProcedureOccurrenceInpatient180dMedF = TRUE,
useCovariateProcedureOccurrenceInpatient180d75F = TRUE,
useCovariateProcedureOccurrenceAmbulatory180d = TRUE,
useCovariateProcedureOccurrenceAmbulatory180dMedF = TRUE,
useCovariateProcedureOccurrenceAmbulatory180d75F = TRUE,
excludedCovariateConceptIds = c(),
includedCovariateConceptIds = c(),
deleteCovariatesSmallCount = 100)
#### Datafetch for cohort attribute covariate builder #####
library(SqlRender)
library(DatabaseConnector)
library(PatientLevelPrediction)
# NOTE(review): setwd()/options() mutate global session state -- acceptable for
# an interactive script, undesirable in reusable code.
setwd("s:/temp")
options(fftempdir = "s:/FFtemp")
# Three alternative connection profiles follow (SQL Server, PostgreSQL, PDW).
# Each block overwrites the previous one, so only the LAST (PDW) takes effect.
pw <- NULL
dbms <- "sql server"
user <- NULL
server <- "RNDUSRDHIT07.jnj.com"
cdmDatabaseSchema <- "cdm_truven_mdcd.dbo"
resultsDatabaseSchema <- "scratch.dbo"
port <- NULL
dbms <- "postgresql"
server <- "localhost/ohdsi"
user <- "postgres"
# SECURITY(review): hard-coded database password committed to source control --
# read it from an environment variable or a credential store instead.
pw <- "F1r3starter"
cdmDatabaseSchema <- "cdm4_sim"
resultsDatabaseSchema <- "scratch"
port <- NULL
pw <- NULL
dbms <- "pdw"
user <- NULL
server <- "JRDUSAPSCTL01"
cdmDatabaseSchema <- "cdm_truven_mdcd_v5.dbo"
cohortDatabaseSchema <- "scratch.dbo"
oracleTempSchema <- NULL
port <- 17001
cdmVersion <- "5"
connectionDetails <- DatabaseConnector::createConnectionDetails(dbms = dbms,
server = server,
user = user,
password = pw,
port = port)
connection <- DatabaseConnector::connect(connectionDetails)
# Build cohorts:
sql <- SqlRender::loadRenderTranslateSql("HospitalizationCohorts.sql",
packageName = "PatientLevelPrediction",
dbms = dbms,
cdmDatabaseSchema = cdmDatabaseSchema,
resultsDatabaseSchema = cohortDatabaseSchema,
post_time = 30,
pre_time = 365)
DatabaseConnector::executeSql(connection, sql)
# Build cohort attributes:
sql <- SqlRender::loadRenderTranslateSql("LengthOfObsCohortAttr.sql",
packageName = "PatientLevelPrediction",
dbms = dbms,
cdm_database_schema = cdmDatabaseSchema,
cohort_database_schema = cohortDatabaseSchema,
cohort_table = "rehospitalization",
cohort_attribute_table = "loo_cohort_attribute",
attribute_definition_table = "loo_attribute_definition",
cohort_definition_ids = c(1, 2))
DatabaseConnector::executeSql(connection, sql)
# Spot-check the generated attribute table.
querySql(connection, "SELECT TOP 100 * FROM scratch.dbo.loo_cohort_attribute")
looCovariateSettings <- createCohortAttrCovariateSettings(attrDatabaseSchema = cohortDatabaseSchema,
cohortAttrTable = "loo_cohort_attribute",
attrDefinitionTable = "loo_attribute_definition",
includeAttrIds = c())
plpData <- getDbPlpData(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = "rehospitalization",
cohortIds = 1,
useCohortEndDate = TRUE,
windowPersistence = 0,
covariateSettings = looCovariateSettings,
outcomeDatabaseSchema = cohortDatabaseSchema,
outcomeTable = "rehospitalization",
outcomeIds = 2,
firstOutcomeOnly = TRUE,
cdmVersion = cdmVersion)
summary(plpData)
plpData$covariates
# NOTE(review): the DROP TABLE statement below is rendered and translated but
# never passed to executeSql(), so the table is not actually dropped.
sql <- "DROP TABLE @cohort_database_schema.rehospitalization"
sql <- SqlRender::renderSql(sql, cohort_database_schema = cohortDatabaseSchema)$sql
sql <- SqlRender::translateSql(sql, targetDialect = attr(connection, "dbms"))$sql
looCovariateSettings <- createCohortAttrCovariateSettings(attrDatabaseSchema = cohortDatabaseSchema,
cohortAttrTable = "loo_cohort_attribute",
attrDefinitionTable = "loo_attribute_definition",
includeAttrIds = c())
# NOTE(review): the same settings object appears twice in this list; by analogy
# with the stroke example above, two DIFFERENT settings objects were probably
# intended -- confirm before relying on this.
covariateSettingsList <- list(looCovariateSettings, looCovariateSettings)
plpData <- getDbPlpData(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = "rehospitalization",
cohortIds = 1,
useCohortEndDate = TRUE,
windowPersistence = 0,
covariateSettings = covariateSettingsList,
outcomeDatabaseSchema = cohortDatabaseSchema,
outcomeTable = "rehospitalization",
outcomeIds = 2,
firstOutcomeOnly = TRUE,
cdmVersion = cdmVersion)
|
# Auto-extracted example code for AHMbook::simOcc(): simulates
# detection/non-detection data for static occupancy models under a range of
# parameter settings and prints the structure of each simulated dataset.
library(AHMbook)
### Name: simOcc
### Title: Simulate data for static occupancy models under wide range of
### conditions
### Aliases: simOcc
### ** Examples
# Generate data with the default arguments and look at the structure:
tmp <- simOcc()
str(tmp)
# Simplest possible occupancy model, with constant occupancy and detection
str(simOcc(mean.occ=0.6, beta1=0, beta2=0, beta3=0, mean.det=0.3, time.effects=c(0, 0),
alpha1=0, alpha2=0, alpha3=0, sd.lp=0, b=0))
# psi = 1 (i.e., species occurs at every site)
str(simOcc(mean.occ=1))
# p = 1 (i.e., species is always detected when it occurs)
str(simOcc(mean.det=1))
# Other potentially interesting settings include these:
str(simOcc(J = 2)) # Only 2 surveys
str(simOcc(M = 1, J = 100)) # No spatial replicates, but 100 measurements
str(simOcc(beta3 = 1)) # Including interaction elev-wind on p
str(simOcc(mean.occ = 0.96)) # A really common species
str(simOcc(mean.occ = 0.05)) # A really rare species
str(simOcc(mean.det = 0.96)) # A really easy species
str(simOcc(mean.det = 0.05)) # A really hard species
str(simOcc(mean.det = 0)) # The dreaded invisible species
str(simOcc(alpha1=-2, beta1=2)) # Opposing effects of elev on psi and p
str(simOcc(J = 10, time.effects = c(-5, 5))) # Huge time effects on p
str(simOcc(sd.lp = 10)) # Huge (random) site effects on p
str(simOcc(J = 10, b = 0)) # No behavioural response in p
str(simOcc(J = 10, b = 2)) # Trap happiness
str(simOcc(J = 10, b = -2)) # Trap shyness
| /data/genthat_extracted_code/AHMbook/examples/simOcc.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,579 | r | library(AHMbook)
# Auto-extracted example code for AHMbook::simOcc(): simulates
# detection/non-detection data for static occupancy models under a range of
# parameter settings and prints the structure of each simulated dataset.
# (Assumes AHMbook is already attached via library(AHMbook) above.)
### Name: simOcc
### Title: Simulate data for static occupancy models under wide range of
### conditions
### Aliases: simOcc
### ** Examples
# Generate data with the default arguments and look at the structure:
tmp <- simOcc()
str(tmp)
# Simplest possible occupancy model, with constant occupancy and detection
str(simOcc(mean.occ=0.6, beta1=0, beta2=0, beta3=0, mean.det=0.3, time.effects=c(0, 0),
alpha1=0, alpha2=0, alpha3=0, sd.lp=0, b=0))
# psi = 1 (i.e., species occurs at every site)
str(simOcc(mean.occ=1))
# p = 1 (i.e., species is always detected when it occurs)
str(simOcc(mean.det=1))
# Other potentially interesting settings include these:
str(simOcc(J = 2)) # Only 2 surveys
str(simOcc(M = 1, J = 100)) # No spatial replicates, but 100 measurements
str(simOcc(beta3 = 1)) # Including interaction elev-wind on p
str(simOcc(mean.occ = 0.96)) # A really common species
str(simOcc(mean.occ = 0.05)) # A really rare species
str(simOcc(mean.det = 0.96)) # A really easy species
str(simOcc(mean.det = 0.05)) # A really hard species
str(simOcc(mean.det = 0)) # The dreaded invisible species
str(simOcc(alpha1=-2, beta1=2)) # Opposing effects of elev on psi and p
str(simOcc(J = 10, time.effects = c(-5, 5))) # Huge time effects on p
str(simOcc(sd.lp = 10)) # Huge (random) site effects on p
str(simOcc(J = 10, b = 0)) # No behavioural response in p
str(simOcc(J = 10, b = 2)) # Trap happiness
str(simOcc(J = 10, b = -2)) # Trap shyness
|
###########################################################
###########################################################
###
### Collection of very basic functions
###
### File created by Gjalt-Jorn Peters. Questions? You can
### contact me through http://behaviorchange.eu.
###
###########################################################
###########################################################
# Strip the leading zero from decimal numbers in a character vector,
# e.g. "0.45" -> ".45" and "-0.32" -> "-.32" (APA style for bounded statistics).
# A word boundary anchors the match so interior digits are untouched:
# the previous pattern "0\\." also mangled "10.5" into "1.5".
noZero <- function(str) {
  gsub("\\b0\\.", ".", str)
}
# Format a Pearson correlation: round to `digits` decimal places, then drop
# the leading zero (APA style), e.g. formatR(0.456, 2) -> ".46".
formatR <- function(r, digits) {
  rounded <- round(r, digits)
  noZero(rounded)
}
# Scalar alternative to ifelse(): base ifelse() works elementwise and cannot
# return arbitrary objects, so this returns `ifTrue` or `ifFalse` wholesale
# depending on a single logical `condition`.
ifelseObj <- function(condition, ifTrue, ifFalse) {
  if (condition) ifTrue else ifFalse
}
# Vectorized parity test: TRUE where the element is odd.
# After Marc Schwartz's suggestion on R-help (Thu Jul 1 19:10:28 CEST 2010),
# https://stat.ethz.ch/pipermail/r-help/2010-July/244299.html
is.odd <- function(vector) {
  vector %% 2 != 0
}
# Vectorized parity test: TRUE where the element is even (counterpart of is.odd).
is.even <- function(vector) {
  vector %% 2 == 0
}
# Case-insensitive variant of %in%: both operands are upper-cased before
# matching, so "a" %IN% c("A", "B") is TRUE.
`%IN%` <- function(find, table) {
  toupper(find) %in% toupper(table)
}
# cat() with an empty default separator, mirroring paste0(): concatenates its
# arguments and prints them immediately. Returns cat()'s invisible NULL.
cat0 <- function(..., sep = "") {
  cat(..., sep = sep)
}
# Elementwise test for TRUE with explicit control over missing values:
# positions where `x` is NA yield `na` (default FALSE); all other positions
# yield the result of x == TRUE.
isTrue <- function(x, na = FALSE) {
  result <- x == TRUE
  result[is.na(x)] <- na
  result
}
# Test whether `x` is a single, non-missing numeric value.
# The original nested-if version called is.na(x) before checking type or
# length, which errors on zero-length input ("argument is of length zero") and,
# in R >= 4.2, on vectors of length > 1 ("the condition has length > 1").
# The short-circuit chain below is safe for any input and returns FALSE for
# anything that is not exactly one non-NA number.
is.nr <- function(x) {
  !is.null(x) && is.numeric(x) && length(x) == 1 && !is.na(x)
}
| /userfriendlyscience/R/userfriendlyscienceBasics.R | no_license | ingted/R-Examples | R | false | false | 1,859 | r | ###########################################################
###########################################################
###
### Collection of very basic functions
###
### File created by Gjalt-Jorn Peters. Questions? You can
### contact me through http://behaviorchange.eu.
###
###########################################################
###########################################################
# Strip the leading zero from decimal numbers in a character vector,
# e.g. "0.45" -> ".45" and "-0.32" -> "-.32" (APA style for bounded statistics).
# A word boundary anchors the match so interior digits are untouched:
# the previous pattern "0\\." also mangled "10.5" into "1.5".
noZero <- function(str) {
  gsub("\\b0\\.", ".", str)
}
# Format a Pearson correlation: round to `digits` decimal places, then drop
# the leading zero (APA style), e.g. formatR(0.456, 2) -> ".46".
formatR <- function(r, digits) {
  rounded <- round(r, digits)
  noZero(rounded)
}
# Scalar alternative to ifelse(): base ifelse() works elementwise and cannot
# return arbitrary objects, so this returns `ifTrue` or `ifFalse` wholesale
# depending on a single logical `condition`.
ifelseObj <- function(condition, ifTrue, ifFalse) {
  if (condition) ifTrue else ifFalse
}
# Vectorized parity test: TRUE where the element is odd.
# After Marc Schwartz's suggestion on R-help (Thu Jul 1 19:10:28 CEST 2010),
# https://stat.ethz.ch/pipermail/r-help/2010-July/244299.html
is.odd <- function(vector) {
  vector %% 2 != 0
}
# Vectorized parity test: TRUE where the element is even (counterpart of is.odd).
is.even <- function(vector) {
  vector %% 2 == 0
}
# Case-insensitive variant of %in%: both operands are upper-cased before
# matching, so "a" %IN% c("A", "B") is TRUE.
`%IN%` <- function(find, table) {
  toupper(find) %in% toupper(table)
}
# cat() with an empty default separator, mirroring paste0(): concatenates its
# arguments and prints them immediately. Returns cat()'s invisible NULL.
cat0 <- function(..., sep = "") {
  cat(..., sep = sep)
}
# Elementwise test for TRUE with explicit control over missing values:
# positions where `x` is NA yield `na` (default FALSE); all other positions
# yield the result of x == TRUE.
isTrue <- function(x, na = FALSE) {
  result <- x == TRUE
  result[is.na(x)] <- na
  result
}
# Test whether `x` is a single, non-missing numeric value.
# The original nested-if version called is.na(x) before checking type or
# length, which errors on zero-length input ("argument is of length zero") and,
# in R >= 4.2, on vectors of length > 1 ("the condition has length > 1").
# The short-circuit chain below is safe for any input and returns FALSE for
# anything that is not exactly one non-NA number.
is.nr <- function(x) {
  !is.null(x) && is.numeric(x) && length(x) == 1 && !is.na(x)
}
|
### Function fine litter fall:
# This function uses data to calculate NPP from fine litterfall.
## Read-in data:
# this is what we have in db:
# names(data_flf) <- c("plot_code", "year","month", "day","litterfall_trap_num", "litterfall_trap_size_m2","leaves_g_per_trap","twigs_g_per_trap","flowers_g_per_trap","fruits_g_per_trap",
# "bromeliads_g_per_trap", "epiphytes_g_per_trap","other_g_per_trap", "palm_leaves_g", "palm_flower_g", "palm_fruit_g", "quality_code", "comments")
# plotsize = 1 ha ### TO DO: Different plot size is not an option yet.
# Third-party dependencies for the litterfall workflow.
library(zoo)
# library(), not require(): require() merely returns FALSE when the package is
# missing, letting the script continue and fail later with a confusing error.
library(ggplot2)
library(dplyr)
# Compute NPP from fine litterfall for every plot in the input data.
#
# Args:
#   data_flf: data frame of litterfall trap measurements, or a path to a CSV
#     file containing them (see `flf_column_types` below for the schema).
#   ...: forwarded to flf_oneplot(), e.g. ret = "monthly.means.ts".
#   ret_type: "concat" (default) to row-bind all plot results into one data
#     frame, or "list" for a named list with one element per plot.
#
# Returns: per-plot monthly litterfall summaries produced by flf_oneplot().
#
# Fixes vs. previous version: class(x) != "data.frame" (errors under R >= 4.2
# for multi-class objects such as tibbles) replaced by !is.data.frame();
# T replaced by TRUE; results collected in a preallocated list and bound once
# instead of growing a data frame inside the loop (O(n^2)).
flf <- function(data_flf, ..., ret_type = c("concat", "list")) {
  # Locate and source the shared helper functions next to this script
  # (provides get_time_diffs() used by flf_oneplot()).
  script.dir <- function() {
    getSrcDirectory(script.dir);
  }
  source(paste(script.dir(), "functions.r", sep = "/"))
  # Expected input schema. Currently documentation-only: the set_df_coltypes()
  # call below is disabled (it errored -- see TO DO).
  flf_column_types = c(
    "plot_code" = "character",
    "year" = "integer",
    "month" = "integer",
    "day" = "integer",
    "litterfall_trap_num" = "Factor",
    "litterfall_trap_size_m2" = "numeric",
    "leaves_g_per_trap" = "numeric",
    "twigs_g_per_trap" = "numeric",
    "flowers_g_per_trap" = "numeric",
    "fruits_g_per_trap" = "numeric",
    "seeds_g_per_trap" = "numeric",
    "bromeliads_g_per_trap" = "numeric",
    "epiphytes_g_per_trap" = "numeric",
    "other_g_per_trap" = "numeric",
    "palm_leaves_g_per_trap" = "numeric",
    "palm_flower_g_per_trap" = "numeric",
    "palm_fruit_g_per_trap" = "numeric",
    "total_litter_g_per_trap" = "numeric",
    "quality_code" = "factor",
    "comments" = "character"
  )
  # set column datatypes as defined above # TO DO; this doesn't work. Error: attempt to apply non-function.
  # data_flf = set_df_coltypes(data_flf, flf_column_types)
  if (!is.data.frame(data_flf)) { # not a data frame: assume it's a path+filename
    data_flf <- read.csv(data_flf)
  }
  ret_type <- match.arg(ret_type)
  plots <- unique(data_flf$plot_code)
  pb <- txtProgressBar(max = length(plots), style = 3)
  output <- vector("list", length(plots))
  names(output) <- plots
  for (i in seq_along(plots)) {
    output[[i]] <- flf_oneplot(data_flf, plots[i], ...)
    setTxtProgressBar(pb, i)
  }
  close(pb)
  if (ret_type == "list") { # return plot results in different list elements
    return(output)
  }
  # "concat": a single rbind over all plot results.
  do.call(rbind, unname(output))
}
# START HERE!!
# Compute monthly NPP from fine litterfall for a single plot.
#
# Args:
#   data_flf: data frame (or CSV path) of raw litterfall trap weights in grams
#     per trap; traps are assumed to be 0.25 m2 (see unit conversions below).
#   plotname: plot_code to analyse; defaults to the first plot in the data.
#   ret: "monthly.means.subplot" for per-trap monthly means (data4) or
#     "monthly.means.ts" (default) for plot-level monthly means (data5).
#   verbose: currently unused -- TODO confirm intent or remove.
#
# Side effect: assigns a data frame of problem rows to the GLOBAL variable
# `error_df_g` via `<<-` (see NOTE at that line).
flf_oneplot <- function(data_flf, plotname, ret="monthly.means.ts", verbose = T) {
# add plotsize=1
# ret = monthly.means.subplot or monthly.means.ts for plot averages.
# verbose = print out unique id's for debugging or otherwise
# NOTE(review): class(x) != "data.frame" errors for multi-class objects
# (e.g. tibbles) under R >= 4.2; !is.data.frame() would be safer.
if (class(data_flf) != "data.frame") { # if it's not a dataframe, assume it's a path+filename
data_flf <- read.csv(data_flf)
}
# some data was getting imported in the wrong format
data_flf$leaves_g_per_trap = as.numeric(data_flf$leaves_g_per_trap)
data_flf$fruits_g_per_trap = as.numeric(data_flf$fruits_g_per_trap)
data_flf$seeds_g_per_trap = as.numeric(as.character(data_flf$seeds_g_per_trap))
data_flf$day = as.integer(as.character(data_flf$day))
# new data frame
data_flf2 <- c()
# define each parameter
if (missing(plotname)) { # calculate for first-mentioned plot if plot not specified. rethink whether we should really have this here...
plotname = data_flf$plot_code[1]
}
data_flf2 = subset(data_flf, plot_code == plotname)
# Shorten column names and add a Date column built from year/month/day.
data_flf2 = data_flf2 %>% dplyr::rename(plot = plot_code,
num = litterfall_trap_num,
leaves = leaves_g_per_trap,
twigs = twigs_g_per_trap,
flowers = flowers_g_per_trap,
fruits = fruits_g_per_trap,
brom = bromeliads_g_per_trap,
epi = epiphytes_g_per_trap,
other = other_g_per_trap) %>%
dplyr::mutate(seeds = NA,
date = as.Date(paste(data_flf2$year, data_flf2$month, data_flf2$day, sep="."), format="%Y.%m.%d"))
# Calculate total litterfall (sum of branches, leaves, flowers, fruits, seeds, Broms, Epiphs, other...):
# (seeds is all-NA at this point, so it contributes nothing under na.rm = TRUE)
x <- cbind(data_flf2$leaves, data_flf2$twigs, data_flf2$flowers, data_flf2$fruits, data_flf2$seeds, data_flf2$brom, data_flf2$epi, data_flf2$other)
data_flf2$total <- rowSums(x, na.rm = T)
# In some cases, only total litterfall is recorded
total_only = data_flf2$total == 0 & ! is.na(data_flf2$total_litter_g_per_trap)
data_flf2[total_only,]$total = data_flf2[total_only,]$total_litter_g_per_trap
### Sanity check of the inputs.
# NOTE(review): comment says > 1000 but the code uses > 1500 -- confirm the
# intended outlier threshold.
data_flf2$total[which(data_flf2$total>1500)] <- NA # remove outliers with totalf > 1000
data_flf2$total[which(data_flf2$total<0)] <- NA # remove implausible totallf (negative litter)
# Calculate leaf area ****need density from photos, we assume average SLA = 100g/m2
# leaflaifA = leaffA/100 # convert to area
### flf per trap per day
# TO DO: For the first collection interval, assume 14 days. At the moment, the code ignores the first collection.
# codeb identifies a trap; codew identifies one collection of one trap.
data_flf2$codeb <- paste(data_flf2$plot, data_flf2$num, sep=".")
data_flf2$codew <- paste(data_flf2$plot, data_flf2$num, data_flf2$year, data_flf2$month, data_flf2$day, sep=".")
uid <- unique(data_flf2$codeb)
# Accumulators for per-collection daily rates (xx = id, yy = interval length,
# aa..hh = leaves/twigs/flowers/fruits/brom/epi/other/total in g/trap/day).
xx <- c()
yy <- c()
aa <- c()
bb <- c()
cc <- c()
dd <- c()
ee <- c()
ff <- c()
gg <- c()
hh <- c()
# NOTE(review): the loop runs over ROWS (length(data_flf2$num)) but indexes
# uid, which has one entry per TRAP. For i > length(uid), uid[i] is NA, the
# subset is empty, and a spurious row is appended to error_df below.
# seq_along(uid) was probably intended -- confirm before changing, since
# consumers of error_df_g may rely on the current contents.
for (i in 1:length(data_flf2$num)) {
sub <- subset(data_flf2, subset=(data_flf2$codeb == uid[i]))
if(length(sub$codeb) > 1) {
#meas_int <- difftime(sub$date[1:(length(sub$date)-1)], sub$date[2:length(sub$date)], units="days")
# get_time_diffs() comes from the sourced functions.r helper file.
meas_int <- get_time_diffs(sub$date)
# Drop the first collection of each trap (no preceding interval), then
# divide each collected weight by the days since the previous collection.
aleaves <- tail(sub$leaves,-1)
atwigs <- tail(sub$twigs,-1)
aflowers <- tail(sub$flowers,-1)
afruits <- tail(sub$fruits,-1)
abrom <- tail(sub$brom,-1)
aepi <- tail(sub$epi,-1)
aother <- tail(sub$other,-1)
atotal <- tail(sub$total,-1)
bleaves <- aleaves/(meas_int)
btwigs <- atwigs/(meas_int)
bflowers <- aflowers/(meas_int)
bfruits <- afruits/(meas_int)
bbrom <- abrom/(meas_int)
bepi <- aepi/(meas_int)
bother <- aother/(meas_int)
btotal <- atotal/(meas_int)
id <- tail(sub$codew,-1)
xx <- c(xx, id)
yy <- c(yy, meas_int)
aa <- c(aa, bleaves)
bb <- c(bb, btwigs)
cc <- c(cc, bflowers)
dd <- c(dd, bfruits)
ee <- c(ee, bbrom)
ff <- c(ff, bepi)
gg <- c(gg, bother)
hh <- c(hh, btotal)
#print(xx)
} else {
# print(paste("row number:", i))
# print(paste("trap number:", sub$num))
# print(paste("subset length:", length(sub$codeb)))
if(exists("error_df")) {
error_df <- rbind(error_df, data.frame(row = i, trap = sub$num[i], sub_len = length(sub$codeb)))
} else {
error_df <- data.frame(row = i, trap = sub$num[i], sub_len = length(sub$codeb))
}
}
}
# NOTE(review): if no iteration ever hit the else-branch, error_df would be
# undefined and this line would error; in practice the row/uid overrun noted
# above always creates it. Also `<<-` writes into the global environment.
error_df_g <<- error_df # assigning to global variable outside the function.
print(paste(nrow(error_df), "errors in the data. See error_df_g."))
data2 <- data.frame(xx, yy, aa, bb, cc, dd, ee, ff, gg, hh)
colnames(data2) <- c("id", "meas_int_days", "bleavesflf_g_trap_day", "btwigs", "bflowers", "bfruits", "bbrom", "bepi", "bother", "btotal")
# get day, month, year from data_flf2
#data3a <- sqldf("SELECT data_flf2.*, data2.* FROM data2 JOIN data_flf2 ON data2.id = data_flf2.codew")
data_flf2$id <- data_flf2$codew
data3 <- merge(data_flf2, data2, by = "id")
# Re-coerce merged rate columns to numeric (data.frame() above may have
# produced factors/characters); the b* rate columns overwrite the raw columns.
data3$leavesflf_g_trap_day <- as.numeric(as.character(data3$bleavesflf_g_trap_day))
data3$meas_int_days <- as.numeric(as.character(data3$meas_int_days))
data3$twigs <- as.numeric(as.character(data3$btwigs))
data3$flowers <- as.numeric(as.character(data3$bflowers))
data3$fruits <- as.numeric(as.character(data3$bfruits))
data3$brom <- as.numeric(as.character(data3$bbrom))
data3$epi <- as.numeric(as.character(data3$bepi))
data3$other <- as.numeric(as.character(data3$bother))
data3$total <- as.numeric(as.character(data3$btotal))
### Conversions: flf per ha per day (for each trap)
# Raw data is in g / litter trap = g / 0.25m2
# Convert to ha: *(10000/0.25)
# Convert to Mg: *1 g = 0.000001 Mg
# Convert to C: *0.49
# The trailing *30 scales g/trap/day rates to a nominal 30-day month.
data3$leavesflf_MgC_ha_month <- (((data3$leavesflf_g_trap_day*(10000/0.25))*0.000001)*0.49)*30
data3$twigsflf <- (((data3$twigs*(10000/0.25))*0.000001)*0.49)*30
data3$flowersflf <- (((data3$flowers*(10000/0.25))*0.000001)*0.49)*30
data3$fruitsflf <- (((data3$fruits*(10000/0.25))*0.000001)*0.49)*30
data3$bromflf <- (((data3$brom*(10000/0.25))*0.000001)*0.49)*30
data3$epiflf <- (((data3$epi*(10000/0.25))*0.000001)*0.49)*30
data3$otherflf <- (((data3$other*(10000/0.25))*0.000001)*0.49)*30
data3$totalflf <- (((data3$total*(10000/0.25))*0.000001)*0.49)*30
# flf per ha per month (for each trap)
data4 = data3 %>% group_by(plot, num, year, month) %>%
dplyr::summarize(leavesflf_MgC_ha_month_trap = mean(leavesflf_MgC_ha_month, na.rm = T),
twigsflf_MgC_ha_month_trap = mean(twigsflf, na.rm = T),
flowersflf_MgC_ha_month_trap = mean(flowersflf, na.rm = T),
fruitsflf_MgC_ha_month_trap = mean(fruitsflf, na.rm = T),
bromflf_MgC_ha_month_trap = mean(bromflf, na.rm = T),
epiflf_MgC_ha_month_trap = mean(epiflf, na.rm = T),
otherflf_MgC_ha_month_trap = mean(otherflf, na.rm = T),
totalflf_MgC_ha_month_trap = mean(totalflf, na.rm = T),
interval = - mean(meas_int_days, na.rm = T),
sd_leavesflf = sd(leavesflf_MgC_ha_month, na.rm = T),
sd_twigsflf = sd(twigsflf, na.rm = T),
sd_flowersflf = sd(flowersflf, na.rm = T),
sd_fruitsflf = sd(fruitsflf, na.rm = T),
sd_bromflf = sd(bromflf, na.rm = T),
sd_epiflf = sd(epiflf, na.rm = T),
sd_otherflf = sd(otherflf, na.rm = T),
sd_totalflf = sd(totalflf, na.rm = T)) %>%
dplyr::rename(litterfall_trap_num = num)
# calculate standard error sd/sqrt(length(unique(data3$year)))
data4$se_leavesflf <- data4$sd_leavesflf/sqrt(length(unique(data3$year)))
data4$se_twigsflf <- data4$sd_twigsflf/sqrt(length(unique(data3$year)))
data4$se_flowersflf <- data4$sd_flowersflf/sqrt(length(unique(data3$year)))
data4$se_fruitsflf <- data4$sd_fruitsflf/sqrt(length(unique(data3$year)))
data4$se_bromflf <- data4$sd_bromflf/sqrt(length(unique(data3$year)))
data4$se_epiflf <- data4$sd_epiflf/sqrt(length(unique(data3$year)))
data4$se_otherflf <- data4$sd_otherflf/sqrt(length(unique(data3$year)))
data4$se_totalflf <- data4$sd_totalflf/sqrt(length(unique(data3$year)))
# flf per ha per month (average of all the traps)
# NOTE(review): inside this summarise() the sd_* columns are computed AFTER
# columns of the SAME name (e.g. leavesflf_MgC_ha_month) have been redefined
# as length-1 means, so dplyr masks the originals and sd() of a single value
# is NA; the sd/se columns of data5 are therefore all NA. In data4 above the
# "_trap" suffix avoids the collision. Confirm and rename to fix.
data5 = data3 %>% group_by(plot, year, month) %>%
dplyr::summarize(leavesflf_MgC_ha_month = mean(leavesflf_MgC_ha_month, na.rm = T),
twigsflf_MgC_ha_month = mean(twigsflf, na.rm = T),
flowersflf_MgC_ha_month = mean(flowersflf, na.rm = T),
fruitsflf_MgC_ha_month = mean(fruitsflf, na.rm = T),
bromflf_MgC_ha_month = mean(bromflf, na.rm = T),
epiflf_MgC_ha_month = mean(epiflf, na.rm = T),
otherflf_MgC_ha_month = mean(otherflf, na.rm = T),
totalflf_MgC_ha_month = mean(totalflf, na.rm = T),
sd_leavesflf = sd(leavesflf_MgC_ha_month, na.rm = T),
sd_twigsflf = sd(twigsflf, na.rm = T),
sd_flowersflf = sd(flowersflf, na.rm = T),
sd_fruitsflf = sd(fruitsflf, na.rm = T),
sd_bromflf = sd(bromflf, na.rm = T),
sd_epiflf = sd(epiflf, na.rm = T),
sd_otherflf = sd(otherflf, na.rm = T),
sd_totalflf = sd(totalflf, na.rm = T))
# calculate standard error sd/sqrt(length(unique(data3$num)))
data5[data5=="-Inf"] <- NaN
data5$se_leavesflf <- data5$sd_leavesflf/sqrt(length(unique(data3$num)))
data5$se_twigsflf <- data5$sd_twigsflf/sqrt(length(unique(data3$num)))
data5$se_flowersflf <- data5$sd_flowersflf/sqrt(length(unique(data3$num)))
data5$se_fruitsflf <- data5$sd_fruitsflf/sqrt(length(unique(data3$num)))
data5$se_bromflf <- data5$sd_bromflf/sqrt(length(unique(data3$num)))
data5$se_epiflf <- data5$sd_epiflf/sqrt(length(unique(data3$num)))
data5$se_otherflf <- data5$sd_otherflf/sqrt(length(unique(data3$num)))
data5$se_totalflf <- data5$sd_totalflf/sqrt(length(unique(data3$num)))
# NPP litterfall in g m-2 mo-1
# NOTE(review): totalflf_MgC_ha_month already includes the 0.49 carbon
# fraction, so multiplying by 0.49 again looks like double-counting; also
# MgC/ha -> g/m2 is *100, not *10000*0.000001 (= *0.01). TODO confirm units.
data5$totalflf_g_m2_mo <- data5$totalflf_MgC_ha_month * 0.49 * 10000 * 0.000001
# NOTE(review): yy is assigned here but never used afterwards.
yy = data.frame(data5)
# Return either monthly means (ret="monthly.means") or annual means (ret="annual.means")
switch(ret,
monthly.means.subplot = {return(data4)},
monthly.means.ts = {return(data5)}
)
}
| /a_Archive/flf_2016.R | no_license | OxfordEcosystemsLab/GEMcarbon.R | R | false | false | 14,406 | r | ### Function fine litter fall:
# This function uses data to calculate NPP from fine litterfall.
## Read-in data:
# this is what we have in db:
# names(data_flf) <- c("plot_code", "year","month", "day","litterfall_trap_num", "litterfall_trap_size_m2","leaves_g_per_trap","twigs_g_per_trap","flowers_g_per_trap","fruits_g_per_trap",
# "bromeliads_g_per_trap", "epiphytes_g_per_trap","other_g_per_trap", "palm_leaves_g", "palm_flower_g", "palm_fruit_g", "quality_code", "comments")
# plotsize = 1 ha ### TO DO: Different plot size is not an option yet.
# Third-party dependencies for the litterfall workflow.
library(zoo)
# library(), not require(): require() merely returns FALSE when the package is
# missing, letting the script continue and fail later with a confusing error.
library(ggplot2)
library(dplyr)
# Compute NPP from fine litterfall for every plot in the input data.
#
# Args:
#   data_flf: data frame of litterfall trap measurements, or a path to a CSV
#     file containing them (see `flf_column_types` below for the schema).
#   ...: forwarded to flf_oneplot(), e.g. ret = "monthly.means.ts".
#   ret_type: "concat" (default) to row-bind all plot results into one data
#     frame, or "list" for a named list with one element per plot.
#
# Returns: per-plot monthly litterfall summaries produced by flf_oneplot().
#
# Fixes vs. previous version: class(x) != "data.frame" (errors under R >= 4.2
# for multi-class objects such as tibbles) replaced by !is.data.frame();
# T replaced by TRUE; results collected in a preallocated list and bound once
# instead of growing a data frame inside the loop (O(n^2)).
flf <- function(data_flf, ..., ret_type = c("concat", "list")) {
  # Locate and source the shared helper functions next to this script
  # (provides get_time_diffs() used by flf_oneplot()).
  script.dir <- function() {
    getSrcDirectory(script.dir);
  }
  source(paste(script.dir(), "functions.r", sep = "/"))
  # Expected input schema. Currently documentation-only: the set_df_coltypes()
  # call below is disabled (it errored -- see TO DO).
  flf_column_types = c(
    "plot_code" = "character",
    "year" = "integer",
    "month" = "integer",
    "day" = "integer",
    "litterfall_trap_num" = "Factor",
    "litterfall_trap_size_m2" = "numeric",
    "leaves_g_per_trap" = "numeric",
    "twigs_g_per_trap" = "numeric",
    "flowers_g_per_trap" = "numeric",
    "fruits_g_per_trap" = "numeric",
    "seeds_g_per_trap" = "numeric",
    "bromeliads_g_per_trap" = "numeric",
    "epiphytes_g_per_trap" = "numeric",
    "other_g_per_trap" = "numeric",
    "palm_leaves_g_per_trap" = "numeric",
    "palm_flower_g_per_trap" = "numeric",
    "palm_fruit_g_per_trap" = "numeric",
    "total_litter_g_per_trap" = "numeric",
    "quality_code" = "factor",
    "comments" = "character"
  )
  # set column datatypes as defined above # TO DO; this doesn't work. Error: attempt to apply non-function.
  # data_flf = set_df_coltypes(data_flf, flf_column_types)
  if (!is.data.frame(data_flf)) { # not a data frame: assume it's a path+filename
    data_flf <- read.csv(data_flf)
  }
  ret_type <- match.arg(ret_type)
  plots <- unique(data_flf$plot_code)
  pb <- txtProgressBar(max = length(plots), style = 3)
  output <- vector("list", length(plots))
  names(output) <- plots
  for (i in seq_along(plots)) {
    output[[i]] <- flf_oneplot(data_flf, plots[i], ...)
    setTxtProgressBar(pb, i)
  }
  close(pb)
  if (ret_type == "list") { # return plot results in different list elements
    return(output)
  }
  # "concat": a single rbind over all plot results.
  do.call(rbind, unname(output))
}
# START HERE!!
# Compute monthly NPP from fine litterfall for a single plot.
#
# Args:
#   data_flf: data frame (or CSV path) of raw litterfall trap weights in grams
#     per trap; traps are assumed to be 0.25 m2 (see unit conversions below).
#   plotname: plot_code to analyse; defaults to the first plot in the data.
#   ret: "monthly.means.subplot" for per-trap monthly means (data4) or
#     "monthly.means.ts" (default) for plot-level monthly means (data5).
#   verbose: currently unused -- TODO confirm intent or remove.
#
# Side effect: assigns a data frame of problem rows to the GLOBAL variable
# `error_df_g` via `<<-` (see NOTE at that line).
flf_oneplot <- function(data_flf, plotname, ret="monthly.means.ts", verbose = T) {
# add plotsize=1
# ret = monthly.means.subplot or monthly.means.ts for plot averages.
# verbose = print out unique id's for debugging or otherwise
# NOTE(review): class(x) != "data.frame" errors for multi-class objects
# (e.g. tibbles) under R >= 4.2; !is.data.frame() would be safer.
if (class(data_flf) != "data.frame") { # if it's not a dataframe, assume it's a path+filename
data_flf <- read.csv(data_flf)
}
# some data was getting imported in the wrong format
data_flf$leaves_g_per_trap = as.numeric(data_flf$leaves_g_per_trap)
data_flf$fruits_g_per_trap = as.numeric(data_flf$fruits_g_per_trap)
data_flf$seeds_g_per_trap = as.numeric(as.character(data_flf$seeds_g_per_trap))
data_flf$day = as.integer(as.character(data_flf$day))
# new data frame
data_flf2 <- c()
# define each parameter
if (missing(plotname)) { # calculate for first-mentioned plot if plot not specified. rethink whether we should really have this here...
plotname = data_flf$plot_code[1]
}
data_flf2 = subset(data_flf, plot_code == plotname)
# Shorten column names and add a Date column built from year/month/day.
data_flf2 = data_flf2 %>% dplyr::rename(plot = plot_code,
num = litterfall_trap_num,
leaves = leaves_g_per_trap,
twigs = twigs_g_per_trap,
flowers = flowers_g_per_trap,
fruits = fruits_g_per_trap,
brom = bromeliads_g_per_trap,
epi = epiphytes_g_per_trap,
other = other_g_per_trap) %>%
dplyr::mutate(seeds = NA,
date = as.Date(paste(data_flf2$year, data_flf2$month, data_flf2$day, sep="."), format="%Y.%m.%d"))
# Calculate total litterfall (sum of branches, leaves, flowers, fruits, seeds, Broms, Epiphs, other...):
# (seeds is all-NA at this point, so it contributes nothing under na.rm = TRUE)
x <- cbind(data_flf2$leaves, data_flf2$twigs, data_flf2$flowers, data_flf2$fruits, data_flf2$seeds, data_flf2$brom, data_flf2$epi, data_flf2$other)
data_flf2$total <- rowSums(x, na.rm = T)
# In some cases, only total litterfall is recorded
total_only = data_flf2$total == 0 & ! is.na(data_flf2$total_litter_g_per_trap)
data_flf2[total_only,]$total = data_flf2[total_only,]$total_litter_g_per_trap
### Sanity check of the inputs.
# NOTE(review): comment says > 1000 but the code uses > 1500 -- confirm the
# intended outlier threshold.
data_flf2$total[which(data_flf2$total>1500)] <- NA # remove outliers with totalf > 1000
data_flf2$total[which(data_flf2$total<0)] <- NA # remove implausible totallf (negative litter)
# Calculate leaf area ****need density from photos, we assume average SLA = 100g/m2
# leaflaifA = leaffA/100 # convert to area
### flf per trap per day
# TO DO: For the first collection interval, assume 14 days. At the moment, the code ignores the first collection.
# codeb identifies a trap; codew identifies one collection of one trap.
data_flf2$codeb <- paste(data_flf2$plot, data_flf2$num, sep=".")
data_flf2$codew <- paste(data_flf2$plot, data_flf2$num, data_flf2$year, data_flf2$month, data_flf2$day, sep=".")
uid <- unique(data_flf2$codeb)
# Accumulators for per-collection daily rates (xx = id, yy = interval length,
# aa..hh = leaves/twigs/flowers/fruits/brom/epi/other/total in g/trap/day).
xx <- c()
yy <- c()
aa <- c()
bb <- c()
cc <- c()
dd <- c()
ee <- c()
ff <- c()
gg <- c()
hh <- c()
# NOTE(review): the loop runs over ROWS (length(data_flf2$num)) but indexes
# uid, which has one entry per TRAP. For i > length(uid), uid[i] is NA, the
# subset is empty, and a spurious row is appended to error_df below.
# seq_along(uid) was probably intended -- confirm before changing, since
# consumers of error_df_g may rely on the current contents.
for (i in 1:length(data_flf2$num)) {
sub <- subset(data_flf2, subset=(data_flf2$codeb == uid[i]))
if(length(sub$codeb) > 1) {
#meas_int <- difftime(sub$date[1:(length(sub$date)-1)], sub$date[2:length(sub$date)], units="days")
# get_time_diffs() comes from the sourced functions.r helper file.
meas_int <- get_time_diffs(sub$date)
# Drop the first collection of each trap (no preceding interval), then
# divide each collected weight by the days since the previous collection.
aleaves <- tail(sub$leaves,-1)
atwigs <- tail(sub$twigs,-1)
aflowers <- tail(sub$flowers,-1)
afruits <- tail(sub$fruits,-1)
abrom <- tail(sub$brom,-1)
aepi <- tail(sub$epi,-1)
aother <- tail(sub$other,-1)
atotal <- tail(sub$total,-1)
bleaves <- aleaves/(meas_int)
btwigs <- atwigs/(meas_int)
bflowers <- aflowers/(meas_int)
bfruits <- afruits/(meas_int)
bbrom <- abrom/(meas_int)
bepi <- aepi/(meas_int)
bother <- aother/(meas_int)
btotal <- atotal/(meas_int)
id <- tail(sub$codew,-1)
xx <- c(xx, id)
yy <- c(yy, meas_int)
aa <- c(aa, bleaves)
bb <- c(bb, btwigs)
cc <- c(cc, bflowers)
dd <- c(dd, bfruits)
ee <- c(ee, bbrom)
ff <- c(ff, bepi)
gg <- c(gg, bother)
hh <- c(hh, btotal)
#print(xx)
} else {
# print(paste("row number:", i))
# print(paste("trap number:", sub$num))
# print(paste("subset length:", length(sub$codeb)))
if(exists("error_df")) {
error_df <- rbind(error_df, data.frame(row = i, trap = sub$num[i], sub_len = length(sub$codeb)))
} else {
error_df <- data.frame(row = i, trap = sub$num[i], sub_len = length(sub$codeb))
}
}
}
# NOTE(review): if no iteration ever hit the else-branch, error_df would be
# undefined and this line would error; in practice the row/uid overrun noted
# above always creates it. Also `<<-` writes into the global environment.
error_df_g <<- error_df # assigning to global variable outside the function.
print(paste(nrow(error_df), "errors in the data. See error_df_g."))
data2 <- data.frame(xx, yy, aa, bb, cc, dd, ee, ff, gg, hh)
colnames(data2) <- c("id", "meas_int_days", "bleavesflf_g_trap_day", "btwigs", "bflowers", "bfruits", "bbrom", "bepi", "bother", "btotal")
# get day, month, year from data_flf2
#data3a <- sqldf("SELECT data_flf2.*, data2.* FROM data2 JOIN data_flf2 ON data2.id = data_flf2.codew")
data_flf2$id <- data_flf2$codew
data3 <- merge(data_flf2, data2, by = "id")
# Re-coerce merged rate columns to numeric (data.frame() above may have
# produced factors/characters); the b* rate columns overwrite the raw columns.
data3$leavesflf_g_trap_day <- as.numeric(as.character(data3$bleavesflf_g_trap_day))
data3$meas_int_days <- as.numeric(as.character(data3$meas_int_days))
data3$twigs <- as.numeric(as.character(data3$btwigs))
data3$flowers <- as.numeric(as.character(data3$bflowers))
data3$fruits <- as.numeric(as.character(data3$bfruits))
data3$brom <- as.numeric(as.character(data3$bbrom))
data3$epi <- as.numeric(as.character(data3$bepi))
data3$other <- as.numeric(as.character(data3$bother))
data3$total <- as.numeric(as.character(data3$btotal))
### Conversions: flf per ha per day (for each trap)
# Raw data is in g / litter trap = g / 0.25m2
# Convert to ha: *(10000/0.25)
# Convert to Mg: *1 g = 0.000001 Mg
# Convert to C: *0.49
# The trailing *30 scales g/trap/day rates to a nominal 30-day month.
data3$leavesflf_MgC_ha_month <- (((data3$leavesflf_g_trap_day*(10000/0.25))*0.000001)*0.49)*30
data3$twigsflf <- (((data3$twigs*(10000/0.25))*0.000001)*0.49)*30
data3$flowersflf <- (((data3$flowers*(10000/0.25))*0.000001)*0.49)*30
data3$fruitsflf <- (((data3$fruits*(10000/0.25))*0.000001)*0.49)*30
data3$bromflf <- (((data3$brom*(10000/0.25))*0.000001)*0.49)*30
data3$epiflf <- (((data3$epi*(10000/0.25))*0.000001)*0.49)*30
data3$otherflf <- (((data3$other*(10000/0.25))*0.000001)*0.49)*30
data3$totalflf <- (((data3$total*(10000/0.25))*0.000001)*0.49)*30
# flf per ha per month (for each trap)
data4 = data3 %>% group_by(plot, num, year, month) %>%
dplyr::summarize(leavesflf_MgC_ha_month_trap = mean(leavesflf_MgC_ha_month, na.rm = T),
twigsflf_MgC_ha_month_trap = mean(twigsflf, na.rm = T),
flowersflf_MgC_ha_month_trap = mean(flowersflf, na.rm = T),
fruitsflf_MgC_ha_month_trap = mean(fruitsflf, na.rm = T),
bromflf_MgC_ha_month_trap = mean(bromflf, na.rm = T),
epiflf_MgC_ha_month_trap = mean(epiflf, na.rm = T),
otherflf_MgC_ha_month_trap = mean(otherflf, na.rm = T),
totalflf_MgC_ha_month_trap = mean(totalflf, na.rm = T),
interval = - mean(meas_int_days, na.rm = T),
sd_leavesflf = sd(leavesflf_MgC_ha_month, na.rm = T),
sd_twigsflf = sd(twigsflf, na.rm = T),
sd_flowersflf = sd(flowersflf, na.rm = T),
sd_fruitsflf = sd(fruitsflf, na.rm = T),
sd_bromflf = sd(bromflf, na.rm = T),
sd_epiflf = sd(epiflf, na.rm = T),
sd_otherflf = sd(otherflf, na.rm = T),
sd_totalflf = sd(totalflf, na.rm = T)) %>%
dplyr::rename(litterfall_trap_num = num)
# calculate standard error sd/sqrt(length(unique(data3$year)))
data4$se_leavesflf <- data4$sd_leavesflf/sqrt(length(unique(data3$year)))
data4$se_twigsflf <- data4$sd_twigsflf/sqrt(length(unique(data3$year)))
data4$se_flowersflf <- data4$sd_flowersflf/sqrt(length(unique(data3$year)))
data4$se_fruitsflf <- data4$sd_fruitsflf/sqrt(length(unique(data3$year)))
data4$se_bromflf <- data4$sd_bromflf/sqrt(length(unique(data3$year)))
data4$se_epiflf <- data4$sd_epiflf/sqrt(length(unique(data3$year)))
data4$se_otherflf <- data4$sd_otherflf/sqrt(length(unique(data3$year)))
data4$se_totalflf <- data4$sd_totalflf/sqrt(length(unique(data3$year)))
# flf per ha per month (average of all the traps)
# NOTE(review): inside this summarise() the sd_* columns are computed AFTER
# columns of the SAME name (e.g. leavesflf_MgC_ha_month) have been redefined
# as length-1 means, so dplyr masks the originals and sd() of a single value
# is NA; the sd/se columns of data5 are therefore all NA. In data4 above the
# "_trap" suffix avoids the collision. Confirm and rename to fix.
data5 = data3 %>% group_by(plot, year, month) %>%
dplyr::summarize(leavesflf_MgC_ha_month = mean(leavesflf_MgC_ha_month, na.rm = T),
twigsflf_MgC_ha_month = mean(twigsflf, na.rm = T),
flowersflf_MgC_ha_month = mean(flowersflf, na.rm = T),
fruitsflf_MgC_ha_month = mean(fruitsflf, na.rm = T),
bromflf_MgC_ha_month = mean(bromflf, na.rm = T),
epiflf_MgC_ha_month = mean(epiflf, na.rm = T),
otherflf_MgC_ha_month = mean(otherflf, na.rm = T),
totalflf_MgC_ha_month = mean(totalflf, na.rm = T),
sd_leavesflf = sd(leavesflf_MgC_ha_month, na.rm = T),
sd_twigsflf = sd(twigsflf, na.rm = T),
sd_flowersflf = sd(flowersflf, na.rm = T),
sd_fruitsflf = sd(fruitsflf, na.rm = T),
sd_bromflf = sd(bromflf, na.rm = T),
sd_epiflf = sd(epiflf, na.rm = T),
sd_otherflf = sd(otherflf, na.rm = T),
sd_totalflf = sd(totalflf, na.rm = T))
# calculate standard error sd/sqrt(length(unique(data3$num)))
data5[data5=="-Inf"] <- NaN
data5$se_leavesflf <- data5$sd_leavesflf/sqrt(length(unique(data3$num)))
data5$se_twigsflf <- data5$sd_twigsflf/sqrt(length(unique(data3$num)))
data5$se_flowersflf <- data5$sd_flowersflf/sqrt(length(unique(data3$num)))
data5$se_fruitsflf <- data5$sd_fruitsflf/sqrt(length(unique(data3$num)))
data5$se_bromflf <- data5$sd_bromflf/sqrt(length(unique(data3$num)))
data5$se_epiflf <- data5$sd_epiflf/sqrt(length(unique(data3$num)))
data5$se_otherflf <- data5$sd_otherflf/sqrt(length(unique(data3$num)))
data5$se_totalflf <- data5$sd_totalflf/sqrt(length(unique(data3$num)))
# NPP litterfall in g m-2 mo-1
# NOTE(review): totalflf_MgC_ha_month already includes the 0.49 carbon
# fraction, so multiplying by 0.49 again looks like double-counting; also
# MgC/ha -> g/m2 is *100, not *10000*0.000001 (= *0.01). TODO confirm units.
data5$totalflf_g_m2_mo <- data5$totalflf_MgC_ha_month * 0.49 * 10000 * 0.000001
# NOTE(review): yy is assigned here but never used afterwards.
yy = data.frame(data5)
# Return either monthly means (ret="monthly.means") or annual means (ret="annual.means")
switch(ret,
monthly.means.subplot = {return(data4)},
monthly.means.ts = {return(data5)}
)
}
|
# aya43@sfu.ca 20161220
# Uses different distance measures to calculate distance & plot samples
root = "~/projects/flowCAP-II"
result_dir = "result"; suppressWarnings(dir.create (result_dir))
setwd(root)
options(stringsAsFactors=FALSE)
options(device="cairo")
options(na.rm=T)
#Input
phenoMeta_dir = paste(result_dir, "/phenoMeta.Rdata", sep="")
sampleMeta_dir = paste(result_dir, "/sampleMeta.Rdata", sep="")
matrixCount_dir = paste(result_dir, "/matrixCount.Rdata", sep="")
matrixCountAdj_dir = paste(result_dir, "/matrixCountAdj.Rdata", sep="")
matrixProp_dir = paste(result_dir, "/matrixProp.Rdata", sep="")
#Output
py_dir = paste(result_dir, "_py", sep=""); suppressWarnings(dir.create (py_dir))
matrixCount_dirpy = paste(py_dir, "/matrixCount.csv", sep="")
matrixProp_dirpy = paste(py_dir, "/matrixProp.csv", sep="")
matrixCountAdj_dirpy = paste(py_dir, "/matrixCountAdj.csv", sep="")
matrixCount_cortrim_dirpy = paste(py_dir, "/matrixCount_cortrim.csv", sep="")
matrixProp_cortrim_dirpy = paste(py_dir, "/matrixProp_cortrim.csv", sep="")
matrixCountAdj_cortrim_dirpy = paste(py_dir, "/matrixCountAdj_cortrim.csv", sep="")
aml_dirpy = paste(py_dir, "/aml.csv", sep="")
libr(stringr)
source("code/_funcAlice.R")
dodist = F
doHC = F
doTsne = T
start = Sys.time()
load(sampleMeta_dir)
write.csv(sampleMeta$aml, file=aml_dirpy, row.names=F)
for (mcp in 1:3) { # Load & fix cell count/countAdj/proportion matrix; 1:3 are regular matrices, 4... are pvalues
if (mcp==1) {
m = get(load(matrixCountAdj_dir))
write.csv(m, file=matrixCountAdj_dirpy, row.names=F)
cormatrix = cor(m)
#cormatrix_n = cor(m[which(sampleMeta$aml=="normal"),])
#cormatrix_a = cor(m[which(sampleMeta$aml=="aml"),])
cor = findCorrelation(cormatrix)
#cor_n = findCorrelation(cormatrix_n)
#cor_a = findCorrelation(cormatrix_a)
write.csv(m[,-cor], file=matrixCountAdj_cortrim_dirpy, row.names=F)
} else if (mcp==2) {
m = get(load(matrixCount_dir))
write.csv(m, file=matrixCount_dirpy, row.names=F)
} else if (mcp==3) {
m = get(load(matrixProp_dir))
write.csv(m, file=matrixProp_dirpy, row.names=F)
}
}
TimeOutput(start)
| /flowtype_flowcap_pipeline-master/201708/00.5_pyData.R | no_license | aya49/flowGraph_experiments | R | false | false | 2,178 | r | # aya43@sfu.ca 20161220
# Uses different distance measures to calculate distance & plot samples
root = "~/projects/flowCAP-II"
result_dir = "result"; suppressWarnings(dir.create (result_dir))
setwd(root)
options(stringsAsFactors=FALSE)
options(device="cairo")
options(na.rm=T)
#Input
phenoMeta_dir = paste(result_dir, "/phenoMeta.Rdata", sep="")
sampleMeta_dir = paste(result_dir, "/sampleMeta.Rdata", sep="")
matrixCount_dir = paste(result_dir, "/matrixCount.Rdata", sep="")
matrixCountAdj_dir = paste(result_dir, "/matrixCountAdj.Rdata", sep="")
matrixProp_dir = paste(result_dir, "/matrixProp.Rdata", sep="")
#Output
py_dir = paste(result_dir, "_py", sep=""); suppressWarnings(dir.create (py_dir))
matrixCount_dirpy = paste(py_dir, "/matrixCount.csv", sep="")
matrixProp_dirpy = paste(py_dir, "/matrixProp.csv", sep="")
matrixCountAdj_dirpy = paste(py_dir, "/matrixCountAdj.csv", sep="")
matrixCount_cortrim_dirpy = paste(py_dir, "/matrixCount_cortrim.csv", sep="")
matrixProp_cortrim_dirpy = paste(py_dir, "/matrixProp_cortrim.csv", sep="")
matrixCountAdj_cortrim_dirpy = paste(py_dir, "/matrixCountAdj_cortrim.csv", sep="")
aml_dirpy = paste(py_dir, "/aml.csv", sep="")
libr(stringr)
source("code/_funcAlice.R")
dodist = F
doHC = F
doTsne = T
start = Sys.time()
load(sampleMeta_dir)
write.csv(sampleMeta$aml, file=aml_dirpy, row.names=F)
for (mcp in 1:3) { # Load & fix cell count/countAdj/proportion matrix; 1:3 are regular matrices, 4... are pvalues
if (mcp==1) {
m = get(load(matrixCountAdj_dir))
write.csv(m, file=matrixCountAdj_dirpy, row.names=F)
cormatrix = cor(m)
#cormatrix_n = cor(m[which(sampleMeta$aml=="normal"),])
#cormatrix_a = cor(m[which(sampleMeta$aml=="aml"),])
cor = findCorrelation(cormatrix)
#cor_n = findCorrelation(cormatrix_n)
#cor_a = findCorrelation(cormatrix_a)
write.csv(m[,-cor], file=matrixCountAdj_cortrim_dirpy, row.names=F)
} else if (mcp==2) {
m = get(load(matrixCount_dir))
write.csv(m, file=matrixCount_dirpy, row.names=F)
} else if (mcp==3) {
m = get(load(matrixProp_dir))
write.csv(m, file=matrixProp_dirpy, row.names=F)
}
}
TimeOutput(start)
|
# Data-preparation script for the `mum` package dataset: reads the raw Excel
# file, converts the categorical columns to factors, and saves the result as
# package data (usethis::use_data on the following line).

# Load the raw data from the package's data-raw directory.
mum <- readxl::read_excel("data-raw/MUM.xlsx")
## Mutate the data. Taken from 36-202 Lab 7 Spring 2018
# Visibility and TestResult are categorical variables; store them as factors.
mum <- dplyr::mutate(mum, Visibility = as.factor(Visibility),
                     TestResult = as.factor(TestResult))
# Save the data
## readr::write_csv(mum, "data-raw/mum.csv")
usethis::use_data(mum, overwrite = TRUE) | /data-raw/mum.R | permissive | frank113/cmu202 | R | false | false | 338 | r | # Load the data
mum <- readxl::read_excel("data-raw/MUM.xlsx")
## Mutate the data. Taken from 36-202 Lab 7 Spring 2018
mum <- dplyr::mutate(mum, Visibility = as.factor(Visibility),
TestResult = as.factor(TestResult))
# Save the data
## readr::write_csv(mum, "data-raw/mum.csv")
usethis::use_data(mum, overwrite = TRUE) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FST functions.R
\name{WC_FST_FiniteSample_Diploids_2Alleles}
\alias{WC_FST_FiniteSample_Diploids_2Alleles}
\title{FST calculation for biallelic diploid data}
\usage{
WC_FST_FiniteSample_Diploids_2Alleles(Sample_Mat)
}
\arguments{
\item{Sample_Mat}{This is an array with a row for each population, and three values per row: Number of Homozygotes of one type, number of heterozygotes, number of homozygotes of other type.}
}
\value{
Returns a list of values related to FST:
\itemize{
\item He: the expected heterozygosity of the locus
\item FST: Fst (with sample size correction)
\item T1: The numerator of the Fst calculation (a from Weir and Cockerham 1984)
\item T2NoCorr: The denominator of the Fst calculation (a+b+c from Weir and Cockerham 1984)
}
}
\description{
Calculates FST with correction for local sample sizes, for diploid biallelic data.
}
| /man/WC_FST_FiniteSample_Diploids_2Alleles.Rd | no_license | aviary-j/OutFLANK | R | false | true | 946 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FST functions.R
\name{WC_FST_FiniteSample_Diploids_2Alleles}
\alias{WC_FST_FiniteSample_Diploids_2Alleles}
\title{FST calculation for biallelic diploid data}
\usage{
WC_FST_FiniteSample_Diploids_2Alleles(Sample_Mat)
}
\arguments{
\item{Sample_Mat}{This is an array with a row for each population, and three values per row: Number of Homozygotes of one type, number of heterozygotes, number of homozygotes of other type.}
}
\value{
Returns a list of values related to FST:
\itemize{
\item He: the expected heterozygosity of the locus
\item FST: Fst (with sample size correction)
\item T1: The numerator of the Fst calculation (a from Weir and Cockerham 1984)
\item T2NoCorr: The denominator of the Fst calculation (a+b+c from Weir and Cockerham 1984)
}
}
\description{
Calculates FST with correction for local sample sizes, for diploid biallelic data.
}
|
[
{
"title": "ARIMA models with long lags",
"href": "http://robjhyndman.com/hyndsight/arima-models-with-long-lags/"
},
{
"title": "Happy PI day",
"href": "http://wiekvoet.blogspot.com/2016/03/happy-pi-day.html"
},
{
"title": "New R package raincpc: Obtain and Analyze Global Rainfall data from the Climate Prediction Center (CPC)",
"href": "http://rationshop.blogspot.com/2013/11/new-r-package-raincpc-obtain-and.html"
},
{
"title": "RStudio v0.98 Preview (Debugging Tools and More)",
"href": "https://blog.rstudio.org/2013/09/24/rstudio-v0-98-preview/"
},
{
"title": "RQuantLib 0.2.11",
"href": "http://dirk.eddelbuettel.com/blog/2009/03/03/"
},
{
"title": "The statistics software signal",
"href": "http://andrewgelman.com/2013/01/05/the-statistics-software-signal/"
},
{
"title": "A prize of US$3,000,000 for a data mining competition to improve healthcare",
"href": "https://rdatamining.wordpress.com/2011/12/13/a-prize-of-us3000000-for-a-data-mining-competition-to-improve-healthcare/"
},
{
"title": "Reserving based on log-incremental payments in R, part III",
"href": "http://www.magesblog.com/2013/01/reserving-based-on-log-incremental_22.html"
},
{
"title": "R Markdown to other document formats",
"href": "http://conjugateprior.org/2012/12/r-markdown-to-other-document-formats/?utm_source=rss&utm_medium=rss&utm_campaign=r-markdown-to-other-document-formats"
},
{
"title": "R Continues Climbing",
"href": "https://analystatlarge.wordpress.com/2014/06/16/r-continues-climbing/"
},
{
"title": "Propagation of the news of OBL’s death via Twitter",
"href": "http://blog.revolutionanalytics.com/2011/05/propagation-of-the-news-of-obls-death-via-twitter.html"
},
{
"title": "useR 2012: impressions, tutorials",
"href": "http://civilstat.com/2012/06/user-2012-impressions-tutorials/"
},
{
"title": "Tcl/Tk GUI Example with Variable Input by User",
"href": "http://thebiobucket.blogspot.com/2012/08/tcltk-gui-example-with-variable-input.html"
},
{
"title": "Implementing Circles example",
"href": "http://rsnippets.blogspot.com/2012/02/implementing-circles-example.html"
},
{
"title": "NBA Stat in a Shiny App",
"href": "http://junma5.weebly.com/data-blog/nba-stat-in-a-shiny-app"
},
{
"title": "analyze the united states decennial census public use microdata sample (pums) with r and monetdb",
"href": "http://www.asdfree.com/2013/07/analyze-united-states-decennial-census.html"
},
{
"title": "FOMC Dates – Scraping Data From Web Pages",
"href": "http://www.returnandrisk.com/2014/11/scraping-data-from-web-pages-fomc-dates.html"
},
{
"title": "14 jobs for R-users / from around the world (2015-12-17)",
"href": "https://www.r-users.com/jobs/senior-scientist-life-and-health-modeling-specialist-boston-us/"
},
{
"title": "R/ggplot2 tip: aes_string",
"href": "https://nsaunders.wordpress.com/2013/02/26/rggplot2-tip-aes_string/"
},
{
"title": "One Way Analysis of Variance Exercises",
"href": "http://r-exercises.com/2016/09/30/one-way-analysis-of-variance-exercises/"
},
{
"title": "R Markdown & Bloggin’: Part 1 – Inserting Code",
"href": "http://equastat.com/r-markdown-blogging-part-1-inserting-code/"
},
{
"title": "Sorting Numeric Vectors in C++ and R",
"href": "http://gallery.rcpp.org/articles/sorting/"
},
{
"title": "Sixth Torino R net meeting",
"href": "http://torinor.net/2013/11/14/sixt-torino-r-net-meeting/"
},
{
"title": "Analysing the movements of a cat",
"href": "http://blog.revolutionanalytics.com/2016/03/analysing-the-movements-of-a-cat.html"
},
{
"title": "Exporting Data From R to KDB",
"href": "http://www.theresearchkitchen.com/archives/776"
},
{
"title": "Pair-Trading with S&P500 Companies – Part I.",
"href": "https://web.archive.org/web/http://blog.quanttrader.org/2011/03/pair-trading-with-sp500-companies-part-i/"
},
{
"title": "Update your Windows PATH – revisited",
"href": "https://rappster.wordpress.com/2011/12/16/update-your-windows-path-revisited/"
},
{
"title": "dynamic mixtures [at NBBC15]",
"href": "https://xianblog.wordpress.com/2015/06/18/dynamic-mixtures-at-nbbc15/"
},
{
"title": "How accurate or reliable are R calculations?",
"href": "https://ryouready.wordpress.com/2009/03/28/how-accurate-or-reliable-are-r-calculations/"
},
{
"title": "Fearsome Engines, Part 1",
"href": "https://4dpiecharts.com/2013/09/07/fearsome-engines-part-1/"
},
{
"title": "Shorting at High: Algo Trading Strategy in R",
"href": "https://www.quantinsti.com/blog/shorting-high-algo-trading-strategy-r/"
},
{
"title": "Shootout 2012: Test & Val Sets proyections",
"href": "http://nir-quimiometria.blogspot.com/2012/11/shootout-2012-test-val-sets-proyections.html"
},
{
"title": "Shiny Server 0.4",
"href": "https://blog.rstudio.org/2013/12/03/shiny-server-0-4/"
},
{
"title": "Comment: Search and Replace",
"href": "http://xrgb.blogspot.com/2013/01/comment-search-and-replace.html"
},
{
"title": "Further Comments on the ASA Manifesto",
"href": "https://matloff.wordpress.com/2016/03/09/further-comments-on-the-asa-manifesto/"
},
{
"title": "Example 9.26: More circular plotting",
"href": "https://feedproxy.google.com/~r/SASandR/~3/rzNHYAp0C_o/example-926-more-circular-plotting.html"
},
{
"title": "About to teach Statistical Graphics and Visualization course at CMU",
"href": "http://civilstat.com/2015/08/about-to-teach-statistical-graphics-and-visualization-course-at-cmu/"
},
{
"title": "Boxplots or raw data graphs?",
"href": "http://stevepowell.blot.im/boxplots-or-raw-data-graphs"
},
{
"title": "pgfSweave 1.1.0 now on CRAN!",
"href": "http://cameron.bracken.bz/pgfsweave-1-1-0-now-on-cran"
},
{
"title": "littler 0.3.0 — on CRAN !!",
"href": "http://dirk.eddelbuettel.com/blog/2015/10/29/"
},
{
"title": "For loops in R can lose class information",
"href": "http://www.win-vector.com/blog/2016/03/for-loops-in-r-can-lose-class-information/"
},
{
"title": "Building a Data Science Platform for R&D, Part 3 – R, R Studio Server, SparkR & Sparklyr",
"href": "https://alexioannides.com/2016/08/22/building-a-data-science-platform-for-rd-part-3-r-r-studio-server-sparkr-sparklyr/"
},
{
"title": "RcppNumerical: Numerical integration and optimization with Rcpp",
"href": "http://statr.me/2016/04/rcppnumerical-numerical-integration-optimization-rcpp/"
},
{
"title": "Estimating mixed graphical models",
"href": "http://jmbh.github.io//Estimation-of-mixed-graphical-models/"
},
{
"title": "Introduction to XGBoost R package",
"href": "https://web.archive.org/web/http://dmlc.ml/rstats/2016/03/08/xgboost.html"
},
{
"title": "Multidimensional scaling of REM album covers: FlagSpace revisited",
"href": "https://robertgrantstats.wordpress.com/2012/12/12/multidimensional-scaling-of-rem-album-covers/"
},
{
"title": "Interaction plot from cell means",
"href": "http://psychologicalstatistics.blogspot.com/2010/02/interaction-plot-from-cell-means.html"
},
{
"title": "Control the Function Scope by the R Package Namescope",
"href": "https://tomizonor.wordpress.com/2013/09/16/scope-namescope/"
},
{
"title": "Reading OECD.Stat into R",
"href": "https://stronginstruments.com/2014/03/02/reading-oecd-stat-into-r/"
},
{
"title": "Web Hosted R Syntax Highlighter",
"href": "https://web.archive.org/web/http://blog.r-enthusiasts.com/2013/03/24/web-hosted-r-syntax-highlighter/"
}
]
| /json/335.r | no_license | rweekly/rweekly.org | R | false | false | 7,820 | r | [
{
"title": "ARIMA models with long lags",
"href": "http://robjhyndman.com/hyndsight/arima-models-with-long-lags/"
},
{
"title": "Happy PI day",
"href": "http://wiekvoet.blogspot.com/2016/03/happy-pi-day.html"
},
{
"title": "New R package raincpc: Obtain and Analyze Global Rainfall data from the Climate Prediction Center (CPC)",
"href": "http://rationshop.blogspot.com/2013/11/new-r-package-raincpc-obtain-and.html"
},
{
"title": "RStudio v0.98 Preview (Debugging Tools and More)",
"href": "https://blog.rstudio.org/2013/09/24/rstudio-v0-98-preview/"
},
{
"title": "RQuantLib 0.2.11",
"href": "http://dirk.eddelbuettel.com/blog/2009/03/03/"
},
{
"title": "The statistics software signal",
"href": "http://andrewgelman.com/2013/01/05/the-statistics-software-signal/"
},
{
"title": "A prize of US$3,000,000 for a data mining competition to improve healthcare",
"href": "https://rdatamining.wordpress.com/2011/12/13/a-prize-of-us3000000-for-a-data-mining-competition-to-improve-healthcare/"
},
{
"title": "Reserving based on log-incremental payments in R, part III",
"href": "http://www.magesblog.com/2013/01/reserving-based-on-log-incremental_22.html"
},
{
"title": "R Markdown to other document formats",
"href": "http://conjugateprior.org/2012/12/r-markdown-to-other-document-formats/?utm_source=rss&utm_medium=rss&utm_campaign=r-markdown-to-other-document-formats"
},
{
"title": "R Continues Climbing",
"href": "https://analystatlarge.wordpress.com/2014/06/16/r-continues-climbing/"
},
{
"title": "Propagation of the news of OBL’s death via Twitter",
"href": "http://blog.revolutionanalytics.com/2011/05/propagation-of-the-news-of-obls-death-via-twitter.html"
},
{
"title": "useR 2012: impressions, tutorials",
"href": "http://civilstat.com/2012/06/user-2012-impressions-tutorials/"
},
{
"title": "Tcl/Tk GUI Example with Variable Input by User",
"href": "http://thebiobucket.blogspot.com/2012/08/tcltk-gui-example-with-variable-input.html"
},
{
"title": "Implementing Circles example",
"href": "http://rsnippets.blogspot.com/2012/02/implementing-circles-example.html"
},
{
"title": "NBA Stat in a Shiny App",
"href": "http://junma5.weebly.com/data-blog/nba-stat-in-a-shiny-app"
},
{
"title": "analyze the united states decennial census public use microdata sample (pums) with r and monetdb",
"href": "http://www.asdfree.com/2013/07/analyze-united-states-decennial-census.html"
},
{
"title": "FOMC Dates – Scraping Data From Web Pages",
"href": "http://www.returnandrisk.com/2014/11/scraping-data-from-web-pages-fomc-dates.html"
},
{
"title": "14 jobs for R-users / from around the world (2015-12-17)",
"href": "https://www.r-users.com/jobs/senior-scientist-life-and-health-modeling-specialist-boston-us/"
},
{
"title": "R/ggplot2 tip: aes_string",
"href": "https://nsaunders.wordpress.com/2013/02/26/rggplot2-tip-aes_string/"
},
{
"title": "One Way Analysis of Variance Exercises",
"href": "http://r-exercises.com/2016/09/30/one-way-analysis-of-variance-exercises/"
},
{
"title": "R Markdown & Bloggin’: Part 1 – Inserting Code",
"href": "http://equastat.com/r-markdown-blogging-part-1-inserting-code/"
},
{
"title": "Sorting Numeric Vectors in C++ and R",
"href": "http://gallery.rcpp.org/articles/sorting/"
},
{
"title": "Sixth Torino R net meeting",
"href": "http://torinor.net/2013/11/14/sixt-torino-r-net-meeting/"
},
{
"title": "Analysing the movements of a cat",
"href": "http://blog.revolutionanalytics.com/2016/03/analysing-the-movements-of-a-cat.html"
},
{
"title": "Exporting Data From R to KDB",
"href": "http://www.theresearchkitchen.com/archives/776"
},
{
"title": "Pair-Trading with S&P500 Companies – Part I.",
"href": "https://web.archive.org/web/http://blog.quanttrader.org/2011/03/pair-trading-with-sp500-companies-part-i/"
},
{
"title": "Update your Windows PATH – revisited",
"href": "https://rappster.wordpress.com/2011/12/16/update-your-windows-path-revisited/"
},
{
"title": "dynamic mixtures [at NBBC15]",
"href": "https://xianblog.wordpress.com/2015/06/18/dynamic-mixtures-at-nbbc15/"
},
{
"title": "How accurate or reliable are R calculations?",
"href": "https://ryouready.wordpress.com/2009/03/28/how-accurate-or-reliable-are-r-calculations/"
},
{
"title": "Fearsome Engines, Part 1",
"href": "https://4dpiecharts.com/2013/09/07/fearsome-engines-part-1/"
},
{
"title": "Shorting at High: Algo Trading Strategy in R",
"href": "https://www.quantinsti.com/blog/shorting-high-algo-trading-strategy-r/"
},
{
"title": "Shootout 2012: Test & Val Sets proyections",
"href": "http://nir-quimiometria.blogspot.com/2012/11/shootout-2012-test-val-sets-proyections.html"
},
{
"title": "Shiny Server 0.4",
"href": "https://blog.rstudio.org/2013/12/03/shiny-server-0-4/"
},
{
"title": "Comment: Search and Replace",
"href": "http://xrgb.blogspot.com/2013/01/comment-search-and-replace.html"
},
{
"title": "Further Comments on the ASA Manifesto",
"href": "https://matloff.wordpress.com/2016/03/09/further-comments-on-the-asa-manifesto/"
},
{
"title": "Example 9.26: More circular plotting",
"href": "https://feedproxy.google.com/~r/SASandR/~3/rzNHYAp0C_o/example-926-more-circular-plotting.html"
},
{
"title": "About to teach Statistical Graphics and Visualization course at CMU",
"href": "http://civilstat.com/2015/08/about-to-teach-statistical-graphics-and-visualization-course-at-cmu/"
},
{
"title": "Boxplots or raw data graphs?",
"href": "http://stevepowell.blot.im/boxplots-or-raw-data-graphs"
},
{
"title": "pgfSweave 1.1.0 now on CRAN!",
"href": "http://cameron.bracken.bz/pgfsweave-1-1-0-now-on-cran"
},
{
"title": "littler 0.3.0 — on CRAN !!",
"href": "http://dirk.eddelbuettel.com/blog/2015/10/29/"
},
{
"title": "For loops in R can lose class information",
"href": "http://www.win-vector.com/blog/2016/03/for-loops-in-r-can-lose-class-information/"
},
{
"title": "Building a Data Science Platform for R&D, Part 3 – R, R Studio Server, SparkR & Sparklyr",
"href": "https://alexioannides.com/2016/08/22/building-a-data-science-platform-for-rd-part-3-r-r-studio-server-sparkr-sparklyr/"
},
{
"title": "RcppNumerical: Numerical integration and optimization with Rcpp",
"href": "http://statr.me/2016/04/rcppnumerical-numerical-integration-optimization-rcpp/"
},
{
"title": "Estimating mixed graphical models",
"href": "http://jmbh.github.io//Estimation-of-mixed-graphical-models/"
},
{
"title": "Introduction to XGBoost R package",
"href": "https://web.archive.org/web/http://dmlc.ml/rstats/2016/03/08/xgboost.html"
},
{
"title": "Multidimensional scaling of REM album covers: FlagSpace revisited",
"href": "https://robertgrantstats.wordpress.com/2012/12/12/multidimensional-scaling-of-rem-album-covers/"
},
{
"title": "Interaction plot from cell means",
"href": "http://psychologicalstatistics.blogspot.com/2010/02/interaction-plot-from-cell-means.html"
},
{
"title": "Control the Function Scope by the R Package Namescope",
"href": "https://tomizonor.wordpress.com/2013/09/16/scope-namescope/"
},
{
"title": "Reading OECD.Stat into R",
"href": "https://stronginstruments.com/2014/03/02/reading-oecd-stat-into-r/"
},
{
"title": "Web Hosted R Syntax Highlighter",
"href": "https://web.archive.org/web/http://blog.r-enthusiasts.com/2013/03/24/web-hosted-r-syntax-highlighter/"
}
]
|
# Exploratory script comparing UK COVID-19 case counts aggregated up from
# MSOA level with the local-authority line list from the dashboard downloads.
library(pacman)
p_load(tidyverse, data.table)
# Source URLs: MSOA -> UTLA lookup table, latest MSOA-level case data, and
# the LA-level case line list.
lu <- "https://coronavirus.data.gov.uk/downloads/supplements/lookup_table.csv"
msoa <- "https://coronavirus.data.gov.uk/downloads/msoa_data/MSOAs_latest.csv"
la <- "https://coronavirus.data.gov.uk/downloads/csv/coronavirus-cases_latest.csv"
# Download each file (the URL variables are reused to hold the data frames).
lu <- read_csv(lu)
msoa <- read_csv(msoa)
la <- read_csv(la)
# Quick interactive checks of the downloaded tables.
summary(msoa)
head(msoa)
head(lu)
# Attach the UTLA lookup columns to each MSOA row.
msoa <- msoa %>%
  left_join(lu, by = c("areaCode" = "MSOA"))
glimpse(msoa)
# Sum the MSOA rolling case counts up to UTLA level, per date.
daily_ltla <- msoa %>%
  group_by(UTLA, UTLA_areaName, date) %>%
  summarise(case_sum = sum(newCasesBySpecimenDateRollingSum, na.rm = TRUE))
# Weekly UTLA case sheet: the LA line-list cases summed by epi-week, joined to
# the MSOA-derived rolling sums (daily_ltla, built above).
daily_ltla_sheet <- la %>%
  janitor::clean_names() %>%
  mutate(date = lubridate::epiweek(specimen_date)) %>%
  group_by(area_code, area_name, date) %>%
  mutate(case_sum_1 = sum(daily_lab_confirmed_cases, na.rm = TRUE)) %>%
  ungroup() %>%
  # fixed: `area_name` was selected twice and `area_code` dropped, which broke
  # the join below (it joins on area_code)
  select(area_code, area_name, specimen_date, date, case_sum_1) %>%
  left_join(daily_ltla, by = c("area_code" = "UTLA", "specimen_date" = "date")) %>%
  select(area_name, area_code, specimen_date, date, case_sum_1, case_sum)

# Heatmap of the ratio of MSOA rolling-sum cases to line-list cases, for
# area/date combinations where the ratio exceeds 1.
# fixed: the pipe was broken after filter(prop > 1), leaving the select()/
# ggplot() chain below with no data object to operate on.
daily_ltla_sheet %>%
  filter(!is.na(case_sum)) %>%
  mutate(prop = case_sum/case_sum_1) %>%
  filter(prop > 1) %>%
  select(area_code, area_name, date, prop) %>%
  # pivot_wider(names_from = "date", values_from = "prop") %>%
  # NOTE(review): the pivot_wider() step above looks like a debugging leftover;
  # the ggplot below needs the long format (date/prop columns), so it is
  # disabled here -- TODO confirm with the author.
  ggplot(aes(date, fct_rev(area_name), fill = prop)) +
  geom_tile() +
  viridis::scale_fill_viridis() +
  theme(axis.text.y = element_text(size = rel(.5)))
msoa %>%
filter(str_detect(areaName, "Balsham")) %>%
gt::gt() | /msoa-cases.R | no_license | julianflowers12/test-and-trace | R | false | false | 1,603 | r | library(pacman)
p_load(tidyverse, data.table)
lu <- "https://coronavirus.data.gov.uk/downloads/supplements/lookup_table.csv"
msoa <- "https://coronavirus.data.gov.uk/downloads/msoa_data/MSOAs_latest.csv"
la <- "https://coronavirus.data.gov.uk/downloads/csv/coronavirus-cases_latest.csv"
lu <- read_csv(lu)
msoa <- read_csv(msoa)
la <- read_csv(la)
summary(msoa)
head(msoa)
head(lu)
msoa <- msoa %>%
left_join(lu, by = c("areaCode" = "MSOA"))
glimpse(msoa)
daily_ltla <- msoa %>%
group_by(UTLA, UTLA_areaName, date) %>%
summarise(case_sum = sum(newCasesBySpecimenDateRollingSum, na.rm = TRUE))
daily_ltla_sheet <- la %>%
janitor::clean_names() %>%
mutate(date = lubridate::epiweek(specimen_date)) %>%
group_by(area_code, area_name, date) %>%
mutate(date, case_sum_1 = sum(daily_lab_confirmed_cases, na.rm = TRUE)) %>%
select(area_name, area_name, specimen_date, date, case_sum_1) %>%
left_join(daily_ltla, by = c("area_code" = "UTLA", "specimen_date" = "date")) %>%
select(area_name, area_code, specimen_date, date, case_sum_1, case_sum)
daily_ltla_sheet %>%
filter(!is.na(case_sum)) %>%
mutate(prop = case_sum/case_sum_1) %>%
filter(prop > 1)
select(area_code, area_name, date, prop) %>%
pivot_wider(names_from = "date", values_from = "prop") %>%
#head()
ggplot(aes(date, fct_rev(area_name), fill = prop)) +
geom_tile() +
viridis::scale_fill_viridis() +
theme(axis.text.y = element_text(size = rel(.5)))
msoa %>%
filter(str_detect(areaName, "Balsham")) %>%
gt::gt() |
# Course project plot 4: a 2x2 panel of household power consumption measures
# for 2007-02-01 and 2007-02-02, written to plot4.png.
library(dplyr)

# Download and extract the UCI household power consumption data set.
download.file(url = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",destfile = "household_power_consumption.zip")
unzip(zipfile = "household_power_consumption.zip",overwrite = TRUE)

# '?' marks missing values; Date/Time arrive as character and are combined below.
elec_pwr <- read.csv(".\\household_power_consumption.txt",sep=";",header = TRUE,stringsAsFactors=FALSE,na.strings='?',colClasses=c("character","character",rep("numeric",7)))
elec_pwr_2days <- subset(elec_pwr,elec_pwr$Date == "1/2/2007"|elec_pwr$Date == "2/2/2007")
elec_pwr_2days <- mutate(elec_pwr_2days,DateTime = as.POSIXct(paste(Date,Time,sep = " "),format='%d/%m/%Y %H:%M:%S'))

# fixed: a stray dev.print() call was issued here BEFORE any plot or device
# existed, which errors out ("can only print from a screen device"); removed.
png(file = "plot4.png", bg = "transparent")
par(mfrow = c(2, 2))

# Top-left: global active power over time.
with(elec_pwr_2days,plot(DateTime,Global_active_power,type="l",xlab = "",ylab="Global Active Power (kilowatts)"))

# Top-right: voltage over time.
with(elec_pwr_2days,plot(DateTime,Voltage,type="l",xlab = "datetime", ylab = "Voltage"))

# Bottom-left: the three sub-metering series overlaid on one panel.
# fixed: previously drawn with par(new = T) and three full plot() calls, which
# redraws the axes three times; lines() adds to the existing panel instead,
# and the y-axis label now goes on the first plot call.
with(elec_pwr_2days,plot(DateTime,Sub_metering_1,type="l",ylim = c(0,40),xlab = "", ylab = "Energy sub metering"))
with(elec_pwr_2days,lines(DateTime,Sub_metering_2,col = "Red"))
with(elec_pwr_2days,lines(DateTime,Sub_metering_3,col = "Blue"))
legend("topright",legend=c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"),lty=c(1,1,1), col=c("Black","Red","Blue"))

# Bottom-right: global reactive power over time.
with(elec_pwr_2days,plot(DateTime,Global_reactive_power,type="l",xlab = "datetime", ylab = "Global_reactive_power"))
dev.off() | /plot4.R | no_license | kevinmx21/ExData_Plotting1 | R | false | false | 1,594 | r | library(dplyr)
download.file(url = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",destfile = "household_power_consumption.zip")
unzip(zipfile = "household_power_consumption.zip",overwrite = TRUE)
elec_pwr <- read.csv(".\\household_power_consumption.txt",sep=";",header = TRUE,stringsAsFactors=FALSE,na.strings='?',colClasses=c("character","character",rep("numeric",7)))
elec_pwr_2days <- subset(elec_pwr,elec_pwr$Date == "1/2/2007"|elec_pwr$Date == "2/2/2007")
elec_pwr_2days <- mutate(elec_pwr_2days,DateTime = as.POSIXct(paste(Date,Time,sep = " "),format='%d/%m/%Y %H:%M:%S'))
dev.print(png, file = "plot4.png", width = 480, height = 480)
png(file = "plot4.png", bg = "transparent")
par(mfrow = c(2, 2))
with(elec_pwr_2days,plot(DateTime,Global_active_power,type="l",xlab = "",ylab="Global Active Power (kilowatts)"))
with(elec_pwr_2days,plot(DateTime,Voltage,type="l",xlab = "datetime", ylab = "Voltage"))
with(elec_pwr_2days,plot(DateTime,Sub_metering_1,type="l",ylim = c(0,40),xlab = "", ylab = ""))
par(new = T)
with(elec_pwr_2days,plot(DateTime,Sub_metering_2,type="l",ylim = c(0,40),xlab = "", ylab = "",col = "Red"))
par(new = T)
with(elec_pwr_2days,plot(DateTime,Sub_metering_3,type="l",ylim = c(0,40),xlab = "", ylab = "Energy sub metering",col = "Blue"))
legend("topright",legend=c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"),lty=c(1,1,1), col=c("Black","Red","Blue"))
with(elec_pwr_2days,plot(DateTime,Global_reactive_power,type="l",xlab = "datetime", ylab = "Global_reactive_power"))
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genus_metrics.R
\name{pct_orthocladiinae}
\alias{pct_orthocladiinae}
\title{Percentage of Orthocladiinae}
\usage{
pct_orthocladiinae(long)
}
\arguments{
\item{long}{Taxonomic counts arranged in a long data format (i.e., each
row represents a unique sample and taxon).}
}
\value{
The percentage of Orthocladiinae (Diptera; Chironomidae) individuals.
Orthocladiinae is a subfamily of the family Chironomidae.
}
\description{
Percentage of Orthocladiinae
}
| /man/pct_orthocladiinae.Rd | no_license | esocid/Benthos | R | false | true | 531 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genus_metrics.R
\name{pct_orthocladiinae}
\alias{pct_orthocladiinae}
\title{Percentage of Orthocladiinae}
\usage{
pct_orthocladiinae(long)
}
\arguments{
\item{long}{Taxonomic counts arrange in a long data format (i.e., each
row represents a unique sample and taxon).}
}
\value{
The percentage of Orthocladiinae (Diptera; Chironomidae) individuals.
Orthocladiinae is a subfamily of the family Chironomidae.
}
\description{
Percentage of Orthocladiinae
}
|
#' Information on tuning parameters within an object
#'
#' `parameters()` collects one or more `param` objects into a tibble-based
#' parameter set (an object with class `"parameters"`).
#'
#' @param x An object, such as a list of `param` objects or an actual `param`
#'  object.
#' @param ... Only used for the `param` method so that multiple `param` objects
#' can be passed to the function.
#' @export
parameters <- function(x, ...) {
  # S3 generic: dispatch on the class of `x`.
  UseMethod("parameters")
}
#' @export
#' @rdname parameters
parameters.default <- function(x, ...) {
  # Fallback method: no way to build a parameter set from this class of object.
  rlang::abort("`parameters` objects cannot be created from this type of object.")
}
#' @export
#' @rdname parameters
parameters.param <- function(x, ...) {
  # Bundle the first parameter together with any others supplied via `...`
  # and delegate to the list method.
  parameters(list(x, ...))
}
#' @export
#' @rdname parameters
parameters.list <- function(x, ...) {
  # Every element of the list must be a `param` object.
  is_param <- vapply(x, function(obj) inherits(obj, "param"), logical(1))
  if (!all(is_param)) {
    rlang::abort("The objects should all be `param` objects.")
  }

  # Default identifiers come from each parameter's label; explicit list names,
  # when present and non-empty, take precedence.
  lbl_names <- vapply(x, function(obj) names(obj$label), character(1))
  ids <- names(x)
  if (length(ids) == 0) {
    ids <- lbl_names
  } else {
    ids[ids == ""] <- lbl_names[ids == ""]
  }

  n <- length(x)
  parameters_constr(
    lbl_names,
    ids,
    rep("list", n),
    rep("unknown", n),
    rep("unknown", n),
    x
  )
}
# Validate that `x` is a non-NULL character vector. Error messages quote the
# caller's argument expression. Returns TRUE invisibly on success.
chr_check <- function(x) {
  arg_expr <- match.call()$x
  if (is.null(x)) {
    rlang::abort(
      glue::glue("Element `{arg_expr}` should not be NULL.")
    )
  }
  if (!is.character(x)) {
    rlang::abort(
      glue::glue("Element `{arg_expr}` should be a character string.")
    )
  }
  invisible(TRUE)
}
# Validate that the non-NA values of `x` are unique. On failure, aborts with a
# message naming the caller's argument and listing each duplicated item.
# Returns TRUE invisibly on success.
unique_check <- function(x) {
  non_na <- x[!is.na(x)]
  dup_mask <- duplicated(non_na)
  if (any(dup_mask)) {
    offenders <- non_na[dup_mask]
    arg_label <- deparse(match.call()$x)
    rlang::abort(
      paste0(
        "Element `", arg_label, "` should have unique values. Duplicates exist ",
        "for item(s): ",
        paste0("'", offenders, "'", collapse = ", ")
      )
    )
  }
  invisible(TRUE)
}
# TRUE when `x` is a `param` object or consists entirely of NA values.
param_or_na <- function(x) {
  if (inherits(x, "param")) {
    return(TRUE)
  }
  all(is.na(x))
}
#' Construct a new parameter set object
#'
#' @param name,id,source,component,component_id Character strings with the same
#'  length.
#' @param object A list of `param` objects or NA values.
#' @return A tibble that encapsulates the input vectors into a tibble with an
#'  additional class of "parameters".
#' @keywords internal
#' @export
parameters_constr <-
  function(name, id, source, component, component_id, object) {
    # Validate the metadata columns: all must be character, ids must be unique.
    chr_check(name)
    chr_check(id)
    chr_check(source)
    chr_check(component)
    chr_check(component_id)
    unique_check(id)

    if (is.null(object)) {
      rlang::abort("Element `object` should not be NULL.")
    }
    if (!is.list(object)) {
      rlang::abort("`object` should be a list.")
    }

    # Each element must be either a `param` object or NA (not yet supplied).
    ok <- vapply(object, param_or_na, logical(1))
    if (!all(ok)) {
      rlang::abort(
        paste0(
          "`object` values in the following positions should be NA or a ",
          "`param` object:",
          paste0(which(!ok), collapse = ", ")
        )
      )
    }

    out <-
      tibble(
        name = name,
        id = id,
        source = source,
        component = component,
        component_id = component_id,
        object = object
      )
    class(out) <- c("parameters", class(out))
    out
  }
# Printing helper: NA when the object slot is empty (all NA values), otherwise
# whether the `param` object still contains unfinalized (unknown) values.
unk_check <- function(x) {
  if (all(is.na(x))) {
    return(NA)
  }
  has_unknowns(x)
}
#' @export
print.parameters <- function(x, ...) {
  # Print a summary of the parameter set: one row per tuning parameter, then
  # notes on parameters missing `param` objects or still needing finalization.
  x <- tibble::as_tibble(x)
  cat("Collection of", nrow(x), "parameters for tuning\n\n")
  print_x <- x %>% dplyr::select(identifier = id, type = name, object)
  print_x$object <- purrr::map_chr(print_x$object, dplyr::type_sum)
  print.data.frame(print_x, row.names = FALSE)
  cat("\n")

  # Parameters whose `object` slot is empty (all NA) still need `param` objects.
  null_obj <- map_lgl(x$object, ~ all(is.na(.x)))

  num_missing <- sum(null_obj)
  if (num_missing > 0) {
    if (num_missing == 1) {
      # fixed: the column in `x` is `id`, not `identifier`; `x$identifier`
      # evaluated to NULL on the tibble, so the name was silently omitted
      # from the message
      cat("One needs a `param` object: '", x$id[null_obj], "'\n\n", sep = "")
    } else {
      cat("Several need `param` objects: ",
          paste0("'", x$id[null_obj], "'", collapse = ", "),
          "\n\n")
    }
  }

  # Among parameters that do have objects, find those whose ranges still
  # contain unknown() values and report them grouped by their source.
  other_obj <-
    x %>%
    dplyr::filter(!is.na(object)) %>%
    mutate(
      not_final = map_lgl(object, unk_check),
      label = map_chr(object, ~ .x$label),
      note = paste0(" ", label, " ('", id, "')\n")
    )
  if (any(other_obj$not_final)) {
    # There's a more elegant way to do this, I'm sure:
    mod_obj <- as_tibble(other_obj) %>% dplyr::filter(source == "model_spec" & not_final)
    if (nrow(mod_obj) > 0) {
      cat("Model parameters needing finalization:\n")
      cat(mod_obj$note, sep = "")
      cat("\n")
    }
    rec_obj <- as_tibble(other_obj) %>% dplyr::filter(source == "recipe" & not_final)
    if (nrow(rec_obj) > 0) {
      cat("Recipe parameters needing finalization:\n")
      cat(rec_obj$note, sep = "")
      cat("\n")
    }
    lst_obj <- as_tibble(other_obj) %>% dplyr::filter(source == "list" & not_final)
    if (nrow(lst_obj) > 0) {
      cat("Parameters needing finalization:\n")
      cat(lst_obj$note, sep = "")
      cat("\n")
    }
    cat("See `?dials::finalize` or `?dials::update.parameters` for more information.\n\n")
  }
  invisible(x)
}
# ------------------------------------------------------------------------------
#' Update a single parameter in a parameter set
#'
#' @param object A parameter set.
#' @param ... One or more unquoted named values separated by commas. The names
#' should correspond to the `id` values in the parameter set. The values should
#' be parameter objects or `NA` values.
#' @return The modified parameter set.
#' @examples
#' params <- list(lambda = penalty(), alpha = mixture(), `rand forest` = mtry())
#' pset <- parameters(params)
#' pset
#'
#' update(pset, `rand forest` = finalize(mtry(), mtcars), alpha = mixture(c(.1, .2)))
#' @export
update.parameters <- function(object, ...) {
  args <- rlang::list2(...)
  if (length(args) == 0) {
    rlang::abort("Please supply at least one parameter object.")
  }
  nms <- names(args)
  if (length(nms) == 0 || any(nms == "")) {
    rlang::abort("All arguments should be named.")
  }
  # Every name must refer to an existing id in the set.
  unknown <- !(nms %in% object$id)
  if (any(unknown)) {
    bad <- paste0("'", nms[unknown], "'", collapse = ", ")
    rlang::abort(
      paste("At least one parameter does not match any id's in the set:", bad)
    )
  }
  # Values must be `param` objects or all-NA placeholders.
  is_param <- purrr::map_lgl(args, inherits, "param")
  is_na <- purrr::map_lgl(args, ~ all(is.na(.x)))
  bad_input <- !is_param & !is_na
  if (any(bad_input)) {
    bad <- paste0("'", nms[bad_input], "'", collapse = ", ")
    rlang::abort(
      paste("At least one parameter is not a dials parameter object",
            "or NA:", bad)
    )
  }
  # Replace each named element in place.
  for (nm in nms) {
    object$object[[which(object$id == nm)]] <- args[[nm]]
  }
  object
}
# ------------------------------------------------------------------------------
# Subsetting keeps the "parameters" class: delegate to the underlying tibble
# method, then rebuild the class/attributes with `parameters_reconstruct()`.
#' @export
`[.parameters` <- function(x, i, j, drop = FALSE, ...) {
  out <- NextMethod()
  parameters_reconstruct(out, x)
}
# ------------------------------------------------------------------------------
#' @export
`names<-.parameters` <- function(x, value) {
  out <- NextMethod()
  if (identical_names(out, x)) {
    return(parameters_reconstruct(out, x))
  }
  # Any rename (including swapping existing column names) invalidates the
  # parameter-set invariants, so fall back to a plain tibble.
  tib_upcast(out)
}
# Do `x` and `y` have exactly the same names (same values, same order)?
identical_names <- function(x, y) {
  identical(names(x), names(y))
}
# ------------------------------------------------------------------------------
#' @export
#' @rdname parameters
param_set <- function(x, ...) {
  # Deprecated alias retained for backward compatibility: warn, then forward
  # to `parameters()`.
  msg <- paste0(
    "`param_set()` is deprecated in favor of `parameters()`. ",
    "`param_set()` will be available until version 0.0.5."
  )
  rlang::warn(msg)
  parameters(x, ...)
}
| /R/parameters.R | no_license | yadevi/dials | R | false | false | 7,837 | r | #' Information on tuning parameters within an object
#'
#' @param x An object, such as a list of `param` objects or an actual `param`
#' object.
#' @param ... Only used for the `param` method so that multiple `param` objects
#' can be passed to the function.
#' @export
parameters <- function(x, ...) {
  # S3 generic; see parameters.param, parameters.list, etc. for the methods.
  UseMethod("parameters")
}
#' @export
#' @rdname parameters
parameters.default <- function(x, ...) {
  # Fallback method: no other class matched, so this object type is unsupported.
  msg <- "`parameters` objects cannot be created from this type of object."
  rlang::abort(msg)
}
#' @export
#' @rdname parameters
parameters.param <- function(x, ...) {
  # Bundle the first parameter with any others supplied via `...`, then
  # delegate to the list method.
  parameters(list(x, ...))
}
#' @export
#' @rdname parameters
parameters.list <- function(x, ...) {
  # Every element must be a `param` object before the set can be built.
  if (!all(purrr::map_lgl(x, inherits, "param"))) {
    rlang::abort("The objects should all be `param` objects.")
  }
  # Parameter "names" come from the label attribute of each element.
  elem_name <- purrr::map_chr(x, ~ names(.x$label))
  # Ids default to the list names; unnamed entries fall back to the
  # label-derived names.
  elem_id <- names(x)
  if (length(elem_id) == 0) {
    elem_id <- elem_name
  } else {
    unnamed <- elem_id == ""
    elem_id[unnamed] <- elem_name[unnamed]
  }
  n <- length(x)
  parameters_constr(
    elem_name,
    elem_id,
    rep("list", n),
    rep("unknown", n),
    rep("unknown", n),
    x
  )
}
# Assert that `x` is a (non-NULL) character vector; errors name the caller's
# expression for `x` so the message points at the offending argument.
chr_check <- function(x) {
  cl <- match.call()
  arg <- cl$x
  if (is.null(x)) {
    rlang::abort(glue::glue("Element `{arg}` should not be NULL."))
  }
  if (!is.character(x)) {
    rlang::abort(glue::glue("Element `{arg}` should be a character string."))
  }
  invisible(TRUE)
}
# Assert that the non-missing values of `x` are unique; NA values are allowed
# to repeat. Errors list the duplicated items and name the caller's argument.
unique_check <- function(x) {
  non_missing <- x[!is.na(x)]
  dups <- non_missing[duplicated(non_missing)]
  if (length(dups) > 0) {
    cl <- match.call()
    quoted <- paste0("'", dups, "'", collapse = ", ")
    msg <- paste0(
      "Element `", deparse(cl$x),
      "` should have unique values. Duplicates exist for item(s): ",
      quoted
    )
    rlang::abort(msg)
  }
  invisible(TRUE)
}
# Is `x` either a `param` object or entirely `NA`? Used to validate entries
# of the `object` column in a parameter set.
param_or_na <- function(x) {
  # Both operands are length-one, so the short-circuiting `||` is the right
  # operator here (it also skips the `is.na()` scan when `x` is a `param`).
  inherits(x, "param") || all(is.na(x))
}
#' Construct a new parameter set object
#'
#' @param name,id,source,component,component_id Character strings with the same
#' length.
#' @param object A list of `param` objects or NA values.
#' @return A tibble that encapsulates the input vectors into a tibble with an
#' additional class of "parameters".
#' @keywords internal
#' @export
parameters_constr <-
  function(name, id, source, component, component_id, object) {
    # Validate the metadata columns: all must be character, and ids unique.
    chr_check(name)
    chr_check(id)
    chr_check(source)
    chr_check(component)
    chr_check(component_id)
    unique_check(id)
    if (is.null(object)) {
      rlang::abort("Element `object` should not be NULL.")
    }
    if (!is.list(object)) {
      rlang::abort("`object` should be a list.")
    }
    # Each element must be a `param` object or an NA placeholder.
    # Namespace-qualified for consistency with the rest of the file, which
    # calls `purrr::` and `tibble::` functions explicitly.
    is_good_boi <- purrr::map_lgl(object, param_or_na)
    if (any(!is_good_boi)) {
      rlang::abort(
        paste0(
          "`object` values in the following positions should be NA or a ",
          "`param` object:",
          paste0(which(!is_good_boi), collapse = ", ")
        )
      )
    }
    res <-
      tibble::tibble(
        name = name,
        id = id,
        source = source,
        component = component,
        component_id = component_id,
        object = object
      )
    class(res) <- c("parameters", class(res))
    res
  }
# Returns NA when `x` is entirely missing; otherwise reports whether the
# parameter still contains unknown() values.
unk_check <- function(x) {
  if (all(is.na(x))) NA else has_unknowns(x)
}
#' @export
print.parameters <- function(x, ...) {
  x <- tibble::as_tibble(x)
  cat("Collection of", nrow(x), "parameters for tuning\n\n")
  # Compact view: one row per parameter with its id, type, and object summary.
  print_x <- x %>% dplyr::select(identifier = id, type = name, object)
  print_x$object <- purrr::map_chr(print_x$object, dplyr::type_sum)
  print.data.frame(print_x, row.names = FALSE)
  cat("\n")
  # Entries whose `object` is wholly NA still need a `param` object supplied.
  null_obj <- purrr::map_lgl(x$object, ~ all(is.na(.x)))
  num_missing <- sum(null_obj)
  if (num_missing > 0) {
    # Bug fix: the renamed `identifier` column only exists in `print_x`;
    # `x` keeps the original `id` name, so `x$identifier` was NULL here and
    # the offending parameter names were never printed.
    if (num_missing == 1) {
      cat("One needs a `param` object: '", print_x$identifier[null_obj], "'\n\n", sep = "")
    } else {
      cat("Several need `param` objects: ",
          paste0("'", print_x$identifier[null_obj], "'", collapse = ", "),
          "\n\n")
    }
  }
  other_obj <-
    x %>%
    dplyr::filter(!is.na(object)) %>%
    dplyr::mutate(
      not_final = purrr::map_lgl(object, unk_check),
      label = purrr::map_chr(object, ~ .x$label),
      note = paste0(" ", label, " ('", id, "')\n")
    )
  if (any(other_obj$not_final)) {
    # There's a more elegant way to do this, I'm sure:
    mod_obj <- tibble::as_tibble(other_obj) %>% dplyr::filter(source == "model_spec" & not_final)
    if (nrow(mod_obj) > 0) {
      cat("Model parameters needing finalization:\n")
      cat(mod_obj$note, sep = "")
      cat("\n")
    }
    rec_obj <- tibble::as_tibble(other_obj) %>% dplyr::filter(source == "recipe" & not_final)
    if (nrow(rec_obj) > 0) {
      cat("Recipe parameters needing finalization:\n")
      cat(rec_obj$note, sep = "")
      cat("\n")
    }
    lst_obj <- tibble::as_tibble(other_obj) %>% dplyr::filter(source == "list" & not_final)
    if (nrow(lst_obj) > 0) {
      cat("Parameters needing finalization:\n")
      cat(lst_obj$note, sep = "")
      cat("\n")
    }
    cat("See `?dials::finalize` or `?dials::update.parameters` for more information.\n\n")
  }
  invisible(x)
}
# ------------------------------------------------------------------------------
#' Update a single parameter in a parameter set
#'
#' @param object A parameter set.
#' @param ... One or more unquoted named values separated by commas. The names
#' should correspond to the `id` values in the parameter set. The values should
#' be parameter objects or `NA` values.
#' @return The modified parameter set.
#' @examples
#' params <- list(lambda = penalty(), alpha = mixture(), `rand forest` = mtry())
#' pset <- parameters(params)
#' pset
#'
#' update(pset, `rand forest` = finalize(mtry(), mtcars), alpha = mixture(c(.1, .2)))
#' @export
update.parameters <- function(object, ...) {
  args <- rlang::list2(...)
  if (length(args) == 0) {
    rlang::abort("Please supply at least one parameter object.")
  }
  nms <- names(args)
  if (length(nms) == 0 || any(nms == "")) {
    rlang::abort("All arguments should be named.")
  }
  # Every name must refer to an existing id in the set.
  unknown <- !(nms %in% object$id)
  if (any(unknown)) {
    bad <- paste0("'", nms[unknown], "'", collapse = ", ")
    rlang::abort(
      paste("At least one parameter does not match any id's in the set:", bad)
    )
  }
  # Values must be `param` objects or all-NA placeholders.
  is_param <- purrr::map_lgl(args, inherits, "param")
  is_na <- purrr::map_lgl(args, ~ all(is.na(.x)))
  bad_input <- !is_param & !is_na
  if (any(bad_input)) {
    bad <- paste0("'", nms[bad_input], "'", collapse = ", ")
    rlang::abort(
      paste("At least one parameter is not a dials parameter object",
            "or NA:", bad)
    )
  }
  # Replace each named element in place.
  for (nm in nms) {
    object$object[[which(object$id == nm)]] <- args[[nm]]
  }
  object
}
# ------------------------------------------------------------------------------
# Subsetting keeps the "parameters" class: delegate to the underlying tibble
# method, then rebuild the class/attributes with `parameters_reconstruct()`.
#' @export
`[.parameters` <- function(x, i, j, drop = FALSE, ...) {
  out <- NextMethod()
  parameters_reconstruct(out, x)
}
# ------------------------------------------------------------------------------
#' @export
`names<-.parameters` <- function(x, value) {
  out <- NextMethod()
  if (identical_names(out, x)) {
    return(parameters_reconstruct(out, x))
  }
  # Any rename (including swapping existing column names) invalidates the
  # parameter-set invariants, so fall back to a plain tibble.
  tib_upcast(out)
}
# Do `x` and `y` have exactly the same names (same values, same order)?
identical_names <- function(x, y) {
  identical(names(x), names(y))
}
# ------------------------------------------------------------------------------
#' @export
#' @rdname parameters
param_set <- function(x, ...) {
  # Deprecated alias retained for backward compatibility: warn, then forward
  # to `parameters()`.
  msg <- paste0(
    "`param_set()` is deprecated in favor of `parameters()`. ",
    "`param_set()` will be available until version 0.0.5."
  )
  rlang::warn(msg)
  parameters(x, ...)
}
|
library("ape")
library("ggplot2")
library("ggtree")
library("RColorBrewer")
require('gtools')
# Maximum ignoring NA values; returns NA (rather than -Inf with a warning)
# when every element is missing. Used when drawing tip labels.
my.max <- function(x) {
  # Plain `if`/`else` replaces the original scalar `ifelse()` call, which ran
  # the vectorized branch machinery on a length-one condition, and spells out
  # TRUE instead of the reassignable `T`.
  if (all(is.na(x))) NA else max(x, na.rm = TRUE)
}
# Read in tree, Newick format
infile <- "~/Desktop/raxml_satDNA/RAxML_bipartitions.dmau_rsp_Xchrom_dere_MAFFT.phy_automre.converted.done"
spp <- "dmau"
reptype <- "rsp"
sat.tree <- read.tree(sprintf("%s",infile))
# Root the tree on the D. erecta consensus sequence (outgroup).
sat.tree <- root(sat.tree, "RSP_LIKE.dere.consensus_1_169")
# Output of htseq RPM counting script, run thru eccDNA_repeat_variants_counts_table.R (on BH)
# NOTE(review): this sprintf() format string contains no %s placeholders, so
# `spp` and `reptype` are ignored and the literal path is used — presumably a
# leftover from a templated path; confirm the intended filename.
mapping <- read.table(sprintf("~/Desktop/satDNA_mapping/4-mauE_rsp.txt",spp,reptype),sep="\t",header=T)
#######################################################
# Making a data frame with annotations to pass to ggtree using the %<+% operator
dd <- data.frame(tips=sat.tree$tip.label,cyto=sapply(strsplit(sat.tree$tip.label,"\\."), `[`, 2))
# Translation: make data frame with first column matching tip labels in the
# phylo object; second column is the cyto band (done by splitting tip labels
# on "." and saving the second field).
# Translation: if making tree for 1.688, take the third field instead.
dd$cyto <- gsub("[A-Z]","",dd$cyto)
dd$cyto <- as.factor(dd$cyto)
# Translation: remove the cyto subdivision A-F, keeping it as a factor,
# then sort the levels (i.e. cyto bands) numerically:
dd$cyto <- factor(dd$cyto, levels = mixedsort(levels(dd$cyto)))
row.names(dd) <- NULL
# Make column in annotations to hold variant ID:
dd$var <- NA
# Make another column to hold the % total reads represented by that variant
dd$RPM <- NA
# Make another column to hold pch shape (19 = filled circle)
dd$tshape <- 19
# Make yet another column to hold alpha value (1 = fully opaque)
dd$talpha <- 1
# Assign an RPM value to each variant in the annotations data frame (dd) by
# matching its start coordinate against the mapping table.
for (i in seq_len(nrow(dd))) {
  # For each row in the annotations data frame, save the tip label
  # (containing start-end coordinates) as a variable.
  label <- as.character(dd[i, 1])
  cyto <- as.character(dd[i, "cyto"])
  if (cyto == "het") {
    # Heterochromatic variants are drawn slightly transparent.
    dd[i, "talpha"] <- 0.75
  }
  # Parsing the label to get the contig name involves juggling the "U_"
  # contigs, whose underscore collides with the field separator.
  contig <- as.character(unlist(strsplit(label, "\\."))[3])
  contig <- gsub("U_", "U.", contig)
  contig <- as.character(unlist(strsplit(contig, "_"))[1])
  contig <- gsub("U.", "U_", contig)
  label <- sub("U_", "U.", label) # fix annoying formatting thing with the U contigs
  start <- as.numeric(unlist(strsplit(label, "_"))[3]) # parse it and save as variable = "start"
  end <- as.numeric(unlist(strsplit(label, "_"))[4])
  #############################################################################
  # Scan the mapping table for the row whose start coordinate matches.
  for (j in seq_len(nrow(mapping))) {
    # Bug fix: the inner loop previously indexed `mapping` with `i` (the
    # outer index) instead of `j`, so only one mapping row was ever examined
    # and most variants never received an RPM value.
    mapcoord <- as.character(mapping[j, 1])
    mapcoord <- sub("U_", "U.", mapcoord) # fix annoying formatting thing with the U contigs
    mapstart <- as.numeric(unlist(strsplit(mapcoord, "_"))[3])
    mapRPM <- as.numeric(as.character(mapping[j, 3]))
    if (start == mapstart) {
      dd[i, "RPM"] <- mapRPM
      break
    }
  }
}
# manually set value for tree root (D. ere), which won't have anything associated with it
#dd$RPM[dd$cyto=="dere"] <- (my.max(dd$RPM)/10)
dd$RPM[dd$cyto=="dere"] <- 800
#Define color palette################################
# NOTE(review): several `cols <-` assignments below are uncommented, so only
# the LAST one executed takes effect (currently the "D. sim Rsp new
# coordinates" palette). Comment out all but the palette for the species and
# repeat type actually being plotted.
#Full set:
cols <- c("#660033", "#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFCC00", "#FFFF33", "#99FF33", "#339900", "#336600", "#66CC99", "#99FFFF", "#FFCCFF", "#9933FF", "#0000FF", "#000099", "black", "grey")
#Select below as needed, ensures colors are consistent across species
#D. mau 1.688:
#cols <- c("#660033", "#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFCC00", "#FFFF33", "#99FF33", "#339900", "#336600", "#66CC99", "#99FFFF", "#FFCCFF", "#9933FF", "#0000FF", "black", "grey")
#D. mau Rsp:
cols <- c("#660033", "#CC0000", "#FF6633", "#FF9966", "#66CC99", "#99FFFF", "black")
#D. mel 1.688:
#cols <- c("#660033", "#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFCC00", "#FFFF33", "#99FF33", "#339900", "#336600", "#66CC99", "#99FFFF", "#FFCCFF", "#9933FF", "black", "grey")
#D. mel Rsp:
#cols <- c("#CC0000","#FF6633","#FFCC00","black","grey")
#D.sech 1.688:
#cols <- c("#660033", "#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFCC00", "#FFFF33", "#99FF33", "#339900", "#336600", "#66CC99", "#99FFFF", "#FFCCFF", "#9933FF", "#0000FF", "#000099", "black", "grey")
#D. sech Rsp:
cols <- c("#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFCC00", "black","grey")
#D. sech Rsp new coordinates:
cols <- c("#FF6699", "#CC0000", "#FF6633", "#FFCC00", "black","grey")
#D. sim 1.688:
#cols <- c("#660033", "#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFCC00", "#FFFF33", "#99FF33", "#339900", "#336600", "#66CC99", "#99FFFF", "#FFCCFF", "#9933FF", "#0000FF", "black", "grey")
#D. sim Rsp
#cols <- c("#660033", "#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFFF33", "#99FF33", "#339900", "#336600", "#66CC99", "#99FFFF", "black", "grey")
#D. sim Rsp new coordinates
cols <- c("#660033", "#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFCC00", "#FFFF33", "#99FF33", "#339900", "#336600", "#66CC99", "#99FFFF", "black", "grey")
#D. ere 1.688
#cols <- c("#660033", "#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFCC00", "#FFFF33", "#339900", "black")
#D. ere Rsp
#cols <- c("#660033", "#FF6699", "#CC0000", "black")
###################################################
# First pass: build the tree object only to extract per-node bootstrap values.
p <- ggtree(sat.tree,layout="rectangular") %<+% dd
# Get nodes to highlight based on bootstrapping values
d <- p$data
d <- d[!d$isTip,]
d$label <- as.numeric(d$label)
# Keep internal nodes with bootstrap support >= 90 for highlighting.
d <- subset(d,d$label>=90)
nodes <- d$node
###################################################
# Second pass: the full plot, with tips colored by cyto band, sized by RPM,
# and well-supported nodes marked with translucent squares (shape 15).
p <- ggtree(sat.tree,layout="rectangular") %<+% dd +
  geom_tippoint(aes(color=cyto,size=RPM,shape=tshape,alpha=talpha))+
  xlim(0,0.7)+
  scale_colour_manual(values=cols)+
  #scale_alpha_discrete(range=c(0.5,1))+
  theme(legend.position="right",legend.key=element_blank())+
  geom_treescale(x=0.4,y=0,offset=5)+
  scale_shape_identity()+
  #geom_text2(aes(label=label, subset = !is.na(as.numeric(label)) & as.numeric(label) > 100), color="black")+
  geom_point2(aes(subset=(node %in% nodes),shape=15), size=3,alpha=0.5,color="#996666")
# Preview on screen, then write the same plot to a PDF named after the input.
p+scale_alpha(range=c(0.75,1))
# NOTE(review): gsub only replaces the "automre" substring, so the output
# filename keeps the trailing ".converted.done" after ".pdf" — confirm this is
# the intended naming convention.
outfile <- gsub("automre","BSnodes.edges.scaledtips.pdf",infile)
pdf(sprintf("%s",outfile),width=11,height=8.5)
p+scale_alpha(range=c(0.75,1))
dev.off()
###########################################################################################################
| /Supplemental_figures/FigsS7_to_S14/Rsp_like/tree_draw_phyML_ggtree_eccDNA_simclade_rsp_version.R | no_license | LarracuenteLab/simulans_clade_satDNA_evolution | R | false | false | 6,439 | r | library("ape")
library("ggplot2")
library("ggtree")
library("RColorBrewer")
require('gtools')
# Maximum ignoring NA values; returns NA (rather than -Inf with a warning)
# when every element is missing. Used when drawing tip labels.
my.max <- function(x) {
  # Plain `if`/`else` replaces the original scalar `ifelse()` call, which ran
  # the vectorized branch machinery on a length-one condition, and spells out
  # TRUE instead of the reassignable `T`.
  if (all(is.na(x))) NA else max(x, na.rm = TRUE)
}
# Read in tree, Newick format
infile <- "~/Desktop/raxml_satDNA/RAxML_bipartitions.dmau_rsp_Xchrom_dere_MAFFT.phy_automre.converted.done"
spp <- "dmau"
reptype <- "rsp"
sat.tree <- read.tree(sprintf("%s",infile))
# Root the tree on the D. erecta consensus sequence (outgroup).
sat.tree <- root(sat.tree, "RSP_LIKE.dere.consensus_1_169")
# Output of htseq RPM counting script, run thru eccDNA_repeat_variants_counts_table.R (on BH)
# NOTE(review): this sprintf() format string contains no %s placeholders, so
# `spp` and `reptype` are ignored and the literal path is used — presumably a
# leftover from a templated path; confirm the intended filename.
mapping <- read.table(sprintf("~/Desktop/satDNA_mapping/4-mauE_rsp.txt",spp,reptype),sep="\t",header=T)
#######################################################
# Making a data frame with annotations to pass to ggtree using the %<+% operator
dd <- data.frame(tips=sat.tree$tip.label,cyto=sapply(strsplit(sat.tree$tip.label,"\\."), `[`, 2))
# Translation: make data frame with first column matching tip labels in the
# phylo object; second column is the cyto band (done by splitting tip labels
# on "." and saving the second field).
# Translation: if making tree for 1.688, take the third field instead.
dd$cyto <- gsub("[A-Z]","",dd$cyto)
dd$cyto <- as.factor(dd$cyto)
# Translation: remove the cyto subdivision A-F, keeping it as a factor,
# then sort the levels (i.e. cyto bands) numerically:
dd$cyto <- factor(dd$cyto, levels = mixedsort(levels(dd$cyto)))
row.names(dd) <- NULL
# Make column in annotations to hold variant ID:
dd$var <- NA
# Make another column to hold the % total reads represented by that variant
dd$RPM <- NA
# Make another column to hold pch shape (19 = filled circle)
dd$tshape <- 19
# Make yet another column to hold alpha value (1 = fully opaque)
dd$talpha <- 1
# Assign an RPM value to each variant in the annotations data frame (dd) by
# matching its start coordinate against the mapping table.
for (i in seq_len(nrow(dd))) {
  # For each row in the annotations data frame, save the tip label
  # (containing start-end coordinates) as a variable.
  label <- as.character(dd[i, 1])
  cyto <- as.character(dd[i, "cyto"])
  if (cyto == "het") {
    # Heterochromatic variants are drawn slightly transparent.
    dd[i, "talpha"] <- 0.75
  }
  # Parsing the label to get the contig name involves juggling the "U_"
  # contigs, whose underscore collides with the field separator.
  contig <- as.character(unlist(strsplit(label, "\\."))[3])
  contig <- gsub("U_", "U.", contig)
  contig <- as.character(unlist(strsplit(contig, "_"))[1])
  contig <- gsub("U.", "U_", contig)
  label <- sub("U_", "U.", label) # fix annoying formatting thing with the U contigs
  start <- as.numeric(unlist(strsplit(label, "_"))[3]) # parse it and save as variable = "start"
  end <- as.numeric(unlist(strsplit(label, "_"))[4])
  #############################################################################
  # Scan the mapping table for the row whose start coordinate matches.
  for (j in seq_len(nrow(mapping))) {
    # Bug fix: the inner loop previously indexed `mapping` with `i` (the
    # outer index) instead of `j`, so only one mapping row was ever examined
    # and most variants never received an RPM value.
    mapcoord <- as.character(mapping[j, 1])
    mapcoord <- sub("U_", "U.", mapcoord) # fix annoying formatting thing with the U contigs
    mapstart <- as.numeric(unlist(strsplit(mapcoord, "_"))[3])
    mapRPM <- as.numeric(as.character(mapping[j, 3]))
    if (start == mapstart) {
      dd[i, "RPM"] <- mapRPM
      break
    }
  }
}
# manually set value for tree root (D. ere), which won't have anything associated with it
#dd$RPM[dd$cyto=="dere"] <- (my.max(dd$RPM)/10)
dd$RPM[dd$cyto=="dere"] <- 800
#Define color palette################################
# NOTE(review): several `cols <-` assignments below are uncommented, so only
# the LAST one executed takes effect (currently the "D. sim Rsp new
# coordinates" palette). Comment out all but the palette for the species and
# repeat type actually being plotted.
#Full set:
cols <- c("#660033", "#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFCC00", "#FFFF33", "#99FF33", "#339900", "#336600", "#66CC99", "#99FFFF", "#FFCCFF", "#9933FF", "#0000FF", "#000099", "black", "grey")
#Select below as needed, ensures colors are consistent across species
#D. mau 1.688:
#cols <- c("#660033", "#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFCC00", "#FFFF33", "#99FF33", "#339900", "#336600", "#66CC99", "#99FFFF", "#FFCCFF", "#9933FF", "#0000FF", "black", "grey")
#D. mau Rsp:
cols <- c("#660033", "#CC0000", "#FF6633", "#FF9966", "#66CC99", "#99FFFF", "black")
#D. mel 1.688:
#cols <- c("#660033", "#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFCC00", "#FFFF33", "#99FF33", "#339900", "#336600", "#66CC99", "#99FFFF", "#FFCCFF", "#9933FF", "black", "grey")
#D. mel Rsp:
#cols <- c("#CC0000","#FF6633","#FFCC00","black","grey")
#D.sech 1.688:
#cols <- c("#660033", "#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFCC00", "#FFFF33", "#99FF33", "#339900", "#336600", "#66CC99", "#99FFFF", "#FFCCFF", "#9933FF", "#0000FF", "#000099", "black", "grey")
#D. sech Rsp:
cols <- c("#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFCC00", "black","grey")
#D. sech Rsp new coordinates:
cols <- c("#FF6699", "#CC0000", "#FF6633", "#FFCC00", "black","grey")
#D. sim 1.688:
#cols <- c("#660033", "#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFCC00", "#FFFF33", "#99FF33", "#339900", "#336600", "#66CC99", "#99FFFF", "#FFCCFF", "#9933FF", "#0000FF", "black", "grey")
#D. sim Rsp
#cols <- c("#660033", "#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFFF33", "#99FF33", "#339900", "#336600", "#66CC99", "#99FFFF", "black", "grey")
#D. sim Rsp new coordinates
cols <- c("#660033", "#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFCC00", "#FFFF33", "#99FF33", "#339900", "#336600", "#66CC99", "#99FFFF", "black", "grey")
#D. ere 1.688
#cols <- c("#660033", "#FF6699", "#CC0000", "#FF6633", "#FF9966", "#FFCC00", "#FFFF33", "#339900", "black")
#D. ere Rsp
#cols <- c("#660033", "#FF6699", "#CC0000", "black")
###################################################
# First pass: build the tree object only to extract per-node bootstrap values.
p <- ggtree(sat.tree,layout="rectangular") %<+% dd
# Get nodes to highlight based on bootstrapping values
d <- p$data
d <- d[!d$isTip,]
d$label <- as.numeric(d$label)
# Keep internal nodes with bootstrap support >= 90 for highlighting.
d <- subset(d,d$label>=90)
nodes <- d$node
###################################################
# Second pass: the full plot, with tips colored by cyto band, sized by RPM,
# and well-supported nodes marked with translucent squares (shape 15).
p <- ggtree(sat.tree,layout="rectangular") %<+% dd +
  geom_tippoint(aes(color=cyto,size=RPM,shape=tshape,alpha=talpha))+
  xlim(0,0.7)+
  scale_colour_manual(values=cols)+
  #scale_alpha_discrete(range=c(0.5,1))+
  theme(legend.position="right",legend.key=element_blank())+
  geom_treescale(x=0.4,y=0,offset=5)+
  scale_shape_identity()+
  #geom_text2(aes(label=label, subset = !is.na(as.numeric(label)) & as.numeric(label) > 100), color="black")+
  geom_point2(aes(subset=(node %in% nodes),shape=15), size=3,alpha=0.5,color="#996666")
# Preview on screen, then write the same plot to a PDF named after the input.
p+scale_alpha(range=c(0.75,1))
# NOTE(review): gsub only replaces the "automre" substring, so the output
# filename keeps the trailing ".converted.done" after ".pdf" — confirm this is
# the intended naming convention.
outfile <- gsub("automre","BSnodes.edges.scaledtips.pdf",infile)
pdf(sprintf("%s",outfile),width=11,height=8.5)
p+scale_alpha(range=c(0.75,1))
dev.off()
###########################################################################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hiReadsProcessor.R
\name{getSectorsForSamples}
\alias{getSectorsForSamples}
\title{Get sectors for samples defined in the sampleInfo object.}
\usage{
getSectorsForSamples(
sampleInfo,
sector = NULL,
samplename = NULL,
returnDf = FALSE
)
}
\arguments{
\item{sampleInfo}{sample information SimpleList object, which holds samples per sector/quadrant information along with other metadata.}
\item{sector}{a specific sector or vector of sectors if known ahead of time. Default is NULL, which extracts all sectors.}
\item{samplename}{a specific sample or vector of samplenames to get sectors for. Default is NULL, which extracts all samples.}
\item{returnDf}{return results in a dataframe. Default is FALSE.}
}
\value{
If returnDf=TRUE, then a dataframe of sector associated with each samplename, else a named list of length two: x[["sectors"]] and x[["samplenames"]]
}
\description{
Given a sampleInfo object, the function gets the sectors for each samplename. This is an accessory function utilized by other functions of this package to aid sector retrieval.
}
\examples{
load(file.path(system.file("data", package = "hiReadsProcessor"),
"FLX_seqProps.RData"))
samples <- c('Roth-MLV3p-CD4TMLVWell6-Tsp509I',
'Roth-MLV3p-CD4TMLVWell6-MseI', 'Roth-MLV3p-CD4TMLVwell5-MuA')
getSectorsForSamples(seqProps, samplename=samples)
getSectorsForSamples(seqProps, samplename=samples, returnDf=TRUE)
}
\seealso{
\code{\link{extractSeqs}}, \code{\link{extractFeature}},
\code{\link{addFeature}}
}
| /man/getSectorsForSamples.Rd | no_license | malnirav/hiReadsProcessor | R | false | true | 1,566 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hiReadsProcessor.R
\name{getSectorsForSamples}
\alias{getSectorsForSamples}
\title{Get sectors for samples defined in the sampleInfo object.}
\usage{
getSectorsForSamples(
sampleInfo,
sector = NULL,
samplename = NULL,
returnDf = FALSE
)
}
\arguments{
\item{sampleInfo}{sample information SimpleList object, which holds samples per sector/quadrant information along with other metadata.}
\item{sector}{a specific sector or vector of sectors if known ahead of time. Default is NULL, which extracts all sectors.}
\item{samplename}{a specific sample or vector of samplenames to get sectors for. Default is NULL, which extracts all samples.}
\item{returnDf}{return results in a dataframe. Default is FALSE.}
}
\value{
If returnDf=TRUE, then a dataframe of sector associated with each samplename, else a named list of length two: x[["sectors"]] and x[["samplenames"]]
}
\description{
Given a sampleInfo object, the function gets the sectors for each samplename. This is an accessory function utilized by other functions of this package to aid sector retrieval.
}
\examples{
load(file.path(system.file("data", package = "hiReadsProcessor"),
"FLX_seqProps.RData"))
samples <- c('Roth-MLV3p-CD4TMLVWell6-Tsp509I',
'Roth-MLV3p-CD4TMLVWell6-MseI', 'Roth-MLV3p-CD4TMLVwell5-MuA')
getSectorsForSamples(seqProps, samplename=samples)
getSectorsForSamples(seqProps, samplename=samples, returnDf=TRUE)
}
\seealso{
\code{\link{extractSeqs}}, \code{\link{extractFeature}},
\code{\link{addFeature}}
}
|
# Auto-generated fuzzing fixture: calls esreg::G1_fun with a fixed argument
# list (including a subnormal double) and prints the structure of the result.
testlist <- list(type = 101L, z = 2.00995441322518e-310)
result <- do.call(esreg::G1_fun,testlist)
str(result)
result <- do.call(esreg::G1_fun,testlist)
str(result) |
#!/usr/bin/env
# Validate the R samples: source each leaf sample directory's entry script
# against an Azure ML workspace.
# Usage: Rscript validate_samples.R <samples dir> [skip1;skip2;...]
args <- commandArgs(trailingOnly = TRUE)
if (length(args) == 0) {
  stop("Please provide the Samples directory path", call.=FALSE)
}
library(azuremlsdk)
# Workspace coordinates come from the CI environment; the subscription id is
# NA when unset so the script can no-op outside CI (see the guard at the end).
subscription_id <- Sys.getenv("TEST_SUBSCRIPTION_ID", unset = NA)
resource_group <- Sys.getenv("TEST_RESOURCE_GROUP")
workspace_name <- Sys.getenv("TEST_WORKSPACE_NAME")
cluster_name <- Sys.getenv("TEST_CLUSTER_NAME")
root_dir <- getwd()
# Return the leaf directories (directories with no subdirectories) under
# `path`, searching recursively. `path` itself is returned when it has no
# subdirectories.
getPathLeaves <- function(path){
  children <- list.dirs(path, recursive = FALSE)
  if (length(children) == 0) {
    return(path)
  }
  # Recurse into each child and flatten; this replaces the original
  # grow-a-list-in-a-loop pattern with a direct lapply/unlist.
  unlist(lapply(children, getPathLeaves))
}
# Source every leaf sample directory's "<dirname>.R" entry script, skipping
# any directory names listed (semicolon-separated) in args[2]. Errors in a
# sample abort the run with the failing script's name and error message.
validate_samples <- function(args) {
  directory <- args[1]
  sample_dirs <- getPathLeaves(directory)
  skip_tests <- c()
  if (length(args) > 1) {
    skip_tests <- unlist(strsplit(args[2], ";"))
  }
  for (sub_dir in sample_dirs) {
    if (basename(sub_dir) %in% skip_tests) {
      next
    }
    entry_script <- paste0(basename(sub_dir), ".R")
    setwd(sub_dir)
    tryCatch({
      source(entry_script)
    },
    error = function(e) {
      # Bug fix: message(e) prints to stderr and returns NULL, so the original
      # stop() lost the underlying error text; conditionMessage() captures it.
      stop(entry_script, "\n", conditionMessage(e))
    },
    finally = {
      # Always return to the repo root, even when a sample script fails.
      setwd(root_dir)
    })
  }
}
# Only run when CI credentials are present: fetch the workspace, write its
# config to the repo root (so samples can load it), then validate all samples.
if(!is.na(subscription_id)) {
  ws <- get_workspace(name = workspace_name,
                      subscription_id = subscription_id,
                      resource_group = resource_group)
  write_workspace_config(ws, path = root_dir)
  validate_samples(args)
}
| /.azure-pipelines/scripts/validate_samples.R | permissive | Azure/azureml-sdk-for-r | R | false | false | 1,443 | r | #!/usr/bin/env
# Validate the R samples: source each leaf sample directory's entry script
# against an Azure ML workspace.
# Usage: Rscript validate_samples.R <samples dir> [skip1;skip2;...]
args <- commandArgs(trailingOnly = TRUE)
if (length(args) == 0) {
  stop("Please provide the Samples directory path", call.=FALSE)
}
library(azuremlsdk)
# Workspace coordinates come from the CI environment; the subscription id is
# NA when unset so the script can no-op outside CI (see the guard at the end).
subscription_id <- Sys.getenv("TEST_SUBSCRIPTION_ID", unset = NA)
resource_group <- Sys.getenv("TEST_RESOURCE_GROUP")
workspace_name <- Sys.getenv("TEST_WORKSPACE_NAME")
cluster_name <- Sys.getenv("TEST_CLUSTER_NAME")
root_dir <- getwd()
# Return the leaf directories (directories with no subdirectories) under
# `path`, searching recursively. `path` itself is returned when it has no
# subdirectories.
getPathLeaves <- function(path){
  children <- list.dirs(path, recursive = FALSE)
  if (length(children) == 0) {
    return(path)
  }
  # Recurse into each child and flatten; this replaces the original
  # grow-a-list-in-a-loop pattern with a direct lapply/unlist.
  unlist(lapply(children, getPathLeaves))
}
# Source every leaf sample directory's "<dirname>.R" entry script, skipping
# any directory names listed (semicolon-separated) in args[2]. Errors in a
# sample abort the run with the failing script's name and error message.
validate_samples <- function(args) {
  directory <- args[1]
  sample_dirs <- getPathLeaves(directory)
  skip_tests <- c()
  if (length(args) > 1) {
    skip_tests <- unlist(strsplit(args[2], ";"))
  }
  for (sub_dir in sample_dirs) {
    if (basename(sub_dir) %in% skip_tests) {
      next
    }
    entry_script <- paste0(basename(sub_dir), ".R")
    setwd(sub_dir)
    tryCatch({
      source(entry_script)
    },
    error = function(e) {
      # Bug fix: message(e) prints to stderr and returns NULL, so the original
      # stop() lost the underlying error text; conditionMessage() captures it.
      stop(entry_script, "\n", conditionMessage(e))
    },
    finally = {
      # Always return to the repo root, even when a sample script fails.
      setwd(root_dir)
    })
  }
}
# Only run when CI credentials are present: fetch the workspace, write its
# config to the repo root (so samples can load it), then validate all samples.
if(!is.na(subscription_id)) {
  ws <- get_workspace(name = workspace_name,
                      subscription_id = subscription_id,
                      resource_group = resource_group)
  write_workspace_config(ws, path = root_dir)
  validate_samples(args)
}
|
# Parse a Westlaw cases txt export. Cases are bracketed by pairs of the
# copyright notice; for each case this extracts the case name, date, citation
# code, court, and the sentence(s) containing `key`, returning a data frame
# with columns Case, Date, Code, Court, Body.
westlaw_cases <- function(x, key){
  # The copyright notice below acts as the separator between cases in a
  # Westlaw cases txt file; notices appear in begin/end pairs.
  separator <- which(x == "© 2015 Sweet & Maxwell")
  # Odd-numbered markers open a case, even-numbered markers close it.
  # Bug fix: these were originally assigned to `sep_odd`/`sep_even` while the
  # rest of the function referenced the undefined `sep_begin`/`sep_end`,
  # so the function errored immediately.
  sep_begin <- separator[seq(1, length(separator), by = 2)]
  sep_end <- separator[seq(2, length(separator), by = 2)]
  # Container matrix: one row per case; columns = name, date, code, court, body.
  full_cases.m <- matrix(nrow = length(sep_begin), ncol = 5)
  # Work through the dataset case by case using the separator positions.
  for (i in seq_along(sep_begin)) {
    # Start by isolating the case name. The name is placed inconsistently
    # across cases, so work through a series of assumptions:
    # 1. There is a ' v ' in a line (as in "Eden v Foster").
    # 2. If no line matches (as in "re Duomatic"), take the first line.
    # 3. If more than one line matches, take the first match.
    a <- x[(sep_begin[i] + 1):(sep_end[i] - 1)]
    top <- a[1:8]
    name0 <- grep(" v ", top)
    if (length(name0) == 0) {
      full_cases.m[i, 1] <- top[1]
    } else {
      # Bug fix: the multi-match branch previously stored the numeric index
      # `name0[1]` itself rather than the matching line of text.
      full_cases.m[i, 1] <- top[name0[1]]
    }
    # Now the date, court and code:
    # 1. Find a line with a month name in the first 8 lines.
    # 2. Take the following line as the case code (that's where they usually are).
    # 3. Take the preceding line as the court (Chancery etc.); NA if absent.
    months <- c("January", "February", "March", "April", "May", "June", "July",
                "August", "September", "October", "November", "December")
    for (q in 1:12) {
      m <- grep(months[q], top)
      if (length(m) > 0) {
        full_cases.m[i, 2] <- top[m[1]]           # date line
        full_cases.m[i, 3] <- top[m[1] + 1]       # code line
        if (length(top[m[1] - 1]) > 0) {          # court line, when present
          full_cases.m[i, 4] <- top[m[1] - 1]
        } else {
          full_cases.m[i, 4] <- NA
        }
      }
    }
    # Extract every sentence containing `key` from the case text.
    isolatekey <- function(a, key) {
      collapsed <- unlist(strsplit(a, " "))
      # Generalization/bug fix: search for the caller-supplied `key`; the
      # original hard-coded "unaccountable" and ignored the argument.
      hits <- grep(key, collapsed)
      fullstops <- grep("\\.", collapsed)
      # Bug fix: the original never updated its accumulator (`d`), so only
      # the last matching sentence survived; it also errored when there was
      # no match or no preceding full stop.
      sentences <- character(0)
      for (h in hits) {
        after <- fullstops[fullstops >= h]
        end <- after[1]
        before <- fullstops[fullstops < h]
        start <- if (length(before) == 0) 1 else before[length(before)] + 1
        sentences <- c(sentences, paste(collapsed[start:end], collapse = " "))
      }
      sentences
    }
    sentence <- isolatekey(a, key)
    # Join multiple matching sentences so they fit a single matrix cell
    # (assigning a length > 1 vector to one cell is an error); yields "" when
    # the key does not occur in this case.
    full_cases.m[i, 5] <- paste(sentence, collapse = " ")
  }
  y <- as.data.frame(full_cases.m)
  # Proper column names
  colnames(y) <- c("Case", "Date", "Code", "Court", "Body")
  # Format date (assumes an English locale for month names)
  y$Date <- as.Date(y$Date, format = "%d %B %Y")
  # Remove leading page numbers (e.g. "*123") from case names
  y[, 1] <- gsub('\\*[[:digit:]]+', '', y[, 1])
  y
}
| /Unaccountability/Westlaw Cases Function.R | no_license | cokelly/Chicago | R | false | false | 4,670 | r | # Formula to isolate cases by copyright sign, and to ditch the metadata at the start, then create a dataframe with case name, code, date and body
# Isolate cases in a Westlaw txt export (one character vector element per
# line), drop the metadata at the start of each case, and build a data frame
# with case name, date, code, court and the sentence(s) containing a keyword.
#
# x   - character vector of lines from a Westlaw cases txt file
# key - keyword (regular expression) to locate within each case body; the
#       original hard-coded "unaccountable" and ignored this argument
# Returns a data.frame with columns Case, Date, Code, Court, Body.
westlaw_cases <- function(x, key){
  # The copyright notice below acts as the separator between cases
  separator <- which(x == "© 2015 Sweet & Maxwell")
  # odd separator markers open a case, even separator markers close it
  # (the original computed sep_odd/sep_even but then referenced the
  # undefined names sep_begin/sep_end; fixed here)
  sep_begin <- separator[seq(1, length(separator), by = 2)]
  sep_end <- separator[seq(2, length(separator), by = 2)]
  # container matrix: one row per case, columns Case/Date/Code/Court/Body
  full_cases.m <- matrix(nrow = length(sep_begin), ncol = 5)

  # Extract the sentence surrounding word position 'pos', given the indices
  # of words that contain a full stop. Falls back to the start/end of the
  # text when no full stop precedes/follows the keyword (the original
  # errored in that situation).
  extract_sentence <- function(collapsed, fullstops, pos){
    after <- fullstops[fullstops >= pos]
    end <- if (length(after) > 0) after[1] else length(collapsed)
    before <- fullstops[fullstops < pos]
    start <- if (length(before) > 0) before[length(before)] + 1 else 1
    paste(collapsed[start:end], collapse = " ")
  }

  # Collect every sentence containing 'key'; NA when the keyword is absent.
  isolatekey <- function(a, key){
    collapsed <- unlist(strsplit(a, " "))
    hits <- grep(key, collapsed)
    fullstops <- grep("\\.", collapsed)
    if (length(hits) == 0) {
      return(NA_character_)
    }
    sentences <- vapply(hits,
                        function(p) extract_sentence(collapsed, fullstops, p),
                        character(1))
    # collapse all matching sentences into one string so the result always
    # fits in a single matrix cell (the original's accumulator variable 'd'
    # was never updated, so only the last match survived)
    paste(sentences, collapse = " ")
  }

  # work through the dataset on a case-by-case basis
  for(i in seq_along(sep_begin)){
    # case body without the separator lines
    a <- x[(sep_begin[i] + 1):(sep_end[i] - 1)]
    # metadata (name, date, code, court) lives in the first few lines
    top <- a[1:8]
    # Case name heuristics:
    # 1. a single line containing ' v ' (as in "Eden v Foster")
    # 2. no such line (as in "re Duomatic"): take the first line
    # 3. several such lines: take the first of them
    name0 <- grep(" v ", top)
    if (length(name0) == 0) {
      full_cases.m[i, 1] <- top[1]
    } else {
      # the original stored the numeric line index name0[1] when several
      # lines matched; store the line text instead
      full_cases.m[i, 1] <- top[name0[1]]
    }
    # Dates, court and code:
    # 1. find the first line containing a month name
    # 2. the following line is the case code, the preceding line the court
    # 3. when the date line is at the very top, the court is NA
    months <- c("January", "February", "March", "April", "May", "June",
                "July", "August", "September", "October", "November", "December")
    m <- grep(paste(months, collapse = "|"), top)
    if (length(m) > 0) {
      full_cases.m[i, 2] <- top[m[1]]        # date
      full_cases.m[i, 3] <- top[m[1] + 1]    # code
      full_cases.m[i, 4] <- if (m[1] > 1) top[m[1] - 1] else NA   # court
    }
    full_cases.m[i, 5] <- isolatekey(a, key)
  }
  y <- as.data.frame(full_cases.m, stringsAsFactors = FALSE)
  # proper column names
  colnames(y) <- c("Case", "Date", "Code", "Court", "Body")
  # format date (parsing of English month names depends on the locale)
  y$Date <- as.Date(y$Date, format = "%d %B %Y")
  # remove leading page numbers such as "*123"
  y[, 1] <- gsub('\\*[[:digit:]]+', '', y[, 1])
  return(y)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/learn-params-wanbia.R
\name{get_log_leaf_entries}
\alias{get_log_leaf_entries}
\title{Assuming that the cpt is a leaf, returns 1 instead of a CPT entry when value missing}
\usage{
get_log_leaf_entries(cpt, x)
}
\arguments{
\item{x}{a vector of values}
}
\description{
Assuming that the cpt is a leaf, returns 1 instead of a CPT entry when value missing
}
\keyword{internal}
| /man/get_log_leaf_entries.Rd | no_license | bmihaljevic/bnclassify | R | false | true | 452 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/learn-params-wanbia.R
\name{get_log_leaf_entries}
\alias{get_log_leaf_entries}
\title{Assuming that the cpt is a leaf, returns 1 instead of a CPT entry when value missing}
\usage{
get_log_leaf_entries(cpt, x)
}
\arguments{
\item{x}{a vector of values}
}
\description{
Assuming that the cpt is a leaf, returns 1 instead of a CPT entry when value missing
}
\keyword{internal}
|
###### global variables #######
# 20-colour palette used to distinguish sample clusters/conditions in plots
custom.col <-c('#fabed4','#f58231','#FF2F09', '#dcbeff','#f032e6','#911eb4','#ffe119', '#808000','#000075','#0CF091','#a9a9a9', '#9a6324', '#800000','#33614E', '#4363d8','#bcf60c','#aaffc3','#42d4f4','#68A864','#000000')
## gene annotations exported from subtiwiki combined with
## Antisense information (TableS11 in http://genome.jouy.inra.fr/basysbio/bsubtranscriptome/)
## and regulatory network information (https://www.frontiersin.org/articles/10.3389/fmicb.2016.00275/full)
## NOTE: path is relative to the working directory at source() time
gene_annotations<-read.csv("data/GeneAnnotations.csv", sep="\t",header = TRUE,quote='',stringsAsFactors = FALSE)
# index the annotation table by gene name for direct row lookup elsewhere
rownames(gene_annotations)<-gene_annotations$gene_name
## regulon list
df_regulons<-read.csv("data/regulons.csv", sep=",",header = TRUE,row.names=NULL,quote='',stringsAsFactors = FALSE)
###### functions ########
# Differential-expression test of the treatment group against the control
# group using limma, with cutoffs on adjusted p-value and log fold change.
#
# data       - expression matrix (genes x samples)
# treatment  - factor of group membership; coefficient "treatment1" is tested
# block_list - biological-replicate blocking vector; NULL skips the
#              duplicate-correlation step
# filedirec  - path prefix for the output files and plots
# p.value    - adjusted p-value cutoff
# lfc        - log fold change cutoff
# highlight  - number of top genes to label in the volcano plot; 0 disables
#              labelling in both plots
# Side effects: writes <prefix>_uplist.txt, <prefix>_downlist.txt,
# <prefix>_DEGs.csv, <prefix>_volcano.pdf and <prefix>_MDplot.pdf.
# Returns the topTable of significant genes.
DEgenes<-function(data,treatment,block_list=NULL,filedirec,p.value=0.05,lfc=0,highlight=50){
  design <- model.matrix(~treatment)
  # fit the linear model once; the original ran an extra unconditional
  # lmFit() first whose result was always overwritten
  if(length(block_list) == 0){
    fit <- lmFit(data, design)
  } else {
    # remove correlation between duplicate spots of the same gene
    dupcor <- duplicateCorrelation(data, design, block = block_list)
    fit <- lmFit(data, design, block = block_list,
                 correlation = dupcor$consensus.correlation)
  }
  fit <- eBayes(fit, trend = TRUE, robust = TRUE)
  n <- dim(data)[1]
  # genes passing the cutoffs, split by direction of change
  diff <- topTable(fit, coef = "treatment1", n = n, p.value = p.value, lfc = lfc)
  uplist <- rownames(diff[diff$logFC > 0, ])
  downlist <- rownames(diff[diff$logFC < 0, ])
  write.table(uplist, paste0(filedirec, '_uplist.txt'), append = FALSE, sep = ",",
              quote = FALSE, col.names = F, row.names = F)
  write.table(downlist, paste0(filedirec, '_downlist.txt'), append = FALSE, sep = ",",
              quote = FALSE, col.names = F, row.names = F)
  write.table(diff, paste0(filedirec, '_DEGs.csv'), append = FALSE, sep = ",",
              quote = FALSE, col.names = T, row.names = T)
  # volcano plot, labelling the 'highlight' most significant genes
  pdf(paste0(filedirec, '_volcano.pdf'))
  titlename <- basename(filedirec)   # last path component as plot title
  if(highlight)
    volcanoplot(fit, coef = 'treatment1', highlight = highlight,
                names = rownames(fit$t), main = titlename)
  else
    volcanoplot(fit, coef = 'treatment1', main = titlename)
  abline(v = 1, col = "blue")
  abline(v = -1, col = "blue")
  abline(h = -log10(0.05), col = "blue")
  dev.off()
  # MD plot with significant genes highlighted and (optionally) labelled
  results <- decideTests(fit, p.value = p.value, lfc = lfc)
  pdf(paste0(filedirec, '_MDplot.pdf'), width = 10, height = 8)
  plotMD(fit, coef = 'treatment1', status = results[, 2], values = c(1, -1),
         hl.col = c('red', 'blue'), hl.cex = 0.3)
  xm <- fit$Amean
  ym <- as.matrix(fit$coefficients)[, 'treatment1']
  if(highlight){
    # guard against empty gene lists (consistent with DEtest)
    if(length(uplist) > 0)
      text(x = xm[uplist], y = ym[uplist], labels = uplist, cex = 0.3, col = 'red')
    if(length(downlist) > 0)
      text(x = xm[downlist], y = ym[downlist], labels = downlist, cex = 0.3, col = 'blue')
  }
  dev.off()
  return(diff)
}
# Differential-expression test of the treatment group against the control
# group using limma.
#
# data        - expression matrix (genes x samples)
# treatment   - factor of group membership; coefficient "treatment1" is tested
# block_list  - biological-replicate blocking vector (used only when
#               correlation = TRUE)
# correlation - if TRUE, estimate and remove correlation between duplicate
#               spots via duplicateCorrelation()
# filedirec   - path prefix for the output files and plots
# p.value,lfc - cutoffs applied to the significant-gene lists
# highlight   - genes to label per direction in the plots; 0 disables labels
# Side effects: writes up/down gene lists (txt plus annotated csv using the
# global 'gene_annotations'), the DE table, an MD plot and a volcano plot,
# all under the 'filedirec' prefix.
# Returns the full (unfiltered) topTable.
DEtest<-function(data,treatment,block_list=NULL,correlation=FALSE,filedirec,p.value=1,lfc=0,highlight=10){
  design <- model.matrix(~treatment)
  if(correlation){
    dupcor <- duplicateCorrelation(data, design, block = block_list)
    fit <- lmFit(data, design, block = block_list,
                 correlation = dupcor$consensus.correlation)
    cat('remove correlation between duplicates\n')
  } else {
    fit <- lmFit(data, design)
  }
  fit <- eBayes(fit, trend = TRUE, robust = TRUE)
  n <- dim(data)[1]
  # genes passing the significance cutoffs, split by direction of change
  diff <- topTable(fit, coef = "treatment1", n = n, p.value = p.value, lfc = lfc)
  uplist <- rownames(diff[diff$logFC > 0, ])
  downlist <- rownames(diff[diff$logFC < 0, ])
  write.table(uplist, paste0(filedirec, '_uplist.txt'), append = FALSE, sep = ",",
              quote = FALSE, col.names = F, row.names = F)
  write.table(downlist, paste0(filedirec, '_downlist.txt'), append = FALSE, sep = ",",
              quote = FALSE, col.names = F, row.names = F)
  # annotated gene lists: annotation columns plus logFC and adj.P.Val
  if(length(uplist) > 0){
    uplist_anno <- cbind(gene_annotations[uplist, c(2:6, 8:13)], diff[uplist, c(1, 5)])
    write.table(uplist_anno, paste0(filedirec, '_uplist.csv'), append = FALSE, sep = ",",
                quote = FALSE, col.names = T, row.names = F)
  }
  if(length(downlist) > 0){
    downlist_anno <- cbind(gene_annotations[downlist, c(2:6, 8:13)], diff[downlist, c(1, 5)])
    write.table(downlist_anno, paste0(filedirec, '_downlist.csv'), append = FALSE, sep = ",",
                quote = FALSE, col.names = T, row.names = F)
  }
  write.table(diff, paste0(filedirec, '_DEGs.csv'), append = FALSE, sep = ",",
              quote = FALSE, col.names = T, row.names = T)
  # MD plot with significant genes highlighted
  results <- decideTests(fit, p.value = p.value, lfc = lfc)
  pdf(paste0(filedirec, '_MDplot.pdf'), width = 10, height = 8)
  plotMD(fit, coef = 'treatment1', status = results[, 2], values = c(1, -1),
         hl.col = c('red', 'blue'), hl.cex = 0.3)
  xm <- fit$Amean
  ym <- as.matrix(fit$coefficients)[, 'treatment1']
  if(highlight){
    if(length(uplist) > 0)
      text(x = xm[uplist], y = ym[uplist], labels = uplist, cex = 0.5, col = 'red')
    if(length(downlist) > 0)
      text(x = xm[downlist], y = ym[downlist], labels = downlist, cex = 0.5, col = 'blue')
  }
  dev.off()
  # volcano plot: full table in grey, significant genes in black with labels
  pdf(paste0(filedirec, '_volcano.pdf'))
  wholetable <- topTable(fit, coef = "treatment1", n = n, p.value = 1, lfc = 0)
  titlename <- basename(filedirec)   # last path component as plot title
  plot(wholetable$logFC, -log10(wholetable$adj.P.Val), pch = 16, cex = 0.5,
       xlab = "Log2 Fold Change", ylab = '-log10(p-value)', main = titlename, col = 'grey')
  # reuse 'diff' computed above (the original recomputed the identical
  # topTable a second time)
  if(nrow(diff) > 0){
    xs <- diff$logFC
    ys <- -log10(diff$adj.P.Val)
    points(xs, ys, pch = 16, cex = 0.5)
    if(highlight){
      # cap at the number of available genes so indexing never runs past the
      # end (the original's 1:highlight slice produced NAs)
      up_idx <- head(which(xs > 0), highlight)
      down_idx <- head(which(xs < 0), highlight)
      if(length(up_idx) > 0)
        text(x = xs[up_idx], y = ys[up_idx], labels = rownames(diff)[up_idx],
             cex = 1, col = 'red')
      if(length(down_idx) > 0)
        text(x = xs[down_idx], y = ys[down_idx], labels = rownames(diff)[down_idx],
             cex = 1, col = 'blue')
    }
  }
  # close the device unconditionally (the original leaked the pdf device
  # when no gene passed the cutoffs)
  dev.off()
  return(wholetable)
}
# Run DEtest() for every condition in 'condition_list' and assemble per-gene
# matrices of DE sign (+1/-1/0), log fold change and adjusted p-value.
#
# data           - expression matrix (genes x samples)
# m_index        - design matrix (samples x conditions); entries are group
#                  labels, with 'non' marking samples excluded from a condition
# block_list     - accepted for interface compatibility; not forwarded here
# condition_list - condition (column) names to analyse
# filedirec      - output directory; per-condition DEtest files plus three
#                  summary csv files are written there
# p.value, lfc   - cutoffs used to fill the sign matrix (note: the files
#                  written by DEtest use fixed cutoffs p = 0.05, lfc = 1)
DEG_patterns<-function(data,m_index,block_list=NULL,condition_list,filedirec,p.value=1,lfc=0){
  m_sign <- matrix(0, nrow = length(rownames(data)), ncol = length(condition_list),
                   dimnames = list(rownames(data), condition_list))
  m_fc <- matrix(0, nrow = length(rownames(data)), ncol = length(condition_list),
                 dimnames = list(rownames(data), condition_list))
  m_pvalue <- matrix(0, nrow = length(rownames(data)), ncol = length(condition_list),
                     dimnames = list(rownames(data), condition_list))
  cat('Differential expression analysis performing for conditions:\n')
  for (i in 1:length(condition_list)){
    condition <- condition_list[i]
    cat(i, condition, '\n')
    # samples participating in this condition ('non' = excluded)
    index <- which(m_index[, condition] != 'non')
    treatment <- as.factor(m_index[index, condition])
    # argument name spelled out in full (the original relied on partial
    # argument matching via 'filedire=')
    wholetable <- DEtest(data[, index], treatment,
                         filedirec = paste0(filedirec, '/', condition),
                         p.value = 0.05, lfc = 1)
    m_sign[rownames(wholetable[wholetable$logFC > lfc & wholetable$adj.P.Val < p.value, ]), condition] <- 1
    m_sign[rownames(wholetable[wholetable$logFC < (-lfc) & wholetable$adj.P.Val < p.value, ]), condition] <- -1
    m_fc[rownames(wholetable), condition] <- wholetable$logFC
    m_pvalue[rownames(wholetable), condition] <- wholetable$adj.P.Val
  }
  write.table(m_sign, paste0(filedirec, '/expression_signs.csv'), append = FALSE, sep = ",",
              quote = FALSE, col.names = T, row.names = T)
  write.table(m_fc, paste0(filedirec, '/logfoldchanges.csv'), append = FALSE, sep = ",",
              quote = FALSE, col.names = T, row.names = T)
  write.table(m_pvalue, paste0(filedirec, '/pvalues.csv'), append = FALSE, sep = ",",
              quote = FALSE, col.names = T, row.names = T)
}
# One-vs-rest differential-expression test for a single cluster (limma).
# NOTE(review): 'block_list' is accepted but currently unused -- the
# duplicate-correlation step is commented out below. A second definition of
# DEtest_cluster (without block_list) appears later in this file and
# overrides this one when the whole file is sourced.
#
# data                 - expression matrix (genes x samples)
# identity             - cluster id per sample
# clusterID            - cluster tested against all remaining samples
# p.cutoff, lfc.cutoff - cutoffs for the labelled gene sets
# Draws an MD plot on the currently open graphics device (caller manages
# pdf()/dev.off()). Returns the full (unfiltered) topTable.
DEtest_cluster <- function(data,identity,block_list,clusterID,p.cutoff=0.05,lfc.cutoff=1) {
# recode membership as a binary contrast: 1 = in cluster, 0 = rest
identity[identity !=clusterID] <- 0
identity[identity ==clusterID] <- 1
ct<-factor(identity)
design <- model.matrix(~ct)
# dupcor <- duplicateCorrelation(data,design,block=block_list)
# fit <- lmFit(data,design,block=block_list,correlation=dupcor$consensus.correlation)
fit <- lmFit(data,design)
fit <- eBayes(fit, trend=TRUE, robust=TRUE)
n<-dim(data)[1]
# genes passing the cutoffs, split by direction of change
diff<-topTable(fit, coef="ct1",n=n, p.value=p.cutoff,lfc=lfc.cutoff)
uplist<-rownames(diff[diff$logFC>0,])
downlist<-rownames(diff[diff$logFC<0,])
results <- decideTests(fit,p.value = p.cutoff, lfc=lfc.cutoff)
plotMD(fit,coef= 'ct1',status=results[,2],values=c(1,-1),hl.col=c('red','blue'),hl.cex=0.2)
x<-fit$Amean
y<-as.matrix(fit$coefficients)[,'ct1']
# label significant genes on the MD plot
if(length(uplist)>0)
text(x = x[uplist],y = y[uplist],labels = uplist,cex = 0.5,col='red')
if(length(downlist)>0)
text(x = x[downlist],y = y[downlist],labels = downlist,cex = 0.5,col='blue')
# return the unfiltered table; callers apply their own cutoffs
wholetable<-topTable(fit, coef="ct1",n=n, p.value=1,lfc=0)
return (wholetable)
}
# One-vs-rest differential expression for every cluster.
# NOTE(review): this version forwards 'block_list' to the 5-argument
# DEtest_cluster defined just above; both functions are redefined later in
# this file without block_list, and the later definitions win when the
# whole file is sourced -- confirm which pair is intended.
#
# data                 - expression matrix (genes x samples)
# identity             - cluster id (1..k) per sample
# block_list           - biological-replicate blocking vector
# p.cutoff, lfc.cutoff - significance cutoffs
# filedirec            - output directory for per-cluster gene lists, MD
#                        plots and the three summary csv files
# Returns a named list with entries up_cluster<i> / down_cluster<i>.
DEG_clusters<-function(data,identity,block_list,p.cutoff=0.05,lfc.cutoff=0,filedirec){
num_clusters<-length(unique(identity))
# per-gene summary matrices: sign (+1/-1/0), log fold change, adj. p-value
m_sign<-matrix(0, nrow=length(rownames(data)),ncol=num_clusters,
dimnames = list(rownames(data),1:num_clusters))
m_fc<-matrix(0, nrow=length(rownames(data)),ncol=num_clusters,
dimnames = list(rownames(data),1:num_clusters))
m_pvalue<-matrix(0, nrow=length(rownames(data)),ncol=num_clusters,
dimnames = list(rownames(data),1:num_clusters))
cat('Differential expression analysis performing for each cluster VS others:\n')
DE_list<-list()
for(i in 1:num_clusters){
pdf(paste0(filedirec,'/MD_cluster',i,'.pdf'), width = 10 ,height = 8)
wholetable<-DEtest_cluster(data,identity,block_list,clusterID=i,p.cutoff,lfc.cutoff)
dev.off()
cat('cluster ',i,'\n')
uplist<-rownames(wholetable[wholetable$logFC>lfc.cutoff & wholetable$adj.P.Val<p.cutoff,])
# '-wholetable$logFC>lfc.cutoff' parses as (-logFC) > lfc.cutoff,
# i.e. logFC < -lfc.cutoff: genes downregulated beyond the cutoff
downlist<-rownames(wholetable[-wholetable$logFC>lfc.cutoff & wholetable$adj.P.Val<p.cutoff,])
write.table(uplist
, paste0(filedirec,'/UP_cluster',i,'.txt'), append = FALSE, sep = ",",quote=FALSE,col.names = F, row.names = F)
write.table(downlist
, paste0(filedirec,'/DOWN_cluster',i,'.txt'), append = FALSE, sep = ",",quote=FALSE,col.names = F, row.names = F)
m_sign[uplist,i]<-1
m_sign[downlist,i]<--1
m_fc[rownames(wholetable),i]<-wholetable$logFC
m_pvalue[rownames(wholetable),i]<-wholetable$adj.P.Val
ID<-paste0('cluster',i)
DE_list[[paste0('up_',ID)]]<-uplist
DE_list[[paste0('down_',ID)]]<-downlist
}
write.table(m_sign,paste0(filedirec,'/expression_signs.csv'),append = FALSE, sep = ",",quote=FALSE,col.names = T, row.names = T)
write.table(m_fc,paste0(filedirec,'/fold_changes.csv'),append = FALSE, sep = ",",quote=FALSE,col.names = T, row.names = T)
write.table(m_pvalue,paste0(filedirec,'/pvalues.csv'),append = FALSE, sep = ",",quote=FALSE,col.names = T, row.names = T)
return(DE_list)
}
# Pairwise differential-expression test between two clusters (limma),
# removing correlation between duplicate spots.
#
# data                  - expression matrix (genes x samples)
# identity              - cluster id per sample; samples from other clusters
#                         keep their labels and enter the factor as extra
#                         levels (unchanged from the original behaviour)
# block_list            - biological-replicate blocking vector
# clusterID1, clusterID2- the two clusters compared (direction: 2 vs 1)
# p.cutoff, lfc.cutoff  - significance cutoffs
# Draws an MD plot with significant genes labelled on the current device.
# Returns list(fit = limma fit object, topgenes = significant topTable).
DEG_clusterspair <- function(data,identity,block_list,clusterID1,clusterID2,p.cutoff=0.01,lfc.cutoff=1) {
  identity[identity == clusterID1] <- 0
  identity[identity == clusterID2] <- 1
  ct <- factor(identity)
  design <- model.matrix(~ct)
  # estimate within-block correlation once and feed it to the fit
  # (the original also had a bare 'dupcor$consensus.correlation' expression
  # whose value was discarded; removed)
  dupcor <- duplicateCorrelation(data, design, block = block_list)
  fit <- lmFit(data, design, block = block_list,
               correlation = dupcor$consensus.correlation)
  fit <- eBayes(fit, trend = TRUE, robust = TRUE)
  results <- decideTests(fit, p.value = p.cutoff, lfc = lfc.cutoff)
  plotMD(fit, coef = 'ct1', status = results[, 2], values = c(1, -1),
         hl.col = c('red', 'blue'), hl.cex = 0.2)
  xm <- fit$Amean
  ym <- as.matrix(fit$coefficients)[, 'ct1']
  highlight1 <- names(results[, 2][results[, 2] == 1])
  if (length(highlight1) > 0)
    text(x = xm[highlight1], y = ym[highlight1], labels = highlight1, cex = 0.5, col = 'red')
  highlight2 <- names(results[, 2][results[, 2] == -1])
  if (length(highlight2) > 0)
    text(x = xm[highlight2], y = ym[highlight2], labels = highlight2, cex = 0.5, col = 'blue')
  # use the actual gene count instead of the hard-coded 5875 of the original
  top <- topTable(fit, coef = "ct1", n = dim(data)[1], p.value = p.cutoff, lfc = lfc.cutoff)
  return(list('fit' = fit, 'topgenes' = top))
}
# Save UMAP scatter plots (static pdf + interactive html) of the samples,
# coloured either by cluster assignment or by major condition.
#
# umap_embedding - matrix (samples x 2) of UMAP coordinates
# cluster        - optional vector of cluster ids; NULL colours by condition
# sample_list    - per-sample metadata data frame; assumed to contain
#                  SampleID, annotation, experiment and condition columns
#                  used in tooltips/colouring -- TODO confirm against callers
# filedir        - output directory (plots are written inside it)
# filetoken      - basename for the output files
# Returns the plotted data frame (coordinates cbind-ed with sample_list).
umap_visual<-function (umap_embedding,cluster=NULL,sample_list,filedir,filetoken){
  df_umap <- as.data.frame(umap_embedding)
  colnames(df_umap) <- c('component1', 'component2')
  df_umap <- cbind(df_umap, sample_list)
  # change into the output directory and restore the previous working
  # directory even on error (the original counted '/' characters and walked
  # back with '../', which breaks for absolute paths and on failure)
  old_wd <- getwd()
  setwd(filedir)
  on.exit(setwd(old_wd), add = TRUE)
  if (!is.null(cluster)){
    num_clusters <- length(unique(cluster))
    label_clusters <- paste0('cluster_', 1:num_clusters)
    df_umap$cl <- factor(paste0('cluster_', as.character(cluster)), levels = label_clusters)
    p <- ggplot(df_umap, aes(x = component1, y = component2, color = cl,
                             text = paste("Sample ID:", SampleID, "\nannotation:", annotation, "\nExperiment ", experiment))) +
      geom_point(size = 2) +
      scale_color_manual(values = custom.col[1:num_clusters]) +
      theme(legend.title = element_blank()) +
      ggtitle('Clustering samples in umap space')
    ggsave(file = paste0(filetoken, '.pdf'), plot = p, units = "in", width = 12, height = 10, dpi = 300)
    p <- ggplotly(p)
    htmlwidgets::saveWidget(p, file = paste0(filetoken, '.html'))
  } else {
    num_conditions <- length(unique(df_umap$condition))
    p <- ggplot(df_umap, aes(x = component1, y = component2, color = condition,
                             text = paste("Sample ID:", SampleID, "\nannotation:", annotation, "\nExperiment ", experiment))) +
      geom_point(size = 2) +
      theme(legend.title = element_blank()) +
      ggtitle('Samples colored by major conditions in umap space')
    ggsave(file = paste0(filetoken, '.pdf'), plot = p, units = "in", width = 12, height = 10, dpi = 300)
    p <- ggplotly(p)
    htmlwidgets::saveWidget(p, file = paste0(filetoken, '.html'))
  }
  return(df_umap)
}
# One-vs-rest differential-expression test for a single cluster (limma).
#
# data                 - expression matrix (genes x samples)
# identity             - cluster id per sample
# clusterID            - cluster tested against all remaining samples
# p.cutoff, lfc.cutoff - cutoffs for the labelled gene sets
# Draws an MD plot on the currently open graphics device (the caller is
# responsible for pdf()/dev.off()). Returns the full (unfiltered) topTable.
DEtest_cluster <- function(data,identity,clusterID,p.cutoff=0.05,lfc.cutoff=1) {
  # binary contrast: 1 = member of clusterID, 0 = everything else
  # ('ct' must keep this name -- it determines the "ct1" coefficient label)
  ct <- factor(as.integer(identity == clusterID))
  design <- model.matrix(~ct)
  fit <- eBayes(lmFit(data, design), trend = TRUE, robust = TRUE)
  n_genes <- dim(data)[1]
  # genes passing the cutoffs, split by direction of change
  sig <- topTable(fit, coef = "ct1", n = n_genes, p.value = p.cutoff, lfc = lfc.cutoff)
  up_genes <- rownames(sig)[sig$logFC > 0]
  down_genes <- rownames(sig)[sig$logFC < 0]
  status <- decideTests(fit, p.value = p.cutoff, lfc = lfc.cutoff)
  plotMD(fit, coef = 'ct1', status = status[, 2], values = c(1, -1),
         hl.col = c('red', 'blue'), hl.cex = 0.2)
  mean_expr <- fit$Amean
  log_fc <- as.matrix(fit$coefficients)[, 'ct1']
  if (length(up_genes) > 0)
    text(x = mean_expr[up_genes], y = log_fc[up_genes], labels = up_genes,
         cex = 0.5, col = 'red')
  if (length(down_genes) > 0)
    text(x = mean_expr[down_genes], y = log_fc[down_genes], labels = down_genes,
         cex = 0.5, col = 'blue')
  # the unfiltered table is what callers consume
  return(topTable(fit, coef = "ct1", n = n_genes, p.value = 1, lfc = 0))
}
# One-vs-rest differential expression for every cluster (redefinition of
# the earlier DEG_clusters without the block_list argument; this later
# definition wins when the whole file is sourced).
#
# data                 - expression matrix (genes x samples)
# identity             - cluster id (1..k) per sample
# p.cutoff, lfc.cutoff - significance cutoffs
# filedirec            - output directory, created if missing; receives
#                        per-cluster gene lists, MD plots and three summary
#                        csv files
# Returns a named list with entries up_cluster<i> / down_cluster<i>.
DEG_clusters<-function(data,identity,p.cutoff=0.05,lfc.cutoff=0,filedirec){
dir.create(filedirec, showWarnings = FALSE)
num_clusters<-length(unique(identity))
# per-gene summary matrices: sign (+1/-1/0), log fold change, adj. p-value
m_sign<-matrix(0, nrow=length(rownames(data)),ncol=num_clusters,dimnames = list(rownames(data),1:num_clusters))
m_fc<-matrix(0, nrow=length(rownames(data)),ncol=num_clusters,dimnames = list(rownames(data),1:num_clusters))
m_pvalue<-matrix(0, nrow=length(rownames(data)),ncol=num_clusters,dimnames = list(rownames(data),1:num_clusters))
DE_list<-list()
for(i in 1:num_clusters){
pdf(paste0(filedirec,'/MD_cluster',i,'.pdf'), width = 10 ,height = 8)
wholetable<-DEtest_cluster(data,identity,clusterID=i,p.cutoff,lfc.cutoff)
dev.off()
uplist<-rownames(wholetable[wholetable$logFC>lfc.cutoff & wholetable$adj.P.Val<p.cutoff,])
# '-wholetable$logFC>lfc.cutoff' parses as (-logFC) > lfc.cutoff,
# i.e. logFC < -lfc.cutoff: genes downregulated beyond the cutoff
downlist<-rownames(wholetable[-wholetable$logFC>lfc.cutoff & wholetable$adj.P.Val<p.cutoff,])
write.table(uplist
, paste0(filedirec,'/UP_cluster',i,'.txt'), append = FALSE, sep = ",",quote=FALSE,col.names = F, row.names = F)
write.table(downlist
, paste0(filedirec,'/DOWN_cluster',i,'.txt'), append = FALSE, sep = ",",quote=FALSE,col.names = F, row.names = F)
m_sign[uplist,i]<-1
m_sign[downlist,i]<--1
m_fc[rownames(wholetable),i]<-wholetable$logFC
m_pvalue[rownames(wholetable),i]<-wholetable$adj.P.Val
ID<-paste0('cluster',i)
DE_list[[paste0('up_',ID)]]<-uplist
DE_list[[paste0('down_',ID)]]<-downlist
}
write.table(m_sign,paste0(filedirec,'/expression_signs.csv'),append = FALSE, sep = ",",quote=FALSE,col.names = T, row.names = T)
write.table(m_fc,paste0(filedirec,'/fold_changes.csv'),append = FALSE, sep = ",",quote=FALSE,col.names = T, row.names = T)
write.table(m_pvalue,paste0(filedirec,'/pvalues.csv'),append = FALSE, sep = ",",quote=FALSE,col.names = T, row.names = T)
return(DE_list)
}
# Threshold-based marker detection per sample cluster: a gene is reported
# "up" ("down") for a cluster when its expression exceeds (falls below) the
# threshold in at least 'percent_threshold' of the cluster's samples.
#
# data              - expression matrix (genes x samples)
# identity          - cluster id per sample; assumed to take the values
#                     1..k so that table() order matches positional indexing
# upthreshold       - value a sample must exceed to count as "up"
# downthreshold     - value a sample must fall below to count as "down"
# percent_threshold - minimum fraction of the cluster's samples required
# filedirec         - output directory (created if missing)
# Side effects: writes UP_cluster<i>.txt / DOWN_cluster<i>.txt per cluster,
# plus percentages.csv and intensities.csv.
# Returns a named list with entries up_cluster<i> / down_cluster<i>.
ARG_clusters<-function(data,identity,upthreshold,downthreshold,percent_threshold,filedirec){
  dir.create(filedirec, showWarnings = FALSE)
  clustersizes <- table(identity)
  n_clusters <- length(unique(identity))
  genenames <- rownames(data)
  AR_list <- list()
  # per-gene fraction of samples passing the up/down threshold, per cluster
  # (columns interleaved as up1, down1, up2, down2, ...)
  m_percent <- matrix(0, nrow = dim(data)[1], ncol = 2 * n_clusters,
                      dimnames = list(rownames(data),
                                      c(rbind(paste0('up', 1:n_clusters),
                                              paste0('down', 1:n_clusters)))))
  # mean expression per gene and cluster (the original had a stray '<--'
  # that negated the zero matrix; harmless but corrected)
  m_avg <- matrix(0, nrow = dim(data)[1], ncol = n_clusters,
                  dimnames = list(rownames(data), paste0('cluster', 1:n_clusters)))
  for (clusterno in 1:n_clusters){
    # drop = FALSE keeps the subset a matrix even for single-sample
    # clusters (the original crashed in rowSums/rowMeans in that case)
    cluster_data <- data[, which(identity == clusterno), drop = FALSE]
    current_uppercent <- rowSums(cluster_data > upthreshold) / clustersizes[clusterno]
    uplist <- genenames[current_uppercent >= percent_threshold]
    write.table(uplist, paste0(filedirec, '/UP_cluster', clusterno, '.txt'),
                append = FALSE, sep = ",", quote = FALSE, col.names = F, row.names = F)
    m_percent[, clusterno * 2 - 1] <- current_uppercent
    current_downpercent <- rowSums(cluster_data < downthreshold) / clustersizes[clusterno]
    downlist <- genenames[current_downpercent >= percent_threshold]
    write.table(downlist, paste0(filedirec, '/DOWN_cluster', clusterno, '.txt'),
                append = FALSE, sep = ",", quote = FALSE, col.names = F, row.names = F)
    m_percent[, clusterno * 2] <- current_downpercent
    m_avg[, clusterno] <- rowMeans(cluster_data)
    ID <- paste0('cluster', clusterno)
    AR_list[[paste0('up_', ID)]] <- uplist
    AR_list[[paste0('down_', ID)]] <- downlist
  }
  write.table(m_percent, paste0(filedirec, '/percentages.csv'),
              append = FALSE, sep = ",", quote = FALSE, col.names = T, row.names = T)
  write.table(m_avg, paste0(filedirec, '/intensities.csv'),
              append = FALSE, sep = ",", quote = FALSE, col.names = T, row.names = T)
  return(AR_list)
}
# Write an annotated table of the top marker genes per cluster and mode.
# NOTE(review): relies on the global 'gene_annotations' table; 'maker_lists'
# ("marker") is assumed to alternate up/down entries per cluster, i.e.
# names like up_cluster1, down_cluster1, up_cluster2, ... -- confirm against
# DEG_clusters/ARG_clusters output.
#
# maker_lists - named list of gene vectors, 2 entries (up/down) per cluster
# p_values, lfcs, intensities - matrices indexed [gene, cluster]
# percentages - matrix indexed [gene, up/down column] (2 columns per cluster)
# filename    - output path (tab-separated)
# n           - number of top genes kept per list
# Returns the assembled data frame (also written to 'filename').
write.markeranno<-function(maker_lists,p_values=NULL,lfcs=NULL,percentages=NULL,intensities=NULL,filename,n=10){
df_makerannos <- data.frame(clusterID=numeric(),mode=character(),gene_name=character(),
p_value=numeric(),fold_change=numeric(),percentage=numeric(),intensity=numeric(),stringsAsFactors=FALSE)
clusterIDs<-names(maker_lists) #both up and down, length(clusterIDs)=2*num_clusters
modes<-c('upregulated','downregulated')
for (i in 1:length(clusterIDs)){
temp_genes<-maker_lists[[clusterIDs[i]]]
# keep only the first n genes of each list
if(length(temp_genes)>=n)
temp_genes<-temp_genes[1:n]
# ceiling(i/2) maps the interleaved up/down entries to their cluster id;
# (i+1)%%2+1 picks 'upregulated' for odd i and 'downregulated' for even i
temp_df<-data.frame(rep(ceiling(i/2),length(temp_genes)),rep(modes[(i+1)%%2+1],length(temp_genes)),temp_genes,
p_values[temp_genes,ceiling(i/2)],lfcs[temp_genes,ceiling(i/2)],percentages[temp_genes,i],intensities[temp_genes,ceiling(i/2)])
colnames(temp_df)<-c('clusterID','mode','gene_name','p_value','fold_change','percentage','intensity')
df_makerannos = rbind(df_makerannos,temp_df)
}
# append annotation columns 3:13 from the global gene_annotations table
df_makerannos<-cbind(df_makerannos,gene_annotations[match(df_makerannos$gene_name,gene_annotations$gene_name),3:13])
write.table(df_makerannos,filename, append = FALSE, sep = "\t",quote=FALSE,col.names = T, row.names = F)
return(df_makerannos)
}
# Write a per-cluster summary of sample annotations and their major
# treatment condition to a tab-separated file.
#
# cluster     - vector of cluster ids (assumed 1..k) whose names are numeric
#               row indices into sample_list
# sample_list - data frame with at least 'annotation' and 'condition' columns
# filename    - output path (columns: clusterID, sample_annotation,
#               majoy_treatment -- header kept as-is for compatibility)
write.sampleanno<-function(cluster,sample_list,filename){
  per_cluster <- lapply(seq_len(length(unique(cluster))), function(cid){
    # rows of sample_list belonging to this cluster
    idx <- as.numeric(names(cluster)[cluster == cid])
    annos <- unique(sample_list$annotation[idx])
    data.frame(clusterID = rep(cid, length(annos)),
               sample_annotation = annos,
               majoy_treatment = sample_list$condition[match(annos, sample_list$annotation)],
               stringsAsFactors = FALSE)
  })
  df_annos <- do.call(rbind, per_cluster)
  write.table(df_annos, filename, append = FALSE, sep = "\t",
              quote = FALSE, col.names = TRUE, row.names = FALSE)
}
# Summarise regulator activity per sample cluster: for every regulator,
# average the expression of its regulated genes, then record per cluster the
# mean intensity and the fraction of samples whose regulator-mean crosses
# the sample's own 30%/70% expression quantiles.
# NOTE(review): assumes cluster ids are 1..k and that every regulator has at
# least one regulated gene present in 'data'; with zero matching genes the
# assignment to m_regexp[,c] would fail -- confirm inputs upstream.
#
# data          - expression matrix (genes x samples); rownames = gene names
# cluster       - named vector of cluster ids; names are sample ids
#                 (column names of 'data')
# df_regulators - data frame with columns 'regulator' and 'gene'
# filetoken     - path prefix for intensities.csv / percentages.csv
# Returns the samples x regulators matrix of mean regulated-gene expression.
Regulator_clusters<-function(data,cluster,df_regulators,filetoken){
# samples x regulators: mean expression of each regulator's gene set
m_regexp<-matrix(0, nrow=length(cluster),ncol=length(unique(df_regulators$regulator)),
dimnames = list(names(cluster),sort(unique(df_regulators$regulator))))
# clusters x regulators summaries
m_percentages<-matrix(0, nrow=length(unique(cluster)),ncol=length(unique(df_regulators$regulator)),
dimnames = list(1:length(unique(cluster)),sort(unique(df_regulators$regulator))))
m_intensities<-matrix(0, nrow=length(unique(cluster)),ncol=length(unique(df_regulators$regulator)),
dimnames = list(1:length(unique(cluster)),sort(unique(df_regulators$regulator))))
genenames<-rownames(data)
# per-sample 70% and 30% expression quantiles across the chromosome
top30_chromosome<-apply(data,2,quantile,probs=0.7)
bottom30_chromosome<-apply(data,2,quantile,probs=0.3)
for(c in 1:dim(m_intensities)[2]){
# genes regulated by regulator 'c' that are present in the data
regulated_genes<-df_regulators$gene[df_regulators$regulator==colnames(m_intensities)[c]]
regulated_genes<-regulated_genes[regulated_genes%in%genenames]
if (length(regulated_genes)>1)
m_regexp[,c]<-colMeans(data[regulated_genes,])
else
m_regexp[,c]<-data[regulated_genes,]
for(r in 1:dim(m_intensities)[1]){
sample_indices<-names(which(cluster==r))
upthreshold_exp<-top30_chromosome[sample_indices]
downthreshold_exp<-bottom30_chromosome[sample_indices]
# NOTE(review): single-gene regulators are skipped here, so their
# m_intensities/m_percentages rows stay 0 -- confirm this is intended
if(length(regulated_genes)>1){
# mean expression in cluster r and regulator c
exp_regcluster<-data[regulated_genes,sample_indices]
m_intensities[r,c]<-mean(exp_regcluster) # mean expression in cluster r and regulator c
# percentage of samples whose regulator mean expression higher than top 30 expressions in chromosome
regulator_exp<-colMeans(exp_regcluster)
if(m_intensities[r,c]>=0)
m_percentages[r,c]<-sum(regulator_exp>=upthreshold_exp)/length(sample_indices)
else
m_percentages[r,c]<-sum(regulator_exp<=downthreshold_exp)/length(sample_indices)
}
}
}
write.table(m_intensities,paste0(filetoken,'intensities.csv'),append = FALSE, sep = ",",quote=FALSE,col.names = T, row.names = T)
write.table(m_percentages,paste0(filetoken,'percentages.csv'),append = FALSE, sep = ",",quote=FALSE,col.names = T, row.names = T)
return (m_regexp)
}
| /TranscriptionalLandscape/util.R | no_license | neverbehym/transcriptional-biomarkers-subtilis | R | false | false | 23,348 | r | ###### global variables #######
# colours
custom.col <-c('#fabed4','#f58231','#FF2F09', '#dcbeff','#f032e6','#911eb4','#ffe119', '#808000','#000075','#0CF091','#a9a9a9', '#9a6324', '#800000','#33614E', '#4363d8','#bcf60c','#aaffc3','#42d4f4','#68A864','#000000')
## gene annotations exported from subtiwiki combined with
## Antisense information (TableS11 in http://genome.jouy.inra.fr/basysbio/bsubtranscriptome/)
## and regulatory network information (https://www.frontiersin.org/articles/10.3389/fmicb.2016.00275/full)
gene_annotations<-read.csv("data/GeneAnnotations.csv", sep="\t",header = TRUE,quote='',stringsAsFactors = FALSE)
rownames(gene_annotations)<-gene_annotations$gene_name
## regulon list
df_regulons<-read.csv("data/regulons.csv", sep=",",header = TRUE,row.names=NULL,quote='',stringsAsFactors = FALSE)
###### functions ########
# run DE test for the treatment group against control group, cutoff at 'p.value' and 'lfc'
# 'treatment' - treatment group index, 'blocklist' - biological replicate list (NULL then do not remove correlation between duplicate spots. )
# print volcano plot (show names of top 'highlight' p-value genes), MDplot plot (hightlight significant genes if 'highlight' !=0)
# save the upregulated gene list and downregulated gene list, return the top table of significant genes []
# Differential-expression test of the treatment group against the control
# group using limma, with cutoffs on adjusted p-value and log fold change.
# (Duplicate copy of DEgenes kept byte-identical; documentation only.)
#
# data       - expression matrix (genes x samples)
# treatment  - factor of group membership; coefficient "treatment1" is tested
# block_list - biological-replicate blocking vector; NULL skips the
#              duplicate-correlation step
# filedirec  - path prefix for the output files and plots
# p.value    - adjusted p-value cutoff
# lfc        - log fold change cutoff
# highlight  - number of top genes to label in the volcano plot; 0 disables
#              labelling in both plots
# Returns the topTable of significant genes.
DEgenes<-function(data,treatment,block_list=NULL,filedirec,p.value=0.05,lfc=0,highlight=50){
design <- model.matrix(~treatment)
# NOTE(review): this first fit is unconditionally overwritten below and
# could be removed
fit <- lmFit(data, design)
if(length(block_list)==0){
fit <- lmFit(data,design)
}
else{
# remove correlation between duplicate spots of the same gene
dupcor <- duplicateCorrelation(data,design,block=block_list)
fit <- lmFit(data,design,block=block_list,correlation=dupcor$consensus.correlation)
}
fit <- eBayes(fit, trend=TRUE, robust=TRUE)
n<-dim(data)[1]
# genes passing the cutoffs, split by direction of change
diff<-topTable(fit, coef="treatment1",n=n, p.value=p.value,lfc=lfc)
uplist<-rownames(diff[diff$logFC>0,])
downlist<-rownames(diff[diff$logFC<0,])
write.table(uplist, paste0(filedirec,'_uplist.txt'), append = FALSE, sep = ",",quote=FALSE,col.names = F, row.names = F)
write.table(downlist, paste0(filedirec,'_downlist.txt'), append = FALSE, sep = ",",quote=FALSE,col.names = F, row.names = F)
write.table(diff, paste0(filedirec,'_DEGs.csv'), append = FALSE, sep = ",",quote=FALSE,col.names = T, row.names = T)
# volcano plot titled with the last path component of the file prefix
pdf(paste0(filedirec,'_volcano.pdf'))
x<-unlist(strsplit(filedirec,split ='/'))
titlename<-x[length(x)]
if(highlight)
volcanoplot(fit,coef = 'treatment1',highlight = highlight,names=rownames(fit$t),main=titlename)
else
volcanoplot(fit,coef = 'treatment1',main=titlename)
abline(v=1, col="blue")
abline(v=-1, col="blue")
abline(h=-log10(0.05), col="blue")
dev.off()
# MD plot with significant genes highlighted and (optionally) labelled
results <- decideTests(fit,p.value = p.value, lfc=lfc)
pdf(paste0(filedirec,'_MDplot.pdf'), width = 10 ,height = 8)
plotMD(fit,coef= 'treatment1',status=results[,2],values=c(1,-1),hl.col=c('red','blue'),hl.cex=0.3)
x<-fit$Amean
y<-as.matrix(fit$coefficients)[,'treatment1']
if(highlight){
text(x = x[uplist],y = y[uplist],labels = uplist,cex = 0.3,col='red')
text(x = x[downlist],y = y[downlist],labels = downlist,cex = 0.3,col='blue')
}
dev.off()
return (diff)
}
# run DE test for the treatment group against control group, cutoff at 'p.value' and 'lfc'
# 'treatment' - treatment group index, 'blocklist' - biological replicate list (block_list='NULL', correlation=FALSE then do not remove correlation between duplicate spots. )
# print volcanoplot (show names of top 'highlight' p-value genes), MDplot (hightlight significant genes if 'highlight' !=0)
# save the upregulated gene list and downregulated gene list only look at p-values, return the whole table of DE test
# Run a limma differential-expression test of the treatment group against the
# control group.
#
# Args:
#   data        expression matrix (genes x samples).
#   treatment   group factor/indicator; the tested coefficient is "treatment1".
#   block_list  replicate/block labels; only used when correlation=TRUE.
#   correlation if TRUE, estimate within-block correlation (duplicateCorrelation)
#               and fold it into the fit.
#   filedirec   path prefix for all output files.
#   p.value,lfc significance / log-fold-change cutoffs passed to topTable.
#   highlight   number of top genes to label in the plots (0 disables labels).
#
# Side effects: writes *_uplist.txt, *_downlist.txt, *_uplist.csv,
# *_downlist.csv, *_DEGs.csv, an MD plot PDF and a volcano plot PDF.
# Relies on the global 'gene_annotations' table for the annotated CSVs
# (columns 2:6 and 8:13 assumed to be annotation fields — confirm upstream).
#
# Returns the unfiltered topTable result over all genes.
DEtest<-function(data,treatment,block_list=NULL,correlation=FALSE,filedirec,p.value=1,lfc=0,highlight=10){
  design <- model.matrix(~treatment)
  # Fixed: the unconditional lmFit() that preceded this if/else was redundant —
  # both branches recompute the fit.
  if(correlation){
    dupcor <- duplicateCorrelation(data,design,block=block_list)
    fit <- lmFit(data,design,block=block_list,correlation=dupcor$consensus.correlation)
    cat('remove correlation between duplicates\n')
  } else {
    fit <- lmFit(data,design)
  }
  fit <- eBayes(fit, trend=TRUE, robust=TRUE)
  n <- nrow(data)
  diff <- topTable(fit, coef="treatment1", n=n, p.value=p.value, lfc=lfc)
  uplist <- rownames(diff[diff$logFC>0,])
  downlist <- rownames(diff[diff$logFC<0,])
  write.table(uplist, paste0(filedirec,'_uplist.txt'), append = FALSE, sep = ",",quote=FALSE,col.names = F, row.names = F)
  write.table(downlist, paste0(filedirec,'_downlist.txt'), append = FALSE, sep = ",",quote=FALSE,col.names = F, row.names = F)
  if(length(uplist)>0){
    # Columns 1 and 5 of a topTable result are logFC and adj.P.Val.
    uplist_anno <- cbind(gene_annotations[uplist,c(2:6,8:13)],diff[uplist,c(1,5)])
    write.table(uplist_anno, paste0(filedirec,'_uplist.csv'), append = FALSE, sep = ",",quote=FALSE,col.names = T, row.names = F)
  }
  if(length(downlist)>0){
    downlist_anno <- cbind(gene_annotations[downlist,c(2:6,8:13)],diff[downlist,c(1,5)])
    write.table(downlist_anno, paste0(filedirec,'_downlist.csv'), append = FALSE, sep = ",",quote=FALSE,col.names = T, row.names = F)
  }
  write.table(diff, paste0(filedirec,'_DEGs.csv'), append = FALSE, sep = ",",quote=FALSE,col.names = T, row.names = T)
  results <- decideTests(fit, p.value = p.value, lfc=lfc)
  # --- MD plot -------------------------------------------------------------
  pdf(paste0(filedirec,'_MDplot.pdf'), width = 10 ,height = 8)
  plotMD(fit,coef= 'treatment1',status=results[,2],values=c(1,-1),hl.col=c('red','blue'),hl.cex=0.3)
  x <- fit$Amean
  y <- as.matrix(fit$coefficients)[,'treatment1']
  if(highlight > 0){
    if(length(uplist)>0)
      text(x = x[uplist],y = y[uplist],labels = uplist,cex = 0.5,col='red')
    if(length(downlist)>0)
      text(x = x[downlist],y = y[downlist],labels = downlist,cex = 0.5,col='blue')
  }
  dev.off()
  # --- Volcano plot --------------------------------------------------------
  pdf(paste0(filedirec,'_volcano.pdf'))
  wholetable <- topTable(fit, coef="treatment1",n=n, p.value=1,lfc=0)
  x <- unlist(strsplit(filedirec,split ='/'))
  titlename <- x[length(x)]
  x <- wholetable$logFC
  y <- -log10(wholetable$adj.P.Val)
  plot(x,y,pch=16,cex=0.5,xlab = "Log2 Fold Change",ylab='-log10(p-value)',main=titlename,col='grey')
  diff <- topTable(fit, coef="treatment1",n=n, p.value=p.value,lfc=lfc) # look at p-value and log fold change
  # Fixed: guard on the row count (length() of a data.frame is its column
  # count, so the old 'if(length(diff))' tested the wrong thing), and keep
  # dev.off() OUTSIDE the guard so the PDF device is always closed even when
  # no gene passes the cutoffs (the old code leaked the device in that case).
  if(NROW(diff) > 0){
    x <- diff$logFC
    y <- -log10(diff$adj.P.Val)
    points(x,y,pch=16,cex=0.5)
    if(highlight > 0){
      uplist_x <- x[x>0]
      uplist_y <- y[x>0]
      uplist_label <- rownames(diff)[x>0]
      downlist_x <- x[x<0]
      downlist_y <- y[x<0]
      downlist_label <- rownames(diff)[x<0]
      # Label at most 'highlight' genes; clamping avoids NA coordinates and
      # labels when fewer genes are available than requested.
      n_up <- min(highlight, length(uplist_x))
      if(n_up > 0)
        text(x = uplist_x[seq_len(n_up)],y = uplist_y[seq_len(n_up)],labels = uplist_label[seq_len(n_up)],cex = 1,col='red')
      n_down <- min(highlight, length(downlist_x))
      if(n_down > 0)
        text(x = downlist_x[seq_len(n_down)],y = downlist_y[seq_len(n_down)],labels = downlist_label[seq_len(n_down)],cex = 1,col='blue')
    }
  }
  dev.off()
  return (wholetable)
}
# m_index - design matrix(num_samples, num_conditions)
# write matrix (num_samples, num_conditions) of expression upregulated/downregulated signs, fold changes, p-values.
# Run per-condition DE tests and collect the results into gene x condition
# matrices of signs (+1/-1/0), log fold changes and adjusted p-values, all
# written under 'filedirec'.
#
# Args:
#   m_index        samples x conditions design matrix; entries give each
#                  sample's group within a condition, 'non' marks samples
#                  excluded from that condition.
#   condition_list condition (column) names to analyse.
#   block_list     accepted for interface compatibility; currently not
#                  forwarded to DEtest.
#   p.value, lfc   cutoffs used when filling the sign matrix.
#
# NOTE(review): the DEtest call hard-codes p.value=0.05 / lfc=1 for the
# per-condition file output, independent of this function's own cutoffs —
# confirm this asymmetry is intended.
DEG_patterns<-function(data,m_index,block_list=NULL,condition_list,filedirec,p.value=1,lfc=0){
  genes <- rownames(data)
  m_sign <- matrix(0, nrow=length(genes), ncol=length(condition_list),
                   dimnames = list(genes, condition_list))
  m_fc <- matrix(0, nrow=length(genes), ncol=length(condition_list),
                 dimnames = list(genes, condition_list))
  m_pvalue <- matrix(0, nrow=length(genes), ncol=length(condition_list),
                     dimnames = list(genes, condition_list))
  cat('Differential expression analysis performing for conditions:\n')
  for (i in seq_along(condition_list)){
    condition <- condition_list[i]
    cat(i, condition, '\n')
    # Keep only the samples that participate in this condition.
    index <- m_index[, condition]
    index <- which(index != 'non')
    treatment <- as.factor(m_index[index, condition])
    # Fixed: argument name spelled out in full — 'filedire=' only worked via
    # partial argument matching against DEtest's 'filedirec'.
    wholetable <- DEtest(data[, index], treatment, filedirec=paste0(filedirec, '/', condition), p.value=0.05, lfc=1)
    m_sign[rownames(wholetable[wholetable$logFC>lfc & wholetable$adj.P.Val<p.value,]), condition] <- 1
    m_sign[rownames(wholetable[wholetable$logFC<(-lfc) & wholetable$adj.P.Val<p.value,]), condition] <- -1
    m_fc[rownames(wholetable), condition] <- wholetable$logFC
    m_pvalue[rownames(wholetable), condition] <- wholetable$adj.P.Val
  }
  write.table(m_sign, paste0(filedirec,'/expression_signs.csv'), append = FALSE, sep = ",", quote=FALSE, col.names = T, row.names = T)
  write.table(m_fc, paste0(filedirec,'/logfoldchanges.csv'), append = FALSE, sep = ",", quote=FALSE, col.names = T, row.names = T)
  write.table(m_pvalue, paste0(filedirec,'/pvalues.csv'), append = FALSE, sep = ",", quote=FALSE, col.names = T, row.names = T)
}
# One-vs-rest differential expression for a single cluster: samples in
# 'clusterID' are contrasted against all remaining samples via limma.
# 'block_list' is accepted for interface compatibility but is currently
# unused (the duplicate-correlation fit is disabled).
# Draws an MD plot on the active graphics device and returns the full,
# unfiltered topTable.
DEtest_cluster <- function(data,identity,block_list,clusterID,p.cutoff=0.05,lfc.cutoff=1) {
  # Recode membership as a binary indicator: 1 = in cluster, 0 = rest.
  identity[identity != clusterID] <- 0
  identity[identity == clusterID] <- 1
  ct <- factor(identity)
  design <- model.matrix(~ct)
  fit <- eBayes(lmFit(data, design), trend = TRUE, robust = TRUE)
  total_genes <- nrow(data)
  hits <- topTable(fit, coef = "ct1", n = total_genes, p.value = p.cutoff, lfc = lfc.cutoff)
  up_genes <- rownames(hits)[hits$logFC > 0]
  down_genes <- rownames(hits)[hits$logFC < 0]
  flags <- decideTests(fit, p.value = p.cutoff, lfc = lfc.cutoff)
  plotMD(fit, coef = 'ct1', status = flags[, 2], values = c(1, -1), hl.col = c('red', 'blue'), hl.cex = 0.2)
  mean_expr <- fit$Amean
  coef_vals <- as.matrix(fit$coefficients)[, 'ct1']
  if (length(up_genes) > 0)
    text(x = mean_expr[up_genes], y = coef_vals[up_genes], labels = up_genes, cex = 0.5, col = 'red')
  if (length(down_genes) > 0)
    text(x = mean_expr[down_genes], y = coef_vals[down_genes], labels = down_genes, cex = 0.5, col = 'blue')
  topTable(fit, coef = "ct1", n = total_genes, p.value = 1, lfc = 0)
}
# One-vs-rest limma DE analysis for every cluster. Saves an MD plot and
# UP/DOWN gene lists per cluster plus combined sign / fold-change / p-value
# matrices under 'filedirec'. Returns a named list of the gene lists.
DEG_clusters<-function(data,identity,block_list,p.cutoff=0.05,lfc.cutoff=0,filedirec){
  genes <- rownames(data)
  num_clusters <- length(unique(identity))
  empty_mat <- function() {
    matrix(0, nrow = length(genes), ncol = num_clusters,
           dimnames = list(genes, 1:num_clusters))
  }
  m_sign <- empty_mat()
  m_fc <- empty_mat()
  m_pvalue <- empty_mat()
  cat('Differential expression analysis performing for each cluster VS others:\n')
  DE_list <- list()
  for (i in 1:num_clusters) {
    # The MD plot is drawn inside DEtest_cluster; capture it to a PDF.
    pdf(paste0(filedirec, '/MD_cluster', i, '.pdf'), width = 10, height = 8)
    wholetable <- DEtest_cluster(data, identity, block_list, clusterID = i, p.cutoff, lfc.cutoff)
    dev.off()
    cat('cluster ', i, '\n')
    significant <- wholetable$adj.P.Val < p.cutoff
    uplist <- rownames(wholetable)[wholetable$logFC > lfc.cutoff & significant]
    # logFC < -cutoff is the same condition the original expressed as
    # -logFC > cutoff, written without the unary-minus trick.
    downlist <- rownames(wholetable)[wholetable$logFC < -lfc.cutoff & significant]
    write.table(uplist,
                paste0(filedirec, '/UP_cluster', i, '.txt'), append = FALSE, sep = ",", quote = FALSE, col.names = F, row.names = F)
    write.table(downlist,
                paste0(filedirec, '/DOWN_cluster', i, '.txt'), append = FALSE, sep = ",", quote = FALSE, col.names = F, row.names = F)
    m_sign[uplist, i] <- 1
    m_sign[downlist, i] <- -1
    m_fc[rownames(wholetable), i] <- wholetable$logFC
    m_pvalue[rownames(wholetable), i] <- wholetable$adj.P.Val
    DE_list[[paste0('up_cluster', i)]] <- uplist
    DE_list[[paste0('down_cluster', i)]] <- downlist
  }
  write.table(m_sign, paste0(filedirec, '/expression_signs.csv'), append = FALSE, sep = ",", quote = FALSE, col.names = T, row.names = T)
  write.table(m_fc, paste0(filedirec, '/fold_changes.csv'), append = FALSE, sep = ",", quote = FALSE, col.names = T, row.names = T)
  write.table(m_pvalue, paste0(filedirec, '/pvalues.csv'), append = FALSE, sep = ",", quote = FALSE, col.names = T, row.names = T)
  return(DE_list)
}
# Differential expression between two specific clusters via limma with a
# duplicate-correlation (block) fit. clusterID1 is recoded as the baseline
# (0) and clusterID2 as the contrast level (1).
#
# NOTE(review): samples from clusters other than clusterID1/clusterID2 are
# NOT removed, so factor(identity) can carry extra levels and extra design
# columns — confirm callers only pass two-cluster data.
#
# Draws an MD plot on the active device. Returns list(fit=, topgenes=) with
# the genes passing the cutoffs.
DEG_clusterspair <- function(data,identity,block_list,clusterID1,clusterID2,p.cutoff=0.01,lfc.cutoff=1) {
  identity[identity ==clusterID1] <- 0
  identity[identity ==clusterID2] <- 1
  ct <- factor(identity)
  design <- model.matrix(~ct)
  dupcor <- duplicateCorrelation(data,design,block=block_list)
  fit <- lmFit(data,design,block=block_list,correlation=dupcor$consensus.correlation)
  fit <- eBayes(fit, trend=TRUE, robust=TRUE)
  results <- decideTests(fit,p.value = p.cutoff, lfc=lfc.cutoff)
  plotMD(fit,coef= 'ct1',status=results[,2],values=c(1,-1),hl.col=c('red','blue'),hl.cex=0.2)
  x <- fit$Amean
  y <- as.matrix(fit$coefficients)[,'ct1']
  highlight1 <- names(results[,2][results[,2]==1])
  if(length(highlight1)>0)
    text(x = x[highlight1],y = y[highlight1],labels = highlight1,cex = 0.5,col='red')
  highlight2 <- names(results[,2][results[,2]==-1])
  if(length(highlight2)>0)
    text(x = x[highlight2],y = y[highlight2],labels = highlight2,cex = 0.5,col='blue')
  # Fixed: report over all genes instead of the magic number 5875, which was
  # tied to one particular dataset. (Also dropped a no-op bare expression
  # 'dupcor$consensus.correlation' that discarded its value.)
  top <- topTable(fit, coef="ct1",n=nrow(data), p.value=p.cutoff,lfc=lfc.cutoff)
  return (list('fit'=fit,'topgenes'=top))
}
# Plot samples in 2-D UMAP space, coloured by cluster (if given) or by major
# condition, saving both a static PDF and an interactive HTML widget under
# 'filedir'.
#
# Args:
#   umap_embedding  matrix/data.frame with two UMAP components per sample.
#   cluster         optional cluster assignment per sample.
#   sample_list     per-sample metadata; the hover text assumes columns
#                   SampleID, annotation, experiment (and condition when
#                   cluster is NULL) — confirm against callers.
#   filedir         output directory.
#   filetoken       basename (without extension) for the output files.
#
# Relies on the global palette 'custom.col' when colouring clusters.
# Returns the assembled plotting data.frame.
umap_visual<-function (umap_embedding,cluster=NULL,sample_list,filedir,filetoken){
  df_umap <- as.data.frame(umap_embedding)
  colnames(df_umap) <- c('component1','component2')
  df_umap <- cbind(df_umap,sample_list)
  # Fixed: restore the previous working directory with on.exit() instead of
  # reconstructing a "../../" path by counting slashes, which broke for
  # absolute 'filedir' values and leaked the directory change on error.
  old_wd <- setwd(filedir)
  on.exit(setwd(old_wd), add = TRUE)
  if (!is.null(cluster)){
    num_clusters <- length(unique(cluster))
    label_clusters <- paste0('cluster_',1:num_clusters)
    df_umap$cl <- factor(paste0('cluster_',as.character(cluster)),levels=label_clusters)
    p <- ggplot(df_umap,aes(x=component1,y=component2,color=cl,text =paste("Sample ID:", SampleID,"\nannotation:", annotation,"\nExperiment ",experiment)))+geom_point(size=2)+scale_color_manual(values = custom.col[1:num_clusters])+theme(legend.title = element_blank())+ggtitle('Clustering samples in umap space')
  }
  else{
    p <- ggplot(df_umap,aes(x=component1,y=component2,color=condition,text =paste("Sample ID:", SampleID,"\nannotation:", annotation,"\nExperiment ",experiment)))+geom_point(size=2)+theme(legend.title = element_blank())+ggtitle('Samples colored by major conditions in umap space')
  }
  # Saving logic was duplicated in both branches; hoisted here once.
  # (An unused 'num_conditions' in the else branch was also removed.)
  ggsave(file=paste0(filetoken,'.pdf'), plot = p, units="in", width=12, height=10, dpi = 300)
  p <- ggplotly(p)
  htmlwidgets::saveWidget(p, file=paste0(filetoken,'.html'))
  return (df_umap)
}
# One-vs-rest limma DE test for 'clusterID' (binary contrast against every
# other sample). Draws an MD plot on the current device and returns the
# unfiltered topTable.
# NOTE(review): this definition masks the earlier DEtest_cluster(), which
# also took a 'block_list' argument — callers written against the old
# signature will mis-bind their positional arguments.
DEtest_cluster <- function(data,identity,clusterID,p.cutoff=0.05,lfc.cutoff=1) {
  # Binary recode: 1 = member of the target cluster, 0 = everything else.
  identity[identity != clusterID] <- 0
  identity[identity == clusterID] <- 1
  ct <- factor(identity)
  design <- model.matrix(~ct)
  fit <- lmFit(data, design)
  fit <- eBayes(fit, trend = TRUE, robust = TRUE)
  gene_count <- nrow(data)
  sig_table <- topTable(fit, coef = "ct1", n = gene_count, p.value = p.cutoff, lfc = lfc.cutoff)
  up_ids <- rownames(sig_table)[sig_table$logFC > 0]
  down_ids <- rownames(sig_table)[sig_table$logFC < 0]
  status_mat <- decideTests(fit, p.value = p.cutoff, lfc = lfc.cutoff)
  plotMD(fit, coef = 'ct1', status = status_mat[, 2], values = c(1, -1), hl.col = c('red', 'blue'), hl.cex = 0.2)
  avg_expr <- fit$Amean
  logfc_vals <- as.matrix(fit$coefficients)[, 'ct1']
  # Annotate significant genes on the MD plot, up in red and down in blue.
  for (grp in list(list(ids = up_ids, col = 'red'), list(ids = down_ids, col = 'blue'))) {
    if (length(grp$ids) > 0)
      text(x = avg_expr[grp$ids], y = logfc_vals[grp$ids], labels = grp$ids, cex = 0.5, col = grp$col)
  }
  wholetable <- topTable(fit, coef = "ct1", n = gene_count, p.value = 1, lfc = 0)
  return (wholetable)
}
# Per-cluster one-vs-rest DE analysis. Creates 'filedirec' if necessary,
# saves an MD plot and UP/DOWN gene lists per cluster, plus combined
# sign / fold-change / p-value matrices. Returns a named list of gene lists.
DEG_clusters<-function(data,identity,p.cutoff=0.05,lfc.cutoff=0,filedirec){
  dir.create(filedirec, showWarnings = FALSE)
  gene_ids <- rownames(data)
  n_cl <- length(unique(identity))
  new_mat <- function() {
    matrix(0, nrow = length(gene_ids), ncol = n_cl,
           dimnames = list(gene_ids, 1:n_cl))
  }
  m_sign <- new_mat()
  m_fc <- new_mat()
  m_pvalue <- new_mat()
  DE_list <- list()
  for (i in 1:n_cl) {
    pdf(paste0(filedirec, '/MD_cluster', i, '.pdf'), width = 10, height = 8)
    tab <- DEtest_cluster(data, identity, clusterID = i, p.cutoff, lfc.cutoff)
    dev.off()
    passes <- tab$adj.P.Val < p.cutoff
    uplist <- rownames(tab)[tab$logFC > lfc.cutoff & passes]
    # Equivalent to the original '-logFC > cutoff' formulation.
    downlist <- rownames(tab)[tab$logFC < -lfc.cutoff & passes]
    write.table(uplist,
                paste0(filedirec, '/UP_cluster', i, '.txt'), append = FALSE, sep = ",", quote = FALSE, col.names = F, row.names = F)
    write.table(downlist,
                paste0(filedirec, '/DOWN_cluster', i, '.txt'), append = FALSE, sep = ",", quote = FALSE, col.names = F, row.names = F)
    m_sign[uplist, i] <- 1
    m_sign[downlist, i] <- -1
    m_fc[rownames(tab), i] <- tab$logFC
    m_pvalue[rownames(tab), i] <- tab$adj.P.Val
    DE_list[[paste0('up_cluster', i)]] <- uplist
    DE_list[[paste0('down_cluster', i)]] <- downlist
  }
  write.table(m_sign, paste0(filedirec, '/expression_signs.csv'), append = FALSE, sep = ",", quote = FALSE, col.names = T, row.names = T)
  write.table(m_fc, paste0(filedirec, '/fold_changes.csv'), append = FALSE, sep = ",", quote = FALSE, col.names = T, row.names = T)
  write.table(m_pvalue, paste0(filedirec, '/pvalues.csv'), append = FALSE, sep = ",", quote = FALSE, col.names = T, row.names = T)
  return(DE_list)
}
# Threshold-based "always regulated" gene lists per cluster: a gene is UP in
# a cluster when at least 'percent_threshold' (fraction) of the cluster's
# samples exceed 'upthreshold', and DOWN symmetrically against
# 'downthreshold'. Writes per-cluster UP/DOWN lists plus percentage and
# mean-intensity matrices under 'filedirec'; returns the lists.
#
# NOTE(review): table(identity) is indexed by position, which assumes
# cluster labels are exactly 1..n_clusters — confirm upstream.
ARG_clusters<-function(data,identity,upthreshold,downthreshold,percent_threshold,filedirec){
  dir.create(filedirec, showWarnings = FALSE)
  clustersizes <- table(identity)
  n_clusters <- length(unique(identity))
  genenames <- rownames(data)
  AR_list <- list()
  # Columns alternate up1, down1, up2, down2, ...
  m_percent <- matrix(0, nrow=dim(data)[1], ncol=2*n_clusters,
                      dimnames = list(rownames(data), c(rbind(paste0('up',1:n_clusters), paste0('down',1:n_clusters)))))
  # Fixed: was 'm_avg<--matrix(0,...)' — the stray unary minus was a typo
  # (harmless on a zero matrix, but misleading).
  m_avg <- matrix(0, nrow=dim(data)[1], ncol=n_clusters,
                  dimnames = list(rownames(data), paste0('cluster',1:n_clusters)))
  for (clusterno in 1:n_clusters){
    # drop=FALSE keeps a single-sample cluster as a one-column matrix so the
    # row-wise summaries below do not fail on a dimensionless vector.
    cluster_data <- data[, which(identity==clusterno), drop = FALSE]
    current_uppercent <- rowSums(cluster_data > upthreshold)/clustersizes[clusterno]
    uplist <- genenames[current_uppercent >= percent_threshold]
    write.table(uplist, paste0(filedirec,'/UP_cluster',clusterno,'.txt'), append = FALSE, sep = ",", quote=FALSE, col.names = F, row.names = F)
    m_percent[, clusterno*2-1] <- current_uppercent
    current_downpercent <- rowSums(cluster_data < downthreshold)/clustersizes[clusterno]
    downlist <- genenames[current_downpercent >= percent_threshold]
    write.table(downlist, paste0(filedirec,'/DOWN_cluster',clusterno,'.txt'), append = FALSE, sep = ",", quote=FALSE, col.names = F, row.names = F)
    m_percent[, clusterno*2] <- current_downpercent
    m_avg[, clusterno] <- rowMeans(cluster_data)
    ID <- paste0('cluster',clusterno)
    AR_list[[paste0('up_',ID)]] <- uplist
    AR_list[[paste0('down_',ID)]] <- downlist
  }
  write.table(m_percent, paste0(filedirec,'/percentages.csv'), append = FALSE, sep = ",", quote=FALSE, col.names = T, row.names = T)
  write.table(m_avg, paste0(filedirec,'/intensities.csv'), append = FALSE, sep = ",", quote=FALSE, col.names = T, row.names = T)
  return (AR_list)
}
# Assemble an annotated marker table from per-cluster up/down marker lists.
#
# Args:
#   maker_lists  named list alternating up_clusterK / down_clusterK entries,
#                so entry i belongs to cluster ceiling(i/2) and odd entries
#                are upregulated.
#   p_values, lfcs, intensities  gene x cluster matrices.
#   percentages  gene x (2*cluster) matrix, columns alternating up/down.
#   filename     output TSV path.
#   n            keep at most the first n genes of each list.
#
# Joins annotations from the global 'gene_annotations' table (columns 3:13
# assumed to be annotation fields — confirm upstream). Writes the table to
# 'filename' and returns it.
write.markeranno<-function(maker_lists,p_values=NULL,lfcs=NULL,percentages=NULL,intensities=NULL,filename,n=10){
  df_makerannos <- data.frame(clusterID=numeric(),mode=character(),gene_name=character(),
                              p_value=numeric(),fold_change=numeric(),percentage=numeric(),intensity=numeric(),stringsAsFactors=FALSE)
  clusterIDs <- names(maker_lists) # both up and down, length(clusterIDs)=2*num_clusters
  # Fixed: seq_along() instead of 1:length() so an empty input list no longer
  # crashes the loop; head() caps at n genes while keeping shorter lists.
  for (i in seq_along(clusterIDs)){
    temp_genes <- head(maker_lists[[clusterIDs[i]]], n)
    cluster_no <- ceiling(i/2)
    mode_label <- if (i %% 2 == 1) 'upregulated' else 'downregulated'
    temp_df <- data.frame(rep(cluster_no,length(temp_genes)),rep(mode_label,length(temp_genes)),temp_genes,
                          p_values[temp_genes,cluster_no],lfcs[temp_genes,cluster_no],percentages[temp_genes,i],intensities[temp_genes,cluster_no])
    colnames(temp_df) <- c('clusterID','mode','gene_name','p_value','fold_change','percentage','intensity')
    df_makerannos <- rbind(df_makerannos,temp_df)
  }
  df_makerannos <- cbind(df_makerannos,gene_annotations[match(df_makerannos$gene_name,gene_annotations$gene_name),3:13])
  write.table(df_makerannos,filename, append = FALSE, sep = "\t",quote=FALSE,col.names = T, row.names = F)
  return(df_makerannos)
}
# Write a TSV mapping each cluster to the sample annotations it contains,
# together with each annotation's major treatment/condition.
# 'cluster' is a named vector whose names are row indices into 'sample_list'.
# Note: the output column name 'majoy_treatment' (sic) is kept as-is because
# downstream consumers may depend on it.
write.sampleanno<-function(cluster,sample_list,filename){
  per_cluster <- lapply(1:length(unique(cluster)), function(i) {
    member_rows <- as.numeric(names(cluster)[which(cluster == i)])
    annos <- unique(sample_list$annotation[member_rows])
    block <- data.frame(rep(i, length(annos)),
                        annos,
                        sample_list$condition[match(annos, sample_list$annotation)])
    names(block) <- c('clusterID','sample_annotation','majoy_treatment')
    block
  })
  df_annos <- do.call(rbind, per_cluster)
  write.table(df_annos,filename, append = FALSE, sep = "\t",quote=FALSE,col.names = T, row.names = F)
}
# Summarise regulator activity per cluster.
#
# For each regulator in df_regulators (expected columns: 'regulator',
# 'gene'), the mean expression of its target genes is computed per sample
# (m_regexp) and per cluster (m_intensities). m_percentages holds, per
# cluster, the fraction of samples whose regulator-mean expression clears a
# per-sample quantile threshold: the 70th percentile when the cluster mean
# is non-negative, the 30th percentile otherwise.
#
# Writes <filetoken>intensities.csv and <filetoken>percentages.csv and
# returns the per-sample regulator-expression matrix.
#
# NOTE(review): clusters are assumed to be labelled 1..k; a regulator with a
# single target gene leaves its percentage entries at 0 (the percentage
# block only runs when length(regulated_genes) > 1); a regulator whose
# targets are entirely absent from 'data' would make the single-gene branch
# fail — confirm all three are intended upstream.
Regulator_clusters<-function(data,cluster,df_regulators,filetoken){
  # samples x regulators: per-sample mean expression of each regulator's targets.
  m_regexp<-matrix(0, nrow=length(cluster),ncol=length(unique(df_regulators$regulator)),
  dimnames = list(names(cluster),sort(unique(df_regulators$regulator))))
  # clusters x regulators summaries.
  m_percentages<-matrix(0, nrow=length(unique(cluster)),ncol=length(unique(df_regulators$regulator)),
  dimnames = list(1:length(unique(cluster)),sort(unique(df_regulators$regulator))))
  m_intensities<-matrix(0, nrow=length(unique(cluster)),ncol=length(unique(df_regulators$regulator)),
  dimnames = list(1:length(unique(cluster)),sort(unique(df_regulators$regulator))))
  genenames<-rownames(data)
  # Per-sample 70th / 30th expression percentiles, used as up/down thresholds.
  top30_chromosome<-apply(data,2,quantile,probs=0.7)
  bottom30_chromosome<-apply(data,2,quantile,probs=0.3)
  for(c in 1:dim(m_intensities)[2]){
    # Target genes of regulator c that are present in the data matrix.
    regulated_genes<-df_regulators$gene[df_regulators$regulator==colnames(m_intensities)[c]]
    regulated_genes<-regulated_genes[regulated_genes%in%genenames]
    if (length(regulated_genes)>1)
    m_regexp[,c]<-colMeans(data[regulated_genes,])
    else
    m_regexp[,c]<-data[regulated_genes,]
    for(r in 1:dim(m_intensities)[1]){
      sample_indices<-names(which(cluster==r))
      upthreshold_exp<-top30_chromosome[sample_indices]
      downthreshold_exp<-bottom30_chromosome[sample_indices]
      if(length(regulated_genes)>1){
        # mean expression in cluster r and regulator c
        exp_regcluster<-data[regulated_genes,sample_indices]
        m_intensities[r,c]<-mean(exp_regcluster) # mean expression in cluster r and regulator c
        # percentage of samples whose regulator mean expression higher than top 30 expressions in chromosome
        regulator_exp<-colMeans(exp_regcluster)
        if(m_intensities[r,c]>=0)
        m_percentages[r,c]<-sum(regulator_exp>=upthreshold_exp)/length(sample_indices)
        else
        m_percentages[r,c]<-sum(regulator_exp<=downthreshold_exp)/length(sample_indices)
      }
    }
  }
  write.table(m_intensities,paste0(filetoken,'intensities.csv'),append = FALSE, sep = ",",quote=FALSE,col.names = T, row.names = T)
  write.table(m_percentages,paste0(filetoken,'percentages.csv'),append = FALSE, sep = ",",quote=FALSE,col.names = T, row.names = T)
  return (m_regexp)
}
|
library(magrittr); library(ggplot2)
source("R/utils.R")

# Name whose per-decade birth frequency we plot.
x <- "Elisa"
df <- get_freq_nome(x)

# Fixed: ggsave() was chained onto the plot with '+', which is not a valid
# ggplot2 layer (it only ever "worked" by side effect). Build the plot
# first, then save it explicitly.
p <- df %>%
  ggplot(aes(x = decada, y = freq)) +
  geom_point() +
  geom_line(aes(group = 1)) +
  labs(title = paste("Nascimentos por década", x, sep = " - ")) +
  xlab("Década de nascimento") +
  ylab("Pessoas")
ggsave("figures/nomes-decada.pdf", plot = p)
| /hw-rstudio-project.R | no_license | analise-viz-dados-1sem-2020/hw-rstudio-project-elisasanches94 | R | false | false | 349 | r | library(magrittr); library(ggplot2)
source("R/utils.R")
x <- "Elisa"
df <- get_freq_nome(x)
df %>%
ggplot(aes(x = decada, y = freq)) +
geom_point() +
geom_line(aes(group = 1)) +
labs(title = paste("Nascimentos por década", x, sep = " - ")) +
xlab("Década de nascimento") +
ylab("Pessoas") +
ggsave("figures/nomes-decada.pdf")
|
library(CRFutil)

# Three-node fully connected MRF over binary variables A, B, C.
grphf <- ~A:B:C
adj <- ug(grphf, result="matrix")
# One-hot feature map for a binary state (1 or 2).
f0 <- function(y){ as.numeric(c((y==1),(y==2)))}
n.states <- 2
known.model <- make.crf(adj, n.states)
# True node pots:
PsiA <- c(2,1)
PsiB <- c(1,3)
PsiC <- c(3,1)
# True edge pots:
PsiAB <-
  rbind(
    c(3,   6.1),
    c(6.1, 3.6)
  )
PsiBC <-
  rbind(
    c(2.5, 3.1),
    c(3.1, 2)
  )
PsiAC <-
  rbind(
    c(4,   1.1),
    c(1.1, 4.3)
  )
known.model$node.pot[1,] <- PsiA
known.model$node.pot[2,] <- PsiB
# Fixed: PsiC was previously assigned to row 2, clobbering PsiB and leaving
# node 3 with its default potential.
known.model$node.pot[3,] <- PsiC
known.model$edge.pot[[1]] <- PsiAB
known.model$edge.pot[[2]] <- PsiAC
known.model$edge.pot[[3]] <- PsiBC
# So now sample from the model as if we obtained an experimental sample:
num.samps <- 25
set.seed(1)
samps <- sample.exact(known.model, num.samps)
mrf.sample.plot(samps)
library(rgl)
# Instantiate a two node two parameter model to fit:
fit <- make.crf(adj, n.states)
fit <- make.features(fit)
# NOTE(review): make.par() is called with 2 parameters, yet parameter
# indices 1..9 are assigned below — presumably this should be
# make.par(fit, 9); confirm against the CRFutil API.
fit <- make.par(fit, 2)
# One parameter per node (state-1 weight of each node).
fit$node.par[1,1,] <- 1
fit$node.par[2,1,] <- 2
fit$node.par[3,1,] <- 3
fit$node.par
# Two parameters per edge: the diagonal (agreement) entries of each
# 2x2 edge potential.
fit$edge.par[[1]][1,1,1] <- 4
fit$edge.par[[1]][2,2,1] <- 5
fit$edge.par[[2]][1,1,1] <- 6
fit$edge.par[[2]][2,2,1] <- 7
fit$edge.par[[3]][1,1,1] <- 8
fit$edge.par[[3]][2,2,1] <- 9
fit$edge.par
# Node-to-parameter bookkeeping (inspected interactively).
n2p <- nodes2params.list2(fit, storeQ = T)
n2p
fit$adj.nodes
# All 2^3 = 8 joint configurations of (A, B, C).
X.all <- expand.grid(c(1,2),c(1,2),c(1,2))
X.all
# One-hot feature map (re-defined; identical to the f0 declared above).
f0 <- function(y){ as.numeric(c((y==1),(y==2)))}
# Log Pseudo-Likelihood:
# Pull out the per-node parameter vectors and per-edge parameter matrices.
t1 <- fit$node.par[1,,1]
t2 <- fit$node.par[2,,1]
t3 <- fit$node.par[3,,1]
w12 <- fit$edge.par[[1]][,,]
w13 <- fit$edge.par[[2]][,,]
w23 <- fit$edge.par[[3]][,,]
X <- c(1,1,1)
# Hand-computed conditional energy terms for each node given its neighbours
# (node term followed by the two incident edge terms).
# X1
c( f0(X[1])%*%t1, f0(X[1])%*%w12%*%f0(X[2]), f0(X[1])%*%w13%*%f0(X[3]) )
# X2
c( f0(X[2])%*%t2, f0(X[2])%*%w12%*%f0(X[1]), f0(X[2])%*%w23%*%f0(X[3]) )
# X3
c( f0(X[3])%*%t3, f0(X[3])%*%w23%*%f0(X[2]), f0(X[3])%*%w13%*%f0(X[1]) )
# Complements:
# For a configuration X, locate the rows of X.all obtained by flipping each
# coordinate in turn (needed for the pseudo-likelihood denominators).
X <- X.all[8,]
X
c(
row.match(complement.at.idx(X, complement.index = 1), table = X.all),
row.match(complement.at.idx(X, complement.index = 2), table = X.all),
row.match(complement.at.idx(X, complement.index = 3), table = X.all) )
complement.at.idx(X, complement.index = 1)
complement.at.idx(X, complement.index = 2)
complement.at.idx(X, complement.index = 3)
# Log Likelihood:
# Feature vector of one configuration; theta * phi gives the per-parameter
# contributions to the (unnormalised) log-likelihood.
X <- c(1,1,1)
phi.X <- phi.features(X, edges.mat = fit$edges, node.par = fit$node.par, edge.par = fit$edge.par, ff = f0)
theta <- 1:9
theta * phi.X
| /tests/pseudo-lik_vs_lik_test1.R | no_license | npetraco/CRFutil | R | false | false | 2,375 | r | library(CRFutil)
grphf <- ~A:B:C
adj <- ug(grphf, result="matrix")
f0 <- function(y){ as.numeric(c((y==1),(y==2)))}
n.states <- 2
known.model <- make.crf(adj, n.states)
# True node pots:
PsiA <- c(2,1)
PsiB <- c(1,3)
PsiC <- c(3,1)
# True edge pots:
PsiAB <-
rbind(
c(3, 6.1),
c(6.1, 3.6)
)
PsiBC <-
rbind(
c(2.5, 3.1),
c(3.1, 2)
)
PsiAC <-
rbind(
c(4, 1.1),
c(1.1, 4.3)
)
known.model$node.pot[1,] <- PsiA
known.model$node.pot[2,] <- PsiB
known.model$node.pot[2,] <- PsiC
known.model$edge.pot[[1]] <- PsiAB
known.model$edge.pot[[2]] <- PsiAC
known.model$edge.pot[[3]] <- PsiBC
# So now sample from the model as if we obtained an experimental sample:
num.samps <- 25
set.seed(1)
samps <- sample.exact(known.model, num.samps)
mrf.sample.plot(samps)
library(rgl)
# Instantiate a two node two parameter model to fit:
fit <- make.crf(adj, n.states)
fit <- make.features(fit)
fit <- make.par(fit, 2)
fit$node.par[1,1,] <- 1
fit$node.par[2,1,] <- 2
fit$node.par[3,1,] <- 3
fit$node.par
fit$edge.par[[1]][1,1,1] <- 4
fit$edge.par[[1]][2,2,1] <- 5
fit$edge.par[[2]][1,1,1] <- 6
fit$edge.par[[2]][2,2,1] <- 7
fit$edge.par[[3]][1,1,1] <- 8
fit$edge.par[[3]][2,2,1] <- 9
fit$edge.par
n2p <- nodes2params.list2(fit, storeQ = T)
n2p
fit$adj.nodes
X.all <- expand.grid(c(1,2),c(1,2),c(1,2))
X.all
f0 <- function(y){ as.numeric(c((y==1),(y==2)))}
# Log Pseudo-Likelihood:
t1 <- fit$node.par[1,,1]
t2 <- fit$node.par[2,,1]
t3 <- fit$node.par[3,,1]
w12 <- fit$edge.par[[1]][,,]
w13 <- fit$edge.par[[2]][,,]
w23 <- fit$edge.par[[3]][,,]
X <- c(1,1,1)
# X1
c( f0(X[1])%*%t1, f0(X[1])%*%w12%*%f0(X[2]), f0(X[1])%*%w13%*%f0(X[3]) )
# X2
c( f0(X[2])%*%t2, f0(X[2])%*%w12%*%f0(X[1]), f0(X[2])%*%w23%*%f0(X[3]) )
# X3
c( f0(X[3])%*%t3, f0(X[3])%*%w23%*%f0(X[2]), f0(X[3])%*%w13%*%f0(X[1]) )
# Complements:
X <- X.all[8,]
X
c(
row.match(complement.at.idx(X, complement.index = 1), table = X.all),
row.match(complement.at.idx(X, complement.index = 2), table = X.all),
row.match(complement.at.idx(X, complement.index = 3), table = X.all) )
complement.at.idx(X, complement.index = 1)
complement.at.idx(X, complement.index = 2)
complement.at.idx(X, complement.index = 3)
# Log Likelihood:
X <- c(1,1,1)
phi.X <- phi.features(X, edges.mat = fit$edges, node.par = fit$node.par, edge.par = fit$edge.par, ff = f0)
theta <- 1:9
theta * phi.X
|
# Compare a data frame 'x' against the re-merged halves in 'xy' (a list with
# elements $x and $y), ignoring row and column order. Prints both structures
# when they differ; returns TRUE/FALSE.
checkResult <- function(x, xy)
{
  # Canonical form: columns alphabetised, rows fully sorted, so the two
  # frames can be compared independent of ordering.
  standardise <- function(d) kwb.utils::fullySorted(d[, sort(names(d))])
  lhs <- standardise(x)
  rhs <- standardise(merge(xy$x, xy$y))
  isIdentical <- identical(lhs, rhs)
  if (! isIdentical) {
    cat("str(x):\n"); str(lhs)
    cat("str(y):\n"); str(rhs)
  }
  isIdentical
}
# Round-trip test: unmerge() should split 'z' by the given key column(s)
# such that merging the two halves back reproduces the original frame
# (order-insensitively, via checkResult above).
test_that("unmerge() works", {
  # p1/p2 are functionally dependent on pipe; i1 varies per inspection.
  z <- data.frame(
    insp = c(1, 1, 2, 3, 4, 5),
    pipe = c(1, 1, 1, 2, 2, 3),
    p1 = c("a", "a", "a", "b", "b", "c"),
    p2 = c("A", "A", "A", "B", "B", "c"),
    i1 = c(100, 150, 200, 100, 200, 100)
  )
  # Splitting on any single column, or on a pair of columns, must be lossless.
  expect_true(checkResult(z, unmerge(z, "insp")))
  expect_true(checkResult(z, unmerge(z, "i1")))
  expect_true(checkResult(z, unmerge(z, "pipe")))
  expect_true(checkResult(z, unmerge(z, "p1")))
  expect_true(checkResult(z, unmerge(z, "p2")))
  expect_true(checkResult(z, unmerge(z, c("p1", "p2"))))
})
| /tests/testthat/test-function-unmerge.R | permissive | KWB-R/kwb.utils | R | false | false | 864 | r | checkResult <- function(x, xy)
{
standardise <- function(x) kwb.utils::fullySorted(x[, sort(names(x))])
x <- standardise(x)
y <- standardise(merge(xy$x, xy$y))
isIdentical <- identical(x, y)
if (! isIdentical) {
cat("str(x):\n"); str(x)
cat("str(y):\n"); str(y)
}
isIdentical
}
test_that("unmerge() works", {
z <- data.frame(
insp = c(1, 1, 2, 3, 4, 5),
pipe = c(1, 1, 1, 2, 2, 3),
p1 = c("a", "a", "a", "b", "b", "c"),
p2 = c("A", "A", "A", "B", "B", "c"),
i1 = c(100, 150, 200, 100, 200, 100)
)
expect_true(checkResult(z, unmerge(z, "insp")))
expect_true(checkResult(z, unmerge(z, "i1")))
expect_true(checkResult(z, unmerge(z, "pipe")))
expect_true(checkResult(z, unmerge(z, "p1")))
expect_true(checkResult(z, unmerge(z, "p2")))
expect_true(checkResult(z, unmerge(z, c("p1", "p2"))))
})
|
# library(devtools)
# install_github('zdk123/SpiecEasi')
library(SpiecEasi)
library(metagenomeSeq)
library(phyloseq)
library(dplyr)
#setwd("/Users/paulsoj1/Desktop/tmp/nasal")
setwd("/Users/joycehsiao/Dropbox/GitHub/nasalmicrobiome/data")
# Load the nasal microbiome MRexperiment and drop sparse / unwanted taxa.
x = readRDS("finalMRobj.rds")
x = filterData(x,present=100)
x = x[-grep("Chlor",fData(x)$class),]
x = x[-grep("TM",fData(x)$class),]
cnts= MRcounts(x)
# Split counts by GOM cluster (samples x taxa after transpose).
gom1 = cnts[,which(pData(x)$GOM==1)] %>% t
gom2 = cnts[,which(pData(x)$GOM==2)] %>% t
nc = 3
# Sparse inverse-covariance network per cluster (SPIEC-EASI).
se.est1 <- spiec.easi(gom1,lambda.min.ratio=1e-2, nlambda=20,
icov.select.params=list(rep.num=20, ncores=nc))
se.est2 <- spiec.easi(gom2,lambda.min.ratio=1e-2, nlambda=20,
icov.select.params=list(rep.num=20, ncores=nc))
library(igraph)
pdf("~/Desktop/nasal_graphs.pdf",width=8,height=8)
b = MRexperiment2biom(x[,which(pData(x)$GOM==1)])
p = phyloseq::import_biom(b)
gom1g <- adj2igraph(se.est1$refit, vertex.attr=list(name=taxa_names(p)))
phyloseq::plot_network(gom1g,p, type='taxa', color="Rank3", label=NULL)
b2 = MRexperiment2biom(x[,which(pData(x)$GOM==2)])
p2 = phyloseq::import_biom(b2)
# Fixed: the cluster-2 graph previously took its vertex names from 'p'
# (cluster 1's phyloseq object) instead of 'p2'. Both subsets share the same
# taxa here, but naming from p2 is the consistent choice.
gom2g <- adj2igraph(se.est2$refit, vertex.attr=list(name=taxa_names(p2)))
phyloseq::plot_network(gom2g,p2, type='taxa', color="Rank3", label=NULL)
dev.off()
dd.gom1 <- degree_distribution(gom1g, cumulative=FALSE)
dd.gom2 <- degree_distribution(gom2g, cumulative=FALSE)
## ave degree
sum(seq_along(dd.gom1)*dd.gom1)-1
## [1] 2.367347
sum(seq_along(dd.gom2)*dd.gom2)-1
## [1] 1.37037
## plot degree distributions
plot(seq_along(dd.gom1)-1, dd.gom1, type='b', xlim=c(0,8),ylim=c(0,0.55),
ylab="Frequency", xlab="Degree", col='red')
points(seq_along(dd.gom2)-1, dd.gom2 , type='b')
legend("topright", c("Cluster2", "Cluster1"), col=c("black", "red"), pch=1, lty=1)
| /code/network.R | no_license | jhsiao999/nasalmicrobiome | R | false | false | 1,794 | r | # library(devtools)
# install_github('zdk123/SpiecEasi')
library(SpiecEasi)
library(metagenomeSeq)
library(phyloseq)
library(dplyr)
#setwd("/Users/paulsoj1/Desktop/tmp/nasal")
setwd("/Users/joycehsiao/Dropbox/GitHub/nasalmicrobiome/data")
x = readRDS("finalMRobj.rds")
x = filterData(x,present=100)
x = x[-grep("Chlor",fData(x)$class),]
x = x[-grep("TM",fData(x)$class),]
cnts= MRcounts(x)
gom1 = cnts[,which(pData(x)$GOM==1)] %>% t
gom2 = cnts[,which(pData(x)$GOM==2)] %>% t
nc = 3
se.est1 <- spiec.easi(gom1,lambda.min.ratio=1e-2, nlambda=20,
icov.select.params=list(rep.num=20, ncores=nc))
se.est2 <- spiec.easi(gom2,lambda.min.ratio=1e-2, nlambda=20,
icov.select.params=list(rep.num=20, ncores=nc))
library(igraph)
pdf("~/Desktop/nasal_graphs.pdf",width=8,height=8)
b = MRexperiment2biom(x[,which(pData(x)$GOM==1)])
p = phyloseq::import_biom(b)
gom1g <- adj2igraph(se.est1$refit, vertex.attr=list(name=taxa_names(p)))
phyloseq::plot_network(gom1g,p, type='taxa', color="Rank3", label=NULL)
b2 = MRexperiment2biom(x[,which(pData(x)$GOM==2)])
p2 = phyloseq::import_biom(b2)
gom2g <- adj2igraph(se.est2$refit, vertex.attr=list(name=taxa_names(p)))
phyloseq::plot_network(gom2g,p2, type='taxa', color="Rank3", label=NULL)
dev.off()
dd.gom1 <- degree_distribution(gom1g, cumulative=FALSE)
dd.gom2 <- degree_distribution(gom2g, cumulative=FALSE)
## ave degree
sum(seq_along(dd.gom1)*dd.gom1)-1
## [1] 2.367347
sum(seq_along(dd.gom2)*dd.gom2)-1
## [1] 1.37037
## plot degree distributions
plot(seq_along(dd.gom1)-1, dd.gom1, type='b', xlim=c(0,8),ylim=c(0,0.55),
ylab="Frequency", xlab="Degree", col='red')
points(seq_along(dd.gom2)-1, dd.gom2 , type='b')
legend("topright", c("Cluster2", "Cluster1"), col=c("black", "red"), pch=1, lty=1)
|
# Assemble the analysis data frame for a model: the observed variables taken
# from the input data, plus one all-zero placeholder column per latent
# variable.
#
# specs: list with inputData (data.frame), observedVariables (character),
#        nObservedVariables, nLatents, nUnits (integers), and
#        latentVariables (character names for the latent columns).
# Returns a data.frame with nUnits rows.
createModelData = function(specs){
  model_data <- specs$inputData[specs$observedVariables]
  if (specs$nLatents > 0) {
    # Latent variables start at zero for every unit.
    latent_zeros <- matrix(data = 0, nrow = specs$nUnits, ncol = specs$nLatents)
    model_data <- cbind(model_data, latent_zeros)
    latent_cols <- (specs$nObservedVariables + 1):(specs$nObservedVariables + specs$nLatents)
    names(model_data)[latent_cols] <- specs$latentVariables
  }
  # (Categorical-latent handling was stubbed out in the original and is
  # intentionally omitted here as well.)
  return(model_data)
}
| /R/createModelData.R | no_license | sailendramishra/blatent | R | false | false | 528 | r | createModelData = function(specs){
data = specs$inputData[specs$observedVariables]
# append data with latent variables, if any
if (specs$nLatents > 0){
data = cbind(data, matrix(data = 0, nrow = specs$nUnits, ncol = specs$nLatents))
names(data)[(specs$nObservedVariables+1):(specs$nObservedVariables+specs$nLatents)] = specs$latentVariables
}
# if (specs$nCategoricalLatents > 0){
# data = cbind(data, rep(0, specs$nUnits))
# names(data)[length(names(data))] = "classnumber_"
# }
return(data)
}
|
.Random.seed <-
c(403L, 10L, 446860401L, 1169110691L, -1182745614L, 1232461636L,
-1441968121L, 204115421L, 1093466836L, 4589050L, -1489000123L,
627994015L, -1576759514L, -1842585680L, -1998144429L, -1857652879L,
466734752L, -1401286082L, -1485276855L, 1263904763L, 1649961930L,
-1200011988L, -1215858385L, 1808345061L, -2071873892L, -880680206L,
-480705683L, 1021058215L, -1365657906L, 112036904L, -29249493L,
100721225L, 1543195480L, -605649402L, -419950623L, 895335795L,
-1929865758L, 1394268500L, 192874583L, -256936883L, -1332361244L,
-1290594870L, 1518173237L, -589243761L, -1595862122L, 1262034752L,
385036803L, 41371681L, -1742740720L, 459636974L, -1991073639L,
-942406357L, 1998863386L, -929657124L, 922822367L, -252489547L,
-941451700L, -1795327326L, 1760043517L, -949441801L, 71973918L,
1465480920L, -2032620997L, -1482373159L, 969641320L, 89586838L,
1487010321L, -149785661L, 897486802L, -597996956L, 755468711L,
-460642563L, -932994316L, -1291049510L, -637601499L, -1059987393L,
1460985798L, -229030832L, 815556723L, -329405295L, -705934144L,
29359774L, 132633193L, -1104588133L, 438902698L, 1922083788L,
1324344975L, -1479166267L, -1100382596L, -1397566766L, 594204237L,
-1161725177L, -1233666322L, -211289144L, -1636660021L, -1153338071L,
-1161292424L, 471632550L, 1036938433L, 476351827L, -638252542L,
537629492L, -1498742601L, 1106290861L, 68015556L, -2082724374L,
-1907017131L, -1995783185L, 366175222L, -1532308192L, -1525875997L,
180542337L, -1914749456L, -197696370L, -474862599L, 912837003L,
-2113112646L, 1394990268L, -2112933889L, -747516587L, -1724354964L,
326838978L, -32756451L, 1331471255L, 880097022L, -1305877320L,
-2040834405L, 1987835129L, -673937848L, -2096864906L, 1973831857L,
-1157067549L, -729629774L, -819764220L, 248358727L, -876315107L,
678106260L, -1725434822L, 1389514373L, -877875361L, -502468634L,
-1169964432L, 2019042707L, -46616015L, -1180258080L, 646506878L,
-1182233591L, 2027576379L, -1675566582L, 606530028L, -1111970193L,
1756627365L, 1372330332L, -1721760462L, -169133011L, -424815897L,
1565382670L, 666946024L, 174394347L, -2129513335L, -410701800L,
1401319238L, 187051425L, 411067443L, -1557124702L, -240284524L,
-1363105001L, -1227037427L, -2018520028L, -282557430L, 183245173L,
617758927L, -1205439530L, 461925248L, -163782717L, 1819269473L,
-290017328L, 114140974L, 2063973081L, -524248853L, 2031577434L,
1411035932L, 877651487L, 60341877L, -1302226164L, 1702008546L,
-1665827907L, 897162295L, 244731614L, 1134766616L, 2052552059L,
-21779175L, 673739176L, -1389460138L, -620071471L, -1700655741L,
1038111762L, -1131339868L, 961361767L, 1109157053L, -757115852L,
-270362726L, -1638087707L, 1520123775L, 1043783430L, -1037035376L,
2000990515L, 1171171025L, 1687893120L, -1897319074L, -1420457303L,
588289371L, -61372566L, 390778380L, -59357617L, 1111624453L,
-1503917124L, 146775698L, -1780777587L, -691950137L, -1133539666L,
-138254584L, -56593141L, -430373911L, 933835960L, -44522650L,
731432961L, 1769576083L, -1094702782L, -841722820L, 716479346L,
776390256L, 537947172L, 1894343480L, 1725041818L, -1863095600L,
1636692156L, 1597697684L, 27754114L, 85326016L, -2099795140L,
1213725840L, -1435152862L, -107454456L, -866982644L, 855487644L,
853839378L, -1598505088L, -1297029612L, 762724904L, 1292787002L,
1951445456L, -2101601172L, 1902205236L, 541539986L, 1555895136L,
578347852L, 599738336L, 132618146L, -1078810712L, -1596834420L,
767201468L, 738824562L, 794062064L, 1141297220L, -1534154408L,
-1920717254L, 1616579440L, -677394372L, -914557164L, -1584419390L,
1803014880L, 1835614076L, -269263152L, 874909858L, 2016597032L,
700028172L, 761393084L, -1831718606L, -1265012288L, -703796684L,
-1818033208L, -804327238L, 1923271056L, -848074260L, -1326515628L,
-132171630L, -355018976L, 1895887308L, 1467037152L, -1596021054L,
-171303896L, 1978825388L, -714922820L, -1422216718L, -1933131472L,
-1262912348L, -995971400L, -939725478L, 377986448L, -1736754308L,
962703636L, -812313790L, -1609745664L, -1419352132L, -103127216L,
-1567513822L, 1224955208L, 1724674572L, -590112676L, -531364654L,
-1658232320L, 2120242516L, -1889091096L, -337187846L, 427626704L,
-1099977428L, -1598094604L, -553098862L, 2106053728L, -1647735540L,
1945842400L, -749825694L, 1683765928L, 932323916L, -1401030276L,
2017590578L, 166547568L, 690990916L, 438674904L, 819559546L,
-1910489552L, -697658692L, 1990960980L, -1453374398L, -925516384L,
-1927402820L, 1787023824L, 1135445346L, 1077153768L, -1096007732L,
1207367420L, 332028402L, 1878528128L, 1209838836L, 1023217288L,
-574259398L, 1711364944L, 1277390444L, 1054689428L, 1472665938L,
-1165486368L, -890643764L, 1824653472L, 1829243778L, 1368122088L,
-526695700L, -763974596L, -1730233742L, 1147798640L, -1754431964L,
-1875141576L, -733411302L, -667977008L, -2125439940L, -1736104812L,
-150889342L, 1832230848L, -1210158788L, -33555824L, -331353054L,
1742410760L, 410478732L, 1319527452L, 2027667090L, 2038842496L,
553209876L, 421633320L, 477732410L, -1851372336L, -1505074068L,
1910256308L, -1068064750L, -1419569184L, 314185676L, -1113453088L,
-27228638L, 706429224L, 1963432204L, -135043524L, 189231346L,
1023603440L, -580500668L, 1062920792L, -1154332102L, 1754824432L,
-775064388L, -1342941676L, 1384495298L, 356092000L, 1802327420L,
636900560L, 439360546L, 457589416L, 2072094604L, -796151364L,
-1444424654L, -2144905408L, 794126516L, -981124280L, 861051578L,
1374017296L, 1829585132L, 670487636L, 277733138L, -658941024L,
141131724L, -295060640L, -379301694L, -57301464L, -87186004L,
143318844L, -1473902734L, -674030800L, -275234268L, -168110024L,
59106522L, 403696016L, -2050029828L, 789704596L, 2147150658L,
-597335552L, -1220036036L, -1460142512L, 898267810L, -10822840L,
-1698307572L, 1953803484L, 26106450L, 2112292992L, 851385172L,
309836264L, 414468858L, 635402064L, 1866153004L, 1222915444L,
-1504475502L, 1509046880L, -279395700L, 1449851744L, -399056158L,
-1313980248L, -868876084L, -452802052L, 1843433394L, 700995056L,
-1062824180L, -1471965475L, 1268723687L, -253008880L, 1637083438L,
-35451429L, -148528451L, -283359190L, 1819947208L, -1405651471L,
513241955L, 798688100L, 1612315282L, 1907824487L, -898889423L,
940798582L, 281023380L, -1743906907L, -1859827537L, 327353704L,
856851782L, -1471896333L, 330406485L, 297678386L, 1605800960L,
-1138657047L, 800804699L, -1334736468L, 560937402L, -883497105L,
-1423927047L, 478043534L, 969838684L, -254725427L, 615482935L,
-1822393152L, -1518963810L, 1185259627L, 26861549L, 1291469402L,
-1320155880L, 1322480257L, 294846515L, -1364916652L, -1683434910L,
-1928850185L, 1384566593L, -1011454522L, 787660548L, 588413621L,
-1382795873L, 259246584L, -688961578L, 681595491L, 1078954533L,
207598018L, -410831184L, 36646617L, -1850623925L, -1443895620L,
1724220650L, 1383026975L, -2026806871L, -1161902274L, -2095435220L,
161374333L, -1483914553L, 1754463152L, -1703531058L, 547626747L,
733373789L, 33566346L, 826698920L, 1031190545L, -1757771517L,
264791044L, -254079950L, 140633223L, 1936371921L, -221617578L,
-810464716L, 610627909L, -2034676849L, -378245368L, -2063476442L,
460392915L, 1740596021L, 1914270354L, -235428256L, 706453449L,
1054396155L, 1725698252L, 171706714L, 1410032719L, 1379723545L,
-413134034L, -1684800196L, -983962963L, 1761207639L, -1882712928L,
991699454L, -482669365L, -468710707L, 2133485050L, 733349432L,
1125075937L, -678259565L, 1179651764L, 1718121410L, -1420261417L,
1553319969L, 199213030L, 384752356L, -596480491L, 109753919L,
-1156438568L, 1293700086L, 598197251L, 409855813L, -341871774L,
797514960L, -203601927L, -164541653L, 1679753372L, 7741130L,
-1540290625L, 717595401L, 24632350L, -2125961780L, 581889949L,
885619239L, 818436176L, -2006433554L, 1467288987L, -957942787L,
1960824554L, -1098966648L, 1183872817L, 1256892323L, 316075940L,
-2126919086L, 516399527L, 2000475633L, -787013322L, 1018771924L,
-109424795L, 1619459951L, -1293860440L, 1288132358L, -1993141709L,
-1151446891L, 1629614322L, 866183488L, 675649193L, -736022757L,
-162137876L, -813345542L, 625646895L, 1314163001L, -2062402738L,
-819723876L, -835790067L, 1044458871L, 726935936L, 370503646L,
240179371L, 2140244909L, 1628041242L, -463608104L, -2032382427L
)
| /Lab3/R/Lab 3-internal.R | no_license | Kresch/Lab3 | R | false | false | 8,241 | r | .Random.seed <-
c(403L, 10L, 446860401L, 1169110691L, -1182745614L, 1232461636L,
-1441968121L, 204115421L, 1093466836L, 4589050L, -1489000123L,
627994015L, -1576759514L, -1842585680L, -1998144429L, -1857652879L,
466734752L, -1401286082L, -1485276855L, 1263904763L, 1649961930L,
-1200011988L, -1215858385L, 1808345061L, -2071873892L, -880680206L,
-480705683L, 1021058215L, -1365657906L, 112036904L, -29249493L,
100721225L, 1543195480L, -605649402L, -419950623L, 895335795L,
-1929865758L, 1394268500L, 192874583L, -256936883L, -1332361244L,
-1290594870L, 1518173237L, -589243761L, -1595862122L, 1262034752L,
385036803L, 41371681L, -1742740720L, 459636974L, -1991073639L,
-942406357L, 1998863386L, -929657124L, 922822367L, -252489547L,
-941451700L, -1795327326L, 1760043517L, -949441801L, 71973918L,
1465480920L, -2032620997L, -1482373159L, 969641320L, 89586838L,
1487010321L, -149785661L, 897486802L, -597996956L, 755468711L,
-460642563L, -932994316L, -1291049510L, -637601499L, -1059987393L,
1460985798L, -229030832L, 815556723L, -329405295L, -705934144L,
29359774L, 132633193L, -1104588133L, 438902698L, 1922083788L,
1324344975L, -1479166267L, -1100382596L, -1397566766L, 594204237L,
-1161725177L, -1233666322L, -211289144L, -1636660021L, -1153338071L,
-1161292424L, 471632550L, 1036938433L, 476351827L, -638252542L,
537629492L, -1498742601L, 1106290861L, 68015556L, -2082724374L,
-1907017131L, -1995783185L, 366175222L, -1532308192L, -1525875997L,
180542337L, -1914749456L, -197696370L, -474862599L, 912837003L,
-2113112646L, 1394990268L, -2112933889L, -747516587L, -1724354964L,
326838978L, -32756451L, 1331471255L, 880097022L, -1305877320L,
-2040834405L, 1987835129L, -673937848L, -2096864906L, 1973831857L,
-1157067549L, -729629774L, -819764220L, 248358727L, -876315107L,
678106260L, -1725434822L, 1389514373L, -877875361L, -502468634L,
-1169964432L, 2019042707L, -46616015L, -1180258080L, 646506878L,
-1182233591L, 2027576379L, -1675566582L, 606530028L, -1111970193L,
1756627365L, 1372330332L, -1721760462L, -169133011L, -424815897L,
1565382670L, 666946024L, 174394347L, -2129513335L, -410701800L,
1401319238L, 187051425L, 411067443L, -1557124702L, -240284524L,
-1363105001L, -1227037427L, -2018520028L, -282557430L, 183245173L,
617758927L, -1205439530L, 461925248L, -163782717L, 1819269473L,
-290017328L, 114140974L, 2063973081L, -524248853L, 2031577434L,
1411035932L, 877651487L, 60341877L, -1302226164L, 1702008546L,
-1665827907L, 897162295L, 244731614L, 1134766616L, 2052552059L,
-21779175L, 673739176L, -1389460138L, -620071471L, -1700655741L,
1038111762L, -1131339868L, 961361767L, 1109157053L, -757115852L,
-270362726L, -1638087707L, 1520123775L, 1043783430L, -1037035376L,
2000990515L, 1171171025L, 1687893120L, -1897319074L, -1420457303L,
588289371L, -61372566L, 390778380L, -59357617L, 1111624453L,
-1503917124L, 146775698L, -1780777587L, -691950137L, -1133539666L,
-138254584L, -56593141L, -430373911L, 933835960L, -44522650L,
731432961L, 1769576083L, -1094702782L, -841722820L, 716479346L,
776390256L, 537947172L, 1894343480L, 1725041818L, -1863095600L,
1636692156L, 1597697684L, 27754114L, 85326016L, -2099795140L,
1213725840L, -1435152862L, -107454456L, -866982644L, 855487644L,
853839378L, -1598505088L, -1297029612L, 762724904L, 1292787002L,
1951445456L, -2101601172L, 1902205236L, 541539986L, 1555895136L,
578347852L, 599738336L, 132618146L, -1078810712L, -1596834420L,
767201468L, 738824562L, 794062064L, 1141297220L, -1534154408L,
-1920717254L, 1616579440L, -677394372L, -914557164L, -1584419390L,
1803014880L, 1835614076L, -269263152L, 874909858L, 2016597032L,
700028172L, 761393084L, -1831718606L, -1265012288L, -703796684L,
-1818033208L, -804327238L, 1923271056L, -848074260L, -1326515628L,
-132171630L, -355018976L, 1895887308L, 1467037152L, -1596021054L,
-171303896L, 1978825388L, -714922820L, -1422216718L, -1933131472L,
-1262912348L, -995971400L, -939725478L, 377986448L, -1736754308L,
962703636L, -812313790L, -1609745664L, -1419352132L, -103127216L,
-1567513822L, 1224955208L, 1724674572L, -590112676L, -531364654L,
-1658232320L, 2120242516L, -1889091096L, -337187846L, 427626704L,
-1099977428L, -1598094604L, -553098862L, 2106053728L, -1647735540L,
1945842400L, -749825694L, 1683765928L, 932323916L, -1401030276L,
2017590578L, 166547568L, 690990916L, 438674904L, 819559546L,
-1910489552L, -697658692L, 1990960980L, -1453374398L, -925516384L,
-1927402820L, 1787023824L, 1135445346L, 1077153768L, -1096007732L,
1207367420L, 332028402L, 1878528128L, 1209838836L, 1023217288L,
-574259398L, 1711364944L, 1277390444L, 1054689428L, 1472665938L,
-1165486368L, -890643764L, 1824653472L, 1829243778L, 1368122088L,
-526695700L, -763974596L, -1730233742L, 1147798640L, -1754431964L,
-1875141576L, -733411302L, -667977008L, -2125439940L, -1736104812L,
-150889342L, 1832230848L, -1210158788L, -33555824L, -331353054L,
1742410760L, 410478732L, 1319527452L, 2027667090L, 2038842496L,
553209876L, 421633320L, 477732410L, -1851372336L, -1505074068L,
1910256308L, -1068064750L, -1419569184L, 314185676L, -1113453088L,
-27228638L, 706429224L, 1963432204L, -135043524L, 189231346L,
1023603440L, -580500668L, 1062920792L, -1154332102L, 1754824432L,
-775064388L, -1342941676L, 1384495298L, 356092000L, 1802327420L,
636900560L, 439360546L, 457589416L, 2072094604L, -796151364L,
-1444424654L, -2144905408L, 794126516L, -981124280L, 861051578L,
1374017296L, 1829585132L, 670487636L, 277733138L, -658941024L,
141131724L, -295060640L, -379301694L, -57301464L, -87186004L,
143318844L, -1473902734L, -674030800L, -275234268L, -168110024L,
59106522L, 403696016L, -2050029828L, 789704596L, 2147150658L,
-597335552L, -1220036036L, -1460142512L, 898267810L, -10822840L,
-1698307572L, 1953803484L, 26106450L, 2112292992L, 851385172L,
309836264L, 414468858L, 635402064L, 1866153004L, 1222915444L,
-1504475502L, 1509046880L, -279395700L, 1449851744L, -399056158L,
-1313980248L, -868876084L, -452802052L, 1843433394L, 700995056L,
-1062824180L, -1471965475L, 1268723687L, -253008880L, 1637083438L,
-35451429L, -148528451L, -283359190L, 1819947208L, -1405651471L,
513241955L, 798688100L, 1612315282L, 1907824487L, -898889423L,
940798582L, 281023380L, -1743906907L, -1859827537L, 327353704L,
856851782L, -1471896333L, 330406485L, 297678386L, 1605800960L,
-1138657047L, 800804699L, -1334736468L, 560937402L, -883497105L,
-1423927047L, 478043534L, 969838684L, -254725427L, 615482935L,
-1822393152L, -1518963810L, 1185259627L, 26861549L, 1291469402L,
-1320155880L, 1322480257L, 294846515L, -1364916652L, -1683434910L,
-1928850185L, 1384566593L, -1011454522L, 787660548L, 588413621L,
-1382795873L, 259246584L, -688961578L, 681595491L, 1078954533L,
207598018L, -410831184L, 36646617L, -1850623925L, -1443895620L,
1724220650L, 1383026975L, -2026806871L, -1161902274L, -2095435220L,
161374333L, -1483914553L, 1754463152L, -1703531058L, 547626747L,
733373789L, 33566346L, 826698920L, 1031190545L, -1757771517L,
264791044L, -254079950L, 140633223L, 1936371921L, -221617578L,
-810464716L, 610627909L, -2034676849L, -378245368L, -2063476442L,
460392915L, 1740596021L, 1914270354L, -235428256L, 706453449L,
1054396155L, 1725698252L, 171706714L, 1410032719L, 1379723545L,
-413134034L, -1684800196L, -983962963L, 1761207639L, -1882712928L,
991699454L, -482669365L, -468710707L, 2133485050L, 733349432L,
1125075937L, -678259565L, 1179651764L, 1718121410L, -1420261417L,
1553319969L, 199213030L, 384752356L, -596480491L, 109753919L,
-1156438568L, 1293700086L, 598197251L, 409855813L, -341871774L,
797514960L, -203601927L, -164541653L, 1679753372L, 7741130L,
-1540290625L, 717595401L, 24632350L, -2125961780L, 581889949L,
885619239L, 818436176L, -2006433554L, 1467288987L, -957942787L,
1960824554L, -1098966648L, 1183872817L, 1256892323L, 316075940L,
-2126919086L, 516399527L, 2000475633L, -787013322L, 1018771924L,
-109424795L, 1619459951L, -1293860440L, 1288132358L, -1993141709L,
-1151446891L, 1629614322L, 866183488L, 675649193L, -736022757L,
-162137876L, -813345542L, 625646895L, 1314163001L, -2062402738L,
-819723876L, -835790067L, 1044458871L, 726935936L, 370503646L,
240179371L, 2140244909L, 1628041242L, -463608104L, -2032382427L
)
|
library(ggplot2)
library(ggdendro)
library(ggfortify)
library(RColorBrewer)
library(R6)
library(MASS)
# Collection of multivariate visualisation helpers (PCA, MDS, dendrogram,
# heatmap) bundled in an R6 class so they can share a common plot style.
MultivarVis <- R6Class(
  # Classname was missing in the original call; adding it gives instances a
  # proper class attribute without changing any method behaviour.
  "MultivarVis",
  public = list(

    # Apply the shared theme to a ggplot object and return it.
    style_plt = function(plt) {
      palette <- "Dark2"   # used only by the (currently disabled) brewer scales
      plt <- plt +
        theme_classic()
      # Brewer palettes intentionally disabled for now:
      # scale_color_brewer(palette=palette) +
      # scale_fill_brewer(palette=palette)
      plt
    },

    # PCA scatter plot of the samples (columns) of `expr_m`.
    #
    # expr_m       numeric matrix, features in rows, samples in columns;
    #              rows containing NA are dropped before PCA.
    # color_factor vector used to colour the samples.
    # pcs          length-2 vector selecting which PCs to plot.
    # label        optional sample labels; when given, text labels are drawn
    #              instead of point shapes.
    # Inspired by:
    # https://cran.r-project.org/web/packages/ggfortify/vignettes/plot_pca.html
    pca = function(expr_m, color_factor, title="PCA", pcs=c(1,2), label=NULL) {
      expr_m_nona <- expr_m[complete.cases(expr_m), ]
      pca_obj <- prcomp(t(expr_m_nona), scale=TRUE, center=TRUE)
      style_df <- data.frame(color=color_factor)
      if (!is.null(label)) {
        rownames(style_df) <- make.names(label, unique=TRUE)
      }
      # autoplot() draws point shapes unless text labels are requested.
      if (is.null(label)) shape <- NULL else shape <- FALSE
      plt <- autoplot(
        pca_obj,
        data=style_df,
        colour="color",
        label=!is.null(label),
        shape=shape,
        loadings=FALSE,
        loadings.label=FALSE,
        x=pcs[1],
        y=pcs[2]
      ) + ggtitle(title)
      plt <- self$style_plt(plt)
      return(plt)
    },

    # Classical MDS plot of the samples, drawn as text labels coloured by
    # `levels`. comp1/comp2 select which MDS dimensions are shown.
    plotMDS = function(expr_m, levels, comp1=1, comp2=2, title="no title") {
      labels <- colnames(expr_m)
      d <- stats::dist(scale(t(stats::na.omit(expr_m)), center=TRUE, scale=TRUE))
      # BUGFIX: compute enough dimensions for the requested components
      # (was hard-coded k=2, so comp1/comp2 > 2 failed) ...
      fit <- stats::cmdscale(d, eig=TRUE, k=max(comp1, comp2))
      x <- fit$points[, comp1]
      y <- fit$points[, comp2]
      graphics::plot(x, y, type="n", main=title, xlab="", ylab="")
      # ... and BUGFIX: draw the *requested* components; the original text()
      # call hard-coded columns 1 and 2, ignoring comp1/comp2.
      graphics::text(x, y, col=levels, labels=labels)
    },

    # Fraction of total variance captured by each principal component,
    # returned as a named numeric vector (names = PC labels).
    get_component_fraction = function(expr_m) {
      expr_m_nona <- expr_m[complete.cases(expr_m),]
      pca_object <- prcomp(t(expr_m_nona), scale=TRUE, center=TRUE)
      percentVar <- pca_object$sdev^2 / sum(pca_object$sdev^2)
      names(percentVar) <- colnames(pca_object$x)
      return(percentVar)
    },

    # Bar plot of per-component variance percentages; `max_comps`, when
    # given, limits the number of components shown.
    plot_component_fraction = function(expr_m, max_comps=NULL) {
      comp_perc <- self$get_component_fraction(expr_m) * 100
      if (!is.null(max_comps)) {
        comp_perc <- head(comp_perc, max_comps)
      }
      plot_df <- data.frame(x=as.character(seq_along(comp_perc)), y=comp_perc)
      ggplot(plot_df, aes(x, y)) + geom_bar(stat="identity") +
        theme_classic() +
        ggtitle("PCA loadings") + ylab("Variance (%)") + xlab("Principal component")
    },

    # Hierarchical-clustering dendrogram of the samples (columns) of `data_m`,
    # with leaf labels coloured by `color_levels`.
    # NOTE: pick_top_variance is accepted but currently unused (kept for
    # interface compatibility). BUGFIX: its default was the undefined symbol
    # `null`, which errored whenever the argument was evaluated; now NULL.
    dendogram = function(data_m, color_levels, labels=NULL, pick_top_variance=NULL, title="", label_size=3) {
      samples <- colnames(data_m)
      if (is.null(labels)) {
        labels <- samples
      }
      # Drop rows with missing values before clustering.
      expr_m_nona <- data_m[complete.cases(data_m),]
      # Average-linkage clustering on scaled, transposed data (samples in rows).
      scaledTransposedMatrix <- scale(t(expr_m_nona), center=TRUE, scale=TRUE)
      hc <- stats::hclust(stats::dist(scaledTransposedMatrix), "ave")
      dhc <- as.dendrogram(hc)
      # Note - label order is shuffled within this object! Be careful with coloring.
      ddata <- dendro_data(dhc, type="rectangle")
      # Re-map colours and labels to the dendrogram's leaf order.
      cluster_label_order <- match(ddata$labels$label, samples)
      ddata$labels$color <- color_levels[cluster_label_order]
      ddata$labels$label <- labels[cluster_label_order]
      plt <- ggplot(segment(ddata)) +
        geom_segment(aes(x=x, y=y, xend=xend, yend=yend)) +
        theme_dendro() +
        geom_text(data=label(ddata),
                  aes(x=x, y=y, label=label, color=color),
                  vjust=0.5, hjust=0, size=label_size) +
        coord_flip() +
        scale_y_reverse(expand=c(0.2, 0)) +
        scale_x_continuous(expand=c(0,1)) +
        ggtitle(title)
      plt
    },

    # Tile heatmap of `data_m` with `row_annot` as the y-axis annotation.
    table_heatmap = function(data_m, row_annot) {
      # BUGFIX: the original used `%>%` and tidyr::gather(), but this file
      # loads neither dplyr nor tidyr, so the method failed at runtime.
      # Reshape wide -> long in base R instead (column-major vectorisation
      # matches gather()'s stacking of one column per Sample).
      long_df <- data.frame(
        annot  = rep(row_annot, times = ncol(data_m)),
        Sample = rep(colnames(data_m), each = nrow(data_m)),
        Level  = as.vector(as.matrix(data_m))
      )
      # BUGFIX: the border colour belongs outside aes(); mapping the constant
      # "white" created a spurious legend instead of white tile borders.
      plt <- ggplot(long_df, aes(Sample, annot)) +
        geom_tile(aes(fill=Level), color="white") +
        scale_fill_gradient(low="white", high="steelblue")
      plt
    }
  )
)

# Module-level singletons: `multvis` and its short alias `mv`.
multvis <- MultivarVis$new()
mv <- multvis
print("Loading module to 'multvis' and 'mv'")
| /MultivarVis.R | no_license | Jakob37/CraftOmics | R | false | false | 5,179 | r | library(ggplot2)
library(ggdendro)
library(ggfortify)
library(RColorBrewer)
library(R6)
library(MASS)
MultivarVis <- R6Class(
public = list(
style_plt = function(plt) {
palette <- "Dark2"
plt <- plt +
theme_classic()
# scale_color_brewer(palette=palette) +
# scale_fill_brewer(palette=palette)
plt
},
pca = function(expr_m, color_factor, title="PCA", pcs=c(1,2), label=NULL) {
# Inspired by:
# https://cran.r-project.org/web/packages/ggfortify/vignettes/plot_pca.html
expr_m_nona <- expr_m[complete.cases(expr_m), ]
pca_obj <- prcomp(t(expr_m_nona), scale=TRUE, center=TRUE)
style_df <- data.frame(color=color_factor)
if (!is.null(label)) {
rownames(style_df) <- make.names(label, unique=TRUE)
}
if (is.null(label)) shape <- NULL else shape <- FALSE
plt <- autoplot(
pca_obj,
data=style_df,
colour="color",
label=!is.null(label),
shape=shape,
loadings=FALSE,
loadings.label=FALSE,
x=pcs[1],
y=pcs[2]
) + ggtitle(title)
plt <- self$style_plt(plt)
return(plt)
},
plotMDS = function(expr_m, levels, comp1=1, comp2=2, title="no title") {
labels <- colnames(expr_m)
d <- stats::dist(scale(t(stats::na.omit(expr_m)), center=TRUE, scale=TRUE))
fit <- stats::cmdscale(d, eig=TRUE, k=2)
x <- fit$points[, comp1]
y <- fit$points[, comp2]
graphics::plot(x, y, type="n", main=title, xlab="", ylab="")
graphics::text(fit$points[, 1], fit$points[, 2], col=levels, labels=labels)
},
get_component_fraction = function(expr_m) {
# Retrieves a vector with percentage contributions for each PC
expr_m_nona <- expr_m[complete.cases(expr_m),]
pca_object <- prcomp(t(expr_m_nona), scale=TRUE, center=TRUE)
# pca_object <- self$get_pca_object(expr_m)
percentVar <- pca_object$sdev^2 / sum(pca_object$sdev^2 )
names(percentVar) <- colnames(pca_object$x)
return(percentVar)
},
plot_component_fraction = function(expr_m, max_comps=NULL) {
# Directly outputs the PCA numbers together with PC fractions
comp_perc <- self$get_component_fraction(expr_m) * 100
if (!is.null(max_comps)) {
comp_perc <- head(comp_perc, max_comps)
}
plot_df <- data.frame(x=paste0("", seq_len(length(comp_perc))), y=comp_perc)
ggplot(plot_df, aes(x, y)) + geom_bar(stat="identity") +
theme_classic() +
ggtitle("PCA loadings") + ylab("Variance (%)") + xlab("Principal component")
# plot(100 * comp_perc, main="PC loadings", xlab="PC", ylab="Perc. var")
},
dendogram = function(data_m, color_levels, labels=NULL, pick_top_variance=null, title="", label_size=3) {
samples <- colnames(data_m)
if (is.null(labels)) {
labels <- samples
}
# Setup data
expr_m_nona <- data_m[complete.cases(data_m),]
# Calculate tree
scaledTransposedMatrix <- scale(t(expr_m_nona), center=TRUE, scale=TRUE)
hc <- stats::hclust(stats::dist(scaledTransposedMatrix), "ave")
dhc <- as.dendrogram(hc)
# Note - Label order is shuffled within this object! Be careful with coloring.
ddata <- dendro_data(dhc, type="rectangle")
# Prepare for plotting
cluster_label_order <- match(ddata$labels$label, samples)
ddata$labels$color <- color_levels[cluster_label_order]
ddata$labels$label <- labels[cluster_label_order]
# Visualize
plt <- ggplot(segment(ddata)) +
geom_segment(aes(x=x, y=y, xend=xend, yend=yend)) +
theme_dendro() +
geom_text(data=label(ddata),
aes(x=x, y=y, label=label, color=color),
vjust=0.5, hjust=0, size=label_size) +
coord_flip() +
scale_y_reverse(expand=c(0.2, 0)) +
scale_x_continuous(expand=c(0,1)) +
ggtitle(title)
plt
},
table_heatmap = function(data_m, row_annot) {
df <- cbind(annot=row_annot, data_m)
df.melt <- df %>% gather("Sample", "Level", -annot)
plt <- ggplot(df.melt, aes(Sample, annot)) +
geom_tile(aes(fill=Level, color="white")) +
scale_fill_gradient(low="white", high="steelblue")
plt
}
)
)
multvis <- MultivarVis$new()
mv <- multvis
print("Loading module to 'multvis' and 'mv'")
|
# Load the two raw car-listing CSV files (paths are relative to the current
# working directory).
Dts<- read.csv("coches.csv")
Dts1<- read.csv("coches1.csv")
## data input ####
# Magic constant; its purpose is not evident from this script -- TODO confirm.
d <-78
## cleaning ####
## outcomes ####
| /Extras/r.R | no_license | javiargos/Proyecto_ventacoches_OP | R | false | false | 128 | r | Dts<- read.csv("coches.csv")
Dts1<- read.csv("coches1.csv")
## entrada de datos ####
d <-78
## limpieza ####
## outcomes ####
|
# Visual-regression and unit tests for latent_plot() and its helpers.
context("Latent plots function")
# Fixed seed so any randomness in plot generation is reproducible across runs.
set.seed(2323)
# Warnings are silenced globally for this file and restored at the end of the
# file with options(warn=0); consider withr::local_options() instead.
options(warn=-1)
require(lavaan)

test_that("latent_plot works", {
  # vdiffr snapshot tests: rendered plots are compared against stored SVGs.
  vdiffr::expect_doppelganger("simple latent_plot",latent_plot(fit_bollen, formula = Eta2 ~ Eta1))
  # Without a formula, latent_plot() returns a list of plots; test the second.
  vdiffr::expect_doppelganger("multiple latent_plots",suppressMessages(latent_plot(fit_bollen)[[2]]))
})

test_that("return_alpha works", {
  # alpha and color arguments should pass through to the rendered plot.
  vdiffr::expect_doppelganger("alpha adjustment latent_plot",
                              latent_plot(fit_bollen, formula = Eta2 ~ Eta1, alpha = .6))
  vdiffr::expect_doppelganger("color adjustment latent_plot",
                              latent_plot(fit_bollen, formula = Eta2 ~ Eta1, color = "blue"))
})

test_that("check_for_sd_true works", {
  # With the first flag FALSE, no latent should be flagged (sum of zeros).
  expect_true(sum(check_for_sd_true(F, flexplavaan_to_lavaan(fit_bollen), "Eta1"))==0)
  expect_true(sum(check_for_sd_true(F, flexplavaan_to_lavaan(fit_bollen), c("Eta1", "Eta2")))==0)
})

test_that("beta_to_flexplot works", {
  # The formula built from the fitted model should have Eta2 as the response.
  expect_true(paste(beta_to_flexplot(flexplavaan_to_lavaan(fit_bollen), data.frame(lavPredict(flexplavaan_to_lavaan(fit_bollen))))[[2]])[2] == "Eta2")
  # return_dvs = TRUE should report the index of the dependent latent.
  expect_true(beta_to_flexplot(flexplavaan_to_lavaan(fit_bollen), data.frame(lavPredict(flexplavaan_to_lavaan(fit_bollen))), return_dvs = T)[1] == 1)
  #expect_true(paste(beta_to_flexplot(sem_a, data.frame(lavPredict(sem_a))))[2] == "latent_x")
  #expect_true(beta_to_flexplot(sem_a, data.frame(lavPredict(sem_a)), return_dvs = T)[1] == 1)
})

test_that("get_dv_iv works", {
  expect_true(get_dv_iv(1, flexplavaan_to_lavaan(fit_bollen)@Model@GLIST$beta)==3)
  # for negative loadings
  expect_true(length(get_dv_iv(3, flexplavaan_to_lavaan(health)@Model@GLIST$beta))==0)
})

# test_that("check_data_has_observed works", {
#   d = data.frame(lavPredict(flexplavaan_to_lavaan(health)))
#   se_data = estimate_standard_errors(1, flexplavaan_to_lavaan(health))$sd_imp
#   d_new = cbind(d, se_data)
#   head(check_data_has_observed(d_new, "internet", "health", flexplavaan_to_lavaan(health)))
#   head(d_new)
#   #expect_true(ncol(check_data_has_observed(d_new, "internet", "health", flexplavaan_to_lavaan(health)))==3)
#   #expect_true(ncol(check_data_has_observed(d_new, "internet", "CESD", flexplavaan_to_lavaan(health)))==17)
# })

test_that("get_endogenous_names works", {
  expect_true(get_endogenous_names(flexplavaan_to_lavaan(fit_bollen))[1] == "Eta1")
  expect_true(get_endogenous_names(sem_a)[1] == "latent_x")
})
options(warn=0) | /tests/testthat/testLatent_Plot.R | no_license | lnsongxf/flexplavaan | R | false | false | 2,429 | r | context("Latent plots function")
# Fix the RNG seed so any stochastic plot content is reproducible across
# runs, and silence warnings for the whole file (restored with
# options(warn = 0) at the bottom of the original script).
set.seed(2323)
options(warn=-1)
require(lavaan)

# Visual regression tests: vdiffr::expect_doppelganger() renders the plot
# and compares it against a stored SVG snapshot.
test_that("latent_plot works", {
  vdiffr::expect_doppelganger("simple latent_plot",latent_plot(fit_bollen, formula = Eta2 ~ Eta1))
  vdiffr::expect_doppelganger("multiple latent_plots",suppressMessages(latent_plot(fit_bollen)[[2]]))
})

# alpha/color arguments should be forwarded to the underlying plot.
test_that("return_alpha works", {
  vdiffr::expect_doppelganger("alpha adjustment latent_plot",
                              latent_plot(fit_bollen, formula = Eta2 ~ Eta1, alpha = .6))
  vdiffr::expect_doppelganger("color adjustment latent_plot",
                              latent_plot(fit_bollen, formula = Eta2 ~ Eta1, color = "blue"))
})

# NOTE(review): F is used for FALSE below; prefer spelling out FALSE
# (T/F are ordinary reassignable bindings in R).
test_that("check_for_sd_true works", {
  expect_true(sum(check_for_sd_true(F, flexplavaan_to_lavaan(fit_bollen), "Eta1"))==0)
  expect_true(sum(check_for_sd_true(F, flexplavaan_to_lavaan(fit_bollen), c("Eta1", "Eta2")))==0)
})

# beta_to_flexplot() builds a flexplot formula from the model's beta
# matrix; with return_dvs = TRUE it returns the DV indices instead.
test_that("beta_to_flexplot works", {
  expect_true(paste(beta_to_flexplot(flexplavaan_to_lavaan(fit_bollen), data.frame(lavPredict(flexplavaan_to_lavaan(fit_bollen))))[[2]])[2] == "Eta2")
  expect_true(beta_to_flexplot(flexplavaan_to_lavaan(fit_bollen), data.frame(lavPredict(flexplavaan_to_lavaan(fit_bollen))), return_dvs = T)[1] == 1)
  #expect_true(paste(beta_to_flexplot(sem_a, data.frame(lavPredict(sem_a))))[2] == "latent_x")
  #expect_true(beta_to_flexplot(sem_a, data.frame(lavPredict(sem_a)), return_dvs = T)[1] == 1)
})

# get_dv_iv() reads predictor indices out of the lavaan beta matrix.
test_that("get_dv_iv works", {
  expect_true(get_dv_iv(1, flexplavaan_to_lavaan(fit_bollen)@Model@GLIST$beta)==3)
  # for negative loadings
  expect_true(length(get_dv_iv(3, flexplavaan_to_lavaan(health)@Model@GLIST$beta))==0)
})

# test_that("check_data_has_observed works", {
#   d = data.frame(lavPredict(flexplavaan_to_lavaan(health)))
#   se_data = estimate_standard_errors(1, flexplavaan_to_lavaan(health))$sd_imp
#   d_new = cbind(d, se_data)
#   head(check_data_has_observed(d_new, "internet", "health", flexplavaan_to_lavaan(health)))
#   head(d_new)
#   #expect_true(ncol(check_data_has_observed(d_new, "internet", "health", flexplavaan_to_lavaan(health)))==3)
#   #expect_true(ncol(check_data_has_observed(d_new, "internet", "CESD", flexplavaan_to_lavaan(health)))==17)
# })

# get_endogenous_names() should return the endogenous latent variable
# names for both converted flexplavaan fits and plain lavaan-style fits.
test_that("get_endogenous_names works", {
  expect_true(get_endogenous_names(flexplavaan_to_lavaan(fit_bollen))[1] == "Eta1")
  expect_true(get_endogenous_names(sem_a)[1] == "latent_x")
})
options(warn=0) |
# Tests for the MTCDataItem class, driven by the packaged example object.
library("testthat")
library(plyr)
library(dplyr)

data("example_mtc_data_item")

#===============================================================================
context("summary")

# summary() on an MTCDataItem should report the source path, record
# count, time span and data type; the expectation is assembled by hand
# from the object's slots.
actual_summary = summary(example_mtc_data_item)
expected_summary = data.frame(path = example_mtc_data_item@path,
                              Records = nrow(example_mtc_data_item@data),
                              start = min(example_mtc_data_item@data$timestamp),
                              end = max(example_mtc_data_item@data$timestamp),
                              data_type = example_mtc_data_item@data_type)
expect_equal(expected_summary, actual_summary)

#===============================================================================
context("getData - MTCDataItem")

# getData() should return the @data slot as a plain data.frame.
data_item_data = getData(example_mtc_data_item)
expected_data_item_data = data.frame(example_mtc_data_item@data)
expect_equal(data_item_data, expected_data_item_data)
| /mtconnectR/tests/testthat/test-MTCDataItem.R | no_license | ingted/R-Examples | R | false | false | 935 | r |
# Tests for the MTCDataItem class, driven by the packaged example object.
library("testthat")
library(plyr)
library(dplyr)

data("example_mtc_data_item")

#===============================================================================
context("summary")

# summary() on an MTCDataItem should report the source path, record
# count, time span and data type; the expectation is assembled by hand
# from the object's slots.
actual_summary = summary(example_mtc_data_item)
expected_summary = data.frame(path = example_mtc_data_item@path,
                              Records = nrow(example_mtc_data_item@data),
                              start = min(example_mtc_data_item@data$timestamp),
                              end = max(example_mtc_data_item@data$timestamp),
                              data_type = example_mtc_data_item@data_type)
expect_equal(expected_summary, actual_summary)

#===============================================================================
context("getData - MTCDataItem")

# getData() should return the @data slot as a plain data.frame.
data_item_data = getData(example_mtc_data_item)
expected_data_item_data = data.frame(example_mtc_data_item@data)
expect_equal(data_item_data, expected_data_item_data)
|
## makeCacheMatrix: This function creates a special "matrix" object
## that can cache its inverse.
## cacheSolve: This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then the cachesolve
## should retrieve the inverse from the cache.
## Function makeCacheMatrix creates an object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Build a "cache-aware matrix": a list of four accessors closing over
  # the matrix `x` and its (lazily computed) inverse. The cached inverse
  # is invalidated whenever the matrix is replaced via set().
  cachedInverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cachedInverse <<- NULL
    },
    get = function() {
      x
    },
    setInv = function(inverse) {
      cachedInverse <<- inverse
    },
    getInv = function() {
      cachedInverse
    }
  )
}
## cacheSolve calculates the inverse of makeCacheMatrix or retrieves
## it from makeCacheMatrix if already calculated
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
  # object). On a cache miss the inverse is computed with solve(), stored
  # back into `x`, and returned; on a hit the stored value is reused and
  # a message is emitted. Extra arguments are forwarded to solve().
  cached <- x$getInv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInv(cached)
    return(cached)
  }
  message("getting cached matrix")
  cached
}
| /cachematrix.R | no_license | Hagelslag42/ProgrammingAssignment2 | R | false | false | 1,109 | r | ## makeCacheMatrix: This function creates a special "matrix" object
## that can cache its inverse.
## cacheSolve: This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then the cachesolve
## should retrieve the inverse from the cache.
## Function makeCacheMatrix creates an object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  # inv caches the inverse of x; NULL means "not yet computed".
  inv <- NULL
  # Replace the stored matrix and invalidate the cached inverse.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  # Return the stored matrix.
  get <- function() x
  # Store a computed inverse in the cache.
  setInv <- function(inverse) inv <<- inverse
  # Return the cached inverse (NULL if none has been stored).
  getInv <- function() inv
  # The "special matrix" object: a list of the four accessors.
  list(set = set,
       get = get,
       setInv = setInv,
       getInv = getInv)
}
## cacheSolve calculates the inverse of makeCacheMatrix or retrieves
## it from makeCacheMatrix if already calculated
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  # Reuse the cached inverse when one is available.
  inv <- x$getInv()
  if (!is.null(inv)) {
    message("getting cached matrix")
    return(inv)
  }
  # Cache miss: compute the inverse with solve() (extra args forwarded)
  # and store it for subsequent calls.
  mtrx <- x$get()
  inv <- solve(mtrx, ...)
  x$setInv(inv)
  inv
}
|
# Input validation tests ("Eingabe testen"): each malformed argument to
# arma_sim() must raise the matching (German) error message.
# expect_error() matches the message as a regular expression.
test_that("Eingabe testen", {
  # Phi (AR coefficients): numeric vector or NULL, and the AR part must
  # be stationary.
  expect_error(arma_sim(phi=c("a"),sd=1,I=100), "Phi muss ein numerischer Vektor oder NULL sein.")
  expect_error(arma_sim(phi=NA,sd=1,I=100), "Phi muss ein numerischer Vektor oder NULL sein.")
  expect_error(arma_sim(phi = 10, sd = 1, I = 100), "AR-Teil muss stationaer sein.")
  # Theta (MA coefficients): numeric vector or NULL.
  expect_error(arma_sim(theta=c("a"),sd=1,I=100), "Theta muss ein numerischer Vektor oder NULL sein.")
  expect_error(arma_sim(theta=NA,sd=1,I=100), "Theta muss ein numerischer Vektor oder NULL sein.")
  # sd (innovation standard deviation): a single numeric value.
  expect_error(arma_sim(theta=0.,sd="a",I=100), "Die Standardabweichung sd muss ein numerischer Vektor der Laenge 1 sein.")
  expect_error(arma_sim(theta=0.,sd=c(1,1),I=100), "Die Standardabweichung sd muss ein numerischer Vektor der Laenge 1 sein.")
  expect_error(arma_sim(theta=0.,sd=NA,I=100), "Die Standardabweichung sd muss ein numerischer Vektor der Laenge 1 sein.")
  # I (series length): a single positive integer.
  expect_error(arma_sim(theta=0.,sd=0.5,I="a"), "I muss ein numerischer Vektor der Laenge 1 sein" )
  expect_error(arma_sim(theta=0.,sd=0.5,I=c(1,1)), "I muss ein numerischer Vektor der Laenge 1 sein" )
  expect_error(arma_sim(theta=0.,sd=0.5,I=NA), "I muss ein numerischer Vektor der Laenge 1 sein" )
  expect_error(arma_sim(theta=0.,sd=0.5,I=1.2), "I muss ein Interger Wert sein")
  expect_error(arma_sim(theta=0.,sd=0.5,I=-1), "I muss groeßer als 0 sein")
})

# NOTE(review): placeholder test — does not exercise the AR simulator.
test_that("AR works", {
  expect_equal(2*2, 4)
})
| /tests/testthat/test-ARMA_Generator.R | permissive | dan866/Time-Series | R | false | false | 1,427 | r | test_that("Eingabe testen", {
#Phi
expect_error(arma_sim(phi=c("a"),sd=1,I=100), "Phi muss ein numerischer Vektor oder NULL sein.")
expect_error(arma_sim(phi=NA,sd=1,I=100), "Phi muss ein numerischer Vektor oder NULL sein.")
expect_error(arma_sim(phi = 10, sd = 1, I = 100), "AR-Teil muss stationaer sein.")
#Theta
expect_error(arma_sim(theta=c("a"),sd=1,I=100), "Theta muss ein numerischer Vektor oder NULL sein.")
expect_error(arma_sim(theta=NA,sd=1,I=100), "Theta muss ein numerischer Vektor oder NULL sein.")
#sd
expect_error(arma_sim(theta=0.,sd="a",I=100), "Die Standardabweichung sd muss ein numerischer Vektor der Laenge 1 sein.")
expect_error(arma_sim(theta=0.,sd=c(1,1),I=100), "Die Standardabweichung sd muss ein numerischer Vektor der Laenge 1 sein.")
expect_error(arma_sim(theta=0.,sd=NA,I=100), "Die Standardabweichung sd muss ein numerischer Vektor der Laenge 1 sein.")
#I
expect_error(arma_sim(theta=0.,sd=0.5,I="a"), "I muss ein numerischer Vektor der Laenge 1 sein" )
expect_error(arma_sim(theta=0.,sd=0.5,I=c(1,1)), "I muss ein numerischer Vektor der Laenge 1 sein" )
expect_error(arma_sim(theta=0.,sd=0.5,I=NA), "I muss ein numerischer Vektor der Laenge 1 sein" )
expect_error(arma_sim(theta=0.,sd=0.5,I=1.2), "I muss ein Interger Wert sein")
expect_error(arma_sim(theta=0.,sd=0.5,I=-1), "I muss groeßer als 0 sein")
})
test_that("AR works", {
expect_equal(2*2, 4)
})
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{sjt.grpmean}
\alias{sjt.grpmean}
\title{Show grouped means as HTML table}
\usage{
sjt.grpmean(varCount, varGrp, rowLabels = NULL, digits = 2, file = NULL,
encoding = NULL, CSS = NULL, useViewer = TRUE, no.output = FALSE)
}
\arguments{
\item{varCount}{a numeric vector / variable. Mean, SD and SE for this variable are calculated.}
\item{varGrp}{a (numeric) vector with group indices, used to select sub-groups from \code{varCount}.}
\item{rowLabels}{a character vector of same length as the unique values of \code{varGrp}. In short: the
value labels of \code{varGrp}. Used to name table rows. By default, row labels
are automatically detected if set by \code{sji.setValueLabels}.}
\item{digits}{amount of digits for table values.}
\item{file}{The destination file, which will be in html-format. If no filepath is specified,
the file will be saved as a temporary file and opened either in the RStudio View pane or
in the default web browser.}
\item{encoding}{The charset encoding used for variable and value labels. Default is \code{NULL}, so encoding
will be auto-detected depending on your platform (\code{"UTF-8"} for Unix and \code{"Windows-1252"} for
Windows OS). Change encoding if specific chars are not properly displayed (e.g. German umlauts).}
\item{CSS}{A \code{\link{list}} with user-defined style-sheet-definitions, according to the
\href{http://www.w3.org/Style/CSS/}{official CSS syntax}. See return value \code{page.style} for details
of all style-sheet-classnames that are used in this function. Parameters for this list need:
\enumerate{
\item the class-names with \code{"css."}-prefix as parameter name and
\item each style-definition must end with a semicolon
}
You can add style information to the default styles by using a + (plus-sign) as
initial character for the parameter attributes. Examples:
\itemize{
\item \code{css.table='border:2px solid red;'} for a solid 2-pixel table border in red.
\item \code{css.summary='font-weight:bold;'} for a bold fontweight in the summary row.
}
See further examples below and the \href{http://www.strengejacke.de/sjPlot/sjtbasics}{sjPlot manual: sjt-basics}.}
\item{useViewer}{If \code{TRUE}, the function tries to show the HTML table in the IDE's viewer pane. If
\code{FALSE} or no viewer available, the HTML table is opened in a web browser.}
\item{no.output}{If \code{TRUE}, the html-output is neither opened in a browser nor shown in
the viewer pane and not even saved to file. This option is useful when the html output
should be used in \code{knitr} documents. The html output can be accessed via the return
value.}
}
\value{
Invisibly returns a \code{\link{structure}} with
\itemize{
\item the data frame with the description information (\code{df}),
\item the web page style sheet (\code{page.style}),
\item the web page content (\code{page.content}),
\item the complete html-output (\code{output.complete}) and
\item the html-table with inline CSS for use with knitr (\code{knitr})
}
for further use.
}
\description{
Computes mean, sd and se for each sub-group (indicated by \code{varGrp})
of \code{varCount} and prints the result as an HTML table.
}
\examples{
data(efc)
sjt.grpmean(efc$c12hour,
efc$e42dep)
}
\seealso{
\code{\link{sjp.aov1}}
}
| /man/sjt.grpmean.Rd | no_license | wilpi/devel | R | false | false | 3,395 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{sjt.grpmean}
\alias{sjt.grpmean}
\title{Show grouped means as HTML table}
\usage{
sjt.grpmean(varCount, varGrp, rowLabels = NULL, digits = 2, file = NULL,
encoding = NULL, CSS = NULL, useViewer = TRUE, no.output = FALSE)
}
\arguments{
\item{varCount}{a numeric vector / variable. Mean, SD and SE for this variable are calculated.}
\item{varGrp}{a (numeric) vector with group indices, used to select sub-groups from \code{varCount}.}
\item{rowLabels}{a character vector of same length as the unique values of \code{varGrp}. In short: the
value labels of \code{varGrp}. Used to name table rows. By default, row labels
are automatically detected if set by \code{sji.setValueLabels}.}
\item{digits}{amount of digits for table values.}
\item{file}{The destination file, which will be in html-format. If no filepath is specified,
the file will be saved as a temporary file and opened either in the RStudio View pane or
in the default web browser.}
\item{encoding}{The charset encoding used for variable and value labels. Default is \code{NULL}, so encoding
will be auto-detected depending on your platform (\code{"UTF-8"} for Unix and \code{"Windows-1252"} for
Windows OS). Change encoding if specific chars are not properly displayed (e.g. German umlauts).}
\item{CSS}{A \code{\link{list}} with user-defined style-sheet-definitions, according to the
\href{http://www.w3.org/Style/CSS/}{official CSS syntax}. See return value \code{page.style} for details
of all style-sheet-classnames that are used in this function. Parameters for this list need:
\enumerate{
\item the class-names with \code{"css."}-prefix as parameter name and
\item each style-definition must end with a semicolon
}
You can add style information to the default styles by using a + (plus-sign) as
initial character for the parameter attributes. Examples:
\itemize{
\item \code{css.table='border:2px solid red;'} for a solid 2-pixel table border in red.
\item \code{css.summary='font-weight:bold;'} for a bold fontweight in the summary row.
}
See further examples below and the \href{http://www.strengejacke.de/sjPlot/sjtbasics}{sjPlot manual: sjt-basics}.}
\item{useViewer}{If \code{TRUE}, the function tries to show the HTML table in the IDE's viewer pane. If
\code{FALSE} or no viewer available, the HTML table is opened in a web browser.}
\item{no.output}{If \code{TRUE}, the html-output is neither opened in a browser nor shown in
the viewer pane and not even saved to file. This option is useful when the html output
should be used in \code{knitr} documents. The html output can be accessed via the return
value.}
}
\value{
Invisibly returns a \code{\link{structure}} with
\itemize{
\item the data frame with the description information (\code{df}),
\item the web page style sheet (\code{page.style}),
\item the web page content (\code{page.content}),
\item the complete html-output (\code{output.complete}) and
\item the html-table with inline CSS for use with knitr (\code{knitr})
}
for further use.
}
\description{
Computes mean, sd and se for each sub-group (indicated by \code{varGrp})
of \code{varCount} and prints the result as an HTML table.
}
\examples{
data(efc)
sjt.grpmean(efc$c12hour,
efc$e42dep)
}
\seealso{
\code{\link{sjp.aov1}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_D3partitionR.R
\name{is_present_variable}
\alias{is_present_variable}
\title{Check if a variable is present in a D3partitionR object}
\usage{
is_present_variable(variable, D3partitionR_object)
}
\arguments{
\item{variable}{The variable which presence is to be checked}
\item{D3partitionR_object}{The D3partitionR object}
}
\value{
TRUE/FALSE
}
\description{
Check if a variable is present in a D3partitionR object
}
| /man/is_present_variable.Rd | no_license | AntoineGuillot2/D3partitionR | R | false | true | 501 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_D3partitionR.R
\name{is_present_variable}
\alias{is_present_variable}
\title{Check if a variable is present in a D3partitionR object}
\usage{
is_present_variable(variable, D3partitionR_object)
}
\arguments{
\item{variable}{The variable which presence is to be checked}
\item{D3partitionR_object}{The D3partitionR object}
}
\value{
TRUE/FALSE
}
\description{
Check if a variable is present in a D3partitionR object
}
|
#' Build a \code{CDS} class object given the input about a CDS
#' contract.
#'
#' @name CDS
#'
#' @param contract is the contract type, default SNAC
#' @param entityName is the name of the reference entity. Optional.
#' @param RED alphanumeric code assigned to the reference entity. Optional.
#' @param TDate is when the trade is executed, denoted as T. Default
#' is \code{Sys.Date}. The date format should be in "YYYY-MM-DD".
#' @param baseDate is the start date for the IR curve. Default is TDate.
#' @param currency in which CDS is denominated.
#' @param types is a string indicating the names of the instruments
#' used for the yield curve. 'M' means money market rate; 'S' is swap
#' rate.
#' @param rates is an array of numeric values indicating the rate of
#' each instrument.
#' @param expiries is an array of characters indicating the maturity
#' of each instrument.
#' @param mmDCC is the day count convention of the instruments.
#' @param fixedSwapFreq is the frequency of the fixed rate of swap
#' being paid.
#' @param floatSwapFreq is the frequency of the floating rate of swap
#' being paid.
#' @param fixedSwapDCC is the day count convention of the fixed leg.
#' @param floatSwapDCC is the day count convention of the floating leg.
#' @param badDayConvZC is a character indicating how non-business days
#' are converted.
#' @param holidays is an input for holiday files to adjust to business
#' days.
#' @param valueDate is the date for which the present value of the CDS
#' is calculated. aka cash-settle date. The default is T + 3.
#' @param benchmarkDate Accrual begin date.
#' @param startDate is when the CDS nomially starts in terms of
#' premium payments, i.e. the number of days in the first period (and
#' thus the amount of the first premium payment) is counted from this
#' date. aka accrual begin date.
#' @param endDate aka maturity date. This is when the contract expires
#' and protection ends. Any default after this date does not trigger a
#' payment.
#' @param stepinDate default is T + 1.
#' @param maturity of the CDS contract.
#' @param dccCDS day count convention of the CDS. Default is ACT/360.
#' @param freqCDS date interval of the CDS contract.
#' @param stubCDS is a character indicating the presence of a stub.
#' @param badDayConvCDS refers to the bay day conversion for the CDS
#' coupon payments. Default is "F", following.
#' @param calendar refers to any calendar adjustment for the CDS.
#' @param parSpread CDS par spread in bps.
#' @param coupon quoted in bps. It specifies the payment amount from
#' the protection buyer to the seller on a regular basis. The default
#' is 100 bps.
#' @param recoveryRate in decimal. Default is 0.4.
#' @param upfront is quoted in the currency amount. Since a standard
#' contract is traded with fixed coupons, upfront payment is
#' introduced to reconcile the difference in contract value due to the
#' difference between the fixed coupon and the conventional par
#' spread. There are two types of upfront, dirty and clean. Dirty
#' upfront, a.k.a. Cash Settlement Amount, refers to the market value
#' of a CDS contract. Clean upfront is dirty upfront less any accrued
#' interest payment, and is also called the Principal.
#' @param ptsUpfront is quoted as a percentage of the notional
#' amount. They represent the upfront payment excluding the accrual
#' payment. High Yield (HY) CDS contracts are often quoted in points
#' upfront. The protection buyer pays the upfront payment if points
#' upfront are positive, and the buyer is paid by the seller if the
#' points are negative.
#' @param isPriceClean refers to the type of upfront calculated. It is
#' boolean. When \code{TRUE}, calculate principal only. When
#' \code{FALSE}, calculate principal + accrual.
#' @param notional is the amount of the underlying asset on which the
#' payments are based. Default is 1e7, i.e. 10MM.
#' @param payAccruedOnDefault is a partial payment of the premium made
#' to the protection seller in the event of a default. Default is
#' \code{TRUE}.
#'
#' @return a \code{CDS} class object including the input informtion on
#' the contract as well as the valuation results of the contract.
#'
#' @export
#' @examples
#' # Build a simple CDS class object
#' require(CDS)
#' cds1 <- CDS(TDate = "2014-05-07", parSpread = 50, coupon = 100)
#'
CDS <- function(contract = "SNAC", ## CDS contract type, default SNAC
                entityName = NULL,
                RED = NULL,
                TDate = Sys.Date(), ## Default is the current date
                ## IR curve
                baseDate = TDate,
                currency = "USD",
                types = NULL,
                rates = NULL,
                expiries = NULL,
                mmDCC = "ACT/360",
                fixedSwapFreq = "6M",
                floatSwapFreq = "3M",
                fixedSwapDCC = "30/360",
                floatSwapDCC = "ACT/360",
                badDayConvZC = "M",
                holidays = "None",
                ## CDS
                valueDate = NULL,
                benchmarkDate = NULL,
                startDate = NULL,
                endDate = NULL,
                stepinDate = NULL,
                maturity = "5Y",
                dccCDS = "ACT/360",
                freqCDS = "Q",
                stubCDS = "F",
                badDayConvCDS = "F",
                calendar = "None",
                parSpread = NULL,
                coupon = 100,
                recoveryRate = 0.4,
                upfront = NULL,
                ptsUpfront = NULL,
                isPriceClean = FALSE,
                notional = 1e7,
                payAccruedOnDefault = TRUE
                ){

    ## ---- Input validation --------------------------------------------------

    checkTDate <- testDate(TDate)
    if (!is.na(checkTDate)) stop (checkTDate)

    ## Exactly one quoting convention (spread, upfront or points upfront)
    ## is needed to price the contract.
    if (is.null(upfront) && is.null(ptsUpfront) && is.null(parSpread))
        stop("Please input spread, upfront or pts upfront")

    ## Derive the maturity tenor string ("3M", "5Y", ...) from endDate when
    ## the caller passes maturity = NULL.
    if (is.null(maturity)) {
        md <- .mondf(TDate, endDate)
        if (md < 12){
            maturity <- paste(md, "M", sep = "", collapse = "")
        } else {
            maturity <- paste(floor(md/12), "Y", sep = "", collapse = "")
        }
    }

    ratesDate <- baseDate
    effectiveDate <- as.Date(TDate)

    ## ---- Standard CDS dates ------------------------------------------------
    ## Any date not given explicitly is filled in with the conventions
    ## computed by getDates() (value date = T + 3, step-in = T + 1, ...).
    cdsDates <- getDates(TDate = as.Date(TDate), maturity = maturity)
    if (is.null(valueDate)) valueDate <- cdsDates$valueDate
    if (is.null(benchmarkDate)) benchmarkDate <- cdsDates$startDate
    if (is.null(startDate)) startDate <- cdsDates$startDate
    if (is.null(endDate)) endDate <- cdsDates$endDate
    if (is.null(stepinDate)) stepinDate <- cdsDates$stepinDate

    ## BUG FIX: this check used to read
    ##   stopifnot(all.equal(length(rates), length(expiries), nchar(types)))
    ## but all.equal()'s third positional argument is `tolerance`, so
    ## nchar(types) was silently used as a (huge) numeric tolerance and the
    ## intended three-way consistency check could never fail.  Compare the
    ## lengths explicitly, and only when a full user-supplied curve is
    ## present (NULL components are filled from market data below).
    if (!is.null(types) && !is.null(rates) && !is.null(expiries))
        stopifnot(length(rates) == length(expiries),
                  length(expiries) == nchar(types))

    ## ---- Interest-rate curve -----------------------------------------------
    ## Fetch market data for any curve component the caller did not supply.
    if (is.null(types) || is.null(rates) || is.null(expiries)){
        ratesInfo <- getRates(date = ratesDate, currency = currency)
        effectiveDate <- as.Date(as.character(ratesInfo[[2]]$effectiveDate))

        if (is.null(types)) types = paste(as.character(ratesInfo[[1]]$type), collapse = "")
        if (is.null(rates)) rates = as.numeric(as.character(ratesInfo[[1]]$rate))
        if (is.null(expiries)) expiries = as.character(ratesInfo[[1]]$expiry)
        if (is.null(mmDCC)) mmDCC = as.character(ratesInfo[[2]]$mmDCC)

        if (is.null(fixedSwapFreq)) fixedSwapFreq = as.character(ratesInfo[[2]]$fixedFreq)
        if (is.null(floatSwapFreq)) floatSwapFreq = as.character(ratesInfo[[2]]$floatFreq)
        if (is.null(fixedSwapDCC)) fixedSwapDCC = as.character(ratesInfo[[2]]$fixedDCC)
        if (is.null(floatSwapDCC)) floatSwapDCC = as.character(ratesInfo[[2]]$floatDCC)
        if (is.null(badDayConvZC)) badDayConvZC = as.character(ratesInfo[[2]]$badDayConvention)
        if (is.null(holidays)) holidays = as.character(ratesInfo[[2]]$swapCalendars)
    }

    if (is.null(entityName)) entityName <- "NA"
    if (is.null(RED)) RED <- "NA"

    ## ---- Build the CDS object ----------------------------------------------
    cds <- new("CDS",
               contract = as.character(contract),
               entityName = as.character(entityName),
               RED = as.character(RED),
               TDate = as.Date(TDate),
               baseDate = as.Date(baseDate),
               currency = as.character(currency),

               types = types,
               rates = rates,
               expiries = expiries,
               mmDCC = mmDCC,
               effectiveDate = effectiveDate,
               fixedSwapFreq = fixedSwapFreq,
               floatSwapFreq = floatSwapFreq,
               fixedSwapDCC = fixedSwapDCC,
               floatSwapDCC = floatSwapDCC,
               badDayConvZC = badDayConvZC,
               holidays = holidays,

               valueDate = as.Date(valueDate),
               benchmarkDate = as.Date(benchmarkDate),
               startDate = as.Date(startDate),
               endDate = as.Date(endDate),
               stepinDate = as.Date(stepinDate),
               backstopDate = as.Date(cdsDates$backstopDate),
               firstcouponDate = as.Date(cdsDates$firstcouponDate),
               pencouponDate = as.Date(cdsDates$pencouponDate),

               maturity = maturity,
               dccCDS = dccCDS,
               freqCDS = freqCDS,
               stubCDS = stubCDS,
               badDayConvCDS = badDayConvCDS,
               calendar = calendar,

               coupon = coupon,
               recoveryRate = recoveryRate,
               inputPriceClean = isPriceClean,
               notional = notional,
               payAccruedOnDefault = payAccruedOnDefault
               )

    ## ---- Pricing helpers ---------------------------------------------------
    ## The original body repeated the ~30-argument calls to upfront() and
    ## spread() six times; the arguments shared by every call are collected
    ## once here and the two pricers wrapped in small local helpers.
    ## NOTE: do.call("upfront", ...) must use the function *name* as a
    ## string: the local variable `upfront` (the quote) shadows the
    ## function, and the string form looks the name up with
    ## mode = "function", skipping the numeric binding.
    sharedArgs <- list(TDate = TDate,
                       baseDate = baseDate,
                       currency = currency,
                       types = types,
                       rates = rates,
                       expiries = expiries,
                       mmDCC = mmDCC,
                       fixedSwapFreq = fixedSwapFreq,
                       floatSwapFreq = floatSwapFreq,
                       fixedSwapDCC = fixedSwapDCC,
                       floatSwapDCC = floatSwapDCC,
                       badDayConvZC = badDayConvZC,
                       holidays = holidays,
                       valueDate = valueDate,
                       benchmarkDate = benchmarkDate,
                       startDate = startDate,
                       endDate = endDate,
                       stepinDate = stepinDate,
                       maturity = maturity,
                       dccCDS = dccCDS,
                       freqCDS = freqCDS,
                       stubCDS = stubCDS,
                       badDayConvCDS = badDayConvCDS,
                       calendar = calendar)

    ## Price the upfront for a given par spread; clean (principal only)
    ## when priceClean = TRUE, dirty (principal + accrual) otherwise.
    calcUpfront <- function(spreadValue, priceClean) {
        do.call("upfront",
                c(sharedArgs,
                  list(parSpread = spreadValue,
                       coupon = coupon,
                       recoveryRate = recoveryRate,
                       isPriceClean = priceClean,
                       payAccruedOnDefault = payAccruedOnDefault,
                       notional = notional)))
    }

    ## Imply the par spread from an upfront amount or points-upfront quote.
    calcSpread <- function(upfrontValue, ptsUpfrontValue, accruedAtStart) {
        do.call("spread",
                c(sharedArgs,
                  list(upfront = upfrontValue,
                       ptsUpfront = ptsUpfrontValue,
                       coupon = coupon,
                       recoveryRate = recoveryRate,
                       payAccruedAtStart = accruedAtStart,
                       payAccruedOnDefault = payAccruedOnDefault,
                       notional = notional)))
    }

    ## ---- Price from the supplied quote --------------------------------------
    if (!is.null(parSpread)){
        ## Quoted in par spread: derive clean upfront (principal), points
        ## upfront and dirty upfront (cash settlement amount).
        cds@parSpread <- parSpread
        cds@principal <- calcUpfront(parSpread, priceClean = TRUE)
        cds@ptsUpfront <- cds@principal / notional
        cds@upfront <- calcUpfront(parSpread, priceClean = FALSE)

    } else if (!is.null(ptsUpfront)){
        ## Quoted in points upfront: imply the par spread, then the dirty
        ## upfront from it.
        cds@ptsUpfront <- ptsUpfront
        cds@parSpread <- calcSpread(upfront, ptsUpfront,
                                    accruedAtStart = isPriceClean)
        cds@principal <- notional * ptsUpfront
        cds@upfront <- calcUpfront(cds@parSpread, priceClean = FALSE)

    } else {
        if (isPriceClean == TRUE) {
            ## Quoted as a clean upfront amount (principal only).
            cds@principal <- upfront
            cds@ptsUpfront <- upfront / notional
            cds@parSpread <- calcSpread(NULL, cds@ptsUpfront,
                                        accruedAtStart = TRUE)
            cds@upfront <- calcUpfront(cds@parSpread, priceClean = FALSE)
        } else {
            ## Quoted as a dirty upfront amount (cash settlement amount).
            cds@upfront <- upfront
            cds@parSpread <- calcSpread(upfront, NULL,
                                        accruedAtStart = FALSE)
            cds@principal <- calcUpfront(cds@parSpread, priceClean = TRUE)
            cds@ptsUpfront <- cds@principal / notional
        }
    }

    ## ---- Risk measures and derived quantities --------------------------------
    cds@accrual <- cds@upfront - cds@principal
    cds@spreadDV01 <- spreadDV01(cds)
    cds@IRDV01 <- IRDV01(cds)
    cds@RecRisk01 <- recRisk01(cds)
    ## Approximate default probability over the remaining life, with time
    ## measured in ACT/360 years.
    cds@defaultProb <- defaultProb(parSpread = cds@parSpread,
                                   t = as.numeric(as.Date(endDate) -
                                       as.Date(TDate))/360,
                                   recoveryRate = recoveryRate)
    cds@defaultExpo <- defaultExpo(recoveryRate, notional, cds@principal)
    cds@price <- price(cds@principal, notional)

    return(cds)
}
| /pkg/R/CDS.R | no_license | bdivet/CDS | R | false | false | 23,799 | r | #' Build a \code{CDS} class object given the input about a CDS
#' contract.
#'
#' @name CDS
#'
#' @param contract is the contract type, default SNAC
#' @param entityName is the name of the reference entity. Optional.
#' @param RED alphanumeric code assigned to the reference entity. Optional.
#' @param TDate is when the trade is executed, denoted as T. Default
#' is \code{Sys.Date}. The date format should be in "YYYY-MM-DD".
#' @param baseDate is the start date for the IR curve. Default is TDate.
#' @param currency in which CDS is denominated.
#' @param types is a string indicating the names of the instruments
#' used for the yield curve. 'M' means money market rate; 'S' is swap
#' rate.
#' @param rates is an array of numeric values indicating the rate of
#' each instrument.
#' @param expiries is an array of characters indicating the maturity
#' of each instrument.
#' @param mmDCC is the day count convention of the instruments.
#' @param fixedSwapFreq is the frequency of the fixed rate of swap
#' being paid.
#' @param floatSwapFreq is the frequency of the floating rate of swap
#' being paid.
#' @param fixedSwapDCC is the day count convention of the fixed leg.
#' @param floatSwapDCC is the day count convention of the floating leg.
#' @param badDayConvZC is a character indicating how non-business days
#' are converted.
#' @param holidays is an input for holiday files to adjust to business
#' days.
#' @param valueDate is the date for which the present value of the CDS
#' is calculated. aka cash-settle date. The default is T + 3.
#' @param benchmarkDate Accrual begin date.
#' @param startDate is when the CDS nomially starts in terms of
#' premium payments, i.e. the number of days in the first period (and
#' thus the amount of the first premium payment) is counted from this
#' date. aka accrual begin date.
#' @param endDate aka maturity date. This is when the contract expires
#' and protection ends. Any default after this date does not trigger a
#' payment.
#' @param stepinDate default is T + 1.
#' @param maturity of the CDS contract.
#' @param dccCDS day count convention of the CDS. Default is ACT/360.
#' @param freqCDS date interval of the CDS contract.
#' @param stubCDS is a character indicating the presence of a stub.
#' @param badDayConvCDS refers to the bay day conversion for the CDS
#' coupon payments. Default is "F", following.
#' @param calendar refers to any calendar adjustment for the CDS.
#' @param parSpread CDS par spread in bps.
#' @param coupon quoted in bps. It specifies the payment amount from
#' the protection buyer to the seller on a regular basis. The default
#' is 100 bps.
#' @param recoveryRate in decimal. Default is 0.4.
#' @param upfront is quoted in the currency amount. Since a standard
#' contract is traded with fixed coupons, upfront payment is
#' introduced to reconcile the difference in contract value due to the
#' difference between the fixed coupon and the conventional par
#' spread. There are two types of upfront, dirty and clean. Dirty
#' upfront, a.k.a. Cash Settlement Amount, refers to the market value
#' of a CDS contract. Clean upfront is dirty upfront less any accrued
#' interest payment, and is also called the Principal.
#' @param ptsUpfront is quoted as a percentage of the notional
#' amount. They represent the upfront payment excluding the accrual
#' payment. High Yield (HY) CDS contracts are often quoted in points
#' upfront. The protection buyer pays the upfront payment if points
#' upfront are positive, and the buyer is paid by the seller if the
#' points are negative.
#' @param isPriceClean refers to the type of upfront calculated. It is
#' boolean. When \code{TRUE}, calculate principal only. When
#' \code{FALSE}, calculate principal + accrual.
#' @param notional is the amount of the underlying asset on which the
#' payments are based. Default is 1e7, i.e. 10MM.
#' @param payAccruedOnDefault is a partial payment of the premium made
#' to the protection seller in the event of a default. Default is
#' \code{TRUE}.
#'
#' @return a \code{CDS} class object including the input informtion on
#' the contract as well as the valuation results of the contract.
#'
#' @export
#' @examples
#' # Build a simple CDS class object
#' require(CDS)
#' cds1 <- CDS(TDate = "2014-05-07", parSpread = 50, coupon = 100)
#'
CDS <- function(contract = "SNAC", ## CDS contract type, default SNAC
entityName = NULL,
RED = NULL,
TDate = Sys.Date(), ## Default is the current date
## IR curve
baseDate = TDate,
currency = "USD",
types = NULL,
rates = NULL,
expiries = NULL,
mmDCC = "ACT/360",
fixedSwapFreq = "6M",
floatSwapFreq = "3M",
fixedSwapDCC = "30/360",
floatSwapDCC = "ACT/360",
badDayConvZC = "M",
holidays = "None",
## CDS
valueDate = NULL,
benchmarkDate = NULL,
startDate = NULL,
endDate = NULL,
stepinDate = NULL,
maturity = "5Y",
dccCDS = "ACT/360",
freqCDS = "Q",
stubCDS = "F",
badDayConvCDS = "F",
calendar = "None",
parSpread = NULL,
coupon = 100,
recoveryRate = 0.4,
upfront = NULL,
ptsUpfront = NULL,
isPriceClean = FALSE,
notional = 1e7,
payAccruedOnDefault = TRUE
){
## ---- input validation -------------------------------------------------
## testDate() returns NA when the date is valid, otherwise an error message.
checkTDate <- testDate(TDate)
if (!is.na(checkTDate)) stop (checkTDate)
## At least one pricing input (par spread, dirty upfront or points upfront)
## is required to value the contract.
if ((is.null(upfront)) & (is.null(ptsUpfront)) & (is.null(parSpread)))
stop("Please input spread, upfront or pts upfront")
## Infer the maturity string ("<n>M" / "<n>Y") from endDate when the caller
## passed maturity = NULL. (Assumes endDate is non-NULL in that case --
## TODO confirm; .mondf() would fail on a NULL endDate.)
if (is.null(maturity)) {
md <- .mondf(TDate, endDate)
if (md < 12){
maturity <- paste(md, "M", sep = "", collapse = "")
} else {
maturity <- paste(floor(md/12), "Y", sep = "", collapse = "")
}
}
## ---- date scaffolding -------------------------------------------------
## getDates() derives the standard CDS schedule dates from the trade date.
ratesDate <- baseDate
effectiveDate <- as.Date(TDate)
cdsDates <- getDates(TDate = as.Date(TDate), maturity = maturity)
if (is.null(valueDate)) valueDate <- cdsDates$valueDate
if (is.null(benchmarkDate)) benchmarkDate <- cdsDates$startDate
if (is.null(startDate)) startDate <- cdsDates$startDate
if (is.null(endDate)) endDate <- cdsDates$endDate
if (is.null(stepinDate)) stepinDate <- cdsDates$stepinDate
## NOTE(review): all.equal(target, current, tolerance) -- the third
## positional argument (nchar(types)) is consumed as the numeric tolerance,
## so this does NOT verify that nchar(types) matches the lengths; verify
## the intended three-way length check.
stopifnot(all.equal(length(rates), length(expiries), nchar(types)))
## ---- interest-rate curve inputs ---------------------------------------
## When any curve input is missing, pull the curve for ratesDate/currency.
## The mmDCC/fixed*/float*/badDayConvZC/holidays branches below are only
## reachable when the caller explicitly passes NULL (they have non-NULL
## defaults).
if ((is.null(types) | is.null(rates) | is.null(expiries))){
ratesInfo <- getRates(date = ratesDate, currency = currency)
effectiveDate <- as.Date(as.character(ratesInfo[[2]]$effectiveDate))
if (is.null(types)) types = paste(as.character(ratesInfo[[1]]$type), collapse = "")
if (is.null(rates)) rates = as.numeric(as.character(ratesInfo[[1]]$rate))
if (is.null(expiries)) expiries = as.character(ratesInfo[[1]]$expiry)
if (is.null(mmDCC)) mmDCC = as.character(ratesInfo[[2]]$mmDCC)
if (is.null(fixedSwapFreq)) fixedSwapFreq = as.character(ratesInfo[[2]]$fixedFreq)
if (is.null(floatSwapFreq)) floatSwapFreq = as.character(ratesInfo[[2]]$floatFreq)
if (is.null(fixedSwapDCC)) fixedSwapDCC = as.character(ratesInfo[[2]]$fixedDCC)
if (is.null(floatSwapDCC)) floatSwapDCC = as.character(ratesInfo[[2]]$floatDCC)
if (is.null(badDayConvZC)) badDayConvZC = as.character(ratesInfo[[2]]$badDayConvention)
if (is.null(holidays)) holidays = as.character(ratesInfo[[2]]$swapCalendars)
}
## Reference-entity labels default to the literal string "NA".
if (is.null(entityName)) entityName <- "NA"
if (is.null(RED)) RED <- "NA"
## ---- build the S4 contract object -------------------------------------
cds <- new("CDS",
contract = as.character(contract),
entityName = as.character(entityName),
RED = as.character(RED),
TDate = as.Date(TDate),
baseDate = as.Date(baseDate),
currency = as.character(currency),
types = types,
rates = rates,
expiries = expiries,
mmDCC = mmDCC,
effectiveDate = effectiveDate,
fixedSwapFreq = fixedSwapFreq,
floatSwapFreq = floatSwapFreq,
fixedSwapDCC = fixedSwapDCC,
floatSwapDCC = floatSwapDCC,
badDayConvZC = badDayConvZC,
holidays = holidays,
valueDate = as.Date(valueDate),
benchmarkDate = as.Date(benchmarkDate),
startDate = as.Date(startDate),
endDate = as.Date(endDate),
stepinDate = as.Date(stepinDate),
backstopDate = as.Date(cdsDates$backstopDate),
firstcouponDate = as.Date(cdsDates$firstcouponDate),
pencouponDate = as.Date(cdsDates$pencouponDate),
maturity = maturity,
dccCDS = dccCDS,
freqCDS = freqCDS,
stubCDS = stubCDS,
badDayConvCDS = badDayConvCDS,
calendar = calendar,
coupon = coupon,
recoveryRate = recoveryRate,
inputPriceClean = isPriceClean,
notional = notional,
payAccruedOnDefault = payAccruedOnDefault
)
## ---- valuation: three mutually exclusive quoting conventions -----------
## Branch 1: quoted by par spread -> derive clean upfront (principal,
## isPriceClean = TRUE) and dirty upfront (FALSE); pts upfront = principal
## as a fraction of notional.
if (!is.null(parSpread)){
## if parSpread is given, calculate principal and accrual
cds@parSpread <- parSpread
cds@principal <- upfront(TDate,
baseDate = baseDate,
currency = currency,
types = types,
rates = rates,
expiries = expiries,
mmDCC = mmDCC,
fixedSwapFreq,
floatSwapFreq,
fixedSwapDCC,
floatSwapDCC,
badDayConvZC,
holidays,
valueDate,
benchmarkDate,
startDate,
endDate,
stepinDate,
maturity,
dccCDS,
freqCDS,
stubCDS,
badDayConvCDS,
calendar,
parSpread,
coupon,
recoveryRate,
TRUE,
payAccruedOnDefault,
notional)
cds@ptsUpfront <- cds@principal / notional
cds@upfront <- upfront(TDate,
baseDate = baseDate,
currency = currency,
types = types,
rates = rates,
expiries = expiries,
mmDCC = mmDCC,
fixedSwapFreq,
floatSwapFreq,
fixedSwapDCC,
floatSwapDCC,
badDayConvZC,
holidays,
valueDate,
benchmarkDate,
startDate,
endDate,
stepinDate,
maturity,
dccCDS,
freqCDS,
stubCDS,
badDayConvCDS,
calendar,
parSpread,
coupon,
recoveryRate,
FALSE,
payAccruedOnDefault,
notional)
## Branch 2: quoted in points upfront -> solve for the par spread, then
## principal = notional * pts and dirty upfront from the solved spread.
} else if (!is.null(ptsUpfront)){
cds@ptsUpfront <- ptsUpfront
cds@parSpread <- spread(TDate,
baseDate,
currency,
types,
rates,
expiries,
mmDCC,
fixedSwapFreq,
floatSwapFreq,
fixedSwapDCC,
floatSwapDCC,
badDayConvZC,
holidays,
valueDate,
benchmarkDate,
startDate,
endDate,
stepinDate,
maturity,
dccCDS,
freqCDS,
stubCDS,
badDayConvCDS,
calendar,
upfront,
ptsUpfront,
coupon,
recoveryRate,
payAccruedAtStart = isPriceClean,
notional,
payAccruedOnDefault)
cds@principal <- notional * ptsUpfront
cds@upfront <- upfront(TDate,
baseDate,
currency,
types,
rates,
expiries,
mmDCC,
fixedSwapFreq,
floatSwapFreq,
fixedSwapDCC,
floatSwapDCC,
badDayConvZC,
holidays,
valueDate,
benchmarkDate,
startDate,
endDate,
stepinDate,
maturity,
dccCDS,
freqCDS,
stubCDS,
badDayConvCDS,
calendar,
cds@parSpread,
coupon,
recoveryRate,
FALSE,
payAccruedOnDefault,
notional)
## Branch 3: quoted as a currency upfront amount; isPriceClean decides
## whether that amount is the clean (principal) or dirty upfront.
} else {
if (isPriceClean == TRUE) {
## clean upfront given -> solve spread from pts, then compute dirty
cds@principal <- upfront
cds@ptsUpfront <- upfront / notional
cds@parSpread <- spread(TDate = TDate,
baseDate = baseDate,
currency = currency,
types = types,
rates = rates,
expiries = expiries,
mmDCC = mmDCC,
fixedSwapFreq = fixedSwapFreq,
floatSwapFreq = floatSwapFreq,
fixedSwapDCC = fixedSwapDCC,
floatSwapDCC = floatSwapDCC,
badDayConvZC = badDayConvZC,
holidays = holidays,
valueDate = valueDate,
benchmarkDate = benchmarkDate,
startDate = startDate,
endDate = endDate,
stepinDate = stepinDate,
maturity = maturity,
dccCDS = dccCDS,
freqCDS = freqCDS,
stubCDS = stubCDS,
badDayConvCDS = badDayConvCDS,
calendar = calendar,
upfront = NULL,
ptsUpfront = cds@ptsUpfront,
coupon = coupon,
recoveryRate = recoveryRate,
payAccruedAtStart = TRUE,
payAccruedOnDefault = payAccruedOnDefault,
notional = notional)
cds@upfront <- upfront(TDate = TDate,
baseDate = baseDate,
currency = currency,
types = types,
rates = rates,
expiries = expiries,
mmDCC = mmDCC,
fixedSwapFreq = fixedSwapFreq,
floatSwapFreq = floatSwapFreq,
fixedSwapDCC = fixedSwapDCC,
floatSwapDCC = floatSwapDCC,
badDayConvZC = badDayConvZC,
holidays = holidays,
valueDate = valueDate,
benchmarkDate = benchmarkDate,
startDate = startDate,
endDate = endDate,
stepinDate = stepinDate,
maturity = maturity,
dccCDS = dccCDS,
freqCDS = freqCDS,
stubCDS = stubCDS,
badDayConvCDS = badDayConvCDS,
calendar = calendar,
parSpread = cds@parSpread,
coupon = coupon,
recoveryRate = recoveryRate,
isPriceClean = FALSE,
payAccruedOnDefault = payAccruedOnDefault,
notional = notional)
} else {
## dirty upfront
cds@upfront <- upfront
cds@parSpread <- spread(TDate = TDate,
baseDate = baseDate,
currency = currency,
types = types,
rates = rates,
expiries = expiries,
mmDCC = mmDCC,
fixedSwapFreq = fixedSwapFreq,
floatSwapFreq = floatSwapFreq,
fixedSwapDCC = fixedSwapDCC,
floatSwapDCC = floatSwapDCC,
badDayConvZC = badDayConvZC,
holidays = holidays,
valueDate = valueDate,
benchmarkDate = benchmarkDate,
startDate = startDate,
endDate = endDate,
stepinDate = stepinDate,
maturity = maturity,
dccCDS = dccCDS,
freqCDS = freqCDS,
stubCDS = stubCDS,
badDayConvCDS = badDayConvCDS,
calendar = calendar,
upfront = upfront,
ptsUpfront = NULL,
coupon = coupon,
recoveryRate = recoveryRate,
payAccruedAtStart = FALSE,
notional = notional,
payAccruedOnDefault = payAccruedOnDefault)
cds@principal <- upfront(TDate,
baseDate,
currency,
types,
rates,
expiries,
mmDCC,
fixedSwapFreq,
floatSwapFreq,
fixedSwapDCC,
floatSwapDCC,
badDayConvZC,
holidays,
valueDate,
benchmarkDate,
startDate,
endDate,
stepinDate,
maturity,
dccCDS,
freqCDS,
stubCDS,
badDayConvCDS,
calendar,
cds@parSpread,
coupon,
recoveryRate,
isPriceClean = TRUE,
payAccruedOnDefault,
notional)
cds@ptsUpfront <- cds@principal / notional
}
}
## ---- derived risk measures --------------------------------------------
## accrual = dirty upfront - clean upfront (accrued premium component)
cds@accrual <- cds@upfront - cds@principal
cds@spreadDV01 <- spreadDV01(cds)
cds@IRDV01 <- IRDV01(cds)
cds@RecRisk01 <- recRisk01(cds)
## Flat-hazard default probability over the remaining life, with time
## measured in ACT/360 years.
cds@defaultProb <- defaultProb(parSpread = cds@parSpread,
t = as.numeric(as.Date(endDate) -
as.Date(TDate))/360,
recoveryRate = recoveryRate)
cds@defaultExpo <- defaultExpo(recoveryRate, notional, cds@principal)
cds@price <- price(cds@principal, notional)
return(cds)
}
|
library(misc3d)

### Name: linesTetrahedra
### Title: Create a Set of Lines with Tetrahetra Centered at Points along
###   the Lines
### Aliases: linesTetrahedra
### Keywords: hplot

### ** Examples

# Four corner points of the scene, each drawn as a size-1 tetrahedron.
corner_pts <- pointsTetrahedra(x = c(100, 100, 257, 257),
                               y = c(100, 100, 257, 257),
                               z = c(100, 257, 257, 100),
                               size = 1)

# Two diagonal line segments (one per matrix row), rendered in red as
# strings of small tetrahedra.
seg_x <- matrix(c(100, 257, 100, 257), nrow = 2, byrow = TRUE)
seg_y <- matrix(c(100, 257, 100, 257), nrow = 2, byrow = TRUE)
seg_z <- matrix(c(100, 257, 257, 100), nrow = 2, byrow = TRUE)
diag_lines <- linesTetrahedra(x = seg_x, y = seg_y, z = seg_z,
                              lwd = 0.4, col = "red")

# Render both scene components with the rgl backend.
drawScene.rgl(list(corner_pts, diag_lines))
| /data/genthat_extracted_code/misc3d/examples/linesTetrahedra.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 751 | r | library(misc3d)
### Name: linesTetrahedra
### Title: Create a Set of Lines with Tetrahetra Centered at Points along
### the Lines
### Aliases: linesTetrahedra
### Keywords: hplot
### ** Examples
## Four corner points of the scene, each drawn as a size-1 tetrahedron.
p <- pointsTetrahedra(x=c(100,100, 257, 257),
y=c(100,100, 257, 257),
z=c(100,257, 257, 100), size=1)
## Two diagonal line segments (one per matrix row), drawn in red as
## strings of small tetrahedra.
l <- linesTetrahedra(x=matrix(c(100,257,
100,257), nrow=2, byrow=TRUE),
y=matrix(c(100,257,
100,257), nrow=2, byrow=TRUE),
z=matrix(c(100,257,
257,100), nrow=2, byrow=TRUE),
lwd=0.4,
col="red")
|
## NOTE(review): rm(list = ls()) wipes the entire workspace of whoever
## sources this script; generally avoided in shared code.
rm(list = ls())
## Load shared model definitions, path configuration and helper functions.
source("./base-funcs/models_var.R")
source("./base-funcs/path_config.R")
source("./base-funcs/models_func.R")
# Re-compute out-of-sample prediction errors for previously fitted models.
#
# For one result file and each test-horizon cutoff in `maxTe`, the unseen
# residual (RMSE on x in (maxTrain, maxTest]) of every fitted model is
# recomputed and a renamed copy of the result CSV is written out.
#
# Args:
#   file:        path to a result CSV named ".../BL_<maxTrain>_<maxTest>_*.csv".
#   problem:     problem name understood by path_config().
#   old.maxTest: the maxTest value currently encoded in `file`'s name
#                (replaced when building each output file name).
#   maxTe:       numeric vector of new maxTest cutoffs to evaluate.
#
# Side effects: writes one CSV per element of maxTe; prints maxTrain.
# Requires dplyr (`%>%`) to be attached by the caller.
recalculate_error_func <- function(file, problem, old.maxTest, maxTe){
  library(stringr)  # hoisted: was re-attached on every loop iteration
  res.dat <- read.csv(file)
  rawdata.path <- path_config(problem)[1]
  restore.path <- path_config(problem)[2]
  ## maxTrain is the 2nd "_"-separated token of the path,
  ## e.g. ".../BL_100_100_x.csv" -> 100.
  tmp.string <- strsplit(file, "_") %>% unlist()
  maxTrain <- tmp.string[2] %>% as.numeric()
  cat("maxTrain: ", maxTrain, "\n")
  for (maxtest in seq_along(maxTe)) {   # seq_along: safe when maxTe is empty
    maxTest <- maxTe[maxtest]
    for (fi in seq_len(nrow(res.dat))) {  # seq_len: safe when res.dat is empty
      ## raw curve file name: "/" in the instance path becomes "_"
      rawdata.name <- gsub("/", "_", res.dat[fi, "instance_file"] %>% as.character)
      rawdata <- paste(rawdata.path, rawdata.name, ".csv",
                       sep = "") %>% read.csv()
      ## restrict the raw curve to x <= maxTest; skip instances with no data
      test.script <- which(rawdata$x <= maxTest)
      if (length(test.script) != 0) {
        raw.data <- rawdata[c(1:max(test.script)), ]
      } else {
        next
      }
      ## training portion: x <= maxTrain
      train.script <- which(rawdata$x <= maxTrain)
      if (length(train.script) != 0) {
        sample.index <- c(1:max(train.script))
      } else {
        next
      }
      xData <- raw.data$x[sample.index]
      yData <- raw.data$y[sample.index]
      train.data <- data.frame(x = xData, y = yData)
      ## unseen (test) portion beyond the training cutoff
      future.data.x <- raw.data$x[-sample.index]
      future.data.y <- raw.data$y[-sample.index]
      future.data <- data.frame(x = future.data.x, y = future.data.y)
      ## fitted parameters a..d stored alongside the model name
      par <- res.dat[fi, c("a", "b", "c", "d")] %>% as.numeric
      if (nrow(future.data) != 0) {
        ## `model` stores the NAME of a model object; eval(parse(...))
        ## resolves it so its $formula can be evaluated on future x.
        tmp.pre_y <- GetFormulaValue(eval(parse(text = res.dat$model[fi] %>% as.character))$formula,
                                     future.data$x, par)[, 2]
        residual <- xyRMSE(future.data$y, tmp.pre_y)
        res.dat[fi, "residuals"] <- residual
      }
    }
    ## rename "<maxTrain>_<old.maxTest>_" -> "<maxTrain>_<maxTest>_" and save
    restore.res.dat.csvname <- gsub(paste(maxTrain, "_", old.maxTest, "_", sep = ""),
                                    paste(maxTrain, "_", maxTest, "_", sep = ""),
                                    file)
    write.csv(res.dat, file = restore.res.dat.csvname, row.names = FALSE)
  }
}
library(dplyr)

# Collect every "BL_*.csv" result file under `path` and recompute its
# unseen errors for three maxTest horizons.
path <- "./modelresults/LM.maxsat.pre/maxTrain-maxTest/"
res.list <- list.files(path)
res.list <- paste(path, res.list[grep("BL_.+.csv", res.list)], sep = "")
# seq_along is safe when no file matches (1:length would yield c(1, 0));
# the loop variable is renamed so it no longer shadows stats::lm.
for (res.idx in seq_along(res.list)) {
  recalculate_error_func(res.list[res.idx], problem = "maxsat",
                         old.maxTest = 100, maxTe = c(100, 1000, 10000))
}
| /problembased-funcs/LM_pre_error_func.R | no_license | qiqi-helloworld/Numeric-Represents-on-Evolutionary-Fitness-Results | R | false | false | 2,704 | r | rm(list = ls())
## Load shared model definitions, path configuration and helper functions.
source("./base-funcs/models_var.R")
source("./base-funcs/path_config.R")
source("./base-funcs/models_func.R")
## Re-computes out-of-sample RMSE for previously fitted models: for each
## cutoff in maxTe, prediction error on x in (maxTrain, maxTest] is
## recomputed per row of the result file and a renamed CSV is written.
## Requires dplyr (`%>%`) attached by the caller.
recalculate_error_func <- function(file, problem, old.maxTest, maxTe){
res.dat <- read.csv(file)
rawdata.path <- path_config(problem)[1]
restore.path <- path_config(problem)[2]
## maxTrain is parsed from the 2nd "_"-separated token of the file path.
tmp.string <- strsplit(file, "_") %>% unlist()
maxTrain <- tmp.string[2]%>%as.numeric()
cat("maxTrain: ", maxTrain, "\n")
for(maxtest in 1:length(maxTe)){
maxTest <- maxTe[maxtest]
for(fi in 1:nrow(res.dat)){
rawdata.name <- gsub("/", "_", res.dat[fi, "instance_file"] %>% as.character)
rawdata <- paste(rawdata.path, rawdata.name, ".csv",
sep = "") %>% read.csv()
## keep raw curve up to maxTest; skip instances with no usable data
test.script <- which(rawdata$x <= maxTest)
if(length(test.script) !=0){
raw.data <- rawdata[c(1: max(test.script)), ]
} else {
next
}
#train data
train.script <- which(rawdata$x <= maxTrain)
if(length(train.script) != 0){
sample.index <- c(1: max(train.script))
} else {
next
}
xData <- raw.data$x[sample.index]
yData <- raw.data$y[sample.index]
train.data <- data.frame(x = xData, y = yData)
#unseen data
future.data.x <- raw.data$x[-sample.index]
future.data.y <- raw.data$y[-sample.index]
future.data <- data.frame(x = future.data.x, y = future.data.y)
par <- res.dat[fi, c("a", "b", "c", "d")] %>% as.numeric
if(nrow(future.data) != 0){
## `model` stores the NAME of a model object; eval(parse(...)) resolves
## it so its $formula can be evaluated on the unseen x values.
tmp.pre_y <- GetFormulaValue(eval(parse(text = res.dat$model[fi] %>% as.character))$formula, future.data$x, par)[, 2]
residual = xyRMSE(future.data$y, tmp.pre_y)
# cat(residual, "...fi:", fi, "\n")
res.dat[fi, "residuals"] <- residual
}
}
## NOTE(review): library(stringr) is re-attached on every maxTe iteration
## and no stringr function is visibly used here -- consider removing.
library(stringr)
## rename "<maxTrain>_<old.maxTest>_" -> "<maxTrain>_<maxTest>_" and save
restore.res.dat.csvname <- gsub(paste(maxTrain,"_", old.maxTest, "_", sep = ""), paste(maxTrain,"_", maxTest, "_", sep = ""), file)
write.csv(res.dat, file = restore.res.dat.csvname, row.names = FALSE)
}
}
library(dplyr)
## Collect every "BL_*.csv" result file and recompute its unseen errors for
## three maxTest horizons.
path <- "./modelresults/LM.maxsat.pre/maxTrain-maxTest/"
res.list <- list.files(path)
res.list <- paste(path, res.list[grep("BL_.+.csv", res.list)], sep = "")
## NOTE(review): 1:length(res.list) yields c(1, 0) when no file matches;
## the loop variable `lm` also shadows stats::lm locally.
for(lm in 1:length(res.list)){
recalculate_error_func(res.list[lm], problem = "maxsat", old.maxTest = 100, maxTe = c(100, 1000, 10000))
}
|
# Marcus Rosti
# mer3ef
# hw5
# 4.4, 4.5, 4.7, 4.9, 4.23, 4.24, 4.25
## Residual-diagnostics homework: fits linear models to several textbook
## data sets and inspects Q-Q plots, residual-vs-fitted plots, added-variable
## plots and (R-)studentized residuals for each exercise.
#################################################################################
#
# Data Sets and boilerplate
#
#################################################################################
library(gdata)
setwd("~/MSDS/Fall/Stat_6021/Homework/hw5")
mpg <-
read.xls("../../linear_regression_5e_data_sets/Appendices/data-table-B3.XLS")
hou <-
read.xls("../../linear_regression_5e_data_sets/Appendices/data-table-B4.XLS")
p21 <-
read.xls(
"~/MSDS/Fall/Stat_6021/linear_regression_5e_data_sets/Chapter 2/Problems/data-prob-2-10.XLS"
)
ozn <-
read.xls(
"~/MSDS/Fall/Stat_6021/linear_regression_5e_data_sets/Chapter 2/Problems/data-prob-2-13.XLS"
)
wsj <-
read.xls(
"~/MSDS/Fall/Stat_6021/linear_regression_5e_data_sets/Chapter 2/Problems/data-prob-2-18.XLS"
)
air <-
read.xls("../../linear_regression_5e_data_sets/Appendices/data-table-B-15.xls")
lyf <-
read.xls("../../linear_regression_5e_data_sets/Appendices/data-table-B-16.xls")
#################################################################################
#
# 4.4
#
#################################################################################
mpg.lm <- lm(y ~ x1 + x6, data = mpg)
mpg.lm.sum <- summary(mpg.lm)
### a
mpg.stdres = rstandard(mpg.lm)
# Ooo that looks interesting
# It seems like there's some underlying system that's making the residuals
# follow some sort of trending on the upper middle of the plot
# It's probably due to x6 being catagorical
qqnorm(mpg.stdres)
qqline(mpg.stdres)
### b
plot(mpg.lm$fitted.values,mpg.lm.sum$residuals)
# There looks to be a slight lack of heteroskedasticity
### c
## NOTE(review): avPlots() comes from the 'car' package, which is never
## loaded in this script -- it errors unless car is attached elsewhere.
avPlots(mpg.lm)
# Each variable adds explanatory power to the model
### d
rstandard(mpg.lm) # Studentized - These mitigate the effect of heteroskedasticity
plot(rstandard(mpg.lm))
rstudent(mpg.lm) # R-Studentized - Show the effect of removing x_i from the model
plot(rstudent(mpg.lm))
#################################################################################
#
# 4.5
#
#################################################################################
hou.lm <- lm(y ~ ., data = hou)
hou.lm.sum <- summary(hou.lm)
### a
hou.lm.stdres <- rstandard(hou.lm)
qqnorm(hou.lm.stdres)
qqline(hou.lm.stdres)
# Other than in the tails, the residuals are fairly normal
### b
plot(hou.lm$fitted.values,hou.lm.sum$residuals)
# There are so few variables here that it's hard to tell if there's a meaningful trend
### c
avPlots(hou.lm)
# It looks like x1 and x2 provide the most increase given the presense of the other variables
# x5 and x9 as well although less so than the others. x3, x6, x7 and x8 seem to add nothing
### d
## NOTE(review): studres() is from MASS (also not loaded here); the result
## is printed but not stored, unlike the rstandard() call in 4.4.
studres(hou.lm) # Studentized - These mitigate the effect of heteroskedasticity
plot(rstandard(hou.lm))
rstudent(hou.lm) # R-Studentized - Show the effect of removing x_i from the model
plot(rstudent(hou.lm))
#################################################################################
#
# 4.7
#
#################################################################################
p21.lm <- lm(sys.bp ~ weight ,data = p21)
p21.lm.sum <- summary(p21.lm)
### a
p21.lm.stdres <- rstandard(p21.lm)
qqnorm(p21.lm.stdres)
qqline(p21.lm.stdres)
# This looks absolutely non normal. There's a huge trend in these values that's violated
### b
plot(p21.lm$fitted.values,p21.lm.sum$residuals)
# These residuals show extensive heteroskedasticity
### c
plot(ts(p21)) # Plot it as a time series
# It looks like blood pressure and weight are well correlated
# But it's not linear
#################################################################################
#
# 4.9
#
#################################################################################
ozn.lm <- lm(days ~ index ,data = ozn)
ozn.lm.sum <- summary(ozn.lm)
### a
ozn.lm.stdres <- rstandard(ozn.lm)
qqnorm(ozn.lm.stdres)
qqline(ozn.lm.stdres)
# This looks very normal
### b
plot(ozn.lm$fitted.values,ozn.lm.sum$residuals)
# Can't really tell anything here. It looks fairly regular
### c
plot(ts(ozn)) # Plot it as a time series
# Doesn't show really much correlation that I can detect
#################################################################################
#
# 4.23
#
#################################################################################
wsj.lm <-
lm(Returned.Impressions.per.week..millions. ~ Amount.Spent..Millions. ,data = wsj)
wsj.lm.sum <- summary(wsj.lm)
### a
wsj.lm.stdres <- rstandard(wsj.lm)
qqnorm(wsj.lm.stdres)
qqline(wsj.lm.stdres)
# Fairly normal however the values on the right side of the graph appear to be outliers
### b
plot(wsj.lm$fitted.values,wsj.lm.sum$residuals)
# This looks really bad. All the fitted values are clustered to left hand side
# I would say this violates the regression assumptions
#################################################################################
#
# 4.24
#
#################################################################################
air.lm <- lm(MORT ~ . - City, data = air)
air.lm.sum <- summary(air.lm)
### a
air.lm.stdres <- rstandard(air.lm)
qqnorm(air.lm.stdres)
qqline(air.lm.stdres)
# The tails look non normal but near the mean it does look
### b
plot(air.lm$fitted.values,air.lm.sum$residuals)
# There's a little bit of heteroskedasticity but not enough to violate the
# assumptions of the regression line
#################################################################################
#
# 4.25
#
#################################################################################
lyf.lm <- lm(LifeExp ~ People.per.TV + People.per.Dr, data = lyf)
lyf.lm.sum <- summary(lyf.lm)
## NOTE(review): the names look swapped -- lyf.fem.lm is fit on
## LifeExpMale and lyf.mal.lm on LifeExpFemale; verify before reuse.
lyf.fem.lm <- lm(LifeExpMale ~ People.per.TV + People.per.Dr, data = lyf)
lyf.mal.lm <- lm(LifeExpFemale ~ People.per.TV + People.per.Dr, data = lyf)
lyf.fem.lm.sum <- summary(lyf.fem.lm)
lyf.mal.lm.sum <- summary(lyf.mal.lm)
### a
lyf.lm.stdres <- rstandard(lyf.lm)
lyf.fem.lm.stdres <- rstandard(lyf.fem.lm)
lyf.mal.lm.stdres <- rstandard(lyf.mal.lm)
qqnorm(lyf.lm.stdres)
qqline(lyf.lm.stdres)
qqnorm(lyf.fem.lm.stdres)
qqline(lyf.fem.lm.stdres)
qqnorm(lyf.mal.lm.stdres)
qqline(lyf.mal.lm.stdres)
# There's one big outlier that looks like it would have a lot of leverage on the regression line
# They all seem nearly the same
### b
plot(lyf.fem.lm$fitted.values,lyf.fem.lm.sum$residuals)
plot(lyf.mal.lm$fitted.values,lyf.mal.lm.sum$residuals)
# There's a few points that really stick out as outliers in both models
| /Fall/Stat_6021/Homework/hw5/hw5.R | no_license | Marcus-Rosti/MSDS | R | false | false | 6,551 | r | # Marcus Rosti
# mer3ef
# hw5
# 4.4, 4.5, 4.7, 4.9, 4.23, 4.24, 4.25
## Residual-diagnostics homework: fits linear models to several textbook
## data sets and inspects Q-Q plots, residual-vs-fitted plots, added-variable
## plots and (R-)studentized residuals for each exercise.
#################################################################################
#
# Data Sets and boilerplate
#
#################################################################################
library(gdata)
setwd("~/MSDS/Fall/Stat_6021/Homework/hw5")
mpg <-
read.xls("../../linear_regression_5e_data_sets/Appendices/data-table-B3.XLS")
hou <-
read.xls("../../linear_regression_5e_data_sets/Appendices/data-table-B4.XLS")
p21 <-
read.xls(
"~/MSDS/Fall/Stat_6021/linear_regression_5e_data_sets/Chapter 2/Problems/data-prob-2-10.XLS"
)
ozn <-
read.xls(
"~/MSDS/Fall/Stat_6021/linear_regression_5e_data_sets/Chapter 2/Problems/data-prob-2-13.XLS"
)
wsj <-
read.xls(
"~/MSDS/Fall/Stat_6021/linear_regression_5e_data_sets/Chapter 2/Problems/data-prob-2-18.XLS"
)
air <-
read.xls("../../linear_regression_5e_data_sets/Appendices/data-table-B-15.xls")
lyf <-
read.xls("../../linear_regression_5e_data_sets/Appendices/data-table-B-16.xls")
#################################################################################
#
# 4.4
#
#################################################################################
mpg.lm <- lm(y ~ x1 + x6, data = mpg)
mpg.lm.sum <- summary(mpg.lm)
### a
mpg.stdres = rstandard(mpg.lm)
# Ooo that looks interesting
# It seems like there's some underlying system that's making the residuals
# follow some sort of trending on the upper middle of the plot
# It's probably due to x6 being catagorical
qqnorm(mpg.stdres)
qqline(mpg.stdres)
### b
plot(mpg.lm$fitted.values,mpg.lm.sum$residuals)
# There looks to be a slight lack of heteroskedasticity
### c
## NOTE(review): avPlots() comes from the 'car' package, which is never
## loaded in this script -- it errors unless car is attached elsewhere.
avPlots(mpg.lm)
# Each variable adds explanatory power to the model
### d
rstandard(mpg.lm) # Studentized - These mitigate the effect of heteroskedasticity
plot(rstandard(mpg.lm))
rstudent(mpg.lm) # R-Studentized - Show the effect of removing x_i from the model
plot(rstudent(mpg.lm))
#################################################################################
#
# 4.5
#
#################################################################################
hou.lm <- lm(y ~ ., data = hou)
hou.lm.sum <- summary(hou.lm)
### a
hou.lm.stdres <- rstandard(hou.lm)
qqnorm(hou.lm.stdres)
qqline(hou.lm.stdres)
# Other than in the tails, the residuals are fairly normal
### b
plot(hou.lm$fitted.values,hou.lm.sum$residuals)
# There are so few variables here that it's hard to tell if there's a meaningful trend
### c
avPlots(hou.lm)
# It looks like x1 and x2 provide the most increase given the presense of the other variables
# x5 and x9 as well although less so than the others. x3, x6, x7 and x8 seem to add nothing
### d
## NOTE(review): studres() is from MASS, which is also not loaded here.
studres(hou.lm) # Studentized - These mitigate the effect of heteroskedasticity
plot(rstandard(hou.lm))
rstudent(hou.lm) # R-Studentized - Show the effect of removing x_i from the model
plot(rstudent(hou.lm))
#################################################################################
#
# 4.7
#
#################################################################################
p21.lm <- lm(sys.bp ~ weight ,data = p21)
p21.lm.sum <- summary(p21.lm)
### a
p21.lm.stdres <- rstandard(p21.lm)
qqnorm(p21.lm.stdres)
qqline(p21.lm.stdres)
# This looks absolutely non normal. There's a huge trend in these values that's violated
### b
plot(p21.lm$fitted.values,p21.lm.sum$residuals)
# These residuals show extensive heteroskedasticity
### c
plot(ts(p21)) # Plot it as a time series
# It looks like blood pressure and weight are well correlated
# But it's not linear
#################################################################################
#
# 4.9
#
#################################################################################
ozn.lm <- lm(days ~ index ,data = ozn)
ozn.lm.sum <- summary(ozn.lm)
### a
ozn.lm.stdres <- rstandard(ozn.lm)
qqnorm(ozn.lm.stdres)
qqline(ozn.lm.stdres)
# This looks very normal
### b
plot(ozn.lm$fitted.values,ozn.lm.sum$residuals)
# Can't really tell anything here. It looks fairly regular
### c
plot(ts(ozn)) # Plot it as a time series
# Doesn't show really much correlation that I can detect
#################################################################################
#
# 4.23
#
#################################################################################
wsj.lm <-
lm(Returned.Impressions.per.week..millions. ~ Amount.Spent..Millions. ,data = wsj)
wsj.lm.sum <- summary(wsj.lm)
### a
wsj.lm.stdres <- rstandard(wsj.lm)
qqnorm(wsj.lm.stdres)
qqline(wsj.lm.stdres)
# Fairly normal however the values on the right side of the graph appear to be outliers
### b
plot(wsj.lm$fitted.values,wsj.lm.sum$residuals)
# This looks really bad. All the fitted values are clustered to left hand side
# I would say this violates the regression assumptions
#################################################################################
#
# 4.24
#
#################################################################################
air.lm <- lm(MORT ~ . - City, data = air)
air.lm.sum <- summary(air.lm)
### a
air.lm.stdres <- rstandard(air.lm)
qqnorm(air.lm.stdres)
qqline(air.lm.stdres)
# The tails look non normal but near the mean it does look
### b
plot(air.lm$fitted.values,air.lm.sum$residuals)
# There's a little bit of heteroskedasticity but not enough to violate the
# assumptions of the regression line
#################################################################################
#
# 4.25
#
#################################################################################
lyf.lm <- lm(LifeExp ~ People.per.TV + People.per.Dr, data = lyf)
lyf.lm.sum <- summary(lyf.lm)
## NOTE(review): the names look swapped -- lyf.fem.lm is fit on
## LifeExpMale and lyf.mal.lm on LifeExpFemale; verify before reuse.
lyf.fem.lm <- lm(LifeExpMale ~ People.per.TV + People.per.Dr, data = lyf)
lyf.mal.lm <- lm(LifeExpFemale ~ People.per.TV + People.per.Dr, data = lyf)
lyf.fem.lm.sum <- summary(lyf.fem.lm)
lyf.mal.lm.sum <- summary(lyf.mal.lm)
### a
lyf.lm.stdres <- rstandard(lyf.lm)
lyf.fem.lm.stdres <- rstandard(lyf.fem.lm)
lyf.mal.lm.stdres <- rstandard(lyf.mal.lm)
qqnorm(lyf.lm.stdres)
qqline(lyf.lm.stdres)
qqnorm(lyf.fem.lm.stdres)
qqline(lyf.fem.lm.stdres)
qqnorm(lyf.mal.lm.stdres)
qqline(lyf.mal.lm.stdres)
# There's one big outlier that looks like it would have a lot of leverage on the regression line
# They all seem nearly the same
### b
plot(lyf.fem.lm$fitted.values,lyf.fem.lm.sum$residuals)
plot(lyf.mal.lm$fitted.values,lyf.mal.lm.sum$residuals)
# There's a few points that really stick out as outliers in both models
## AFL/valgrind fuzz regression input for the internal C++ routine
## myTAI:::cpp_omitMatrix: a 53-element zero age vector plus a 5x7
## expression matrix filled with extreme double values; the call is
## expected to complete without crashing.
testlist <- list(AgeVector = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ExpressionSet = structure(c(1.63064200184954e+212, 1.11558267938731e+157, 2.34532881467648e+59, 7.59955788945772e-256, 5.55398349536846e-07, 2.63833996739876e-240, 1.7158565271506e-24, 3.45241123006119e+96, 3.49016504742521e+83, 8.75421973838948e-251, 1.93656435398732e-237, 3.86617962359471e-308, 1.51457052685755e+122, 6.35453708406506e-226, 1.34149999500835e+258, 3.08695662079571e+274, 1.2778384355529e-304, 1.3429648484931e-231, 7085.87319714646, 4.26173394236936e+31, 3.05695536508135e-40, 2.80384286150823e-70, 4.98598164707396e+226, 2.67284746621031e-50, 1.268983112604e+270, 6.96927128326474e-92, 0.00315105907067092, 2.28082165029915e+210, 964215356953.314, 3.48762608111849e-233, 1.57025504623905e+177, 2.36697187507964e+42, 3.62903965781702e+225, 1.7243009391465e-142, 1.46182058652606e-281), .Dim = c(5L, 7L)))
## invoke the compiled routine with the fuzzed argument list
result <- do.call(myTAI:::cpp_omitMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_omitMatrix/AFL_cpp_omitMatrix/cpp_omitMatrix_valgrind_files/1615846197-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,091 | r | testlist <- list(AgeVector = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ExpressionSet = structure(c(1.63064200184954e+212, 1.11558267938731e+157, 2.34532881467648e+59, 7.59955788945772e-256, 5.55398349536846e-07, 2.63833996739876e-240, 1.7158565271506e-24, 3.45241123006119e+96, 3.49016504742521e+83, 8.75421973838948e-251, 1.93656435398732e-237, 3.86617962359471e-308, 1.51457052685755e+122, 6.35453708406506e-226, 1.34149999500835e+258, 3.08695662079571e+274, 1.2778384355529e-304, 1.3429648484931e-231, 7085.87319714646, 4.26173394236936e+31, 3.05695536508135e-40, 2.80384286150823e-70, 4.98598164707396e+226, 2.67284746621031e-50, 1.268983112604e+270, 6.96927128326474e-92, 0.00315105907067092, 2.28082165029915e+210, 964215356953.314, 3.48762608111849e-233, 1.57025504623905e+177, 2.36697187507964e+42, 3.62903965781702e+225, 1.7243009391465e-142, 1.46182058652606e-281), .Dim = c(5L, 7L)))
## duplicate invocation of the fuzzed routine (extraction artifact)
result <- do.call(myTAI:::cpp_omitMatrix,testlist)
str(result) |
## Explores how binning width affects "visual loss" and frequency loss when
## tiling (Games, Strike Outs) from the baseball Pitching table via dbData,
## comparing the "Random" (default) and "Standard" binning algorithms.
library(dbData)
library(ggplot2)
## NOTE(review): database credentials are hard-coded in source; move to a
## config file or environment variables before sharing.
connect <- dbConnect(dbDriver("MySQL"), user="2009Expo",
password="R R0cks", port=3306, dbname="baseball",
host="headnode.stat.iastate.edu")
pitch <- new("dataDB", co=connect, table="Pitching")
d1 <- dbData(pitch, vars=c( "G", "SO"))
## heat-map of log10 bin frequencies at the default binning
qplot(G,SO, fill=log10(Freq), data=d1, geom="tile")+
scale_fill_gradient2()
## loss at a few (G, SO) bin widths, default vs "standard" algorithm
lossCalc(d1, binning=c(2,5))
lossCalc(d1, binning=c(2,5), type="standard")
lossCalc(d1, binning=c(1,5))
lossCalc(d1, binning=c(1,5), type="standard")
d2 <- dbData(pitch, vars=c( "G", "SO"), binwidth=c(1,5))
qplot(G,SO, fill=log10(Freq), data=d2, geom="tile")+
scale_fill_gradient2()
lossCalc(d1, binning=c(1,10))
lossCalc(d2, binning=c(1,2))
d2 <- dbData(pitch, vars=c( "G", "SO"), binwidth=c(1,10))
qplot(G,SO, fill=log10(Freq), data=d2, geom="tile")+
scale_fill_gradient2()
## some more exploration of loss
## grid of candidate (G, SO) bin widths, evaluated in parallel
bins <- expand.grid(x=c(1:10, 15, 20), y=c(1:5, seq(10, 100, 10)))
library(multicore)
lossesRdm <- do.call("rbind", mclapply(1:nrow(bins), function(i) lossCalc(data=d1, binning=c(bins[i,1], bins[i,2]))))
lossesStd <- do.call("rbind", mclapply(1:nrow(bins), function(i) lossCalc(data=d1, binning=c(bins[i,1], bins[i,2]), type="standard")))
lossesRdm <- cbind(bins, lossesRdm)
lossesStd <- cbind(bins, lossesStd)
losses <- rbind(cbind(Algorithm="Random", lossesRdm), cbind(Algorithm="Standard", lossesStd))
## NOTE(review): absolute, user-specific output path -- not portable.
write.csv(losses, "/home/susan/Documents/R Projects/dbData/data/losses.csv", row.names=FALSE)
## visual loss vs bin width, one variable binned at a time
qplot(x=x, y=VisLoss.G*100, group=interaction(y, Algorithm), data=subset(losses, y==1), geom="line", colour=Algorithm, ylab="Percent Visual Loss", xlab="Bin Width (Games)")+ theme_bw() + theme(legend.position="bottom") + ggtitle("Visual Loss with increasing Bin Width: Baseball Games\n Strike Outs not binned")
qplot(x=y, y=VisLoss.SO*100, group=interaction(x, Algorithm), data=subset(losses, x==1), geom="line", colour=Algorithm, ylab="Percent Visual Loss", xlab="Bin Width (Strike Outs)") + theme_bw() + theme(legend.position="bottom") + ggtitle("Visual Loss with increasing Bin Width: Baseball Strike Outs\n Games not binned")
qplot(x=y, y=TotalLoss.LogFreq, group=interaction(x, Algorithm), data=losses, geom="line", colour=Algorithm, ylab="Frequency Loss", xlab="Bin Width (Strike Outs)")+ facet_grid(.~Algorithm) + scale_colour_discrete(guide="none")
## per-cell difference in frequency loss (Random - Standard); uses plyr::ddply
lossfreqdiff <- ddply(losses, .(x,y), summarise, diff = TotalLoss.LogFreq[Algorithm=="Random"]-TotalLoss.LogFreq[Algorithm=="Standard"])
qplot(x=x, y=diff, group=y, data=lossfreqdiff, geom="line", colour=y) +
scale_colour_gradient(low="#51A7EA", high="#132B43", trans="log")
# generally random has more frequency loss, but not always (when x=2 or y=2)
| /inst/Example-LossComparison2.R | no_license | heike/dbData | R | false | false | 2,741 | r | library(dbData)
## (Duplicate copy of the loss-comparison example; first line
## `library(dbData)` is on the preceding metadata-fused line.)
## Measures information loss from binning the baseball Pitching table's
## Games/Strike Outs counts under two binning algorithms.
## NOTE(review): DBI/RMySQL and plyr (`ddply` below) are not loaded here;
## they are presumably attached by dbData -- confirm before running.
## NOTE(review): credentials are embedded in the source.
library(ggplot2)
# Open the MySQL connection to the remote baseball database.
connect <- dbConnect(dbDriver("MySQL"), user="2009Expo",
password="R R0cks", port=3306, dbname="baseball",
host="headnode.stat.iastate.edu")
# dataDB proxy object for the Pitching table.
pitch <- new("dataDB", co=connect, table="Pitching")
# Unbinned Games-by-StrikeOuts frequency table.
d1 <- dbData(pitch, vars=c( "G", "SO"))
# Tile plot of log10 frequencies for the raw table.
qplot(G,SO, fill=log10(Freq), data=d1, geom="tile")+
scale_fill_gradient2()
# Random vs. standard loss at bin widths (G=2, SO=5).
lossCalc(d1, binning=c(2,5))
lossCalc(d1, binning=c(2,5), type="standard")
# Random vs. standard loss at bin widths (G=1, SO=5).
lossCalc(d1, binning=c(1,5))
lossCalc(d1, binning=c(1,5), type="standard")
# Server-side binned version (G width 1, SO width 5), visualized.
d2 <- dbData(pitch, vars=c( "G", "SO"), binwidth=c(1,5))
qplot(G,SO, fill=log10(Freq), data=d2, geom="tile")+
scale_fill_gradient2()
# Compare: binning raw data by (1,10) vs. re-binning d2 by (1,2);
# widths seem meant to multiply out to the same effective binning.
lossCalc(d1, binning=c(1,10))
lossCalc(d2, binning=c(1,2))
# Coarser server-side binning (SO width 10), visualized.
d2 <- dbData(pitch, vars=c( "G", "SO"), binwidth=c(1,10))
qplot(G,SO, fill=log10(Freq), data=d2, geom="tile")+
scale_fill_gradient2()
## some more exploration of loss
# All bin-width combinations to sweep: x = G widths, y = SO widths.
bins <- expand.grid(x=c(1:10, 15, 20), y=c(1:5, seq(10, 100, 10)))
# NOTE(review): multicore is deprecated in favor of base `parallel`.
library(multicore)
# Parallel sweep of lossCalc over every (x, y) pair, per algorithm.
lossesRdm <- do.call("rbind", mclapply(1:nrow(bins), function(i) lossCalc(data=d1, binning=c(bins[i,1], bins[i,2]))))
lossesStd <- do.call("rbind", mclapply(1:nrow(bins), function(i) lossCalc(data=d1, binning=c(bins[i,1], bins[i,2]), type="standard")))
# Tag rows with their bin widths, then combine both algorithms.
lossesRdm <- cbind(bins, lossesRdm)
lossesStd <- cbind(bins, lossesStd)
losses <- rbind(cbind(Algorithm="Random", lossesRdm), cbind(Algorithm="Standard", lossesStd))
# Save the sweep (absolute, user-specific path -- change before reuse).
write.csv(losses, "/home/susan/Documents/R Projects/dbData/data/losses.csv", row.names=FALSE)
# Visual-loss curves: vary one variable's bin width, hold the other at 1.
qplot(x=x, y=VisLoss.G*100, group=interaction(y, Algorithm), data=subset(losses, y==1), geom="line", colour=Algorithm, ylab="Percent Visual Loss", xlab="Bin Width (Games)")+ theme_bw() + theme(legend.position="bottom") + ggtitle("Visual Loss with increasing Bin Width: Baseball Games\n Strike Outs not binned")
qplot(x=y, y=VisLoss.SO*100, group=interaction(x, Algorithm), data=subset(losses, x==1), geom="line", colour=Algorithm, ylab="Percent Visual Loss", xlab="Bin Width (Strike Outs)") + theme_bw() + theme(legend.position="bottom") + ggtitle("Visual Loss with increasing Bin Width: Baseball Strike Outs\n Games not binned")
# Total frequency loss over the full sweep, one facet per algorithm.
qplot(x=y, y=TotalLoss.LogFreq, group=interaction(x, Algorithm), data=losses, geom="line", colour=Algorithm, ylab="Frequency Loss", xlab="Bin Width (Strike Outs)")+ facet_grid(.~Algorithm) + scale_colour_discrete(guide="none")
# Random-minus-standard frequency loss for each bin-width pair.
lossfreqdiff <- ddply(losses, .(x,y), summarise, diff = TotalLoss.LogFreq[Algorithm=="Random"]-TotalLoss.LogFreq[Algorithm=="Standard"])
qplot(x=x, y=diff, group=y, data=lossfreqdiff, geom="line", colour=y) +
scale_colour_gradient(low="#51A7EA", high="#132B43", trans="log")
# generally random has more frequency loss, but not always (when x=2 or y=2)
|
#' Estimate soil parameters from texture class or sand/silt/clay
#'
#' @param soil_type USDA Soil Class. See Details
#' @param sand percent sand
#' @param silt percent silt
#' @param clay percent clay
#' @param bulk soil bulk density (optional, kg m-3)
#'
#' @details
#' * Specify _either_ soil_type or sand/silt/clay. soil_type will be ignored if sand/silt/clay is provided
#' * If only 2 out of sand/silt/clay are provided, it will be assumed they sum to 100%
#' * Valid soil class options: "Sand","Loamy sand","Sandy loam","Silt loam","Loam",
#'                   "Sandy clay loam","Silty clay loam","Clayey loam",
#'                   "Sandy clay","Silty clay","Clay","Peat","Bedrock",
#'                   "Silt","Heavy clay","Clayey sand","Clayey silt"
#' * Based on ED2/R-utils/soilutils.r
#' * Hydraulics based on Cosby et al 1984, using table 4 and equation 1 (which is incorrect it should be saturated moisture potential over moisture potential)
#'
#' @return list of soil hydraulic and thermal parameters
#' @export
#' @examples
#' sand <- c(0.3,0.4,0.5)
#' clay <- c(0.3,0.3,0.3)
#' soil_params(sand=sand,clay=clay)
soil_params <- function(soil_type,sand,silt,clay,bulk){
  ## Load the soil look-up tables and constants shipped with PEcAn.data.land
  ## (soil.name, xsand.def, xclay.def, texture, fieldcp.K, grav, soilcp.MPa,
  ## soilwp.MPa, the *.hcap heat capacities, and the k*/ *.cond conductivity
  ## constants used below all come from this RData file).
  load(system.file("data/soil_class.RData",package = "PEcAn.data.land"))
  mysoil <- list()
  #---------------------------------------------------------------------------------------#
  #      Find soil class and sand, silt, and clay fractions.                               #
  #---------------------------------------------------------------------------------------#
  if (missing(sand) & missing(clay)){
    ## Insufficient texture data: infer the texture fractions from soil_type.
    if(missing(soil_type)) PEcAn.logger::logger.error("insufficient arguments")
    mysoil$soil_type <- soil_type
    mysoil$soil_n <- which(toupper(soil.name) == toupper(soil_type))
    # mysoil$key <- soil.key [mysoil$soil_n] ## turning off these abreviations since they lack a CF equivalent
    mysoil$volume_fraction_of_sand_in_soil <- xsand.def[soil_type]
    mysoil$volume_fraction_of_clay_in_soil <- xclay.def[soil_type]
    mysoil$volume_fraction_of_silt_in_soil <- 1. - mysoil$volume_fraction_of_sand_in_soil - mysoil$volume_fraction_of_clay_in_soil
  } else {
    ## Fill in whichever of sand/silt/clay is missing (assumed to sum to 1).
    if(missing(sand)){
      sand <- 1-silt-clay
    }else if(missing(silt)){
      silt <- 1-sand-clay
    }else if(missing(clay)){
      clay <- 1-sand-silt
    } else {
      ## Nothing missing: normalize to proportions. Dividing each component
      ## by the total works for both percent and proportion inputs, since
      ## normalization is scale-invariant.
      ## BUGFIX: the previous code rescaled `stot <- stot*100` for percent
      ## inputs, which made the resulting fractions 100x too small.
      stot <- sand+silt+clay
      sand <- sand/stot
      silt <- silt/stot
      clay <- clay/stot
    }
    mysoil$soil_n <- sclass(sand,clay)
    mysoil$soil_type <- soil.name[mysoil$soil_n]
    # mysoil$key <- soil.key [mysoil$soil_n]
    mysoil$volume_fraction_of_sand_in_soil <- sand
    mysoil$volume_fraction_of_clay_in_soil <- clay
    mysoil$volume_fraction_of_silt_in_soil <- 1. - mysoil$volume_fraction_of_sand_in_soil - mysoil$volume_fraction_of_clay_in_soil
  }
  #---------------------------------------------------------------------------------------#
  ## User-supplied bulk density takes precedence; otherwise it is filled in
  ## from the texture look-up table near the end of this function.
  if(!missing(bulk)) mysoil$soil_bulk_density <- bulk
  #---------------------------------------------------------------------------------------#
  #      Set up primary properties.                                                        #
  #---------------------------------------------------------------------------------------#
  for(z in which(mysoil$soil_n == 13)){
    #----- Bedrock.  Most things are zero, because it is an impermeable soil. -----------#
    mysoil$soil_hydraulic_b[z]                                           <- 0.
    mysoil$soil_water_potential_at_saturation[z]                         <- 0.
    mysoil$soil_hydraulic_conductivity_at_saturation[z]                  <- 0.
    mysoil$volume_fraction_of_water_in_soil_at_saturation[z]             <- 0.
    mysoil$volume_fraction_of_water_in_soil_at_field_capacity[z]         <- 0.
    mysoil$volume_fraction_of_condensed_water_in_dry_soil[z]             <- 0.
    mysoil$volume_fraction_of_condensed_water_in_soil_at_wilting_point[z] <- 0.
    mysoil$slcpd[z]                                                      <- 2130000.
    #------------------------------------------------------------------------------------#
  }
  for(z in which(mysoil$soil_n == 12)){
    #------------------------------------------------------------------------------------#
    #     Peat.  High concentration of organic matter.  Mineral soil equations don't     #
    # apply here.                                                                        #
    #------------------------------------------------------------------------------------#
    mysoil$soil_hydraulic_b[z]                                   <- 6.180000
    mysoil$soil_water_potential_at_saturation[z]                 <- -0.534564359
    mysoil$soil_hydraulic_conductivity_at_saturation[z]          <- 2.357930e-6
    mysoil$volume_fraction_of_water_in_soil_at_saturation[z]     <- 0.469200
    mysoil$volume_fraction_of_water_in_soil_at_field_capacity[z] <- 0.285709966
    mysoil$slcpd[z]                                              <- 874000.
    #------------------------------------------------------------------------------------#
  }
  for(z in which(!(mysoil$soil_n %in% c(12,13)))){
    #------------------------------------------------------------------------------------#
    #    Mineral soil.  Use the standard Cosby et al 1984 eqns                           #
    #------------------------------------------------------------------------------------#
    ## TO-DO: Cosby Table 4 has equations for soil property STANDARD DEVIATIONS in addition to means
    ## in future, upgrade to return these and do ensemble sampling
    # B exponent [unitless]
    mysoil$soil_hydraulic_b[z] <- 3.10 + 15.7*mysoil$volume_fraction_of_clay_in_soil[z] - 0.3*mysoil$volume_fraction_of_sand_in_soil[z]
    # Soil moisture potential at saturation [ m ]
    mysoil$soil_water_potential_at_saturation[z] <- -0.01 * (10.^(2.17 - 0.63*mysoil$volume_fraction_of_clay_in_soil[z] - 1.58*mysoil$volume_fraction_of_sand_in_soil[z]))
    # Hydraulic conductivity at saturation [ m/s ] (Cosby regression is in inch/hour)
    mysoil$soil_hydraulic_conductivity_at_saturation[z] <- udunits2::ud.convert(10.^(-0.60 + 1.26*mysoil$volume_fraction_of_sand_in_soil[z] - 0.64*mysoil$volume_fraction_of_clay_in_soil[z]),
                                                                                "inch/hour","meters/second")
    # Soil moisture at saturation [ m^3/m^3 ]
    mysoil$volume_fraction_of_water_in_soil_at_saturation[z] <- (50.5 - 14.2*mysoil$volume_fraction_of_sand_in_soil[z] - 3.7*mysoil$volume_fraction_of_clay_in_soil[z]) / 100.
    # Soil field capacity [ m^3/m^3 ]
    mysoil$volume_fraction_of_water_in_soil_at_field_capacity[z] <- mysoil$volume_fraction_of_water_in_soil_at_saturation[z] * ( fieldcp.K/mysoil$soil_hydraulic_conductivity_at_saturation[z])^ (1. / (2.*mysoil$soil_hydraulic_b[z]+3.))
  } ## end primary properties
  #---------------------------------------------------------------------------------------#
  #      Calculate the derived properties in case this is not bedrock.                     #
  #---------------------------------------------------------------------------------------#
  ## Initialize potential/moisture fields as full-length zero vectors so
  ## bedrock layers keep zeros and non-bedrock layers are overwritten below.
  ## BUGFIX: the wilting-point assignment target used to be misspelled
  ## ("olume_..."), creating a stray variable instead of the list entry, and
  ## the scalar 0.0 init could drop trailing bedrock entries.
  nsoil <- length(mysoil$soil_n)
  mysoil$slpotcp <- mysoil$volume_fraction_of_condensed_water_in_dry_soil <-
    mysoil$slpotwp <- mysoil$volume_fraction_of_condensed_water_in_soil_at_wilting_point <- rep(0.0, nsoil)
  for(z in which(!(mysoil$soil_n == 13))){
    # Dry soil capacity (at -3.1MPa) [ m^3/m^3 ]; slpotcp converts MPa to m of head
    mysoil$slpotcp[z] <- - soilcp.MPa * 1000. / grav
    mysoil$volume_fraction_of_condensed_water_in_dry_soil[z] <- mpot2smoist(mysoil$slpotcp[z],mysoil$soil_water_potential_at_saturation[z],mysoil$soil_hydraulic_b[z],mysoil$volume_fraction_of_water_in_soil_at_saturation[z])
    # Wilting point capacity (at -1.5MPa) [ m^3/m^3 ]
    mysoil$slpotwp[z] <- - soilwp.MPa * 1000. / grav
    mysoil$volume_fraction_of_condensed_water_in_soil_at_wilting_point[z] <- mpot2smoist(mysoil$slpotwp[z], mysoil$soil_water_potential_at_saturation[z],mysoil$soil_hydraulic_b[z],mysoil$volume_fraction_of_water_in_soil_at_saturation[z])
    # Water potential for field capacity [ m ]
    # mysoil$slpotfc <- smoist2mpot(mysoil$volume_fraction_of_water_in_soil_at_field_capacity, mysoil)
    #---------------------------------------------------------------------------------!
    #   Specific heat of dry soil  [ J/m3/K]                                          !
    # Here we take the volume average amongst silt, clay, and                         !
    # sand, and consider the contribution of air sitting in.  In order to keep it     !
    # simple, we assume that the air fraction won't change, although in reality its   !
    # contribution should be a function of soil moisture.  Here we use the amount of  !
    # air in case the soil moisture was halfway between dry air and saturated, so the !
    # error is not too biased.                                                        !
    #---------------------------------------------------------------------------------!
    mysoil$slcpd[z] <- (1. - mysoil$volume_fraction_of_water_in_soil_at_saturation[z]) *
      ( mysoil$volume_fraction_of_sand_in_soil[z] * sand.hcap +
          mysoil$volume_fraction_of_silt_in_soil[z] * silt.hcap +
          mysoil$volume_fraction_of_clay_in_soil[z] * clay.hcap ) +
      0.5 * (mysoil$volume_fraction_of_water_in_soil_at_saturation[z] -
               mysoil$volume_fraction_of_condensed_water_in_dry_soil[z]) * air.hcap
  }
  #---------------------------------------------------------------------------------------#
  #     Soil thermal conductivity. W/m/K                                                   #
  #                                                                                       #
  #     Thermal conductivity is the weighted average of thermal conductivities of         #
  # all materials, although a further weighting factor due to thermal gradient of         #
  # different materials.  We use the de Vries model described at:                          #
  #                                                                                       #
  # Camillo, P., T.J. Schmugge, 1981: A computer program for the simulation of heat       #
  #     and moisture flow in soils, NASA-TM-82121, Greenbelt, MD, United States.          #
  #                                                                                       #
  # Parlange, M.B., et al., 1998: Review of heat and water movement in field soils,       #
  #    Soil Till. Res., 47(1-2), 5-10.                                                    #
  #                                                                                       #
  #---------------------------------------------------------------------------------------#
  mysoil$thcond0 <- ( ksand * mysoil$volume_fraction_of_sand_in_soil * ( 1. - mysoil$volume_fraction_of_water_in_soil_at_saturation ) * sand.cond
                      + ksilt * mysoil$volume_fraction_of_silt_in_soil * ( 1. - mysoil$volume_fraction_of_water_in_soil_at_saturation ) * silt.cond
                      + kclay * mysoil$volume_fraction_of_clay_in_soil * ( 1. - mysoil$volume_fraction_of_water_in_soil_at_saturation ) * clay.cond
                      + kair  * mysoil$volume_fraction_of_water_in_soil_at_saturation * air.cond  )
  mysoil$thcond1 <- rep(h2o.cond - kair * air.cond,length=length(mysoil$thcond0))
  mysoil$thcond2 <- ( ksand * mysoil$volume_fraction_of_sand_in_soil * ( 1. - mysoil$volume_fraction_of_water_in_soil_at_saturation )
                      + ksilt * mysoil$volume_fraction_of_silt_in_soil * ( 1. - mysoil$volume_fraction_of_water_in_soil_at_saturation )
                      + kclay * mysoil$volume_fraction_of_clay_in_soil * ( 1. - mysoil$volume_fraction_of_water_in_soil_at_saturation )
                      + kair  * mysoil$volume_fraction_of_water_in_soil_at_saturation )
  mysoil$thcond3 <- rep(1. - kair,length=length(mysoil$thcond0))
  ## default soil thermal conductivity = dry
  mysoil$soil_thermal_conductivity <- ( mysoil$thcond0 + mysoil$thcond1 * mysoil$volume_fraction_of_condensed_water_in_dry_soil) /
    ( mysoil$thcond2 + mysoil$thcond3 * mysoil$volume_fraction_of_condensed_water_in_dry_soil)
  mysoil$soil_thermal_conductivity_at_saturation <- ( mysoil$thcond0 + mysoil$thcond1 * mysoil$volume_fraction_of_water_in_soil_at_saturation) /
    ( mysoil$thcond2 + mysoil$thcond3 * mysoil$volume_fraction_of_water_in_soil_at_saturation)
  #---------------------------------------------------------------------------------------#
  ## Final look-up values: albedo, bulk density (if not user-supplied), and soil density.
  ## NOTE(review): the conditions and the "across-soil average" comment look
  ## inverted relative to each other (classes > 13 take class-specific table
  ## entries while classes <= 13 get the median) -- confirm against the
  ## `texture` table in soil_class.RData before changing.
  for(z in which(mysoil$soil_n > 13)){
    mysoil$soil_albedo[z] <- texture$albdry[mysoil$soil_n[z]]
    if(missing(bulk)) mysoil$soil_bulk_density[z] <- texture$xrobulk[mysoil$soil_n[z]]
    mysoil$slden[z] <- texture$slden[mysoil$soil_n[z]]
  }
  for(z in which(mysoil$soil_n <= 13)){
    ## if lack class-specific values, use across-soil average
    mysoil$soil_albedo[z] <- median(texture$albdry)
    if(missing(bulk)) mysoil$soil_bulk_density[z] <- median(texture$xrobulk)
    mysoil$slden[z] <- median(texture$slden)
  }
  ## Conversions to standard variables: J/m3/K divided by kg/m3 -> J/kg/K
  mysoil$soil_thermal_capacity <- mysoil$slcpd / mysoil$soil_bulk_density
  ## drop variables that are only meaningful internally
  mysoil$slpotcp <- NULL
  mysoil$slpotwp <- NULL
  mysoil$slden <- NULL  ## not clear how this is different from bulk density in the look-up-table
  mysoil$slcpd <- NULL
  return(mysoil)
}#end function
#==========================================================================================#
#==========================================================================================#
#' Determine the soil class number from sand and clay fractions
#'
#' Classifies each soil layer on the (extended) texture triangle used by
#' ED2, based on the fractions of sand, clay, and (by difference) silt.
#'
#' @param sandfrac fraction of sand in the soil (0-1); may be a vector with
#'   one entry per layer
#' @param clayfrac fraction of clay in the soil (0-1); same length/recycling
#'   behavior as `sandfrac`
#'
#' @return numeric vector of soil class numbers (1-17); see `soil_params`
#'   for the corresponding class names
#' @export
#'
#' @examples
#' sclass(0.3,0.3)
sclass <- function(sandfrac,clayfrac){
  #----- Convert fractions to percentages; silt is the remainder. ------------------------#
  sand <- 100. * sandfrac
  clay <- 100. * clayfrac
  silt <- 100. - sand - clay
  #---------------------------------------------------------------------------------------#
  #----- Sanity check: every percentage must lie within [0, 100]. ------------------------#
  if (any(silt > 100.) | any(silt < 0.) | any(sand > 100.) |
      any(sand < 0.) | any(clay > 100.) | any(clay < 0.) ) {
    PEcAn.logger::logger.warn(" At least one of your percentages is screwy...")
    PEcAn.logger::logger.warn(paste("SAND <- ",sprintf("%.2f",sand),"%",sep=""))
    PEcAn.logger::logger.warn(paste("CLAY <- ",sprintf("%.2f",clay),"%",sep=""))
    PEcAn.logger::logger.warn(paste("SILT <- ",sprintf("%.2f",silt),"%",sep=""))
    PEcAn.logger::logger.severe("This soil doesn't fit into any category...")
  }
  ## Walk the texture triangle layer by layer.
  nlayer <- max(length(silt),length(clay),length(sand))
  mysoil <- rep(NA_real_, nlayer)  # preallocated (was grown from a scalar NA)
  for(z in seq_len(nlayer)){
    if(sand[z] > 85.0 + 0.5 * clay[z]) {
      mysoil[z] <- 1  #----- Sand. ------------------------------------------------------#
    }else if(sand[z] > 70.0 + clay[z]) {
      mysoil[z] <- 2  #----- Loamy sand. ------------------------------------------------#
    }else if((clay[z] <= 20.0 & sand[z] > 52.5) | (clay[z] <= 7.5 & silt[z] <= 50.0)) {
      mysoil[z] <- 3  #----- Sandy loam. ------------------------------------------------#
    }else if((clay[z] <= 27.5 & silt[z] > 50.0 & silt[z] <= 80.0) | (silt[z] > 80.0 & clay[z] > 12.5)) {
      mysoil[z] <- 4  #----- Silt loam. -------------------------------------------------#
    }else if(clay[z] > 7.5 & clay[z] <= 27.5 & silt[z] > 27.5 & silt[z] <= 50.0 & sand[z] <= 52.5) {
      mysoil[z] <- 5  #----- Loam. ------------------------------------------------------#
    }else if(clay[z] > 20.0 & clay[z] <= 35.0 & silt[z] <= 27.5 & sand[z] > 45.0) {
      mysoil[z] <- 6  #----- Sandy clay loam. -------------------------------------------#
    }else if(clay[z] > 27.5 & clay[z] <= 40.0 & sand[z] <= 20.0) {
      mysoil[z] <- 7  #----- Silty clay loam. -------------------------------------------#
    }else if(clay[z] > 27.5 & clay[z] <= 40.0 & sand[z] > 20.0 & sand[z] <= 45.0) {
      mysoil[z] <- 8  #----- Clayey loam. -----------------------------------------------#
    }else if(clay[z] > 35.0 & sand[z] > 45.0) {
      mysoil[z] <- 9  #----- Sandy clay. ------------------------------------------------#
    }else if(clay[z] > 40.0 & silt[z] > 40.0) {
      mysoil[z] <- 10 #----- Silty clay. ------------------------------------------------#
    }else if(clay[z] <= 70.0 & sand[z] <= 30.0 & silt[z] <= 30.0) {
      mysoil[z] <- 11 #----- Clay. ------------------------------------------------------#
    }else if( silt[z] > 80.0 & clay[z] <= 12.5) {
      mysoil[z] <- 14 #----- Silt. ------------------------------------------------------#
    }else if( clay[z] > 70.0) {
      mysoil[z] <- 15 #----- Heavy clay. ------------------------------------------------#
    }else if( clay[z] > 40.0 & sand[z] > 30.0 & sand[z] <= 45.0) {
      mysoil[z] <- 16 #----- Clayey sand. -----------------------------------------------#
    }else if( clay[z] > 40.0 & silt[z] > 30.0 & silt[z] <= 40.0) {
      mysoil[z] <- 17 #----- Clayey silt. -----------------------------------------------#
    }else{
      ## No triangle region matched: report the offending layer and abort.
      PEcAn.logger::logger.warn(paste("SAND <- ",sprintf("%.2f",sand[z]),"%",sep=""))
      PEcAn.logger::logger.warn(paste("CLAY <- ",sprintf("%.2f",clay[z]),"%",sep=""))
      PEcAn.logger::logger.warn(paste("SILT <- ",sprintf("%.2f",silt[z]),"%",sep=""))
      PEcAn.logger::logger.severe ("This soil doesn't fit into any category...")
    }#end if
  }
  return(mysoil)
}#end function
#' Convert a matric potential to a volumetric soil moisture
#'
#' Inverts the power-law water-retention relationship used in
#' \code{soil_params} (cf. Cosby et al. 1984):
#' theta = theta_sat * (psi / psi_sat)^(-1/b).
#' All arguments are vectorized (element-wise arithmetic only).
#'
#' @param mpot matric (water) potential, same units as
#'   \code{soil_water_potential_at_saturation} (in \code{soil_params} both
#'   are in m of head)
#' @param soil_water_potential_at_saturation matric potential at saturation
#' @param soil_hydraulic_b soil hydraulic "b" exponent (unitless)
#' @param volume_fraction_of_water_in_soil_at_saturation volumetric soil
#'   moisture at saturation (m3 m-3)
#'
#' @return volumetric soil moisture (m3 m-3)
#' @export
#'
#' @examples
#' mpot2smoist(-4, -1, 2, 0.5)
mpot2smoist <- function(mpot,soil_water_potential_at_saturation,soil_hydraulic_b,volume_fraction_of_water_in_soil_at_saturation){
  # Relative saturation from the retention curve, then scale by porosity.
  smfrac = ( mpot / soil_water_potential_at_saturation) ^ (-1. / soil_hydraulic_b)
  smoist = smfrac * volume_fraction_of_water_in_soil_at_saturation
  return(smoist)
}#end function | /modules/data.land/R/soil_utils.R | permissive | judgementc/pecan | R | false | false | 18,688 | r | #' Estimate soil parameters from texture class or sand/silt/clay
#'
#' @param soil_type USDA Soil Class. See Details
#' @param sand percent sand
#' @param silt percent silt
#' @param clay percent clay
#' @param bulk soil bulk density (optional, kg m-3)
#'
#' @details
#' * Specify _either_ soil_type or sand/silt/clay. soil_type will be ignored if sand/silt/clay is provided
#' * If only 2 out of sand/silt/clay are provided, it will be assumed they sum to 100%
#' * Valid soil class options: "Sand","Loamy sand","Sandy loam","Silt loam","Loam",
#' "Sandy clay loam","Silty clay loam","Clayey loam",
#' "Sandy clay","Silty clay","Clay","Peat","Bedrock",
#' "Silt","Heavy clay","Clayey sand","Clayey silt"
#' * Based on ED2/R-utils/soilutils.r
#' * Hydraulics based on Cosby et al 1984, using table 4 and equation 1 (which is incorrect it should be saturated moisture potential over moisture potential)
#'
#'
#' @return table of soil hydraulic and thermal parmeters
#' @export
#' @examples
#' sand <- c(0.3,0.4,0.5)
#' clay <- c(0.3,0.3,0.3)
#' soil_params(sand=sand,clay=clay)
soil_params <- function(soil_type, sand, silt, clay, bulk) {
  ## Load the package look-up tables: soil.name, xsand.def, xclay.def,
  ## texture, fieldcp.K, grav, soilcp.MPa, soilwp.MPa, the *.hcap/*.cond
  ## heat-capacity/conductivity constants and the k* weights all come
  ## from this file.
  load(system.file("data/soil_class.RData", package = "PEcAn.data.land"))
  mysoil <- list()
  #---------------------------------------------------------------------------#
  # Find soil class and sand, silt, and clay fractions.                        #
  #---------------------------------------------------------------------------#
  if (missing(sand) & missing(clay)) {
    ## insufficient texture data, infer fractions from soil_type
    ## NOTE(review): logger.error() does not halt execution, so when
    ## soil_type is also missing the next line still fails; logger.severe()
    ## may be the intended call -- confirm before changing.
    if (missing(soil_type)) PEcAn.logger::logger.error("insufficient arguments")
    mysoil$soil_type <- soil_type
    mysoil$soil_n <- which(toupper(soil.name) == toupper(soil_type))
    # mysoil$key <- soil.key[mysoil$soil_n] ## turning off these abbreviations since they lack a CF equivalent
    mysoil$volume_fraction_of_sand_in_soil <- xsand.def[soil_type]
    mysoil$volume_fraction_of_clay_in_soil <- xclay.def[soil_type]
    mysoil$volume_fraction_of_silt_in_soil <- 1. - mysoil$volume_fraction_of_sand_in_soil -
      mysoil$volume_fraction_of_clay_in_soil
  } else {
    ## If only two of sand/silt/clay are given, assume the three sum to 1.
    if (missing(sand)) {
      sand <- 1 - silt - clay
    } else if (missing(silt)) {
      silt <- 1 - sand - clay
    } else if (missing(clay)) {
      clay <- 1 - sand - silt
    } else {
      ## All three provided: normalize by their sum. Dividing by the total
      ## handles values given either as proportions (sum ~ 1) or as
      ## percentages (sum ~ 100). (Fixed: the previous "stot <- stot*100"
      ## rescaling made percentage inputs come out 100x too small.)
      stot <- sand + silt + clay
      sand <- sand / stot
      silt <- silt / stot
      clay <- clay / stot
    }
    mysoil$soil_n <- sclass(sand, clay)
    mysoil$soil_type <- soil.name[mysoil$soil_n]
    # mysoil$key <- soil.key[mysoil$soil_n]
    mysoil$volume_fraction_of_sand_in_soil <- sand
    mysoil$volume_fraction_of_clay_in_soil <- clay
    mysoil$volume_fraction_of_silt_in_soil <- 1. - sand - clay
  }
  nlayers <- length(mysoil$soil_n)  # one entry per soil layer
  #---------------------------------------------------------------------------#
  if (!missing(bulk)) mysoil$soil_bulk_density <- bulk
  #---------------------------------------------------------------------------#
  # Set up primary properties.                                                 #
  #---------------------------------------------------------------------------#
  for (z in which(mysoil$soil_n == 13)) {
    #----- Bedrock. Most things are zero, because it is an impermeable soil. -#
    mysoil$soil_hydraulic_b[z] <- 0.
    mysoil$soil_water_potential_at_saturation[z] <- 0.
    mysoil$soil_hydraulic_conductivity_at_saturation[z] <- 0.
    mysoil$volume_fraction_of_water_in_soil_at_saturation[z] <- 0.
    mysoil$volume_fraction_of_water_in_soil_at_field_capacity[z] <- 0.
    mysoil$volume_fraction_of_condensed_water_in_dry_soil[z] <- 0.
    mysoil$volume_fraction_of_condensed_water_in_soil_at_wilting_point[z] <- 0.
    mysoil$slcpd[z] <- 2130000.  # heat capacity of rock [J/m3/K]
  }
  for (z in which(mysoil$soil_n == 12)) {
    #----- Peat. High concentration of organic matter; the mineral-soil ------#
    #----- equations below do not apply here. --------------------------------#
    mysoil$soil_hydraulic_b[z] <- 6.180000
    mysoil$soil_water_potential_at_saturation[z] <- -0.534564359
    mysoil$soil_hydraulic_conductivity_at_saturation[z] <- 2.357930e-6
    mysoil$volume_fraction_of_water_in_soil_at_saturation[z] <- 0.469200
    mysoil$volume_fraction_of_water_in_soil_at_field_capacity[z] <- 0.285709966
    mysoil$slcpd[z] <- 874000.
  }
  for (z in which(!(mysoil$soil_n %in% c(12, 13)))) {
    #----- Mineral soil. Use the standard Cosby et al 1984 equations. --------#
    ## TO-DO: Cosby Table 4 also gives STANDARD DEVIATIONS of the soil
    ## properties in addition to means; in future, upgrade to return these
    ## and do ensemble sampling.
    # B exponent [unitless]
    mysoil$soil_hydraulic_b[z] <- 3.10 + 15.7 * mysoil$volume_fraction_of_clay_in_soil[z] -
      0.3 * mysoil$volume_fraction_of_sand_in_soil[z]
    # Soil moisture potential at saturation [m]
    mysoil$soil_water_potential_at_saturation[z] <- -0.01 *
      (10.^(2.17 - 0.63 * mysoil$volume_fraction_of_clay_in_soil[z] -
              1.58 * mysoil$volume_fraction_of_sand_in_soil[z]))
    # Hydraulic conductivity at saturation [m/s]; Cosby reports inch/hour
    mysoil$soil_hydraulic_conductivity_at_saturation[z] <- udunits2::ud.convert(
      10.^(-0.60 + 1.26 * mysoil$volume_fraction_of_sand_in_soil[z] -
             0.64 * mysoil$volume_fraction_of_clay_in_soil[z]),
      "inch/hour", "meters/second")
    # Soil moisture at saturation [m^3/m^3]
    mysoil$volume_fraction_of_water_in_soil_at_saturation[z] <-
      (50.5 - 14.2 * mysoil$volume_fraction_of_sand_in_soil[z] -
         3.7 * mysoil$volume_fraction_of_clay_in_soil[z]) / 100.
    # Soil field capacity [m^3/m^3]
    mysoil$volume_fraction_of_water_in_soil_at_field_capacity[z] <-
      mysoil$volume_fraction_of_water_in_soil_at_saturation[z] *
      (fieldcp.K / mysoil$soil_hydraulic_conductivity_at_saturation[z])^
      (1. / (2. * mysoil$soil_hydraulic_b[z] + 3.))
  } ## end primary properties
  #---------------------------------------------------------------------------#
  # Calculate the derived properties in case this is not bedrock.              #
  #---------------------------------------------------------------------------#
  ## Initialize the per-layer vectors with zeros (bedrock layers keep 0).
  ## Fixes two defects of the original chained assignment: (a) a typo
  ## ("olume_fraction_...") sent the wilting-point field to a stray local
  ## variable instead of the soil list, and (b) initializing with a length-1
  ## scalar left NA holes at bedrock positions once the loop below extended
  ## the vectors by index.
  mysoil$slpotcp <- rep(0.0, nlayers)
  mysoil$volume_fraction_of_condensed_water_in_dry_soil <- rep(0.0, nlayers)
  mysoil$slpotwp <- rep(0.0, nlayers)
  mysoil$volume_fraction_of_condensed_water_in_soil_at_wilting_point <- rep(0.0, nlayers)
  for (z in which(!(mysoil$soil_n == 13))) {
    # Dry soil capacity (at -3.1MPa) [m^3/m^3]; grav converts MPa -> m of water
    mysoil$slpotcp[z] <- -soilcp.MPa * 1000. / grav
    mysoil$volume_fraction_of_condensed_water_in_dry_soil[z] <-
      mpot2smoist(mysoil$slpotcp[z],
                  mysoil$soil_water_potential_at_saturation[z],
                  mysoil$soil_hydraulic_b[z],
                  mysoil$volume_fraction_of_water_in_soil_at_saturation[z])
    # Wilting point capacity (at -1.5MPa) [m^3/m^3]
    mysoil$slpotwp[z] <- -soilwp.MPa * 1000. / grav
    mysoil$volume_fraction_of_condensed_water_in_soil_at_wilting_point[z] <-
      mpot2smoist(mysoil$slpotwp[z],
                  mysoil$soil_water_potential_at_saturation[z],
                  mysoil$soil_hydraulic_b[z],
                  mysoil$volume_fraction_of_water_in_soil_at_saturation[z])
    # Water potential for field capacity [m]
    # mysoil$slpotfc <- smoist2mpot(mysoil$volume_fraction_of_water_in_soil_at_field_capacity, mysoil)
    #-------------------------------------------------------------------------#
    # Specific heat of dry soil [J/m3/K]: volume average amongst silt, clay,   #
    # and sand, plus the contribution of air sitting in the pores. To keep it  #
    # simple the air fraction is fixed at the halfway point between dry and    #
    # saturated, so the error is not too biased.                               #
    #-------------------------------------------------------------------------#
    mysoil$slcpd[z] <- (1. - mysoil$volume_fraction_of_water_in_soil_at_saturation[z]) *
      (mysoil$volume_fraction_of_sand_in_soil[z] * sand.hcap +
         mysoil$volume_fraction_of_silt_in_soil[z] * silt.hcap +
         mysoil$volume_fraction_of_clay_in_soil[z] * clay.hcap) +
      0.5 * (mysoil$volume_fraction_of_water_in_soil_at_saturation[z] -
               mysoil$volume_fraction_of_condensed_water_in_dry_soil[z]) * air.hcap
  }
  #---------------------------------------------------------------------------#
  # Soil thermal conductivity [W/m/K]: weighted average of the thermal         #
  # conductivities of all materials, with additional weights (k*) for the      #
  # thermal gradient of each material. de Vries model, as described in:        #
  #                                                                            #
  # Camillo, P., T.J. Schmugge, 1981: A computer program for the simulation    #
  #   of heat and moisture flow in soils, NASA-TM-82121, Greenbelt, MD.        #
  # Parlange, M.B., et al., 1998: Review of heat and water movement in field   #
  #   soils, Soil Till. Res., 47(1-2), 5-10.                                   #
  #---------------------------------------------------------------------------#
  mysoil$thcond0 <- (ksand * mysoil$volume_fraction_of_sand_in_soil * (1. - mysoil$volume_fraction_of_water_in_soil_at_saturation) * sand.cond
                     + ksilt * mysoil$volume_fraction_of_silt_in_soil * (1. - mysoil$volume_fraction_of_water_in_soil_at_saturation) * silt.cond
                     + kclay * mysoil$volume_fraction_of_clay_in_soil * (1. - mysoil$volume_fraction_of_water_in_soil_at_saturation) * clay.cond
                     + kair * mysoil$volume_fraction_of_water_in_soil_at_saturation * air.cond)
  mysoil$thcond1 <- rep(h2o.cond - kair * air.cond, length = length(mysoil$thcond0))
  mysoil$thcond2 <- (ksand * mysoil$volume_fraction_of_sand_in_soil * (1. - mysoil$volume_fraction_of_water_in_soil_at_saturation)
                     + ksilt * mysoil$volume_fraction_of_silt_in_soil * (1. - mysoil$volume_fraction_of_water_in_soil_at_saturation)
                     + kclay * mysoil$volume_fraction_of_clay_in_soil * (1. - mysoil$volume_fraction_of_water_in_soil_at_saturation)
                     + kair * mysoil$volume_fraction_of_water_in_soil_at_saturation)
  mysoil$thcond3 <- rep(1. - kair, length = length(mysoil$thcond0))
  ## default soil thermal conductivity = dry
  mysoil$soil_thermal_conductivity <-
    (mysoil$thcond0 + mysoil$thcond1 * mysoil$volume_fraction_of_condensed_water_in_dry_soil) /
    (mysoil$thcond2 + mysoil$thcond3 * mysoil$volume_fraction_of_condensed_water_in_dry_soil)
  mysoil$soil_thermal_conductivity_at_saturation <-
    (mysoil$thcond0 + mysoil$thcond1 * mysoil$volume_fraction_of_water_in_soil_at_saturation) /
    (mysoil$thcond2 + mysoil$thcond3 * mysoil$volume_fraction_of_water_in_soil_at_saturation)
  #---------------------------------------------------------------------------#
  ## Final per-class look-ups. (Fixed: the two loop conditions below were
  ## swapped, so classes > 13 indexed past the end of the texture table
  ## while the standard classes wrongly received across-soil medians,
  ## contradicting the original comments.)
  for (z in which(mysoil$soil_n <= 13)) {
    mysoil$soil_albedo[z] <- texture$albdry[mysoil$soil_n[z]]
    if (missing(bulk)) mysoil$soil_bulk_density[z] <- texture$xrobulk[mysoil$soil_n[z]]
    mysoil$slden[z] <- texture$slden[mysoil$soil_n[z]]
  }
  for (z in which(mysoil$soil_n > 13)) {
    ## if lacking class-specific values, use the across-soil average
    mysoil$soil_albedo[z] <- median(texture$albdry)
    if (missing(bulk)) mysoil$soil_bulk_density[z] <- median(texture$xrobulk)
    mysoil$slden[z] <- median(texture$slden)
  }
  ## Conversions to standard variables
  mysoil$soil_thermal_capacity <- mysoil$slcpd / mysoil$soil_bulk_density ## J/m3/K / [kg m-3] -> J/kg/K
  ## Drop variables that are only meaningful internally
  mysoil$slpotcp <- NULL
  mysoil$slpotwp <- NULL
  mysoil$slden <- NULL ## not clear how this is different from bulk density in the look-up-table
  mysoil$slcpd <- NULL
  return(mysoil)
}#end function
#==========================================================================================#
#==========================================================================================#
#' This function determines the soil class number based on the fraction of sand, clay, and silt
#'
#' Classifies each soil layer into one of 17 texture classes (the USDA
#' texture triangle extended with silt, heavy clay, clayey sand, and clayey
#' silt, as used by ED2). Classes 12 (peat) and 13 (bedrock) cannot be
#' inferred from texture alone and are never returned.
#'
#' @param sandfrac sand fraction (0-1); scalar or one value per soil layer
#' @param clayfrac clay fraction (0-1); scalar or one value per soil layer
#'
#' @return integer vector of soil class numbers (1-11 or 14-17), one per layer
#' @export
#'
#' @examples
#' sclass(0.3, 0.3)
sclass <- function(sandfrac, clayfrac) {
  #----- Define the percentage of sand, clay, and silt. ---------------------#
  sand <- 100. * sandfrac
  clay <- 100. * clayfrac
  silt <- 100. - sand - clay
  #----- Sanity check: all three percentages must lie within [0, 100]. ------#
  if (any(silt > 100.) | any(silt < 0.) | any(sand > 100.) |
      any(sand < 0.) | any(clay > 100.) | any(clay < 0.)) {
    PEcAn.logger::logger.warn(" At least one of your percentages is screwy...")
    PEcAn.logger::logger.warn(paste("SAND <- ", sprintf("%.2f", sand), "%", sep = ""))
    PEcAn.logger::logger.warn(paste("CLAY <- ", sprintf("%.2f", clay), "%", sep = ""))
    PEcAn.logger::logger.warn(paste("SILT <- ", sprintf("%.2f", silt), "%", sep = ""))
    PEcAn.logger::logger.severe("This soil doesn't fit into any category...")
  }
  ## Recycle scalar inputs so a single fraction can be paired with a vector
  ## (the original produced NA indexing failures in that case).
  nlayer <- max(length(silt), length(clay), length(sand))
  sand <- rep(sand, length.out = nlayer)
  clay <- rep(clay, length.out = nlayer)
  silt <- rep(silt, length.out = nlayer)
  ## Preallocate the result (was grown from a scalar NA).
  mysoil <- rep(NA_integer_, nlayer)
  #----- Walk the texture triangle; the first matching region wins. ---------#
  for (z in seq_len(nlayer)) {
    if (sand[z] > 85.0 + 0.5 * clay[z]) {
      mysoil[z] <- 1L   #----- Sand.
    } else if (sand[z] > 70.0 + clay[z]) {
      mysoil[z] <- 2L   #----- Loamy sand.
    } else if ((clay[z] <= 20.0 & sand[z] > 52.5) | (clay[z] <= 7.5 & silt[z] <= 50.0)) {
      mysoil[z] <- 3L   #----- Sandy loam.
    } else if ((clay[z] <= 27.5 & silt[z] > 50.0 & silt[z] <= 80.0) | (silt[z] > 80.0 & clay[z] > 12.5)) {
      mysoil[z] <- 4L   #----- Silt loam.
    } else if (clay[z] > 7.5 & clay[z] <= 27.5 & silt[z] > 27.5 & silt[z] <= 50.0 & sand[z] <= 52.5) {
      mysoil[z] <- 5L   #----- Loam.
    } else if (clay[z] > 20.0 & clay[z] <= 35.0 & silt[z] <= 27.5 & sand[z] > 45.0) {
      mysoil[z] <- 6L   #----- Sandy clay loam.
    } else if (clay[z] > 27.5 & clay[z] <= 40.0 & sand[z] <= 20.0) {
      mysoil[z] <- 7L   #----- Silty clay loam.
    } else if (clay[z] > 27.5 & clay[z] <= 40.0 & sand[z] > 20.0 & sand[z] <= 45.0) {
      mysoil[z] <- 8L   #----- Clayey loam.
    } else if (clay[z] > 35.0 & sand[z] > 45.0) {
      mysoil[z] <- 9L   #----- Sandy clay.
    } else if (clay[z] > 40.0 & silt[z] > 40.0) {
      mysoil[z] <- 10L  #----- Silty clay.
    } else if (clay[z] <= 70.0 & sand[z] <= 30.0 & silt[z] <= 30.0) {
      mysoil[z] <- 11L  #----- Clay.
    } else if (silt[z] > 80.0 & clay[z] <= 12.5) {
      mysoil[z] <- 14L  #----- Silt.
    } else if (clay[z] > 70.0) {
      mysoil[z] <- 15L  #----- Heavy clay.
    } else if (clay[z] > 40.0 & sand[z] > 30.0 & sand[z] <= 45.0) {
      mysoil[z] <- 16L  #----- Clayey sand.
    } else if (clay[z] > 40.0 & silt[z] > 30.0 & silt[z] <= 40.0) {
      mysoil[z] <- 17L  #----- Clayey silt.
    } else {
      ## No region matched: report this layer's percentages and abort.
      ## (Fixed: error strings previously contained a doubled apostrophe,
      ## "doesn''t".)
      PEcAn.logger::logger.warn(paste("SAND <- ", sprintf("%.2f", sand[z]), "%", sep = ""))
      PEcAn.logger::logger.warn(paste("CLAY <- ", sprintf("%.2f", clay[z]), "%", sep = ""))
      PEcAn.logger::logger.warn(paste("SILT <- ", sprintf("%.2f", silt[z]), "%", sep = ""))
      PEcAn.logger::logger.severe("This soil doesn't fit into any category...")
    }#end if
  }
  return(mysoil)
}#end function
#' Convert a matric potential to a soil moisture
#'
#' Inverts the Campbell (1974) / Cosby et al. (1984) water retention curve:
#' theta = theta_sat * (psi / psi_sat)^(-1/b).
#'
#' @param mpot water potential, in the same units and sign convention as
#'   \code{soil_water_potential_at_saturation} (typically m, negative)
#' @param soil_water_potential_at_saturation water potential at saturation
#' @param soil_hydraulic_b hydraulic "b" exponent (unitless)
#' @param volume_fraction_of_water_in_soil_at_saturation soil moisture at
#'   saturation (m3/m3)
#'
#' @return volumetric soil moisture (m3/m3)
#' @export
#'
#' @examples
#' mpot2smoist(-1, -0.5, 4.5, 0.45)
mpot2smoist <- function(mpot, soil_water_potential_at_saturation, soil_hydraulic_b,
                        volume_fraction_of_water_in_soil_at_saturation) {
  # Relative saturation from the retention curve, then scale by porosity.
  smfrac <- (mpot / soil_water_potential_at_saturation)^(-1. / soil_hydraulic_b)
  smoist <- smfrac * volume_fraction_of_water_in_soil_at_saturation
  return(smoist)
}#end function |
#' @useDynLib later
#' @import Rcpp
#' @importFrom Rcpp evalCpp
# Package load hook. Initializes the native (C++) side, then creates the
# global event loop and makes it the current loop. The id counter starts at
# 0L, so the global loop always receives id 0 (create_loop() special-cases
# that id when registering finalizers).
.onLoad <- function(...) {
  ensureInitialized()
  .globals$next_id <- 0L
  .globals$global_loop <- create_loop(autorun = FALSE)
  .globals$current_loop <- .globals$global_loop
}
# Package-private mutable state: the loop id counter plus handles to the
# global and currently-active event loops. Created at package build/load
# time, so it already exists when .onLoad runs.
.globals <- new.env(parent = emptyenv())
#' Private event loops
#'
#' Normally, later uses a global event loop for scheduling and running
#' functions. However, in some cases, it is useful to create a \emph{private}
#' event loop to schedule and execute tasks without disturbing the global event
#' loop. For example, you might have asynchronous code that queries a remote
#' data source, but want to wait for a full back-and-forth communication to
#' complete before continuing in your code -- from the caller's perspective, it
#' should behave like synchronous code, and not do anything with the global
#' event loop (which could run code unrelated to your operation). To do this,
#' you would run your asynchronous code using a private event loop.
#'
#' \code{create_loop} creates and returns a handle to a private event loop,
#' which is useful for scheduling tasks when you do not want to interfere
#' with the global event loop.
#'
#' \code{destroy_loop} destroys a private event loop.
#'
#' \code{exists_loop} reports whether an event loop exists -- that is, that it
#' has not been destroyed.
#'
#' \code{current_loop} returns the currently-active event loop. Any calls to
#' \code{\link{later}()} or \code{\link{run_now}()} will use the current loop by
#' default.
#'
#' \code{with_loop} evaluates an expression with a given event loop as the
#' currently-active loop.
#'
#' \code{with_temp_loop} creates an event loop, makes it the current loop, then
#' evaluates the given expression. Afterwards, the new event loop is destroyed.
#'
#' \code{global_loop} returns a handle to the global event loop.
#'
#'
#' @param loop A handle to an event loop.
#' @param expr An expression to evaluate.
#' @param autorun Should this event loop automatically be run when its parent
#' loop runs? Currently, only FALSE is allowed, but in the future TRUE will
#' be implemented and the default. Because in the future the default will
#' change, for now any code that calls \code{create_loop} must explicitly
#' pass in \code{autorun=FALSE}.
#' @rdname create_loop
#'
#' @export
create_loop <- function(autorun = NULL) {
  # Until autorun = TRUE is implemented, callers must opt in explicitly with
  # autorun = FALSE so the future change of default cannot surprise them.
  if (!identical(autorun, FALSE)) {
    stop("autorun must be set to FALSE (until TRUE is implemented).")
  }

  # Claim the next available registry id and advance the counter.
  this_id <- .globals$next_id
  .globals$next_id <- this_id + 1L
  createCallbackRegistry(this_id)

  # The handle is a bare environment (reference semantics) tagged with the
  # "event_loop" class; its id binding is locked so user code cannot repoint
  # the handle at a different registry.
  handle <- new.env(parent = emptyenv())
  class(handle) <- "event_loop"
  handle$id <- this_id
  lockBinding("id", handle)

  # Private loops (id != 0) are torn down automatically when their handle is
  # garbage collected. The global loop (id 0) is excluded: it is normally
  # kept alive by .globals$global_loop, and if the package is unloaded we do
  # not want the destroy_loop() finalizer to complain that the global loop
  # cannot be destroyed.
  if (this_id != 0L) {
    reg.finalizer(handle, destroy_loop)
  }

  handle
}
#' @rdname create_loop
#' @export
destroy_loop <- function(loop) {
  # The global loop must outlive the session; refuse to tear it down.
  if (identical(loop, global_loop())) {
    stop("Can't destroy global loop.")
  }
  # Drop the native callback registry backing this loop.
  deleteCallbackRegistry(loop$id)
}
#' @rdname create_loop
#' @export
exists_loop <- function(loop) {
  # TRUE while the loop's native registry has not been destroyed.
  existsCallbackRegistry(loop$id)
}
#' @rdname create_loop
#' @export
current_loop <- function() {
  # Handle to the loop that later()/run_now() use by default.
  .globals$current_loop
}
#' @rdname create_loop
#' @export
with_temp_loop <- function(expr) {
  # Fresh private loop for the duration of expr; the on.exit() handler
  # destroys it even if expr throws.
  loop <- create_loop(autorun = FALSE)
  on.exit(destroy_loop(loop))
  with_loop(loop, expr)
}
#' @rdname create_loop
#' @export
with_loop <- function(loop, expr) {
  # Swap the current loop only when needed, restoring the previous one on
  # exit (add = TRUE preserves any handlers already registered).
  if (!identical(loop, current_loop())) {
    old_loop <- .globals$current_loop
    on.exit(.globals$current_loop <- old_loop, add = TRUE)
    .globals$current_loop <- loop
  }
  # expr is a lazily-evaluated promise: force() evaluates it here, after the
  # loop swap above, so any later()/run_now() calls inside it see this loop.
  force(expr)
}
#' @rdname create_loop
#' @export
global_loop <- function() {
  # Handle to the loop shared by all code that doesn't use a private loop.
  .globals$global_loop
}
#' @export
format.event_loop <- function(x, ...) {
  # Two-line summary: a header plus the loop's registry id.
  sprintf("<event loop>\n id: %s", x$id)
}
#' @export
print.event_loop <- function(x, ...) {
  # Delegate the rendering to the format() generic, then emit verbatim.
  formatted <- format(x, ...)
  cat(formatted)
}
#' Executes a function later
#'
#' Schedule an R function or formula to run after a specified period of time.
#' Similar to JavaScript's `setTimeout` function. Like JavaScript, R is
#' single-threaded so there's no guarantee that the operation will run exactly
#' at the requested time, only that at least that much time will elapse.
#'
#' The mechanism used by this package is inspired by Simon Urbanek's
#' [background](https://github.com/s-u/background) package and similar code in
#' Rhttpd.
#'
#' @note
#' To avoid bugs due to reentrancy, by default, scheduled operations only run
#' when there is no other R code present on the execution stack; i.e., when R is
#' sitting at the top-level prompt. You can force past-due operations to run at
#' a time of your choosing by calling [run_now()].
#'
#' Error handling is not particularly well-defined and may change in the future.
#' options(error=browser) should work and errors in `func` should generally not
#' crash the R process, but not much else can be said about it at this point.
#' If you must have specific behavior occur in the face of errors, put error
#' handling logic inside of `func`.
#'
#' @param func A function or formula (see [rlang::as_function()]).
#' @param delay Number of seconds in the future to delay execution. There is no
#' guarantee that the function will be executed at the desired time, but it
#' should not execute earlier.
#' @param loop A handle to an event loop. Defaults to the currently-active loop.
#'
#' @return A function, which, if invoked, will cancel the callback. The
#' function will return \code{TRUE} if the callback was successfully
#' cancelled and \code{FALSE} if not (this occurs if the callback has
#' executed or has been cancelled already).
#'
#' @examples
#' # Example of formula style
#' later(~cat("Hello from the past\n"), 3)
#'
#' # Example of function style
#' later(function() {
#' print(summary(cars))
#' }, 2)
#'
#' @export
later <- function(func, delay = 0, loop = current_loop()) {
  # Accept either a function or an rlang-style formula (~ expr).
  f <- rlang::as_function(func)
  # Hand the callback to the native scheduler; it returns a callback id.
  id <- execLater(f, delay, loop$id)
  # Return (invisibly) a closure that cancels this specific callback.
  invisible(create_canceller(id, loop))
}
# Returns a function that will cancel a callback with the given ID. If the
# callback has already been executed or canceled, then the function has no
# effect.
create_canceller <- function(id, loop) {
  function() {
    # cancel() reports whether the callback was still pending.
    invisible(cancel(id, loop$id))
  }
}
#' Execute scheduled operations
#'
#' Normally, operations scheduled with [later()] will not execute unless/until
#' no other R code is on the stack (i.e. at the top-level). If you need to run
#' blocking R code for a long time and want to allow scheduled operations to run
#' at well-defined points of your own operation, you can call `run_now()` at
#' those points and any operations that are due to run will do so.
#'
#' If one of the callbacks throws an error, the error will _not_ be caught, and
#' subsequent callbacks will not be executed (until `run_now()` is called again,
#' or control returns to the R prompt). You must use your own
#' [tryCatch][base::conditions] if you want to handle errors.
#'
#' @param timeoutSecs Wait (block) for up to this number of seconds waiting for
#' an operation to be ready to run. If `0`, then return immediately if there
#' are no operations that are ready to run. If `Inf` or negative, then wait as
#' long as it takes (if none are scheduled, then this will block forever).
#' @param all If `FALSE`, `run_now()` will execute at most one scheduled
#' operation (instead of all eligible operations). This can be useful in cases
#' where you want to interleave scheduled operations with your own logic.
#' @param loop A handle to an event loop. Defaults to the currently-active loop.
#'
#' @return A logical indicating whether any callbacks were actually run.
#'
#' @export
run_now <- function(timeoutSecs = 0L, all = TRUE, loop = current_loop()) {
  # Validate the type *before* comparing to Inf. With the original order, a
  # NULL or zero-length timeout made `if (timeoutSecs == Inf)` fail with an
  # unrelated "argument is of length zero" error instead of this message.
  if (!is.numeric(timeoutSecs))
    stop("timeoutSecs must be numeric")
  # The native side interprets a negative timeout as "wait indefinitely".
  if (timeoutSecs == Inf) {
    timeoutSecs <- -1
  }
  # Execute due callbacks with `loop` installed as the current loop.
  with_loop(loop,
    invisible(execCallbacks(timeoutSecs, all, loop$id))
  )
}
#' Check if later loop is empty
#'
#' Returns true if there are currently no callbacks that are scheduled to
#' execute in the present or future.
#'
#' @inheritParams create_loop
#' @keywords internal
#' @export
loop_empty <- function(loop = current_loop()) {
  # Ask the native registry whether anything is queued for this loop.
  idle(loop$id)
}
#' Relative time to next scheduled operation
#'
#' Returns the duration between now and the earliest operation that is currently
#' scheduled, in seconds. If the operation is in the past, the value will be
#' negative. If no operation is currently scheduled, the value will be `Inf`.
#'
#' @inheritParams create_loop
#' @export
next_op_secs <- function(loop = current_loop()) {
  nextOpSecs(loop$id)
}
#' Get the contents of an event loop, as a list
#'
#' This function is for debugging only.
#'
#' @keywords internal
list_queue <- function(loop = current_loop()) {
  list_queue_(loop$id)
}
| /R/later.R | permissive | atheriel/later | R | false | false | 9,227 | r | #' @useDynLib later
#' @import Rcpp
#' @importFrom Rcpp evalCpp
.onLoad <- function(...) {
ensureInitialized()
.globals$next_id <- 0L
.globals$global_loop <- create_loop(autorun = FALSE)
.globals$current_loop <- .globals$global_loop
}
.globals <- new.env(parent = emptyenv())
#' Private event loops
#'
#' Normally, later uses a global event loop for scheduling and running
#' functions. However, in some cases, it is useful to create a \emph{private}
#' event loop to schedule and execute tasks without disturbing the global event
#' loop. For example, you might have asynchronous code that queries a remote
#' data source, but want to wait for a full back-and-forth communication to
#' complete before continuing in your code -- from the caller's perspective, it
#' should behave like synchronous code, and not do anything with the global
#' event loop (which could run code unrelated to your operation). To do this,
#' you would run your asynchronous code using a private event loop.
#'
#' \code{create_loop} creates and returns a handle to a private event loop,
#' which is useful for scheduling tasks when you do not want to interfere
#' with the global event loop.
#'
#' \code{destroy_loop} destroys a private event loop.
#'
#' \code{exists_loop} reports whether an event loop exists -- that is, that it
#' has not been destroyed.
#'
#' \code{current_loop} returns the currently-active event loop. Any calls to
#' \code{\link{later}()} or \code{\link{run_now}()} will use the current loop by
#' default.
#'
#' \code{with_loop} evaluates an expression with a given event loop as the
#' currently-active loop.
#'
#' \code{with_temp_loop} creates an event loop, makes it the current loop, then
#' evaluates the given expression. Afterwards, the new event loop is destroyed.
#'
#' \code{global_loop} returns a handle to the global event loop.
#'
#'
#' @param loop A handle to an event loop.
#' @param expr An expression to evaluate.
#' @param autorun Should this event loop automatically be run when its parent
#' loop runs? Currently, only FALSE is allowed, but in the future TRUE will
#' be implemented and the default. Because in the future the default will
#' change, for now any code that calls \code{create_loop} must explicitly
#' pass in \code{autorun=FALSE}.
#' @rdname create_loop
#'
#' @export
create_loop <- function(autorun = NULL) {
if (!identical(autorun, FALSE)) {
stop("autorun must be set to FALSE (until TRUE is implemented).")
}
id <- .globals$next_id
.globals$next_id <- id + 1L
createCallbackRegistry(id)
# Create the handle for the loop
loop <- new.env(parent = emptyenv())
class(loop) <- "event_loop"
loop$id <- id
lockBinding("id", loop)
if (id != 0L) {
# Automatically destroy the loop when the handle is GC'd (unless it's the
# global loop.) The global loop handle never gets GC'd under normal
# circumstances because .globals$global_loop refers to it. However, if the
# package is unloaded it can get GC'd, and we don't want the
# destroy_loop() finalizer to give an error message about not being able
# to destroy the global loop.
reg.finalizer(loop, destroy_loop)
}
loop
}
#' @rdname create_loop
#' @export
destroy_loop <- function(loop) {
if (identical(loop, global_loop())) {
stop("Can't destroy global loop.")
}
deleteCallbackRegistry(loop$id)
}
#' @rdname create_loop
#' @export
exists_loop <- function(loop) {
existsCallbackRegistry(loop$id)
}
#' @rdname create_loop
#' @export
current_loop <- function() {
.globals$current_loop
}
#' @rdname create_loop
#' @export
with_temp_loop <- function(expr) {
loop <- create_loop(autorun = FALSE)
on.exit(destroy_loop(loop))
with_loop(loop, expr)
}
#' @rdname create_loop
#' @export
with_loop <- function(loop, expr) {
if (!identical(loop, current_loop())) {
old_loop <- .globals$current_loop
on.exit(.globals$current_loop <- old_loop, add = TRUE)
.globals$current_loop <- loop
}
force(expr)
}
#' @rdname create_loop
#' @export
global_loop <- function() {
.globals$global_loop
}
#' @export
format.event_loop <- function(x, ...) {
paste0("<event loop>\n id: ", x$id)
}
#' @export
print.event_loop <- function(x, ...) {
cat(format(x, ...))
}
#' Executes a function later
#'
#' Schedule an R function or formula to run after a specified period of time.
#' Similar to JavaScript's `setTimeout` function. Like JavaScript, R is
#' single-threaded so there's no guarantee that the operation will run exactly
#' at the requested time, only that at least that much time will elapse.
#'
#' The mechanism used by this package is inspired by Simon Urbanek's
#' [background](https://github.com/s-u/background) package and similar code in
#' Rhttpd.
#'
#' @note
#' To avoid bugs due to reentrancy, by default, scheduled operations only run
#' when there is no other R code present on the execution stack; i.e., when R is
#' sitting at the top-level prompt. You can force past-due operations to run at
#' a time of your choosing by calling [run_now()].
#'
#' Error handling is not particularly well-defined and may change in the future.
#' options(error=browser) should work and errors in `func` should generally not
#' crash the R process, but not much else can be said about it at this point.
#' If you must have specific behavior occur in the face of errors, put error
#' handling logic inside of `func`.
#'
#' @param func A function or formula (see [rlang::as_function()]).
#' @param delay Number of seconds in the future to delay execution. There is no
#' guarantee that the function will be executed at the desired time, but it
#' should not execute earlier.
#' @param loop A handle to an event loop. Defaults to the currently-active loop.
#'
#' @return A function, which, if invoked, will cancel the callback. The
#' function will return \code{TRUE} if the callback was successfully
#' cancelled and \code{FALSE} if not (this occurs if the callback has
#' executed or has been cancelled already).
#'
#' @examples
#' # Example of formula style
#' later(~cat("Hello from the past\n"), 3)
#'
#' # Example of function style
#' later(function() {
#' print(summary(cars))
#' }, 2)
#'
#' @export
later <- function(func, delay = 0, loop = current_loop()) {
f <- rlang::as_function(func)
id <- execLater(f, delay, loop$id)
invisible(create_canceller(id, loop))
}
# Returns a function that will cancel a callback with the given ID. If the
# callback has already been executed or canceled, then the function has no
# effect.
create_canceller <- function(id, loop) {
function() {
invisible(cancel(id, loop$id))
}
}
#' Execute scheduled operations
#'
#' Normally, operations scheduled with [later()] will not execute unless/until
#' no other R code is on the stack (i.e. at the top-level). If you need to run
#' blocking R code for a long time and want to allow scheduled operations to run
#' at well-defined points of your own operation, you can call `run_now()` at
#' those points and any operations that are due to run will do so.
#'
#' If one of the callbacks throws an error, the error will _not_ be caught, and
#' subsequent callbacks will not be executed (until `run_now()` is called again,
#' or control returns to the R prompt). You must use your own
#' [tryCatch][base::conditions] if you want to handle errors.
#'
#' @param timeoutSecs Wait (block) for up to this number of seconds waiting for
#' an operation to be ready to run. If `0`, then return immediately if there
#' are no operations that are ready to run. If `Inf` or negative, then wait as
#' long as it takes (if none are scheduled, then this will block forever).
#' @param all If `FALSE`, `run_now()` will execute at most one scheduled
#' operation (instead of all eligible operations). This can be useful in cases
#' where you want to interleave scheduled operations with your own logic.
#' @param loop A handle to an event loop. Defaults to the currently-active loop.
#'
#' @return A logical indicating whether any callbacks were actually run.
#'
#' @export
run_now <- function(timeoutSecs = 0L, all = TRUE, loop = current_loop()) {
  # Validate the type *before* comparing to Inf. With the original order, a
  # NULL or zero-length timeout made `if (timeoutSecs == Inf)` fail with an
  # unrelated "argument is of length zero" error instead of this message.
  if (!is.numeric(timeoutSecs))
    stop("timeoutSecs must be numeric")
  # The native side interprets a negative timeout as "wait indefinitely".
  if (timeoutSecs == Inf) {
    timeoutSecs <- -1
  }
  # Execute due callbacks with `loop` installed as the current loop.
  with_loop(loop,
    invisible(execCallbacks(timeoutSecs, all, loop$id))
  )
}
#' Check if later loop is empty
#'
#' Returns true if there are currently no callbacks that are scheduled to
#' execute in the present or future.
#'
#' @inheritParams create_loop
#' @keywords internal
#' @export
loop_empty <- function(loop = current_loop()) {
idle(loop$id)
}
#' Relative time to next scheduled operation
#'
#' Returns the duration between now and the earliest operation that is currently
#' scheduled, in seconds. If the operation is in the past, the value will be
#' negative. If no operation is currently scheduled, the value will be `Inf`.
#'
#' @inheritParams create_loop
#' @export
next_op_secs <- function(loop = current_loop()) {
nextOpSecs(loop$id)
}
#' Get the contents of an event loop, as a list
#'
#' This function is for debugging only.
#'
#' @keywords internal
list_queue <- function(loop = current_loop()) {
# Debug-only helper: snapshot of the loop's scheduled callbacks as a list.
list_queue_(loop$id)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mdc.R
\name{mdc}
\alias{mdc}
\title{Martingale Difference Correlation}
\usage{
mdc(X, Y, center = "U")
}
\arguments{
\item{X}{A vector, matrix or data frame, where rows represent samples, and columns represent variables.}
\item{Y}{A vector, matrix or data frame, where rows represent samples, and columns represent variables.}
\item{center}{The approach for centering, including
\itemize{
\item \code{U}: U-centering which leads to an unbiased estimator;
\item \code{D}: double-centering which leads to a biased estimator.
}}
}
\value{
\code{mdc} returns the squared martingale difference correlation of \code{Y} given \code{X}.
}
\description{
\code{mdc} measures conditional mean dependence of \code{Y} given \code{X},
where each contains one variable (univariate) or more variables (multivariate).
}
\examples{
# X, Y are 10 x 2 matrices with 10 samples and 2 variables
X <- matrix(rnorm(10 * 2), 10, 2)
Y <- matrix(rnorm(10 * 2), 10, 2)
mdc(X, Y, center = "U")
mdc(X, Y, center = "D")
}
\references{
Shao, X., and Zhang, J. (2014).
Martingale difference correlation and its use in high-dimensional variable screening.
Journal of the American Statistical Association, 109(507), 1302-1318.
\url{http://dx.doi.org/10.1080/01621459.2014.887012}.

Park, T., Shao, X., and Yao, S. (2015).
Partial martingale difference correlation.
Electronic Journal of Statistics, 9(1), 1492-1517.
\url{http://dx.doi.org/10.1214/15-EJS1047}.
}
| /man/mdc.Rd | no_license | zejin/EDMeasure | R | false | true | 1,525 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mdc.R
\name{mdc}
\alias{mdc}
\title{Martingale Difference Correlation}
\usage{
mdc(X, Y, center = "U")
}
\arguments{
\item{X}{A vector, matrix or data frame, where rows represent samples, and columns represent variables.}
\item{Y}{A vector, matrix or data frame, where rows represent samples, and columns represent variables.}
\item{center}{The approach for centering, including
\itemize{
\item \code{U}: U-centering which leads to an unbiased estimator;
\item \code{D}: double-centering which leads to a biased estimator.
}}
}
\value{
\code{mdc} returns the squared martingale difference correlation of \code{Y} given \code{X}.
}
\description{
\code{mdc} measures conditional mean dependence of \code{Y} given \code{X},
where each contains one variable (univariate) or more variables (multivariate).
}
\examples{
# X, Y are 10 x 2 matrices with 10 samples and 2 variables
X <- matrix(rnorm(10 * 2), 10, 2)
Y <- matrix(rnorm(10 * 2), 10, 2)
mdc(X, Y, center = "U")
mdc(X, Y, center = "D")
}
\references{
Shao, X., and Zhang, J. (2014).
Martingale difference correlation and its use in high-dimensional variable screening.
Journal of the American Statistical Association, 109(507), 1302-1318.
\url{http://dx.doi.org/10.1080/01621459.2014.887012}.
Park, T., Shao, X., and Yao, S. (2015).
Partial martingale difference correlation.
Electronic Journal of Statistics, 9(1), 1492-1517.
\url{http://dx.doi.org/10.1214/15-EJS1047}.
}
|
library(ggplot2)
# Histogram of the mpg (miles per gallon) column of the built-in mtcars data.
ggplot(mtcars, aes(mpg)) + geom_histogram()
| /ggplothistogram.R | no_license | ashishjsharda/R | R | false | false | 61 | r | library(ggplot2)
ggplot(mtcars, aes(mpg)) + geom_histogram()
|
#' @title Estimate volume for stem and sections
#' @description Estimate volume for a complete stem from bottom to tip or
#' for a section defined by lower and upper diameter or height. Variances for
#' estimated volumes are calculated.
#' @param Hm Numeric vector of stem heights (m) along which diameter
#' measurements were taken for calibration. Can be of length 1. Must be of same
#' length as \code{Dm}.
#' @param Dm Numeric vector of diameter measurements (cm) taken for calibration.
#' Can be of length 1. Must be of same length as \code{Hm}.
#' @param mHt Scalar. Tree height (m).
#' @param sHt Scalar. Standard deviation of stem height. Can be 0 if height was
#' measured without error.
#' @param A Numeric scalar defining the lower threshold of a stem section for
#' volume estimation. Depends on \code{iDH}. If \code{iDH} = "D", a diameter
#' (cm), if \code{iDH} = "H", a height (m). If NULL, section starts at lowest
#' point.
#' @param B Numeric scalar defining the upper threshold of a stem section for
#' volume estimation. Depends on \code{iDH}. If \code{iDH} = "D", a diameter
#' (cm), if \code{iDH} = "H", a height (m). If NULL, section ends at tip.
#' @param iDH Character scalar. Either "D" or "H". Type of threshold for section
#' volume estimation. See \code{A} or \code{B}.
#' @param par.lme List of taper model parameters obtained by
#' \code{\link{TapeR_FIT_LME.f}}.
#' @param R0 indicator whether taper curve should interpolate measurements
#' @param IA Logic scalar. If TRUE, variance calculation of height estimate
#' based on 2-point distribution. If FALSE, variance calculation of height
#' estimate based on Normal approximation.
#' @param nGL Numeric scalar. Number of support points for numerical
#' integration.
#' @param ... not currently used
#' @details calculates the volume for a complete stem or sections defined by
#' \code{A} and \code{B}, which might be defined as diameter or height. The
#' parameter \code{R0} determines whether the estimated taper curve is forced
#' through the measured points (if \code{R0=TRUE}).
#' @return a list holding nine elements:
#' \itemize{
#' \item{E_VOL: }{Estimated volume (m^3).}
#' \item{VAR_VOL: }{Variance of the volume estimate.}
#' \item{Hm: }{Height of diameter measurement (m).}
#' \item{Dm: }{Diameter measurement (cm).}
#' \item{Ht: }{Tree height (m).}
#' \item{Da: }{Diameter at lower section threshold (cm).}
#' \item{Db: }{Diameter at upper section threshold (cm).}
#' \item{Ha: }{Height at lower section threshold (m).}
#' \item{Hb: }{Height at upper section threshold (m).}
#' \item{R0: }{Taper curve forced through measurements (if TRUE) or not (if FALSE).}
#' }
#' @author Edgar Kublin
#' @references Kublin, E., Breidenbach, J., Kaendler, G. (2013) A flexible stem
#' taper and volume prediction method based on mixed-effects B-spline
#' regression, Eur J For Res, 132:983-997.
#' @seealso \code{\link{TapeR_FIT_LME.f}}
#' @export
#'
#' @examples
#' #example data
#' data(DxHx.df)
#' # taper curve parameters based on all measured trees
#' data(SK.par.lme)
#'
#' #select data of first tree
#' Idi <- (DxHx.df[,"Id"] == unique(DxHx.df$Id)[1])
#' (tree1 <- DxHx.df[Idi,])
#'
#' ## Calculate the timber volume for the whole stem
#' VOL <- E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3],
#' Dm=tree1$Dx[3],
#' mHt = tree1$Ht[1],
#' sHt = 0, # no height variance assumed
#' par.lme = SK.par.lme)
#' VOL$E_VOL #' expected value
#' VOL$VAR_VOL #' corresponding variance
#' VOL$R0
#'
#' ## Calculate the timber volume for the whole stem, using R0=TRUE
#' VOL <- E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3],
#' Dm=tree1$Dx[3],
#' mHt = tree1$Ht[1],
#' sHt = 0, # no height variance assumed
#' par.lme = SK.par.lme,
#' R0 = TRUE)
#' VOL$E_VOL #' expected value
#' VOL$VAR_VOL #' corresponding variance
#' VOL$R0
#'
#' ## Calculate the timber volume for the whole stem
#' VOL <- E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3],
#' Dm=tree1$Dx[3],
#' mHt = tree1$Ht[1],
#' sHt = 1, # height variance assumed
#' par.lme = SK.par.lme)
#' VOL$E_VOL #' expected value
#' VOL$VAR_VOL #' corresponding variance
#' VOL$R0
#'
#' ## Calculate the timber volume for the whole stem, using R0=TRUE
#' VOL <- E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3],
#' Dm=tree1$Dx[3],
#' mHt = tree1$Ht[1],
#' sHt = 1, # height variance assumed
#' par.lme = SK.par.lme,
#' R0 = TRUE)
#' VOL$E_VOL #' expected value
#' VOL$VAR_VOL #' corresponding variance
#' VOL$R0
#'
#' ## Calculate the timber volume for a selected section given a height (0.3 - 5 m)
#' VOL <- E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3],
#' Dm=tree1$Dx[3],
#' mHt = tree1$Ht[1],
#' sHt = 1,
#' par.lme = SK.par.lme,
#' A=0.3,
#' B=5,
#' iDH = "H")
#' VOL$E_VOL #' expected value
#' VOL$VAR_VOL #' corresponding variance
#' VOL$R0
#'
#' ## Calculate the timber volume for a selected section given a height (0.3 - 5 m)
#' VOL <- E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3],
#' Dm=tree1$Dx[3],
#' mHt = tree1$Ht[1],
#' sHt = 1,
#' par.lme = SK.par.lme,
#' A=0.3,
#' B=5,
#' iDH = "H",
#' R0=TRUE)
#' VOL$E_VOL #' expected value
#' VOL$VAR_VOL #' corresponding variance
#' VOL$R0
#'
#' ## Calculate the timber volume for a selected section given a diameter
#' ## threshold (30cm - 15cm) (negative value if A<B)
#' VOL <- E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3],
#' Dm=tree1$Dx[3],
#' mHt = tree1$Ht[1],
#' sHt = 1,
#' par.lme = SK.par.lme,
#' A=30,
#' B=15,
#' iDH = "D")
#' VOL$E_VOL #' expected value
#' VOL$VAR_VOL #' corresponding variance
#'
#' ## Not run:
#' ## The variance estimate resulting from the tree height uncertainty using
#' ## a Normal approximation takes much longer...
#' ptm <- proc.time()
#' E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3], Dm=tree1$Dx[3], mHt = tree1$Ht[1],
#' sHt = 1, par.lme = SK.par.lme, IA=FALSE)
#' proc.time() - ptm
#'
#'
#' ##... than the calculation using a 2-point distribution...
#' ptm <- proc.time()
#' E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3], Dm=tree1$Dx[3], mHt = tree1$Ht[1],
#' sHt = 1, par.lme = SK.par.lme, IA=TRUE)
#' proc.time() - ptm
#'
#' ##...fastest if no height variance is assumed
#' ptm <- proc.time()
#' E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3], Dm=tree1$Dx[3], mHt = tree1$Ht[1],
#' sHt = 0, par.lme = SK.par.lme, IA=FALSE)
#' proc.time() - ptm
#'
#' ## Also the number of supportive points for the numerical integration
#' ## influences the calculation time
#' ptm <- proc.time()
#' E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3], Dm=tree1$Dx[3], mHt = tree1$Ht[1],
#' sHt = 0, par.lme = SK.par.lme, IA=FALSE, nGL=10)
#' proc.time() - ptm
#' ## End(Not run)
#'
E_VOL_AB_HmDm_HT.f <-
function(Hm, Dm, mHt, sHt = 0, A = NULL, B = NULL, iDH = "D", par.lme, R0 = FALSE, IA = FALSE, nGL = 51, ...){
  # Estimate stem (section) volume and its variance from a calibrated taper
  # curve. A / B give the lower / upper section limit, either as a diameter
  # (iDH = "D", cm) or a height (iDH = "H", m); NULL means stem bottom (A)
  # or tip (B). Heights are rescaled to relative heights x = H/Ht in [0, 1]
  # before evaluating the mixed-effects spline taper model.
  # Note: default IA = FALSE (was the reassignable shorthand F).

  # Tree height used for rescaling; guards against measurement heights
  # exceeding the (possibly uncertain) tree height mHt.
  Ht = max(Hm, mHt)
  if(min(Dm) > 0){Ht = max(c(Hm, Ht))}else{Ht = max(Hm)}

  xm = Hm/Ht  # relative measurement heights
  ym = Dm     # measured diameters (cm)

  # Lower section limit as relative height 'a'.
  if(is.null(A)){
    a = 0
  }else{
    if(iDH %in% c("d","D")){
      # Invert the calibrated taper curve: height at which diameter equals A.
      a = xy0_SK_EBLUP_LME.f(xm, ym, y0 = A, par.lme, R0)
    }else{
      a = min(1, A/Ht)
    }
  }

  # Upper section limit as relative height 'b'.
  if(is.null(B)){
    b = 1
  }else{
    if(iDH %in% c("d","D")){
      b = xy0_SK_EBLUP_LME.f(xm, ym, y0 = B, par.lme, R0)
    }else{
      b = min(1, B/Ht)
    }
  }

  if(sHt > 0){
    # Height measured with error: integrate VOL[a,b | (Hm, Dm), Ht] over the
    # tree-height distribution N(mHt, sHt^2) (2-point approximation if IA).
    Ht = max(Hm, mHt)
    Int_VOLab = Int_E_VOL_AB_HmDm_HT_dHt.f(Hm, Dm, A, B, iDH, mw_HtT = mHt,
                                           sd_HtT = sHt, par.lme, R0 = R0, IA, nGL)
    E_VOLab = Int_VOLab$E_VOL
    VAR_VOLab = Int_VOLab$VAR_VOL
  } else {
    # Height known exactly: volume as rotation integral over the calibrated
    # taper curve E[D(Hx) | (Hm, Dm), Ht].
    SK_VOLab = SK_VOLab_EBLUP_LME.f(xm, ym, a, b, Ht, par.lme, R0)
    E_VOLab = SK_VOLab$VOL
    VAR_VOLab = SK_VOLab$VAR_VOL
  }

  # Absolute section limits and the calibrated diameters at those limits.
  Ht = max(Hm, mHt)
  Ha = a*Ht
  Hb = b*Ht
  Da = SK_EBLUP_LME.f(xm = Hm/Ht, ym = Dm, xp = a, par.lme, R0)$yp
  Db = SK_EBLUP_LME.f(xm = Hm/Ht, ym = Dm, xp = b, par.lme, R0)$yp

  # Reuse the precomputed Ha/Hb (the original recomputed a*Ht / b*Ht here).
  return(list(E_VOL = E_VOLab, VAR_VOL = VAR_VOLab, Hm = Hm, Dm = Dm, Ht = Ht,
              Da = Da, Db = Db, Ha = Ha, Hb = Hb, R0 = R0))
}
| /R/e_vol_ab_hmdm_ht.f_1.R | no_license | jonibio/TapeR | R | false | false | 9,769 | r | #' @title Estimate volume for stem and sections
#' @description Estimate volume for a complete stem from bottom to tip or
#' for a section defined by lower and upper diameter or height. Variances for
#' estimated volumes are calculated.
#' @param Hm Numeric vector of stem heights (m) along which diameter
#' measurements were taken for calibration. Can be of length 1. Must be of same
#' length as \code{Dm}.
#' @param Dm Numeric vector of diameter measurements (cm) taken for calibration.
#' Can be of length 1. Must be of same length as \code{Hm}.
#' @param mHt Scalar. Tree height (m).
#' @param sHt Scalar. Standard deviation of stem height. Can be 0 if height was
#' measured without error.
#' @param A Numeric scalar defining the lower threshold of a stem section for
#' volume estimation. Depends on \code{iDH}. If \code{iDH} = "D", a diameter
#' (cm), if \code{iDH} = "H", a height (m). If NULL, section starts at lowest
#' point.
#' @param B Numeric scalar defining the upper threshold of a stem section for
#' volume estimation. Depends on \code{iDH}. If \code{iDH} = "D", a diameter
#' (cm), if \code{iDH} = "H", a height (m). If NULL, section ends at tip.
#' @param iDH Character scalar. Either "D" or "H". Type of threshold for section
#' volume estimation. See \code{A} or \code{B}.
#' @param par.lme List of taper model parameters obtained by
#' \code{\link{TapeR_FIT_LME.f}}.
#' @param R0 indicator whether taper curve should interpolate measurements
#' @param IA Logic scalar. If TRUE, variance calculation of height estimate
#' based on 2-point distribution. If FALSE, variance calculation of height
#' estimate based on Normal approximation.
#' @param nGL Numeric scalar. Number of support points for numerical
#' integration.
#' @param ... not currently used
#' @details calculates the volume for a complete stem or sections defined by
#' \code{A} and \code{B}, which might be defined as diameter or height. The
#' parameter \code{R0} determines whether the estimated taper curve is forced
#' through the measured points (if \code{R0=TRUE}).
#' @return a list holding nine elements:
#' \itemize{
#' \item{E_VOL: }{Estimated volume (m^3).}
#' \item{VAR_VOL: }{Variance of the volume estimate.}
#' \item{Hm: }{Height of diameter measurement (m).}
#' \item{Dm: }{Diameter measurement (cm).}
#' \item{Ht: }{Tree height (m).}
#' \item{Da: }{Diameter at lower section threshold (cm).}
#' \item{Db: }{Diameter at upper section threshold (cm).}
#' \item{Ha: }{Height at lower section threshold (m).}
#' \item{Hb: }{Height at upper section threshold (m).}
#' \item{R0: }{Taper curve forced through measurements (if TRUE) or not (if FALSE).}
#' }
#' @author Edgar Kublin
#' @references Kublin, E., Breidenbach, J., Kaendler, G. (2013) A flexible stem
#' taper and volume prediction method based on mixed-effects B-spline
#' regression, Eur J For Res, 132:983-997.
#' @seealso \code{\link{TapeR_FIT_LME.f}}
#' @export
#'
#' @examples
#' #example data
#' data(DxHx.df)
#' # taper curve parameters based on all measured trees
#' data(SK.par.lme)
#'
#' #select data of first tree
#' Idi <- (DxHx.df[,"Id"] == unique(DxHx.df$Id)[1])
#' (tree1 <- DxHx.df[Idi,])
#'
#' ## Calculate the timber volume for the whole stem
#' VOL <- E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3],
#' Dm=tree1$Dx[3],
#' mHt = tree1$Ht[1],
#' sHt = 0, # no height variance assumed
#' par.lme = SK.par.lme)
#' VOL$E_VOL #' expected value
#' VOL$VAR_VOL #' corresponding variance
#' VOL$R0
#'
#' ## Calculate the timber volume for the whole stem, using R0=TRUE
#' VOL <- E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3],
#' Dm=tree1$Dx[3],
#' mHt = tree1$Ht[1],
#' sHt = 0, # no height variance assumed
#' par.lme = SK.par.lme,
#' R0 = TRUE)
#' VOL$E_VOL #' expected value
#' VOL$VAR_VOL #' corresponding variance
#' VOL$R0
#'
#' ## Calculate the timber volume for the whole stem
#' VOL <- E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3],
#' Dm=tree1$Dx[3],
#' mHt = tree1$Ht[1],
#' sHt = 1, # height variance assumed
#' par.lme = SK.par.lme)
#' VOL$E_VOL #' expected value
#' VOL$VAR_VOL #' corresponding variance
#' VOL$R0
#'
#' ## Calculate the timber volume for the whole stem, using R0=TRUE
#' VOL <- E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3],
#' Dm=tree1$Dx[3],
#' mHt = tree1$Ht[1],
#' sHt = 1, # height variance assumed
#' par.lme = SK.par.lme,
#' R0 = TRUE)
#' VOL$E_VOL #' expected value
#' VOL$VAR_VOL #' corresponding variance
#' VOL$R0
#'
#' ## Calculate the timber volume for a selected section given a height (0.3 - 5 m)
#' VOL <- E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3],
#' Dm=tree1$Dx[3],
#' mHt = tree1$Ht[1],
#' sHt = 1,
#' par.lme = SK.par.lme,
#' A=0.3,
#' B=5,
#' iDH = "H")
#' VOL$E_VOL #' expected value
#' VOL$VAR_VOL #' corresponding variance
#' VOL$R0
#'
#' ## Calculate the timber volume for a selected section given a height (0.3 - 5 m)
#' VOL <- E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3],
#' Dm=tree1$Dx[3],
#' mHt = tree1$Ht[1],
#' sHt = 1,
#' par.lme = SK.par.lme,
#' A=0.3,
#' B=5,
#' iDH = "H",
#' R0=TRUE)
#' VOL$E_VOL #' expected value
#' VOL$VAR_VOL #' corresponding variance
#' VOL$R0
#'
#' ## Calculate the timber volume for a selected section given a diameter
#' ## threshold (30cm - 15cm) (negative value if A<B)
#' VOL <- E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3],
#' Dm=tree1$Dx[3],
#' mHt = tree1$Ht[1],
#' sHt = 1,
#' par.lme = SK.par.lme,
#' A=30,
#' B=15,
#' iDH = "D")
#' VOL$E_VOL #' expected value
#' VOL$VAR_VOL #' corresponding variance
#'
#' ## Not run:
#' ## The variance estimate resulting from the tree height uncertainty using
#' ## a Normal approximation takes much longer...
#' ptm <- proc.time()
#' E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3], Dm=tree1$Dx[3], mHt = tree1$Ht[1],
#' sHt = 1, par.lme = SK.par.lme, IA=FALSE)
#' proc.time() - ptm
#'
#'
#' ##... than the calculation using a 2-point distribution...
#' ptm <- proc.time()
#' E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3], Dm=tree1$Dx[3], mHt = tree1$Ht[1],
#' sHt = 1, par.lme = SK.par.lme, IA=TRUE)
#' proc.time() - ptm
#'
#' ##...fastest if no height variance is assumed
#' ptm <- proc.time()
#' E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3], Dm=tree1$Dx[3], mHt = tree1$Ht[1],
#' sHt = 0, par.lme = SK.par.lme, IA=FALSE)
#' proc.time() - ptm
#'
#' ## Also the number of supportive points for the numerical integration
#' ## influences the calculation time
#' ptm <- proc.time()
#' E_VOL_AB_HmDm_HT.f(Hm=tree1$Hx[3], Dm=tree1$Dx[3], mHt = tree1$Ht[1],
#' sHt = 0, par.lme = SK.par.lme, IA=FALSE, nGL=10)
#' proc.time() - ptm
#' ## End(Not run)
#'
E_VOL_AB_HmDm_HT.f <-
function(Hm, Dm, mHt, sHt = 0, A = NULL, B = NULL, iDH = "D", par.lme, R0 = FALSE, IA = FALSE, nGL = 51, ...){
  # Estimate stem (section) volume and its variance from a calibrated taper
  # curve. A / B give the lower / upper section limit, either as a diameter
  # (iDH = "D", cm) or a height (iDH = "H", m); NULL means stem bottom (A)
  # or tip (B). Heights are rescaled to relative heights x = H/Ht in [0, 1]
  # before evaluating the mixed-effects spline taper model.
  # Note: default IA = FALSE (was the reassignable shorthand F).

  # Tree height used for rescaling; guards against measurement heights
  # exceeding the (possibly uncertain) tree height mHt.
  Ht = max(Hm, mHt)
  if(min(Dm) > 0){Ht = max(c(Hm, Ht))}else{Ht = max(Hm)}

  xm = Hm/Ht  # relative measurement heights
  ym = Dm     # measured diameters (cm)

  # Lower section limit as relative height 'a'.
  if(is.null(A)){
    a = 0
  }else{
    if(iDH %in% c("d","D")){
      # Invert the calibrated taper curve: height at which diameter equals A.
      a = xy0_SK_EBLUP_LME.f(xm, ym, y0 = A, par.lme, R0)
    }else{
      a = min(1, A/Ht)
    }
  }

  # Upper section limit as relative height 'b'.
  if(is.null(B)){
    b = 1
  }else{
    if(iDH %in% c("d","D")){
      b = xy0_SK_EBLUP_LME.f(xm, ym, y0 = B, par.lme, R0)
    }else{
      b = min(1, B/Ht)
    }
  }

  if(sHt > 0){
    # Height measured with error: integrate VOL[a,b | (Hm, Dm), Ht] over the
    # tree-height distribution N(mHt, sHt^2) (2-point approximation if IA).
    Ht = max(Hm, mHt)
    Int_VOLab = Int_E_VOL_AB_HmDm_HT_dHt.f(Hm, Dm, A, B, iDH, mw_HtT = mHt,
                                           sd_HtT = sHt, par.lme, R0 = R0, IA, nGL)
    E_VOLab = Int_VOLab$E_VOL
    VAR_VOLab = Int_VOLab$VAR_VOL
  } else {
    # Height known exactly: volume as rotation integral over the calibrated
    # taper curve E[D(Hx) | (Hm, Dm), Ht].
    SK_VOLab = SK_VOLab_EBLUP_LME.f(xm, ym, a, b, Ht, par.lme, R0)
    E_VOLab = SK_VOLab$VOL
    VAR_VOLab = SK_VOLab$VAR_VOL
  }

  # Absolute section limits and the calibrated diameters at those limits.
  Ht = max(Hm, mHt)
  Ha = a*Ht
  Hb = b*Ht
  Da = SK_EBLUP_LME.f(xm = Hm/Ht, ym = Dm, xp = a, par.lme, R0)$yp
  Db = SK_EBLUP_LME.f(xm = Hm/Ht, ym = Dm, xp = b, par.lme, R0)$yp

  # Reuse the precomputed Ha/Hb (the original recomputed a*Ht / b*Ht here).
  return(list(E_VOL = E_VOLab, VAR_VOL = VAR_VOLab, Hm = Hm, Dm = Dm, Ht = Ht,
              Da = Da, Db = Db, Ha = Ha, Hb = Hb, R0 = R0))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/source_functions.R
\name{node.estimate.fossils}
\alias{node.estimate.fossils}
\title{node.estimate.fossils}
\usage{
node.estimate.fossils(treedata_min, treedata_max, fossils, fossils.edges)
}
\arguments{
\item{treedata_min}{tree data object with min estimate of the climate envelope}
\item{treedata_max}{tree data object with max estimate of the climate envelope}
\item{fossils}{the estimate of the climate envelope of the fossil occurrences}
\item{fossils.edges}{the edge number that the fossil occurs on}
}
\description{
To estimate nodes with the placement of fossils on randomly assigned or specifed edges on a tree.
}
| /man/node.estimate.fossils.Rd | no_license | michellelawing/ppgm | R | false | true | 655 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/source_functions.R
\name{node.estimate.fossils}
\alias{node.estimate.fossils}
\title{node.estimate.fossils}
\usage{
node.estimate.fossils(treedata_min, treedata_max, fossils, fossils.edges)
}
\arguments{
\item{treedata_min}{tree data object with min estimate of the climate envelope}
\item{treedata_max}{tree data object with max estimate of the climate envelope}
\item{fossils}{the estimate of the climate envelope of the fossil occurrences}
\item{fossils.edges}{the edge number that the fossil occurs on}
}
\description{
To estimate nodes with the placement of fossils on randomly assigned or specifed edges on a tree.
}
|
`sumMatrices` <-
function(matrices){
  # Element-wise sum of a list of conformable matrices.
  # Reduce() replaces the original hand-rolled recursion via Recall(): it
  # handles any list of length >= 1 (a single matrix is returned unchanged)
  # and avoids deep recursion on long lists.
  Reduce(`+`, matrices)
}
| /STM/sumMatrices.R | no_license | rallozi/stm | R | false | false | 208 | r | `sumMatrices` <-
function(matrices){
if(length(matrices)==1) matrices[[1]]
else if (length(matrices) > 2) matrices[[1]] + Recall(matrices[-1])
else matrices[[1]] + matrices[[2]]
}
|
library(dismo)
library(gbm)
# NOTE(review): absolute Windows path -- assumes this exact directory exists;
# confirm before running on another machine.
setwd("E:/DarwinFox/dismo/ALL")
# Presence records (label column L) and the full evaluation grid.
presence<-read.table("1.txt", head=T, sep=",")
test_set<-read.table("all.txt", head=T, sep=",")
# Background cells: rows of the grid with no recorded presence (L == 0).
test_set_bg<-test_set[which(test_set$L==0),]
# Pseudo-absences: sample 10 background cells per presence record.
pseudo_absence<-test_set_bg[sample(nrow(test_set_bg), dim(presence)[1] * 10), ]
train_set<-rbind(presence, pseudo_absence)
# Boosted regression trees: columns 4:6 are predictors, column 3 the 0/1 label.
gbm_all <- gbm.step(data=train_set, gbm.x = 4:6, gbm.y = 3,
family = "bernoulli", tree.complexity = 5,
learning.rate = 0.01, bag.fraction = 0.5)
# Predict occurrence probability for every grid cell at the optimal tree count
# selected by gbm.step's cross-validation.
result<-predict.gbm(gbm_all, test_set,
n.trees=gbm_all$gbm.call$best.trees, type="response")
test_set$result<-result
write.table(test_set, file="result.1.txt", row.names=F, sep=",")
| /dismo/ALL/rscript.1.r | no_license | qiaohj/DarwinFox | R | false | false | 716 | r | library(dismo)
library(gbm)
setwd("E:/DarwinFox/dismo/ALL")
presence<-read.table("1.txt", head=T, sep=",")
test_set<-read.table("all.txt", head=T, sep=",")
test_set_bg<-test_set[which(test_set$L==0),]
pseudo_absence<-test_set_bg[sample(nrow(test_set_bg), dim(presence)[1] * 10), ]
train_set<-rbind(presence, pseudo_absence)
gbm_all <- gbm.step(data=train_set, gbm.x = 4:6, gbm.y = 3,
family = "bernoulli", tree.complexity = 5,
learning.rate = 0.01, bag.fraction = 0.5)
result<-predict.gbm(gbm_all, test_set,
n.trees=gbm_all$gbm.call$best.trees, type="response")
test_set$result<-result
write.table(test_set, file="result.1.txt", row.names=F, sep=",")
|
# Libraries needed: none
# Files required: TraderJoes.csv
# Make sure you put this file in the same directory as this file
# NOTE(review): the read.csv() call below actually uses an absolute path,
# which contradicts the "same directory" instruction above -- confirm intent.
# In this file we are going to cluster the data in the TraderJoes file, based on the
# features Longitude and Latitude
#--Read the data
dat <- read.csv("/Users/chakaneshegog/Desktop/HW4:445001/Part1/TraderJoes.csv", header=TRUE)
cat("Preview the data:\n")
print(head(dat))
#--Plot original data
plot(dat$Longitude, dat$Latitude, col="blue", xlab="Longitude", ylab="Latitude", pch=18,
main = "scatter plot")
#I. K-MEANS
# Fix the RNG so the k-means initialisation (and thus the clusters) is reproducible.
set.seed(-50)
k = 8;
readline(paste("\nPress enter for k-means with", k, "clusters..."))
# Cluster store locations on (Longitude, Latitude) only.
clustk <- kmeans(dat[ , c("Longitude", "Latitude")], k)
#print clusters centers and sizes
clustk_output <-data.frame(Cluster = seq(1, k), Size=clustk[["size"]], clustk[["centers"]], Whithin_cluster_SS = clustk$withinss)
names(clustk_output)[3] <- "Center_Longitude"
names(clustk_output)[4] <- "Center_Latitude"
print(clustk_output)
#plot clusters
clust1 <- clustk$cluster #cluster assignments
dat1 <- cbind(dat, cluster=clust1) #append the clusters info to dat
# One colour per cluster (length must be >= k).
colors = c("blue", "red", "green", "grey", "olivedrab", "violet", "pink", "orange")
plot(dat1$Longitude, dat1$Latitude, col="white", xlab="Longitude", ylab="Latitude",
main=paste("clustered data using k-means:", k, "clusters"))
for (i in 1:k){
#different clusters -> different colors
points(dat[dat1$cluster==i,"Longitude"], dat[dat1$cluster==i,"Latitude"], col=colors[i], pch=18)
#plot the centroids
points(clustk_output[i,"Center_Longitude"], clustk_output[i,"Center_Latitude"], col=colors[i], pch=4) #different clusters -> different colors
legend(clustk_output[i,"Center_Longitude"], clustk_output[i,"Center_Latitude"],
legend = as.character(i), bty="n", text.col = colors[i], xjust = 0.5, yjust = 0.5)
}
#-- The elbow method
# Re-run k-means for k = 2..20 and record the total within-cluster sum of
# squares; the "elbow" of the resulting curve suggests a good number of clusters.
# Note: the loop below reuses the name `k` as its loop variable.
readline(paste("\nPress enter for the elbow method..."))
set.seed(60)
k_values = seq(2, 20);
wss_values <- c();
# NOTE(review): growing wss_values with c() inside the loop is O(n^2); fine at
# this size, but preallocation would be preferred for larger sweeps.
for (k in k_values){
clustk <- kmeans(dat[ , c("Longitude", "Latitude")], k)
wss_values <- c(wss_values, clustk$tot.withinss) #whithin cluster sum of squares
}
plot(k_values, wss_values,type="b", pch = 19,
xlab="Number of clusters K", ylab="Total within-clusters sum of squares")
| /HW4:445001/Part1/TraderJoes.R | no_license | chakane3/Homework | R | false | false | 2,284 | r | # Libraries needed: none
## TraderJoes.R -- k-means clustering of Trader Joe's store locations.
##
## Files required: TraderJoes.csv
## Make sure you put the CSV in the same directory as this file (the read
## below uses a relative path, matching that instruction -- the original
## hard-coded an absolute user-specific path).
## The stores are clustered on the features Longitude and Latitude; the
## script then runs the elbow method to help choose the number of clusters.

#-- Read the data (relative path: TraderJoes.csv must sit next to this script)
dat <- read.csv("TraderJoes.csv", header = TRUE)
cat("Preview the data:\n")
print(head(dat))

#-- Plot the original, unclustered data
plot(dat$Longitude, dat$Latitude, col = "blue", xlab = "Longitude",
     ylab = "Latitude", pch = 18, main = "scatter plot")

## I. K-MEANS
set.seed(-50)  # fixed seed so cluster assignments are reproducible
k <- 8
readline(paste("\nPress enter for k-means with", k, "clusters..."))
clustk <- kmeans(dat[, c("Longitude", "Latitude")], k)

#-- Print cluster centers and sizes
## (column-name typo fixed: "Whithin_cluster_SS" -> "Within_cluster_SS")
clustk_output <- data.frame(Cluster = seq_len(k),
                            Size = clustk[["size"]],
                            clustk[["centers"]],
                            Within_cluster_SS = clustk$withinss)
names(clustk_output)[3] <- "Center_Longitude"
names(clustk_output)[4] <- "Center_Latitude"
print(clustk_output)

#-- Plot clusters: one color per cluster, centroid marked with an X
clust1 <- clustk$cluster              # cluster assignment per store
dat1 <- cbind(dat, cluster = clust1)  # append the cluster info to dat
colors <- c("blue", "red", "green", "grey", "olivedrab", "violet", "pink", "orange")
plot(dat1$Longitude, dat1$Latitude, col = "white", xlab = "Longitude",
     ylab = "Latitude",
     main = paste("clustered data using k-means:", k, "clusters"))
for (i in seq_len(k)) {
  # points of cluster i in its own color
  points(dat[dat1$cluster == i, "Longitude"], dat[dat1$cluster == i, "Latitude"],
         col = colors[i], pch = 18)
  # the centroid of cluster i ...
  points(clustk_output[i, "Center_Longitude"], clustk_output[i, "Center_Latitude"],
         col = colors[i], pch = 4)
  # ... labelled with the cluster number
  legend(clustk_output[i, "Center_Longitude"], clustk_output[i, "Center_Latitude"],
         legend = as.character(i), bty = "n", text.col = colors[i],
         xjust = 0.5, yjust = 0.5)
}

#-- The elbow method: total within-cluster SS as a function of K
readline(paste("\nPress enter for the elbow method..."))
set.seed(60)
k_values <- seq(2, 20)
# vapply preallocates the result (no c() growth inside a loop) and the
# sequential calls consume the RNG in the same order as the original loop
wss_values <- vapply(k_values, function(k) {
  kmeans(dat[, c("Longitude", "Latitude")], k)$tot.withinss
}, numeric(1))
plot(k_values, wss_values, type = "b", pch = 19,
     xlab = "Number of clusters K",
     ylab = "Total within-clusters sum of squares")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/most-variable.r
\name{meffil.most.variable.cpgs}
\alias{meffil.most.variable.cpgs}
\title{Most variable CpG sites}
\usage{
meffil.most.variable.cpgs(
beta,
n = 1000,
sites = NULL,
samples = NULL,
autosomal = T,
winsorize.pct = NA,
outlier.iqr.factor = NA
)
}
\arguments{
\item{beta}{Output from \code{\link{meffil.normalize.samples}()},
either a matrix or a GDS filename.}
\item{n}{Number of CpG sites to return.}
\item{sites}{Subset of CpG sites to consider (row names of beta) (Default: NULL).}
\item{samples}{Subset of samples to consider (column names of beta) (Default: NULL).}
\item{autosomal}{If true, remove probes on sex chromosomes (Default: TRUE).}
\item{winsorize.pct}{Apply to methylation levels
winsorized to the given level. Set to NA to avoid winsorizing (Default: NA).}
\item{outlier.iqr.factor}{Apply to methylation after setting,
for each CpG site, values less than
\code{Q1 - outlier.iqr.factor * IQR} or more than
\code{Q3 + outlier.iqr.factor * IQR} to NA. Here IQR is the inter-quartile
range of the methylation levels at the CpG site, i.e. Q3-Q1.
Set to NA to skip this step (Default: NA).}
}
\value{
The \code{n} CpG site identifiers (row names of \code{beta}) with the greatest variance in \code{beta}.
}
\description{
Returns the most variable CpG sites (rows) in the methylation matrix.
}
| /man/meffil.most.variable.cpgs.Rd | permissive | perishky/meffil | R | false | true | 1,408 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/most-variable.r
\name{meffil.most.variable.cpgs}
\alias{meffil.most.variable.cpgs}
\title{Most variable CpG sites}
\usage{
meffil.most.variable.cpgs(
beta,
n = 1000,
sites = NULL,
samples = NULL,
autosomal = T,
winsorize.pct = NA,
outlier.iqr.factor = NA
)
}
\arguments{
\item{beta}{Output from \code{\link{meffil.normalize.samples}()},
either a matrix or a GDS filename.}
\item{n}{Number of CpG sites to return.}
\item{sites}{Subset of CpG sites to consider (row names of beta) (Default: NULL).}
\item{samples}{Subset of samples to consider (column names of beta) (Default: NULL).}
\item{autosomal}{If true, remove probes on sex chromosomes (Default: TRUE).}
\item{winsorize.pct}{Apply to methylation levels
winsorized to the given level. Set to NA to avoid winsorizing (Default: NA).}
\item{outlier.iqr.factor}{Apply to methylation after setting,
for each CpG site, values less than
\code{Q1 - outlier.iqr.factor * IQR} or more than
\code{Q3 + outlier.iqr.factor * IQR} to NA. Here IQR is the inter-quartile
range of the methylation levels at the CpG site, i.e. Q3-Q1.
Set to NA to skip this step (Default: NA).}
}
\value{
The \code{n} CpG site identifiers (row names of \code{beta}) with the greatest variance in \code{beta}.
}
\description{
Returns the most variable CpG sites (rows) in the methylation matrix.
}
|
context("Expressions")
## .dispatchFilter translates an R index into a Crunch ZCL filter expression.
## Note the 0-based indexing in the expected JSON: R index 5 becomes value 4.
test_that(".dispatchFilter uses right numeric function", {
## Use expect_output because toJSON returns class "json" but prints correctly
## A scalar index maps to a row "==" comparison
expect_fixed_output(toJSON(.dispatchFilter(5)),
paste0('{"function":"==","args":[{"function":"row",',
'"args":[]},{"value":4}]}'))
## A non-contiguous vector of indices maps to an "in" membership test
expect_fixed_output(toJSON(.dispatchFilter(c(5, 7))),
paste0('{"function":"in","args":[{"function":"row",',
'"args":[]},{"column":[4,6]}]}'))
## A contiguous range (5:7) maps to a "between" test
## (half-open on the upper end: value 4 through value 7)
expect_fixed_output(toJSON(.dispatchFilter(5:7)),
paste0('{"function":"between","args":[{"function":"row",',
'"args":[]},{"value":4},',
'{"value":7}]}'))
})
with_mock_HTTP({
ds <- loadDataset("test ds")
test_that("Arithmetic generates expressions", {
e1 <- try(ds$birthyr + 5)
expect_is(e1, "CrunchExpr")
zexp <- list(`function`="+",
args=list(
list(variable="https://app.crunch.io/api/datasets/1/variables/birthyr/"),
list(value=5)
)
)
expect_identical(zcl(e1), zexp)
expect_fixed_output(e1, "Crunch expression: birthyr + 5")
e2 <- try(5 + ds$birthyr)
expect_is(e2, "CrunchExpr")
expect_fixed_output(e2, "Crunch expression: 5 + birthyr")
})
test_that("Logic generates expressions", {
e1 <- try(ds$birthyr < 0)
expect_is(e1, "CrunchLogicalExpr")
expect_fixed_output(e1, "Crunch logical expression: birthyr < 0")
})
test_that("R logical & CrunchLogicalExpr", {
expect_is(c(TRUE, FALSE, TRUE) & ds$gender == "Female",
"CrunchLogicalExpr")
expect_is(c(TRUE, FALSE, TRUE) | ds$gender == "Female",
"CrunchLogicalExpr")
expect_is(ds$gender == "Female" & c(TRUE, FALSE, TRUE),
"CrunchLogicalExpr")
expect_is(ds$gender == "Female" | c(TRUE, FALSE, TRUE),
"CrunchLogicalExpr")
})
test_that("Datetime operations: logical", {
expect_fixed_output(ds$starttime == "2015-01-01",
'Crunch logical expression: starttime == "2015-01-01"')
expect_fixed_output(ds$starttime > "2015-01-01",
'Crunch logical expression: starttime > "2015-01-01"')
expect_fixed_output(ds$starttime == as.Date("2015-01-01"),
'Crunch logical expression: starttime == "2015-01-01"')
expect_fixed_output(ds$starttime > as.Date("2015-01-01"),
'Crunch logical expression: starttime > "2015-01-01"')
})
test_that("Logical expr with categoricals", {
expect_is(ds$gender == "Male", "CrunchLogicalExpr")
expect_fixed_output(ds$gender == "Male",
'Crunch logical expression: gender == "Male"')
expect_fixed_output(ds$gender == as.factor("Male"),
'Crunch logical expression: gender == "Male"')
expect_fixed_output(ds$gender %in% "Male",
'Crunch logical expression: gender == "Male"')
expect_fixed_output(ds$gender %in% as.factor("Male"),
'Crunch logical expression: gender == "Male"')
expect_fixed_output(ds$gender %in% c("Male", "Female"),
'Crunch logical expression: gender %in% c("Male", "Female")')
expect_fixed_output(ds$gender %in% as.factor(c("Male", "Female")),
'Crunch logical expression: gender %in% c("Male", "Female")')
expect_fixed_output(ds$gender != "Female",
'Crunch logical expression: gender != "Female"')
expect_fixed_output(ds$gender != as.factor("Female"),
'Crunch logical expression: gender != "Female"')
})
test_that("Referencing category names that don't exist warns and drops", {
expect_warning(
expect_fixed_output(ds$gender == "other",
'Crunch logical expression: gender %in% character(0)'),
paste("Category not found:", dQuote("other")))
expect_warning(
expect_fixed_output(ds$gender %in% c("other", "Male", "another"),
'Crunch logical expression: gender == "Male"'),
paste("Categories not found:", dQuote("other"), "and",
dQuote("another")))
expect_warning(
expect_fixed_output(ds$gender != "other",
'Crunch logical expression: !gender %in% character(0)'),
paste("Category not found:", dQuote("other")))
})
test_that("Show method for logical expressions", {
expect_fixed_output(ds$gender %in% c("Male", "Female"),
'Crunch logical expression: gender %in% c("Male", "Female"')
expect_fixed_output(ds$gender %in% 1:2,
'Crunch logical expression: gender %in% c("Male", "Female"')
expect_fixed_output(ds$birthyr == 1945 | ds$birthyr < 1941,
'birthyr == 1945 | birthyr < 1941')
expect_fixed_output(ds$gender %in% "Male" & !is.na(ds$birthyr),
'gender == "Male" & !is.na(birthyr)')
expect_fixed_output(!(ds$gender == "Male"),
'Crunch logical expression: !gender == "Male"')
## TODO: better parentheses for ^^
expect_fixed_output(duplicated(ds$gender),
'Crunch logical expression: duplicated(gender)')
expect_fixed_output(duplicated(ds$gender == "Male"),
'Crunch logical expression: duplicated(gender == "Male")')
})
test_that("Can subset a CrunchExpr with R values", {
age <- 2016 - ds$birthyr
## Note: no check for correct number of rows
expect_is(age[c(TRUE, FALSE, TRUE)], "CrunchExpr")
expect_fixed_output(toJSON(activeFilter(age[c(TRUE, FALSE, TRUE)])),
paste0('{"function":"in","args":[{"function":"row",',
'"args":[]},{"column":[0,2]}]}'))
expect_is(age[c(1, 3)], "CrunchExpr")
expect_fixed_output(toJSON(activeFilter(age[c(1, 3)])),
paste0('{"function":"in","args":[{"function":"row",',
'"args":[]},{"column":[0,2]}]}'))
})
## Typo fixed in the test description: "expresssions" -> "expressions".
test_that("Show method for expressions", {
  skip("TODO: something intelligent with parentheses and order of operations")
  print(ds$birthyr * 3 + 5)
  print(3 * (ds$birthyr + 5))
})
})
with_test_authentication({
ds <- newDataset(df)
ds$q1 <- factor(rep(c("selected", "not selected"), 10))
test_that("Arithmetic expressions evaluate", {
e1 <- try(ds$v3 + 5)
expect_is(e1, "CrunchExpr")
e2 <- try(5 + ds$v3)
expect_is(e2, "CrunchExpr")
expect_identical(as.vector(e1), as.vector(ds$v3) + 5)
expect_identical(as.vector(e1), as.vector(e2))
expect_identical(as.vector(ds$v3 * ds$v3), df$v3^2)
})
uncached({
with_mock(`crunch::.crunchPageSize`=function (x) 5L, {
with(temp.option(httpcache.log=""), {
avlog <- capture.output(v35 <- as.vector(ds$v3 + 5))
})
test_that("as.vector with CrunchExpr is paginated", {
logdf <- loadLogfile(textConnection(avlog))
## GET /values/ 4x
## to get data, then a 5th GET /values/ that returns 0
## values, which breaks the pagination loop
expect_identical(logdf$verb, rep("GET", 5))
expect_identical(grep("table", logdf$url), 1:5)
})
test_that("getValues returns the same result when paginated", {
expect_equivalent(v35, df$v3 + 5)
})
})
})
test_that("Logical expressions evaluate", {
e1 <- try(ds$v3 > 10)
expect_is(e1, "CrunchLogicalExpr")
skip("which isn't implemented correctly yet")
expect_identical(which(e1), which(df$v3 > 10))
skip("select with logical expression not supported")
expect_identical(as.vector(e1), as.vector(ds$v3) > 10)
})
test_that("R & Crunch logical together", {
e1 <- ds$v3 < 10 | c(rep(FALSE, 15), rep(TRUE, 5))
expect_equivalent(as.vector(ds$v3[e1]),
c(8, 9, 23, 24, 25, 26, 27))
e2 <- TRUE & is.na(ds$v2)
expect_equivalent(as.vector(ds$v3[e2]),
23:27)
e3 <- df$v4 == "B" & is.na(ds$v1) ## Note df
expect_equivalent(as.vector(ds$v3[e3]),
c(8, 10, 12))
})
## Typo fixed in the test description: "expresssions" -> "expressions".
## Verifies that expressions built from other expressions evaluate to the
## same values as the corresponding local computations on df.
test_that("expressions on expressions evaluate", {
  e3 <- try(ds$v3 + ds$v3 + 10)
  expect_is(e3, "CrunchExpr")
  expect_fixed_output(e3, "Crunch expression: v3 + v3 + 10")
  expect_identical(as.vector(e3), 2*df$v3 + 10)
  e4 <- try(ds$v3 + ds$v3 * 2)
  expect_is(e4, "CrunchExpr")
  expect_fixed_output(e4, "Crunch expression: v3 + v3 * 2")
  expect_identical(as.vector(e4), 3*df$v3)
})
varnames <- names(df[-6])
test_that("Select values with Numeric inequality filter", {
e5 <- try(ds$v3[ds$v3 < 10])
expect_is(e5, "CrunchVariable")
expect_identical(as.vector(e5), c(8, 9))
for (i in varnames) {
expect_equivalent(as.vector(ds[[i]][ds$v3 < 10]),
df[[i]][1:2], info=i)
}
})
test_that("Select values with %in% on Numeric", {
for (i in varnames) {
expect_equivalent(as.vector(ds[[i]][ds$v3 %in% 10]),
df[[i]][3], info=i)
expect_equivalent(as.vector(ds[[i]][ds$v3 %in% c(10, 12)]),
df[[i]][c(3, 5)], info=i)
}
})
test_that("Select values with %in% on Categorical", {
expect_length(as.vector(ds$v3[ds$v4 %in% "B"]), 10)
for (i in varnames) {
expect_equivalent(as.vector(ds[[i]][ds$v4 %in% "B"]),
df[[i]][df$v4 %in% "B"], info=i)
}
expect_length(as.vector(ds$v3[ds$q1 %in% "selected"]), 10)
})
test_that("Select values with %in% on nonexistent categories", {
expect_length(as.vector(ds$v3[ds$v4 %in% numeric(0)]), 0)
expect_length(as.vector(ds$v3[!(ds$v4 %in% numeric(0))]), 20)
expect_warning(
expect_length(as.vector(ds$v3[ds$v4 == "other"]), 0),
paste0("Category not found: ", dQuote("other"), ". Dropping."))
expect_warning(
expect_length(as.vector(ds$v3[ds$v4 != "other"]), 20),
paste0("Category not found: ", dQuote("other"), ". Dropping."))
})
uncached({
with_mock(`crunch::.crunchPageSize`=function (x) 5L, {
with(temp.option(httpcache.log=""), {
avlog <- capture.output(v3.5 <- as.vector(ds$v3[ds$v4 %in% "B"]))
})
test_that("Select values with %in% on Categorical, paginated", {
logdf <- loadLogfile(textConnection(avlog))
## GET v3 entity to get /values/ URL,
## GET v3 entity to get categories to construct expr,
## GET /values/ 2x to get data,
## then a 3rd GET /values/ that returns 0
## values, which breaks the pagination loop
expect_identical(logdf$verb, rep("GET", 5))
expect_identical(grep("values", logdf$url), 3:5)
expect_equivalent(v3.5, df$v3[df$v4 %in% "B"])
})
})
})
test_that("Select values with &ed filter", {
expect_equivalent(as.vector(ds$v3[ds$v3 >= 10 & ds$v3 < 13]),
10:12)
f <- ds$v3 >= 10 & ds$v3 < 13
expect_is(f, "CrunchLogicalExpr")
for (i in varnames) {
expect_equivalent(as.vector(ds[[i]][f]),
df[[i]][3:5], info=i)
}
})
test_that("Select values with negated filter", {
expect_equivalent(as.vector(ds$v3[!(ds$v4 %in% "B")]),
df$v3[df$v4 %in% "C"])
for (i in varnames) {
expect_equivalent(as.vector(ds[[i]][!(ds$v4 %in% "B")]),
df[[i]][df$v4 %in% "C"], info=i)
}
})
test_that("R numeric filter evaluates", {
expect_equivalent(as.vector(ds$v3[6]), df$v3[6])
})
test_that("If R numeric filter is a range, 'between' is correct", {
expect_equivalent(as.vector(ds$v3[3:18]), df$v3[3:18])
})
test_that("R logical filter evaluates", {
expect_identical(as.vector(ds$v3[df$v3 < 10]), c(8, 9))
})
test_that("filtered categorical returns factor", {
expect_equivalent(as.vector(ds$v4[ds$v4 == "B"]),
factor(rep("B", 10)))
})
test_that("duplicated method", {
expect_identical(which(duplicated(ds$v3)), integer(0))
expect_equivalent(as.vector(ds$v3[duplicated(ds$v4)]), 10:27)
expect_identical(which(duplicated(ds$v3 + 4)), integer(0))
skip("'which' isn't implemented correctly")
expect_identical(which(duplicated(ds$v4)), 3:20)
})
})
| /tests/testthat/test-expressions.R | no_license | malecki/rcrunch | R | false | false | 12,709 | r | context("Expressions")
test_that(".dispatchFilter uses right numeric function", {
## Use expect_output because toJSON returns class "json" but prints correctly
expect_fixed_output(toJSON(.dispatchFilter(5)),
paste0('{"function":"==","args":[{"function":"row",',
'"args":[]},{"value":4}]}'))
expect_fixed_output(toJSON(.dispatchFilter(c(5, 7))),
paste0('{"function":"in","args":[{"function":"row",',
'"args":[]},{"column":[4,6]}]}'))
expect_fixed_output(toJSON(.dispatchFilter(5:7)),
paste0('{"function":"between","args":[{"function":"row",',
'"args":[]},{"value":4},',
'{"value":7}]}'))
})
with_mock_HTTP({
ds <- loadDataset("test ds")
test_that("Arithmetic generates expressions", {
e1 <- try(ds$birthyr + 5)
expect_is(e1, "CrunchExpr")
zexp <- list(`function`="+",
args=list(
list(variable="https://app.crunch.io/api/datasets/1/variables/birthyr/"),
list(value=5)
)
)
expect_identical(zcl(e1), zexp)
expect_fixed_output(e1, "Crunch expression: birthyr + 5")
e2 <- try(5 + ds$birthyr)
expect_is(e2, "CrunchExpr")
expect_fixed_output(e2, "Crunch expression: 5 + birthyr")
})
test_that("Logic generates expressions", {
e1 <- try(ds$birthyr < 0)
expect_is(e1, "CrunchLogicalExpr")
expect_fixed_output(e1, "Crunch logical expression: birthyr < 0")
})
test_that("R logical & CrunchLogicalExpr", {
expect_is(c(TRUE, FALSE, TRUE) & ds$gender == "Female",
"CrunchLogicalExpr")
expect_is(c(TRUE, FALSE, TRUE) | ds$gender == "Female",
"CrunchLogicalExpr")
expect_is(ds$gender == "Female" & c(TRUE, FALSE, TRUE),
"CrunchLogicalExpr")
expect_is(ds$gender == "Female" | c(TRUE, FALSE, TRUE),
"CrunchLogicalExpr")
})
test_that("Datetime operations: logical", {
expect_fixed_output(ds$starttime == "2015-01-01",
'Crunch logical expression: starttime == "2015-01-01"')
expect_fixed_output(ds$starttime > "2015-01-01",
'Crunch logical expression: starttime > "2015-01-01"')
expect_fixed_output(ds$starttime == as.Date("2015-01-01"),
'Crunch logical expression: starttime == "2015-01-01"')
expect_fixed_output(ds$starttime > as.Date("2015-01-01"),
'Crunch logical expression: starttime > "2015-01-01"')
})
test_that("Logical expr with categoricals", {
expect_is(ds$gender == "Male", "CrunchLogicalExpr")
expect_fixed_output(ds$gender == "Male",
'Crunch logical expression: gender == "Male"')
expect_fixed_output(ds$gender == as.factor("Male"),
'Crunch logical expression: gender == "Male"')
expect_fixed_output(ds$gender %in% "Male",
'Crunch logical expression: gender == "Male"')
expect_fixed_output(ds$gender %in% as.factor("Male"),
'Crunch logical expression: gender == "Male"')
expect_fixed_output(ds$gender %in% c("Male", "Female"),
'Crunch logical expression: gender %in% c("Male", "Female")')
expect_fixed_output(ds$gender %in% as.factor(c("Male", "Female")),
'Crunch logical expression: gender %in% c("Male", "Female")')
expect_fixed_output(ds$gender != "Female",
'Crunch logical expression: gender != "Female"')
expect_fixed_output(ds$gender != as.factor("Female"),
'Crunch logical expression: gender != "Female"')
})
test_that("Referencing category names that don't exist warns and drops", {
expect_warning(
expect_fixed_output(ds$gender == "other",
'Crunch logical expression: gender %in% character(0)'),
paste("Category not found:", dQuote("other")))
expect_warning(
expect_fixed_output(ds$gender %in% c("other", "Male", "another"),
'Crunch logical expression: gender == "Male"'),
paste("Categories not found:", dQuote("other"), "and",
dQuote("another")))
expect_warning(
expect_fixed_output(ds$gender != "other",
'Crunch logical expression: !gender %in% character(0)'),
paste("Category not found:", dQuote("other")))
})
test_that("Show method for logical expressions", {
expect_fixed_output(ds$gender %in% c("Male", "Female"),
'Crunch logical expression: gender %in% c("Male", "Female"')
expect_fixed_output(ds$gender %in% 1:2,
'Crunch logical expression: gender %in% c("Male", "Female"')
expect_fixed_output(ds$birthyr == 1945 | ds$birthyr < 1941,
'birthyr == 1945 | birthyr < 1941')
expect_fixed_output(ds$gender %in% "Male" & !is.na(ds$birthyr),
'gender == "Male" & !is.na(birthyr)')
expect_fixed_output(!(ds$gender == "Male"),
'Crunch logical expression: !gender == "Male"')
## TODO: better parentheses for ^^
expect_fixed_output(duplicated(ds$gender),
'Crunch logical expression: duplicated(gender)')
expect_fixed_output(duplicated(ds$gender == "Male"),
'Crunch logical expression: duplicated(gender == "Male")')
})
test_that("Can subset a CrunchExpr with R values", {
age <- 2016 - ds$birthyr
## Note: no check for correct number of rows
expect_is(age[c(TRUE, FALSE, TRUE)], "CrunchExpr")
expect_fixed_output(toJSON(activeFilter(age[c(TRUE, FALSE, TRUE)])),
paste0('{"function":"in","args":[{"function":"row",',
'"args":[]},{"column":[0,2]}]}'))
expect_is(age[c(1, 3)], "CrunchExpr")
expect_fixed_output(toJSON(activeFilter(age[c(1, 3)])),
paste0('{"function":"in","args":[{"function":"row",',
'"args":[]},{"column":[0,2]}]}'))
})
## Typo fixed in the test description: "expresssions" -> "expressions".
test_that("Show method for expressions", {
  skip("TODO: something intelligent with parentheses and order of operations")
  print(ds$birthyr * 3 + 5)
  print(3 * (ds$birthyr + 5))
})
})
with_test_authentication({
ds <- newDataset(df)
ds$q1 <- factor(rep(c("selected", "not selected"), 10))
test_that("Arithmetic expressions evaluate", {
e1 <- try(ds$v3 + 5)
expect_is(e1, "CrunchExpr")
e2 <- try(5 + ds$v3)
expect_is(e2, "CrunchExpr")
expect_identical(as.vector(e1), as.vector(ds$v3) + 5)
expect_identical(as.vector(e1), as.vector(e2))
expect_identical(as.vector(ds$v3 * ds$v3), df$v3^2)
})
uncached({
with_mock(`crunch::.crunchPageSize`=function (x) 5L, {
with(temp.option(httpcache.log=""), {
avlog <- capture.output(v35 <- as.vector(ds$v3 + 5))
})
test_that("as.vector with CrunchExpr is paginated", {
logdf <- loadLogfile(textConnection(avlog))
## GET /values/ 4x
## to get data, then a 5th GET /values/ that returns 0
## values, which breaks the pagination loop
expect_identical(logdf$verb, rep("GET", 5))
expect_identical(grep("table", logdf$url), 1:5)
})
test_that("getValues returns the same result when paginated", {
expect_equivalent(v35, df$v3 + 5)
})
})
})
test_that("Logical expressions evaluate", {
e1 <- try(ds$v3 > 10)
expect_is(e1, "CrunchLogicalExpr")
skip("which isn't implemented correctly yet")
expect_identical(which(e1), which(df$v3 > 10))
skip("select with logical expression not supported")
expect_identical(as.vector(e1), as.vector(ds$v3) > 10)
})
test_that("R & Crunch logical together", {
e1 <- ds$v3 < 10 | c(rep(FALSE, 15), rep(TRUE, 5))
expect_equivalent(as.vector(ds$v3[e1]),
c(8, 9, 23, 24, 25, 26, 27))
e2 <- TRUE & is.na(ds$v2)
expect_equivalent(as.vector(ds$v3[e2]),
23:27)
e3 <- df$v4 == "B" & is.na(ds$v1) ## Note df
expect_equivalent(as.vector(ds$v3[e3]),
c(8, 10, 12))
})
## Typo fixed in the test description: "expresssions" -> "expressions".
## Verifies that expressions built from other expressions evaluate to the
## same values as the corresponding local computations on df.
test_that("expressions on expressions evaluate", {
  e3 <- try(ds$v3 + ds$v3 + 10)
  expect_is(e3, "CrunchExpr")
  expect_fixed_output(e3, "Crunch expression: v3 + v3 + 10")
  expect_identical(as.vector(e3), 2*df$v3 + 10)
  e4 <- try(ds$v3 + ds$v3 * 2)
  expect_is(e4, "CrunchExpr")
  expect_fixed_output(e4, "Crunch expression: v3 + v3 * 2")
  expect_identical(as.vector(e4), 3*df$v3)
})
varnames <- names(df[-6])
test_that("Select values with Numeric inequality filter", {
e5 <- try(ds$v3[ds$v3 < 10])
expect_is(e5, "CrunchVariable")
expect_identical(as.vector(e5), c(8, 9))
for (i in varnames) {
expect_equivalent(as.vector(ds[[i]][ds$v3 < 10]),
df[[i]][1:2], info=i)
}
})
test_that("Select values with %in% on Numeric", {
for (i in varnames) {
expect_equivalent(as.vector(ds[[i]][ds$v3 %in% 10]),
df[[i]][3], info=i)
expect_equivalent(as.vector(ds[[i]][ds$v3 %in% c(10, 12)]),
df[[i]][c(3, 5)], info=i)
}
})
test_that("Select values with %in% on Categorical", {
expect_length(as.vector(ds$v3[ds$v4 %in% "B"]), 10)
for (i in varnames) {
expect_equivalent(as.vector(ds[[i]][ds$v4 %in% "B"]),
df[[i]][df$v4 %in% "B"], info=i)
}
expect_length(as.vector(ds$v3[ds$q1 %in% "selected"]), 10)
})
test_that("Select values with %in% on nonexistent categories", {
expect_length(as.vector(ds$v3[ds$v4 %in% numeric(0)]), 0)
expect_length(as.vector(ds$v3[!(ds$v4 %in% numeric(0))]), 20)
expect_warning(
expect_length(as.vector(ds$v3[ds$v4 == "other"]), 0),
paste0("Category not found: ", dQuote("other"), ". Dropping."))
expect_warning(
expect_length(as.vector(ds$v3[ds$v4 != "other"]), 20),
paste0("Category not found: ", dQuote("other"), ". Dropping."))
})
uncached({
with_mock(`crunch::.crunchPageSize`=function (x) 5L, {
with(temp.option(httpcache.log=""), {
avlog <- capture.output(v3.5 <- as.vector(ds$v3[ds$v4 %in% "B"]))
})
test_that("Select values with %in% on Categorical, paginated", {
logdf <- loadLogfile(textConnection(avlog))
## GET v3 entity to get /values/ URL,
## GET v3 entity to get categories to construct expr,
## GET /values/ 2x to get data,
## then a 3rd GET /values/ that returns 0
## values, which breaks the pagination loop
expect_identical(logdf$verb, rep("GET", 5))
expect_identical(grep("values", logdf$url), 3:5)
expect_equivalent(v3.5, df$v3[df$v4 %in% "B"])
})
})
})
test_that("Select values with &ed filter", {
expect_equivalent(as.vector(ds$v3[ds$v3 >= 10 & ds$v3 < 13]),
10:12)
f <- ds$v3 >= 10 & ds$v3 < 13
expect_is(f, "CrunchLogicalExpr")
for (i in varnames) {
expect_equivalent(as.vector(ds[[i]][f]),
df[[i]][3:5], info=i)
}
})
test_that("Select values with negated filter", {
expect_equivalent(as.vector(ds$v3[!(ds$v4 %in% "B")]),
df$v3[df$v4 %in% "C"])
for (i in varnames) {
expect_equivalent(as.vector(ds[[i]][!(ds$v4 %in% "B")]),
df[[i]][df$v4 %in% "C"], info=i)
}
})
test_that("R numeric filter evaluates", {
expect_equivalent(as.vector(ds$v3[6]), df$v3[6])
})
test_that("If R numeric filter is a range, 'between' is correct", {
expect_equivalent(as.vector(ds$v3[3:18]), df$v3[3:18])
})
test_that("R logical filter evaluates", {
expect_identical(as.vector(ds$v3[df$v3 < 10]), c(8, 9))
})
test_that("filtered categorical returns factor", {
expect_equivalent(as.vector(ds$v4[ds$v4 == "B"]),
factor(rep("B", 10)))
})
test_that("duplicated method", {
expect_identical(which(duplicated(ds$v3)), integer(0))
expect_equivalent(as.vector(ds$v3[duplicated(ds$v4)]), 10:27)
expect_identical(which(duplicated(ds$v3 + 4)), integer(0))
skip("'which' isn't implemented correctly")
expect_identical(which(duplicated(ds$v4)), 3:20)
})
})
|
library(shiny)
## fBernoulli() comes from the sourced helper; presumably it returns the
## probability of m hits in n shots for each hit probability p -- confirm
## against includes/bernoulli.R.
## NOTE(review): home-relative absolute path; works only where this tree
## is checked out under ~/www. chdir=TRUE so relative paths inside the
## sourced file resolve from its own directory.
source("~/www/probability-theory/includes/bernoulli.R", chdir=TRUE)
# Define UI: sliders for number of hits and shots, plus a line plot of the
# probability of that outcome as a function of the hit probability
ui <- fluidPage(
# Application title
titlePanel("Hit to bull's eye"),
# Sidebar with sliders selecting the number of hits and the number of shots
sidebarLayout(
sidebarPanel(
sliderInput("hits",
"Number of Hits:",
min = 1,
max = 8,
value = 6),
sliderInput("shots",
"Number of Shots:",
min = 1,
max = 8,
value = 8)
),
# Show the computed probability curve
mainPanel(
plotOutput("distPlot")
)
)
)
# Define server logic: plot P("hits" hits out of "shots" shots) against the
# per-shot hit probability p, sampled on a 0..1 grid
server <- function(input, output) {
output$distPlot <- renderPlot({
p = seq(0,1,by=0.01)
pLab = seq(0,1,0.1)
# xaxt="n": suppress the default axis so tick labels can be drawn in
# probability units (the x data are grid indices, not probabilities)
plot(fBernoulli(p = p, m = input$hits, n = input$shots), type='l', xaxt="n", xlab = 'Hit probability', ylab = paste('Probability of "', input$hits, '" hits from "', input$shots, '" shots', sep = ""))
axis(1,at=pLab*length(p),labels=pLab)
})
}
# Run the application
shinyApp(ui = ui, server = server)
| /Tasks/6_from_8_hit_to_bulls_eye.R | no_license | spolischook/probability-theory | R | false | false | 1,268 | r | library(shiny)
source("~/www/probability-theory/includes/bernoulli.R", chdir=TRUE)
# Define UI for application that draws a histogram
ui <- fluidPage(
# Application title
titlePanel("Hit to bull's eye"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
sliderInput("hits",
"Number of Hits:",
min = 1,
max = 8,
value = 6),
sliderInput("shots",
"Number of Shots:",
min = 1,
max = 8,
value = 8)
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("distPlot")
)
)
)
# Define server logic required to draw a histogram
server <- function(input, output) {
output$distPlot <- renderPlot({
p = seq(0,1,by=0.01)
pLab = seq(0,1,0.1)
plot(fBernoulli(p = p, m = input$hits, n = input$shots), type='l', xaxt="n", xlab = 'Hit probability', ylab = paste('Probability of "', input$hits, '" hits from "', input$shots, '" shots', sep = ""))
axis(1,at=pLab*length(p),labels=pLab)
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
## Auto-generated fuzzer regression case for the internal compiled routine
## gjam:::tnormRcpp (a truncated-normal helper, judging by the hi/lo/mu/sig
## arguments -- confirm against the gjam sources).
## The lo and mu inputs are subnormal doubles near the smallest representable
## magnitude, chosen by the fuzzer to probe boundary behavior; sig = 0 is a
## degenerate scale.
testlist <- list(hi = 0, lo = 4.93769206453742e-320, mu = 3.92090496539613e-320, sig = 0)
result <- do.call(gjam:::tnormRcpp,testlist)
## Print the structure of the result; the harness checks the call completes
## without memory errors rather than asserting a specific value.
str(result)
str(result) |
## This file is R script file for
## drawing
##  - Global Active Power line graph
##  - Voltage line graph
##  - Energy sub metering line graph
##  - Global reactive power line graph
## from 2007-02-01 to 2007-02-02 of Electric power consumption Dataset.
##
## This script needs data files in the directory named data.
## The data files can be get from below site and are needed to
## be unzipped before running this script.
## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
##
## This script output these files.
##  * plot4.png
##    - Global Active Power line graph
##    - Voltage line graph
##    - Energy sub metering line graph
##    - Global reactive power line graph

## ------------------------------------------------------------
## read data and name columns (common for plot[1-4].R)
## ------------------------------------------------------------
## The pipe()/grep pre-filter keeps only rows for 1/2/2007 and 2/2/2007 so
## the full file never has to be loaded. NOTE(review): this requires a
## Unix-like `grep` on the PATH.
dat <- read.table(pipe('grep "^[12]/2/2007" "data/household_power_consumption.txt"'),
                  header = FALSE, sep = ';',
                  colClasses = c(rep("character", 2), rep("numeric", 7)))
colnames(dat) <- c("Date", "Time", "Global_active_power", "Global_reactive_power",
                   "Voltage", "Global_intensity",
                   "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")

## ------------------------------------------------------------
## prepare for drawing graphs: 2x2 panel layout in a 480x480 PNG
## ------------------------------------------------------------
dateTime <- strptime(paste(dat$Date, dat$Time), "%d/%m/%Y %T")
png(filename = "plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))

## ------------------------------------------------------------
## top-left panel: Global Active Power line graph
## ------------------------------------------------------------
plot(dateTime, dat$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power")

## ------------------------------------------------------------
## top-right panel: Voltage line graph
## (axis-label typo fixed: "datatime" -> "datetime")
## ------------------------------------------------------------
plot(dateTime, dat$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")

## ------------------------------------------------------------
## bottom-left panel: the three sub-metering series overlaid
## ------------------------------------------------------------
## type="n" sets up the frame; the series are layered with points(type="l")
plot(dateTime, dat$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
col <- c("black", "red", "blue")
for (i in 1:3) {
  colName <- sprintf("Sub_metering_%d", i)
  points(dateTime, dat[[colName]], type = "l", col = col[i])
}
subMeteringCols <- grep("^Sub_metering_", colnames(dat))
subMeteringNames <- colnames(dat)[subMeteringCols]
legend("topright", lty = 1, col = col, legend = subMeteringNames, bty = "n")

## ------------------------------------------------------------
## bottom-right panel: Global reactive power line graph
## (axis-label typo fixed: "datatime" -> "datetime")
## ------------------------------------------------------------
plot(dateTime, dat$Global_reactive_power, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power")

dev.off()
| /plot4.R | no_license | patakuti/ExData_Plotting1 | R | false | false | 2,916 | r | ## This file is R script file for
## drawing
## - Global Active Power line graph
## - Voltage line graph
## - Energy sub metering line graph
## - Global reactive power line graph
## from 2007-02-01 to 2007-02-02 of Electric power consumption Dataset.
##
## This script needs data files in the directory named data.
## The data files can be downloaded from the site below and need to
## be unzipped before running this script.
## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
##
## This script outputs this file:
## * plot4.png
## - Global Active Power line graph
## - Voltage line graph
## - Energy sub metering line graph
## - Global reactive power line graph
## ------------------------------------------------------------
## read data and name columns (common for plot[1-4].R)
## grep pre-filters rows for 1/2/2007 and 2/2/2007 so only two days are
## parsed; header = FALSE because the header row is filtered out by grep.
## ------------------------------------------------------------
dat <- read.table(pipe('grep "^[12]/2/2007" "data/household_power_consumption.txt"'),
                  header = FALSE, sep = ";",
                  colClasses = c(rep("character", 2), rep("numeric", 7)))
colnames(dat) <- c("Date", "Time", "Global_active_power", "Global_reactive_power",
                   "Voltage", "Global_intensity",
                   "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
## ------------------------------------------------------------
## prepare for drawing graphs: parse timestamps, open device, 2x2 layout
## ------------------------------------------------------------
dateTime <- strptime(paste(dat$Date, dat$Time), "%d/%m/%Y %T")
png(filename = "plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
## ------------------------------------------------------------
## draw
## - Global Active Power line graph
## ------------------------------------------------------------
plot(dateTime, dat$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
## ------------------------------------------------------------
## draw
## - Voltage line graph (xlab typo "datatime" corrected to "datetime")
## ------------------------------------------------------------
plot(dateTime, dat$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
## ------------------------------------------------------------
## draw
## - Energy sub metering line graph: empty frame, then one line per meter.
##   (lineCols renamed from "col", which shadowed base::col.)
## ------------------------------------------------------------
plot(dateTime, dat$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
lineCols <- c("black", "red", "blue")
for (i in 1:3) {
  colName <- sprintf("Sub_metering_%d", i)
  points(dateTime, dat[[colName]], type = "l", col = lineCols[i])
}
subMeteringNames <- grep("^Sub_metering_", colnames(dat), value = TRUE)
legend("topright", lty = 1, col = lineCols, legend = subMeteringNames, bty = "n")
## ------------------------------------------------------------
## draw
## - Global reactive power line graph
## ------------------------------------------------------------
plot(dateTime, dat$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
|
### Remove previous variables
# NOTE(review): rm(list = ls()) wipes the interactive workspace; acceptable in
# a stand-alone batch script, but hostile if this file is ever source()'d.
rm(list = ls())
StartTimeTotal <- proc.time()
#### Libraries, set seed, set cores ----
library(data.table)
library(doParallel)
library(foreach)
library(doRNG)
library(copula)
library(compiler)
library(metafor)
enableJIT(3)
set.seed(123)
# Number of cores for parallel
num.Cores <- detectCores() - 1
c1 <- makeCluster(num.Cores)
#### Declare variables ----
# Reps = number of repetitions of experiment
Reps = 5
# k = number of studies in series
Studies = c(3,5,10,30,50,100)
# subj = number of subjects in study, likely to be distributed
# First three entries are integer (min, max) ranges sampled uniformly on the
# sqrt scale; the numeric entry is (meanlog, sdlog) for a log-normal draw.
# The simulation loop uses is.integer() on the entry to tell the cases apart.
Subj <- list(as.integer(c(60,60)), as.integer(c(20,100)), as.integer(c(250, 1000)), as.numeric(c(4.2, 1.1)))
# sd = study level standard deviation
True.sd = sqrt(2)
# theta = population level mean - need good sense of range for SMD
theta = c( -0.76, -0.12, 0, 0.12, 0.76)
# tau.sq = between studies variance (can be squared due to sqrt() in normal draw), ?to be distributed
tau.sq = c(0, 0.005, 0.022, 1.676)
# controlProp = proportion of total sample in control arm
controlProp = 0.5
## Boundary of step function on p value, causing severity of publication bias
Severity.boundary <- c(0.05, 0.2)
# Set up strength of publication bias selection IF STILL USING
# Parameters of the Begg-style acceptance weight exp(-b * p^a * n^c),
# applied one-sided in the simulation loop below.
Begg_a <- 0.5
Begg_b <- 3
Begg_c <- -0.3
Begg_sided <- 1
# Set up within study reporting bias - this is now one sided
Tested.outcomes <- 5
Sd.split <- 0.6
# Size of per unit bias increase
Bias.multiple <- c(0, log(0.85)/(-1.81) * 2, log(0.7225)/(-1.81) * 2)
#### Functions ----
findvaluesNSim <- function(ID_num){
# Decode a study-level Unique_ID back into the simulation parameters that
# produced it.  Inverse of findIDNSim(); relies on the script-level globals
# Reps, Studies, tau.sq, theta and Subj declared above.
# Returns list(reps, subj, theta, tau2, numstud).
# Each %% remainder of 0 corresponds to the LAST level of that factor,
# hence the ifelse() guards throughout.
dummy1 = ID_num %% Reps
repetitions = ifelse(dummy1 == 0, Reps, dummy1)
# Cumulative (study count * Reps) boundaries locate the series-size band.
intermed <- Studies * Reps
intermed <- c(0,cumsum(intermed))
dummy2 = ID_num %% (Reps * sum(Studies))
numstudies = Studies[(min(which(intermed >= dummy2))-1)]
dummy5 = ID_num %% (Reps * sum(Studies) * length(tau.sq))
dummy6 = ifelse(dummy5 == 0, length(tau.sq), (dummy5 %/% (Reps * sum(Studies))) + 1)
hetero = tau.sq[dummy6]
dummy7 = ID_num %% (Reps * sum(Studies) * length(tau.sq) * length(theta))
dummy8 = ifelse(dummy7 == 0, length(theta), (dummy7 %/% (Reps * sum(Studies) * length(tau.sq))) + 1)
truevalue = theta[dummy8]
dummy9 = ID_num %% (Reps * sum(Studies) * length(tau.sq) * length(theta) * length(Subj))
dummy10 = ifelse(dummy9 == 0, length(Subj), (dummy9 %/% (Reps * sum(Studies) * length(tau.sq) * length(theta))) + 1)
# Subj entries are length-2 vectors; [[dummy10]][1] reports only the first
# element, with a single-bracket fallback if the extraction errors.
subjects <- tryCatch(Subj[[dummy10]][1], error = function(e) {try(Subj[dummy10], silent=TRUE)})
return(list(reps = repetitions, subj = subjects, theta = truevalue, tau2 = hetero, numstud = numstudies))
}
findIDNSim <- function(repetitions, subjects, truevalue, hetero, numstudies){
# Forward mapping: build the vector of study-level Unique_IDs (one per study
# in the series) for a given parameter combination.  Mixed-radix encoding,
# most- to least-significant digit: Subj, theta, tau.sq, study index within
# series, repetition.  Inverse of findvaluesNSim(); uses the same globals.
IDnumber <- integer()
for (o in 1:numstudies){
counter.dummy <- as.integer((match(subjects, Subj)-1) * length(theta) * length(tau.sq) * sum(Studies) * Reps +
(match(truevalue, theta)-1) * length(tau.sq) * sum(Studies) * Reps +
(match(hetero, tau.sq)-1) * sum(Studies) * Reps +
(sum(Studies[0:(match(numstudies, Studies)-1)]) + o -1) * Reps +
repetitions
)
# NOTE(review): append() grows the vector inside the loop; numstudies is
# small here so the quadratic cost is negligible.
IDnumber <- append(IDnumber, counter.dummy)
}
return(IDnumber)
}
### Unstandardised mean difference (UMD): simulate one two-arm study under a
### random-effects model and return c(observed mean difference, standard
### error of that difference from the pooled within-study variance).
UMD <- function(StudySize, Theta, Heterogeneity, Control_Prop, sd) {
  # Study-specific true effect: population mean plus between-study noise.
  study_effect <- rnorm(1, Theta, sqrt(Heterogeneity))
  # Equal arms, each Control_Prop of the total (truncated to an integer).
  n_ctrl <- as.integer(Control_Prop * StudySize)
  n_trt <- n_ctrl
  # Arms centred at +/- half the effect so their expected difference is it.
  ctrl <- rnorm(n_ctrl, -study_effect / 2, sd)
  trt <- rnorm(n_trt, study_effect / 2, sd)
  diff_means <- mean(trt) - mean(ctrl)
  # Pooled two-sample variance, then the SE of the difference in means.
  pooled_var <- (var(ctrl) * (n_ctrl - 1) + var(trt) * (n_trt - 1)) / (n_ctrl + n_trt - 2)
  se_diff <- sqrt(pooled_var * (1 / n_ctrl + 1 / n_trt))
  c(diff_means, se_diff)
}
### UMD function with multiple outcome bias with frac being sd in first level, num.times = number of outcomes simulated
# outputs vectors ordered by p-val
UMD.mult.out <- function(StudySize, Theta, Heterogeneity, Control_Prop, total.sd, frac, num.times){
  ## Simulate one two-arm study that measures num.times correlated outcomes
  ## (Gaussian copula, correlation parameter frac) and return
  ## list(mean differences, standard errors), both ordered by one-sided
  ## p-value so callers can model selective outcome reporting.
  ##
  ## StudySize    : total patients; each arm gets as.integer(Control_Prop * StudySize)
  ## Theta        : population mean difference
  ## Heterogeneity: between-study variance (tau^2)
  ## total.sd     : within-study SD of each outcome
  StudyUMD <- rnorm(1, Theta, sqrt(Heterogeneity))
  Group1Size <- as.integer(Control_Prop*StudySize)
  Group2Size <- Group1Size
  z <- normalCopula(param = frac, dim = num.times)
  Z <- rCopula(Group1Size, z)
  ControlGroup <- qnorm(Z, mean = -StudyUMD/2, sd = total.sd)
  y <- normalCopula(param = frac, dim = num.times)
  Y <- rCopula(Group2Size, y)
  TreatmentGroup <- qnorm(Y, mean = StudyUMD/2, sd = total.sd)
  Studymean <- apply(TreatmentGroup,2,mean) - apply(ControlGroup, 2, mean)
  ## BUG FIX: the pooled variance previously used the treatment-arm variance
  ## in BOTH terms; the first term must come from the control arm (compare
  ## the single-outcome UMD() above, which pools var(ControlGroup) and
  ## var(TreatmentGroup)).
  Studysd <- sqrt( (apply(ControlGroup, 2, var) * (Group1Size - 1) + apply(TreatmentGroup, 2, var) * (Group2Size-1))/ (Group1Size + Group2Size -2) * (1/Group1Size + 1/Group2Size))
  ## One-sided p-values, used only to order the outputs (smallest p first).
  Begg_p <- pnorm(Studymean/Studysd)
  return(list(Studymean[order(Begg_p)], Studysd[order(Begg_p)]))
}
anyNA <- function(x) {
  # TRUE if any element of x is NA, FALSE otherwise (masks base::anyNA so the
  # name can be exported to the parallel workers via .export below).
  # BUG FIX: the previous repeat-loop indexed x[1] before checking the length,
  # so a zero-length input returned TRUE (x[1] is NA for empty vectors);
  # seq_along() yields an empty loop there and we correctly return FALSE.
  for (i in seq_along(x)) {
    if (is.na(x[i])) {
      return(TRUE)
    }
  }
  FALSE
}
.psort <- function(x, y) {
  ### Row-wise sort that leaves any row containing an NA completely
  ### untouched, so NAs keep their original positions.  t(apply(xy, 1, sort))
  ### cannot do this: na.last either drops the NAs or forces them to an end.
  ### Returns NULL for NULL / zero-length input; otherwise a matrix with
  ### colnames stripped.
  if (is.null(x) || length(x) == 0) {
    return(NULL)
  }
  # Assemble the working matrix: paired vectors become two columns; a lone
  # vector becomes a single row; a matrix is used as-is.
  m <- if (missing(y)) {
    if (is.matrix(x)) x else rbind(x)
  } else {
    cbind(x, y)
  }
  for (row in seq_len(nrow(m))) {
    vals <- m[row, ]
    if (!anyNA(vals)) {
      m[row, ] <- sort(vals)
    }
  }
  colnames(m) <- NULL
  m
}
mod.hc <- function(object, digits, transf, targs, control, tau2est, ...) {
# Modified copy of metafor's hc() (Henmi & Copas 2012 confidence interval,
# intended to be robust to publication bias).  The single functional change
# is marked "Modified here" below: instead of computing the DerSimonian-Laird
# tau^2 internally, the caller supplies an estimate via `tau2est` (e.g. the
# REML tau^2), which is plugged into the H&C machinery.
#
# object : fitted intercept-only model of class "rma.uni" (metafor)
# tau2est: between-study variance estimate to use (floored at 0)
# digits, transf, targs, control: as in metafor's hc() method
# Returns an object of class "hc.rma.uni".
if (!inherits(object, "rma.uni"))
stop("Argument 'object' must be an object of class \"rma.uni\".")
if (inherits(object, "rma.ls"))
stop("Method not yet implemented for objects of class \"rma.ls\". Sorry!")
x <- object
if (!x$int.only)
stop("Method only applicable for models without moderators.")
if (missing(digits))
digits <- x$digits
if (missing(transf))
transf <- FALSE
if (missing(targs))
targs <- NULL
yi <- x$yi
vi <- x$vi
k <- length(yi)
if (k == 1)
stop("Stopped because k = 1.")
if (!x$allvipos)
stop("Cannot use method when one or more sampling variances are non-positive.")
# Normalise `level` whether supplied as e.g. 95, 0.95 or 0.05.
level <- ifelse(x$level > 1, (100-x$level)/100, ifelse(x$level > .5, 1-x$level, x$level))
if (missing(control))
control <- list()
###
### set control parameters for uniroot() and possibly replace with user-defined values
con <- list(tol=.Machine$double.eps^0.25, maxiter=1000, verbose=FALSE)
con[pmatch(names(control), names(con))] <- control
###
### original code by Henmi & Copas (2012), modified by Michael Dewey, small adjustments
### for consistency with other functions in the metafor package by Wolfgang Viechtbauer
wi <- 1/vi ### fixed effects weights
W1 <- sum(wi)
W2 <- sum(wi^2) / W1
W3 <- sum(wi^3) / W1
W4 <- sum(wi^4) / W1
### fixed-effects estimate of theta
beta <- sum(wi*yi) / W1
### Q statistic
Q <- sum(wi * ((yi - beta)^2))
### DL estimate of tau^2
###### Modified here to take REML tau2
tau2 <- max(0, tau2est)
vb <- (tau2 * W2 + 1) / W1 ### estimated Var of b
se <- sqrt(vb) ### estimated SE of b
VR <- 1 + tau2 * W2 ### estimated Var of R
SDR <- sqrt(VR) ### estimated SD of R
### conditional mean of Q given R=r
EQ <- function(r)
(k - 1) + tau2 * (W1 - W2) + (tau2^2)*((1/VR^2) * (r^2) - 1/VR) * (W3 - W2^2)
### conditional variance of Q given R=r
VQ <- function(r) {
rsq <- r^2
recipvr2 <- 1 / VR^2
2 * (k - 1) + 4 * tau2 * (W1 - W2) +
2 * tau2^2 * (W1*W2 - 2*W3 + W2^2) +
4 * tau2^2 * (recipvr2 * rsq - 1/VR) * (W3 - W2^2) +
4 * tau2^3 * (recipvr2 * rsq - 1/VR) * (W4 - 2*W2*W3 + W2^3) +
2 * tau2^4 * (recipvr2 - 2 * (1/VR^3) * rsq) * (W3 - W2^2)^2
}
scale <- function(r){VQ(r)/EQ(r)} ### scale parameter of the gamma distribution
shape <- function(r){EQ(r)^2/VQ(r)} ### shape parameter of the gamma distribution
### inverse of f
finv <- function(f)
(W1/W2 - 1) * ((f^2) - 1) + (k - 1)
### equation to be solved (integral equation for the percentage point of U)
eqn <- function(x) {
integrand <- function(r) {
pgamma(finv(r/x), scale=scale(SDR*r), shape=shape(SDR*r))*dnorm(r)
}
integral <- integrate(integrand, lower=x, upper=Inf)$value
val <- integral - level / 2
#cat(val, "\n")
val
}
t0 <- try(uniroot(eqn, lower=0, upper=2, tol=con$tol, maxiter=con$maxiter))
if (inherits(t0, "try-error"))
stop("Error in uniroot().")
t0 <- t0$root
u0 <- SDR * t0 ### (approximate) percentage point for the distribution of U
###
ci.lb <- beta - u0 * se ### lower CI bound
ci.ub <- beta + u0 * se ### upper CI bound
# Keep the original model's estimates alongside for comparison in the output.
beta.rma <- x$beta
se.rma <- x$se
ci.lb.rma <- x$ci.lb
ci.ub.rma <- x$ci.ub
### if requested, apply transformation to yi's and CI bounds
if (is.function(transf)) {
if (is.null(targs)) {
beta <- sapply(beta, transf)
beta.rma <- sapply(beta.rma, transf)
se <- NA
se.rma <- NA
ci.lb <- sapply(ci.lb, transf)
ci.ub <- sapply(ci.ub, transf)
ci.lb.rma <- sapply(ci.lb.rma, transf)
ci.ub.rma <- sapply(ci.ub.rma, transf)
} else {
beta <- sapply(beta, transf, targs)
beta.rma <- sapply(beta.rma, transf, targs)
se <- NA
se.rma <- NA
ci.lb <- sapply(ci.lb, transf, targs)
ci.ub <- sapply(ci.ub, transf, targs)
ci.lb.rma <- sapply(ci.lb.rma, transf, targs)
ci.ub.rma <- sapply(ci.ub.rma, transf, targs)
}
}
### make sure order of intervals is always increasing
### (.psort leaves rows containing NA untouched -- see its definition above)
tmp <- .psort(ci.lb, ci.ub)
ci.lb <- tmp[,1]
ci.ub <- tmp[,2]
tmp <- .psort(ci.lb.rma, ci.ub.rma)
ci.lb.rma <- tmp[,1]
ci.ub.rma <- tmp[,2]
###
res <- list(beta=beta, se=se, ci.lb=ci.lb, ci.ub=ci.ub,
beta.rma=beta.rma, se.rma=se.rma, ci.lb.rma=ci.lb.rma, ci.ub.rma=ci.ub.rma,
method="DL", method.rma=x$method, tau2=tau2, tau2.rma=x$tau2, digits=digits)
class(res) <- "hc.rma.uni"
return(res)
}
#### Parallel Sim Loop ----
# One foreach task per repetition m.  Each task loops over every
# (tau.sq, Studies, Subj, theta) combination, simulates a Begg-biased series
# of studies, fits the battery of meta-analysis estimators, and returns one
# data.table row per combination; %dorng% provides reproducible parallel RNG.
StartTime <- proc.time()
registerDoParallel(c1)
set.seed(2543)
Normal.Sim.Results <- foreach (m = 1:Reps, .combine=rbind, .packages = c("data.table", "copula", "metafor"),
.export = c("Studies", "Subj", "True.sd",
"theta", "tau.sq", "controlProp", "UMD", "Severity.boundary", "Begg_a",
"Begg_b", "Begg_sided", "Tested.outcomes", "Sd.split",
"Bias.multiple", "UMD.mult.out", "Begg_c", "anyNA", ".psort", "mod.hc")
) %dorng% {
# ID different for analysis
ID = length(tau.sq) * length(Subj) * length(theta) * length(Studies)
# Pre-sized result table; one row per parameter combination for this rep.
Normal.Sim <- data.table(
Unique_ID = integer(length = ID),
FE_Estimate = numeric(length = ID),
FE_se = numeric(length = ID),
REML_Estimate = numeric(length = ID),
REML_se = numeric(length = ID),
REML_tau2 = numeric(length = ID),
DL_Estimate = numeric(length = ID),
DL_se = numeric(length = ID),
DL_tau2 = numeric(length = ID),
DL_I2 = numeric(length = ID),
HC_DL_se = numeric(length = ID),
HC_DL_CIlb = numeric(length = ID),
HC_DL_CIub = numeric(length = ID),
HC_REML_se = numeric(length = ID),
HC_REML_CIlb = numeric(length = ID),
HC_REML_CIub = numeric(length = ID),
KH_REML_CIlb = numeric(length = ID),
KH_REML_CIub = numeric(length = ID),
KH_REML_se = numeric(length = ID),
KH_DL_CIlb = numeric(length = ID),
KH_DL_CIub = numeric(length = ID),
KH_DL_se = numeric(length = ID),
Moreno_Estimate = numeric(length = ID),
Moreno_se = numeric(length = ID),
Mult_se = numeric(length = ID)
)
dummy.counter <- 1
for (l in tau.sq){
for (n in Studies){
for (i in Subj){
for(k in theta){
Study_estimate <- numeric(length = n)
Study_sd <- numeric(length = n)
ni <- integer(length = n)
for (o in 1:n){
#Select sample size
# Integer Subj entries are (min, max) ranges sampled uniformly on the sqrt
# scale; numeric entries are (meanlog, sdlog) for a log-normal draw (+4).
if (is.integer(i[1]) == TRUE){
#Study_patientnumber <- i[1]
Study_patientnumber <- as.integer( (runif(1, sqrt(i[1]), sqrt(i[2])))^2 )
} else {
Study_patientnumber <- round(rlnorm(1, meanlog = i[1], sdlog = i[2]) + 4)
}
# Begg-style publication bias: redraw the study until it survives the
# acceptance test with weight exp(-b * p^a * n^c).
repeat{
Study_summary <- UMD(Study_patientnumber, k, l, controlProp, True.sd)
Study_mean <- Study_summary[1]
Study_StanDev <- Study_summary[2]
Study.n <- as.integer(0.5*Study_patientnumber) * 2
Begg_weight <-exp(
-Begg_b * (
(Begg_sided * pnorm(Study_mean/(Study_StanDev)))
^Begg_a ) * (Study.n ^ Begg_c)
)
if(rbinom(1,1, Begg_weight) == 1 ){break}
}
Study_estimate[o] <- Study_mean
Study_sd[o] <- Study_StanDev
ni[o] <- Study.n
}
## Counter without number of studies
counter <- as.integer((apply(sapply(Subj, function(vec) {i %in% vec}), 1, which.max)[1]-1) * length(theta) * length(tau.sq) * length(Studies) * Reps +
(match(k, theta)-1) * length(tau.sq) * length(Studies) * Reps +
(match(l, tau.sq)-1) * length(Studies) * Reps +
(match(n, Studies)-1) * Reps +
m
)
### Temporary data.table
temp.data <- data.table(Study_estimate, Study_sd, ni)
#Fixed and random effects
# rma.uni()'s second positional argument is the sampling VARIANCE, hence
# Study_sd^2 throughout.
# NOTE(review): the warning handlers below return list(list(...)) -- doubly
# wrapped -- so after a warning ma.fe$se etc. would be NULL rather than NA;
# confirm this is intentional before reusing this pattern.
ma.fe <- tryCatch({
rma.uni(temp.data$Study_estimate, temp.data$Study_sd^2 , method = "FE")
},
error = function(e){
return(list(b = NA, se = NA))
},
warning = function(w){
return(list(list(b = NA, se = NA)))
}
)
ma.reml <- tryCatch({
rma.uni(temp.data$Study_estimate, temp.data$Study_sd^2 , method = "REML", control = list(stepadj = 0.5))
},
error = function(e){
return(list(b = NA, tau2 = NA, se = NA))
},
warning = function(w){
return(list(list(b = NA, tau2 = NA, se = NA)))
}
)
ma.DL <- tryCatch({
rma.uni(temp.data$Study_estimate, temp.data$Study_sd^2 , method = "DL")
},error = function(e){
return(list(b = NA, tau2 = NA, se = NA, I2 = NA))
},warning = function(w){
return(list(list(b = NA, tau2 = NA, se = NA, I2 = NA)))
})
# Henmi & Copas
ma.hc.DL <- tryCatch({
hc(ma.DL)
},error = function(e){
return(list(se = NA, ci.lb = NA, ci.ub = NA))
},warning = function(w){
return(list(list(se = NA, ci.lb = NA, ci.ub = NA)))
})
# Henmi & Copas interval re-using the REML tau^2 (see mod.hc above).
ma.hc.REML <- tryCatch({
mod.hc(ma.reml, tau2est = ma.reml$tau2)
},error = function(e){
return(list(se = NA, ci.lb = NA, ci.ub = NA))
},warning = function(w){
return(list(list(se = NA, ci.lb = NA, ci.ub = NA)))
})
# Knapp Hartung
ma.reml.kh <- tryCatch({
rma.uni(temp.data$Study_estimate, temp.data$Study_sd^2 , method = "REML", knha = TRUE, control = list(stepadj = 0.5))
},
error = function(e){
return(list(b = NA, tau2 = NA, se = NA, ci.lb = NA, ci.ub = NA))
},
warning = function(w){
return(list(b = NA, tau2 = NA, se = NA, ci.lb = NA, ci.ub = NA))
}
)
ma.DL.kh <- tryCatch({rma.uni(temp.data$Study_estimate, temp.data$Study_sd^2 , method = "DL", knha = TRUE)}
, error = function(e){return(list(se = NA, ci.lb = NA, ci.ub = NA))
})
## Moreno (?D-var) - not exactly clear which implementation is being used is likely equation 2a
moreno.est <- tryCatch({ma.moren <- regtest(ma.fe , predictor = "vi", model = "lm")
c(ma.moren$fit[[5]][1],ma.moren$fit[[5]][3])
}, error=function(err) c(NA,NA))
## Mawdesley
# Multiplicative over-dispersion: inflate the FE sampling variances by the
# mean squared residual of a weighted null regression, floored at 1.
ma.mult <- tryCatch({
mawd.lm <- lm(temp.data$Study_estimate ~ 1, weights = 1/(temp.data$Study_sd^2))
sm.mawd.lm <- summary(mawd.lm)
ifelse(mean(sm.mawd.lm$residuals^2) < 1, phi.est <- 1, phi.est <- mean(sm.mawd.lm$residuals^2))
rma.uni(temp.data$Study_estimate, temp.data$Study_sd^2 * phi.est , method = "FE")}
, error = function(e){return(list(se = NA))
})
Normal.Sim[dummy.counter, `:=` (Unique_ID = counter,
FE_Estimate = ma.fe[[1]][1],
FE_se = ma.fe$se,
REML_Estimate = ma.reml$b[1],
REML_se = ma.reml$se,
REML_tau2 = ma.reml$tau2,
DL_Estimate = ma.DL[[1]][1],
DL_se = ma.DL$se,
DL_tau2 = ma.DL$tau2,
DL_I2 = ma.DL$I2,
HC_DL_se = ma.hc.DL$se,
HC_DL_CIlb = ma.hc.DL$ci.lb,
HC_DL_CIub = ma.hc.DL$ci.ub,
HC_REML_se = ma.hc.REML$se,
HC_REML_CIlb = ma.hc.REML$ci.lb,
HC_REML_CIub = ma.hc.REML$ci.ub,
KH_REML_CIlb = ma.reml.kh$ci.lb,
KH_REML_CIub = ma.reml.kh$ci.ub,
KH_REML_se = ma.reml.kh$se,
KH_DL_CIlb = ma.DL.kh$ci.lb,
KH_DL_CIub = ma.DL.kh$ci.ub,
KH_DL_se = ma.DL.kh$se,
Moreno_Estimate = moreno.est[1],
Moreno_se = moreno.est[2],
Mult_se = ma.mult$se
)]
dummy.counter <- dummy.counter + 1
}
}
}
}
# Value of the foreach body: this rep's table, rbind-combined across reps.
Normal.Sim
}
stopCluster(c1)
# Rows arrive in worker/rbind order; restore the parameter (ID) order.
Normal.Sim.Results <- Normal.Sim.Results[order(Unique_ID)]
##### Need to re append values - specific to analysis
ID = length(Subj) * length(controlProp) * length(theta) * length(tau.sq) * Reps * length(Studies)
# Collapse Subj to one representative scalar per design cell for labelling.
Subj <- c(60, 20, 250, 4.2)
Normal.Sim.Results$Rep_Number = rep(1:Reps, times = ID/Reps)
Normal.Sim.Results$Rep_NumStudies = rep(rep(Studies, each = Reps), times = ID/(Reps*length(Studies)))
Normal.Sim.Results$Rep_tau.sq = rep(rep(tau.sq, each = Reps * length(Studies)), times = ID/(Reps*length(Studies)*length(tau.sq)))
Normal.Sim.Results$Rep_theta = rep( rep(theta, each = Reps * length(Studies) * length(tau.sq)), times = length(Subj))
Normal.Sim.Results$Rep_Subj = rep(Subj, each = ID / length(Subj))
TimeTakenAn <- proc.time() - StartTime
#write.csv(Normal.Sim.Results, file = "NSB0V1An.csv")
saveRDS(Normal.Sim.Results, file = "NSBeggRDS")
### Checking values
sum(is.na(Normal.Sim.Results))
# NOTE(review): the column is named REML_Estimate; "REML_Est" only works via
# $ partial matching -- confirm it resolves, or spell the name out.
Normal.Sim.Results[is.na(Normal.Sim.Results$REML_Est)]
#### Timings ----
TimeTakenTotal <- proc.time() - StartTimeTotal
TimeTakenAn
TimeTakenTotal | /Final/NSTotalBegg.R | no_license | Schupert/MetaSimulationProject | R | false | false | 20,028 | r | ### Remove previous variables
# NOTE(review): rm(list = ls()) wipes the interactive workspace; acceptable in
# a stand-alone batch script, but hostile if this file is ever source()'d.
rm(list = ls())
StartTimeTotal <- proc.time()
#### Libraries, set seed, set cores ----
library(data.table)
library(doParallel)
library(foreach)
library(doRNG)
library(copula)
library(compiler)
library(metafor)
enableJIT(3)
set.seed(123)
# Number of cores for parallel
num.Cores <- detectCores() - 1
c1 <- makeCluster(num.Cores)
#### Declare variables ----
# Reps = number of repetitions of experiment
Reps = 5
# k = number of studies in series
Studies = c(3,5,10,30,50,100)
# subj = number of subjects in study, likely to be distributed
# First three entries are integer (min, max) ranges sampled uniformly on the
# sqrt scale; the numeric entry is (meanlog, sdlog) for a log-normal draw.
# The simulation loop uses is.integer() on the entry to tell the cases apart.
Subj <- list(as.integer(c(60,60)), as.integer(c(20,100)), as.integer(c(250, 1000)), as.numeric(c(4.2, 1.1)))
# sd = study level standard deviation
True.sd = sqrt(2)
# theta = population level mean - need good sense of range for SMD
theta = c( -0.76, -0.12, 0, 0.12, 0.76)
# tau.sq = between studies variance (can be squared due to sqrt() in normal draw), ?to be distributed
tau.sq = c(0, 0.005, 0.022, 1.676)
# controlProp = proportion of total sample in control arm
controlProp = 0.5
## Boundary of step function on p value, causing severity of publication bias
Severity.boundary <- c(0.05, 0.2)
# Set up strength of publication bias selection IF STILL USING
# Parameters of the Begg-style acceptance weight exp(-b * p^a * n^c),
# applied one-sided in the simulation loop below.
Begg_a <- 0.5
Begg_b <- 3
Begg_c <- -0.3
Begg_sided <- 1
# Set up within study reporting bias - this is now one sided
Tested.outcomes <- 5
Sd.split <- 0.6
# Size of per unit bias increase
Bias.multiple <- c(0, log(0.85)/(-1.81) * 2, log(0.7225)/(-1.81) * 2)
#### Functions ----
findvaluesNSim <- function(ID_num){
# Decode a study-level Unique_ID back into the simulation parameters that
# produced it.  Inverse of findIDNSim(); relies on the script-level globals
# Reps, Studies, tau.sq, theta and Subj declared above.
# Returns list(reps, subj, theta, tau2, numstud).
# Each %% remainder of 0 corresponds to the LAST level of that factor,
# hence the ifelse() guards throughout.
dummy1 = ID_num %% Reps
repetitions = ifelse(dummy1 == 0, Reps, dummy1)
# Cumulative (study count * Reps) boundaries locate the series-size band.
intermed <- Studies * Reps
intermed <- c(0,cumsum(intermed))
dummy2 = ID_num %% (Reps * sum(Studies))
numstudies = Studies[(min(which(intermed >= dummy2))-1)]
dummy5 = ID_num %% (Reps * sum(Studies) * length(tau.sq))
dummy6 = ifelse(dummy5 == 0, length(tau.sq), (dummy5 %/% (Reps * sum(Studies))) + 1)
hetero = tau.sq[dummy6]
dummy7 = ID_num %% (Reps * sum(Studies) * length(tau.sq) * length(theta))
dummy8 = ifelse(dummy7 == 0, length(theta), (dummy7 %/% (Reps * sum(Studies) * length(tau.sq))) + 1)
truevalue = theta[dummy8]
dummy9 = ID_num %% (Reps * sum(Studies) * length(tau.sq) * length(theta) * length(Subj))
dummy10 = ifelse(dummy9 == 0, length(Subj), (dummy9 %/% (Reps * sum(Studies) * length(tau.sq) * length(theta))) + 1)
# Subj entries are length-2 vectors; [[dummy10]][1] reports only the first
# element, with a single-bracket fallback if the extraction errors.
subjects <- tryCatch(Subj[[dummy10]][1], error = function(e) {try(Subj[dummy10], silent=TRUE)})
return(list(reps = repetitions, subj = subjects, theta = truevalue, tau2 = hetero, numstud = numstudies))
}
findIDNSim <- function(repetitions, subjects, truevalue, hetero, numstudies){
# Forward mapping: build the vector of study-level Unique_IDs (one per study
# in the series) for a given parameter combination.  Mixed-radix encoding,
# most- to least-significant digit: Subj, theta, tau.sq, study index within
# series, repetition.  Inverse of findvaluesNSim(); uses the same globals.
IDnumber <- integer()
for (o in 1:numstudies){
counter.dummy <- as.integer((match(subjects, Subj)-1) * length(theta) * length(tau.sq) * sum(Studies) * Reps +
(match(truevalue, theta)-1) * length(tau.sq) * sum(Studies) * Reps +
(match(hetero, tau.sq)-1) * sum(Studies) * Reps +
(sum(Studies[0:(match(numstudies, Studies)-1)]) + o -1) * Reps +
repetitions
)
# NOTE(review): append() grows the vector inside the loop; numstudies is
# small here so the quadratic cost is negligible.
IDnumber <- append(IDnumber, counter.dummy)
}
return(IDnumber)
}
### Unstandardised mean difference (UMD): simulate one two-arm study under a
### random-effects model and return c(observed mean difference, standard
### error of that difference from the pooled within-study variance).
UMD <- function(StudySize, Theta, Heterogeneity, Control_Prop, sd) {
  # Study-specific true effect: population mean plus between-study noise.
  study_effect <- rnorm(1, Theta, sqrt(Heterogeneity))
  # Equal arms, each Control_Prop of the total (truncated to an integer).
  n_ctrl <- as.integer(Control_Prop * StudySize)
  n_trt <- n_ctrl
  # Arms centred at +/- half the effect so their expected difference is it.
  ctrl <- rnorm(n_ctrl, -study_effect / 2, sd)
  trt <- rnorm(n_trt, study_effect / 2, sd)
  diff_means <- mean(trt) - mean(ctrl)
  # Pooled two-sample variance, then the SE of the difference in means.
  pooled_var <- (var(ctrl) * (n_ctrl - 1) + var(trt) * (n_trt - 1)) / (n_ctrl + n_trt - 2)
  se_diff <- sqrt(pooled_var * (1 / n_ctrl + 1 / n_trt))
  c(diff_means, se_diff)
}
### UMD function with multiple outcome bias with frac being sd in first level, num.times = number of outcomes simulated
# outputs vectors ordered by p-val
UMD.mult.out <- function(StudySize, Theta, Heterogeneity, Control_Prop, total.sd, frac, num.times){
  ## Simulate one two-arm study that measures num.times correlated outcomes
  ## (Gaussian copula, correlation parameter frac) and return
  ## list(mean differences, standard errors), both ordered by one-sided
  ## p-value so callers can model selective outcome reporting.
  ##
  ## StudySize    : total patients; each arm gets as.integer(Control_Prop * StudySize)
  ## Theta        : population mean difference
  ## Heterogeneity: between-study variance (tau^2)
  ## total.sd     : within-study SD of each outcome
  StudyUMD <- rnorm(1, Theta, sqrt(Heterogeneity))
  Group1Size <- as.integer(Control_Prop*StudySize)
  Group2Size <- Group1Size
  z <- normalCopula(param = frac, dim = num.times)
  Z <- rCopula(Group1Size, z)
  ControlGroup <- qnorm(Z, mean = -StudyUMD/2, sd = total.sd)
  y <- normalCopula(param = frac, dim = num.times)
  Y <- rCopula(Group2Size, y)
  TreatmentGroup <- qnorm(Y, mean = StudyUMD/2, sd = total.sd)
  Studymean <- apply(TreatmentGroup,2,mean) - apply(ControlGroup, 2, mean)
  ## BUG FIX: the pooled variance previously used the treatment-arm variance
  ## in BOTH terms; the first term must come from the control arm (compare
  ## the single-outcome UMD() above, which pools var(ControlGroup) and
  ## var(TreatmentGroup)).
  Studysd <- sqrt( (apply(ControlGroup, 2, var) * (Group1Size - 1) + apply(TreatmentGroup, 2, var) * (Group2Size-1))/ (Group1Size + Group2Size -2) * (1/Group1Size + 1/Group2Size))
  ## One-sided p-values, used only to order the outputs (smallest p first).
  Begg_p <- pnorm(Studymean/Studysd)
  return(list(Studymean[order(Begg_p)], Studysd[order(Begg_p)]))
}
anyNA <- function(x) {
  # TRUE if any element of x is NA, FALSE otherwise (masks base::anyNA so the
  # name can be exported to the parallel workers via .export below).
  # BUG FIX: the previous repeat-loop indexed x[1] before checking the length,
  # so a zero-length input returned TRUE (x[1] is NA for empty vectors);
  # seq_along() yields an empty loop there and we correctly return FALSE.
  for (i in seq_along(x)) {
    if (is.na(x[i])) {
      return(TRUE)
    }
  }
  FALSE
}
.psort <- function(x, y) {
  ### Row-wise sort that leaves any row containing an NA completely
  ### untouched, so NAs keep their original positions.  t(apply(xy, 1, sort))
  ### cannot do this: na.last either drops the NAs or forces them to an end.
  ### Returns NULL for NULL / zero-length input; otherwise a matrix with
  ### colnames stripped.
  if (is.null(x) || length(x) == 0) {
    return(NULL)
  }
  # Assemble the working matrix: paired vectors become two columns; a lone
  # vector becomes a single row; a matrix is used as-is.
  m <- if (missing(y)) {
    if (is.matrix(x)) x else rbind(x)
  } else {
    cbind(x, y)
  }
  for (row in seq_len(nrow(m))) {
    vals <- m[row, ]
    if (!anyNA(vals)) {
      m[row, ] <- sort(vals)
    }
  }
  colnames(m) <- NULL
  m
}
mod.hc <- function(object, digits, transf, targs, control, tau2est, ...) {
# Modified copy of metafor's hc() (Henmi & Copas 2012 confidence interval,
# intended to be robust to publication bias).  The single functional change
# is marked "Modified here" below: instead of computing the DerSimonian-Laird
# tau^2 internally, the caller supplies an estimate via `tau2est` (e.g. the
# REML tau^2), which is plugged into the H&C machinery.
#
# object : fitted intercept-only model of class "rma.uni" (metafor)
# tau2est: between-study variance estimate to use (floored at 0)
# digits, transf, targs, control: as in metafor's hc() method
# Returns an object of class "hc.rma.uni".
if (!inherits(object, "rma.uni"))
stop("Argument 'object' must be an object of class \"rma.uni\".")
if (inherits(object, "rma.ls"))
stop("Method not yet implemented for objects of class \"rma.ls\". Sorry!")
x <- object
if (!x$int.only)
stop("Method only applicable for models without moderators.")
if (missing(digits))
digits <- x$digits
if (missing(transf))
transf <- FALSE
if (missing(targs))
targs <- NULL
yi <- x$yi
vi <- x$vi
k <- length(yi)
if (k == 1)
stop("Stopped because k = 1.")
if (!x$allvipos)
stop("Cannot use method when one or more sampling variances are non-positive.")
# Normalise `level` whether supplied as e.g. 95, 0.95 or 0.05.
level <- ifelse(x$level > 1, (100-x$level)/100, ifelse(x$level > .5, 1-x$level, x$level))
if (missing(control))
control <- list()
###
### set control parameters for uniroot() and possibly replace with user-defined values
con <- list(tol=.Machine$double.eps^0.25, maxiter=1000, verbose=FALSE)
con[pmatch(names(control), names(con))] <- control
###
### original code by Henmi & Copas (2012), modified by Michael Dewey, small adjustments
### for consistency with other functions in the metafor package by Wolfgang Viechtbauer
wi <- 1/vi ### fixed effects weights
W1 <- sum(wi)
W2 <- sum(wi^2) / W1
W3 <- sum(wi^3) / W1
W4 <- sum(wi^4) / W1
### fixed-effects estimate of theta
beta <- sum(wi*yi) / W1
### Q statistic
Q <- sum(wi * ((yi - beta)^2))
### DL estimate of tau^2
###### Modified here to take REML tau2
tau2 <- max(0, tau2est)
vb <- (tau2 * W2 + 1) / W1 ### estimated Var of b
se <- sqrt(vb) ### estimated SE of b
VR <- 1 + tau2 * W2 ### estimated Var of R
SDR <- sqrt(VR) ### estimated SD of R
### conditional mean of Q given R=r
EQ <- function(r)
(k - 1) + tau2 * (W1 - W2) + (tau2^2)*((1/VR^2) * (r^2) - 1/VR) * (W3 - W2^2)
### conditional variance of Q given R=r
VQ <- function(r) {
rsq <- r^2
recipvr2 <- 1 / VR^2
2 * (k - 1) + 4 * tau2 * (W1 - W2) +
2 * tau2^2 * (W1*W2 - 2*W3 + W2^2) +
4 * tau2^2 * (recipvr2 * rsq - 1/VR) * (W3 - W2^2) +
4 * tau2^3 * (recipvr2 * rsq - 1/VR) * (W4 - 2*W2*W3 + W2^3) +
2 * tau2^4 * (recipvr2 - 2 * (1/VR^3) * rsq) * (W3 - W2^2)^2
}
scale <- function(r){VQ(r)/EQ(r)} ### scale parameter of the gamma distribution
shape <- function(r){EQ(r)^2/VQ(r)} ### shape parameter of the gamma distribution
### inverse of f
finv <- function(f)
(W1/W2 - 1) * ((f^2) - 1) + (k - 1)
### equation to be solved (integral equation for the percentage point of U)
eqn <- function(x) {
integrand <- function(r) {
pgamma(finv(r/x), scale=scale(SDR*r), shape=shape(SDR*r))*dnorm(r)
}
integral <- integrate(integrand, lower=x, upper=Inf)$value
val <- integral - level / 2
#cat(val, "\n")
val
}
t0 <- try(uniroot(eqn, lower=0, upper=2, tol=con$tol, maxiter=con$maxiter))
if (inherits(t0, "try-error"))
stop("Error in uniroot().")
t0 <- t0$root
u0 <- SDR * t0 ### (approximate) percentage point for the distribution of U
###
ci.lb <- beta - u0 * se ### lower CI bound
ci.ub <- beta + u0 * se ### upper CI bound
# Keep the original model's estimates alongside for comparison in the output.
beta.rma <- x$beta
se.rma <- x$se
ci.lb.rma <- x$ci.lb
ci.ub.rma <- x$ci.ub
### if requested, apply transformation to yi's and CI bounds
if (is.function(transf)) {
if (is.null(targs)) {
beta <- sapply(beta, transf)
beta.rma <- sapply(beta.rma, transf)
se <- NA
se.rma <- NA
ci.lb <- sapply(ci.lb, transf)
ci.ub <- sapply(ci.ub, transf)
ci.lb.rma <- sapply(ci.lb.rma, transf)
ci.ub.rma <- sapply(ci.ub.rma, transf)
} else {
beta <- sapply(beta, transf, targs)
beta.rma <- sapply(beta.rma, transf, targs)
se <- NA
se.rma <- NA
ci.lb <- sapply(ci.lb, transf, targs)
ci.ub <- sapply(ci.ub, transf, targs)
ci.lb.rma <- sapply(ci.lb.rma, transf, targs)
ci.ub.rma <- sapply(ci.ub.rma, transf, targs)
}
}
### make sure order of intervals is always increasing
### (.psort leaves rows containing NA untouched -- see its definition above)
tmp <- .psort(ci.lb, ci.ub)
ci.lb <- tmp[,1]
ci.ub <- tmp[,2]
tmp <- .psort(ci.lb.rma, ci.ub.rma)
ci.lb.rma <- tmp[,1]
ci.ub.rma <- tmp[,2]
###
res <- list(beta=beta, se=se, ci.lb=ci.lb, ci.ub=ci.ub,
beta.rma=beta.rma, se.rma=se.rma, ci.lb.rma=ci.lb.rma, ci.ub.rma=ci.ub.rma,
method="DL", method.rma=x$method, tau2=tau2, tau2.rma=x$tau2, digits=digits)
class(res) <- "hc.rma.uni"
return(res)
}
#### Parallel Sim Loop ----
# One foreach task per repetition m.  Each task loops over every
# (tau.sq, Studies, Subj, theta) combination, simulates a Begg-biased series
# of studies, fits the battery of meta-analysis estimators, and returns one
# data.table row per combination; %dorng% provides reproducible parallel RNG.
StartTime <- proc.time()
registerDoParallel(c1)
set.seed(2543)
Normal.Sim.Results <- foreach (m = 1:Reps, .combine=rbind, .packages = c("data.table", "copula", "metafor"),
.export = c("Studies", "Subj", "True.sd",
"theta", "tau.sq", "controlProp", "UMD", "Severity.boundary", "Begg_a",
"Begg_b", "Begg_sided", "Tested.outcomes", "Sd.split",
"Bias.multiple", "UMD.mult.out", "Begg_c", "anyNA", ".psort", "mod.hc")
) %dorng% {
# ID different for analysis
ID = length(tau.sq) * length(Subj) * length(theta) * length(Studies)
# Pre-sized result table; one row per parameter combination for this rep.
Normal.Sim <- data.table(
Unique_ID = integer(length = ID),
FE_Estimate = numeric(length = ID),
FE_se = numeric(length = ID),
REML_Estimate = numeric(length = ID),
REML_se = numeric(length = ID),
REML_tau2 = numeric(length = ID),
DL_Estimate = numeric(length = ID),
DL_se = numeric(length = ID),
DL_tau2 = numeric(length = ID),
DL_I2 = numeric(length = ID),
HC_DL_se = numeric(length = ID),
HC_DL_CIlb = numeric(length = ID),
HC_DL_CIub = numeric(length = ID),
HC_REML_se = numeric(length = ID),
HC_REML_CIlb = numeric(length = ID),
HC_REML_CIub = numeric(length = ID),
KH_REML_CIlb = numeric(length = ID),
KH_REML_CIub = numeric(length = ID),
KH_REML_se = numeric(length = ID),
KH_DL_CIlb = numeric(length = ID),
KH_DL_CIub = numeric(length = ID),
KH_DL_se = numeric(length = ID),
Moreno_Estimate = numeric(length = ID),
Moreno_se = numeric(length = ID),
Mult_se = numeric(length = ID)
)
dummy.counter <- 1
for (l in tau.sq){
for (n in Studies){
for (i in Subj){
for(k in theta){
Study_estimate <- numeric(length = n)
Study_sd <- numeric(length = n)
ni <- integer(length = n)
for (o in 1:n){
#Select sample size
# Integer Subj entries are (min, max) ranges sampled uniformly on the sqrt
# scale; numeric entries are (meanlog, sdlog) for a log-normal draw (+4).
if (is.integer(i[1]) == TRUE){
#Study_patientnumber <- i[1]
Study_patientnumber <- as.integer( (runif(1, sqrt(i[1]), sqrt(i[2])))^2 )
} else {
Study_patientnumber <- round(rlnorm(1, meanlog = i[1], sdlog = i[2]) + 4)
}
# Begg-style publication bias: redraw the study until it survives the
# acceptance test with weight exp(-b * p^a * n^c).
repeat{
Study_summary <- UMD(Study_patientnumber, k, l, controlProp, True.sd)
Study_mean <- Study_summary[1]
Study_StanDev <- Study_summary[2]
Study.n <- as.integer(0.5*Study_patientnumber) * 2
Begg_weight <-exp(
-Begg_b * (
(Begg_sided * pnorm(Study_mean/(Study_StanDev)))
^Begg_a ) * (Study.n ^ Begg_c)
)
if(rbinom(1,1, Begg_weight) == 1 ){break}
}
Study_estimate[o] <- Study_mean
Study_sd[o] <- Study_StanDev
ni[o] <- Study.n
}
## Counter without number of studies
counter <- as.integer((apply(sapply(Subj, function(vec) {i %in% vec}), 1, which.max)[1]-1) * length(theta) * length(tau.sq) * length(Studies) * Reps +
(match(k, theta)-1) * length(tau.sq) * length(Studies) * Reps +
(match(l, tau.sq)-1) * length(Studies) * Reps +
(match(n, Studies)-1) * Reps +
m
)
### Temporary data.table
temp.data <- data.table(Study_estimate, Study_sd, ni)
#Fixed and random effects
# rma.uni()'s second positional argument is the sampling VARIANCE, hence
# Study_sd^2 throughout.
# NOTE(review): the warning handlers below return list(list(...)) -- doubly
# wrapped -- so after a warning ma.fe$se etc. would be NULL rather than NA;
# confirm this is intentional before reusing this pattern.
ma.fe <- tryCatch({
rma.uni(temp.data$Study_estimate, temp.data$Study_sd^2 , method = "FE")
},
error = function(e){
return(list(b = NA, se = NA))
},
warning = function(w){
return(list(list(b = NA, se = NA)))
}
)
ma.reml <- tryCatch({
rma.uni(temp.data$Study_estimate, temp.data$Study_sd^2 , method = "REML", control = list(stepadj = 0.5))
},
error = function(e){
return(list(b = NA, tau2 = NA, se = NA))
},
warning = function(w){
return(list(list(b = NA, tau2 = NA, se = NA)))
}
)
ma.DL <- tryCatch({
rma.uni(temp.data$Study_estimate, temp.data$Study_sd^2 , method = "DL")
},error = function(e){
return(list(b = NA, tau2 = NA, se = NA, I2 = NA))
},warning = function(w){
return(list(list(b = NA, tau2 = NA, se = NA, I2 = NA)))
})
# Henmi & Copas
ma.hc.DL <- tryCatch({
hc(ma.DL)
},error = function(e){
return(list(se = NA, ci.lb = NA, ci.ub = NA))
},warning = function(w){
return(list(list(se = NA, ci.lb = NA, ci.ub = NA)))
})
# Henmi & Copas interval re-using the REML tau^2 (see mod.hc above).
ma.hc.REML <- tryCatch({
mod.hc(ma.reml, tau2est = ma.reml$tau2)
},error = function(e){
return(list(se = NA, ci.lb = NA, ci.ub = NA))
},warning = function(w){
return(list(list(se = NA, ci.lb = NA, ci.ub = NA)))
})
# Knapp Hartung
ma.reml.kh <- tryCatch({
rma.uni(temp.data$Study_estimate, temp.data$Study_sd^2 , method = "REML", knha = TRUE, control = list(stepadj = 0.5))
},
error = function(e){
return(list(b = NA, tau2 = NA, se = NA, ci.lb = NA, ci.ub = NA))
},
warning = function(w){
return(list(b = NA, tau2 = NA, se = NA, ci.lb = NA, ci.ub = NA))
}
)
ma.DL.kh <- tryCatch({rma.uni(temp.data$Study_estimate, temp.data$Study_sd^2 , method = "DL", knha = TRUE)}
, error = function(e){return(list(se = NA, ci.lb = NA, ci.ub = NA))
})
## Moreno (?D-var) - not exactly clear which implementation is being used is likely equation 2a
moreno.est <- tryCatch({ma.moren <- regtest(ma.fe , predictor = "vi", model = "lm")
c(ma.moren$fit[[5]][1],ma.moren$fit[[5]][3])
}, error=function(err) c(NA,NA))
## Mawdesley
# Multiplicative over-dispersion: inflate the FE sampling variances by the
# mean squared residual of a weighted null regression, floored at 1.
ma.mult <- tryCatch({
mawd.lm <- lm(temp.data$Study_estimate ~ 1, weights = 1/(temp.data$Study_sd^2))
sm.mawd.lm <- summary(mawd.lm)
ifelse(mean(sm.mawd.lm$residuals^2) < 1, phi.est <- 1, phi.est <- mean(sm.mawd.lm$residuals^2))
rma.uni(temp.data$Study_estimate, temp.data$Study_sd^2 * phi.est , method = "FE")}
, error = function(e){return(list(se = NA))
})
Normal.Sim[dummy.counter, `:=` (Unique_ID = counter,
FE_Estimate = ma.fe[[1]][1],
FE_se = ma.fe$se,
REML_Estimate = ma.reml$b[1],
REML_se = ma.reml$se,
REML_tau2 = ma.reml$tau2,
DL_Estimate = ma.DL[[1]][1],
DL_se = ma.DL$se,
DL_tau2 = ma.DL$tau2,
DL_I2 = ma.DL$I2,
HC_DL_se = ma.hc.DL$se,
HC_DL_CIlb = ma.hc.DL$ci.lb,
HC_DL_CIub = ma.hc.DL$ci.ub,
HC_REML_se = ma.hc.REML$se,
HC_REML_CIlb = ma.hc.REML$ci.lb,
HC_REML_CIub = ma.hc.REML$ci.ub,
KH_REML_CIlb = ma.reml.kh$ci.lb,
KH_REML_CIub = ma.reml.kh$ci.ub,
KH_REML_se = ma.reml.kh$se,
KH_DL_CIlb = ma.DL.kh$ci.lb,
KH_DL_CIub = ma.DL.kh$ci.ub,
KH_DL_se = ma.DL.kh$se,
Moreno_Estimate = moreno.est[1],
Moreno_se = moreno.est[2],
Mult_se = ma.mult$se
)]
dummy.counter <- dummy.counter + 1
}
}
}
}
# Value of the foreach body: this rep's table, rbind-combined across reps.
Normal.Sim
}
stopCluster(c1)
# Rows arrive in worker/rbind order; restore the parameter (ID) order.
Normal.Sim.Results <- Normal.Sim.Results[order(Unique_ID)]
##### Need to re append values - specific to analysis
ID = length(Subj) * length(controlProp) * length(theta) * length(tau.sq) * Reps * length(Studies)
# Collapse Subj to one representative scalar per design cell for labelling.
Subj <- c(60, 20, 250, 4.2)
Normal.Sim.Results$Rep_Number = rep(1:Reps, times = ID/Reps)
Normal.Sim.Results$Rep_NumStudies = rep(rep(Studies, each = Reps), times = ID/(Reps*length(Studies)))
Normal.Sim.Results$Rep_tau.sq = rep(rep(tau.sq, each = Reps * length(Studies)), times = ID/(Reps*length(Studies)*length(tau.sq)))
Normal.Sim.Results$Rep_theta = rep( rep(theta, each = Reps * length(Studies) * length(tau.sq)), times = length(Subj))
Normal.Sim.Results$Rep_Subj = rep(Subj, each = ID / length(Subj))
TimeTakenAn <- proc.time() - StartTime
#write.csv(Normal.Sim.Results, file = "NSB0V1An.csv")
saveRDS(Normal.Sim.Results, file = "NSBeggRDS")
### Checking values
sum(is.na(Normal.Sim.Results))
# NOTE(review): the column is named REML_Estimate; "REML_Est" only works via
# $ partial matching -- confirm it resolves, or spell the name out.
Normal.Sim.Results[is.na(Normal.Sim.Results$REML_Est)]
#### Timings ----
TimeTakenTotal <- proc.time() - StartTimeTotal
TimeTakenAn
TimeTakenTotal |
#
# Read multiple tables from multiple model runs and save each table's values as TableName.csv.
# Also save model-run metadata and table metadata (name, description, notes) into .csv files.
#
# If any of the libraries below is not installed then do:
#   install.packages("jsonlite")
#   install.packages("httr")
#
library("jsonlite")
library("httr")

# Include openM++ helper functions from your $HOME directory
source("~/omsCommon.R")

# Model digest of RiskPaths version 3.0.0.0: "d90e1e9a49a06d972ecf1d50e684c62b".
# We MUST use the model digest if there are multiple versions of the model
# published; the model name alone is only safe with a single published version.
md <- "d90e1e9a49a06d972ecf1d50e684c62b"

# oms web-service URL from file: ~/oms_url.txt
apiUrl <- getOmsApiUrl()

# Model runs can be identified by digest, by run stamp or by run name.
# A run digest is unique and is the preferable way to identify a model run;
# run names are user friendly but may not be unique.
runNames <- c(
  "New 123,000 cases",
  "New 456,000 cases",
  "New 789,000 cases"
)

# output tables to retrieve data from
tblNames <- c(
  "T04_FertilityRatesByAgeGroup",
  "T03_FertilityByAge"
)

# get table information (name, description, notes) from the model metadata
rsp <- GET(paste0(
  apiUrl, "model/", md, "/text"
))
if (http_type(rsp) != 'application/json') {
  # Fixed message: this is the (single) model-info request, not the "first" of anything.
  stop("Failed to get model info")
}
jr <- content(rsp)
tTxt <- jr$TableTxt

# Keep only the requested tables, in metadata order.
tableInfo <- data.frame()
for (t in tTxt) {
  for (tbl in tblNames)
  {
    if (t$Table$Name == tbl) {
      ti <- data.frame(
        TableName = tbl,
        TableDescription = t$TableDescr,
        TableNotes = t$TableNote
      )
      tableInfo <- rbind(tableInfo, ti)
      break
    }
  }
}

# save table information into a .csv file
write.csv(tableInfo, "tableInfo.csv", row.names = FALSE)

# get run information for every run of interest
runInfo <- data.frame()
for (run in runNames)
{
  rsp <- GET(paste0(
    apiUrl, "model/", md, "/run/", URLencode(run, reserved = TRUE), "/text"
  ))
  if (http_type(rsp) != 'application/json') {
    # Fixed message: the original said "first run info" for every run in the loop.
    stop("Failed to get run info of: ", run)
  }
  jr <- content(rsp)
  ri <- data.frame(
    ModelName = jr$ModelName,
    ModelVersion = jr$ModelVersion,
    RunName = jr$Name,
    SubCount = jr$SubCount,
    RunStarted = jr$CreateDateTime,
    RunCompleted = jr$UpdateDateTime,
    RunDescription = "",
    RunNotes = ""
  )
  # Description / notes are optional; use the first language entry when present.
  if (length(jr$Txt) > 0) {
    ri$RunDescription <- jr$Txt[[1]]$Descr
    ri$RunNotes <- jr$Txt[[1]]$Note
  }
  runInfo <- rbind(runInfo, ri)
}

# save run information into a .csv file
write.csv(runInfo, "runInfo.csv", row.names = FALSE)

# combine all run results and write them into T04_FertilityRatesByAgeGroup.csv
allCct <- NULL
for (run in runNames)
{
  # read.csv() accepts a URL: the oms service streams expression values as CSV.
  cct <- read.csv(paste0(
    apiUrl, "model/", md, "/run/", URLencode(run, reserved = TRUE), "/table/T04_FertilityRatesByAgeGroup/expr/csv"
  ))
  cct$RunName <- run
  allCct <- rbind(allCct, cct)
}
write.csv(allCct, "T04_FertilityRatesByAgeGroup.csv", row.names = FALSE)
| /oms-R/examples/RiskPaths_multiple_tables_and_metadata_to_csv.R | permissive | openmpp/R | R | false | false | 3,027 | r | #
# Read multiple tables from multiple model runs and save each table's values as TableName.csv.
# Also save model-run metadata and table metadata (name, description, notes) into .csv files.
#
# If any of the libraries below is not installed then do:
#   install.packages("jsonlite")
#   install.packages("httr")
#
library("jsonlite")
library("httr")

# Include openM++ helper functions from your $HOME directory
source("~/omsCommon.R")

# Model digest of RiskPaths version 3.0.0.0: "d90e1e9a49a06d972ecf1d50e684c62b".
# We MUST use the model digest if there are multiple versions of the model
# published; the model name alone is only safe with a single published version.
md <- "d90e1e9a49a06d972ecf1d50e684c62b"

# oms web-service URL from file: ~/oms_url.txt
apiUrl <- getOmsApiUrl()

# Model runs can be identified by digest, by run stamp or by run name.
# A run digest is unique and is the preferable way to identify a model run;
# run names are user friendly but may not be unique.
runNames <- c(
  "New 123,000 cases",
  "New 456,000 cases",
  "New 789,000 cases"
)

# output tables to retrieve data from
tblNames <- c(
  "T04_FertilityRatesByAgeGroup",
  "T03_FertilityByAge"
)

# get table information (name, description, notes) from the model metadata
rsp <- GET(paste0(
  apiUrl, "model/", md, "/text"
))
if (http_type(rsp) != 'application/json') {
  # Fixed message: this is the (single) model-info request, not the "first" of anything.
  stop("Failed to get model info")
}
jr <- content(rsp)
tTxt <- jr$TableTxt

# Keep only the requested tables, in metadata order.
tableInfo <- data.frame()
for (t in tTxt) {
  for (tbl in tblNames)
  {
    if (t$Table$Name == tbl) {
      ti <- data.frame(
        TableName = tbl,
        TableDescription = t$TableDescr,
        TableNotes = t$TableNote
      )
      tableInfo <- rbind(tableInfo, ti)
      break
    }
  }
}

# save table information into a .csv file
write.csv(tableInfo, "tableInfo.csv", row.names = FALSE)

# get run information for every run of interest
runInfo <- data.frame()
for (run in runNames)
{
  rsp <- GET(paste0(
    apiUrl, "model/", md, "/run/", URLencode(run, reserved = TRUE), "/text"
  ))
  if (http_type(rsp) != 'application/json') {
    # Fixed message: the original said "first run info" for every run in the loop.
    stop("Failed to get run info of: ", run)
  }
  jr <- content(rsp)
  ri <- data.frame(
    ModelName = jr$ModelName,
    ModelVersion = jr$ModelVersion,
    RunName = jr$Name,
    SubCount = jr$SubCount,
    RunStarted = jr$CreateDateTime,
    RunCompleted = jr$UpdateDateTime,
    RunDescription = "",
    RunNotes = ""
  )
  # Description / notes are optional; use the first language entry when present.
  if (length(jr$Txt) > 0) {
    ri$RunDescription <- jr$Txt[[1]]$Descr
    ri$RunNotes <- jr$Txt[[1]]$Note
  }
  runInfo <- rbind(runInfo, ri)
}

# save run information into a .csv file
write.csv(runInfo, "runInfo.csv", row.names = FALSE)

# combine all run results and write them into T04_FertilityRatesByAgeGroup.csv
allCct <- NULL
for (run in runNames)
{
  # read.csv() accepts a URL: the oms service streams expression values as CSV.
  cct <- read.csv(paste0(
    apiUrl, "model/", md, "/run/", URLencode(run, reserved = TRUE), "/table/T04_FertilityRatesByAgeGroup/expr/csv"
  ))
  cct$RunName <- run
  allCct <- rbind(allCct, cct)
}
write.csv(allCct, "T04_FertilityRatesByAgeGroup.csv", row.names = FALSE)
|
library(tidyverse)
library(ggplot2)

source("study1/defs.R")

image_dir = "study1/images"

tbl = readr::read_delim("study1/data/final.csv", delim = " ")
print(tbl)

tbl = tbl %>%
  arrange(generator, D, eps, mu)
# One facet label per (generator, D, eps, mu) experiment configuration.
tbl$facet_var = sprintf("(%s, %i, %.2f, %i)", tbl$generator, tbl$D, tbl$eps, tbl$mu)

# THE BIG PICTURE
# ===
# One boxplot figure per repair mechanism: entropy distribution by mutator.
for (REPAIR in unique(tbl$repair)) {
  g = ggplot(filter(tbl, repair == REPAIR), aes(x = mutator, y = entropy))
  g = g + geom_boxplot(aes(color = mutator), alpha = 0.5)
  g = g + theme_bw()
  g = g + scale_color_brewer(palette = "Dark2")
  g = g + theme(legend.position = "top")
  g = g + guides(colour = guide_legend(nrow = 1))
  g = g + labs(
    #title = "Distribution of population diversity for different mutators",
    #subtitle = sprintf("Repair mechanism: %s", REPAIR),
    # BUG FIX: labs() takes x/y, not xlab/ylab -- unknown names are silently
    # ignored, so the axis labels were never applied.
    y = "Entropy",
    x = "Mutator",
    color = "Mutator")
  #g = g + facet_grid(generator + D ~ eps + mu, scales = "free_y")
  g = g + facet_wrap(. ~ facet_var, scales = "free_y", ncol = 6L)
  fn = file.path(image_dir, sprintf("boxplots_mutator_entropy_%s.pdf", REPAIR))
  # BUG FIX: pass the plot explicitly. ggsave() defaults to last_plot(),
  # which is only set when a plot is printed -- inside this loop `g` is
  # never printed, so the intended figure was not guaranteed to be saved.
  ggsave(fn, plot = g, width = 10, height = 10)
}

# Same entropy boxplots, but all repair mechanisms in a single figure
# (x axis = repair / mutator combination).
tbl2 = tbl
#tbl2$entropy = log(tbl2$entropy)
g = ggplot(tbl2, aes(x = interaction(repair, mutator, sep = " / "), y = entropy))
g = g + geom_boxplot(aes(color = mutator), alpha = 0.5)
#g = g + scale_y_log10()
g = g + ylim(c(25, 38))
g = g + theme_bw()
g = g + scale_color_brewer(palette = "Dark2")
g = g + theme(legend.position = "top", axis.text.x = element_text(hjust = 1, angle = 45))
g = g + guides(colour = guide_legend(nrow = 1))
g = g + labs(
  #title = "Distribution of population diversity for different mutators",
  #subtitle = sprintf("Repair mechanism: %s", REPAIR),
  # BUG FIX: labs() takes x/y, not xlab/ylab (see above).
  y = "Entropy",
  x = "Mutator / Repair",
  color = "Mutator")
#g = g + facet_grid(generator + D ~ eps + mu, scales = "free_y")
g = g + facet_wrap(. ~ facet_var, scales = "free_y", ncol = 3L)
fn = file.path(image_dir, "boxplots_mutator_entropy_all_repairs.pdf")
ggsave(fn, plot = g, width = 10, height = 12)
#stop("")

# AGGREGATED RANKING
# ===
# Rank mutators by mean entropy within each experiment configuration.
aggr = tbl %>%
  group_by(generator, mutator, D, eps, mu, repair) %>%
  dplyr::summarize(entropy_mean = mean(entropy)) %>%
  ungroup() %>%
  group_by(generator, D, eps, mu, repair) %>%
  dplyr::mutate(rank = rank(entropy_mean)) %>%
  ungroup()

table(aggr$mutator, aggr$rank, aggr$repair)

# EFFECT OF REPAIR
# ===
# Share of iterations without improvement and share of repair operations,
# aggregated per (repair, mutator), reshaped to long format for plotting.
tbl2 = tbl %>% group_by(repair, mutator) %>%
  dplyr::summarise(
    no_improve = round(mean(iters_without_improvement / iters) * 100, digits = 2),
    did_repair = round(mean(repair_operations / (iters)) * 100, digits = 2)) %>%
  arrange(no_improve) %>%
  ungroup() %>%
  reshape2::melt(id.vars = c("repair", "mutator"), variable.name = "split", value.name = "value")

g = ggplot(tbl2, aes(x = mutator, y = value, fill = split))
g = g + geom_bar(stat = "identity", position = "dodge", alpha = 0.5)
g = g + theme_bw()
g = g + scale_fill_brewer(palette = "Dark2")
#g = g + theme(legend.position = "top", axis.text.x = element_text(hjust = 1, angle = 45))
g = g + facet_grid(. ~ repair)
#print(g)
# Load the per-job trace file for every row of `tbl` and bind them row-wise.
#
# For each experiment row, reads "<out.dir.local>/<jobid>.out"
# (whitespace-separated, no header; the final line of the file is dropped --
# presumably a partially written last record, TODO confirm), attaches the
# experiment metadata columns, and keeps only every `step`-th iteration
# (plus iteration 1) to thin the trajectories for plotting.
#
# Args:
#   tbl:  data frame of experiments; must contain a `jobid` column.
#         `out.dir.local` is taken from the calling environment (defs.R).
#   step: keep rows where iter %% step == 0 (and iter == 1).
#
# Returns: a tibble with the trace columns (iter, diversity, improved,
#   infeasible, repaired) followed by all columns of `tbl`.
loadTraces = function(tbl, step = 100) {
  res = lapply(seq_len(nrow(tbl)), function(i) {
    worktbl = tbl[i, ]
    fn = file.path(out.dir.local, sprintf("%i.out", worktbl$jobid))
    #print(fn)
    trace = read.table(
      file = fn,
      header = FALSE,
      # FIX: spell out "nrows" -- the original "nrow" relied on partial
      # argument matching, a fragile idiom.
      nrows = length(readLines(fn)) - 1L)
    colnames(trace) = c("iter", "diversity", "improved", "infeasible", "repaired")
    worktbl = cbind(trace, worktbl)
    filter(worktbl, ((iter %% step) == 0) | iter == 1)
  })
  return(dplyr::as_tibble(do.call(rbind, res)))
}
# SAMPLE TRAJECTORIES
# ===
# Mean diversity trajectory over iterations, aggregated across repetitions,
# for the mu == 50 experiments; one figure per repair mechanism.
traces = tbl %>%
  filter(mu == 50) %>% #a, generator %in% c("uncorr", "scorr", "invscorr")) %>%
  loadTraces()

traces2 = traces %>%
  group_by(eps, D, mu, repair, generator, iter, facet_var, mutator) %>%
  dplyr::summarize(diversity = mean(diversity))

for (REPAIR in unique(tbl$repair)) {
  g = ggplot(filter(traces2, repair == REPAIR), aes(x = iter, y = diversity, shape = mutator, color = mutator, group = mutator))
  # BUG FIX: "aplha" was a typo -- ggplot2 ignored the unknown parameter, so
  # the points were drawn fully opaque instead of at alpha = 0.5.
  g = g + geom_path(alpha = 0.5) + geom_point(alpha = 0.5, size = 0.8)
  g = g + scale_color_brewer(palette = "Dark2")
  g = g + xlim(c(0, 2500))
  g = g + theme_bw()
  g = g + theme(legend.position = "top")
  g = g + labs(
    #title = "Aggregated Trajectories",
    #subtitle = sprintf("Repair mechanism: %s", REPAIR),
    # BUG FIX: labs() takes x/y, not xlab/ylab -- unknown names are silently
    # ignored, so the axis labels were never applied.
    y = "Entropy",
    x = "Iteration",
    color = "Mutator",
    shape = "Mutator")
  g = g + guides(colour = guide_legend(nrow = 1))
  g = g + facet_wrap(. ~ facet_var, ncol = 3)
  #print(g)
  fn = file.path(image_dir, sprintf("traces_%s.pdf", REPAIR))
  ggsave(fn, plot = g, width = 8, height = 10)
}
| /ex08_analyse.R | no_license | jakobbossek/GECCO2021-knapsack-diversity | R | false | false | 4,680 | r | library(tidyverse)
library(ggplot2)

source("study1/defs.R")

image_dir = "study1/images"

tbl = readr::read_delim("study1/data/final.csv", delim = " ")
print(tbl)

tbl = tbl %>%
  arrange(generator, D, eps, mu)
# One facet label per (generator, D, eps, mu) experiment configuration.
tbl$facet_var = sprintf("(%s, %i, %.2f, %i)", tbl$generator, tbl$D, tbl$eps, tbl$mu)

# THE BIG PICTURE
# ===
# One boxplot figure per repair mechanism: entropy distribution by mutator.
for (REPAIR in unique(tbl$repair)) {
  g = ggplot(filter(tbl, repair == REPAIR), aes(x = mutator, y = entropy))
  g = g + geom_boxplot(aes(color = mutator), alpha = 0.5)
  g = g + theme_bw()
  g = g + scale_color_brewer(palette = "Dark2")
  g = g + theme(legend.position = "top")
  g = g + guides(colour = guide_legend(nrow = 1))
  g = g + labs(
    #title = "Distribution of population diversity for different mutators",
    #subtitle = sprintf("Repair mechanism: %s", REPAIR),
    # BUG FIX: labs() takes x/y, not xlab/ylab -- unknown names are silently
    # ignored, so the axis labels were never applied.
    y = "Entropy",
    x = "Mutator",
    color = "Mutator")
  #g = g + facet_grid(generator + D ~ eps + mu, scales = "free_y")
  g = g + facet_wrap(. ~ facet_var, scales = "free_y", ncol = 6L)
  fn = file.path(image_dir, sprintf("boxplots_mutator_entropy_%s.pdf", REPAIR))
  # BUG FIX: pass the plot explicitly. ggsave() defaults to last_plot(),
  # which is only set when a plot is printed -- inside this loop `g` is
  # never printed, so the intended figure was not guaranteed to be saved.
  ggsave(fn, plot = g, width = 10, height = 10)
}

# Same entropy boxplots, but all repair mechanisms in a single figure
# (x axis = repair / mutator combination).
tbl2 = tbl
#tbl2$entropy = log(tbl2$entropy)
g = ggplot(tbl2, aes(x = interaction(repair, mutator, sep = " / "), y = entropy))
g = g + geom_boxplot(aes(color = mutator), alpha = 0.5)
#g = g + scale_y_log10()
g = g + ylim(c(25, 38))
g = g + theme_bw()
g = g + scale_color_brewer(palette = "Dark2")
g = g + theme(legend.position = "top", axis.text.x = element_text(hjust = 1, angle = 45))
g = g + guides(colour = guide_legend(nrow = 1))
g = g + labs(
  #title = "Distribution of population diversity for different mutators",
  #subtitle = sprintf("Repair mechanism: %s", REPAIR),
  # BUG FIX: labs() takes x/y, not xlab/ylab (see above).
  y = "Entropy",
  x = "Mutator / Repair",
  color = "Mutator")
#g = g + facet_grid(generator + D ~ eps + mu, scales = "free_y")
g = g + facet_wrap(. ~ facet_var, scales = "free_y", ncol = 3L)
fn = file.path(image_dir, "boxplots_mutator_entropy_all_repairs.pdf")
ggsave(fn, plot = g, width = 10, height = 12)
#stop("")

# AGGREGATED RANKING
# ===
# Rank mutators by mean entropy within each experiment configuration.
aggr = tbl %>%
  group_by(generator, mutator, D, eps, mu, repair) %>%
  dplyr::summarize(entropy_mean = mean(entropy)) %>%
  ungroup() %>%
  group_by(generator, D, eps, mu, repair) %>%
  dplyr::mutate(rank = rank(entropy_mean)) %>%
  ungroup()

table(aggr$mutator, aggr$rank, aggr$repair)

# EFFECT OF REPAIR
# ===
# Share of iterations without improvement and share of repair operations,
# aggregated per (repair, mutator), reshaped to long format for plotting.
tbl2 = tbl %>% group_by(repair, mutator) %>%
  dplyr::summarise(
    no_improve = round(mean(iters_without_improvement / iters) * 100, digits = 2),
    did_repair = round(mean(repair_operations / (iters)) * 100, digits = 2)) %>%
  arrange(no_improve) %>%
  ungroup() %>%
  reshape2::melt(id.vars = c("repair", "mutator"), variable.name = "split", value.name = "value")

g = ggplot(tbl2, aes(x = mutator, y = value, fill = split))
g = g + geom_bar(stat = "identity", position = "dodge", alpha = 0.5)
g = g + theme_bw()
g = g + scale_fill_brewer(palette = "Dark2")
#g = g + theme(legend.position = "top", axis.text.x = element_text(hjust = 1, angle = 45))
g = g + facet_grid(. ~ repair)
#print(g)
# Load the per-job trace file for every row of `tbl` and bind them row-wise.
#
# For each experiment row, reads "<out.dir.local>/<jobid>.out"
# (whitespace-separated, no header; the final line of the file is dropped --
# presumably a partially written last record, TODO confirm), attaches the
# experiment metadata columns, and keeps only every `step`-th iteration
# (plus iteration 1) to thin the trajectories for plotting.
#
# Args:
#   tbl:  data frame of experiments; must contain a `jobid` column.
#         `out.dir.local` is taken from the calling environment (defs.R).
#   step: keep rows where iter %% step == 0 (and iter == 1).
#
# Returns: a tibble with the trace columns (iter, diversity, improved,
#   infeasible, repaired) followed by all columns of `tbl`.
loadTraces = function(tbl, step = 100) {
  res = lapply(seq_len(nrow(tbl)), function(i) {
    worktbl = tbl[i, ]
    fn = file.path(out.dir.local, sprintf("%i.out", worktbl$jobid))
    #print(fn)
    trace = read.table(
      file = fn,
      header = FALSE,
      # FIX: spell out "nrows" -- the original "nrow" relied on partial
      # argument matching, a fragile idiom.
      nrows = length(readLines(fn)) - 1L)
    colnames(trace) = c("iter", "diversity", "improved", "infeasible", "repaired")
    worktbl = cbind(trace, worktbl)
    filter(worktbl, ((iter %% step) == 0) | iter == 1)
  })
  return(dplyr::as_tibble(do.call(rbind, res)))
}
# SAMPLE TRAJECTORIES
# ===
# Mean diversity trajectory over iterations, aggregated across repetitions,
# for the mu == 50 experiments; one figure per repair mechanism.
traces = tbl %>%
  filter(mu == 50) %>% #a, generator %in% c("uncorr", "scorr", "invscorr")) %>%
  loadTraces()

traces2 = traces %>%
  group_by(eps, D, mu, repair, generator, iter, facet_var, mutator) %>%
  dplyr::summarize(diversity = mean(diversity))

for (REPAIR in unique(tbl$repair)) {
  g = ggplot(filter(traces2, repair == REPAIR), aes(x = iter, y = diversity, shape = mutator, color = mutator, group = mutator))
  # BUG FIX: "aplha" was a typo -- ggplot2 ignored the unknown parameter, so
  # the points were drawn fully opaque instead of at alpha = 0.5.
  g = g + geom_path(alpha = 0.5) + geom_point(alpha = 0.5, size = 0.8)
  g = g + scale_color_brewer(palette = "Dark2")
  g = g + xlim(c(0, 2500))
  g = g + theme_bw()
  g = g + theme(legend.position = "top")
  g = g + labs(
    #title = "Aggregated Trajectories",
    #subtitle = sprintf("Repair mechanism: %s", REPAIR),
    # BUG FIX: labs() takes x/y, not xlab/ylab -- unknown names are silently
    # ignored, so the axis labels were never applied.
    y = "Entropy",
    x = "Iteration",
    color = "Mutator",
    shape = "Mutator")
  g = g + guides(colour = guide_legend(nrow = 1))
  g = g + facet_wrap(. ~ facet_var, ncol = 3)
  #print(g)
  fn = file.path(image_dir, sprintf("traces_%s.pdf", REPAIR))
  ggsave(fn, plot = g, width = 8, height = 10)
}
|
# Install (if missing) and load a set of packages.
#
# NOTE: this helper intentionally shadows base utils::install.packages().
# The inner call is therefore namespace-qualified -- without the utils::
# prefix the function recursed into ITSELF forever instead of ever
# installing anything (the missing-package set never shrank).
#
# Args:
#   pkg: character vector of package names.
# Returns: named logical vector (from require): TRUE if the package loaded.
install.packages <- function(pkg){
  new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
  if (length(new.pkg))
    utils::install.packages(new.pkg, dependencies = TRUE)
  sapply(pkg, require, character.only = TRUE)
}
packages <- c("ggplot2", "dplyr", "tidyverse", "plotly", "ggthemes", "tidyr", "shiny", "shinydashboard")
install.packages(packages)
################################################################################
############                 Simple Random Walk                     ############
################################################################################

# Simulate n.sim independent simple random walks of n.steps steps each.
#
# Every walk starts at 0; each step is +1 with probability prob.r and -1
# otherwise.
#
# Args:
#   n.steps: number of steps per walk.
#   n.sim:   number of independent walks.
#   prob.r:  probability of a +1 (rightward) step; default 0.5.
#
# Returns: a data.frame with one row per simulation -- a "sim" label column
#   ("sim1", "sim2", ...) followed by n.steps + 1 position columns
#   (Sn.1 = start = 0, Sn.2, ..., Sn.<n.steps + 1>).
simple.random.walk <- function(n.steps, n.sim, prob.r = 0.5){
  n <- n.steps
  a <- prob.r
  x.left <- -1
  x.right <- 1
  Sn_mat <- matrix(0, ncol = n + 1, nrow = n.sim)
  for (i in seq_len(n.sim)) {
    # Draw all n steps at once and accumulate with cumsum(): equivalent in
    # distribution to the original step-by-step loop, but one sample() call
    # per walk instead of one per step. (Note: under a fixed seed the exact
    # random stream differs from the per-step version.)
    steps <- sample(c(x.left, x.right), n, prob = c(1 - a, a), replace = TRUE)
    Sn_mat[i, -1] <- cumsum(steps)
  }
  data.frame('sim' = sapply(seq_len(n.sim), function(i) paste('sim', i, sep = '')),
             'Sn' = Sn_mat)
}
# Install (if missing) and load a set of packages.
#
# NOTE: this helper intentionally shadows base utils::install.packages().
# The inner call is therefore namespace-qualified -- without the utils::
# prefix the function recursed into ITSELF forever instead of ever
# installing anything (the missing-package set never shrank).
#
# Args:
#   pkg: character vector of package names.
# Returns: named logical vector (from require): TRUE if the package loaded.
install.packages <- function(pkg){
  new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
  if (length(new.pkg))
    utils::install.packages(new.pkg, dependencies = TRUE)
  sapply(pkg, require, character.only = TRUE)
}
packages <- c("ggplot2", "dplyr", "tidyverse", "plotly", "ggthemes", "tidyr", "shiny", "shinydashboard")
install.packages(packages)
################################################################################
############                 Simple Random Walk                     ############
################################################################################

# Simulate n.sim independent simple random walks of n.steps steps each.
#
# Every walk starts at 0; each step is +1 with probability prob.r and -1
# otherwise.
#
# Args:
#   n.steps: number of steps per walk.
#   n.sim:   number of independent walks.
#   prob.r:  probability of a +1 (rightward) step; default 0.5.
#
# Returns: a data.frame with one row per simulation -- a "sim" label column
#   ("sim1", "sim2", ...) followed by n.steps + 1 position columns
#   (Sn.1 = start = 0, Sn.2, ..., Sn.<n.steps + 1>).
simple.random.walk <- function(n.steps, n.sim, prob.r = 0.5){
  n <- n.steps
  a <- prob.r
  x.left <- -1
  x.right <- 1
  Sn_mat <- matrix(0, ncol = n + 1, nrow = n.sim)
  for (i in seq_len(n.sim)) {
    # Draw all n steps at once and accumulate with cumsum(): equivalent in
    # distribution to the original step-by-step loop, but one sample() call
    # per walk instead of one per step. (Note: under a fixed seed the exact
    # random stream differs from the per-step version.)
    steps <- sample(c(x.left, x.right), n, prob = c(1 - a, a), replace = TRUE)
    Sn_mat[i, -1] <- cumsum(steps)
  }
  data.frame('sim' = sapply(seq_len(n.sim), function(i) paste('sim', i, sep = '')),
             'Sn' = Sn_mat)
}
## Example Calculations
### Below, two examples are given on which the functions were tested. We can see that the functions work properly:
A
[,1] [,2]
[1,] 2 -9
[2,] 0 9
cacheMatrix <- makeCacheMatrix(A)
invA<-cacheSolve(cacheMatrix)
invA
[,1] [,2]
[1,] 0.5 0.5000000
[2,] 0.0 0.1111111
### Test that invA is indeed the inverse
invA%*%A
[,1] [,2]
[1,] 1 0
[2,] 0 1
A%*%invA
[,1] [,2]
[1,] 1 0
[2,] 0 1
### Cache of the inverse matrix
cacheSolve(cacheMatrix)
getting cached data
[,1] [,2]
[1,] 0.5 0.5000000
[2,] 0.0 0.1111111
### Second example
B
[,1] [,2] [,3]
[1,] 3 2 1
[2,] 1 1 -1
[3,] 0 1 2
invB<-solve(B)
invB
[,1] [,2] [,3]
[1,] 0.5000000 -0.5 -0.5000000
[2,] -0.3333333 1.0 0.6666667
[3,] 0.1666667 -0.5 0.1666667
exB<-makeCacheMatrix(B)
cacheSolve(exB)
[,1] [,2] [,3]
[1,] 0.5000000 -0.5 -0.5000000
[2,] -0.3333333 1.0 0.6666667
[3,] 0.1666667 -0.5 0.1666667
cacheSolve(exB)
getting cached data
[,1] [,2] [,3]
[1,] 0.5000000 -0.5 -0.5000000
[2,] -0.3333333 1.0 0.6666667
[3,] 0.1666667 -0.5 0.1666667
| /ExampleCalculations.R | no_license | YT-10/ProgrammingAssignment2 | R | false | false | 1,231 | r | ## Example Calculations
### Below, two examples are given on which the functions were tested. We can see that the functions work properly:
A
[,1] [,2]
[1,] 2 -9
[2,] 0 9
cacheMatrix <- makeCacheMatrix(A)
invA<-cacheSolve(cacheMatrix)
invA
[,1] [,2]
[1,] 0.5 0.5000000
[2,] 0.0 0.1111111
### Test that invA is indeed the inverse
invA%*%A
[,1] [,2]
[1,] 1 0
[2,] 0 1
A%*%invA
[,1] [,2]
[1,] 1 0
[2,] 0 1
### Cache of the inverse matrix
cacheSolve(cacheMatrix)
getting cached data
[,1] [,2]
[1,] 0.5 0.5000000
[2,] 0.0 0.1111111
### Second example
B
[,1] [,2] [,3]
[1,] 3 2 1
[2,] 1 1 -1
[3,] 0 1 2
invB<-solve(B)
invB
[,1] [,2] [,3]
[1,] 0.5000000 -0.5 -0.5000000
[2,] -0.3333333 1.0 0.6666667
[3,] 0.1666667 -0.5 0.1666667
exB<-makeCacheMatrix(B)
cacheSolve(exB)
[,1] [,2] [,3]
[1,] 0.5000000 -0.5 -0.5000000
[2,] -0.3333333 1.0 0.6666667
[3,] 0.1666667 -0.5 0.1666667
cacheSolve(exB)
getting cached data
[,1] [,2] [,3]
[1,] 0.5000000 -0.5 -0.5000000
[2,] -0.3333333 1.0 0.6666667
[3,] 0.1666667 -0.5 0.1666667
|
# Rename a worksheet in an Excel workbook via COM automation (Windows only;
# requires Excel and an R COM bridge providing comCreateObject / comGetProperty
# / comSetProperty / comInvoke).
#
# Args:
#   filepath: full path to the workbook.
#   oldname:  current worksheet name.
#   newname:  new worksheet name.
#
# The workbook is saved in place. FIX: Quit now runs via on.exit(), so the
# Excel process is shut down even when an intermediate COM call errors --
# the original leaked an Excel process on any failure.
xlsRenameSheet <- function(filepath, oldname, newname)
{
  xls <- comCreateObject("Excel.Application")
  # Guarantee Excel shuts down on both normal and error exit.
  on.exit(comInvoke(xls, "Quit"), add = TRUE)
  wb <- comGetProperty(comGetProperty(xls, "Workbooks"), "Open", filepath)
  sheets <- comGetProperty(xls, "Sheets")
  sheet.to.rename <- comGetProperty(sheets, "Item", oldname)
  sheet.to.rename[["Name"]] <- newname
  comSetProperty(wb, "Save", filepath)
  invisible(NULL)
}
| /R Extension/RMG/Utilities/Interfaces/CEGxls/R/xlsRenameSheet.R | no_license | uhasan1/QLExtension-backup | R | false | false | 405 | r | xlsRenameSheet <- function(filepath, oldname, newname)
{
# Start an Excel COM instance (Windows only; needs an R COM bridge).
xls <- comCreateObject("Excel.Application")
# Open the workbook, look the sheet up by its current name and rename it.
wb <- comGetProperty(comGetProperty(xls, "Workbooks"), "Open", filepath)
sheets <- comGetProperty(xls, "Sheets")
sheet.to.rename <- comGetProperty(sheets, "Item", oldname)
sheet.to.rename[["Name"]] <- newname
comSetProperty(wb, "Save", filepath)
# NOTE(review): if any call above errors, Quit is never reached and the
# Excel process is leaked -- consider moving Quit into on.exit().
comInvoke(xls, "Quit")
rm(xls)
}
|
########## Define working directory
getwd()
# NOTE(review): hard-coded Windows path makes the script non-portable.
setwd("G:/RExploratory")
########## Loading packages
library(pryr)
library(datasets)
########## Load file
# "?" marks missing values in this dataset; Date/Time are read as character.
HPC <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?")
summary(HPC)
class(HPC$Date)
class(HPC$Time)
str(as.Date)
########## Set date and date subset
# FIX: removed the leftover "?as.Date" exploration line -- it opens the
# interactive help pager, which has no place in a batch script.
# Convert Date to Date class, then keep only 2007-02-01 and 2007-02-02.
HPC$Date <- as.Date(HPC$Date, "%d/%m/%Y")
head(HPC)
HPC <- HPC[(HPC$Date == "2007-02-01" | HPC$Date == "2007-02-02"), ]
head(HPC)
##### Plot 2
# Draw an empty frame first (type = "n", labels suppressed), then add the
# line and the y-axis title separately.
plot(HPC$Global_active_power ~ as.POSIXct(paste(HPC$Date, HPC$Time)), type = "n", ylab = "", xlab = "")
title(ylab = "Global Active Power (kilowatts)")
lines(HPC$Global_active_power ~ as.POSIXct(paste(HPC$Date, HPC$Time)))
dev.copy(png, file = "plot2.png")
dev.off()
| /plot2.R | no_license | RomainBenetiere/ExData_Plotting1 | R | false | false | 722 | r | ########## Define working directory
getwd()
# NOTE(review): hard-coded Windows path makes the script non-portable.
setwd("G:/RExploratory")
########## Loading packages
library(pryr)
library(datasets)
########## Load file
# "?" marks missing values in this dataset; Date/Time are read as character.
HPC <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?")
summary(HPC)
class(HPC$Date)
class(HPC$Time)
str(as.Date)
########## Set date and date subset
# FIX: removed the leftover "?as.Date" exploration line -- it opens the
# interactive help pager, which has no place in a batch script.
# Convert Date to Date class, then keep only 2007-02-01 and 2007-02-02.
HPC$Date <- as.Date(HPC$Date, "%d/%m/%Y")
head(HPC)
HPC <- HPC[(HPC$Date == "2007-02-01" | HPC$Date == "2007-02-02"), ]
head(HPC)
##### Plot 2
# Draw an empty frame first (type = "n", labels suppressed), then add the
# line and the y-axis title separately.
plot(HPC$Global_active_power ~ as.POSIXct(paste(HPC$Date, HPC$Time)), type = "n", ylab = "", xlab = "")
title(ylab = "Global Active Power (kilowatts)")
lines(HPC$Global_active_power ~ as.POSIXct(paste(HPC$Date, HPC$Time)))
dev.copy(png, file = "plot2.png")
dev.off()
#!/usr/bin/env Rscript
# Daily TV-context report: pull yesterday's context data, run topic modelling,
# write an .xlsx report and e-mail it. Helpers (context_data, get_masiv,
# train_lda) and the SMTP credentials (hostname, port, username, mailpass)
# come from the files sourced below.
source("~/CMWT/common.R")
source("topic.R")
# Process yesterday; if yesterday was Sunday (lubridate wday == 1 with the
# default week start), process the whole Friday..Sunday span instead --
# presumably because no report is generated over the weekend (TODO confirm).
input_dates <- Sys.Date()-1
if(lubridate::wday(input_dates)==1) {
input_dates <- as.Date((input_dates-2):input_dates, "1970-01-01")
}
cat(paste("Start download file list:", Sys.time()))
dat <- context_data(input_dates)
cat(paste("Start target:", Sys.time()))
masiv <- get_masiv(dat)
cat(paste("Start topic modeling:", Sys.time()))
# Append columns 3:4 of the train_lda() result (presumably the topic
# assignment columns -- verify against topic.R) to the article table.
# capture.output() silences the verbose training log.
# (Column names: "Текст" = article text, "Заголовок" = headline.)
osr <- invisible(capture.output({
masiv <- masiv %>% bind_cols(train_lda(masiv$Текст, masiv$Заголовок, nrow(masiv) / 4)[3:4])
}))
print(paste("Writing xlsx",Sys.time()))
# Truncate article text -- presumably to stay under Excel's per-cell size
# limit (TODO confirm the 32000 figure).
masiv$Текст <- substr(masiv$Текст, 1, 32000)
fileXls <- paste0(getwd(), "/workfiles/tv_daily/tv_", input_dates[length(input_dates)], ".xlsx")
wb <- openxlsx::createWorkbook()
openxlsx::addWorksheet(wb, "tv")
openxlsx::writeDataTable(wb,"tv",masiv,withFilter = F)
# NOTE(review): 137 hard-codes the expected column count -- confirm it still
# matches the table produced above.
openxlsx::setColWidths(wb, "tv", c(1:137), widths = 8.43, ignoreMergedCells = FALSE)
openxlsx::saveWorkbook(wb,file = fileXls,overwrite = T)
# E-mail the report; SMTP settings come from common.R.
suppressPackageStartupMessages(library(mailR))
send.mail(from = "Roman Kyrychenko<roman.kyrychenko@corestone.expert>",
to = c("kirichenko17roman@gmail.com", "victoriya.poda@corestone.expert"),
html = F,encoding = "utf-8",
subject = paste("Context",input_dates[length(input_dates)]),
body = paste("Context",input_dates[length(input_dates)]),
attach.files = c(fileXls),
smtp = list(host.name = hostname, port = port, user.name = username, passwd = mailpass),
authenticate = TRUE,
send = TRUE)
q(save = "no") | /poda.R | no_license | RomanKyrychenko/CMWT | R | false | false | 1,595 | r | #!/usr/bin/env Rscript
# Daily TV-context report: pull yesterday's context data, run topic modelling,
# write an .xlsx report and e-mail it. Helpers (context_data, get_masiv,
# train_lda) and the SMTP credentials (hostname, port, username, mailpass)
# come from the files sourced below.
source("~/CMWT/common.R")
source("topic.R")
# Process yesterday; if yesterday was Sunday (lubridate wday == 1 with the
# default week start), process the whole Friday..Sunday span instead --
# presumably because no report is generated over the weekend (TODO confirm).
input_dates <- Sys.Date()-1
if(lubridate::wday(input_dates)==1) {
input_dates <- as.Date((input_dates-2):input_dates, "1970-01-01")
}
cat(paste("Start download file list:", Sys.time()))
dat <- context_data(input_dates)
cat(paste("Start target:", Sys.time()))
masiv <- get_masiv(dat)
cat(paste("Start topic modeling:", Sys.time()))
# Append columns 3:4 of the train_lda() result (presumably the topic
# assignment columns -- verify against topic.R) to the article table.
# capture.output() silences the verbose training log.
# (Column names: "Текст" = article text, "Заголовок" = headline.)
osr <- invisible(capture.output({
masiv <- masiv %>% bind_cols(train_lda(masiv$Текст, masiv$Заголовок, nrow(masiv) / 4)[3:4])
}))
print(paste("Writing xlsx",Sys.time()))
# Truncate article text -- presumably to stay under Excel's per-cell size
# limit (TODO confirm the 32000 figure).
masiv$Текст <- substr(masiv$Текст, 1, 32000)
fileXls <- paste0(getwd(), "/workfiles/tv_daily/tv_", input_dates[length(input_dates)], ".xlsx")
wb <- openxlsx::createWorkbook()
openxlsx::addWorksheet(wb, "tv")
openxlsx::writeDataTable(wb,"tv",masiv,withFilter = F)
# NOTE(review): 137 hard-codes the expected column count -- confirm it still
# matches the table produced above.
openxlsx::setColWidths(wb, "tv", c(1:137), widths = 8.43, ignoreMergedCells = FALSE)
openxlsx::saveWorkbook(wb,file = fileXls,overwrite = T)
# E-mail the report; SMTP settings come from common.R.
suppressPackageStartupMessages(library(mailR))
send.mail(from = "Roman Kyrychenko<roman.kyrychenko@corestone.expert>",
to = c("kirichenko17roman@gmail.com", "victoriya.poda@corestone.expert"),
html = F,encoding = "utf-8",
subject = paste("Context",input_dates[length(input_dates)]),
body = paste("Context",input_dates[length(input_dates)]),
attach.files = c(fileXls),
smtp = list(host.name = hostname, port = port, user.name = username, passwd = mailpass),
authenticate = TRUE,
send = TRUE)
q(save = "no") |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.