content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# -----------------------------------------------------------------------------
# Manual regression test for pivottabler's Excel export (writeToExcelWorksheet).
# Each scenario builds a pivot table from the bhmtrains sample data, renders it,
# exports it to its own worksheet (title cell in A1, table starting at B3), then
# renders again to confirm the pivot table is still usable after the export.
# Scenario groups:
#   1  = empty pivot table (data only),
#   2x = calculations on columns (the default placement),
#   3x = calculations moved onto rows via addRowCalculationGroups().
# -----------------------------------------------------------------------------
library(openxlsx)
library(pivottabler)

wb <- createWorkbook(creator = Sys.getenv("USERNAME"))

# Build a pivot table over bhmtrains.
#   rows / cols  - character vectors of variable names for row/column groups
#                  (columns are added first, matching the original scenarios)
#   nMeasures    - number of "n()" count calculations to define
#                  (1 -> "TotalTrains", >1 -> "TotalTrains1".."TotalTrainsN")
#   calcsOnRows  - if TRUE, place the calculations on rows instead of columns
makePivot <- function(rows = NULL, cols = NULL, nMeasures = 0,
                      calcsOnRows = FALSE) {
  pt <- PivotTable$new()
  pt$addData(bhmtrains)
  for (v in cols) pt$addColumnDataGroups(v)
  for (v in rows) pt$addRowDataGroups(v)
  if (nMeasures == 1) {
    pt$defineCalculation(calculationName = "TotalTrains",
                         summariseExpression = "n()")
  } else if (nMeasures > 1) {
    for (i in seq_len(nMeasures)) {
      pt$defineCalculation(calculationName = paste0("TotalTrains", i),
                           summariseExpression = "n()")
    }
  }
  if (calcsOnRows) pt$addRowCalculationGroups()
  pt
}

# Render the pivot, export it to a new worksheet, then render again (checks the
# pivot table survives the export). openxlsx Workbook objects have reference
# semantics, so addWorksheet()/writeData() modify wb in place.
writeScenario <- function(wb, pt, sheetName, title) {
  pt$renderPivot()
  addWorksheet(wb, sheetName)
  openxlsx::writeData(wb, sheet = sheetName, x = title, colNames = FALSE,
                      rowNames = FALSE, startCol = 1, startRow = 1)
  pt$writeToExcelWorksheet(wb = wb, wsName = sheetName, topRowNumber = 3,
                           leftMostColumnNumber = 2, applyStyles = TRUE,
                           mapStylesFromCSS = TRUE)
  pt$renderPivot()
  invisible(NULL)
}

# 1: special case - empty pivot table (data only, nothing else defined)
writeScenario(wb, makePivot(), "Sc1", "Sc1 Empty")

# 2a: single measure on columns
writeScenario(wb, makePivot(nMeasures = 1), "Sc2a", "Sc2a Only 1 Measure")

# 2b: three measures on columns
writeScenario(wb, makePivot(nMeasures = 3), "Sc2b", "Sc2b Only 3 Measures")

# 2c: single measure plus rows
writeScenario(wb, makePivot(rows = "TOC", nMeasures = 1),
              "Sc2c", "Sc2c Measure plus Rows")

# 2d: single measure plus rows and columns
writeScenario(wb, makePivot(rows = "TOC", cols = "TrainCategory",
                            nMeasures = 1),
              "Sc2d", "Sc2d Measure plus RowsXCols")

# 2e: single measure plus rows and 2 sets of columns
writeScenario(wb, makePivot(rows = "TOC",
                            cols = c("TrainCategory", "PowerType"),
                            nMeasures = 1),
              "Sc2e", "Sc2e Measure plus RowsX2Cols")

# 2f: three measures plus rows and 2 sets of columns
writeScenario(wb, makePivot(rows = "TOC",
                            cols = c("TrainCategory", "PowerType"),
                            nMeasures = 3),
              "Sc2f", "Sc2f 3 Measures plus RowsX2Cols")

# 3a: single measure on rows
writeScenario(wb, makePivot(nMeasures = 1, calcsOnRows = TRUE),
              "Sc3a", "Sc3a Only 1 Measure")

# 3b: three measures on rows
writeScenario(wb, makePivot(nMeasures = 3, calcsOnRows = TRUE),
              "Sc3b", "Sc3b Only 3 Measures")

# 3c: single measure plus columns
# BUGFIX: the title previously said "Measure plus Rows" (copy-paste from Sc2c),
# but this scenario adds column data groups, not row data groups.
writeScenario(wb, makePivot(cols = "TrainCategory", nMeasures = 1,
                            calcsOnRows = TRUE),
              "Sc3c", "Sc3c Measure plus Cols")

# 3d: single measure plus rows and columns (produces identical output to 2d)
writeScenario(wb, makePivot(rows = "TOC", cols = "TrainCategory",
                            nMeasures = 1, calcsOnRows = TRUE),
              "Sc3d", "Sc3d Measure plus RowsXCols (same output as Sc2d)")

# 3e: single measure plus rows and 2 sets of columns (produces identical output to 2e)
writeScenario(wb, makePivot(rows = "TOC",
                            cols = c("TrainCategory", "PowerType"),
                            nMeasures = 1, calcsOnRows = TRUE),
              "Sc3e", "Sc3e Measure plus RowsX2Cols (same output as Sc2e)")

# 3f: three measures plus rows and 2 sets of columns
writeScenario(wb, makePivot(rows = "TOC",
                            cols = c("TrainCategory", "PowerType"),
                            nMeasures = 3, calcsOnRows = TRUE),
              "Sc3f", "Sc3f 3 Measures plus RowsX2Cols")

# finished
# NOTE(review): hard-coded, machine-specific output path kept as-is; this is a
# manual test script run on the author's machine.
saveWorkbook(wb, file="C:\\Users\\Chris\\Desktop\\test.xlsx", overwrite = TRUE)
|
/dev/excel-export/ExcelManualRegressionTest.R
|
no_license
|
cbailiss/pivottabler
|
R
| false
| false
| 7,829
|
r
|
# -----------------------------------------------------------------------------
# Manual regression test for pivottabler's Excel export (writeToExcelWorksheet).
# Each scenario: build a pivot table from the bhmtrains sample data, render it,
# export it to its own worksheet (title cell in A1, table starting at B3), then
# render again to check the pivot table is still usable after the export.
# Scenario groups: 1 = empty table, 2x = calculations on columns (default),
# 3x = calculations moved onto rows via addRowCalculationGroups().
# -----------------------------------------------------------------------------
library(openxlsx)
library(pivottabler)
wb <- createWorkbook(creator = Sys.getenv("USERNAME"))
# 1: special case
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$renderPivot()
addWorksheet(wb, "Sc1")
openxlsx::writeData(wb, sheet="Sc1", x="Sc1 Empty", colNames=FALSE, rowNames=FALSE, startCol=1, startRow=1)
pt$writeToExcelWorksheet(wb=wb, wsName="Sc1", topRowNumber=3, leftMostColumnNumber=2, applyStyles=TRUE, mapStylesFromCSS=TRUE)
pt$renderPivot()
# 2a: single measure on columns
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$renderPivot()
addWorksheet(wb, "Sc2a")
openxlsx::writeData(wb, sheet="Sc2a", x="Sc2a Only 1 Measure", colNames=FALSE, rowNames=FALSE, startCol=1, startRow=1)
pt$writeToExcelWorksheet(wb=wb, wsName="Sc2a", topRowNumber=3, leftMostColumnNumber=2, applyStyles=TRUE, mapStylesFromCSS=TRUE)
pt$renderPivot()
# 2b: three measures on columns
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$defineCalculation(calculationName="TotalTrains1", summariseExpression="n()")
pt$defineCalculation(calculationName="TotalTrains2", summariseExpression="n()")
pt$defineCalculation(calculationName="TotalTrains3", summariseExpression="n()")
pt$renderPivot()
addWorksheet(wb, "Sc2b")
openxlsx::writeData(wb, sheet="Sc2b", x="Sc2b Only 3 Measures", colNames=FALSE, rowNames=FALSE, startCol=1, startRow=1)
pt$writeToExcelWorksheet(wb=wb, wsName="Sc2b", topRowNumber=3, leftMostColumnNumber=2, applyStyles=TRUE, mapStylesFromCSS=TRUE)
pt$renderPivot()
# 2c: single measure plus rows
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$renderPivot()
addWorksheet(wb, "Sc2c")
openxlsx::writeData(wb, sheet="Sc2c", x="Sc2c Measure plus Rows", colNames=FALSE, rowNames=FALSE, startCol=1, startRow=1)
pt$writeToExcelWorksheet(wb=wb, wsName="Sc2c", topRowNumber=3, leftMostColumnNumber=2, applyStyles=TRUE, mapStylesFromCSS=TRUE)
pt$renderPivot()
# 2d: single measure plus rows and columns
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$renderPivot()
addWorksheet(wb, "Sc2d")
openxlsx::writeData(wb, sheet="Sc2d", x="Sc2d Measure plus RowsXCols", colNames=FALSE, rowNames=FALSE, startCol=1, startRow=1)
pt$writeToExcelWorksheet(wb=wb, wsName="Sc2d", topRowNumber=3, leftMostColumnNumber=2, applyStyles=TRUE, mapStylesFromCSS=TRUE)
pt$renderPivot()
# 2e: single measure plus rows and 2 sets of columns
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addColumnDataGroups("PowerType")
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$renderPivot()
addWorksheet(wb, "Sc2e")
openxlsx::writeData(wb, sheet="Sc2e", x="Sc2e Measure plus RowsX2Cols", colNames=FALSE, rowNames=FALSE, startCol=1, startRow=1)
pt$writeToExcelWorksheet(wb=wb, wsName="Sc2e", topRowNumber=3, leftMostColumnNumber=2, applyStyles=TRUE, mapStylesFromCSS=TRUE)
pt$renderPivot()
# 2f: three measures plus rows and 2 sets of columns
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addColumnDataGroups("PowerType")
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains1", summariseExpression="n()")
pt$defineCalculation(calculationName="TotalTrains2", summariseExpression="n()")
pt$defineCalculation(calculationName="TotalTrains3", summariseExpression="n()")
pt$renderPivot()
addWorksheet(wb, "Sc2f")
openxlsx::writeData(wb, sheet="Sc2f", x="Sc2f 3 Measures plus RowsX2Cols", colNames=FALSE, rowNames=FALSE, startCol=1, startRow=1)
pt$writeToExcelWorksheet(wb=wb, wsName="Sc2f", topRowNumber=3, leftMostColumnNumber=2, applyStyles=TRUE, mapStylesFromCSS=TRUE)
pt$renderPivot()
# 3a: single measure on rows
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$addRowCalculationGroups()
pt$renderPivot()
addWorksheet(wb, "Sc3a")
openxlsx::writeData(wb, sheet="Sc3a", x="Sc3a Only 1 Measure", colNames=FALSE, rowNames=FALSE, startCol=1, startRow=1)
pt$writeToExcelWorksheet(wb=wb, wsName="Sc3a", topRowNumber=3, leftMostColumnNumber=2, applyStyles=TRUE, mapStylesFromCSS=TRUE)
pt$renderPivot()
# 3b: three measures on rows
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$defineCalculation(calculationName="TotalTrains1", summariseExpression="n()")
pt$defineCalculation(calculationName="TotalTrains2", summariseExpression="n()")
pt$defineCalculation(calculationName="TotalTrains3", summariseExpression="n()")
pt$addRowCalculationGroups()
pt$renderPivot()
addWorksheet(wb, "Sc3b")
openxlsx::writeData(wb, sheet="Sc3b", x="Sc3b Only 3 Measures", colNames=FALSE, rowNames=FALSE, startCol=1, startRow=1)
pt$writeToExcelWorksheet(wb=wb, wsName="Sc3b", topRowNumber=3, leftMostColumnNumber=2, applyStyles=TRUE, mapStylesFromCSS=TRUE)
pt$renderPivot()
# 3c: single measure plus columns
# NOTE(review): the title below says "Measure plus Rows" but this scenario adds
# column data groups - looks like a copy-paste from Sc2c; confirm intended label.
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$addRowCalculationGroups()
pt$renderPivot()
addWorksheet(wb, "Sc3c")
openxlsx::writeData(wb, sheet="Sc3c", x="Sc3c Measure plus Rows", colNames=FALSE, rowNames=FALSE, startCol=1, startRow=1)
pt$writeToExcelWorksheet(wb=wb, wsName="Sc3c", topRowNumber=3, leftMostColumnNumber=2, applyStyles=TRUE, mapStylesFromCSS=TRUE)
pt$renderPivot()
# 3d: single measure plus rows and columns (produces identical output to 2d)
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$addRowCalculationGroups()
pt$renderPivot()
addWorksheet(wb, "Sc3d")
openxlsx::writeData(wb, sheet="Sc3d", x="Sc3d Measure plus RowsXCols (same output as Sc2d)", colNames=FALSE, rowNames=FALSE, startCol=1, startRow=1)
pt$writeToExcelWorksheet(wb=wb, wsName="Sc3d", topRowNumber=3, leftMostColumnNumber=2, applyStyles=TRUE, mapStylesFromCSS=TRUE)
pt$renderPivot()
# 3e: single measure plus rows and 2 sets of columns (produces identical output to 2e)
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addColumnDataGroups("PowerType")
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$addRowCalculationGroups()
pt$renderPivot()
addWorksheet(wb, "Sc3e")
openxlsx::writeData(wb, sheet="Sc3e", x="Sc3e Measure plus RowsX2Cols (same output as Sc2e)", colNames=FALSE, rowNames=FALSE, startCol=1, startRow=1)
pt$writeToExcelWorksheet(wb=wb, wsName="Sc3e", topRowNumber=3, leftMostColumnNumber=2, applyStyles=TRUE, mapStylesFromCSS=TRUE)
pt$renderPivot()
# 3f: three measures plus rows and 2 sets of columns
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addColumnDataGroups("PowerType")
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains1", summariseExpression="n()")
pt$defineCalculation(calculationName="TotalTrains2", summariseExpression="n()")
pt$defineCalculation(calculationName="TotalTrains3", summariseExpression="n()")
pt$addRowCalculationGroups()
pt$renderPivot()
addWorksheet(wb, "Sc3f")
openxlsx::writeData(wb, sheet="Sc3f", x="Sc3f 3 Measures plus RowsX2Cols", colNames=FALSE, rowNames=FALSE, startCol=1, startRow=1)
pt$writeToExcelWorksheet(wb=wb, wsName="Sc3f", topRowNumber=3, leftMostColumnNumber=2, applyStyles=TRUE, mapStylesFromCSS=TRUE)
pt$renderPivot()
# finished
# NOTE(review): hard-coded, machine-specific output path (manual test script).
saveWorkbook(wb, file="C:\\Users\\Chris\\Desktop\\test.xlsx", overwrite = TRUE)
|
#' DEST UFPR Themed Project Template for Rmarkdown
#'
#' Generates from an RMarkdown file a PDF document.
#' @inheritParams rmarkdown::pdf_document
#' @return A PDF document.
#' @author Fernando Mayer
#' @examples
#' \dontrun{
#' # Generate slide deck from beamer template
#' rmarkdown::draft("proj.Rmd", template = "projeto_template", package = "tcctemplate")
#'
#' # Compile the document
#' rmarkdown::render("proj/proj.Rmd")
#' }
#' @importFrom bookdown pdf_document2
#' @export
projeto_template <-
function(toc = TRUE,
toc_depth = 3,
number_sections = TRUE,
fig_width = 10,
fig_height = 7,
fig_crop = TRUE,
fig_caption = TRUE,
dev = "pdf",
df_print = "default",
highlight = "default",
## template = "default",
keep_tex = FALSE,
keep_md = FALSE,
latex_engine = "pdflatex",
citation_package = c("default", "natbib", "biblatex"),
includes = NULL,
## Needed so that \hypertarget does not cause problems when defining
## sections etc. See:
## https://github.com/pzhaonet/bookdownplus/issues/45
## https://martinhjelm.github.io/2018/05/30/Removing-Hypertarget-For-Pandoc-Markdown-to-Latex/
## md_extensions = "-auto_identifiers",
md_extensions = NULL,
output_extensions = NULL,
pandoc_args = "--top-level-division=chapter",
extra_dependencies = NULL)
{
# Resolve the package-shipped LaTeX template, then copy any support files
# (logos, CSL style, bibliography, chapter stubs) that the project is missing.
# NOTE(review): find_resource() and load_resources_if_missing() are
# package-internal helpers defined elsewhere in this package.
template <- find_resource("projeto-template", "template.tex")
load_resources_if_missing("projeto-template",
c("leg.pdf", "ufpr.pdf", "dest.pdf",
"abntex2.csl", "ref.bib",
"anexo01.Rmd", "apendice01.Rmd",
"cronograma.Rmd", "introducao.Rmd",
"metodologia.Rmd", "objetivos.Rmd"))
# Delegate to bookdown::pdf_document2, forwarding every argument unchanged but
# forcing the custom template resolved above.
bookdown::pdf_document2(
template = template,
toc = toc,
toc_depth = toc_depth,
number_sections = number_sections,
fig_width = fig_width,
fig_height = fig_height,
fig_crop = fig_crop,
fig_caption = fig_caption,
dev = dev,
df_print = df_print,
highlight = highlight,
keep_tex = keep_tex,
keep_md = keep_md,
latex_engine = latex_engine,
citation_package = citation_package,
includes = includes,
md_extensions = md_extensions,
output_extensions = output_extensions,
pandoc_args = pandoc_args,
extra_dependencies = extra_dependencies)
}
|
/R/projeto_templates.R
|
permissive
|
fernandomayer/tcctemplate
|
R
| false
| false
| 3,020
|
r
|
#' DEST UFPR Themed Project Template for Rmarkdown
#'
#' Generates from an RMarkdown file a PDF document.
#' @inheritParams rmarkdown::pdf_document
#' @return A PDF document.
#' @author Fernando Mayer
#' @examples
#' \dontrun{
#' # Generate slide deck from beamer template
#' rmarkdown::draft("proj.Rmd", template = "projeto_template", package = "tcctemplate")
#'
#' # Compile the document
#' rmarkdown::render("proj/proj.Rmd")
#' }
#' @importFrom bookdown pdf_document2
#' @export
projeto_template <-
function(toc = TRUE,
toc_depth = 3,
number_sections = TRUE,
fig_width = 10,
fig_height = 7,
fig_crop = TRUE,
fig_caption = TRUE,
dev = "pdf",
df_print = "default",
highlight = "default",
## template = "default",
keep_tex = FALSE,
keep_md = FALSE,
latex_engine = "pdflatex",
citation_package = c("default", "natbib", "biblatex"),
includes = NULL,
## Needed so that \hypertarget does not cause problems when defining
## sections etc. See:
## https://github.com/pzhaonet/bookdownplus/issues/45
## https://martinhjelm.github.io/2018/05/30/Removing-Hypertarget-For-Pandoc-Markdown-to-Latex/
## md_extensions = "-auto_identifiers",
md_extensions = NULL,
output_extensions = NULL,
pandoc_args = "--top-level-division=chapter",
extra_dependencies = NULL)
{
# Resolve the package-shipped LaTeX template, then copy any support files
# (logos, CSL style, bibliography, chapter stubs) that the project is missing.
# NOTE(review): find_resource() and load_resources_if_missing() are
# package-internal helpers defined elsewhere in this package.
template <- find_resource("projeto-template", "template.tex")
load_resources_if_missing("projeto-template",
c("leg.pdf", "ufpr.pdf", "dest.pdf",
"abntex2.csl", "ref.bib",
"anexo01.Rmd", "apendice01.Rmd",
"cronograma.Rmd", "introducao.Rmd",
"metodologia.Rmd", "objetivos.Rmd"))
# Delegate to bookdown::pdf_document2, forwarding every argument unchanged but
# forcing the custom template resolved above.
bookdown::pdf_document2(
template = template,
toc = toc,
toc_depth = toc_depth,
number_sections = number_sections,
fig_width = fig_width,
fig_height = fig_height,
fig_crop = fig_crop,
fig_caption = fig_caption,
dev = dev,
df_print = df_print,
highlight = highlight,
keep_tex = keep_tex,
keep_md = keep_md,
latex_engine = latex_engine,
citation_package = citation_package,
includes = includes,
md_extensions = md_extensions,
output_extensions = output_extensions,
pandoc_args = pandoc_args,
extra_dependencies = extra_dependencies)
}
|
# Fit the BiChrom likelihood model to a simulated 2500-taxon dataset
# (replicate 91) with a derivative-free optimiser, and save the MLE parameter
# estimates plus the minimised objective value to CSV.
library("ape")
library("geiger")
library("expm")
library("nloptr")

# Project-local sources: likelihood function (negloglikelihood.wh), Q-matrix
# construction, and the pruning algorithm it relies on.
source("masternegloglikeeps1.R")
source("Qmatrixwoodherb2.R")
source("Pruning2.R")

# Simulated tree and chromosome-count data for this replicate
sim.tree <- read.tree("tree2500taxa91.txt")
sim.chrom <- read.table("chrom2500taxa91.txt", header = FALSE)

# Largest chromosome number modelled; the state space has 2*(last.state+1)
# states (`=` replaced with `<-` for consistency with the rest of the script)
last.state <- 50

# Optimiser starting point; the 10 rate parameters are searched on log scale
x.0 <- log(c(0.12, 0.001, 0.25, 0.002, 0.036, 0.006, 0.04, 0.02,
             1.792317852, 1.57e-14))

# Uniform prior over the 2*(last.state+1) root states
p.0 <- rep(1, 2 * (last.state + 1)) / (2 * (last.state + 1))

# Subplex (derivative-free) minimisation of the negative log-likelihood;
# extra named arguments after opts are forwarded to negloglikelihood.wh.
my.options <- list("algorithm" = "NLOPT_LN_SBPLX", "ftol_rel" = 1e-08,
                   "print_level" = 1, "maxtime" = 170000000,
                   "maxeval" = 1000)
mle <- nloptr(x0 = x.0, eval_f = negloglikelihood.wh, opts = my.options,
              bichrom.phy = sim.tree, bichrom.data = sim.chrom,
              max.chromosome = last.state, pi.0 = p.0)
print(mle)

# Entries 1-10: MLE parameter estimates; entry 11: objective at the optimum.
# (Equivalent to the old preallocate-then-fill of an 11-slot vector.)
results <- c(mle$solution, mle$objective)
write.table(results, file = "globalmax2500taxa91.csv", sep = ",")
|
/SImulations number of taxa/2500 taxa/optim2500taxa91.R
|
no_license
|
roszenil/Bichromdryad
|
R
| false
| false
| 827
|
r
|
# Fit the BiChrom likelihood model to a simulated 2500-taxon dataset
# (replicate 91) and save the MLE parameter estimates plus objective to CSV.
library( "ape" )
library( "geiger" )
library( "expm" )
library( "nloptr" )
# Project-local sources: likelihood function (negloglikelihood.wh), Q-matrix
# construction, and the pruning algorithm it relies on.
source( "masternegloglikeeps1.R" )
source( "Qmatrixwoodherb2.R" )
source("Pruning2.R")
# Simulated tree and chromosome-count data for this replicate
sim.tree<-read.tree("tree2500taxa91.txt")
sim.chrom<-read.table("chrom2500taxa91.txt", header=FALSE)
# Largest chromosome number modelled; state space has 2*(last.state+1) states
last.state=50
# Optimiser starting point; the 10 rate parameters are searched on log scale
x.0<- log(c(0.12, 0.001, 0.25, 0.002,0.036, 0.006, 0.04,0.02, 1.792317852, 1.57e-14))
# Uniform prior over the 2*(last.state+1) root states
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
# Slots 1-10: parameter estimates; slot 11: objective (neg. log-likelihood)
results<-rep(0,11)
# Subplex (derivative-free) minimisation of the negative log-likelihood;
# extra named arguments after opts are forwarded to negloglikelihood.wh.
my.options<-list("algorithm"= "NLOPT_LN_SBPLX","ftol_rel"=1e-08,"print_level"=1,"maxtime"=170000000, "maxeval"=1000)
mle<-nloptr(x0=x.0,eval_f=negloglikelihood.wh,opts=my.options,bichrom.phy=sim.tree, bichrom.data=sim.chrom,max.chromosome=last.state,pi.0=p.0)
print(mle)
results[1:10]<-mle$solution
results[11]<-mle$objective
write.table(results,file="globalmax2500taxa91.csv",sep=",")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transforms_text.R
\name{numerize}
\alias{numerize}
\title{Combine several numbers together into a string}
\usage{
numerize(..., sep = "")
}
\arguments{
\item{...}{expressions containing numerical values to concatenate}
\item{sep}{An optional separator to insert between the values in ...}
}
\value{
A character vector with the concatenated values in ...
}
\description{
Combine several numbers together into a string
}
\details{
Any number of values can be concatenated. Numerize is
meant to work with number-like data, but will only warn if the
expressions return non-integer values. The returned values are character
representations of (ideally) numbers, but if non-integer values are included
they will still appear.
}
\examples{
numerize(as.character(1:10), 11:20, sep=".")
numerize(as.character(1:10), letters[1:10], sep=".")
numerize(as.character(1:10), 11:20/3, sep=".")
}
|
/man/numerize.Rd
|
no_license
|
qPharmetra/PMDatR
|
R
| false
| true
| 961
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transforms_text.R
\name{numerize}
\alias{numerize}
\title{Combine several numbers together into a string}
\usage{
numerize(..., sep = "")
}
\arguments{
\item{...}{expressions containing numerical values to concatenate}
\item{sep}{An optional separator to insert between the values in ...}
}
\value{
A character vector with the concatenated values in ...
}
\description{
Combine several numbers together into a string
}
\details{
Any number of values can be concatenated. Numerize is
meant to work with number-like data, but will only warn if the
expressions return non-integer values. The returned values are character
representations of (ideally) numbers, but if non-integer values are included
they will still appear.
}
\examples{
numerize(as.character(1:10), 11:20, sep=".")
numerize(as.character(1:10), letters[1:10], sep=".")
numerize(as.character(1:10), 11:20/3, sep=".")
}
|
# Auto-generated fuzz/regression input (valgrind run) for the internal
# meteor:::Photoperiod function. The extreme and denormal doubles exercise
# numeric edge cases in the compiled code.
# NOTE(review): doy has 21 elements but latitude has 26 - presumably the
# underlying implementation recycles or truncates; verify against meteor.
testlist <- list(doy = c(4.62595082430429e-312, 3.64097969159734e-277, -0.427429395729092, 2.09887675795476e-104, 1.55322770574539e-47, 1.45474215376015e+135, 3.56441595774554e+114, -2.65061195968734e+303, -9.52682579807939e+139, -3.98397314603138e+183, -1.77863325536183e+126, 6.42851301544252e-310, 1.66013830765329e-307, 0.000234804018098546, 1.91570942562165e+206, 365.687522888184, 2.07029838648818e+24, -7.21048519616203e+198, 3.51433879412484e+125, -7.92665263616256e+107, 1.17913068623545e+75 ), latitude = c(-2.61899946856284e-59, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Invoke with the fuzzed arguments and display the structure of the result
result <- do.call(meteor:::Photoperiod,testlist)
str(result)
|
/meteor/inst/testfiles/Photoperiod/AFL_Photoperiod/Photoperiod_valgrind_files/1615769043-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 683
|
r
|
# Auto-generated fuzz/regression input (valgrind run) for the internal
# meteor:::Photoperiod function. The extreme and denormal doubles exercise
# numeric edge cases in the compiled code.
# NOTE(review): doy has 21 elements but latitude has 26 - presumably the
# underlying implementation recycles or truncates; verify against meteor.
testlist <- list(doy = c(4.62595082430429e-312, 3.64097969159734e-277, -0.427429395729092, 2.09887675795476e-104, 1.55322770574539e-47, 1.45474215376015e+135, 3.56441595774554e+114, -2.65061195968734e+303, -9.52682579807939e+139, -3.98397314603138e+183, -1.77863325536183e+126, 6.42851301544252e-310, 1.66013830765329e-307, 0.000234804018098546, 1.91570942562165e+206, 365.687522888184, 2.07029838648818e+24, -7.21048519616203e+198, 3.51433879412484e+125, -7.92665263616256e+107, 1.17913068623545e+75 ), latitude = c(-2.61899946856284e-59, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Invoke with the fuzzed arguments and display the structure of the result
result <- do.call(meteor:::Photoperiod,testlist)
str(result)
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% AvgCnPlm.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{AvgCnPlm}
\docType{class}
\alias{AvgCnPlm}
\title{The AvgCnPlm class}
\description{
Package: aroma.affymetrix \cr
\bold{Class AvgCnPlm}\cr
\code{\link[R.oo]{Object}}\cr
\code{~~|}\cr
\code{~~+--}\code{\link[aroma.core]{ParametersInterface}}\cr
\code{~~~~~~~|}\cr
\code{~~~~~~~+--}\code{\link[aroma.affymetrix]{Model}}\cr
\code{~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{UnitModel}}\cr
\code{~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{MultiArrayUnitModel}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{ProbeLevelModel}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{AvgPlm}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{AvgSnpPlm}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{SnpPlm}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{CnPlm}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\emph{\code{AvgCnPlm}}\cr
\bold{Directly known subclasses:}\cr
\cr
public abstract static class \bold{AvgCnPlm}\cr
extends \link[aroma.affymetrix]{CnPlm}\cr
}
\usage{
AvgCnPlm(..., combineAlleles=FALSE)
}
\arguments{
\item{...}{Arguments passed to \code{\link{AvgSnpPlm}}.}
\item{combineAlleles}{If \code{\link[base:logical]{FALSE}}, allele A and allele B are treated
separately, otherwise together.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\emph{No methods defined}.
\bold{Methods inherited from CnPlm}:\cr
getCellIndices, getChipEffectSet, getCombineAlleles, getParameters, getProbeAffinityFile, setCombineAlleles
\bold{Methods inherited from SnpPlm}:\cr
getCellIndices, getChipEffectSet, getMergeStrands, getParameters, getProbeAffinityFile, setMergeStrands
\bold{Methods inherited from AvgSnpPlm}:\cr
getAsteriskTags
\bold{Methods inherited from AvgPlm}:\cr
getAsteriskTags, getCalculateResidualsFunction, getParameters, validate
\bold{Methods inherited from ProbeLevelModel}:\cr
calculateResidualSet, calculateWeights, fit, getAsteriskTags, getCalculateResidualsFunction, getChipEffectSet, getProbeAffinityFile, getResidualSet, getRootPath, getWeightsSet
\bold{Methods inherited from MultiArrayUnitModel}:\cr
getListOfPriors, setListOfPriors, validate
\bold{Methods inherited from UnitModel}:\cr
findUnitsTodo, getAsteriskTags, getFitSingleCellUnitFunction, getParameters
\bold{Methods inherited from Model}:\cr
as.character, fit, getAlias, getAsteriskTags, getDataSet, getFullName, getName, getPath, getRootPath, getTags, setAlias, setTags
\bold{Methods inherited from ParametersInterface}:\cr
getParameterSets, getParameters, getParametersAsString
\bold{Methods inherited from Object}:\cr
$, $<-, [[, [[<-, as.character, attach, attachLocally, clearCache, clearLookupCache, clone, detach, equals, extend, finalize, getEnvironment, getFieldModifier, getFieldModifiers, getFields, getInstantiationTime, getStaticInstance, hasField, hashCode, ll, load, names, objectSize, print, save, asThis
}
\author{Henrik Bengtsson}
\keyword{classes}
|
/man/AvgCnPlm.Rd
|
no_license
|
HenrikBengtsson/aroma.affymetrix
|
R
| false
| false
| 3,685
|
rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% AvgCnPlm.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{AvgCnPlm}
\docType{class}
\alias{AvgCnPlm}
\title{The AvgCnPlm class}
\description{
Package: aroma.affymetrix \cr
\bold{Class AvgCnPlm}\cr
\code{\link[R.oo]{Object}}\cr
\code{~~|}\cr
\code{~~+--}\code{\link[aroma.core]{ParametersInterface}}\cr
\code{~~~~~~~|}\cr
\code{~~~~~~~+--}\code{\link[aroma.affymetrix]{Model}}\cr
\code{~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{UnitModel}}\cr
\code{~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{MultiArrayUnitModel}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{ProbeLevelModel}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{AvgPlm}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{AvgSnpPlm}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{SnpPlm}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{CnPlm}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\emph{\code{AvgCnPlm}}\cr
\bold{Directly known subclasses:}\cr
\cr
public abstract static class \bold{AvgCnPlm}\cr
extends \link[aroma.affymetrix]{CnPlm}\cr
}
\usage{
AvgCnPlm(..., combineAlleles=FALSE)
}
\arguments{
\item{...}{Arguments passed to \code{\link{AvgSnpPlm}}.}
\item{combineAlleles}{If \code{\link[base:logical]{FALSE}}, allele A and allele B are treated
separately, otherwise together.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\emph{No methods defined}.
\bold{Methods inherited from CnPlm}:\cr
getCellIndices, getChipEffectSet, getCombineAlleles, getParameters, getProbeAffinityFile, setCombineAlleles
\bold{Methods inherited from SnpPlm}:\cr
getCellIndices, getChipEffectSet, getMergeStrands, getParameters, getProbeAffinityFile, setMergeStrands
\bold{Methods inherited from AvgSnpPlm}:\cr
getAsteriskTags
\bold{Methods inherited from AvgPlm}:\cr
getAsteriskTags, getCalculateResidualsFunction, getParameters, validate
\bold{Methods inherited from ProbeLevelModel}:\cr
calculateResidualSet, calculateWeights, fit, getAsteriskTags, getCalculateResidualsFunction, getChipEffectSet, getProbeAffinityFile, getResidualSet, getRootPath, getWeightsSet
\bold{Methods inherited from MultiArrayUnitModel}:\cr
getListOfPriors, setListOfPriors, validate
\bold{Methods inherited from UnitModel}:\cr
findUnitsTodo, getAsteriskTags, getFitSingleCellUnitFunction, getParameters
\bold{Methods inherited from Model}:\cr
as.character, fit, getAlias, getAsteriskTags, getDataSet, getFullName, getName, getPath, getRootPath, getTags, setAlias, setTags
\bold{Methods inherited from ParametersInterface}:\cr
getParameterSets, getParameters, getParametersAsString
\bold{Methods inherited from Object}:\cr
$, $<-, [[, [[<-, as.character, attach, attachLocally, clearCache, clearLookupCache, clone, detach, equals, extend, finalize, getEnvironment, getFieldModifier, getFieldModifiers, getFields, getInstantiationTime, getStaticInstance, hasField, hashCode, ll, load, names, objectSize, print, save, asThis
}
\author{Henrik Bengtsson}
\keyword{classes}
|
#' @title Load Coastline Data
#' @description Low res to high res: coastlineWorld, coastlineWorldCoarse, coastlineWorldMedium, coastlineWorldFine
#' @author Laura Whitmore
#' @author Thomas Bryce Kelly
#' @param coast a string referring to a coastline data object such as those provided by the OCE package.
#' @return Invisibly, the character name(s) of the dataset(s) loaded (the return value of `data()`).
#' @import oce
#' @import ocedata
get.coast = function(coast = 'coastlineWorld') {
  ## Load the named dataset into the caller's workspace via data().
  do.call('data', list(coast))
}
#' @title Make Map Projection
#' @description Builds a proj4-style projection string from a projection name
#'  (or its numeric index into the supported list) plus reference coordinates.
#' @author Thomas Bryce Kelly
#' @param projection A string or number corresponding to a base projection (e.g. 1 = 'merc')
#' @param lat the primary latitude for the projection, may or may not be applicable based on projection
#' @param lon same as lat for longitude
#' @param h The viewing height in meters (used only for some projections)
#' @param dlat The distance to the secondary latitude (in degrees). Only applicable to some projections
#' @return A proj4 string, e.g. "+proj=merc +lon_0=0 +lat_0=0 +lat_1=0 +lat_2=10 +h=1e+08".
#' @export
make.proj = function(projection = NULL, lat = NULL, lon = NULL, h = NULL, dlat = 10) {
  if (is.null(projection)) { projection = 'merc' }
  projections = c('merc', 'aea', 'eck3', 'eck4', 'eqc', 'geos', 'lonlat', 'mill', 'natearth', 'nsper', 'stere', 'ortho')
  projection.list = c('1) merc', '2) aea', '3) eck3', '4) eck4', '5) eqc', '6) geos', '7) lonlat', '8) mill', '9) natearth', '10) nsper', '11) stere', '12) ortho')

  ## Allow a numeric index into the projection table.
  if (is.numeric(projection)) {
    projection = projections[projection]
  }
  ## Bug fix: guard against NA (out-of-range numeric index) and add the
  ## missing space after the colon in the message.
  if (is.na(projection) || !projection %in% projections) {
    message('Unknown projection type, recommend to use: ', paste(projection.list, collapse = ', '))
  }

  ## Perspective projections need a viewing height. Bug fix: use the scalar,
  ## short-circuiting && instead of the elementwise & in these if() conditions.
  if (identical(projection, 'geos') && is.null(h)) { h = 1e8 }
  if (identical(projection, 'nsper') && is.null(h)) { h = 1e8 }

  # Default values for any remaining unset parameters
  if (is.null(lat)) { lat = 0 }
  if (is.null(lon)) { lon = 0 }
  if (is.null(h)) { h = 1e8 }

  paste0('+proj=', projection, ' +lon_0=', lon, ' +lat_0=', lat, ' +lat_1=', lat, ' +lat_2=', lat + dlat, ' +h=', h)
}
#' @title Bilinear Interpolation of a Grid
#' @description For each query point (x[i], y[i]) interpolates a value from the
#'  gridded field `z` (dimensioned length(gx) x length(gy)) using the two
#'  bracketing grid nodes in each direction. Query points beyond the grid edge
#'  are clamped to the nearest edge node.
#' @param x,y coordinates of the query points (equal length vectors)
#' @param gx,gy grid node coordinates (ascending)
#' @param z matrix of grid values indexed as z[ix, iy]
#' @author Thomas Bryce Kelly
#' @export
interp.bilinear = function(x, y, gx, gy, z) {
  z.out = rep(0, length(x))

  for (i in 1:length(x)) {
    ## Index of the last grid node at or below the query (0 when below the grid).
    x1 = max(0, which(gx <= x[i]))
    x2 = x1 + 1
    y1 = max(0, which(gy <= y[i]))
    y2 = y1 + 1

    ## Linear weights toward the lower node. NOTE(review): the denominator is
    ## always the first grid spacing, so this assumes a uniformly spaced
    ## grid -- confirm before using with irregular gx/gy.
    wx1 = (gx[x2] - x[i]) / (gx[2] - gx[1])
    wy1 = (gy[y2] - y[i]) / (gy[2] - gy[1])

    ## Clamp queries below the first node to the first node.
    if (x1 == 0) {
      x1 = 1
      wx1 = 1
    }
    if (y1 == 0) {
      y1 = 1
      wy1 = 1
    }
    ## Clamp queries at/after the last node to the last node.
    if(x1 == length(gx)) {
      x2 = x1
      wx1 = 1
    }
    if(y1 == length(gy)) {
      y2 = y1
      wy1 = 1
    }

    ## Weighted average of the (up to) four surrounding nodes.
    z.out[i] = wy1 * (wx1 * z[x1, y1] + (1 - wx1) * z[x2,y1]) + (1 - wy1) * (wx1 * z[x1, y2] + (1 - wx1) * z[x2,y2])
  }

  z.out
}
#' @title Interpolate from fractional index onto grid
#' @description Given fractional (i, j) index positions into `grid`, returns a
#'  bilinearly weighted value blending the nearest node with its neighbor in
#'  the direction of the fractional offset.
#' @param grid a matrix of values
#' @param i,j fractional row/column indices (equal length vectors)
#' @author Thomas Bryce Kelly
#' @export
grid.interp = function(grid, i, j) {
  val = rep(NA, length(i))

  ## Nearest node and the signed fractional offset from it (in [-0.5, 0.5]).
  x = round(i)
  y = round(j)
  dx = i - x
  dy = j - y

  for (k in 1:length(i)) {
    ## Blend toward the neighbor indicated by sign(dx)/sign(dy). When the
    ## offset is exactly zero, sign() is 0 and the "neighbor" is the node
    ## itself, which is harmless (its weight is also 0).
    val[k] = (1 - abs(dy[k])) * ((1 - abs(dx[k])) * grid[cbind(x[k], y[k])] + abs(dx[k]) * grid[cbind(x[k] + sign(dx[k]), y[k])]) +
      abs(dy[k]) * ((1 - abs(dx[k])) * grid[cbind(x[k], y[k] + sign(dy[k]))] + abs(dx[k]) * grid[cbind(x[k] + sign(dx[k]), y[k] + sign(dy[k]))])
  }
  ## Return
  val
}
#' @title Calculate extended grid
#' @description Builds the (n+1) x (m+1) array of cell-corner ("vertex")
#'  coordinates for an n x m array of cell-center coordinates by stepping half
#'  a grid spacing from each center. Returns list(x, y) of vertex coordinates.
#' NOTE(review): index arithmetic such as dx.dx[ii, j-2] requires at least a
#'  2 x 2 array of centers; smaller inputs will error -- confirm callers
#'  guarantee this.
#' @param x matrix of cell-center x coordinates
#' @param y matrix of cell-center y coordinates
#' @author Thomas Bryce Kelly
#' @export
calc.vertex = function(x, y) {
  ## Diffs: half the local grid spacing of each coordinate along each dimension.
  dx.dx = t(diff(t(x))) / 2   # change of x along columns
  dx.dy = diff(x) / 2         # change of x along rows
  dy.dx = t(diff(t(y))) / 2   # change of y along columns
  dy.dy = diff(y) / 2         # change of y along rows

  ## Vertex arrays are one larger than the center arrays in each dimension.
  vertex.x = matrix(NA, nrow = dim(x)[1]+1, ncol = dim(x)[2]+1)
  vertex.y = matrix(NA, nrow = dim(y)[1]+1, ncol = dim(y)[2]+1)

  ## Field: each interior vertex extrapolated from the center at (i-1, j-1).
  for (i in 2:(dim(vertex.x)[1] - 1)) {
    for (j in 2:(dim(vertex.x)[2] - 1)) {
      ii = max(i-1, 1)
      jj = max(j-1, 1)
      vertex.x[i,j] = x[ii,jj] + dx.dx[ii,jj] + dx.dy[ii,jj]
      vertex.y[i,j] = y[ii,jj] + dy.dx[ii,jj] + dy.dy[ii,jj]
    }
  }

  ## Fill in perimeter
  # i = 1: first row, extrapolate backwards along rows.
  for (j in 2:(dim(vertex.x)[2] - 1)) {
    jj = max(j-1, 1)
    vertex.x[1,j] = x[1,jj] + dx.dx[1,jj] - dx.dy[1,jj]
    vertex.y[1,j] = y[1,jj] + dy.dx[1,jj] - dy.dy[1,jj]
  }
  # j = 1: first column, extrapolate backwards along columns.
  for (i in 2:(dim(vertex.x)[1] - 1)) {
    ii = max(i-1, 1)
    vertex.x[i,1] = x[ii,1] - dx.dx[ii,1] + dx.dy[ii,1]
    vertex.y[i,1] = y[ii,1] - dy.dx[ii,1] + dy.dy[ii,1]
  }
  # j = dim(vertex.x)[2]: last column, reuse the last interior spacing.
  for (i in 1:(dim(vertex.x)[1] - 1)) {
    ii = max(i-1, 1)
    j = dim(vertex.x)[2]
    vertex.x[i,j] = x[ii,j-1] + dx.dx[ii,j-2] + dx.dy[ii,j-1]
    vertex.y[i,j] = y[ii,j-1] + dy.dx[ii,j-2] + dy.dy[ii,j-1]
  }
  # i = dim(vertex.x)[1]: last row, reuse the last interior spacing.
  for (j in 1:(dim(vertex.x)[2] - 1)) {
    jj = max(j-1, 1)
    i = dim(vertex.x)[1]
    vertex.x[i,j] = x[i-1,jj] + dx.dx[i-1,jj] + dx.dy[i-2,jj]
    vertex.y[i,j] = y[i-1,jj] + dy.dx[i-1,jj] + dy.dy[i-2,jj]
  }

  ## Fill in corners
  ## both = 1 (top-left)
  vertex.x[1,1] = x[1,1] - dx.dx[1,1] - dx.dy[1,1]
  vertex.y[1,1] = y[1,1] - dy.dx[1,1] - dy.dy[1,1]
  ## bottom-left
  i = dim(vertex.x)[1]
  vertex.x[i,1] = x[i-1,1] - dx.dx[i-1,1] + dx.dy[i-2,1]
  vertex.y[i,1] = y[i-1,1] - dy.dx[i-1,1] + dy.dy[i-2,1]
  ## bottom-right
  i = dim(vertex.x)[1]
  j = dim(vertex.x)[2]
  vertex.x[i,j] = x[i-1,j-1] + dx.dx[i-1,j-2] + dx.dy[i-2,j-1]
  vertex.y[i,j] = y[i-1,j-1] + dy.dx[i-1,j-2] + dy.dy[i-2,j-1]
  ## top-right
  j = dim(vertex.x)[2]
  vertex.x[1,j] = x[1,j-1] + dx.dx[1,j-2] - dx.dy[1,j-1]
  vertex.y[1,j] = y[1,j-1] + dy.dx[1,j-2] - dy.dy[1,j-1]

  list(x = vertex.x, y = vertex.y)
}
#' @title Calculate refined (2x) grid
#' @description Doubles the resolution of a 2-D grid by inserting linearly
#'  interpolated midpoints between adjacent nodes. Returns list(x, y, z)
#'  where each array has dimensions (2*nrow-1) x (2*ncol-1).
#' @param x,y optional coordinate arrays or vectors; default to index coordinates
#' @param z matrix of values to refine
#' @author Thomas Bryce Kelly
#' @export
grid.refinement = function(x = NULL, y = NULL, z) {
  if (is.null(dim(z))) {stop('grid.refinement: z must be an array object of two dimensions.')}
  nz = dim(z)

  ## Default coordinates are simple indices.
  if (is.null(x) & is.null(y)) {
    x = c(1:nz[1])
    y = c(1:nz[2])
  }
  ## Promote coordinate vectors to full 2-D arrays.
  if (is.null(dim(x)) & is.null(dim(y))) {
    x = array(x, dim = nz)
    y = t(array(y, dim = rev(nz)))
  }

  nr = dim(x)[1]
  nc = dim(x)[2]
  fine.x = array(0, dim = c(2 * nr - 1, 2 * nc - 1))
  fine.y = fine.x
  fine.z = fine.x

  ## Original nodes land on the odd rows/columns of the refined grid.
  for (jj in 1:nc) {
    for (ii in 1:nr) {
      fine.x[2 * ii - 1, 2 * jj - 1] = x[ii, jj]
      fine.y[2 * ii - 1, 2 * jj - 1] = y[ii, jj]
      fine.z[2 * ii - 1, 2 * jj - 1] = z[ii, jj]
    }
  }
  ## Midpoints between vertically adjacent nodes.
  for (jj in 1:nc) {
    for (ii in 1:(nr - 1)) {
      fine.x[2 * ii, 2 * jj - 1] = 0.5 * (x[ii, jj] + x[ii + 1, jj])
      fine.y[2 * ii, 2 * jj - 1] = 0.5 * (y[ii, jj] + y[ii + 1, jj])
      fine.z[2 * ii, 2 * jj - 1] = 0.5 * (z[ii, jj] + z[ii + 1, jj])
    }
  }
  ## Midpoints between horizontally adjacent nodes.
  for (jj in 1:(nc - 1)) {
    for (ii in 1:nr) {
      fine.x[2 * ii - 1, 2 * jj] = 0.5 * (x[ii, jj] + x[ii, jj + 1])
      fine.y[2 * ii - 1, 2 * jj] = 0.5 * (y[ii, jj] + y[ii, jj + 1])
      fine.z[2 * ii - 1, 2 * jj] = 0.5 * (z[ii, jj] + z[ii, jj + 1])
    }
  }
  ## Cell centers: average of the four surrounding nodes.
  for (jj in 1:(nc - 1)) {
    for (ii in 1:(nr - 1)) {
      fine.x[2 * ii, 2 * jj] = 0.25 * (x[ii, jj] + x[ii, jj + 1] + x[ii + 1, jj] + x[ii + 1, jj + 1])
      fine.y[2 * ii, 2 * jj] = 0.25 * (y[ii, jj] + y[ii, jj + 1] + y[ii + 1, jj] + y[ii + 1, jj + 1])
      fine.z[2 * ii, 2 * jj] = 0.25 * (z[ii, jj] + z[ii, jj + 1] + z[ii + 1, jj] + z[ii + 1, jj + 1])
    }
  }

  list(x = fine.x, y = fine.y, z = fine.z)
}
#' @title Calculate subsampled grid
#' @description Halves the resolution of a 2-D grid. With `approx = TRUE` every
#'  second node is taken directly; otherwise each output node is the average of
#'  its 2x2 block of input nodes (NA-tolerant for z). Returns list(x, y, z).
#' @param x,y optional coordinate arrays or vectors; default to index coordinates
#' @param z matrix of values to subsample
#' @param approx take every other node instead of averaging 2x2 blocks
#' @author Thomas Bryce Kelly
#' @export
grid.subsample = function(x = NULL, y = NULL, z, approx = F) {
  if (is.null(dim(z))) {stop('grid.refinement: z must be an array object of two dimensions.')}
  nz = dim(z)

  ## Default coordinates are simple indices, promoted to full 2-D arrays.
  if (is.null(x) & is.null(y)) {
    x = c(1:nz[1])
    y = c(1:nz[2])
  }
  if (is.null(dim(x)) & is.null(dim(y))) {
    x = array(x, dim = nz)
    y = t(array(y, dim = rev(nz)))
  }

  ## Output is half the resolution (rounded down).
  coarse.x = array(0, dim = floor(c(dim(x)[1], dim(x)[2]) / 2))
  coarse.y = coarse.x
  coarse.z = coarse.x

  for (jj in 1:dim(coarse.x)[2]) {
    for (ii in 1:dim(coarse.x)[1]) {
      if (approx) {
        ## Decimate: keep every second node.
        coarse.x[ii, jj] = x[2 * ii, 2 * jj]
        coarse.y[ii, jj] = y[2 * ii, 2 * jj]
        coarse.z[ii, jj] = z[2 * ii, 2 * jj]
      } else {
        ## Block average of the 2x2 neighborhood.
        coarse.x[ii, jj] = 0.25 * (x[2 * ii, 2 * jj] + x[2 * ii - 1, 2 * jj] + x[2 * ii, 2 * jj - 1] + x[2 * ii - 1, 2 * jj - 1])
        coarse.y[ii, jj] = 0.25 * (y[2 * ii, 2 * jj] + y[2 * ii - 1, 2 * jj] + y[2 * ii, 2 * jj - 1] + y[2 * ii - 1, 2 * jj - 1])
        coarse.z[ii, jj] = mean(c(z[2 * ii, 2 * jj], z[2 * ii - 1, 2 * jj], z[2 * ii, 2 * jj - 1], z[2 * ii - 1, 2 * jj - 1]), na.rm = TRUE)
      }
    }
  }

  list(x = coarse.x, y = coarse.y, z = coarse.z)
}
#' @title Calculate Along-Section Distance
#' @description Assigns a per-sample distance value along a section of repeated
#'  stations: samples before the first position change get 0, later samples get
#'  the distance between the bounding station positions (via calc.dist).
#' @param lon,lat position vectors (a scalar is recycled to the other's length)
#' @export
calc.section.dist = function(lon, lat) {
  ## Allow a scalar lon or lat paired with a vector of the other.
  if (length(lon) == 1) { lon = rep(lon, length(lat))}
  if (length(lat) == 1) { lat = rep(lat, length(lon))}
  if (length(lon) != length(lat)) { stop('Length of lon/lat are not the same!')}

  d = rep(NA, length(lon))
  ## Indices where the position changes, i.e. station boundaries.
  l = which(diff(lon) != 0 | diff(lat) != 0)
  d[1:(l[1]-1)] = 0 ## same station

  ## Bug fix: was `lenght(l)`, an undefined function that crashed at runtime
  ## whenever more than one station boundary exists.
  if (length(l) > 1) {
    for (i in 2:length(l)) {
      d[l[i-1]:(l[i]-1)] = calc.dist(lon[c(l[i],l[i]+1)], lat[c(l[i],l[i]+1)])
    }
  }
  ## return
  d
}
#' @title Longitudinal Step Distance
#' @description Returns the absolute difference between consecutive `lon`
#'  values (a vector of length(lon) - 1).
#' NOTE(review): `lat` is accepted but never used, so the "distance" is in
#'  degrees of longitude only -- confirm whether a great-circle distance was
#'  intended here.
#' @export
calc.dist = function(lon, lat) {
  sapply(1:(length(lon)-1), function(x) {abs(lon[x+1] - lon[x])})
}
#' @title Retrieve depth value from bathymetric grid
#' @description Nearest-neighbor lookup of depth for each lon/lat pair.
#' @param lon vector of longitudes
#' @param lat vector of latitudes (same length as lon)
#' @param bathy a bathymetry object with fields $Lon, $Lat and value matrix $Z
#'  indexed as Z[lon index, lat index]
#' @export
get.depth = function(lon, lat, bathy) {
  depths = rep(NA, length(lon))
  ## Bug fix: was `lnegth(lon)`, an undefined function that crashed at runtime.
  for (i in seq_along(lon)) {
    ## Nearest grid node in each dimension.
    k1 = which.min(abs(lon[i] - bathy$Lon))
    k2 = which.min(abs(lat[i] - bathy$Lat))
    depths[i] = bathy$Z[k1,k2]
  }
  ## Return
  depths
}
|
/R/map.misc.R
|
no_license
|
tbrycekelly/TheSource
|
R
| false
| false
| 9,137
|
r
|
#' @title Load Coastline Data
#' @description Low res to high res: coastlineWorld, coastlineWorldCoarse, coastlineWorldMedium, coastlineWorldFine
#' @author Laura Whitmore
#' @author Thomas Bryce Kelly
#' @param coast a string referring to a coastline data object such as those provided by the OCE package.
#' @return Invisibly, the character name(s) of the dataset(s) loaded (the return value of `data()`).
#' @import oce
#' @import ocedata
get.coast = function(coast = 'coastlineWorld') {
  ## Load the named dataset into the caller's workspace via data().
  do.call('data', list(coast))
}
#' @title Make Map Projection
#' @description Builds a proj4-style projection string from a projection name
#'  (or its numeric index into the supported list) plus reference coordinates.
#' @author Thomas Bryce Kelly
#' @param projection A string or number corresponding to a base projection (e.g. 1 = 'merc')
#' @param lat the primary latitude for the projection, may or may not be applicable based on projection
#' @param lon same as lat for longitude
#' @param h The viewing height in meters (used only for some projections)
#' @param dlat The distance to the secondary latitude (in degrees). Only applicable to some projections
#' @return A proj4 string, e.g. "+proj=merc +lon_0=0 +lat_0=0 +lat_1=0 +lat_2=10 +h=1e+08".
#' @export
make.proj = function(projection = NULL, lat = NULL, lon = NULL, h = NULL, dlat = 10) {
  if (is.null(projection)) { projection = 'merc' }
  projections = c('merc', 'aea', 'eck3', 'eck4', 'eqc', 'geos', 'lonlat', 'mill', 'natearth', 'nsper', 'stere', 'ortho')
  projection.list = c('1) merc', '2) aea', '3) eck3', '4) eck4', '5) eqc', '6) geos', '7) lonlat', '8) mill', '9) natearth', '10) nsper', '11) stere', '12) ortho')

  ## Allow a numeric index into the projection table.
  if (is.numeric(projection)) {
    projection = projections[projection]
  }
  ## Bug fix: guard against NA (out-of-range numeric index) and add the
  ## missing space after the colon in the message.
  if (is.na(projection) || !projection %in% projections) {
    message('Unknown projection type, recommend to use: ', paste(projection.list, collapse = ', '))
  }

  ## Perspective projections need a viewing height. Bug fix: use the scalar,
  ## short-circuiting && instead of the elementwise & in these if() conditions.
  if (identical(projection, 'geos') && is.null(h)) { h = 1e8 }
  if (identical(projection, 'nsper') && is.null(h)) { h = 1e8 }

  # Default values for any remaining unset parameters
  if (is.null(lat)) { lat = 0 }
  if (is.null(lon)) { lon = 0 }
  if (is.null(h)) { h = 1e8 }

  paste0('+proj=', projection, ' +lon_0=', lon, ' +lat_0=', lat, ' +lat_1=', lat, ' +lat_2=', lat + dlat, ' +h=', h)
}
#' @title Bilinear Interpolation of a Grid
#' @description For each query point (x[i], y[i]) interpolates a value from the
#'  gridded field `z` (dimensioned length(gx) x length(gy)) using the two
#'  bracketing grid nodes in each direction. Query points beyond the grid edge
#'  are clamped to the nearest edge node.
#' @param x,y coordinates of the query points (equal length vectors)
#' @param gx,gy grid node coordinates (ascending)
#' @param z matrix of grid values indexed as z[ix, iy]
#' @author Thomas Bryce Kelly
#' @export
interp.bilinear = function(x, y, gx, gy, z) {
  z.out = rep(0, length(x))

  for (i in 1:length(x)) {
    ## Index of the last grid node at or below the query (0 when below the grid).
    x1 = max(0, which(gx <= x[i]))
    x2 = x1 + 1
    y1 = max(0, which(gy <= y[i]))
    y2 = y1 + 1

    ## Linear weights toward the lower node. NOTE(review): the denominator is
    ## always the first grid spacing, so this assumes a uniformly spaced
    ## grid -- confirm before using with irregular gx/gy.
    wx1 = (gx[x2] - x[i]) / (gx[2] - gx[1])
    wy1 = (gy[y2] - y[i]) / (gy[2] - gy[1])

    ## Clamp queries below the first node to the first node.
    if (x1 == 0) {
      x1 = 1
      wx1 = 1
    }
    if (y1 == 0) {
      y1 = 1
      wy1 = 1
    }
    ## Clamp queries at/after the last node to the last node.
    if(x1 == length(gx)) {
      x2 = x1
      wx1 = 1
    }
    if(y1 == length(gy)) {
      y2 = y1
      wy1 = 1
    }

    ## Weighted average of the (up to) four surrounding nodes.
    z.out[i] = wy1 * (wx1 * z[x1, y1] + (1 - wx1) * z[x2,y1]) + (1 - wy1) * (wx1 * z[x1, y2] + (1 - wx1) * z[x2,y2])
  }

  z.out
}
#' @title Interpolate from fractional index onto grid
#' @description Given fractional (i, j) index positions into `grid`, returns a
#'  bilinearly weighted value blending the nearest node with its neighbor in
#'  the direction of the fractional offset.
#' @param grid a matrix of values
#' @param i,j fractional row/column indices (equal length vectors)
#' @author Thomas Bryce Kelly
#' @export
grid.interp = function(grid, i, j) {
  val = rep(NA, length(i))

  ## Nearest node and the signed fractional offset from it (in [-0.5, 0.5]).
  x = round(i)
  y = round(j)
  dx = i - x
  dy = j - y

  for (k in 1:length(i)) {
    ## Blend toward the neighbor indicated by sign(dx)/sign(dy). When the
    ## offset is exactly zero, sign() is 0 and the "neighbor" is the node
    ## itself, which is harmless (its weight is also 0).
    val[k] = (1 - abs(dy[k])) * ((1 - abs(dx[k])) * grid[cbind(x[k], y[k])] + abs(dx[k]) * grid[cbind(x[k] + sign(dx[k]), y[k])]) +
      abs(dy[k]) * ((1 - abs(dx[k])) * grid[cbind(x[k], y[k] + sign(dy[k]))] + abs(dx[k]) * grid[cbind(x[k] + sign(dx[k]), y[k] + sign(dy[k]))])
  }
  ## Return
  val
}
#' @title Calculate extended grid
#' @description Builds the (n+1) x (m+1) array of cell-corner ("vertex")
#'  coordinates for an n x m array of cell-center coordinates by stepping half
#'  a grid spacing from each center. Returns list(x, y) of vertex coordinates.
#' NOTE(review): index arithmetic such as dx.dx[ii, j-2] requires at least a
#'  2 x 2 array of centers; smaller inputs will error -- confirm callers
#'  guarantee this.
#' @param x matrix of cell-center x coordinates
#' @param y matrix of cell-center y coordinates
#' @author Thomas Bryce Kelly
#' @export
calc.vertex = function(x, y) {
  ## Diffs: half the local grid spacing of each coordinate along each dimension.
  dx.dx = t(diff(t(x))) / 2   # change of x along columns
  dx.dy = diff(x) / 2         # change of x along rows
  dy.dx = t(diff(t(y))) / 2   # change of y along columns
  dy.dy = diff(y) / 2         # change of y along rows

  ## Vertex arrays are one larger than the center arrays in each dimension.
  vertex.x = matrix(NA, nrow = dim(x)[1]+1, ncol = dim(x)[2]+1)
  vertex.y = matrix(NA, nrow = dim(y)[1]+1, ncol = dim(y)[2]+1)

  ## Field: each interior vertex extrapolated from the center at (i-1, j-1).
  for (i in 2:(dim(vertex.x)[1] - 1)) {
    for (j in 2:(dim(vertex.x)[2] - 1)) {
      ii = max(i-1, 1)
      jj = max(j-1, 1)
      vertex.x[i,j] = x[ii,jj] + dx.dx[ii,jj] + dx.dy[ii,jj]
      vertex.y[i,j] = y[ii,jj] + dy.dx[ii,jj] + dy.dy[ii,jj]
    }
  }

  ## Fill in perimeter
  # i = 1: first row, extrapolate backwards along rows.
  for (j in 2:(dim(vertex.x)[2] - 1)) {
    jj = max(j-1, 1)
    vertex.x[1,j] = x[1,jj] + dx.dx[1,jj] - dx.dy[1,jj]
    vertex.y[1,j] = y[1,jj] + dy.dx[1,jj] - dy.dy[1,jj]
  }
  # j = 1: first column, extrapolate backwards along columns.
  for (i in 2:(dim(vertex.x)[1] - 1)) {
    ii = max(i-1, 1)
    vertex.x[i,1] = x[ii,1] - dx.dx[ii,1] + dx.dy[ii,1]
    vertex.y[i,1] = y[ii,1] - dy.dx[ii,1] + dy.dy[ii,1]
  }
  # j = dim(vertex.x)[2]: last column, reuse the last interior spacing.
  for (i in 1:(dim(vertex.x)[1] - 1)) {
    ii = max(i-1, 1)
    j = dim(vertex.x)[2]
    vertex.x[i,j] = x[ii,j-1] + dx.dx[ii,j-2] + dx.dy[ii,j-1]
    vertex.y[i,j] = y[ii,j-1] + dy.dx[ii,j-2] + dy.dy[ii,j-1]
  }
  # i = dim(vertex.x)[1]: last row, reuse the last interior spacing.
  for (j in 1:(dim(vertex.x)[2] - 1)) {
    jj = max(j-1, 1)
    i = dim(vertex.x)[1]
    vertex.x[i,j] = x[i-1,jj] + dx.dx[i-1,jj] + dx.dy[i-2,jj]
    vertex.y[i,j] = y[i-1,jj] + dy.dx[i-1,jj] + dy.dy[i-2,jj]
  }

  ## Fill in corners
  ## both = 1 (top-left)
  vertex.x[1,1] = x[1,1] - dx.dx[1,1] - dx.dy[1,1]
  vertex.y[1,1] = y[1,1] - dy.dx[1,1] - dy.dy[1,1]
  ## bottom-left
  i = dim(vertex.x)[1]
  vertex.x[i,1] = x[i-1,1] - dx.dx[i-1,1] + dx.dy[i-2,1]
  vertex.y[i,1] = y[i-1,1] - dy.dx[i-1,1] + dy.dy[i-2,1]
  ## bottom-right
  i = dim(vertex.x)[1]
  j = dim(vertex.x)[2]
  vertex.x[i,j] = x[i-1,j-1] + dx.dx[i-1,j-2] + dx.dy[i-2,j-1]
  vertex.y[i,j] = y[i-1,j-1] + dy.dx[i-1,j-2] + dy.dy[i-2,j-1]
  ## top-right
  j = dim(vertex.x)[2]
  vertex.x[1,j] = x[1,j-1] + dx.dx[1,j-2] - dx.dy[1,j-1]
  vertex.y[1,j] = y[1,j-1] + dy.dx[1,j-2] - dy.dy[1,j-1]

  list(x = vertex.x, y = vertex.y)
}
#' @title Calculate refined (2x) grid
#' @description Doubles the resolution of a 2-D grid by inserting linearly
#'  interpolated midpoints between adjacent nodes. Returns list(x, y, z)
#'  where each array has dimensions (2*nrow-1) x (2*ncol-1).
#' @param x,y optional coordinate arrays or vectors; default to index coordinates
#' @param z matrix of values to refine
#' @author Thomas Bryce Kelly
#' @export
grid.refinement = function(x = NULL, y = NULL, z) {
  if (is.null(dim(z))) {stop('grid.refinement: z must be an array object of two dimensions.')}
  nz = dim(z)

  ## Default coordinates are simple indices.
  if (is.null(x) & is.null(y)) {
    x = c(1:nz[1])
    y = c(1:nz[2])
  }
  ## Promote coordinate vectors to full 2-D arrays.
  if (is.null(dim(x)) & is.null(dim(y))) {
    x = array(x, dim = nz)
    y = t(array(y, dim = rev(nz)))
  }

  nr = dim(x)[1]
  nc = dim(x)[2]
  fine.x = array(0, dim = c(2 * nr - 1, 2 * nc - 1))
  fine.y = fine.x
  fine.z = fine.x

  ## Original nodes land on the odd rows/columns of the refined grid.
  for (jj in 1:nc) {
    for (ii in 1:nr) {
      fine.x[2 * ii - 1, 2 * jj - 1] = x[ii, jj]
      fine.y[2 * ii - 1, 2 * jj - 1] = y[ii, jj]
      fine.z[2 * ii - 1, 2 * jj - 1] = z[ii, jj]
    }
  }
  ## Midpoints between vertically adjacent nodes.
  for (jj in 1:nc) {
    for (ii in 1:(nr - 1)) {
      fine.x[2 * ii, 2 * jj - 1] = 0.5 * (x[ii, jj] + x[ii + 1, jj])
      fine.y[2 * ii, 2 * jj - 1] = 0.5 * (y[ii, jj] + y[ii + 1, jj])
      fine.z[2 * ii, 2 * jj - 1] = 0.5 * (z[ii, jj] + z[ii + 1, jj])
    }
  }
  ## Midpoints between horizontally adjacent nodes.
  for (jj in 1:(nc - 1)) {
    for (ii in 1:nr) {
      fine.x[2 * ii - 1, 2 * jj] = 0.5 * (x[ii, jj] + x[ii, jj + 1])
      fine.y[2 * ii - 1, 2 * jj] = 0.5 * (y[ii, jj] + y[ii, jj + 1])
      fine.z[2 * ii - 1, 2 * jj] = 0.5 * (z[ii, jj] + z[ii, jj + 1])
    }
  }
  ## Cell centers: average of the four surrounding nodes.
  for (jj in 1:(nc - 1)) {
    for (ii in 1:(nr - 1)) {
      fine.x[2 * ii, 2 * jj] = 0.25 * (x[ii, jj] + x[ii, jj + 1] + x[ii + 1, jj] + x[ii + 1, jj + 1])
      fine.y[2 * ii, 2 * jj] = 0.25 * (y[ii, jj] + y[ii, jj + 1] + y[ii + 1, jj] + y[ii + 1, jj + 1])
      fine.z[2 * ii, 2 * jj] = 0.25 * (z[ii, jj] + z[ii, jj + 1] + z[ii + 1, jj] + z[ii + 1, jj + 1])
    }
  }

  list(x = fine.x, y = fine.y, z = fine.z)
}
#' @title Calculate subsampled grid
#' @description Halves the resolution of a 2-D grid. With `approx = TRUE` every
#'  second node is taken directly; otherwise each output node is the average of
#'  its 2x2 block of input nodes (NA-tolerant for z). Returns list(x, y, z).
#' @param x,y optional coordinate arrays or vectors; default to index coordinates
#' @param z matrix of values to subsample
#' @param approx take every other node instead of averaging 2x2 blocks
#' @author Thomas Bryce Kelly
#' @export
grid.subsample = function(x = NULL, y = NULL, z, approx = F) {
  if (is.null(dim(z))) {stop('grid.refinement: z must be an array object of two dimensions.')}
  nz = dim(z)

  ## Default coordinates are simple indices, promoted to full 2-D arrays.
  if (is.null(x) & is.null(y)) {
    x = c(1:nz[1])
    y = c(1:nz[2])
  }
  if (is.null(dim(x)) & is.null(dim(y))) {
    x = array(x, dim = nz)
    y = t(array(y, dim = rev(nz)))
  }

  ## Output is half the resolution (rounded down).
  coarse.x = array(0, dim = floor(c(dim(x)[1], dim(x)[2]) / 2))
  coarse.y = coarse.x
  coarse.z = coarse.x

  for (jj in 1:dim(coarse.x)[2]) {
    for (ii in 1:dim(coarse.x)[1]) {
      if (approx) {
        ## Decimate: keep every second node.
        coarse.x[ii, jj] = x[2 * ii, 2 * jj]
        coarse.y[ii, jj] = y[2 * ii, 2 * jj]
        coarse.z[ii, jj] = z[2 * ii, 2 * jj]
      } else {
        ## Block average of the 2x2 neighborhood.
        coarse.x[ii, jj] = 0.25 * (x[2 * ii, 2 * jj] + x[2 * ii - 1, 2 * jj] + x[2 * ii, 2 * jj - 1] + x[2 * ii - 1, 2 * jj - 1])
        coarse.y[ii, jj] = 0.25 * (y[2 * ii, 2 * jj] + y[2 * ii - 1, 2 * jj] + y[2 * ii, 2 * jj - 1] + y[2 * ii - 1, 2 * jj - 1])
        coarse.z[ii, jj] = mean(c(z[2 * ii, 2 * jj], z[2 * ii - 1, 2 * jj], z[2 * ii, 2 * jj - 1], z[2 * ii - 1, 2 * jj - 1]), na.rm = TRUE)
      }
    }
  }

  list(x = coarse.x, y = coarse.y, z = coarse.z)
}
#' @title Calculate Along-Section Distance
#' @description Assigns a per-sample distance value along a section of repeated
#'  stations: samples before the first position change get 0, later samples get
#'  the distance between the bounding station positions (via calc.dist).
#' @param lon,lat position vectors (a scalar is recycled to the other's length)
#' @export
calc.section.dist = function(lon, lat) {
  ## Allow a scalar lon or lat paired with a vector of the other.
  if (length(lon) == 1) { lon = rep(lon, length(lat))}
  if (length(lat) == 1) { lat = rep(lat, length(lon))}
  if (length(lon) != length(lat)) { stop('Length of lon/lat are not the same!')}

  d = rep(NA, length(lon))
  ## Indices where the position changes, i.e. station boundaries.
  l = which(diff(lon) != 0 | diff(lat) != 0)
  d[1:(l[1]-1)] = 0 ## same station

  ## Bug fix: was `lenght(l)`, an undefined function that crashed at runtime
  ## whenever more than one station boundary exists.
  if (length(l) > 1) {
    for (i in 2:length(l)) {
      d[l[i-1]:(l[i]-1)] = calc.dist(lon[c(l[i],l[i]+1)], lat[c(l[i],l[i]+1)])
    }
  }
  ## return
  d
}
#' @title Longitudinal Step Distance
#' @description Returns the absolute difference between consecutive `lon`
#'  values (a vector of length(lon) - 1).
#' NOTE(review): `lat` is accepted but never used, so the "distance" is in
#'  degrees of longitude only -- confirm whether a great-circle distance was
#'  intended here.
#' @export
calc.dist = function(lon, lat) {
  sapply(1:(length(lon)-1), function(x) {abs(lon[x+1] - lon[x])})
}
#' @title Retrieve depth value from bathymetric grid
#' @description Nearest-neighbor lookup of depth for each lon/lat pair.
#' @param lon vector of longitudes
#' @param lat vector of latitudes (same length as lon)
#' @param bathy a bathymetry object with fields $Lon, $Lat and value matrix $Z
#'  indexed as Z[lon index, lat index]
#' @export
get.depth = function(lon, lat, bathy) {
  depths = rep(NA, length(lon))
  ## Bug fix: was `lnegth(lon)`, an undefined function that crashed at runtime.
  for (i in seq_along(lon)) {
    ## Nearest grid node in each dimension.
    k1 = which.min(abs(lon[i] - bathy$Lon))
    k2 = which.min(abs(lat[i] - bathy$Lat))
    depths[i] = bathy$Z[k1,k2]
  }
  ## Return
  depths
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/raincloud_2x2_repmes.R
\name{raincloud_2x2_repmes}
\alias{raincloud_2x2_repmes}
\title{2 x 2 repeated measures raincloud}
\arguments{
\item{data_2x2}{<data.frame> the array of datapoints to be plotted}
\item{colors}{<string> concatenated string for both colors}
\item{fills}{<string> concatenated string for both fills}
\item{line_color}{<string> color lines}
\item{line_alpha}{<numeric> alpha lines}
\item{size}{<numeric> data size}
\item{alpha}{<numeric> data alpha}
\item{spread_x_ticks}{<bool> TRUE if 4 x ticks, FALSE if 2 x ticks}
}
\description{
This function visualizes a 2x2 repeated measures raincloud.
}
\examples{
\dontrun{
# Using an example dataset
raincloud_2x2_repmes <- function(data_2x2,
colors = (c('dodgerblue', 'darkorange',
'dodgerblue', 'darkorange')),
fills = (c('dodgerblue', 'darkorange',
'dodgerblue', 'darkorange')),
line_color = 'gray',
line_alpha = .3,
size = 1.5,
alpha = .6,
spread_x_ticks = TRUE)
}
}
|
/man/raincloud_2x2_repmes.Rd
|
permissive
|
GuangtengMeng/raincloudplots
|
R
| false
| true
| 1,308
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/raincloud_2x2_repmes.R
\name{raincloud_2x2_repmes}
\alias{raincloud_2x2_repmes}
\title{2 x 2 repeated measures raincloud}
\arguments{
\item{data_2x2}{<data.frame> the array of datapoints to be plotted}
\item{colors}{<string> concatenated string for both colors}
\item{fills}{<string> concatenated string for both fills}
\item{line_color}{<string> color lines}
\item{line_alpha}{<numeric> alpha lines}
\item{size}{<numeric> data size}
\item{alpha}{<numeric> data alpha}
\item{spread_x_ticks}{<bool> TRUE if 4 x ticks, FALSE if 2 x ticks}
}
\description{
This function visualizes a 2x2 repeated measures raincloud.
}
\examples{
\dontrun{
# Using an example dataset
raincloud_2x2_repmes <- function(data_2x2,
colors = (c('dodgerblue', 'darkorange',
'dodgerblue', 'darkorange')),
fills = (c('dodgerblue', 'darkorange',
'dodgerblue', 'darkorange')),
line_color = 'gray',
line_alpha = .3,
size = 1.5,
alpha = .6,
spread_x_ticks = TRUE)
}
}
|
#######################################################################################################################
# It gives us the orders p, q, P and Q of the best ARIMA (p, d, q)x(P, D, Q)_s according to one of the following criteria: AIC, AICC, BIC.
# It works with ARIMAs with constant term and d+D!=0, besides the ordinary cases.
#
# Arguments:
# x ==> vector or object of the class "time series", to which we want to fit an ARIMA
# order.max ==> vector of length 3:
# order.max[1] ==> max.p
# order.max[2] ==> d
# order.max[3] ==> max.q
# seasonal$order.max ==> vector of length 3:
# [1] ==> max.P
# [2] ==> D
# [3] ==> max.Q
#
# seasonal$period ==> period of the seasonal component
#
# include.mean ==> in reference to the inclusion or not of the mean/constant in the ARIMA. By default, TRUE. If d+D != 0, it does not allow mean/constant
# criterio ==> criterion to be chosen in the selection of the model: "AIC", "AICC" or "BIC". By default, BIC.
# dist.max.crit ==> the function will give the orders of all the ARMA models whose criterium function has a value which differs from the minimum at most dist.max.crit units
# By default, 2.
# method ==> estimation method: it should be CSS-ML or ML, although it also allows CSS. By default, CSS-ML.
# Output: orders and values of the criterion function for the selected ARMAs.
#
########################################################################################################################
# Searches all (p, q, P, Q) combinations up to the given maxima, fits each
# ARIMA(p,d,q)x(P,D,Q)_s with stats::arima(), and returns the orders and
# criterion values (AIC/AICC/BIC) of every model within dist.max.crit of the
# best one, sorted best-first.
best.arima <- function(x=x, order.max=c(0,0,0), seasonal=list(order.max=c(0,0,0), period=1), include.mean=NULL, criterio=NULL, dist.max.crit=NULL, method=NULL)
{
  ## Resolve defaults for the NULL-sentinel arguments.
  if (is.null(include.mean)) include.mean <- TRUE
  if (is.null(criterio)) criterio <- "BIC"
  if (is.null(dist.max.crit)) dist.max.crit <- 2

  ## Unpack the order limits: p/q/P/Q are searched, d/D are fixed.
  p.max <- order.max[1]
  d <- order.max[2]
  q.max <- order.max[3]
  P.max <- seasonal$order.max[1]
  D <- seasonal$order.max[2]
  Q.max <- seasonal$order.max[3]
  if (is.ts(x)) period <- frequency(x) else period <- seasonal$period

  ## Effective sample size after ordinary (d) and seasonal (period*D)
  ## differencing. Renamed from `T` to avoid shadowing the TRUE shorthand.
  num.x.perdidos <- d + period*D
  n.obs <- length(x) - num.x.perdidos

  ## One row per (p, q, P, Q) combination: orders plus the criterion value.
  VALORES.CRITERIO <- matrix(0, (p.max+1)*(q.max+1)*(P.max+1)*(Q.max+1), 5)

  ## Per-parameter penalty factor for AIC/BIC (the AICC factor depends on the
  ## fitted model and is computed inside the loop).
  if (criterio=="AIC") factor <- 2
  else if (criterio=="BIC") factor <- log(n.obs)

  fila <- 0
  for (p in 0:p.max)
    for (q in 0:q.max)
      for (P in 0:P.max)
        for (Q in 0:Q.max)
        {
          fila <- fila + 1
          ajuste <- try(arima(x=x, order=c(p,d,q), seasonal=list(order=c(P,D,Q), period=period), include.mean=include.mean, method=method), silent=TRUE)
          ## Bug fix: use inherits() instead of class(ajuste) == "try-error";
          ## class() can return a vector of length > 1, which makes `==`
          ## unreliable inside an if().
          if (inherits(ajuste, "try-error")) {
            VALORES.CRITERIO[fila, ] <- c(p, q, P, Q, NaN)
            next
          }
          ## AICC penalty depends on the number of fitted parameters.
          if (criterio=="AICC") factor <- 2*n.obs/(n.obs-length(ajuste$coef)-2)
          criterio.ajuste <- -2*ajuste$loglik + factor*(length(ajuste$coef)+1)
          VALORES.CRITERIO[fila, ] <- c(p, q, P, Q, criterio.ajuste)
        }

  ## Keep every model whose criterion is within dist.max.crit of the minimum.
  DISTANCIAS.AL.MINIMO <- VALORES.CRITERIO[,5] - min(VALORES.CRITERIO[,5], na.rm=TRUE)
  FILAS.OK <- rep(FALSE, length=(p.max+1)*(q.max+1)*(P.max+1)*(Q.max+1))
  FILAS.OK[DISTANCIAS.AL.MINIMO <= dist.max.crit] <- TRUE
  VALORES.CRITERIO.OK <- VALORES.CRITERIO[FILAS.OK,]
  if (!is.matrix(VALORES.CRITERIO.OK)) VALORES.CRITERIO.OK <- t(as.matrix(VALORES.CRITERIO.OK))

  ## Sort ascending by criterion value, i.e. best model first.
  DISTANCIA.MAXIMA.OK <- VALORES.CRITERIO.OK[order(VALORES.CRITERIO.OK[,5]),]
  if (!is.matrix(DISTANCIA.MAXIMA.OK)) DISTANCIA.MAXIMA.OK <- t(as.matrix(DISTANCIA.MAXIMA.OK))
  DISTANCIA.MAXIMA.OK <- data.frame(DISTANCIA.MAXIMA.OK)
  if (criterio=="AIC") names(DISTANCIA.MAXIMA.OK) <- c("p", "q", "P", "Q", "AIC")
  else if (criterio=="AICC") names(DISTANCIA.MAXIMA.OK) <- c("p", "q", "P", "Q", "AICC")
  else names(DISTANCIA.MAXIMA.OK) <- c("p", "q", "P", "Q", "BIC")

  ## Drop order columns whose search range was degenerate (max == 0); a column
  ## index of 0 is silently dropped by `[`.
  return(DISTANCIA.MAXIMA.OK[,c(1*(p.max!=0), 2*(q.max!=0), 3*(P.max!=0), 4*(Q.max!=0), 5)])
}
|
/PLRModels/R/best.arima.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 4,966
|
r
|
#######################################################################################################################
# It gives us the orders p, q, P and Q of the best ARIMA (p, d, q)x(P, D, Q)_s according to one of the following criteria: AIC, AICC, BIC.
# It works with ARIMAs with constant term and d+D!=0, besides the ordinary cases.
#
# Arguments:
# x ==> vector or object of the class "time series", to which we want to fit an ARIMA
# order.max ==> vector of length 3:
# order.max[1] ==> max.p
# order.max[2] ==> d
# order.max[3] ==> max.q
# seasonal$order.max ==> vector of length 3:
# [1] ==> max.P
# [2] ==> D
# [3] ==> max.Q
#
# seasonal$period ==> period of the seasonal component
#
# include.mean ==> in reference to the inclusion or not of the mean/constant in the ARIMA. By default, TRUE. If d+D != 0, it does not allow mean/constant
# criterio ==> criterion to be chosen in the selection of the model: "AIC", "AICC" or "BIC". By default, BIC.
# dist.max.crit ==> the function will give the orders of all the ARMA models whose criterium function has a value which differs from the minimum at most dist.max.crit units
# By default, 2.
# method ==> estimation method: it should be CSS-ML or ML, although it also allows CSS. By default, CSS-ML.
# Output: orders and values of the criterion function for the selected ARMAs.
#
########################################################################################################################
# Searches all (p, q, P, Q) combinations up to the given maxima, fits each
# ARIMA(p,d,q)x(P,D,Q)_s with stats::arima(), and returns the orders and
# criterion values (AIC/AICC/BIC) of every model within dist.max.crit of the
# best one, sorted best-first.
best.arima <- function(x=x, order.max=c(0,0,0), seasonal=list(order.max=c(0,0,0), period=1), include.mean=NULL, criterio=NULL, dist.max.crit=NULL, method=NULL)
{
  ## Resolve defaults for the NULL-sentinel arguments.
  if (is.null(include.mean)) include.mean <- TRUE
  if (is.null(criterio)) criterio <- "BIC"
  if (is.null(dist.max.crit)) dist.max.crit <- 2

  ## Unpack the order limits: p/q/P/Q are searched, d/D are fixed.
  p.max <- order.max[1]
  d <- order.max[2]
  q.max <- order.max[3]
  P.max <- seasonal$order.max[1]
  D <- seasonal$order.max[2]
  Q.max <- seasonal$order.max[3]
  if (is.ts(x)) period <- frequency(x) else period <- seasonal$period

  ## Effective sample size after ordinary (d) and seasonal (period*D)
  ## differencing. Renamed from `T` to avoid shadowing the TRUE shorthand.
  num.x.perdidos <- d + period*D
  n.obs <- length(x) - num.x.perdidos

  ## One row per (p, q, P, Q) combination: orders plus the criterion value.
  VALORES.CRITERIO <- matrix(0, (p.max+1)*(q.max+1)*(P.max+1)*(Q.max+1), 5)

  ## Per-parameter penalty factor for AIC/BIC (the AICC factor depends on the
  ## fitted model and is computed inside the loop).
  if (criterio=="AIC") factor <- 2
  else if (criterio=="BIC") factor <- log(n.obs)

  fila <- 0
  for (p in 0:p.max)
    for (q in 0:q.max)
      for (P in 0:P.max)
        for (Q in 0:Q.max)
        {
          fila <- fila + 1
          ajuste <- try(arima(x=x, order=c(p,d,q), seasonal=list(order=c(P,D,Q), period=period), include.mean=include.mean, method=method), silent=TRUE)
          ## Bug fix: use inherits() instead of class(ajuste) == "try-error";
          ## class() can return a vector of length > 1, which makes `==`
          ## unreliable inside an if().
          if (inherits(ajuste, "try-error")) {
            VALORES.CRITERIO[fila, ] <- c(p, q, P, Q, NaN)
            next
          }
          ## AICC penalty depends on the number of fitted parameters.
          if (criterio=="AICC") factor <- 2*n.obs/(n.obs-length(ajuste$coef)-2)
          criterio.ajuste <- -2*ajuste$loglik + factor*(length(ajuste$coef)+1)
          VALORES.CRITERIO[fila, ] <- c(p, q, P, Q, criterio.ajuste)
        }

  ## Keep every model whose criterion is within dist.max.crit of the minimum.
  DISTANCIAS.AL.MINIMO <- VALORES.CRITERIO[,5] - min(VALORES.CRITERIO[,5], na.rm=TRUE)
  FILAS.OK <- rep(FALSE, length=(p.max+1)*(q.max+1)*(P.max+1)*(Q.max+1))
  FILAS.OK[DISTANCIAS.AL.MINIMO <= dist.max.crit] <- TRUE
  VALORES.CRITERIO.OK <- VALORES.CRITERIO[FILAS.OK,]
  if (!is.matrix(VALORES.CRITERIO.OK)) VALORES.CRITERIO.OK <- t(as.matrix(VALORES.CRITERIO.OK))

  ## Sort ascending by criterion value, i.e. best model first.
  DISTANCIA.MAXIMA.OK <- VALORES.CRITERIO.OK[order(VALORES.CRITERIO.OK[,5]),]
  if (!is.matrix(DISTANCIA.MAXIMA.OK)) DISTANCIA.MAXIMA.OK <- t(as.matrix(DISTANCIA.MAXIMA.OK))
  DISTANCIA.MAXIMA.OK <- data.frame(DISTANCIA.MAXIMA.OK)
  if (criterio=="AIC") names(DISTANCIA.MAXIMA.OK) <- c("p", "q", "P", "Q", "AIC")
  else if (criterio=="AICC") names(DISTANCIA.MAXIMA.OK) <- c("p", "q", "P", "Q", "AICC")
  else names(DISTANCIA.MAXIMA.OK) <- c("p", "q", "P", "Q", "BIC")

  ## Drop order columns whose search range was degenerate (max == 0); a column
  ## index of 0 is silently dropped by `[`.
  return(DISTANCIA.MAXIMA.OK[,c(1*(p.max!=0), 2*(q.max!=0), 3*(P.max!=0), 4*(Q.max!=0), 5)])
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ExportMethods.R
\docType{methods}
\name{exportToBed}
\alias{exportToBed}
\alias{exportToBed,CAGEr-method}
\title{Create BED tracks of TSSs and clusters of TSSs}
\usage{
exportToBed(object, what = c("CTSS", "tagClusters", "consensusClusters"),
qLow = NULL, qUp = NULL, colorByExpressionProfile = FALSE,
oneFile = TRUE)
\S4method{exportToBed}{CAGEr}(object, what = c("CTSS", "tagClusters",
"consensusClusters"), qLow = NULL, qUp = NULL,
colorByExpressionProfile = FALSE, oneFile = TRUE)
}
\arguments{
\item{object}{A \code{\link{CAGEr}} object.}
\item{what}{Which elements should be exported to BED track. \code{CTSS} to export
individual CTSSs, \code{tagClusters} to export tag clusters or \code{consensusClusters}
to export consensus clusters.}
\item{qLow, qUp}{Position of which "lower" (resp. "upper") quantile should be
used as 5' (resp. 3') boundary of the filled block in gene-like
representation of the cluster. Default value \code{NULL} uses start (resp. end)
position of the cluster. Ignored when \code{what = "CTSS"}.}
\item{colorByExpressionProfile}{Logical, should blocks be colored in the
color of their corresponding expression class. Ignored when
\code{what = "tagClusters"}.}
\item{oneFile}{Logical, should all CAGE datasets be exported as individual
tracks into the same BED file (\code{TRUE}) or into separate BED files (\code{FALSE}).
Ignored when \code{what = "CTSS"}, which by default produces only one track.}
}
\value{
Creates BED file(s) in the working directory.
}
\description{
Creates BED file(s) with track(s) of individual CTSSs, tag
clusters or consensus clusters. CTSSs and consensus clusters can be
optionally colored in the color of their expression class. \emph{Tag clusters}
and \emph{consensus clusters} can be displayed in a gene-like representation with
a line showing full span of the cluster, filled block showing interquantile
range and a thick box denoting position of the dominant (most frequently
used) TSS.
}
\details{
The BED representations of \emph{CTSSs}, \emph{tag cluster} and
\emph{consensus clusters} can be directly visualised in the ZENBU or UCSC Genome
Browsers.
When \code{what = "CTSS"}, one BED file with single track of 1 bp blocks
representing all detected CTSSs (in all CAGE samples) is created. CTSSs can
be colored according to their expression class (provided the expression
profiling of CTSSs was done by calling \code{\link{getExpressionProfiles}} function).
Colors of expression classes match the colors in which they are shown in the
plot returned by the \code{\link{plotExpressionProfiles}} function. For
\code{colorByExpressionProfile = FALSE}, CTSSs included in the clusters are
shown in black and CTSSs that were filtered out in gray.
When \code{what = "tagClusters"}, one track per CAGE dataset is created, which can
be exported to a single BED file (by setting \code{oneFile = TRUE}) or separate
BED files (\code{FALSE}). If no quantile boundaries were provided (\code{qLow} and
\code{qUp} are \code{NULL}), TCs are represented as simple blocks showing the full
span of TC from the start to the end. Setting \code{qLow} and/or \code{qUp} parameters
to a value of the desired quantile creates a gene-like representation with a
line showing full span of the TC, filled block showing specified
interquantile range and a thick 1 bp block denoting position of the dominant
(most frequently used) TSS. All TCs in one track (one CAGE dataset) are
shown in the same color.
When \code{what = "consensusClusters"} \emph{consensus clusters} are exported to BED
file. Since there is only one set of consensus clusters common to all CAGE
datasets, only one track is created in case of a simple representation. This
means that when \code{qLow = NULL} and \code{qUp = NULL} one track with blocks showing
the full span of consensus cluster from the start to the end is created.
However, the distribution of the CAGE signal within consensus cluster can be
different in different CAGE samples, resulting in different positions of
quantiles and dominant TSS. Thus, when \code{qLow} and/or \code{qUp} parameters
are set to a value of the desired quantile, a separate track with a gene-like
representation is created for every CAGE dataset. These tracks can be
exported to a single BED file (by setting \code{oneFile = TRUE}) or separate
BED files (by setting \code{oneFile = FALSE}). The gene-like representation is
analogous to the one described above for the TCs. In all cases consensus
clusters can be colored according to their expression class (provided the
expression profiling of consensus clusters was done by calling
\code{getExpressionProfiles} function). Colors of expression classes match the
colors in which they are shown in the plot returned by the
\code{plotExpressionProfiles} function. For \code{colorByExpressionProfile = FALSE}
all consensus clusters are shown in black.
}
\examples{
### exporting CTSSs colored by expression class
exportToBed(object = exampleCAGEset, what = "CTSS", colorByExpressionProfile = TRUE)
### exporting tag clusters in gene-like representation
exportToBed( object = exampleCAGEset, what = "tagClusters"
, qLow = 0.1, qUp = 0.9, oneFile = TRUE)
exportToBed( object = exampleCAGEexp, what = "tagClusters"
, qLow = 0.1, qUp = 0.9, oneFile = TRUE)
}
\seealso{
Other CAGEr export functions: \code{\link{exportCTSStoBedGraph}}
}
\author{
Vanja Haberle
}
\concept{CAGEr export functions}
|
/man/exportToBed.Rd
|
no_license
|
clarapereira/CAGEr
|
R
| false
| true
| 5,533
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ExportMethods.R
\docType{methods}
\name{exportToBed}
\alias{exportToBed}
\alias{exportToBed,CAGEr-method}
\title{Create BED tracks of TSSs and clusters of TSSs}
\usage{
exportToBed(object, what = c("CTSS", "tagClusters", "consensusClusters"),
qLow = NULL, qUp = NULL, colorByExpressionProfile = FALSE,
oneFile = TRUE)
\S4method{exportToBed}{CAGEr}(object, what = c("CTSS", "tagClusters",
"consensusClusters"), qLow = NULL, qUp = NULL,
colorByExpressionProfile = FALSE, oneFile = TRUE)
}
\arguments{
\item{object}{A \code{\link{CAGEr}} object.}
\item{what}{Which elements should be exported to BED track. \code{CTSS} to export
individual CTSSs, \code{tagClusters} to export tag clusters or \code{consensusClusters}
to export consensus clusters.}
\item{qLow, qUp}{Position of which "lower" (resp. "upper") quantile should be
used as 5' (resp. 3') boundary of the filled block in gene-like
representation of the cluster. Default value \code{NULL} uses start (resp. end)
position of the cluster. Ignored when \code{what = "CTSS"}.}
\item{colorByExpressionProfile}{Logical, should blocks be colored in the
color of their corresponding expression class. Ignored when
\code{what = "tagClusters"}.}
\item{oneFile}{Logical, should all CAGE datasets be exported as individual
tracks into the same BED file (\code{TRUE}) or into separate BED files (\code{FALSE}).
Ignored when \code{what = "CTSS"}, which by default produces only one track.}
}
\value{
Creates BED file(s) in the working directory.
}
\description{
Creates BED file(s) with track(s) of individual CTSSs, tag
clusters or consensus clusters. CTSSs and consensus clusters can be
optionally colored in the color of their expression class. \emph{Tag clusters}
and \emph{consensus clusters} can be displayed in a gene-like representation with
a line showing full span of the cluster, filled block showing interquantile
range and a thick box denoting position of the dominant (most frequently
used) TSS.
}
\details{
The BED representations of \emph{CTSSs}, \emph{tag cluster} and
\emph{consensus clusters} can be directly visualised in the ZENBU or UCSC Genome
Browsers.
When \code{what = "CTSS"}, one BED file with single track of 1 bp blocks
representing all detected CTSSs (in all CAGE samples) is created. CTSSs can
be colored according to their expression class (provided the expression
profiling of CTSSs was done by calling \code{\link{getExpressionProfiles}} function).
Colors of expression classes match the colors in which they are shown in the
plot returned by the \code{\link{plotExpressionProfiles}} function. For
\code{colorByExpressionProfile = FALSE}, CTSSs included in the clusters are
shown in black and CTSSs that were filtered out in gray.
When \code{what = "tagClusters"}, one track per CAGE dataset is created, which can
be exported to a single BED file (by setting \code{oneFile = TRUE}) or separate
BED files (\code{FALSE}). If no quantile boundaries were provided (\code{qLow} and
\code{qUp} are \code{NULL}), TCs are represented as simple blocks showing the full
span of TC from the start to the end. Setting \code{qLow} and/or \code{qUp} parameters
to a value of the desired quantile creates a gene-like representation with a
line showing full span of the TC, filled block showing specified
interquantile range and a thick 1 bp block denoting position of the dominant
(most frequently used) TSS. All TCs in one track (one CAGE dataset) are
shown in the same color.
When \code{what = "consensusClusters"} \emph{consensus clusters} are exported to BED
file. Since there is only one set of consensus clusters common to all CAGE
datasets, only one track is created in case of a simple representation. This
means that when \code{qLow = NULL} and \code{qUp = NULL} one track with blocks showing
the full span of consensus cluster from the start to the end is created.
However, the distribution of the CAGE signal within consensus cluster can be
different in different CAGE samples, resulting in different positions of
quantiles and dominant TSS. Thus, when \code{qLow} and/or \code{qUp} parameters
are set to a value of the desired quantile, a separate track with a gene-like
representation is created for every CAGE dataset. These tracks can be
exported to a single BED file (by setting \code{oneFile = TRUE}) or separate
BED files (by setting \code{oneFile = FALSE}). The gene-like representation is
analogous to the one described above for the TCs. In all cases consensus
clusters can be colored according to their expression class (provided the
expression profiling of consensus clusters was done by calling
\code{getExpressionProfiles} function). Colors of expression classes match the
colors in which they are shown in the plot returned by the
\code{plotExpressionProfiles} function. For \code{colorByExpressionProfile = FALSE}
all consensus clusters are shown in black.
}
\examples{
### exporting CTSSs colored by expression class
exportToBed(object = exampleCAGEset, what = "CTSS", colorByExpressionProfile = TRUE)
### exporting tag clusters in gene-like representation
exportToBed( object = exampleCAGEset, what = "tagClusters"
, qLow = 0.1, qUp = 0.9, oneFile = TRUE)
exportToBed( object = exampleCAGEexp, what = "tagClusters"
, qLow = 0.1, qUp = 0.9, oneFile = TRUE)
}
\seealso{
Other CAGEr export functions: \code{\link{exportCTSStoBedGraph}}
}
\author{
Vanja Haberle
}
\concept{CAGEr export functions}
|
# model validation based on all exploratories
# Computes per-exploratory, per-fold RMSE of trait predictions and saves the
# result as "ep_rmse.RDS". Expects "trait_prediction.RDS" produced upstream.
library(Metrics)
library(reshape2)
library(ggplot2)
# project path helper (machine-specific; getEnvi comes from the envimaR repo)
source("/home/marvin/repositories/envimaR/R/getEnvi.R")
p <- getEnvi("/home/marvin/be_hyperspectral/data/")
res <- readRDS(paste0(p$results$here, "trait_prediction.RDS"))
# include which exploratory it is
# first character of the plot ID encodes the exploratory (A/H/S)
res$EP <- substr(res$EPID, 1,1)
res <- na.omit(res)
# statements
# every trait x exploratory combination to evaluate
traits <- as.character(unique(res$target))
ep <- c("A", "H", "S")
statements <- expand.grid(traits, ep)
i <- 1
# NOTE(review): columns 3:9 are assumed to hold the fold predictions and
# res$observation the ground truth -- confirm against the upstream script.
rmse_all <- lapply(seq(nrow(statements)), function(i){
t <- res$target == statements[i,1]
e <- res$EP == statements[i,2]
all_folds <- lapply(seq(3,9), function(j){
data.frame(EP = statements[i,2],
fold = colnames(res)[j],
trait = statements[i,1],
RMSE = rmse(actual = res[t & e,]$observation, predicted = res[t & e,j]))
})
do.call(rbind, all_folds)
})
rmse_df <- do.call(rbind, rmse_all)
saveRDS(rmse_df, paste0(p$results$here, "ep_rmse.RDS"))
|
/src/calculate_rmse.R
|
permissive
|
yangxhcaf/BE-HyperSpecPrediction
|
R
| false
| false
| 1,006
|
r
|
# model validation based on all exploratories
# Computes per-exploratory, per-fold RMSE of trait predictions and saves the
# result as "ep_rmse.RDS". Expects "trait_prediction.RDS" produced upstream.
library(Metrics)
library(reshape2)
library(ggplot2)
# project path helper (machine-specific; getEnvi comes from the envimaR repo)
source("/home/marvin/repositories/envimaR/R/getEnvi.R")
p <- getEnvi("/home/marvin/be_hyperspectral/data/")
res <- readRDS(paste0(p$results$here, "trait_prediction.RDS"))
# include which exploratory it is
# first character of the plot ID encodes the exploratory (A/H/S)
res$EP <- substr(res$EPID, 1,1)
res <- na.omit(res)
# statements
# every trait x exploratory combination to evaluate
traits <- as.character(unique(res$target))
ep <- c("A", "H", "S")
statements <- expand.grid(traits, ep)
i <- 1
# NOTE(review): columns 3:9 are assumed to hold the fold predictions and
# res$observation the ground truth -- confirm against the upstream script.
rmse_all <- lapply(seq(nrow(statements)), function(i){
t <- res$target == statements[i,1]
e <- res$EP == statements[i,2]
all_folds <- lapply(seq(3,9), function(j){
data.frame(EP = statements[i,2],
fold = colnames(res)[j],
trait = statements[i,1],
RMSE = rmse(actual = res[t & e,]$observation, predicted = res[t & e,j]))
})
do.call(rbind, all_folds)
})
rmse_df <- do.call(rbind, rmse_all)
saveRDS(rmse_df, paste0(p$results$here, "ep_rmse.RDS"))
|
# plot2.R -- draws a line chart of Global Active Power over 1-2 Feb 2007 from
# the UCI household power consumption dataset and saves it as plot2.png.
getwd()
rm(list=ls())
if (!file.exists("data")){
dir.create("data")
}
#I downloaded the zipfile to a folder called zipdir under the working directory folder, and
#extracted it into a folder called data. Then read the full file and subset the observations indicated.
unzip("zipdir/exdata-data-household_power_consumption.zip",exdir="data")
readData<-read.table("data/household_power_consumption.txt",sep=";",na.strings = "?",header=TRUE,stringsAsFactors = FALSE)
# NOTE(review): inside [] the comma is literal, so "[1,2]" matches '1', ','
# or '2'; it works here because the dates of interest start with 1 or 2.
DataDates<-readData[grep("^[1,2]/2/2007",readData$Date),]
#I convert dates and times to actual dates and times, first I need to change my locale settings so the date abbreviations
#on the axis will stay in English
#1-save your current locale
original_locale<-Sys.getlocale(category = "LC_TIME")
#2-change it to english
Sys.setlocale(category = "LC_TIME", locale = "English_United States.1252")
#strptime, when applied to the time column, creates a date info (default is the current date) therefore I create the today
#variable to subtract it from the new time string before creating a new column that pastes date and time
today<-Sys.Date()
DataDates$Date<-strptime(DataDates$Date,format="%d/%m/%Y")
DataDates$Time<-sub(today,"", strptime(DataDates$Time,format="%H:%M:%S"))
DataDates$datetime<-strptime(paste(DataDates$Date,DataDates$Time,sep=" "), format="%Y-%m-%d %H:%M:%S")
# I open the png graphic device, create the plot (480x480 is the default for png device) and close the device
png(filename = "plot2.png")
with(DataDates,plot(datetime,Global_active_power,type="l",ylab="Global Active Power (kilowatts)",xlab=""))
dev.off()
#change the locale back to the original setting
Sys.setlocale(category = "LC_TIME", locale = original_locale)
|
/plot2.R
|
no_license
|
albertollamas/ExData_Plotting1
|
R
| false
| false
| 1,717
|
r
|
# plot2.R -- draws a line chart of Global Active Power over 1-2 Feb 2007 from
# the UCI household power consumption dataset and saves it as plot2.png.
getwd()
rm(list=ls())
if (!file.exists("data")){
dir.create("data")
}
#I downloaded the zipfile to a folder called zipdir under the working directory folder, and
#extracted it into a folder called data. Then read the full file and subset the observations indicated.
unzip("zipdir/exdata-data-household_power_consumption.zip",exdir="data")
readData<-read.table("data/household_power_consumption.txt",sep=";",na.strings = "?",header=TRUE,stringsAsFactors = FALSE)
# NOTE(review): inside [] the comma is literal, so "[1,2]" matches '1', ','
# or '2'; it works here because the dates of interest start with 1 or 2.
DataDates<-readData[grep("^[1,2]/2/2007",readData$Date),]
#I convert dates and times to actual dates and times, first I need to change my locale settings so the date abbreviations
#on the axis will stay in English
#1-save your current locale
original_locale<-Sys.getlocale(category = "LC_TIME")
#2-change it to english
Sys.setlocale(category = "LC_TIME", locale = "English_United States.1252")
#strptime, when applied to the time column, creates a date info (default is the current date) therefore I create the today
#variable to subtract it from the new time string before creating a new column that pastes date and time
today<-Sys.Date()
DataDates$Date<-strptime(DataDates$Date,format="%d/%m/%Y")
DataDates$Time<-sub(today,"", strptime(DataDates$Time,format="%H:%M:%S"))
DataDates$datetime<-strptime(paste(DataDates$Date,DataDates$Time,sep=" "), format="%Y-%m-%d %H:%M:%S")
# I open the png graphic device, create the plot (480x480 is the default for png device) and close the device
png(filename = "plot2.png")
with(DataDates,plot(datetime,Global_active_power,type="l",ylab="Global Active Power (kilowatts)",xlab=""))
dev.off()
#change the locale back to the original setting
Sys.setlocale(category = "LC_TIME", locale = original_locale)
|
# Unit tests for the Schulze preferential-voting implementation.
# Helpers replist(), ballot() and schulze() come from the package under test.
context('Schulze method tests')
#Check function basics
test_that("Schulze basics are correct", {
votes <- c(
replist("Orange", 4),
replist(c("Pear", "Orange"), 2),
replist(c("Choc", "Strawberry"), 8),
replist(c("Choc", "Candy"), 4),
replist("Strawberry", 1),
replist("Candy", 1)
)
results_1seat <- schulze(votes, nseats=1)
expect_equal(results_1seat$nballots, 20)
expect_equal(sort(results_1seat$candidates), c('Candy','Choc','Orange','Pear','Strawberry'))
expect_equal(results_1seat$winners, c('Choc'))
results_2seat <- schulze(votes, nseats=2)
expect_equal(sort(results_2seat$winners), c('Choc','Strawberry'))
})
#Schulze check from Wikipedia
# Canonical 45-voter example from the Wikipedia "Schulze method" article;
# the expected single winner is E.
# NOTE(review): this test reads results$winner while the test above reads
# results$winners -- confirm which field name the API actually exposes.
test_that("Schulze method correct for Wikipedia example", {
test_cands <- c('A','B','C','D','E')
test_ballots <- c(rep(list(ballot(c(1,3,2,5,4), map=test_cands)), 5),
rep(list(ballot(c(1,5,4,2,3), map=test_cands)), 5),
rep(list(ballot(c(4,1,5,3,2), map=test_cands)), 8),
rep(list(ballot(c(2,3,1,5,4), map=test_cands)), 3),
rep(list(ballot(c(2,4,1,5,3), map=test_cands)), 7),
rep(list(ballot(c(3,2,1,4,5), map=test_cands)), 2),
rep(list(ballot(c(5,4,2,1,3), map=test_cands)), 7),
rep(list(ballot(c(3,2,5,4,1), map=test_cands)), 8))
results_1seat <- schulze(test_ballots, nseats=1)
expect_equal(results_1seat$winner, 'E')
expect_equal(length(results_1seat$winner), 1)
})
#Completely tied case
# With two mirror-image ballots no candidate beats the other, so a single
# seat must be decided by the implementation's tie-breaking (randomness).
test_that("Schulze uses random in complete tie situation", {
test_ballots <- c(list(c('A','B','C')), list(c('B','A','C')))
results_tied <- schulze(test_ballots, nseats=1)
expect_true(results_tied$winner %in% c('A','B'))
results_not_tied <- schulze(test_ballots, nseats=2)
expect_equal(sort(results_not_tied$winner), c('A','B'))
})
|
/tests/testthat/test_schulze.R
|
permissive
|
j450h1/avr
|
R
| false
| false
| 2,016
|
r
|
# Unit tests for the Schulze preferential-voting implementation.
# Helpers replist(), ballot() and schulze() come from the package under test.
context('Schulze method tests')
#Check function basics
test_that("Schulze basics are correct", {
votes <- c(
replist("Orange", 4),
replist(c("Pear", "Orange"), 2),
replist(c("Choc", "Strawberry"), 8),
replist(c("Choc", "Candy"), 4),
replist("Strawberry", 1),
replist("Candy", 1)
)
results_1seat <- schulze(votes, nseats=1)
expect_equal(results_1seat$nballots, 20)
expect_equal(sort(results_1seat$candidates), c('Candy','Choc','Orange','Pear','Strawberry'))
expect_equal(results_1seat$winners, c('Choc'))
results_2seat <- schulze(votes, nseats=2)
expect_equal(sort(results_2seat$winners), c('Choc','Strawberry'))
})
#Schulze check from Wikipedia
# Canonical 45-voter example from the Wikipedia "Schulze method" article;
# the expected single winner is E.
# NOTE(review): this test reads results$winner while the test above reads
# results$winners -- confirm which field name the API actually exposes.
test_that("Schulze method correct for Wikipedia example", {
test_cands <- c('A','B','C','D','E')
test_ballots <- c(rep(list(ballot(c(1,3,2,5,4), map=test_cands)), 5),
rep(list(ballot(c(1,5,4,2,3), map=test_cands)), 5),
rep(list(ballot(c(4,1,5,3,2), map=test_cands)), 8),
rep(list(ballot(c(2,3,1,5,4), map=test_cands)), 3),
rep(list(ballot(c(2,4,1,5,3), map=test_cands)), 7),
rep(list(ballot(c(3,2,1,4,5), map=test_cands)), 2),
rep(list(ballot(c(5,4,2,1,3), map=test_cands)), 7),
rep(list(ballot(c(3,2,5,4,1), map=test_cands)), 8))
results_1seat <- schulze(test_ballots, nseats=1)
expect_equal(results_1seat$winner, 'E')
expect_equal(length(results_1seat$winner), 1)
})
#Completely tied case
# With two mirror-image ballots no candidate beats the other, so a single
# seat must be decided by the implementation's tie-breaking (randomness).
test_that("Schulze uses random in complete tie situation", {
test_ballots <- c(list(c('A','B','C')), list(c('B','A','C')))
results_tied <- schulze(test_ballots, nseats=1)
expect_true(results_tied$winner %in% c('A','B'))
results_not_tied <- schulze(test_ballots, nseats=2)
expect_equal(sort(results_not_tied$winner), c('A','B'))
})
|
library("nbaR")
library("testthat")
## Resolve the test fixture directory: when tests run from inside
## tests/testthat the local "data" folder is used, otherwise (running at the
## package level) the full relative path is needed.
wd <- getwd()
data_dir <- if (grepl("testthat", wd)) {
  file.path("data")
} else {
  file.path("tests", "testthat", "data")
}
## Client for the public Netherlands Biodiversity API (v2 endpoint).
tc <- TaxonClient$new(basePath = "http://api.biodiversitydata.nl/v2")
test_that("Class hierarchy correct", {
  ## TaxonClient must inherit from the generic ApiClient base class.
  expect_is(tc, "TaxonClient")
  expect_is(tc, "ApiClient")
})
|
/tests/testthat/test-taxonClient.R
|
no_license
|
mbjoseph/nbaR
|
R
| false
| false
| 389
|
r
|
library("nbaR")
library("testthat")
## Resolve the test fixture directory: when tests run from inside
## tests/testthat the local "data" folder is used, otherwise (running at the
## package level) the full relative path is needed.
wd <- getwd()
data_dir <- if (grepl("testthat", wd)) {
  file.path("data")
} else {
  file.path("tests", "testthat", "data")
}
## Client for the public Netherlands Biodiversity API (v2 endpoint).
tc <- TaxonClient$new(basePath = "http://api.biodiversitydata.nl/v2")
test_that("Class hierarchy correct", {
  ## TaxonClient must inherit from the generic ApiClient base class.
  expect_is(tc, "TaxonClient")
  expect_is(tc, "ApiClient")
})
|
#:# libraries
# Reproducible benchmark: fits a random forest on the OpenML "soybean"
# dataset with mlr and records 5-fold cross-validated accuracy.
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "soybean")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "class")
lrn = makeLearner("classif.randomForest", par.vals = list(), predict.type = "prob")
#:# hash
#:# efaf17cd6974bb344011d72e65d5e257
# digest of (task, learner) uniquely identifies this model configuration
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc))
ACC <- r$aggr
ACC
#:# session info
# capture sessionInfo() to a file for reproducibility
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
/models/openml_soybean/classification_class/efaf17cd6974bb344011d72e65d5e257/code.R
|
no_license
|
lukaszbrzozowski/CaseStudies2019S
|
R
| false
| false
| 698
|
r
|
#:# libraries
# Reproducible benchmark: fits a random forest on the OpenML "soybean"
# dataset with mlr and records 5-fold cross-validated accuracy.
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "soybean")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "class")
lrn = makeLearner("classif.randomForest", par.vals = list(), predict.type = "prob")
#:# hash
#:# efaf17cd6974bb344011d72e65d5e257
# digest of (task, learner) uniquely identifies this model configuration
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc))
ACC <- r$aggr
ACC
#:# session info
# capture sessionInfo() to a file for reproducibility
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.R
\name{ReadNrmAverages}
\alias{ReadNrmAverages}
\title{Read the average data from the nrm file. note that in some cases a large number of empty columns are generated; only extract the first three.}
\usage{
ReadNrmAverages(nrm_file)
}
\description{
Read the average data from the nrm file. note that in some cases a large number of empty columns are generated; only extract the first three.
}
|
/man/ReadNrmAverages.Rd
|
no_license
|
imarigenias/motionTools
|
R
| false
| true
| 476
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.R
\name{ReadNrmAverages}
\alias{ReadNrmAverages}
\title{Read the average data from the nrm file. note that in some cases a large number of empty columns are generated; only extract the first three.}
\usage{
ReadNrmAverages(nrm_file)
}
\description{
Read the average data from the nrm file. note that in some cases a large number of empty columns are generated; only extract the first three.
}
|
#Boston Pricing case study
#End-to-end linear-regression workflow: load data, impute missing values,
#treat outliers, explore correlations, fit/compare models, validate on holdout.
setwd("D:\\")
##Loading Data
prices<-read.csv("boston_prices.csv",header=TRUE,stringsAsFactors=FALSE)
##Checking Data Characteristics
dim(prices)
str(prices)
head(prices)
names(prices)
#summary statistics
summary(prices)
#Missing values treatment
colSums(is.na(prices)) #MEDV has a lot of missing values
summary((prices$MEDV))
#Impute missing MEDV values with the column mean
prices$MEDV[is.na(prices$MEDV)]<-mean(prices$MEDV,na.rm=TRUE)
#Outlier plots
par(mfrow=c(2,7)) #This allows you to plot 14 charts on a single page; It is optional.
#NOTE(review): "list" shadows base::list -- consider renaming this variable.
list<-names(prices) #Store the names of the dataset in a list format
#drop the 4th column from the boxplot/outlier list
list<-list[-4]
for(i in 1:length(list)) #Plot the boxplots of all variables and shortlist which ones need outlier treatment.
{
boxplot(prices[,list[i]],main=list[i])
}
#Restore the par parameters to normal
dev.off()
#In this solution, we have replaced the outlier values by the mean values
#(the code below uses mean(); you can decide to replace by median or max
#values based on business objectives)
#Outlier treatment
for(i in 1:length(list)) ##For loop to replace all the outlier values with the mean value ; if you want you can replace with median value as well.
{
x<-boxplot(prices[,list[i]])
out<-x$out
index<-which(prices[,list[i]] %in% x$out)
prices[index,list[i]]<-mean(prices[,list[i]])
rm(x)
rm(out)
}
#Exploratory analysis
library(ggplot2)
#Study the histogram of the DV and the transformed histogram
hist(prices$MEDV)
#hist(prices$log_MEDV) #Once you create the transformations;look down
#You can look at the correlation between each IDV and the DV
#An eg :
ggplot(prices,aes(x=MEDV,y=LSTAT)) +geom_point()
ggplot(prices,aes(x=MEDV,y=DIS)) +geom_point()
ggplot(prices,aes(x=MEDV,y=AGE)) +geom_point()
#In order to quicken the process, lets write a loop :
#Below is a loop that gives you the correlation values between all IDV's and the DV
#Simply taking a look at the output of this loop, you can quickly shortlist
#Which all IDV's are correlated to the DV
#Loop to get the list of correlations between : DV and the IDV's
list1<-list[-13]
#NOTE(review): the loop below indexes "list", not "list1"; the result is the
#same only because list1 equals the first 12 elements of list -- confirm intent.
for(i in 1:length(list1))
{
x<-cor(prices$MEDV,prices[list[i]])
print(x)
}
#Significant variables are : B LSTAT AGE X.rooms.dwelling nitric.oxides.concentration INDUS
#You can also try to use data transformations
#Log transformations
#Create the log transformation for all variables
prices$log_CRIM<-log(prices$CRIM)
prices$log_ZN<-log(prices$ZN)
prices$log_NOX<-log(prices$nitric.oxides.concentration)
prices$log_RM<-log(prices$X.rooms.dwelling)
prices$log_AGE<-log(prices$AGE)
prices$log_DIS<-log(prices$DIS)
prices$log_RAD<-log(prices$RAD)
prices$log_TAX<-log(prices$TAX)
prices$log_PTRATIO<-log(prices$PTRATIO)
prices$log_B<-log(prices$B)
prices$log_LSTAT<-log(prices$LSTAT)
prices$log_MEDV<-log(prices$MEDV) #DV
prices$log_INDUS<-log(prices$INDUS)
#Refer to the profiling excel sheet to see all the correlations documented
#Loop to get the list of correlations between : log_DV and log of IDV's
list_log<-names(prices)[c(15:25,27)]
for(i in 1:length(list_log))
{
xlog<-cor(prices$log_MEDV,prices[list_log[i]])
print(xlog)
}
#Loop to get the list of correlations between : log_DV and IDV's
list_log_DV<-names(prices)[1:13]
list_log_DV<-list_log_DV[-4]
for(i in 1:length(list_log_DV))
{
xlogdv<-cor(prices$log_MEDV,prices[list_log_DV[i]])
print(xlogdv)
}
#70/30 train/test split on row indices
sampling<-sort(sample(nrow(prices), nrow(prices)*.7))
#Select training sample
train<-prices[sampling,]
test<-prices[-sampling,]
##Building Simple Linear Regression Models. Things to inspect in each fit:
#Metrics :
#Rsquare
#Coefficients
#P values : Significance levels of the IDV's
#Residuals distribution
#Factor variables as IDV's
#Full model with all candidate predictors:
Reg<-lm(log_MEDV~CRIM+INDUS+RAD+TAX+B+
Charles.River.dummy.variable+
DIS+ZN+PTRATIO+LSTAT+AGE+X.rooms.dwelling+nitric.oxides.concentration,data=train)
summary(Reg)
#Getting the formula
formula(Reg)
#Getting the formula (duplicated call; harmless)
formula(Reg)
#Remove insignificant variables :
Reg1<-lm(log_MEDV~
Charles.River.dummy.variable+
DIS+PTRATIO+LSTAT+AGE+X.rooms.dwelling+nitric.oxides.concentration,data=train)
summary(Reg1)
#Reg2 : remove insignificant values
Reg2 <- lm(log_MEDV ~CRIM+INDUS+RAD+TAX+B+
Charles.River.dummy.variable+
DIS+ZN+PTRATIO+LSTAT+X.rooms.dwelling+nitric.oxides.concentration, data=train)
summary(Reg2)
#Reg3 : remove insignificant values
Reg3 <- lm(log_MEDV ~CRIM+RAD+
Charles.River.dummy.variable+
DIS+ZN+PTRATIO+LSTAT+nitric.oxides.concentration, data=train)
summary(Reg3)
#Some other combination
Reg4<-lm(log_MEDV~INDUS +ZN + X.rooms.dwelling + LSTAT+CRIM + Charles.River.dummy.variable,data=train)
summary(Reg4)
#The best model happens to be : Reg3
##Getting predicted values
predicted<-predict(Reg3)
plot(predicted)
length(predicted)
##Finding Residuals
residuals<-resid(Reg3)
plot(residuals)
length(residuals)
##Plotting Residuals vs Predicted Values
##Checking Heteroskedasticity
##There should be no trend between predicted values and residual values
plot(predicted,residuals,abline(0,0))
#You can notice that there seems to be an inverse pattern for some points
#So this model may not be the preferred model.
#Attaching predicted values to test data
predicted<-predict(Reg3,newdata=test)
length(predicted)
test$p<-predicted
#Calculating error in the test dataset - (Actual - predicted)/Actual values
test$error<-(test$log_MEDV-test$p)/test$log_MEDV
mean(test$error)*100 #you get to know the average error in the given dataset
##Plotting actual vs predicted values
plot(test$p,col="blue",type="l")
lines(test$log_MEDV,col="red",type="l")
#checking for Correlation between variables
library(car)
vif(Reg3)
#You can drop variables if they have a vif>10 ; means high correlation between variables
|
/Solution Linear Regression (1).R
|
no_license
|
sanchita21/Linear-Regression-with-R-Assignment
|
R
| false
| false
| 5,836
|
r
|
#Boston Pricing case study
# Predicts Boston house prices (MEDV) with linear regression: this section
# loads the data, imputes missing MEDV values, and treats outliers column
# by column. Order matters (par/dev.off, in-place replacement), so the
# statements are documented rather than restructured.
setwd("D:\\")
# NOTE(review): hard-coded working directory and input file; adjust per machine.
##Loading Data
prices<-read.csv("boston_prices.csv",header=TRUE,stringsAsFactors=FALSE)
##Checking Data Characteristics
dim(prices)
str(prices)
head(prices)
names(prices)
#summary statistics
summary(prices)
#Missing values treatment
colSums(is.na(prices)) #MEDV has a lot of missing values
summary((prices$MEDV))
# Mean-impute the dependent variable MEDV.
prices$MEDV[is.na(prices$MEDV)]<-mean(prices$MEDV,na.rm=TRUE)
#Outlier plots
par(mfrow=c(2,7)) #This allows you to plot 14 charts on a single page; It is optional.
list<-names(prices) #Store the names of the dataset in a list format
# NOTE(review): the name `list` shadows base::list() for the rest of the script.
list<-list[-4] # drops the 4th column (presumably the Charles River dummy) -- TODO confirm against the CSV
for(i in 1:length(list)) #Plot the boxplots of all variables and shortlist which ones need outlier treatment.
{
boxplot(prices[,list[i]],main=list[i])
}
#Restore the par parameters to normal
dev.off()
#In this solution, We have replaced the outlier values by the median values
#You can decide to replace by max or mean values based on business objectives
# NOTE(review): the comment above says "median", but the loop below imputes the
# column MEAN (computed over the already partially-replaced column) -- confirm intent.
#Outlier treatment
for(i in 1:length(list)) ##For loop to replace all the outlier values with the mean value ; if you want you can replace with median value as well.
{
x<-boxplot(prices[,list[i]])
out<-x$out
index<-which(prices[,list[i]] %in% x$out)
prices[index,list[i]]<-mean(prices[,list[i]])
rm(x)
rm(out)
}
#Exploratory analysis
# Correlation screening of the independent variables (IDVs) against the
# dependent variable MEDV, on both the raw and log-transformed scales.
# Depends on `prices` and `list` created earlier in this script.
library(ggplot2)
#Study the histogram of the DV and the transformed histogram
hist(prices$MEDV)
#hist(prices$log_MEDV) #Once you create the transformations;look down
#You can look at the correlation between each IDV and the DV
#An eg :
ggplot(prices,aes(x=MEDV,y=LSTAT)) +geom_point()
ggplot(prices,aes(x=MEDV,y=DIS)) +geom_point()
ggplot(prices,aes(x=MEDV,y=AGE)) +geom_point()
#Inorder to quicken the process, lets write a function :
#Below is a function that gives you the correlation values between all IDV's and the DV
#Simply taking a look at the output of this function, you can quickly shortlist
#Which all IDV's are correlated to the DV
#Function to get the list of correlations between : DV and the IDV's
list1<-list[-13] # drop MEDV itself, leaving only the IDVs
# FIX: this loop iterated over list1 but indexed `list` -- it only worked
# because list1 happens to be a prefix of list. Index list1 directly, and
# use seq_along() so a zero-length vector cannot produce a bad 1:0 sequence.
for(i in seq_along(list1))
{
x<-cor(prices$MEDV,prices[list1[i]])
print(x)
}
#Significant variables are : B LSTAT AGE X.rooms.dwelling nitric.oxides.concentration INDUS
#You can also try to use data transformations
#Log transformations
#Create the log transformation for all variables
prices$log_CRIM<-log(prices$CRIM)
prices$log_ZN<-log(prices$ZN)
prices$log_NOX<-log(prices$nitric.oxides.concentration)
prices$log_RM<-log(prices$X.rooms.dwelling)
prices$log_AGE<-log(prices$AGE)
prices$log_DIS<-log(prices$DIS)
prices$log_RAD<-log(prices$RAD)
prices$log_TAX<-log(prices$TAX)
prices$log_PTRATIO<-log(prices$PTRATIO)
prices$log_B<-log(prices$B)
prices$log_LSTAT<-log(prices$LSTAT)
prices$log_MEDV<-log(prices$MEDV) #DV
prices$log_INDUS<-log(prices$INDUS)
#Refer to the profiling excel sheet to see all the correlations documented
#Function to get the list of correlations between : log_DV and log of IDV's
list_log<-names(prices)[c(15:25,27)]
for(i in seq_along(list_log))
{
xlog<-cor(prices$log_MEDV,prices[list_log[i]])
print(xlog)
}
#Function to get the list of correlations between : log_DV and IDV's
list_log_DV<-names(prices)[1:13]
list_log_DV<-list_log_DV[-4]
for(i in seq_along(list_log_DV))
{
xlogdv<-cor(prices$log_MEDV,prices[list_log_DV[i]])
print(xlogdv)
}
# Train/test split, model selection, and hold-out validation.
# NOTE(review): no set.seed() before sample(), so the split -- and every
# downstream coefficient and error estimate -- changes from run to run.
sampling<-sort(sample(nrow(prices), nrow(prices)*.7))
#Select training sample
train<-prices[sampling,]
test<-prices[-sampling,]
##Building Simple Linear Regression Model
#Metrics :
#Rsquare
#Coefficients
#P values : Significance levels of the IDV's
#Residuals distribution
#Factor variables as IDV's
#All good models
Reg<-lm(log_MEDV~CRIM+INDUS+RAD+TAX+B+
Charles.River.dummy.variable+
DIS+ZN+PTRATIO+LSTAT+AGE+X.rooms.dwelling+nitric.oxides.concentration,data=train)
summary(Reg)
#Getting the formula
formula(Reg)
#Getting the formula
formula(Reg)
#Remove insignificant variables :
Reg1<-lm(log_MEDV~
Charles.River.dummy.variable+
DIS+PTRATIO+LSTAT+AGE+X.rooms.dwelling+nitric.oxides.concentration,data=train)
summary(Reg1)
#Reg2 : remove insignificant values
Reg2 <- lm(log_MEDV ~CRIM+INDUS+RAD+TAX+B+
Charles.River.dummy.variable+
DIS+ZN+PTRATIO+LSTAT+X.rooms.dwelling+nitric.oxides.concentration, data=train)
summary(Reg2)
#Reg3 _ remove insignificant values
Reg3 <- lm(log_MEDV ~CRIM+RAD+
Charles.River.dummy.variable+
DIS+ZN+PTRATIO+LSTAT+nitric.oxides.concentration, data=train)
summary(Reg3)
#Some other combination
Reg4<-lm(log_MEDV~INDUS +ZN + X.rooms.dwelling + LSTAT+CRIM + Charles.River.dummy.variable,data=train)
summary(Reg4)
#The best model happens to be : Reg3
##Getting predicted values
predicted<-predict(Reg3)
plot(predicted)
length(predicted)
##Finding Residuals
residuals<-resid(Reg3)
plot(residuals)
length(residuals)
##Plotting Residuals vs Predicted Values
##Checking Heteroskedasticity
##There should be no trend between predicted values and residual values
plot(predicted,residuals,abline(0,0))
#You can notice that there seems to be an inverse pattern for some points
#So this model may not be the preferred model.
#attaching predicted values to test data
predicted<-predict(Reg3,newdata=test)
length(predicted)
test$p<-predicted
#Calculating error in the test dataset - (Actual- predicted)/predicted values
test$error<-(test$log_MEDV-test$p)/test$log_MEDV
mean(test$error)*100 #you get to know the average error in the given dataset
##Plotting actual vs predicted values
plot(test$p,col="blue",type="l")
lines(test$log_MEDV,col="red",type="l")
#checking for Correlation between variables
library(car)
vif(Reg3)
#You can drop variables if they have a vif>10 ; means high correlation between variables
|
#' Make request to Zillow API GetChart Web Service
#'
#' The GetChart API generates a URL for an image file that displays historical
#' Zestimates for a specific property. The API accepts as input the Zillow
#' Property ID as well as a chart type: either percentage or dollar value
#' change. Optionally, the API accepts width and height parameters that
#' constrain the size of the image. The historical data can be for the past 1
#' year, 5 years or 10 years.
#'
#' @param zpid The Zillow Property ID for the property for which to obtain
#' information. Required.
#' @param unit_type A string value that specifies whether to show the percent
#' change (unit_type = 'percent') or dollar change (unit_type = 'dollar').
#' Required.
#' @param width An integer value that specifies the width of the generated
#' image; the value must be between 200 and 600, inclusive.
#' @param height An integer value that specifies the height of the generated
#' image; the value must be between 100 and 300, inclusive.
#' @param chartDuration The duration of past data that needs to be shown in the
#' chart. Valid values are '1year', '5years' and '10years'. If unspecified,
#' the value defaults to '1year'.
#' @param zws_id The Zillow Web Service Identifier. Required.
#' @param url URL for the GetChart Web Service. Required.
#'
#' @return A named list with the following elements:
#' \describe{
#' \item{\strong{request}}{a list with the request parameters}
#' \item{\strong{message}}{a list of status code(s) and message(s)
#' returned by the API}
#' \item{\strong{response}}{an XMLNode with the API-specific response
#' values. At this time, no further coercion is performed, so you
#' may have to use functions from the `XML` package to extract
#' the desired output.}
#' }
#'
#' @export
#' @importFrom RCurl getURL
#'
#' @examples
#' \dontrun{
#' GetChart(zpid = 48749425)
#' GetChart(zpid = 48749425, unit_type = 'dollar', width = 600, height = 300,
#' chartDuration = '10years')}
GetChart <- function(
    zpid = NULL, unit_type = c('percent', 'dollar'),
    width = NULL, height = NULL, chartDuration = c('1year', '5years', '10years'),
    zws_id = getOption('ZillowR-zws_id'),
    url = 'http://www.zillow.com/webservice/GetChart.htm'
) {
  # Validate every argument up front, collecting all failures so the caller
  # sees the complete list in a single error message.
  errs <- c(
    validate_arg(zpid, required = TRUE, format = '^\\d+$', length_min = 1, length_max = 1),
    validate_arg(unit_type, required = TRUE, inclusion = c('percent', 'dollar'), length_min = 1, length_max = 2),
    validate_arg(width, inclusion = 200:600, length_min = 1, length_max = 1),
    validate_arg(height, inclusion = 100:300, length_min = 1, length_max = 1),
    validate_arg(chartDuration, inclusion = c('1year', '5years', '10years'), length_min = 1, length_max = 3),
    validate_arg(zws_id, required = TRUE, class = 'character', length_min = 1, length_max = 1),
    validate_arg(url, required = TRUE, class = 'character', length_min = 1, length_max = 1)
  )
  if (length(errs) > 0) {
    stop(paste(errs, collapse = '\n'))
  }

  # Assemble the query-string request for the GetChart endpoint.
  req <- url_encode_request(url,
    'zpid' = zpid,
    'unit-type' = unit_type,
    'width' = width,
    'height' = height,
    'chartDuration' = chartDuration,
    'zws-id' = zws_id
  )

  # Perform the HTTP call; transport errors are rethrown with the request
  # URL attached for easier debugging.
  resp <- tryCatch(
    RCurl::getURL(req),
    error = function(e) {stop(sprintf("Zillow API call with request '%s' failed with %s", req, e))}
  )
  preprocess_response(resp)
}
|
/R/GetChart.R
|
no_license
|
jacobkap/ZillowR
|
R
| false
| false
| 3,620
|
r
|
#' Make request to Zillow API GetChart Web Service
#'
#' The GetChart API generates a URL for an image file that displays historical
#' Zestimates for a specific property. The API accepts as input the Zillow
#' Property ID as well as a chart type: either percentage or dollar value
#' change. Optionally, the API accepts width and height parameters that
#' constrain the size of the image. The historical data can be for the past 1
#' year, 5 years or 10 years.
#'
#' @param zpid The Zillow Property ID for the property for which to obtain
#' information. Required.
#' @param unit_type A string value that specifies whether to show the percent
#' change (unit_type = 'percent') or dollar change (unit_type = 'dollar').
#' Required.
#' @param width An integer value that specifies the width of the generated
#' image; the value must be between 200 and 600, inclusive.
#' @param height An integer value that specifies the height of the generated
#' image; the value must be between 100 and 300, inclusive.
#' @param chartDuration The duration of past data that needs to be shown in the
#' chart. Valid values are '1year', '5years' and '10years'. If unspecified,
#' the value defaults to '1year'.
#' @param zws_id The Zillow Web Service Identifier. Required.
#' @param url URL for the GetChart Web Service. Required.
#'
#' @return A named list with the following elements:
#' \describe{
#' \item{\strong{request}}{a list with the request parameters}
#' \item{\strong{message}}{a list of status code(s) and message(s)
#' returned by the API}
#' \item{\strong{response}}{an XMLNode with the API-specific response
#' values. At this time, no further coercion is performed, so you
#' may have to use functions from the `XML` package to extract
#' the desired output.}
#' }
#'
#' @export
#' @importFrom RCurl getURL
#'
#' @examples
#' \dontrun{
#' GetChart(zpid = 48749425)
#' GetChart(zpid = 48749425, unit_type = 'dollar', width = 600, height = 300,
#' chartDuration = '10years')}
GetChart <- function(
    zpid = NULL, unit_type = c('percent', 'dollar'),
    width = NULL, height = NULL, chartDuration = c('1year', '5years', '10years'),
    zws_id = getOption('ZillowR-zws_id'),
    url = 'http://www.zillow.com/webservice/GetChart.htm'
) {
    # Validate all arguments first; failures are collected (not short-circuited)
    # so a single stop() reports every problem at once.
    validation_errors <- c(
        validate_arg(zpid, required = TRUE, format = '^\\d+$', length_min = 1, length_max = 1),
        validate_arg(unit_type, required = TRUE, inclusion = c('percent', 'dollar'), length_min = 1, length_max = 2),
        validate_arg(width, inclusion = 200:600, length_min = 1, length_max = 1),
        validate_arg(height, inclusion = 100:300, length_min = 1, length_max = 1),
        validate_arg(chartDuration, inclusion = c('1year', '5years', '10years'), length_min = 1, length_max = 3),
        validate_arg(zws_id, required = TRUE, class = 'character', length_min = 1, length_max = 1),
        validate_arg(url, required = TRUE, class = 'character', length_min = 1, length_max = 1)
    )
    if (length(validation_errors) > 0) {
        stop(paste(validation_errors, collapse = '\n'))
    }
    # Build the URL-encoded GET request for the GetChart web service.
    request <- url_encode_request(url,
        'zpid' = zpid,
        'unit-type' = unit_type,
        'width' = width,
        'height' = height,
        'chartDuration' = chartDuration,
        'zws-id' = zws_id
    )
    # Fetch the response; transport errors are rethrown with the request URL
    # included for debugging.
    response <- tryCatch(
        RCurl::getURL(request),
        error = function(e) {stop(sprintf("Zillow API call with request '%s' failed with %s", request, e))}
    )
    return(preprocess_response(response))
}
|
library("data.table")
# NOTE(review): hard-coded absolute path; breaks on any other machine.
setwd("C:/Users/alber/OneDrive/Documentos/Proyectos RStudio/exploratorydata/Proyecto2")
# Load the EPA source classification codes and the PM2.5 emissions summary.
SCC <- data.table::as.data.table(x = readRDS(file = "Source_Classification_Code.rds"))
NEI <- data.table::as.data.table(x = readRDS(file = "summarySCC_PM25.rds"))
# Coerce Emissions to numeric in place.
NEI[, Emissions := lapply(.SD, as.numeric), .SDcols = c("Emissions")]
# Total emissions per year for Baltimore City (fips 24510).
totalNEI <- NEI[fips=='24510', lapply(.SD, sum, na.rm = TRUE)
, .SDcols = c("Emissions")
, by = year]
# Write the bar chart of yearly totals to plot2.png.
png(filename='plot2.png')
barplot(totalNEI[, Emissions]
, names = totalNEI[, year]
, xlab = "Years", ylab = "Emissions"
, main = "Emissions over the Years")
dev.off()
|
/Proyecto2/Plot2.R
|
no_license
|
albertovelsan/exploratorydata
|
R
| false
| false
| 690
|
r
|
library("data.table")
# NOTE(review): machine-specific absolute path; consider a relative path.
setwd("C:/Users/alber/OneDrive/Documentos/Proyectos RStudio/exploratorydata/Proyecto2")
# Read source classification codes (unused below) and the PM2.5 summary data.
SCC <- data.table::as.data.table(x = readRDS(file = "Source_Classification_Code.rds"))
NEI <- data.table::as.data.table(x = readRDS(file = "summarySCC_PM25.rds"))
# Ensure the Emissions column is numeric (modified by reference).
NEI[, Emissions := lapply(.SD, as.numeric), .SDcols = c("Emissions")]
# Sum emissions by year for fips 24510 (Baltimore City).
totalNEI <- NEI[fips=='24510', lapply(.SD, sum, na.rm = TRUE)
, .SDcols = c("Emissions")
, by = year]
# Render the result as a bar chart into plot2.png.
png(filename='plot2.png')
barplot(totalNEI[, Emissions]
, names = totalNEI[, year]
, xlab = "Years", ylab = "Emissions"
, main = "Emissions over the Years")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dkfLLMvard.R
\name{dfkLLMvard}
\alias{dfkLLMvard}
\title{Runs the Diffuse Kalman Filter (DFK).}
\usage{
dfkLLMvard(x, y)
}
\arguments{
\item{x}{A vector with values of hyperparameters}
\item{y}{A vector with the values of the time series}
}
\description{
The state space form is
y(x) = Z(x) * alpha(x) + G * u(x)
alpha(x+1) = TT * alpha(x) + H * u(x)
the DKF is initialized with A0 and P0 (Q0=0).
}
\details{
sigma.eps^2 is concentrated out
}
\author{
Sonia Mazzi
}
|
/man/dfkLLMvard.Rd
|
no_license
|
uk-gov-mirror/datasciencecampus.trendyr
|
R
| false
| true
| 549
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dkfLLMvard.R
\name{dfkLLMvard}
\alias{dfkLLMvard}
\title{Runs the Diffuse Kalman Filter (DFK).}
\usage{
dfkLLMvard(x, y)
}
\arguments{
\item{x}{A vector with values of hyperparameters}
\item{y}{A vector with the values of the time series}
}
\description{
The state space form is
y(x) = Z(x) * alpha(x) + G * u(x)
alpha(x+1) = TT * alpha(x) + H * u(x)
the DKF is initialized with A0 and P0 (Q0=0).
}
\details{
sigma.eps^2 is concentrated out
}
\author{
Sonia Mazzi
}
|
# Auto-generated fuzz/valgrind reproduction case: calls DLMtool::LBSPRgen with
# degenerate inputs (denormal doubles, zero-length vectors, a 1x1 Prob matrix)
# to probe for memory errors. Do not hand-edit the argument values.
testlist <- list(Beta = 0, CVLinf = -3.78576841580673e-270, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615829296-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 487
|
r
|
# Auto-generated valgrind/fuzzing input for DLMtool::LBSPRgen: extreme and
# zero-length argument values exercise edge cases in the compiled routine.
testlist <- list(Beta = 0, CVLinf = -3.78576841580673e-270, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
#' QR Decomposition by Gram-Schmidt Orthonormalization
#'
#' \code{QR} computes the QR decomposition of a matrix, \eqn{X}, that is an orthonormal matrix, \eqn{Q} and an upper triangular
#' matrix, \eqn{R}, such that \eqn{X = Q R}.
#'
#' The QR decomposition plays an important role in many statistical techniques.
#' In particular it can be used to solve the equation \eqn{Ax = b} for given matrix \eqn{A} and vector \eqn{b}.
#' The function is included here simply to show the algorithm of Gram-Schmidt orthogonalization. The standard
#' \code{\link[base]{qr}} function is faster and more accurate.
#'
#' @param X a numeric matrix
#' @param tol tolerance for detecting linear dependencies in the columns of \code{X}
#' @return a list of three elements, consisting of an orthonormal matrix \code{Q}, an upper triangular matrix \code{R}, and the \code{rank}
#' of the matrix \code{X}
#' @author John Fox and Georges Monette
#' @seealso \code{\link[base]{qr}}
#' @export
#' @examples
#' A <- matrix(c(1,2,3,4,5,6,7,8,10), 3, 3) # a square nonsingular matrix
#' res <- QR(A)
#' res
#' q <- res$Q
#' zapsmall( t(q) %*% q) # check that q' q = I
#' r <- res$R
#' q %*% r # check that q r = A
#'
#' # relation to determinant: det(A) = prod(diag(R))
#' det(A)
#' prod(diag(r))
#'
#' B <- matrix(1:9, 3, 3) # a singular matrix
#' QR(B)
QR <- function(X, tol=sqrt(.Machine$double.eps)){
  # QR decomposition by Gram-Schmidt orthonormalization.
  # X: a numeric matrix
  # tol: values with absolute magnitude below tol are treated as zero
  # Returns list(Q, R, rank); Q and R are negated to match base qr().
  if (!is.numeric(X) || !is.matrix(X)) stop("X must be a numeric matrix")
  # FIX: the helper was named `length`, shadowing base::length inside this
  # function; renamed to vec_norm (Euclidean norm). Arithmetic is unchanged.
  vec_norm <- function(u) sqrt(sum(u^2))
  U <- X
  E <- matrix(0, nrow(X), ncol(X))
  E[, 1] <- U[, 1]/vec_norm(U[, 1])
  if (ncol(U)>1) { # trap potential error: a single-column X has no inner loop
    for (j in 2:ncol(U)){
      # Subtract the projections onto the previously built orthonormal columns
      # (classical Gram-Schmidt: projections of the ORIGINAL column X[, j]).
      for (k in 1:(j - 1)){
        U[, j] <- U[, j] - as.vector(X[, j] %*% E[, k]) * E[, k]
      }
      len.U.j <- vec_norm(U[, j])
      # A (near-)zero residual means column j is linearly dependent on the
      # earlier columns; its basis vector is left as zeros.
      if (len.U.j > tol) E[, j] <- U[, j]/len.U.j
    }
  }
  R <- t(E) %*% X
  R[abs(R) < tol] <- 0
  # Rank = number of nonzero rows of the (upper triangular) R.
  rank <- sum(rowSums(abs(R)) > 0)
  list(Q=-E, R=-R, rank=rank) # negated to match qr()
}
|
/R/QR.R
|
no_license
|
friendly/matlib
|
R
| false
| false
| 2,098
|
r
|
#' QR Decomposition by Gram-Schmidt Orthonormalization
#'
#' \code{QR} computes the QR decomposition of a matrix, \eqn{X}, that is an orthonormal matrix, \eqn{Q} and an upper triangular
#' matrix, \eqn{R}, such that \eqn{X = Q R}.
#'
#' The QR decomposition plays an important role in many statistical techniques.
#' In particular it can be used to solve the equation \eqn{Ax = b} for given matrix \eqn{A} and vector \eqn{b}.
#' The function is included here simply to show the algorithm of Gram-Schmidt orthogonalization. The standard
#' \code{\link[base]{qr}} function is faster and more accurate.
#'
#' @param X a numeric matrix
#' @param tol tolerance for detecting linear dependencies in the columns of \code{X}
#' @return a list of three elements, consisting of an orthonormal matrix \code{Q}, an upper triangular matrix \code{R}, and the \code{rank}
#' of the matrix \code{X}
#' @author John Fox and Georges Monette
#' @seealso \code{\link[base]{qr}}
#' @export
#' @examples
#' A <- matrix(c(1,2,3,4,5,6,7,8,10), 3, 3) # a square nonsingular matrix
#' res <- QR(A)
#' res
#' q <- res$Q
#' zapsmall( t(q) %*% q) # check that q' q = I
#' r <- res$R
#' q %*% r # check that q r = A
#'
#' # relation to determinant: det(A) = prod(diag(R))
#' det(A)
#' prod(diag(r))
#'
#' B <- matrix(1:9, 3, 3) # a singular matrix
#' QR(B)
QR <- function(X, tol=sqrt(.Machine$double.eps)){
  # Gram-Schmidt-based QR factorization. Returns an orthonormal Q, an upper
  # triangular R (both sign-flipped to match base qr()), and the rank of X,
  # where rank counts the nonzero rows of R after thresholding by tol.
  if (!is.numeric(X) || !is.matrix(X)) stop("X must be a numeric matrix")
  norm2 <- function(v) sqrt(sum(v^2)) # Euclidean norm of a column
  work <- X
  basis <- matrix(0, nrow(X), ncol(X))
  basis[, 1] <- work[, 1] / norm2(work[, 1])
  # Columns 2..ncol; the index vector is empty when X has a single column.
  for (j in seq_len(ncol(work))[-1]) {
    # Classical Gram-Schmidt: remove from column j its projections onto every
    # basis vector built so far (projections of the original X column).
    for (k in seq_len(j - 1)) {
      work[, j] <- work[, j] - as.vector(X[, j] %*% basis[, k]) * basis[, k]
    }
    nrm <- norm2(work[, j])
    # A residual at or below tol marks a linearly dependent column: its basis
    # vector stays all-zero, which later yields a zero row in R.
    if (nrm > tol) basis[, j] <- work[, j] / nrm
  }
  R <- t(basis) %*% X
  R[abs(R) < tol] <- 0
  list(Q = -basis, R = -R, rank = sum(rowSums(abs(R)) > 0)) # signs match qr()
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/calpuff_06_complex_terrain_inputs.R
\name{calpuff_06_complex_terrain_inputs}
\alias{calpuff_06_complex_terrain_inputs}
\title{Set the CALPUFF subgrid scale complex terrain inputs}
\usage{
calpuff_06_complex_terrain_inputs(calpuff_inp = "calpuff_template.txt",
nhill = 0, nctrec = 0, mhill = 2, xhill2m = 1, zhill2m = 1,
xctdmkm = 0, yctdmkm = 0)
}
\arguments{
\item{calpuff_inp}{the absolute path and filename for the working CALPUFF input file.}
\item{nhill}{the number of terrain features.}
\item{nctrec}{the number of special complex terrain receptors.}
\item{mhill}{provenance of terrain and CTSG Receptor data: (1) hill and receptor data created by CTDM processors and read from HILL.DAT and HILLRCT.DAT files, or (2) hill data created by OPTHILL.}
\item{xhill2m}{factor to convert horizontal dimensions to meters.}
\item{zhill2m}{factor to convert vertical dimensions to meters.}
\item{xctdmkm}{the x-origin of the CTDM system relative to the CALPUFF coordinate system, in kilometers.}
\item{yctdmkm}{the y-origin of the CTDM system relative to the CALPUFF coordinate system, in kilometers.}
}
\description{
This function validates and writes CALPUFF subgrid scale complex terrain inputs.
}
|
/man/calpuff_06_complex_terrain_inputs.Rd
|
permissive
|
yosukefk/PuffR
|
R
| false
| false
| 1,295
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/calpuff_06_complex_terrain_inputs.R
\name{calpuff_06_complex_terrain_inputs}
\alias{calpuff_06_complex_terrain_inputs}
\title{Set the CALPUFF subgrid scale complex terrain inputs}
\usage{
calpuff_06_complex_terrain_inputs(calpuff_inp = "calpuff_template.txt",
nhill = 0, nctrec = 0, mhill = 2, xhill2m = 1, zhill2m = 1,
xctdmkm = 0, yctdmkm = 0)
}
\arguments{
\item{calpuff_inp}{the absolute path and filename for the working CALPUFF input file.}
\item{nhill}{the number of terrain features.}
\item{nctrec}{the number of special complex terrain receptors.}
\item{mhill}{provenance of terrain and CTSG Receptor data: (1) hill and receptor data created by CTDM processors and read from HILL.DAT and HILLRCT.DAT files, or (2) hill data created by OPTHILL.}
\item{xhill2m}{factor to convert horizontal dimensions to meters.}
\item{zhill2m}{factor to convert vertical dimensions to meters.}
\item{xctdmkm}{the x-origin of the CTDM system relative to the CALPUFF coordinate system, in kilometers.}
\item{yctdmkm}{the y-origin of the CTDM system relative to the CALPUFF coordinate system, in kilometers.}
}
\description{
This function validates and writes CALPUFF subgrid scale complex terrain inputs.
}
|
library(shiny)
# UI definition: a numeric input chooses which iris column to histogram and a
# slider chooses the bin count; the server (not in this file) is expected to
# render the "distPlot" output from "numericInputData" and "sliderInputData".
shinyUI(fluidPage(
titlePanel("numericInput"),
sidebarLayout(
sidebarPanel(
# Column index into iris (1-4); label text is intentionally in Japanese.
numericInput("numericInputData",
"irisデータでヒストグラムを表示する列番号",
min = 1,
max = 4,
value = 1),
# Histogram bin count.
sliderInput("sliderInputData",
"Number of bins:",
min = 1,
max = 50,
value = 30)
),
mainPanel(
plotOutput("distPlot")
)
)
))
|
/ui.R
|
no_license
|
chan-ume/shiny-example
|
R
| false
| false
| 536
|
r
|
library(shiny)
# Shiny UI: numericInput selects the iris column (1-4) to plot, sliderInput
# sets the number of histogram bins; the companion server.R (not shown)
# must provide output$distPlot.
shinyUI(fluidPage(
titlePanel("numericInput"),
sidebarLayout(
sidebarPanel(
# Japanese label: "column number of the iris data to show as a histogram".
numericInput("numericInputData",
"irisデータでヒストグラムを表示する列番号",
min = 1,
max = 4,
value = 1),
sliderInput("sliderInputData",
"Number of bins:",
min = 1,
max = 50,
value = 30)
),
mainPanel(
plotOutput("distPlot")
)
)
))
|
# psychonetrics sample:
# S4 container for the sample-level statistics a psychonetrics model is fit
# to: per-group covariance/correlation/mean/threshold lists plus bookkeeping
# about groups, variables, and the estimation inputs (FIML data, WLS weights).
generate_psychonetrics_samplestats <- setClass("psychonetrics_samplestats", slots = c(
covs = "list",
cors = "list",
means = "list",
thresholds = "list",
squares = "list",
groups = "data.frame", # Data frame with information on each group
variables = "data.frame",
nobs = "numeric", # Number of observations
corinput = "logical",
# missingness = "list", # Missing patterns, only used when rawts = TRUE
# data = "list" # Raw data, used only with fimldata
fimldata = "list",
fullFIML = "logical",
WLS.W = "list", # List with weights matrix per group
rawdata = "data.frame", # For bootstrapping!
groupvar = "character"
), prototype = list(groups = data.frame(
label = character(0),
id = integer(0),
nobs = integer(0),
stringsAsFactors = FALSE
),
variables = data.frame(
label = character(0),
id = integer(0),
ordered = logical(0)
),
corinput = FALSE,
fullFIML = FALSE
))
# Timestamp:
# Register the S3 class "sessionInfo" so it can be used as an S4 slot type.
setOldClass("sessionInfo")
# S4 class for one log entry: what happened, when, and (optionally) the
# session info captured at that moment.
psychonetrics_log <- setClass("psychonetrics_log", slots = c(
event = "character",
time = "POSIXct",
sessionInfo = "sessionInfo"))
generate_psychonetrics_logentry <- function(event){
  # Create a single log entry stamped with the current time.
  entry <- psychonetrics_log()
  entry@event <- event
  entry@time <- Sys.time()
  # entry@sessionInfo <- sessionInfo()  # disabled upstream; kept for reference
  entry
}
addLog <- function(x,event){
  # Append a freshly stamped entry to the model's @log slot and return the
  # updated model (indexing append preserves the slot's class attribute).
  n_entries <- length(x@log)
  x@log[[n_entries + 1]] <- generate_psychonetrics_logentry(event)
  x
}
createLogList <- function(){
  # Initial log for a new model: one "Model created" entry, tagged with the
  # "psychonetrics_log" class used for printing.
  structure(
    list(generate_psychonetrics_logentry("Model created")),
    class = "psychonetrics_log"
  )
}
# Psychonetrics model:
# S4 class describing a full psychonetrics model: the parameter table, matrix
# layout, sample statistics, optimizer/estimator settings and fit results.
generate_psychonetrics <- setClass("psychonetrics", slots = c(
model = "character", # Model framework
submodel = "character",
parameters = "data.frame", # Parameter table data.frame(from, edge, to, est, std, se, matrix, row, col, par)
matrices = "data.frame",
computed = "logical", # Logical, is the model computed yet?
sample = "psychonetrics_samplestats", # Sample statistics
modelmatrices = "list", # Model matrices in list form
# fitfunctions = "list", # contains fitfunction, gradient, hessian and extramatrices, logliks
log = "psychonetrics_log",
optim = "list",
fitmeasures = "list",
baseline_saturated = "list",
equal = "character",
objective = "numeric",
information = "matrix",
identification = "character",
optimizer = "character",
estimator = "character",
distribution = "character",
extramatrices = "list", # Contains extra matrices
rawts = "logical",
Drawts = "list",
types = "list",
cpp = "logical",
meanstructure = "logical",
verbose = "logical"
),
prototype = list(
model = "dummy", submodel = "none",
# Empty parameter table defining one column per parameter attribute.
parameters = data.frame(
var1 = character(0),
var1_id = integer(0),
op = character(0),
var2 = character(0),
var2_id = integer(0),
est = numeric(0),
std = numeric(0),
se = numeric(0),
p = numeric(0),
se_boot = numeric(0),
p_boot = numeric(0),
matrix = character(0),
row = numeric(0),
col = numeric(0),
par = integer(0),
group = character(0),
group_id = integer(0),
fixed = logical(0),
symmetrical = logical(0), # Used to determine if matrix is symmetrical
mi = numeric(0), # Modification index
pmi = numeric(0), #p-value modification index
epc = numeric(0),
mi_free = numeric(0), # Modification index
pmi_free = numeric(0), #p-value modification index
epc_free = numeric(0),
mi_equal = numeric(0), # Modification index constraining groups to be equal
pmi_equal = numeric(0), #p-value modification index constraining groups to be equal
# FIX: this entry was a duplicated `pmi_free` (data.frame would have mangled
# it to pmi_free.1); the mi/pmi/epc triplet pattern and the original comment
# indicate the expected-parameter-change column `epc_equal` was intended.
epc_equal = numeric(0), # expected parameter change constraining groups to be equal
minimum = numeric(0),
maximum = numeric(0),
identified = logical(0), # Indicating a parameter is fixed to identify the model!
stringsAsFactors = FALSE
),
# Empty matrix-layout table: one row per model matrix and its properties.
matrices = data.frame(
name = character(0),
nrow = integer(0),
ncol = integer(0),
ngroup = integer(0),
symmetrical = logical(0),
sparse = logical(0),
posdef = logical(0),
diagonal = logical(0),
incomplete = logical(0)
),
computed = FALSE,
log = createLogList(),
identification = "none",
optimizer = "ucminf",
estimator = "ML",
rawts = FALSE,
cpp = TRUE, # Use C++ when available
meanstructure = TRUE,
verbose = FALSE
))
# generate_psychonetrics()
|
/psychonetrics/R/01_classes.R
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false
| false
| 4,518
|
r
|
# psychonetrics sample:
# S4 container for sample-level statistics (per-group covariances, means,
# thresholds, etc.) plus group/variable bookkeeping.
generate_psychonetrics_samplestats <- setClass("psychonetrics_samplestats", slots = c(
covs = "list",
cors = "list",
means = "list",
thresholds = "list",
squares = "list",
groups = "data.frame", # Data frame with information on each group
variables = "data.frame",
nobs = "numeric", # Number of observations
corinput = "logical",
# missingness = "list", # Missing patterns, only used when rawts = TRUE
# data = "list" # Raw data, used only with fimldata
fimldata = "list",
fullFIML = "logical",
WLS.W = "list", # List with weights matrix per group
rawdata = "data.frame", # For bootstrapping!
groupvar = "character"
), prototype = list(groups = data.frame(
label = character(0),
id = integer(0),
nobs = integer(0),
stringsAsFactors = FALSE
),
variables = data.frame(
label = character(0),
id = integer(0),
ordered = logical(0)
),
corinput = FALSE,
fullFIML = FALSE
))
# Timestamp:
# Register the S3 "sessionInfo" class so it can serve as an S4 slot type.
setOldClass("sessionInfo")
# One log entry: event description, timestamp, optional session info.
psychonetrics_log <- setClass("psychonetrics_log", slots = c(
event = "character",
time = "POSIXct",
sessionInfo = "sessionInfo"))
# Build a single timestamped log entry for the given event string.
generate_psychonetrics_logentry <- function(event){
stamp <- psychonetrics_log()
stamp@event <- event
stamp@time <- Sys.time()
# stamp@sessionInfo <- sessionInfo()
stamp
}
# Append a new entry to a model's @log slot and return the updated model.
addLog <- function(x,event){
x@log[[length(x@log)+1]] <- generate_psychonetrics_logentry(event)
x
}
# Initial log list for a new model: a single "Model created" entry.
createLogList <- function(){
res <- list(generate_psychonetrics_logentry("Model created"))
class(res) <- "psychonetrics_log"
return(res)
}
# Psychonetrics model:
# S4 class for a psychonetrics model: parameter table, matrix layout, sample
# statistics, estimation settings and fit results.
generate_psychonetrics <- setClass("psychonetrics", slots = c(
model = "character", # Model framework
submodel = "character",
parameters = "data.frame", # Parameter table data.frame(from, edge, to, est, std, se, matrix, row, col, par)
matrices = "data.frame",
computed = "logical", # Logical, is the model computed yet?
sample = "psychonetrics_samplestats", # Sample statistics
modelmatrices = "list", # Model matrices in list form
# fitfunctions = "list", # contains fitfunction, gradient, hessian and extramatrices, logliks
log = "psychonetrics_log",
optim = "list",
fitmeasures = "list",
baseline_saturated = "list",
equal = "character",
objective = "numeric",
information = "matrix",
identification = "character",
optimizer = "character",
estimator = "character",
distribution = "character",
extramatrices = "list", # Contains extra matrices
rawts = "logical",
Drawts = "list",
types = "list",
cpp = "logical",
meanstructure = "logical",
verbose = "logical"
),
prototype = list(
model = "dummy", submodel = "none",
# Empty parameter table: one column per parameter attribute.
parameters = data.frame(
var1 = character(0),
var1_id = integer(0),
op = character(0),
var2 = character(0),
var2_id = integer(0),
est = numeric(0),
std = numeric(0),
se = numeric(0),
p = numeric(0),
se_boot = numeric(0),
p_boot = numeric(0),
matrix = character(0),
row = numeric(0),
col = numeric(0),
par = integer(0),
group = character(0),
group_id = integer(0),
fixed = logical(0),
symmetrical = logical(0), # Used to determine if matrix is symmetrical
mi = numeric(0), # Modification index
pmi = numeric(0), #p-value modification index
epc = numeric(0),
mi_free = numeric(0), # Modification index
pmi_free = numeric(0), #p-value modification index
epc_free = numeric(0),
mi_equal = numeric(0), # Modification index constraining groups to be equal
pmi_equal = numeric(0), #p-value modification index constraining groups to be equal
# FIX: was a duplicated `pmi_free` entry (mangled to pmi_free.1 by
# data.frame); the mi/pmi/epc triplet pattern indicates epc_equal was intended.
epc_equal = numeric(0), # expected parameter change constraining groups to be equal
minimum = numeric(0),
maximum = numeric(0),
identified = logical(0), # Indicating a parameter is fixed to identify the model!
stringsAsFactors = FALSE
),
# Empty matrix-layout table: one row per model matrix and its properties.
matrices = data.frame(
name = character(0),
nrow = integer(0),
ncol = integer(0),
ngroup = integer(0),
symmetrical = logical(0),
sparse = logical(0),
posdef = logical(0),
diagonal = logical(0),
incomplete = logical(0)
),
computed = FALSE,
log = createLogList(),
identification = "none",
optimizer = "ucminf",
estimator = "ML",
rawts = FALSE,
cpp = TRUE, # Use C++ when available
meanstructure = TRUE,
verbose = FALSE
))
# generate_psychonetrics()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/retractcheck.R
\name{retractcheck_html}
\alias{retractcheck_html}
\title{Check html file for retractions}
\usage{
retractcheck_html(path, ...)
}
\arguments{
\item{path}{Path to html file to check}
\item{...}{Arguments passed on to \code{retractcheck}
\describe{
\item{database}{Character. Abbreviation of the databases to search
(\code{or} for openretractions.com). # #' and \code{rw} for #
#' retractiondatabase.com). Note that in the absence of an API,
# #' searching retractiondatabase.com is rather slow.}
\item{return}{Character. If \code{all}, all DOIs are queried and
all results are returned; if \code{unique}, the DOIs are
queried in the order specified until either a correction or
retraction notice is found or all databases have been queried.}
}}
}
\value{
\code{\link{retractcheck}} dataframe without filenames
}
\description{
Check a html file for retractions.
}
\examples{
\donttest{
retractcheck_html(system.file("extdata", "manuscript.html", package = "retractcheck"))
}
}
|
/man/retractcheck_html.Rd
|
permissive
|
libscie/retractcheck
|
R
| false
| true
| 1,076
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/retractcheck.R
\name{retractcheck_html}
\alias{retractcheck_html}
\title{Check html file for retractions}
\usage{
retractcheck_html(path, ...)
}
\arguments{
\item{path}{Path to html file to check}
\item{...}{Arguments passed on to \code{retractcheck}
\describe{
  \item{database}{Character. Abbreviation of the databases to search
    (\code{or} for openretractions.com and \code{rw} for
    retractiondatabase.com). Note that in the absence of an API,
    searching retractiondatabase.com is rather slow.}
\item{return}{Character. If \code{all}, all DOIs are queried and
all results are returned; if \code{unique}, the DOIs are
queried in the order specified until either a correction or
retraction notice is found or all databases have been queried.}
}}
}
\value{
\code{\link{retractcheck}} dataframe without filenames
}
\description{
Check a html file for retractions.
}
\examples{
\donttest{
retractcheck_html(system.file("extdata", "manuscript.html", package = "retractcheck"))
}
}
|
## Worked example for medweightcont from the causalweight package.
library(causalweight)
### Name: medweightcont
### Title: Causal mediation analysis with a continuous treatment based on
###   weighting by the inverse of generalized propensity scores
### Aliases: medweightcont

### ** Examples

# A little example with simulated data (10000 observations)
n <- 10000
# Covariate, treatment (shifted to be non-negative), mediator, and outcome.
x <- runif(n = n, min = -1, max = 1)
d <- 0.25 * x + runif(n = n, min = -2, max = 2)
d <- d - min(d)
m <- 0.5 * d + 0.25 * x + runif(n = n, min = -2, max = 2)
y <- 0.5 * d + m + 0.25 * x + runif(n = n, min = -2, max = 2)
# The true direct and indirect effects are all equal to 0.5
output <- medweightcont(y, d, m, x,
                        d0 = 2, d1 = 3, ATET = FALSE,
                        trim = 0.05, lognorm = FALSE, bw = NULL, boot = 19)
round(output$results, 3)
output$ntrimmed
|
/data/genthat_extracted_code/causalweight/examples/medweightcont.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 650
|
r
|
## Worked example for medweightcont from the causalweight package.
library(causalweight)
### Name: medweightcont
### Title: Causal mediation analysis with a continuous treatment based on
###   weighting by the inverse of generalized propensity scores
### Aliases: medweightcont

### ** Examples

# A little example with simulated data (10000 observations)
n <- 10000
# Covariate, treatment (shifted to be non-negative), mediator, and outcome.
x <- runif(n = n, min = -1, max = 1)
d <- 0.25 * x + runif(n = n, min = -2, max = 2)
d <- d - min(d)
m <- 0.5 * d + 0.25 * x + runif(n = n, min = -2, max = 2)
y <- 0.5 * d + m + 0.25 * x + runif(n = n, min = -2, max = 2)
# The true direct and indirect effects are all equal to 0.5
output <- medweightcont(y, d, m, x,
                        d0 = 2, d1 = 3, ATET = FALSE,
                        trim = 0.05, lognorm = FALSE, bw = NULL, boot = 19)
round(output$results, 3)
output$ntrimmed
|
\name{miNEXT2-package}
\alias{miNEXT2-package}
\alias{miNEXT2}
\docType{package}
\title{\packageTitle{miNEXT2}}
\description{\packageDescription{miNEXT2}}
\details{
The DESCRIPTION file: \packageDESCRIPTION{miNEXT2}
\packageIndices{miNEXT2}
This section should provide a more detailed overview of how to use the
package, including the most important functions.
}
\author{
\packageAuthor{miNEXT2}
Maintainer: \packageMaintainer{miNEXT2}
}
\references{
This optional section can contain literature or other references for
background information.
}
% Optionally other standard keywords, one per line,
% from the file KEYWORDS in the R documentation.
\keyword{package}
\seealso{
Optional links to other man pages
}
\examples{
## Optional simple examples of the most important functions
## Use \dontrun{} around code to be shown but not executed
}
|
/man/miNEXT2-package.Rd
|
no_license
|
HsiaotungHuang/miNEXT2
|
R
| false
| false
| 869
|
rd
|
\name{miNEXT2-package}
\alias{miNEXT2-package}
\alias{miNEXT2}
\docType{package}
\title{\packageTitle{miNEXT2}}
\description{\packageDescription{miNEXT2}}
\details{
The DESCRIPTION file: \packageDESCRIPTION{miNEXT2}
\packageIndices{miNEXT2}
This section should provide a more detailed overview of how to use the
package, including the most important functions.
}
\author{
\packageAuthor{miNEXT2}
Maintainer: \packageMaintainer{miNEXT2}
}
\references{
This optional section can contain literature or other references for
background information.
}
% Optionally other standard keywords, one per line,
% from the file KEYWORDS in the R documentation.
\keyword{package}
\seealso{
Optional links to other man pages
}
\examples{
## Optional simple examples of the most important functions
## Use \dontrun{} around code to be shown but not executed
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_quantiles.R
\name{add_quantiles}
\alias{add_quantiles}
\alias{add_quantiles.ggsurvfit}
\title{Add quantile indicators to visR plot}
\usage{
add_quantiles(gg, ...)
\method{add_quantiles}{ggsurvfit}(
gg,
quantiles = 0.5,
linetype = "dashed",
linecolour = "grey50",
alpha = 1,
...
)
}
\arguments{
\item{gg}{A ggplot created with visR}
\item{...}{other arguments passed on to the method to modify \code{\link[ggplot2]{geom_line}}}
\item{quantiles}{vector of quantiles to be displayed on the probability scale, default: 0.5}
\item{linetype}{string indicating the linetype as described in the aesthetics of ggplot2 \code{\link[ggplot2]{geom_line}}, default: dashed (also supports "mixed" -> horizontal lines are solid, vertical ones are dashed)}
\item{linecolour}{string indicating the line colour as described in the aesthetics of ggplot2 \code{\link[ggplot2]{geom_line}}, default: grey50 (also supports "strata" -> horizontal lines are grey50, vertical ones are the same colour as the respective strata)}
\item{alpha}{numeric value between 0 and 1 as described in the aesthetics of ggplot2 \code{\link[ggplot2]{geom_line}}, default: 1}
}
\value{
Lines indicating the quantiles overlayed on a visR ggplot
}
\description{
Method to add quantile lines to a plot.
}
\examples{
library(visR)
adtte \%>\%
estimate_KM("SEX") \%>\%
visr() \%>\%
add_quantiles()
adtte \%>\%
estimate_KM("SEX") \%>\%
visr() \%>\%
add_quantiles(quantiles = c(0.25, 0.50))
adtte \%>\%
estimate_KM("SEX") \%>\%
visr() \%>\%
add_quantiles(
quantiles = c(0.25, 0.50),
linetype = "solid",
linecolour = "grey"
)
adtte \%>\%
estimate_KM("SEX") \%>\%
visr() \%>\%
add_quantiles(
quantiles = c(0.25, 0.50),
linetype = "mixed",
linecolour = "strata"
)
}
|
/man/add_quantiles.Rd
|
permissive
|
bailliem/pharmavisR
|
R
| false
| true
| 1,866
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_quantiles.R
\name{add_quantiles}
\alias{add_quantiles}
\alias{add_quantiles.ggsurvfit}
\title{Add quantile indicators to visR plot}
\usage{
add_quantiles(gg, ...)
\method{add_quantiles}{ggsurvfit}(
gg,
quantiles = 0.5,
linetype = "dashed",
linecolour = "grey50",
alpha = 1,
...
)
}
\arguments{
\item{gg}{A ggplot created with visR}
\item{...}{other arguments passed on to the method to modify \code{\link[ggplot2]{geom_line}}}
\item{quantiles}{vector of quantiles to be displayed on the probability scale, default: 0.5}
\item{linetype}{string indicating the linetype as described in the aesthetics of ggplot2 \code{\link[ggplot2]{geom_line}}, default: dashed (also supports "mixed" -> horizontal lines are solid, vertical ones are dashed)}
\item{linecolour}{string indicating the line colour as described in the aesthetics of ggplot2 \code{\link[ggplot2]{geom_line}}, default: grey50 (also supports "strata" -> horizontal lines are grey50, vertical ones are the same colour as the respective strata)}
\item{alpha}{numeric value between 0 and 1 as described in the aesthetics of ggplot2 \code{\link[ggplot2]{geom_line}}, default: 1}
}
\value{
Lines indicating the quantiles overlayed on a visR ggplot
}
\description{
Method to add quantile lines to a plot.
}
\examples{
library(visR)
adtte \%>\%
estimate_KM("SEX") \%>\%
visr() \%>\%
add_quantiles()
adtte \%>\%
estimate_KM("SEX") \%>\%
visr() \%>\%
add_quantiles(quantiles = c(0.25, 0.50))
adtte \%>\%
estimate_KM("SEX") \%>\%
visr() \%>\%
add_quantiles(
quantiles = c(0.25, 0.50),
linetype = "solid",
linecolour = "grey"
)
adtte \%>\%
estimate_KM("SEX") \%>\%
visr() \%>\%
add_quantiles(
quantiles = c(0.25, 0.50),
linetype = "mixed",
linecolour = "strata"
)
}
|
library(shiny)

# censusVis: a small shiny app that lets the user pick a demographic
# variable and a percentage range (the selections are simply echoed back).

# Define UI ----
ui <- fluidPage(
  titlePanel("censusVis"),
  sidebarLayout(
    sidebarPanel(
      # NOTE(fix): corrected the "demographhic" typo in the user-facing text.
      helpText("Create demographic maps with information from the 2010 US Census."),
      selectInput("var", label = "Choose a variable to display",
                  choices = c("Percent White", "Percent Black", "Percent Hispanic", "Percent Asian"),
                  selected = "Percent White"),
      sliderInput("range", label = "Range of interest:",
                  min = 0, max = 100, value = c(0, 100))
    ),
    mainPanel(
      textOutput("selected_var"),
      textOutput("min_max")
    )
  )
)

# Define server logic ----
server <- function(input, output) {
  # Echo the currently selected variable.
  output$selected_var <- renderText({
    paste("You have selected", input$var)
  })
  # Echo the endpoints of the selected range.
  output$min_max <- renderText({
    paste("You have chosen a range that goes from", input$range[1], "to", input$range[2])
  })
}

# Run the app ----
shinyApp(ui = ui, server = server)
|
/shiny/App-4/app.R
|
no_license
|
rguitar96/big-data-2019
|
R
| false
| false
| 957
|
r
|
library(shiny)

# censusVis: a small shiny app that lets the user pick a demographic
# variable and a percentage range (the selections are simply echoed back).

# Define UI ----
ui <- fluidPage(
  titlePanel("censusVis"),
  sidebarLayout(
    sidebarPanel(
      # NOTE(fix): corrected the "demographhic" typo in the user-facing text.
      helpText("Create demographic maps with information from the 2010 US Census."),
      selectInput("var", label = "Choose a variable to display",
                  choices = c("Percent White", "Percent Black", "Percent Hispanic", "Percent Asian"),
                  selected = "Percent White"),
      sliderInput("range", label = "Range of interest:",
                  min = 0, max = 100, value = c(0, 100))
    ),
    mainPanel(
      textOutput("selected_var"),
      textOutput("min_max")
    )
  )
)

# Define server logic ----
server <- function(input, output) {
  # Echo the currently selected variable.
  output$selected_var <- renderText({
    paste("You have selected", input$var)
  })
  # Echo the endpoints of the selected range.
  output$min_max <- renderText({
    paste("You have chosen a range that goes from", input$range[1], "to", input$range[2])
  })
}

# Run the app ----
shinyApp(ui = ui, server = server)
|
gather_daily_MG <- function() {
  # Build a daily panel classifying industries into momentum terciles
  # ("Losers_MG" / "2" / "Winners_MG") based on mean industry returns.
  #
  # Returns: the filtered data with an `ind.class` factor column.
  x <- gather_data()

  # Industry return: mean of the 6-month returns (ret.6.0.m) of all stocks
  # in each industry on each date.
  # NOTE(fix): the original grouped by `date()` (the base R time function)
  # instead of the `date` column, passed `na.rm = TRUE` as an argument to
  # mutate() rather than mean(), and piped directly into an assignment
  # (`%>% x <- filter(...)`), which is a syntax error.
  x <- x %>%
    group_by(m.ind, date) %>%
    mutate(ind_ret = mean(ret.6.0.m, na.rm = TRUE)) %>%
    ungroup()

  # Keep only top-1500 stocks with a defined industry return.
  x <- filter(x, top.1500 & !is.na(ind_ret))

  ## Create ind.class: terciles of industry return within each date.
  daily <- x %>% group_by(date) %>%
    mutate(ind.class = as.character(ntile(ind_ret, n = 3))) %>%
    mutate(ind.class = ifelse(ind.class == "1", "Losers_MG", ind.class)) %>%
    mutate(ind.class = ifelse(ind.class == "3", "Winners_MG", ind.class)) %>%
    mutate(ind.class = factor(ind.class, levels = c("Losers_MG", "2", "Winners_MG"))) %>%
    ungroup()
  ## ggplot(data = daily, aes(sd.class, log(sd.252.0.d))) + geom_violin() + facet_wrap(~ year)
  return(daily)
}
|
/R/gather_daily_MG.R
|
no_license
|
randomgambit/ReplicationProject
|
R
| false
| false
| 947
|
r
|
gather_daily_MG <- function() {
  # Build a daily panel classifying industries into momentum terciles
  # ("Losers_MG" / "2" / "Winners_MG") based on mean industry returns.
  #
  # Returns: the filtered data with an `ind.class` factor column.
  x <- gather_data()

  # Industry return: mean of the 6-month returns (ret.6.0.m) of all stocks
  # in each industry on each date.
  # NOTE(fix): the original grouped by `date()` (the base R time function)
  # instead of the `date` column, passed `na.rm = TRUE` as an argument to
  # mutate() rather than mean(), and piped directly into an assignment
  # (`%>% x <- filter(...)`), which is a syntax error.
  x <- x %>%
    group_by(m.ind, date) %>%
    mutate(ind_ret = mean(ret.6.0.m, na.rm = TRUE)) %>%
    ungroup()

  # Keep only top-1500 stocks with a defined industry return.
  x <- filter(x, top.1500 & !is.na(ind_ret))

  ## Create ind.class: terciles of industry return within each date.
  daily <- x %>% group_by(date) %>%
    mutate(ind.class = as.character(ntile(ind_ret, n = 3))) %>%
    mutate(ind.class = ifelse(ind.class == "1", "Losers_MG", ind.class)) %>%
    mutate(ind.class = ifelse(ind.class == "3", "Winners_MG", ind.class)) %>%
    mutate(ind.class = factor(ind.class, levels = c("Losers_MG", "2", "Winners_MG"))) %>%
    ungroup()
  ## ggplot(data = daily, aes(sd.class, log(sd.252.0.d))) + geom_violin() + facet_wrap(~ year)
  return(daily)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hc-charts.R, R/shortcuts.R
\name{hcdensity}
\alias{hc_add_series_density}
\alias{hcdensity}
\title{Shortcut to create a density plot}
\usage{
hcdensity(x, area = FALSE, ...)
hc_add_series_density(hc, x, area = FALSE, ...)
}
\arguments{
\item{x}{A numeric vector}
\item{area}{A boolean value to show or not the area}
\item{...}{Additional shared arguments for the data series
(\url{http://api.highcharts.com/highcharts#series})}
\item{hc}{A \code{highchart} \code{htmlwidget} object.}
}
\description{
Shortcut to create a density plot
}
\examples{
highchart() \%>\%
hc_add_series_density(rnorm(1000)) \%>\%
hc_add_series_density(rexp(1000), area = TRUE)
}
|
/man/hc_add_series_density.Rd
|
no_license
|
SpencerVaradi/highcharter
|
R
| false
| true
| 774
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hc-charts.R, R/shortcuts.R
\name{hcdensity}
\alias{hc_add_series_density}
\alias{hcdensity}
\title{Shortcut to create a density plot}
\usage{
hcdensity(x, area = FALSE, ...)
hc_add_series_density(hc, x, area = FALSE, ...)
}
\arguments{
\item{x}{A numeric vector}
\item{area}{A boolean value to show or not the area}
\item{...}{Additional shared arguments for the data series
(\url{http://api.highcharts.com/highcharts#series})}
\item{hc}{A \code{highchart} \code{htmlwidget} object.}
}
\description{
Shortcut to create a density plot
}
\examples{
highchart() \%>\%
hc_add_series_density(rnorm(1000)) \%>\%
hc_add_series_density(rexp(1000), area = TRUE)
}
|
######################################################
#### Reed Frost Model Inference ######################
rf_fit <- function(n0, n1, n2, niters, inits, priors) {
  # Gibbs sampler for the Reed-Frost chain-binomial model.
  #
  # Args:
  #   n0, n1, n2: observed outbreak counts entering the full conditionals.
  #   niters:     number of Gibbs iterations.
  #   inits:      list(q = ..., n21 = ...) with initial values.
  #   priors:     list(alpha = ..., delta = ...) Beta prior parameters for q.
  # Returns: data.frame with niters + 1 rows (initial state plus draws) and
  #          columns q and n21.
  #
  # NOTE(fix): vectors are now preallocated to length niters + 1 (the
  # original allocated niters and grew by one element in the last
  # iteration), and the n21 draw conditions on the freshly drawn q[i + 1];
  # the original used the stale q[i], which is not a valid Gibbs step (the
  # companion rf_fit1() already conditions on the updated q).
  q <- numeric(niters + 1)
  q[1] <- inits$q
  n21 <- numeric(niters + 1)
  n21[1] <- inits$n21
  for (i in seq_len(niters)) {
    # Full conditional for q: Beta posterior.
    q[i + 1] <- rbeta(n = 1,
                      2 * (n0 + n1) + n21[i] + priors$alpha,
                      n1 + 2 * n2 + priors$delta)
    # Full conditional for n21: Binomial(n2, 2q / (2q + 1)) at the current q.
    n21[i + 1] <- rbinom(n = 1, n2, 2 * q[i + 1] / (2 * q[i + 1] + 1))
  }
  return(data.frame(q = q, n21 = n21))
}
rf_fit1 <- function(n0, n1, n2, niters, inits, priors) {
  # Variant of the Reed-Frost Gibbs sampler that conditions each draw on the
  # most recent value of the other parameter.
  #
  # Args and return value as for rf_fit(): a data.frame with columns q and
  # n21 and niters + 1 rows (initial state plus one row per iteration).
  #
  # NOTE(fix): the original grew the output with rbind() inside the loop,
  # which copies the whole matrix every iteration (O(n^2)); the result is
  # now preallocated. The sequence of random draws is unchanged.
  out <- matrix(0, nrow = niters + 1, ncol = 2,
                dimnames = list(NULL, c("q", "n21")))
  q <- inits$q
  n21 <- inits$n21
  out[1, ] <- c(q, n21)
  for (i in seq_len(niters)) {
    # Full conditional for q given the current n21.
    q <- rbeta(n = 1,
               2 * (n0 + n1) + n21 + priors$alpha,
               n1 + 2 * n2 + priors$delta)
    # Full conditional for n21 given the freshly drawn q.
    n21 <- rbinom(n = 1, n2,
                  2 * q / (2 * q + 1))
    out[i + 1, ] <- c(q, n21)
  }
  return(as.data.frame(out))
}
# Driver script: run both Reed-Frost samplers on the observed data and
# compare them with trace plots, ECDFs, and marginal density/histograms.
# Weak Beta(0.01, 0.01) prior on q and starting values for the chains.
priors = list(alpha = 0.01, delta = 0.01)
inits = list(q = 0.2, n21 = 104)
# Run both samplers for 10000 iterations on the same data.
df <- rf_fit(n0 = 14, n1 = 25, n2 = 104 + 257, 10000, inits, priors)
df1 <- rf_fit1(n0 = 14, n1 = 25, n2 = 104 + 257, 10000, inits, priors)
# Trace plots for both chains; red line marks the initial value.
par(mfrow = c(2, 2))
plot(df$q, type = "l", xlab = "iter", ylab = "q")
abline(h = inits$q, col = "red")
plot(df$n21, type = "l", xlab = "iter", ylab = "n21")
abline(h = inits$n21, col = "red")
plot(df1$q, type = "l", xlab = "iter", ylab = "q")
abline(h = inits$q, col = "red")
plot(df1$n21, type = "l", xlab = "iter", ylab = "n21")
abline(h = inits$n21, col = "red")
# Write the summary figure to PDF.
pdf("rf_results.pdf", width = 8, height = 5)
par(mfrow = c(2, 2), mar=c(4,4,1,1)+0.1)
# Compare ECDFs of the first and second halves of the chain as an informal
# convergence check (the halves should overlap if the chain has mixed).
n_sim <- length(df$q)
n_mid <- floor(n_sim / 2)
plot(ecdf(df$q[1:n_mid]), main = "", xlab = "q")
lines(ecdf(df$q[(n_mid + 1):n_sim]), col = "blue", lty = "dashed")
plot(ecdf(df$n21[1:n_mid]), main = "", xlab = "n21", pch = ".")
lines(ecdf(df$n21[(n_mid + 1):n_sim]), col = "blue", pch = ".")
# Marginal posterior of q and n21; red line marks the initial value.
plot(density(df$q), xlab = "q", ylab = "density", main = "")
abline(v = inits$q, col = "red")
hist(df$n21, xlab = "n21", main = "")
abline(v = inits$n21, col = "red")
dev.off()
|
/05-Epidemic-Models/R Code/Reed_Frost_inference.R
|
no_license
|
chicas-spatstat-reading-group/events
|
R
| false
| false
| 2,073
|
r
|
######################################################
#### Reed Frost Model Inference ######################
rf_fit <- function(n0, n1, n2, niters, inits, priors) {
  # Gibbs sampler for the Reed-Frost chain-binomial model.
  #
  # Args:
  #   n0, n1, n2: observed outbreak counts entering the full conditionals.
  #   niters:     number of Gibbs iterations.
  #   inits:      list(q = ..., n21 = ...) with initial values.
  #   priors:     list(alpha = ..., delta = ...) Beta prior parameters for q.
  # Returns: data.frame with niters + 1 rows (initial state plus draws) and
  #          columns q and n21.
  #
  # NOTE(fix): vectors are now preallocated to length niters + 1 (the
  # original allocated niters and grew by one element in the last
  # iteration), and the n21 draw conditions on the freshly drawn q[i + 1];
  # the original used the stale q[i], which is not a valid Gibbs step (the
  # companion rf_fit1() already conditions on the updated q).
  q <- numeric(niters + 1)
  q[1] <- inits$q
  n21 <- numeric(niters + 1)
  n21[1] <- inits$n21
  for (i in seq_len(niters)) {
    # Full conditional for q: Beta posterior.
    q[i + 1] <- rbeta(n = 1,
                      2 * (n0 + n1) + n21[i] + priors$alpha,
                      n1 + 2 * n2 + priors$delta)
    # Full conditional for n21: Binomial(n2, 2q / (2q + 1)) at the current q.
    n21[i + 1] <- rbinom(n = 1, n2, 2 * q[i + 1] / (2 * q[i + 1] + 1))
  }
  return(data.frame(q = q, n21 = n21))
}
rf_fit1 <- function(n0, n1, n2, niters, inits, priors) {
  # Variant of the Reed-Frost Gibbs sampler that conditions each draw on the
  # most recent value of the other parameter.
  #
  # Args and return value as for rf_fit(): a data.frame with columns q and
  # n21 and niters + 1 rows (initial state plus one row per iteration).
  #
  # NOTE(fix): the original grew the output with rbind() inside the loop,
  # which copies the whole matrix every iteration (O(n^2)); the result is
  # now preallocated. The sequence of random draws is unchanged.
  out <- matrix(0, nrow = niters + 1, ncol = 2,
                dimnames = list(NULL, c("q", "n21")))
  q <- inits$q
  n21 <- inits$n21
  out[1, ] <- c(q, n21)
  for (i in seq_len(niters)) {
    # Full conditional for q given the current n21.
    q <- rbeta(n = 1,
               2 * (n0 + n1) + n21 + priors$alpha,
               n1 + 2 * n2 + priors$delta)
    # Full conditional for n21 given the freshly drawn q.
    n21 <- rbinom(n = 1, n2,
                  2 * q / (2 * q + 1))
    out[i + 1, ] <- c(q, n21)
  }
  return(as.data.frame(out))
}
}
# Driver script: run both Reed-Frost samplers on the observed data and
# compare them with trace plots, ECDFs, and marginal density/histograms.
# Weak Beta(0.01, 0.01) prior on q and starting values for the chains.
priors = list(alpha = 0.01, delta = 0.01)
inits = list(q = 0.2, n21 = 104)
# Run both samplers for 10000 iterations on the same data.
df <- rf_fit(n0 = 14, n1 = 25, n2 = 104 + 257, 10000, inits, priors)
df1 <- rf_fit1(n0 = 14, n1 = 25, n2 = 104 + 257, 10000, inits, priors)
# Trace plots for both chains; red line marks the initial value.
par(mfrow = c(2, 2))
plot(df$q, type = "l", xlab = "iter", ylab = "q")
abline(h = inits$q, col = "red")
plot(df$n21, type = "l", xlab = "iter", ylab = "n21")
abline(h = inits$n21, col = "red")
plot(df1$q, type = "l", xlab = "iter", ylab = "q")
abline(h = inits$q, col = "red")
plot(df1$n21, type = "l", xlab = "iter", ylab = "n21")
abline(h = inits$n21, col = "red")
# Write the summary figure to PDF.
pdf("rf_results.pdf", width = 8, height = 5)
par(mfrow = c(2, 2), mar=c(4,4,1,1)+0.1)
# Compare ECDFs of the first and second halves of the chain as an informal
# convergence check (the halves should overlap if the chain has mixed).
n_sim <- length(df$q)
n_mid <- floor(n_sim / 2)
plot(ecdf(df$q[1:n_mid]), main = "", xlab = "q")
lines(ecdf(df$q[(n_mid + 1):n_sim]), col = "blue", lty = "dashed")
plot(ecdf(df$n21[1:n_mid]), main = "", xlab = "n21", pch = ".")
lines(ecdf(df$n21[(n_mid + 1):n_sim]), col = "blue", pch = ".")
# Marginal posterior of q and n21; red line marks the initial value.
plot(density(df$q), xlab = "q", ylab = "density", main = "")
abline(v = inits$q, col = "red")
hist(df$n21, xlab = "n21", main = "")
abline(v = inits$n21, col = "red")
dev.off()
|
best <- function(state, outcome) {
  # Return the name of the hospital with the lowest 30-day mortality rate
  # for `outcome` in `state`.
  #
  # Args:
  #   state:   two-letter state abbreviation (column 7 of the data set).
  #   outcome: one of "heart attack", "heart failure", "pneumonia".
  # Returns: the hospital name (column 2) with the minimum rate.
  # Raises:  "invalid state" / "invalid outcome" errors, checked in that
  #          order (matching the original's branch order).
  data <- read.csv("C:/users/cclerc/my documents/github/datasciencecoursera/outcome-of-care-measures.csv", colClasses = "character")

  # Mortality-rate column for each supported outcome (replaces the three
  # near-identical copy-pasted branches of the original).
  outcome_cols <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)

  statelist <- data[, 7]
  if (!(state %in% statelist)) {
    stop("invalid state")
  }
  if (!(outcome %in% names(outcome_cols))) {
    stop("invalid outcome")
  }

  newdata <- subset(data, data[, 7] == state)
  # NOTE(fix): the file is read with colClasses = "character", so the rate
  # column must be coerced to numeric before which.min(); the original
  # applied which.min() directly to a character vector. Entries such as
  # "Not Available" become NA (warnings suppressed) and are ignored by
  # which.min().
  rates <- suppressWarnings(as.numeric(newdata[, outcome_cols[[outcome]]]))
  newdata[which.min(rates), 2]
}
|
/best.R
|
no_license
|
clercc/datasciencecoursera
|
R
| false
| false
| 1,136
|
r
|
best <- function(state, outcome) {
  # Return the name of the hospital with the lowest 30-day mortality rate
  # for `outcome` in `state`.
  #
  # Args:
  #   state:   two-letter state abbreviation (column 7 of the data set).
  #   outcome: one of "heart attack", "heart failure", "pneumonia".
  # Returns: the hospital name (column 2) with the minimum rate.
  # Raises:  "invalid state" / "invalid outcome" errors, checked in that
  #          order (matching the original's branch order).
  data <- read.csv("C:/users/cclerc/my documents/github/datasciencecoursera/outcome-of-care-measures.csv", colClasses = "character")

  # Mortality-rate column for each supported outcome (replaces the three
  # near-identical copy-pasted branches of the original).
  outcome_cols <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)

  statelist <- data[, 7]
  if (!(state %in% statelist)) {
    stop("invalid state")
  }
  if (!(outcome %in% names(outcome_cols))) {
    stop("invalid outcome")
  }

  newdata <- subset(data, data[, 7] == state)
  # NOTE(fix): the file is read with colClasses = "character", so the rate
  # column must be coerced to numeric before which.min(); the original
  # applied which.min() directly to a character vector. Entries such as
  # "Not Available" become NA (warnings suppressed) and are ignored by
  # which.min().
  rates <- suppressWarnings(as.numeric(newdata[, outcome_cols[[outcome]]]))
  newdata[which.min(rates), 2]
}
|
## Convert a PC-Axis (.px) file to CSV.
## Usage: Rscript px_to_csv.R [inputFile] [outputFile]
library(pxR)

cli_args <- commandArgs(trailingOnly = TRUE)
print(cli_args[1])
print(cli_args[2])

# Read the .px file; the listed quoted strings are treated as missing values.
px_object <- read.px(cli_args[1], encoding='iso88591',
                     na.strings = c('"."','".."','"..."','"...."','"......"','":"'))
px_frame <- as.data.frame(px_object)
write.csv(px_frame, file = cli_args[2])
|
/px_to_csv.R
|
no_license
|
christophebertrand/votation-visualisation
|
R
| false
| false
| 355
|
r
|
## Convert a PC-Axis (.px) file to CSV.
## Usage: Rscript px_to_csv.R [inputFile] [outputFile]
library(pxR)

cli_args <- commandArgs(trailingOnly = TRUE)
print(cli_args[1])
print(cli_args[2])

# Read the .px file; the listed quoted strings are treated as missing values.
px_object <- read.px(cli_args[1], encoding='iso88591',
                     na.strings = c('"."','".."','"..."','"...."','"......"','":"'))
px_frame <- as.data.frame(px_object)
write.csv(px_frame, file = cli_args[2])
|
#> options(digits=20);
#> set.seed(12345);
#> RNGkind()
#[1] "Mersenne-Twister" "Inversion" "Rejection"
#> y=rgamma(100,1,1/3);
#> data.frame(y)
# y
1 2.99922719042681640
2 3.38251121850781367
3 1.27709306589877802
4 1.54945455427967937
5 10.43916639837829585
6 0.94036173807023304
7 0.95776622441070036
8 7.09341479352650417
9 0.69887475675245692
10 2.38924418866376875
11 2.80651462649913341
12 0.33035491764854646
13 1.12357546101296157
14 4.81938818298045568
15 2.20061554673705473
16 3.60968560777983960
17 6.17766916608044880
18 0.44454233867456716
19 3.30452369920126054
20 3.07953139542233378
21 1.17544506260162773
22 0.44099436597621267
23 8.99637492975478459
24 6.96159921917923530
25 2.08788103371894262
26 2.72291713360998733
27 0.89128263033845601
28 2.21089003576894738
29 2.46512190573771051
30 0.10727716794309386
31 7.69837889586612967
32 2.99220871268579547
33 6.59477120315059562
34 5.50874359248307144
35 4.74038450761057462
36 0.29321097258886580
37 3.32448397836911891
38 3.75651284741210034
39 9.50134802276910584
40 7.32866908483815926
41 3.81939320982120467
42 0.20584569364400010
43 8.84162741827183574
44 2.76516069193812264
45 2.28616598727192821
46 7.46179607015868918
47 1.48112604543816961
48 4.16593063322184953
49 3.76478558138724484
50 0.27241112394998002
51 1.15870656632378033
52 3.18145372905617840
53 4.52895298901664045
54 0.92381602686318387
55 0.16999347472983328
56 8.07526000742228867
57 3.26434817137673594
58 0.91785869193864467
59 5.97820534658493674
60 4.19398772636775075
61 1.05595259329679658
62 8.17324550685169626
63 0.78396741038888085
64 0.12396512961360098
65 0.61994262784241205
66 5.49261290486086917
67 3.42975023385675026
68 10.58297780246680730
69 0.91180743756700211
70 1.15339470279901213
71 2.25854613763127166
72 1.50552016328836213
73 1.90965567738712694
74 2.79051673541505751
75 1.58939382395709217
76 5.90447115521351318
77 0.92470478199791506
78 5.61289962229240125
79 2.26122291865845160
80 6.50551392455344857
81 0.73949782843903900
82 7.46491593291353617
83 0.44457064833929971
84 5.25950100838691537
85 4.63991844454636926
86 3.71574349465770215
87 5.42281364160530721
88 2.86471006431292352
89 6.38168350684490537
90 2.45392630260535505
91 2.64269443861369036
92 0.36557382366492164
93 1.01489247769163526
94 0.31657748186694779
95 1.64721104181197564
96 0.43736393064466916
97 2.60038998845208580
98 1.02663618850657334
99 0.65213920562624139
100 0.33961121757470114
|
/src/lib/distributions/gamma/__test__/fixture-generation/rgamma1.R
|
permissive
|
R-js/libRmath.js
|
R
| false
| false
| 2,684
|
r
|
#> options(digits=20);
#> set.seed(12345);
#> RNGkind()
#[1] "Mersenne-Twister" "Inversion" "Rejection"
#> y=rgamma(100,1,1/3);
#> data.frame(y)
# y
1 2.99922719042681640
2 3.38251121850781367
3 1.27709306589877802
4 1.54945455427967937
5 10.43916639837829585
6 0.94036173807023304
7 0.95776622441070036
8 7.09341479352650417
9 0.69887475675245692
10 2.38924418866376875
11 2.80651462649913341
12 0.33035491764854646
13 1.12357546101296157
14 4.81938818298045568
15 2.20061554673705473
16 3.60968560777983960
17 6.17766916608044880
18 0.44454233867456716
19 3.30452369920126054
20 3.07953139542233378
21 1.17544506260162773
22 0.44099436597621267
23 8.99637492975478459
24 6.96159921917923530
25 2.08788103371894262
26 2.72291713360998733
27 0.89128263033845601
28 2.21089003576894738
29 2.46512190573771051
30 0.10727716794309386
31 7.69837889586612967
32 2.99220871268579547
33 6.59477120315059562
34 5.50874359248307144
35 4.74038450761057462
36 0.29321097258886580
37 3.32448397836911891
38 3.75651284741210034
39 9.50134802276910584
40 7.32866908483815926
41 3.81939320982120467
42 0.20584569364400010
43 8.84162741827183574
44 2.76516069193812264
45 2.28616598727192821
46 7.46179607015868918
47 1.48112604543816961
48 4.16593063322184953
49 3.76478558138724484
50 0.27241112394998002
51 1.15870656632378033
52 3.18145372905617840
53 4.52895298901664045
54 0.92381602686318387
55 0.16999347472983328
56 8.07526000742228867
57 3.26434817137673594
58 0.91785869193864467
59 5.97820534658493674
60 4.19398772636775075
61 1.05595259329679658
62 8.17324550685169626
63 0.78396741038888085
64 0.12396512961360098
65 0.61994262784241205
66 5.49261290486086917
67 3.42975023385675026
68 10.58297780246680730
69 0.91180743756700211
70 1.15339470279901213
71 2.25854613763127166
72 1.50552016328836213
73 1.90965567738712694
74 2.79051673541505751
75 1.58939382395709217
76 5.90447115521351318
77 0.92470478199791506
78 5.61289962229240125
79 2.26122291865845160
80 6.50551392455344857
81 0.73949782843903900
82 7.46491593291353617
83 0.44457064833929971
84 5.25950100838691537
85 4.63991844454636926
86 3.71574349465770215
87 5.42281364160530721
88 2.86471006431292352
89 6.38168350684490537
90 2.45392630260535505
91 2.64269443861369036
92 0.36557382366492164
93 1.01489247769163526
94 0.31657748186694779
95 1.64721104181197564
96 0.43736393064466916
97 2.60038998845208580
98 1.02663618850657334
99 0.65213920562624139
100 0.33961121757470114
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ISODistance.R
\docType{class}
\name{ISODistance}
\alias{ISODistance}
\title{ISODistance}
\format{\code{\link{R6Class}} object.}
\usage{
ISODistance
}
\value{
Object of \code{\link{R6Class}} for modelling an ISO Distance measure
}
\description{
ISODistance
}
\section{Fields}{
\describe{
\item{\code{value}}{}
}}
\section{Methods}{
\describe{
\item{\code{new(xml,value, uom, useUomURI)}}{
This method is used to instantiate an ISODistance. The \code{uom} argument represents
the symbol of unit of measure used. The parameter \code{useUomURI} can be used to
set the uom as URI, its default value is \code{FALSE}.
}
}
}
\references{
ISO/TS 19103:2005 Geographic information -- Conceptual schema language
}
\author{
Emmanuel Blondel <emmanuel.blondel1@gmail.com>
}
\keyword{ISO}
\keyword{distance}
\keyword{length}
\keyword{measure}
|
/man/ISODistance.Rd
|
no_license
|
65MO/geometa
|
R
| false
| true
| 924
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ISODistance.R
\docType{class}
\name{ISODistance}
\alias{ISODistance}
\title{ISODistance}
\format{\code{\link{R6Class}} object.}
\usage{
ISODistance
}
\value{
Object of \code{\link{R6Class}} for modelling an ISO Distance measure
}
\description{
ISODistance
}
\section{Fields}{
\describe{
\item{\code{value}}{}
}}
\section{Methods}{
\describe{
\item{\code{new(xml,value, uom, useUomURI)}}{
This method is used to instantiate an ISODistance. The \code{uom} argument represents
the symbol of unit of measure used. The parameter \code{useUomURI} can be used to
set the uom as URI, its default value is \code{FALSE}.
}
}
}
\references{
ISO/TS 19103:2005 Geographic information -- Conceptual schema language
}
\author{
Emmanuel Blondel <emmanuel.blondel1@gmail.com>
}
\keyword{ISO}
\keyword{distance}
\keyword{length}
\keyword{measure}
|
/전역강.step06_function/전역강.D_func4.r
|
no_license
|
SangAu124/R_language
|
R
| false
| false
| 344
|
r
| ||
\name{ENMeval-package}
\alias{ENMeval-package}
\alias{ENMeval}
\docType{package}
\title{ Automated runs and evaluations of ecological niche models }
\description{Automatically partitions data into bins for model training and testing, executes ecological niche models (ENMs) across a range of user-defined settings, and calculates evaluation metrics to help achieve a balance between goodness-of-fit and model complexity.}
\details{
\tabular{ll}{
Package: \tab ENMeval\cr
Type: \tab Package\cr
Version: \tab 0.1.0\cr
Date: \tab 2014-08-25\cr
License: \tab GNU 3.0\cr
}
The \pkg{ENMeval} package (Muscarella \emph{et al.} 2014) (1) automatically partitions data into training and testing bins using one of six methods (including several options for spatially independent partitions as well as user-defined bins), (2) executes a series of ENMs using Maxent (Phillips \emph{et al.} 2006) with a variety of user-defined settings (i.e., feature classes and regularization multipliers), conducting \emph{k}-fold cross validation, and (3) calculates multiple evaluation metrics to aid in selecting model settings that balance model goodness-of-fit and complexity (i.e., "model tuning" or "smoothing").
\code{\link{ENMevaluate}} is the primary function of the \pkg{ENMeval} package, and multiple other functions highlighted below are called when it is run. The six options for partitioning occurrence data into training and testing (i.e., calibration and evaluation) bins are: \emph{n}-1 jackknife, random \emph{k}-fold, user-specified bins, and three explicit methods of masked geographically structured \emph{k}-fold partitioning (see: \code{\link{get.evaluation.bins}}). After model training, these bins are used to calculate five metrics of model performance for each combination of settings: model discrimination (AUC of test localities), the difference between training and testing AUC, two different threshold-based omission rates, and the small sample-size corrected version of the Akaike information criterion (AICc), the latter using the unpartitioned dataset. A model prediction (as a raster layer) using the full (unpartitioned) dataset is generated for each combination of feature class and regularization multiplier settings. Similarity of these models in geographic space (i.e., "niche overlap") can be calculated to better understand how model settings change predictions (see \code{\link{calc.niche.overlap}}). The results of \code{ENMevaluate} are returned as an object of class \code{\link{ENMevaluation-class}}. A basic plotting function (\code{\link{eval.plot}}) can be used to visualize how evaluation metrics depend on model settings.
}
\note{
Currently, \pkg{ENMeval} only implements the Maxent algorithm, but we eventually plan to expand it to work with other algorithms. All calculations are based on the raw Maxent output (i.e., \emph{not} logistic or cumulative transformations) and users can choose whether to use 'clamping' (see Maxent documentation for details on this option). Additionally, Maxent models are run with the arguments: \code{noaddsamplestobackground} and \code{noremoveDuplicates}. Users should consult Maxent documentation (Phillips \emph{et al.} 2006) and other references (e.g., Phillips and Dudik 2008) for more information on these options. We note that interested users can edit the source code of \code{ENMeval} (in particular, the \code{\link{make.args}} and \code{\link{tuning}} functions) if they desire to change these or other options.
\code{ENMevaluate} directly uses several functions from the \pkg{dismo} package (Hijmans \emph{et al.} 2011), the most important of which is the \code{maxent} function that runs the Maxent algorithm (Phillips \emph{et al.} 2006) in Java. Before running this command, the user must first download Maxent from \href{http://www.cs.princeton.edu/~schapire/maxent/}{this website}. Then, place the file 'maxent.jar' in the 'java' folder of the \pkg{dismo} package. The user can locate that folder by typing: \code{system.file("java", package="dismo")}. For additional details, users should consult the documentation of the \pkg{dismo} package.
}
\author{
Robert Muscarella, Peter J. Galante, Mariano Soley-Guardia, Robert A. Boria, Jamie M. Kass, Maria Uriarte and Robert P. Anderson
Maintainer: Robert Muscarella <bob.muscarella@gmail.com>
}
\references{
Hijmans, R. J., Phillips, S., Leathwick, J. and Elith, J. 2011. dismo package for R. Available online at: \url{http://cran.r-project.org/web/packages/dismo/index.html}.
Muscarella, R., Galante, P. J., Soley-Guardia, M., Boria, R. A., Kass, J. M., Uriarte, M., and Anderson, R. P. 2014. ENMeval: An R package for conducting spatially independent evaluations and estimating optimal model complexity for Maxent ecological niche models. \emph{Methods in Ecology and Evolution}, \bold{5}: 1198-1205.
Phillips, S. J., Anderson, R. P., and Schapire, R. E. 2006. Maximum entropy modeling of species geographic distributions. \emph{Ecological Modelling}, \bold{190}: 231-259.
Phillips, S. J., and Dudik, M. 2008. Modeling of species distributions with Maxent: new extensions and a comprehensive evaluation. \emph{Ecography}, \bold{31}: 161-175.
}
\keyword{ niche }
\keyword{ ENM }
\keyword{ SDM }
\seealso{
\code{maxent} in the \pkg{dismo} package
}
|
/man/ENMeval-package.Rd
|
no_license
|
fdzul/ENMeval
|
R
| false
| false
| 5,302
|
rd
|
\name{ENMeval-package}
\alias{ENMeval-package}
\alias{ENMeval}
\docType{package}
\title{ Automated runs and evaluations of ecological niche models }
\description{Automatically partitions data into bins for model training and testing, executes ecological niche models (ENMs) across a range of user-defined settings, and calculates evaluation metrics to help achieve a balance between goodness-of-fit and model complexity.}
\details{
\tabular{ll}{
Package: \tab ENMeval\cr
Type: \tab Package\cr
Version: \tab 0.1.0\cr
Date: \tab 2014-08-25\cr
License: \tab GPL-3\cr
}
The \pkg{ENMeval} package (Muscarella \emph{et al.} 2014) (1) automatically partitions data into training and testing bins using one of six methods (including several options for spatially independent partitions as well as user-defined bins), (2) executes a series of ENMs using Maxent (Phillips \emph{et al.} 2006) with a variety of user-defined settings (i.e., feature classes and regularization multipliers), conducting \emph{k}-fold cross validation, and (3) calculates multiple evaluation metrics to aid in selecting model settings that balance model goodness-of-fit and complexity (i.e., "model tuning" or "smoothing").
\code{\link{ENMevaluate}} is the primary function of the \pkg{ENMeval} package, and multiple other functions highlighted below are called when it is run. The six options for partitioning occurrence data into training and testing (i.e., calibration and evaluation) bins are: \emph{n}-1 jackknife, random \emph{k}-fold, user-specified bins, and three explicit methods of masked geographically structured \emph{k}-fold partitioning (see: \code{\link{get.evaluation.bins}}). After model training, these bins are used to calculate five metrics of model performance for each combination of settings: model discrimination (AUC of test localities), the difference between training and testing AUC, two different threshold-based omission rates, and the small sample-size corrected version of the Akaike information criterion (AICc), the latter using the unpartitioned dataset. A model prediction (as a raster layer) using the full (unpartitioned) dataset is generated for each combination of feature class and regularization multiplier settings. Similarity of these models in geographic space (i.e., "niche overlap") can be calculated to better understand how model settings change predictions (see \code{\link{calc.niche.overlap}}). The results of \code{ENMevaluate} are returned as an object of class \code{\link{ENMevaluation-class}}. A basic plotting function (\code{\link{eval.plot}}) can be used to visualize how evaluation metrics depend on model settings.
}
\note{
Currently, \pkg{ENMeval} only implements the Maxent algorithm, but we eventually plan to expand it to work with other algorithms. All calculations are based on the raw Maxent output (i.e., \emph{not} logistic or cumulative transformations) and users can choose whether to use 'clamping' (see Maxent documentation for details on this option). Additionally, Maxent models are run with the arguments: \code{noaddsamplestobackground} and \code{noremoveDuplicates}. Users should consult Maxent documentation (Phillips \emph{et al.} 2006) and other references (e.g., Phillips and Dudik 2008) for more information on these options. We note that interested users can edit the source code of \code{ENMeval} (in particular, the \code{\link{make.args}} and \code{\link{tuning}} functions) if they desire to change these or other options.
\code{ENMevaluate} directly uses several functions from the \pkg{dismo} package (Hijmans \emph{et al.} 2011), the most important of which is the \code{maxent} function that runs the Maxent algorithm (Phillips \emph{et al.} 2006) in Java. Before running this command, the user must first download Maxent from \href{http://www.cs.princeton.edu/~schapire/maxent/}{this website}. Then, place the file 'maxent.jar' in the 'java' folder of the \pkg{dismo} package. The user can locate that folder by typing: \code{system.file("java", package="dismo")}. For additional details, users should consult the documentation of the \pkg{dismo} package.
}
\author{
Robert Muscarella, Peter J. Galante, Mariano Soley-Guardia, Robert A. Boria, Jamie M. Kass, Maria Uriarte and Robert P. Anderson
Maintainer: Robert Muscarella <bob.muscarella@gmail.com>
}
\references{
Hijmans, R. J., Phillips, S., Leathwick, J. and Elith, J. 2011. dismo package for R. Available online at: \url{http://cran.r-project.org/web/packages/dismo/index.html}.
Muscarella, R., Galante, P. J., Soley-Guardia, M., Boria, R. A., Kass, J. M., Uriarte, M., and Anderson, R. P. 2014. ENMeval: An R package for conducting spatially independent evaluations and estimating optimal model complexity for Maxent ecological niche models. \emph{Methods in Ecology and Evolution}, \bold{5}: 1198-1205.
Phillips, S. J., Anderson, R. P., and Schapire, R. E. 2006. Maximum entropy modeling of species geographic distributions. \emph{Ecological Modelling}, \bold{190}: 231-259.
Phillips, S. J., and Dudik, M. 2008. Modeling of species distributions with Maxent: new extensions and a comprehensive evaluation. \emph{Ecography}, \bold{31}: 161-175.
}
\keyword{ niche }
\keyword{ ENM }
\keyword{ SDM }
\seealso{
\code{maxent} in the \pkg{dismo} package
}
|
## Getting & Cleaning Data course project: build a tidy dataset from the
## UCI HAR (Human Activity Recognition) smartphone accelerometer data.
## Output: "tidy.txt", one row per subject/activity pair with the mean of
## every retained measurement.
library(reshape2)

f <- "dataset.zip"

## Download the zipped dataset if not already present, then unzip it to
## create the "UCI HAR Dataset" input folder.
## NOTE: the original URL string contained a trailing space inside the
## quotes, which corrupts the request; it has been removed here.
if (!file.exists(f)) {
  url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(url, f, method = "curl")
}
if (!file.exists("UCI HAR Dataset")) {
  unzip(f)
}

## Activity labels (the target variable) and the feature names.
target <- read.table("UCI HAR Dataset/activity_labels.txt")
target[, 2] <- as.character(target[, 2])
features <- read.table("UCI HAR Dataset/features.txt")
features[, 2] <- as.character(features[, 2])

## Keep only the mean and standard-deviation measurements, and clean up
## their names: camel-case "Mean"/"Std", drop punctuation, and strip the
## leading time/frequency-domain prefix (t/f).
measurements_required <- grep("mean|std", features[, 2])
measurements_required.names <- features[measurements_required, 2]
measurements_required.names <- gsub('-mean', 'Mean', measurements_required.names)
measurements_required.names <- gsub('-std', 'Std', measurements_required.names)
measurements_required.names <- gsub('[-()]', '', measurements_required.names)
measurements_required.names <- gsub('^[ft]', '', measurements_required.names)

## Read the train and test sets, selecting only the required measurement
## columns, and attach the subject and activity columns in front.
train_dataset <- read.table("UCI HAR Dataset/train/X_train.txt")[measurements_required]
activities_train <- read.table("UCI HAR Dataset/train/Y_train.txt")
subjects_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
train_dataset <- cbind(subjects_train, activities_train, train_dataset)

test_dataset <- read.table("UCI HAR Dataset/test/X_test.txt")[measurements_required]
activities_test <- read.table("UCI HAR Dataset/test/Y_test.txt")
subjects_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
test_dataset <- cbind(subjects_test, activities_test, test_dataset)

## Combine train and test into the final dataset.
## Target1 = subject id, Target2 = activity (column names kept from the
## original script so the written output is unchanged).
final_dataset <- rbind(train_dataset, test_dataset)
colnames(final_dataset) <- c("Target1", "Target2", measurements_required.names)

## Turn subjects and activities into factors; activities get their
## descriptive labels from activity_labels.txt.
final_dataset$Target2 <- factor(final_dataset$Target2, levels = target[, 1], labels = target[, 2])
final_dataset$Target1 <- as.factor(final_dataset$Target1)

## Melt to long form, then average every variable per subject/activity
## pair, and write the tidy result.
final_dataset.melted <- melt(final_dataset, id = c("Target1", "Target2"))
final_dataset.mean <- dcast(final_dataset.melted, Target1 + Target2 ~ variable, mean)
write.table(final_dataset.mean, "tidy.txt", row.names = FALSE, quote = FALSE)
|
/CleaningDatasetProgrammingAssignment/run_analysi.r
|
no_license
|
anjaney/datasciencecoursera
|
R
| false
| false
| 2,540
|
r
|
## Getting & Cleaning Data course project: build a tidy dataset from the
## UCI HAR (Human Activity Recognition) smartphone accelerometer data.
## Output: "tidy.txt", one row per subject/activity pair with the mean of
## every retained measurement.
library(reshape2)

f <- "dataset.zip"

## Download the zipped dataset if not already present, then unzip it to
## create the "UCI HAR Dataset" input folder.
## NOTE: the original URL string contained a trailing space inside the
## quotes, which corrupts the request; it has been removed here.
if (!file.exists(f)) {
  url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(url, f, method = "curl")
}
if (!file.exists("UCI HAR Dataset")) {
  unzip(f)
}

## Activity labels (the target variable) and the feature names.
target <- read.table("UCI HAR Dataset/activity_labels.txt")
target[, 2] <- as.character(target[, 2])
features <- read.table("UCI HAR Dataset/features.txt")
features[, 2] <- as.character(features[, 2])

## Keep only the mean and standard-deviation measurements, and clean up
## their names: camel-case "Mean"/"Std", drop punctuation, and strip the
## leading time/frequency-domain prefix (t/f).
measurements_required <- grep("mean|std", features[, 2])
measurements_required.names <- features[measurements_required, 2]
measurements_required.names <- gsub('-mean', 'Mean', measurements_required.names)
measurements_required.names <- gsub('-std', 'Std', measurements_required.names)
measurements_required.names <- gsub('[-()]', '', measurements_required.names)
measurements_required.names <- gsub('^[ft]', '', measurements_required.names)

## Read the train and test sets, selecting only the required measurement
## columns, and attach the subject and activity columns in front.
train_dataset <- read.table("UCI HAR Dataset/train/X_train.txt")[measurements_required]
activities_train <- read.table("UCI HAR Dataset/train/Y_train.txt")
subjects_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
train_dataset <- cbind(subjects_train, activities_train, train_dataset)

test_dataset <- read.table("UCI HAR Dataset/test/X_test.txt")[measurements_required]
activities_test <- read.table("UCI HAR Dataset/test/Y_test.txt")
subjects_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
test_dataset <- cbind(subjects_test, activities_test, test_dataset)

## Combine train and test into the final dataset.
## Target1 = subject id, Target2 = activity (column names kept from the
## original script so the written output is unchanged).
final_dataset <- rbind(train_dataset, test_dataset)
colnames(final_dataset) <- c("Target1", "Target2", measurements_required.names)

## Turn subjects and activities into factors; activities get their
## descriptive labels from activity_labels.txt.
final_dataset$Target2 <- factor(final_dataset$Target2, levels = target[, 1], labels = target[, 2])
final_dataset$Target1 <- as.factor(final_dataset$Target1)

## Melt to long form, then average every variable per subject/activity
## pair, and write the tidy result.
final_dataset.melted <- melt(final_dataset, id = c("Target1", "Target2"))
final_dataset.mean <- dcast(final_dataset.melted, Target1 + Target2 ~ variable, mean)
write.table(final_dataset.mean, "tidy.txt", row.names = FALSE, quote = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R
\name{sphericalplot}
\alias{sphericalplot}
\title{Spherical plot of reconstructed outline}
\usage{
sphericalplot(r, ...)
}
\arguments{
\item{r}{Object inheriting \code{\link{ReconstructedOutline}}}
\item{...}{Parameters depending on class of \code{r}}
}
\description{
Spherical plot of reconstructed outline
}
\author{
David Sterratt
}
|
/pkg/retistruct/man/sphericalplot.Rd
|
no_license
|
davidcsterratt/retistruct
|
R
| false
| true
| 426
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R
\name{sphericalplot}
\alias{sphericalplot}
\title{Spherical plot of reconstructed outline}
\usage{
sphericalplot(r, ...)
}
\arguments{
\item{r}{Object inheriting \code{\link{ReconstructedOutline}}}
\item{...}{Parameters depending on class of \code{r}}
}
\description{
Spherical plot of reconstructed outline
}
\author{
David Sterratt
}
|
library(mefa4)
ROOT <- "e:/peter/AB_data_v2016"
#OUTDIR1 <- "e:/peter/AB_data_v2016/out/birds/pred1-josmshf"
#OUTDIRB <- "e:/peter/AB_data_v2016/out/birds/predB-josmshf"
STAGE <- list(veg = 6) # hab=5, hab+clim=6, hab+clim+shf=7
OUTDIR1 <- paste0("e:/peter/josm/2017/stage", STAGE$veg, "/pred1")
OUTDIRB <- paste0("e:/peter/josm/2017/stage", STAGE$veg, "/predB")
load(file.path(ROOT, "out", "kgrid", "kgrid_table.Rdata"))
#source("~/repos/bragging/R/glm_skeleton.R")
#source("~/repos/abmianalytics/R/results_functions.R")
#source("~/repos/bamanalytics/R/makingsense_functions.R")
source("~/repos/abmianalytics/R/maps_functions.R")
regs <- levels(kgrid$LUFxNSR)
kgrid$useN <- !(kgrid$NRNAME %in% c("Grassland", "Parkland") | kgrid$NSRNAME == "Dry Mixedwood")
kgrid$useN[kgrid$NSRNAME == "Dry Mixedwood" & kgrid$POINT_Y > 56.7] <- TRUE
kgrid$useS <- kgrid$NRNAME == "Grassland"
kgrid$useBCR6 <- kgrid$BCRCODE == " 6-BOREAL_TAIGA_PLAINS"
e <- new.env()
#load(file.path(ROOT, "data", "data-full-withrevisit.Rdata"), envir=e)
load(file.path(ROOT, "out", "birds", "data", "data-wrsi.Rdata"), envir=e)
TAX <- droplevels(e$TAX)
TAX$Fn <- droplevels(TAX$English_Name)
levels(TAX$Fn) <- nameAlnum(levels(TAX$Fn), capitalize="mixed", collapse="")
en <- new.env()
load(file.path(ROOT, "out", "birds", "data", "data-josmshf.Rdata"), envir=en)
xnn <- en$DAT
modsn <- en$mods
yyn <- en$YY
BBn <- en$BB
tax <- droplevels(TAX[colnames(yyn),])
rm(e, en)
load(file.path(ROOT, "out", "transitions", paste0(regs[1], ".Rdata")))
Aveg <- rbind(colSums(trVeg))
rownames(Aveg) <- regs[1]
colnames(Aveg) <- colnames(trVeg)
Asoil <- rbind(colSums(trSoil))
rownames(Asoil) <- regs[1]
colnames(Asoil) <- colnames(trSoil)
for (i in 2:length(regs)) {
cat(regs[i], "\n");flush.console()
load(file.path(ROOT, "out", "transitions", paste0(regs[i], ".Rdata")))
Aveg <- rbind(Aveg, colSums(trVeg))
rownames(Aveg) <- regs[1:i]
Asoil <- rbind(Asoil, colSums(trSoil))
rownames(Asoil) <- regs[1:i]
}
Aveg <- Aveg / 10^4
Asoil <- Asoil / 10^4
library(raster)
library(sp)
library(rgdal)
city <-data.frame(x = -c(114,113,112,111,117,118)-c(5,30,49,23,8,48)/60,
y = c(51,53,49,56,58,55)+c(3,33,42,44,31,10)/60)
rownames(city) <- c("Calgary","Edmonton","Lethbridge","Fort McMurray",
"High Level","Grande Prairie")
coordinates(city) <- ~ x + y
proj4string(city) <- CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
city <- spTransform(city, CRS("+proj=tmerc +lat_0=0 +lon_0=-115 +k=0.9992 +x_0=500000 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"))
city <- as.data.frame(city)
cex <- 0.25
legcex <- 1.5
Col1 <- rev(c("#D73027","#FC8D59","#FEE090","#E0F3F8","#91BFDB","#4575B4")) # Colour gradient for reference and current
Col1fun <- colorRampPalette(Col1, space = "rgb") # Function to interpolate among these colours for reference and current
C1 <- Col1fun(100)
Col2 <- c("#C51B7D","#E9A3C9","#FDE0EF","#E6F5D0","#A1D76A","#4D9221") # Colour gradient for difference map
Col2fun <- colorRampPalette(Col2, space = "rgb") # Function to interpolate among these colours for difference map
C2 <- Col2fun(200)
CW <- rgb(0.4,0.3,0.8) # water
CE <- "lightcyan4" # exclude
CSI <- colorRampPalette(c("red","yellow","green"), space = "rgb")(100)
q <- 0.99
H <- 1000
W <- 600
## csv
#spp <- "ALFL"
SPP <- rownames(tax)
#SPP <- c("BOCH","ALFL","BTNW","CAWA","OVEN","OSFL")
PRED_DIR_IN <- "pred1-josmshf"
#PRED_DIR_IN <- "pred1"
#PRED_DIR_OUT <- "pred1cmb"
PREDS <- matrix(0, sum(kgrid$useBCR6), length(SPP))
rownames(PREDS) <- rownames(kgrid)[kgrid$useBCR6]
colnames(PREDS) <- SPP
PREDS0 <- PREDS
AREA_ha <- (1-kgrid$pWater) * kgrid$Area_km2 * 100
AREA_ha <- AREA_ha[kgrid$useBCR6]
for (spp in SPP) {
cat(spp, "--------------------------------------\n");flush.console()
load(file.path(ROOT, "out", "birds", PRED_DIR_IN, spp, paste0(regs[1], ".Rdata")))
rownames(pxNcr1) <- rownames(pxNrf1) <- names(Cells)
pxNcr <- pxNcr1
pxNrf <- pxNrf1
for (i in 2:length(regs)) {
cat(spp, regs[i], "\n");flush.console()
load(file.path(ROOT, "out", "birds", PRED_DIR_IN, spp, paste0(regs[i], ".Rdata")))
rownames(pxNcr1) <- rownames(pxNrf1) <- names(Cells)
pxNcr <- rbind(pxNcr, pxNcr1)
pxNrf <- rbind(pxNrf, pxNrf1)
}
PREDS[,spp] <- pxNcr[rownames(PREDS),]
PREDS0[,spp] <- pxNrf[rownames(PREDS0),]
}
#save(PREDS, PREDS0, file=file.path(ROOT, "out", "birds", "josmshf", "predictions.Rdata"))
load(file.path(ROOT, "out", "birds", "josmshf", "predictions.Rdata"))
N <- colSums(PREDS*AREA_ha) / 10^6
#N <- N[N < max(N)]
summary(N)
## PIF table
pif <- read.csv("~/Dropbox/bam/PIF-AB/popBCR-6AB_v2_22-May-2013.csv")
mefa4::compare_sets(tax$English_Name, pif$Common_Name)
setdiff(tax$English_Name, pif$Common_Name)
pif <- pif[match(tax$English_Name, pif$Common_Name),]
## roadside_bias
load(file.path(ROOT, "out", "birds", "josmshf", "roadside_bias.Rdata"))
load(file.path(ROOT, "out", "birds", "data", "mean-qpad-estimates.Rdata"))
qpad_vals <- qpad_vals[rownames(tax),]
## roadside avoidance
library(mefa4)
load(file.path(ROOT, "out", "birds", "josmshf", "roadside_avoidance.Rdata"))
tmp <- cbind(ROAD=rai_data$ROAD, rai_pred)
rai <- groupSums(tmp[BBn[,1],], 1, rai_data$HAB[BBn[,1]], TRUE)
rai <- t(t(rai) / colSums(rai))
RAI <- 1 - colSums(rai[,1] * rai)
summary(RAI)
RAIc <- RAI-RAI["ROAD"]
#yy <- cbind(ALL=1, ROAD=xnn[BBn[,1],"ROAD01"],
# ifelse(as.matrix(yyn[BBn[,1],]) > 0, 1, 0))
#rai <- groupSums(yy, 1, xnn$hab1[BBn[,1]], TRUE)
#n <- rai[,"ALL"]
#rai <- rai[,-1]
#rai <- t(t(rai) / colSums(rai))
#sai <- groupSums(yy, 1, xnn$hab1[BBn[,1]], TRUE)
#RAI <- 1 - colSums(rai[,1] * rai)
pop <- tax[,c("Species_ID", "English_Name", "Scientific_Name", "Spp")]
pop$RAI <- RAI[match(rownames(pop), names(RAI))]
pop$RAIc <- RAIc[match(rownames(pop), names(RAIc))]
pop$RAIroad <- RAI["ROAD"]
pop$Don <- roadside_bias[rownames(pop), "on"]
pop$Doff <- roadside_bias[rownames(pop), "off"]
pop$DeltaRoad <- roadside_bias[rownames(pop), "onoff"]
pop$Nqpad <- colSums(PREDS*AREA_ha) / 10^6 # M males
pop$Nqpad[pop$Nqpad > 1000] <- NA
pop$Npif <- (pif$Pop_Est / pif$Pair_Adjust) / 10^6 # M males
pop$DeltaObs <- pop$Nqpad / pop$Npif
pop$TimeAdj <- pif$Time_Adjust
pop$MDD <- pif$Detection_Distance_m
pop$p3 <- 1-exp(-3 * qpad_vals$phi0)
pop$EDR <- qpad_vals$phi0 * 100
pop$DeltaTime <- (1/pop$p3)/pop$TimeAdj
pop$DeltaDist <- pop$MDD^2 / pop$EDR^2
pop$DeltaExp <- pop$DeltaRoad * pop$DeltaTime * pop$DeltaDist
pop$DeltaRes <- pop$DeltaObs / pop$DeltaExp
pop <- pop[rowSums(is.na(pop))==0,]
#write.csv(pop, row.names=FALSE, file="~/Dropbox/bam/PIF-AB/qpad-pif-results.csv")
boxplot(log(pop[,c("DeltaRoad", "DeltaTime", "DeltaDist", "DeltaRes")]))
abline(h=0, col=2)
boxplot(log(pop[,c("DeltaObs", "DeltaExp")]))
abline(h=0, col=2)
mat <- log(pop[,c("DeltaObs", "DeltaExp", "DeltaRoad", "DeltaTime", "DeltaDist", "DeltaRes")])
rnd <- runif(nrow(pop), -0.1, 0.1)
boxplot(mat, range=0)
for (i in 2:ncol(mat))
segments(x0=i+rnd-1, x1=i+rnd, y0=mat[,i-1], y1=mat[,i], col="lightgrey")
for (i in 1:ncol(mat))
points(i+rnd, mat[,i], col="darkgrey", pch=19)
abline(h=0, col=2, lwd=2)
boxplot(mat, range=0, add=TRUE)
with(pop, plot(RAI, log(DeltaRes), type="n"))
abline(h=0, v=RAI["ROAD"], col=2, lwd=2)
with(pop, text(RAI, log(DeltaRes), rownames(pop), cex=0.75))
boxplot(pop[,c("Npif", "Nqpad")], ylim=c(0,10))
## plots
res_luf <- list()
res_nsr <- list()
#SPP <- as.character(slt$AOU[slt$map.pred])
for (spp in SPP) {
cat(spp, "\t");flush.console()
load(file.path(ROOT, "out", "birds", "pred1cmb", paste0(spp, ".Rdata")))
km <- data.frame(km)
TYPE <- "C" # combo
#if (!slt[spp, "veghf.north"])
if (!(spp %in% fln))
TYPE <- "S"
#if (!slt[spp, "soilhf.south"])
if (!(spp %in% fls))
TYPE <- "N"
wS <- 1-kgrid$pAspen
if (TYPE == "S")
wS[] <- 1
if (TYPE == "N")
wS[] <- 0
wS[kgrid$useS] <- 1
wS[kgrid$useN] <- 0
cr <- wS * km$CurrS + (1-wS) * km$CurrN
rf <- wS * km$RefS + (1-wS) * km$RefN
#km2 <- as.matrix(cbind(Curr=cr, Ref=rf))
#rownames(km2) <- rownames(km)
#save(km2, file=file.path(ROOT, "out", "birds", "pred1combined", paste0(spp, ".Rdata")))
#cat("\n")
if (FALSE) {
ndat <- normalize_data(rf=rf, cr=cr)
}
# cr <- km$CurrN
# rf <- km$RefN
# cr <- km$CurrS
# rf <- km$RefS
qcr <- quantile(cr, q)
cr[cr>qcr] <- qcr
qrf <- quantile(rf, q)
rf[rf>qrf] <- qrf
if (TRUE) {
mat <- 100 * cbind(Ncurrent=cr, Nreference=rf) # ha to km^2
rownames(mat) <- rownames(kgrid)
res_luf[[spp]] <- groupSums(mat, 1, kgrid$LUF_NAME)
res_nsr[[spp]] <- groupSums(mat, 1, kgrid$NSRNAME)
}
if (TRUE) {
SI <- round(100 * pmin(cr, rf) / pmax(cr, rf))
SI[is.na(SI)] <- 100 # 0/0 is defined as 100 intact
# SI <- 100*as.matrix(dd1km_pred[[4]])[,"UNK"]/rowSums(dd1km_pred[[2]])
# SI <- 100-SI
cr0 <- cr
rf0 <- rf
SI0 <- SI
SI[SI < 1] <- 1 # this is only for mapping
if (FALSE) {
library(raster)
source("~/repos/abmianalytics/R/maps_functions.R")
rt <- raster(file.path(ROOT, "data", "kgrid", "AHM1k.asc"))
r_si <- as_Raster(as.factor(kgrid$Row), as.factor(kgrid$Col), SI0, rt)
plot(r_si)
writeRaster(r_si, paste0(spp, "-intactness_2016-08-12.tif"), overwrite=TRUE)
}
Max <- max(qcr, qrf)
df <- (cr-rf) / Max
df <- sign(df) * abs(df)^0.5
df <- pmin(200, ceiling(99 * df)+100)
df[df==0] <- 1
cr <- pmin(100, ceiling(99 * sqrt(cr / Max))+1)
rf <- pmin(100, ceiling(99 * sqrt(rf / Max))+1)
range(cr)
range(rf)
range(df)
NAM <- as.character(tax[spp, "English_Name"])
TAG <- ""
cat("si\t");flush.console()
fname <- file.path(ROOT, "out", "birds", "figs", "map-si",
paste0(as.character(tax[spp, "Spp"]), TAG, ".png"))
png(fname, width=W, height=H)
op <- par(mar=c(0, 0, 4, 0) + 0.1)
plot(kgrid$X, kgrid$Y, col=CSI[SI], pch=15, cex=cex, ann=FALSE, axes=FALSE)
with(kgrid[kgrid$pWater > 0.99,], points(X, Y, col=CW, pch=15, cex=cex))
# with(kgrid[kgrid$NRNAME == "Rocky Mountain" & kgrid$POINT_X < -112,],
# points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "N")
with(kgrid[kgrid$useS,], points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "S")
with(kgrid[kgrid$useN,], points(X, Y, col=CE, pch=15, cex=cex))
mtext(side=3,paste(NAM, "\nIntactness"),col="grey30", cex=legcex)
points(city, pch=18, cex=cex*2)
text(city[,1], city[,2], rownames(city), cex=0.8, adj=-0.1, col="grey10")
# text(378826,5774802,"Insufficient \n data",col="white",cex=0.9)
for (i in 1:100) {
#lines(c(190000, 220000), c(5450000, 5700000), col=CSI[i], lwd=2)
j <- i * abs(diff(c(5450000, 5700000)))/100
segments(190000, 5450000+j, 220000, 5450000+j, col=CSI[i], lwd=2, lend=2)
}
text(240000, 5450000, "0%")
text(240000, 0.5*(5450000 + 5700000), "50%")
text(240000, 5700000, "100%")
## test NAs
# with(kgrid[is.na(SI) & kgrid$pWater <= 0.99,], points(X, Y, col="black", pch=15, cex=cex))
par(op)
dev.off()
if (FALSE) {
load(file.path(ROOT, "out", "kgrid", "veg-hf_1kmgrid_fix-fire_fix-age0.Rdata")) # dd1km_pred
m0 <- as.matrix(dd1km_pred[[2]])
m0 <- 100*m0/rowSums(m0)
m0 <- m0[rf0==0 & m0[,"Water"] <= 99 & m0[,"NonVeg"] <= 99 & kgrid$NRNAME == "Grassland",]
m0 <- m0[,colSums(m0)>0]
#summary(m0)
round(colMeans(m0))
m0 <- as.matrix(dd1km_pred[[4]])
m0 <- 100*m0/rowSums(m0)
m0 <- m0[rf0==0 & m0[,"Water"] <= 99 & kgrid$NRNAME == "Grassland",]
m0 <- m0[,colSums(m0)>0]
round(colMeans(m0))
aggregate(100*as.matrix(dd1km_pred[[2]])[,"NonVeg"]/rowSums(dd1km_pred[[2]]),
list(nr=kgrid$NRNAME, rf0=rf0==0 & kgrid$pWater < 0.9), mean)
}
cat("rf\t");flush.console()
fname <- file.path(ROOT, "out", "birds", "figs", "map-rf",
paste0(as.character(tax[spp, "Spp"]), TAG, ".png"))
png(fname, width=W, height=H)
op <- par(mar=c(0, 0, 4, 0) + 0.1)
plot(kgrid$X, kgrid$Y, col=C1[rf], pch=15, cex=cex, ann=FALSE, axes=FALSE)
with(kgrid[kgrid$pWater > 0.99,], points(X, Y, col=CW, pch=15, cex=cex))
# with(kgrid[kgrid$NRNAME == "Rocky Mountain" & kgrid$POINT_X < -112,],
# points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "N")
with(kgrid[kgrid$useS,], points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "S")
with(kgrid[kgrid$useN,], points(X, Y, col=CE, pch=15, cex=cex))
mtext(side=3,paste(NAM, "\nReference abundance"),col="grey30", cex=legcex)
points(city, pch=18, cex=cex*2)
text(city[,1], city[,2], rownames(city), cex=0.8, adj=-0.1, col="grey10")
# text(378826,5774802,"Insufficient \n data",col="white",cex=0.9)
for (i in 1:100) {
#lines(c(190000, 220000), c(5450000, 5700000), col=C1[i], lwd=2)
j <- i * abs(diff(c(5450000, 5700000)))/100
segments(190000, 5450000+j, 220000, 5450000+j, col=C1[i], lwd=2, lend=2)
}
text(240000, 5450000, "0%")
text(240000, 0.5*(5450000 + 5700000), "50%")
text(240000, 5700000, "100%")
par(op)
dev.off()
cat("cr\t");flush.console()
fname <- file.path(ROOT, "out", "birds", "figs", "map-cr",
paste0(as.character(tax[spp, "Spp"]), TAG, ".png"))
png(fname, width=W, height=H)
op <- par(mar=c(0, 0, 4, 0) + 0.1)
plot(kgrid$X, kgrid$Y, col=C1[cr], pch=15, cex=cex, ann=FALSE, axes=FALSE)
with(kgrid[kgrid$pWater > 0.99,], points(X, Y, col=CW, pch=15, cex=cex))
# with(kgrid[kgrid$NRNAME == "Rocky Mountain" & kgrid$POINT_X < -112,],
# points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "N")
with(kgrid[kgrid$useS,], points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "S")
with(kgrid[kgrid$useN,], points(X, Y, col=CE, pch=15, cex=cex))
mtext(side=3,paste(NAM, "\nCurrent abundance"),col="grey30", cex=legcex)
points(city, pch=18, cex=cex*2)
text(city[,1], city[,2], rownames(city), cex=0.8, adj=-0.1, col="grey10")
# text(378826,5774802,"Insufficient \n data",col="white",cex=0.9)
for (i in 1:100) {
#lines(c(190000, 220000), c(5450000, 5700000), col=C1[i], lwd=2)
j <- i * abs(diff(c(5450000, 5700000)))/100
segments(190000, 5450000+j, 220000, 5450000+j, col=C1[i], lwd=2, lend=2)
}
text(240000, 5450000, "0%")
text(240000, 0.5*(5450000 + 5700000), "50%")
text(240000, 5700000, "100%")
par(op)
dev.off()
cat("df\n");flush.console()
fname <- file.path(ROOT, "out", "birds", "figs", "map-df",
paste0(as.character(tax[spp, "Spp"]), TAG, ".png"))
png(fname, width=W, height=H)
op <- par(mar=c(0, 0, 4, 0) + 0.1)
plot(kgrid$X, kgrid$Y, col=C2[df], pch=15, cex=cex, ann=FALSE, axes=FALSE)
with(kgrid[kgrid$pWater > 0.99,], points(X, Y, col=CW, pch=15, cex=cex))
# with(kgrid[kgrid$NRNAME == "Rocky Mountain" & kgrid$POINT_X < -112,],
# points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "N")
with(kgrid[kgrid$useS,], points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "S")
with(kgrid[kgrid$useN,], points(X, Y, col=CE, pch=15, cex=cex))
mtext(side=3,paste(NAM, "\nDifference"),col="grey30", cex=legcex)
points(city, pch=18, cex=cex*2)
text(city[,1], city[,2], rownames(city), cex=0.8, adj=-0.1, col="grey10")
# text(378826,5774802,"Insufficient \n data",col="white",cex=0.9)
for (i in 1:200) {
#lines(c(190000, 220000), c(5450000, 5700000), col=C2[i], lwd=2)
j <- i * abs(diff(c(5450000, 5700000)))/200
segments(190000, 5450000+j, 220000, 5450000+j, col=C2[i], lwd=2, lend=2)
}
text(245000, 5450000, "-100%")
text(245000, 0.5*(5450000 + 5700000), "0%")
text(245000, 5700000, "+100%")
par(op)
dev.off()
}
}
#save(res_nsr, res_luf, file=file.path(ROOT, "out", "birds", "tables", "luf_Nsummaries.Rdata"))
load(file.path(ROOT, "out", "birds", "tables", "luf_Nsummaries.Rdata"))
LUF <- list()
for (spp in names(res_luf)) {
tmp <- res_luf[[spp]] / 10^6 # M males
tmp <- t(matrix(tmp, 2*nrow(tmp), 1))
colnames(tmp) <- paste(rep(colnames(res_luf[[1]]), each=ncol(tmp)/2),
rep(rownames(res_luf[[1]]), 2))
LUF[[spp]] <- data.frame(Species=tax[spp, "English_Name"], tmp)
}
NSR <- list()
for (spp in names(res_nsr)) {
tmp <- res_nsr[[spp]] / 10^6 # M males
tmp <- t(matrix(tmp, 2*nrow(tmp), 1))
colnames(tmp) <- paste(rep(colnames(res_nsr[[1]]), each=ncol(tmp)/2),
rep(rownames(res_nsr[[1]]), 2))
NSR[[spp]] <- data.frame(Species=tax[spp, "English_Name"], tmp)
}
LUF <- do.call(rbind, LUF)
NSR <- do.call(rbind, NSR)
write.csv(LUF, row.names=FALSE,
file=file.path(ROOT, "out", "birds", "tables", "Birds_Abundance_by_LUFregions.csv"))
write.csv(NSR, row.names=FALSE,
file=file.path(ROOT, "out", "birds", "tables", "Birds_Abundance_by_NaturalSubregions.csv"))
## sector effects
seff_res <- list()
tr_res <- list()
#seff_luf <- list()
#seff_ns <- list()
#uplow <- list()
#uplow_full <- list()
#uplow_luf <- list()
## stuff to exclude
## add col to lxn
## subset counter for loop
PRED_DIR_IN <- "pred1-shf" # "pred1-seismic-as-ES" # "pred1"
restrict_to_HF <- FALSE
TAX <- read.csv("~/repos/abmispecies/_data/birds.csv")
SPP <- as.character(TAX$AOU)[TAX$map.pred]
for (spp in SPP) {
cat(spp, "------------------------\n");flush.console()
#load(file.path(OUTDIR1, spp, paste0(regs[1], ".Rdata")))
load(file.path(ROOT, "out", "birds", PRED_DIR_IN, spp, paste0(regs[1], ".Rdata")))
hbNcr <- hbNcr1[,1]
hbNrf <- hbNrf1[,1]
hbScr <- hbScr1[,1]
hbSrf <- hbSrf1[,1]
for (i in 2:length(regs)) {
cat(spp, regs[i], "\n");flush.console()
#load(file.path(OUTDIR1, spp, paste0(regs[i], ".Rdata")))
load(file.path(ROOT, "out", "birds", PRED_DIR_IN, spp, paste0(regs[i], ".Rdata")))
hbNcr <- rbind(hbNcr, hbNcr1[,1])
hbNrf <- rbind(hbNrf, hbNrf1[,1])
hbScr <- rbind(hbScr, hbScr1[,1])
hbSrf <- rbind(hbSrf, hbSrf1[,1])
}
if (!NSest["north"]) {
hbNcr[] <- 0
hbNrf[] <- 0
}
if (!NSest["south"]) {
hbScr[] <- 0
hbSrf[] <- 0
}
dimnames(hbNcr) <- dimnames(hbNrf) <- list(regs, colnames(Aveg))
hbNcr[is.na(hbNcr)] <- 0
hbNrf[is.na(hbNrf)] <- 0
hbNcr <- hbNcr * Aveg
hbNrf <- hbNrf * Aveg
dimnames(hbScr) <- dimnames(hbSrf) <- list(regs, colnames(Asoil))
hbScr[is.na(hbScr)] <- 0
hbSrf[is.na(hbSrf)] <- 0
hbScr <- hbScr * Asoil
hbSrf <- hbSrf * Asoil
## combined upland/lowland N/S
if (FALSE) {
crN <- groupSums(hbNcr, 2, ch2veg$uplow)
rfN <- groupSums(hbNrf, 2, ch2veg$uplow)
crN[lxn$NRNAME=="Grassland","lowland"] <- 0
crN[lxn$NRNAME=="Grassland","upland"] <- rowSums(hbScr[lxn$NRNAME=="Grassland",])
rfN[lxn$NRNAME=="Grassland","lowland"] <- 0
rfN[lxn$NRNAME=="Grassland","upland"] <- rowSums(hbSrf[lxn$NRNAME=="Grassland",])
uplo <- data.frame(Current=crN, Reference=rfN)
uplow_full[[spp]] <- data.frame(sppid=spp, lxn[,1:3], uplo)
## Exclude stuff here
r0 <- lxn$NSRNAME %in% c("Alpine","Lower Foothills",
"Montane","Subalpine","Upper Foothills")
crN[r0,] <- 0
rfN[r0,] <- 0
## upland/lowland
cr <- colSums(crN)
rf <- colSums(rfN)
cr <- c(total=sum(cr), cr)
rf <- c(total=sum(rf), rf)
si <- 100 * pmin(cr, rf) / pmax(cr, rf)
si2 <- ifelse(cr > rf, 200-si, si)
uplow[[spp]] <- c(Ref=rf, Cur=cr, SI=si, SI200=si2)
cr <- groupSums(groupSums(hbNcr, 2, ch2veg$uplow), 1, lxn$LUF_NAME)
rf <- groupSums(groupSums(hbNrf, 2, ch2veg$uplow), 1, lxn$LUF_NAME)
cr <- cbind(total=rowSums(cr), cr)
rf <- cbind(total=rowSums(rf), rf)
si <- sapply(1:3, function(i) 100 * pmin(cr[,i], rf[,i]) / pmax(cr[,i], rf[,i]))
colnames(si) <- colnames(cr)
si2 <- ifelse(cr > rf, 200-si, si)
uplow_luf[[spp]] <- data.frame(ID=spp, Ref=round(rf), Cur=round(cr),
SI=round(si, 2), SI200=round(si2, 2))
}
## for HF-only sector effects, only need to adjust the total
## i.e. sum only where ch2veg$cr is HF
keep <- rownames(ch2veg) %in% rownames(ch2veg)[ch2veg$isHF]
hbNcr_HFonly <- hbNcr
hbNrf_HFonly <- hbNrf
hbNcr_HFonly[,!keep] <- 0
hbNrf_HFonly[,!keep] <- 0
ThbNcr_HFonly <- colSums(hbNcr_HFonly[lxn$N,])
ThbNrf_HFonly <- colSums(hbNrf_HFonly[lxn$N,])
ThbNcr <- colSums(hbNcr[lxn$N,])
ThbNrf <- colSums(hbNrf[lxn$N,])
Ntot_HFonly <- sum(ThbNrf_HFonly)
Ntot_All <- sum(ThbNrf)
Ntot_Use <- if (restrict_to_HF)
Ntot_HFonly else Ntot_All
df <- (ThbNcr - ThbNrf) / Ntot_Use
dA <- Xtab(AvegN ~ rf + cr, ch2veg)
if (FALSE) {
tv <- read.csv("~/repos/abmianalytics/lookup/lookup-veg-hf-age.csv")
tv2 <- nonDuplicated(tv,Combined,TRUE)
dA2 <- as.matrix(groupSums(dA[,rownames(tv2)], 2, tv2$Sector3))
tv3 <- tv2[rownames(dA2),]
dA2 <- as.matrix(groupSums(dA2, 1, tv3$Sector3))
dA3 <- dA2[,c(c("Agriculture","Forestry","Energy","RuralUrban","Transportation"))]
dA3 <- round(100*t(t(dA3) / colSums(dA3)), 1)
dA3[c("Decid", "Mixwood", "UpConif", "LoConif", "Wet", "OpenOther"),]
}
dN <- Xtab(df ~ rf + cr, ch2veg)
#dA <- colSums(as.matrix(groupSums(dA[,rownames(tv)], 2, tv$Sector2)))
#dN <- colSums(as.matrix(groupSums(dN[,rownames(tv)], 2, tv$Sector2)))
dA <- colSums(as.matrix(groupSums(dA[,rownames(tv)], 2, tv$Sector)))
dN <- colSums(as.matrix(groupSums(dN[,rownames(tv)], 2, tv$Sector)))
U <- dN/dA
seffN <- cbind(dA=dA, dN=dN, U=U)[c("Agriculture","Forestry",
"Energy",#"EnergySoftLin","MineWell",
"RuralUrban","Transportation"),]
keep <- rownames(ch2soil) %in% rownames(ch2soil)[ch2soil$isHF]
hbScr_HFonly <- hbScr
hbSrf_HFonly <- hbSrf
hbScr_HFonly[,!keep] <- 0
hbSrf_HFonly[,!keep] <- 0
ThbScr_HFonly <- colSums(hbScr_HFonly[lxn$S,])
ThbSrf_HFonly <- colSums(hbSrf_HFonly[lxn$S,])
ThbScr <- colSums(hbScr[lxn$S,])
ThbSrf <- colSums(hbSrf[lxn$S,])
Stot_HFonly <- sum(ThbSrf_HFonly)
Stot_All <- sum(ThbSrf)
Stot_Use <- if (restrict_to_HF)
Stot_HFonly else Stot_All
df <- (ThbScr - ThbSrf) / Stot_Use
dA <- Xtab(AsoilS ~ rf + cr, ch2soil)
dN <- Xtab(df ~ rf + cr, ch2soil)
#dA <- colSums(as.matrix(groupSums(dA[,rownames(ts)], 2, ts$Sector2)))
#dN <- colSums(as.matrix(groupSums(dN[,rownames(ts)], 2, ts$Sector2)))
dA <- colSums(as.matrix(groupSums(dA[,rownames(ts)], 2, ts$Sector)))
dN <- colSums(as.matrix(groupSums(dN[,rownames(ts)], 2, ts$Sector)))
U <- dN/dA
seffS <- cbind(dA=dA, dN=dN, U=U)[c("Agriculture","Forestry",
"Energy",#"EnergySoftLin","MineWell",
"RuralUrban","Transportation"),]
seff_res[[spp]] <- list(N=seffN, S=seffS)
tr_res[[spp]] <- list(N=cbind(rf=ThbNrf, cr=ThbNcr), S=cbind(rf=ThbSrf, cr=ThbScr),
NSest=NSest, total=c(Ntot_All=Ntot_All, Stot_All=Stot_All,
Ntot_HFonly=Ntot_HFonly, Stot_HFonly=Stot_HFonly))
#(sum(hbNcr)-sum(hbNrf))/sum(hbNrf)
#(sum(km$CurrN)-sum(km$RefN))/sum(km$RefN)
#100*seff
}
## -new version has the HFonly pop sizes saved
## can be used to retro-fit the effects
#save(seff_res, tr_res, file=file.path(ROOT, "out", "birds", "tables", "sector-effects-new-seismic-as-bf.Rdata"))
#save(seff_res, tr_res, file=file.path(ROOT, "out", "birds", "tables", "sector-effects-new-seismic-as-ES.Rdata"))
#save(seff_res, tr_res, file=file.path(ROOT, "out", "birds", "tables", "sector-effects-new-shf.Rdata"))
## Reload the per-species sector-effect (seff_res) and transition (tr_res)
## tables computed above (seismic-as-backfilled variant selected here).
load(file.path(ROOT, "out", "birds", "tables", "sector-effects-new-seismic-as-bf.Rdata"))
#load(file.path(ROOT, "out", "birds", "tables", "sector-effects-new-seismic-as-ES.Rdata"))
#load(file.path(ROOT, "out", "birds", "tables", "sector-effects-new-shf.Rdata"))
#spp <- "ALFL"
## Aggregate the north (N) rf/cr abundances to footprint classes:
## - seff_lfull: rows of the N table restricted to human-footprint rows of
##   ch2veg, summed by the current veg/HF class (ch2veg$cr);
## - seff_loc: those classes collapsed to sectors via the tv lookup.
seff_loc <- list()
seff_lfull <- list()
for (spp in names(tr_res)) {
seff_lfull[[spp]] <- groupSums(as.matrix(tr_res[[spp]]$N)[ch2veg$isHF,], 1,
        as.character(ch2veg$cr[ch2veg$isHF]))
seff_loc[[spp]] <- groupSums(seff_lfull[[spp]], 1, tv[rownames(seff_lfull[[spp]]), "Sector"])
}
## seff2: relative change (cr vs rf) inside the footprint, species x sector,
## column order matched to the sector order of the saved tables.
seff2 <- t(sapply(seff_loc, function(z) (z[,"cr"]-z[,"rf"])/z[,"rf"]))
seff2 <- seff2[,rownames(seff_res[[1]]$N)]
## seff1: regional population effect (dN column) from the saved tables.
seff1 <- t(sapply(seff_res, function(z) z$N[,"dN"]))
seff2 <- cbind(seff2,
        Total=sapply(seff_loc, function(z) (sum(z[,"cr"])-sum(z[,"rf"]))/sum(z[,"rf"])))
## Drop species with undefined effects, sort by species code, and cap
## relative increases at +200% (value 2 on the proportional scale).
seff2 <- seff2[!is.na(seff2[,1]),]
seff2 <- seff2[order(rownames(seff2)),]
seff2[seff2>2] <- 2
round(100*seff2,1)
## Density overlays of the sector effects:
## - seff1 = % population change relative to the whole region,
## - seff2 = % population change relative to the footprint ("inside HF").
## Each density is rescaled to a peak of 1 so the five sector curves are
## visually comparable within a panel.
d1 <- apply(100*seff1, 2, density, na.rm=TRUE)
d2 <- apply(100*seff2, 2, density, na.rm=TRUE)
for (i in 1:5) {
d1[[i]]$y <- d1[[i]]$y/max(d1[[i]]$y)
d2[[i]]$y <- d2[[i]]$y/max(d2[[i]]$y)
}
par(mfrow=c(1,2))
plot(d1[[1]], xlim=c(-50,50), main="% change inside region", lwd=2)
for (i in 2:5) lines(d1[[i]], col=i, lwd=2)
abline(v=0)
plot(d2[[1]], xlim=c(-100,200), main="% change inside HF", lwd=2)
## BUG FIX: this panel previously re-drew the d1 (inside-region) curves;
## it must overlay the remaining d2 (inside-HF) densities.
for (i in 2:5) lines(d2[[i]], col=i, lwd=2)
abline(v=0)
## Only the 5 sector curves are drawn (the 6th column of seff2 is the
## never-plotted Total), so label exactly those 5.
legend("topright", lty=1, col=1:5, bty="n", legend=colnames(seff2)[1:5], lwd=2)
## Histograms of the inside-HF effects for all 6 columns (5 sectors + Total).
par(mfrow=c(2,3))
for (i in 1:6) {
hist(100*seff2[,i], main=colnames(seff2)[i], col="lightblue",
    xlab="% population change inside HF", border=NA)
abline(v=0, col=4, lty=2)
}
write.csv(round(100*seff2,1), file="sector-effects-birds-early-seral-seismic.csv")
## Per-species sector-effect summary tables on the percent scale.
## Columns 2 and 3 of the N/S matrices in seff_res are dN (population
## effect) and U (unit effect); element names of the concatenated vector
## become PopEffect.<Sector> / UnitEffect.<Sector> column names.
nres <- do.call(rbind, lapply(seff_res, function(z)
    100 * c(PopEffect = z$N[, 2], UnitEffect = z$N[, 3])))
sres <- do.call(rbind, lapply(seff_res, function(z)
    100 * c(PopEffect = z$S[, 2], UnitEffect = z$S[, 3])))
## Prepend the English species name looked up from the taxonomy table.
nres <- data.frame(Species = tax[rownames(nres), "English_Name"], nres)
sres <- data.frame(Species = tax[rownames(sres), "English_Name"], sres)
## keep only spp that are OK
write.csv(nres, row.names=FALSE,
    file=file.path(ROOT, "out", "birds", "tables", "Birds_SectorEffects_North.csv"))
write.csv(sres, row.names=FALSE,
    file=file.path(ROOT, "out", "birds", "tables", "Birds_SectorEffects_South.csv"))
## Sector-effect bar plots (one PNG per species per region, north/south).
## Bar width is the sector's areal share of the region, bar height is the
## per-unit-area effect; the label above/below each bar gives the total
## population effect. Relies on many globals set above (seff_res, tr_res,
## tax, ROOT, restrict_to_HF).
for (spp in SPP) {
cat(spp, "\n");flush.console()
for (WHERE in c("north", "south")) {
## Pick the north (N) or south (S) sector-effect matrix for this species.
SEFF <- seff_res[[spp]][[ifelse(WHERE=="north", "N", "S")]]
## Sector effect plot from Dave
## Sectors to plot and their order
sectors <- c("Agriculture","Forestry",
    "Energy",#"EnergySoftLin","MineWell",
    "RuralUrban","Transportation")
## Names that will fit without overlap
sector.names <- c("Agriculture","Forestry",
    "Energy",#"EnergySL","EnergyEX",
    "RuralUrban","Transport")
## The colours for each sector above
c1 <- c("tan3","palegreen4","indianred3",#"hotpink4",
    "skyblue3","slateblue2")
## All-habitat vs footprint-only population totals, used to rescale the
## effects to the footprint when restrict_to_HF is TRUE.
TOTALS <- if (WHERE=="north")
        tr_res[[spp]]$total[c("Ntot_All", "Ntot_HFonly")] else tr_res[[spp]]$total[c("Stot_All", "Stot_HFonly")]
SCALING <- if (restrict_to_HF)
        TOTALS[1]/TOTALS[2] else 1
total.effect <- 100 * SCALING * SEFF[sectors,"dN"]
#unit.effect <- 100 * SEFF[sectors,"U"]
## Unit effect = population effect per unit of footprint area.
unit.effect <- 100 * SCALING * SEFF[sectors,"dN"] / SEFF[sectors,"dA"]
## Max y-axis at 20%, 50% or 100% increments
## (made to be symmetrical with y-min, except if y-max is >100
ymax <- ifelse(max(abs(unit.effect))<20,20,
    ifelse(max(abs(unit.effect))<50,50,round(max(abs(unit.effect))+50,-2)))
ymin <- ifelse(ymax>50,min(-100,round(min(unit.effect)-50,-2)),-ymax)
## This is to leave enough space at the top of bars for the text giving the % population change
ymax <- max(ymax,max(unit.effect)+0.08*(max(unit.effect)-min(unit.effect,0)))
## This is to leave enough space at the bottom of negative bars for the
## text giving the % population change
ymin <- min(ymin,min(unit.effect)-0.08*(max(unit.effect,0)-min(unit.effect)))
NAM <- as.character(tax[spp, "English_Name"])
TAG <- ""
png(file.path(ROOT, "out", "birds", "figs",
    paste0("sector-", if (restrict_to_HF) "HFonly-" else "", WHERE),
    paste0(as.character(tax[spp, "Spp"]), TAG, ".png")),
    width=600, height=600)
## First barplot pass only establishes the coordinate system; the plot is
## then painted over with a grey background and redrawn with add=TRUE.
q <- barplot(unit.effect,
    width=100 * SEFF[sectors,"dA"],
    space=0,col=c1,border=c1,ylim=c(ymin,ymax),
    ylab="Unit effect (%)",xlab="Area (% of region)",
    xaxt="n",cex.lab=1.3,cex.axis=1.2,tcl=0.3,
    xlim=c(0,round(sum(100 * SEFF[,"dA"])+1,0)),
    bty="n",col.axis="grey40",col.lab="grey40",las=2)
rect(par("usr")[1],par("usr")[3],par("usr")[2],par("usr")[4],col = "gray88",border="gray88")
x.at<-pretty(c(0,sum(100 * SEFF[,"dA"])))
axis(side=1,tck=1,at=x.at,lab=rep("",length(x.at)),col="grey95")
y.at<-pretty(c(ymin,ymax),n=6)
axis(side=2,tck=1,at=y.at,lab=rep("",length(y.at)),col="grey95")
q <- barplot(unit.effect,
    width=100 * SEFF[sectors,"dA"],
    space=0,col=c1,border=c1,ylim=c(ymin,ymax),
    ylab="Unit effect (%)",xlab="Area (% of region)",
    xaxt="n",cex.lab=1.3,cex.axis=1.2,tcl=0.3,
    xlim=c(0,round(sum(100 * SEFF[,"dA"])+1,0)),
    bty="n",col.axis="grey40",col.lab="grey40",las=2,add=TRUE)
box(bty="l",col="grey40")
mtext(side=1,line=2,at=x.at,x.at,col="grey40",cex=1.2)
axis(side=1,at=x.at,tcl=0.3,lab=rep("",length(x.at)),col="grey40",
    col.axis="grey40",cex.axis=1.2,las=1)
abline(h=0,lwd=2,col="grey40")
## Set the lines so that nearby labels don't overlap
mtext(side=1,at=q+c(0,0,-1,0,+1),sector.names,col=c1,cex=1.3,
    adj=0.5,line=c(0.1,0.1,1.1,0.1,1.1))
## Just above positive bars, just below negative ones
y <- unit.effect+0.025*(ymax-ymin)*sign(unit.effect)
## Make sure there is no y-axis overlap in % change labels of
## sectors that are close together on x-axis
if (abs(y[3]-y[4])<0.05*(ymax-ymin))
    y[3:4]<-mean(y[3:4])+(c(-0.015,0.015)*(ymax-ymin))[rank(y[3:4])]
## Make sure there is no y-axis overlap in % change labels of sectors
## that are close together on x-axis
if (abs(y[4]-y[5])<0.05*(ymax-ymin))
    y[4:5]<-mean(y[4:5])+(c(-0.015,0.015)*(ymax-ymin))[rank(y[4:5])]
#if (abs(y[5]-y[6])<0.05*(ymax-ymin))
#    y[5:6]<-mean(y[5:6])+(c(-0.015,0.015)*(ymax-ymin))[rank(y[5:6])]
text(q,y,paste(ifelse(total.effect>0,"+",""),
    sprintf("%.1f",total.effect),"%",sep=""),col="darkblue",cex=1.4)
mtext(side=3,line=1,at=0,adj=0,
    paste0(NAM, " - ", ifelse(WHERE=="north", "North", "South")),
    cex=1.4,col="grey40")
dev.off()
}
}
## CoV
## Assemble per-species bootstrap prediction matrices (predB runs) across
## all regions, clip extreme values at the q-th quantile per bootstrap
## column, blend north/south predictions by pAspen weight, and average to
## the 10x10 km grid. Result: results10km_list[[SppCode]] = 10km x B matrix.
## NOTE(review): assumes ks, q, fln, fls, tax and groupMeans (mefa4) are
## defined earlier in the file -- confirm when editing.
results10km_list <- list()
for (spp in SPP) {
load(file.path(ROOT, "out", "birds", "predB", spp, paste0(regs[1], ".Rdata")))
## Only cells flagged 1 in Cells carry predictions in the predB outputs.
rownames(pxNcrB) <- rownames(pxNrfB) <- names(Cells)[Cells == 1]
rownames(pxScrB) <- rownames(pxSrfB) <- names(Cells)[Cells == 1]
pxNcr0 <- pxNcrB
#pxNrf0 <- pxNrfB
pxScr0 <- pxScrB
#pxSrf0 <- pxSrfB
for (i in 2:length(regs)) {
cat(spp, regs[i], "\n");flush.console()
load(file.path(ROOT, "out", "birds", "predB", spp, paste0(regs[i], ".Rdata")))
rownames(pxNcrB) <- rownames(pxNrfB) <- names(Cells)[Cells == 1]
rownames(pxScrB) <- rownames(pxSrfB) <- names(Cells)[Cells == 1]
pxNcr0 <- rbind(pxNcr0, pxNcrB)
# pxNrf0 <- rbind(pxNrf0, pxNrfB)
pxScr0 <- rbind(pxScr0, pxScrB)
# pxSrf0 <- rbind(pxSrf0, pxSrfB)
}
## Reorder to the ks grid; cells with no prediction become 0.
pxNcr <- pxNcr0[rownames(ks),]
pxNcr[is.na(pxNcr)] <- 0
#pxNrf <- pxNrf0[rownames(ks),]
pxScr <- pxScr0[rownames(ks),]
pxScr[is.na(pxScr)] <- 0
#pxSrf <- pxSrf0[rownames(ks),]
## Truncate each bootstrap column at its q-th quantile to tame outliers.
for (k in 1:ncol(pxNcr)) {
qN <- quantile(pxNcr[is.finite(pxNcr[,k]),k], q, na.rm=TRUE)
pxNcr[pxNcr[,k] > qN,k] <- qN
qS <- quantile(pxScr[is.finite(pxScr[,k]),k], q, na.rm=TRUE)
pxScr[pxScr[,k] > qS,k] <- qS
}
TR <- FALSE # transform to prob scale
TYPE <- "C" # combo
#if (!slt[spp, "veghf.north"])
if (!(spp %in% fln))
    TYPE <- "S"
#if (!slt[spp, "soilhf.south"])
if (!(spp %in% fls))
    TYPE <- "N"
## South weight: aspen-poor cells lean on the south (soil) model; forced
## to 1/0 where only one model exists or where use flags override.
wS <- 1-ks$pAspen
if (TYPE == "S")
    wS[] <- 1
if (TYPE == "N")
    wS[] <- 0
wS[ks$useS] <- 1
wS[ks$useN] <- 0
cr <- wS * pxScr + (1-wS) * pxNcr
cr <- 100*cr
# if (TR)
#     cr <- 1-exp(-cr)
## Average the 1-km cells to the 10x10 km grid units.
crveg <- groupMeans(cr, 1, ks$Row10_Col10, na.rm=TRUE)
results10km_list[[as.character(tax[spp,"Spp"])]] <- crveg
}
## Save the 10x10 km coordinates together with the per-species bootstrap
## summaries, then export a matrix of row-wise SDs (uncertainty across
## bootstrap runs) for the mapped species.
xy10km <- ks[, c("POINT_X", "POINT_Y", "Row10_Col10")]
save(xy10km, results10km_list,
    file = file.path(ROOT, "out", "birds", "tables", "km10results.Rdata"))
## Species lookup table; keep only species flagged for prediction maps.
slt <- read.csv("~/repos/abmispecies/_data/birds.csv")
rownames(slt) <- slt$AOU
slt$comments <- NULL
spp_aou <- rownames(slt)[slt$map.pred]
spp_code <- as.character(tax[spp_aou, "Spp"])
## One column per species code: SD across bootstrap columns for each
## 10x10 km unit (vapply keeps the species codes as column names).
mcrvegsd <- vapply(spp_code,
    function(code) apply(results10km_list[[code]], 1, sd),
    numeric(nrow(results10km_list[[1]])))
rownames(mcrvegsd) <- rownames(results10km_list[[1]])
write.csv(mcrvegsd,
    file = file.path(ROOT, "out", "birds", "tables", "birds-10x10km-SD-summary.csv"))
write.csv(xy10km,
    file = file.path(ROOT, "out", "birds", "tables", "xy-for-10x10km-SD-summary.csv"))
TAG <- ""
## Map loop: for each species, render province-wide maps of the
## coefficient of variation (CoV) and the standard error of the 10x10 km
## bootstrap predictions. Uses plotting globals (kgrid, br, Col, C1, CW,
## CE, W, H, cex, legcex, city) defined earlier in the file.
for (spp in SPP) {
crveg <- results10km_list[[as.character(tax[spp,"Spp"])]]
crvegm <- rowMeans(crveg)
crvegsd <- apply(crveg, 1, sd)
## Avoid exact zeros so CoV and binning stay finite.
crvegsd[crvegsd==0] <- 0.000001
#crvegm <- apply(crveg, 1, median)
#crvegsd <- apply(crveg, 1, IQR)
covC <- crvegsd / crvegm
covC[crvegm==0] <- mean(covC[crvegm!=0], na.rm=TRUE) # will not stick out...
#covN[is.na(covN)] <- 1
#crsoil <- groupMeans(pxScr, 1, ks$Row10_Col10)
#crsoilm <- rowMeans(crsoil)
#crsoilsd <- apply(crsoil, 1, sd)
#crsoilm <- apply(crsoil, 1, median)
#crsoilsd <- apply(crsoil, 1, IQR)
#covS <- crsoilsd / crsoilm
#covS[is.na(covS)] <- 1
#px <- crveg[order(crvegm),]
#matplot(crvegm[order(crvegm)], crveg, type="l", lty=1)
NAM <- as.character(tax[spp, "English_Name"])
cat(spp)
## Disabled "Mean" panel (kept for reference).
if (FALSE) {
cat("\tMean");flush.console()
zval <- as.integer(cut(covC, breaks=br))
zval <- pmin(100, ceiling(99 * (crvegm / max(crvegm, na.rm=TRUE)))+1)
zval <- zval[match(kgrid$Row10_Col10, rownames(crveg))]
fname <- file.path(ROOT, "out", "birds", "figs", "map-test",
    paste0(as.character(tax[spp, "Spp"]), TAG, ".png"))
png(fname, width=W*3, height=H)
op <- par(mar=c(0, 0, 4, 0) + 0.1, mfrow=c(1,3))
plot(kgrid$X, kgrid$Y, col=C1[zval], pch=15, cex=cex, ann=FALSE, axes=FALSE)
with(kgrid[kgrid$pWater > 0.99,], points(X, Y, col=CW, pch=15, cex=cex))
# with(kgrid[kgrid$NRNAME == "Rocky Mountain" & kgrid$POINT_X < -112,],
#    points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "N")
    with(kgrid[kgrid$useS,], points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "S")
    with(kgrid[kgrid$useN,], points(X, Y, col=CE, pch=15, cex=cex))
mtext(side=3,paste(NAM, TAG, "Mean"),col="grey30", cex=legcex)
points(city, pch=18, cex=cex*2)
text(city[,1], city[,2], rownames(city), cex=0.8, adj=-0.1, col="grey10")
# text(378826,5774802,"Insufficient \n data",col="white",cex=0.9)
#par(op)
#dev.off()
}
## CoV map: bin the CoV into the br breaks and colour the 1-km grid.
cat("\tCoV");flush.console()
zval <- as.integer(cut(covC, breaks=br))
zval <- zval[match(kgrid$Row10_Col10, rownames(crveg))]
fname <- file.path(ROOT, "out", "birds", "figs", "map-cov-cr",
    paste0(as.character(tax[spp, "Spp"]), TAG, ".png"))
if (TR)
    fname <- file.path(ROOT, "out", "birds", "figs", "map-test",
        paste0(as.character(tax[spp, "Spp"]), TAG, "-CoV", ".png"))
png(fname, width=W, height=H)
op <- par(mar=c(0, 0, 4, 0) + 0.1)
plot(kgrid$X, kgrid$Y, col=Col[zval], pch=15, cex=cex, ann=FALSE, axes=FALSE)
with(kgrid[kgrid$pWater > 0.99,], points(X, Y, col=CW, pch=15, cex=cex))
# with(kgrid[kgrid$NRNAME == "Rocky Mountain" & kgrid$POINT_X < -112,],
#    points(X, Y, col=CE, pch=15, cex=cex))
## Grey out the half of the province not covered by the model used.
if (TYPE == "N")
    with(kgrid[kgrid$useS,], points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "S")
    with(kgrid[kgrid$useN,], points(X, Y, col=CE, pch=15, cex=cex))
mtext(side=3,paste(NAM, TAG, "CoV"),col="grey30", cex=legcex)
points(city, pch=18, cex=cex*2)
text(city[,1], city[,2], rownames(city), cex=0.8, adj=-0.1, col="grey10")
# text(378826,5774802,"Insufficient \n data",col="white",cex=0.9)
## Legend labels from the break points; open-ended top bin shown as ">x".
TEXT <- paste0(100*br[-length(br)], "-", 100*br[-1])
INF <- grepl("Inf", TEXT)
if (any(INF))
    TEXT[length(TEXT)] <- paste0(">", 100*br[length(br)-1])
TITLE <- "Coefficient of variation"
legend("bottomleft", border=rev(Col), fill=rev(Col), bty="n", legend=rev(TEXT),
    title=TITLE, cex=legcex*0.8)
par(op)
dev.off()
## SD map: SD rescaled by the provincial mean, binned with the same breaks.
cat("\tSD\n");flush.console()
zval <- as.integer(cut(crvegsd/mean(crvegm,na.rm=TRUE), breaks=br))
zval <- zval[match(kgrid$Row10_Col10, rownames(crveg))]
fname <- file.path(ROOT, "out", "birds", "figs", "map-sd-cr",
    paste0(as.character(tax[spp, "Spp"]), TAG, ".png"))
if (TR)
    fname <- file.path(ROOT, "out", "birds", "figs", "map-test",
        paste0(as.character(tax[spp, "Spp"]), TAG, "-SD", ".png"))
png(fname, width=W, height=H)
op <- par(mar=c(0, 0, 4, 0) + 0.1)
plot(kgrid$X, kgrid$Y, col=Col[zval], pch=15, cex=cex, ann=FALSE, axes=FALSE)
with(kgrid[kgrid$pWater > 0.99,], points(X, Y, col=CW, pch=15, cex=cex))
# with(kgrid[kgrid$NRNAME == "Rocky Mountain" & kgrid$POINT_X < -112,],
#    points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "N")
    with(kgrid[kgrid$useS,], points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "S")
    with(kgrid[kgrid$useN,], points(X, Y, col=CE, pch=15, cex=cex))
mtext(side=3,paste(NAM, TAG, "SE"),col="grey30", cex=legcex)
points(city, pch=18, cex=cex*2)
text(city[,1], city[,2], rownames(city), cex=0.8, adj=-0.1, col="grey10")
# text(378826,5774802,"Insufficient \n data",col="white",cex=0.9)
## Legend on the original SD scale (breaks back-scaled by the mean).
br2 <- round(br * mean(crvegm,na.rm=TRUE), 3)
TEXT <- paste0(br2[-length(br2)], "-", br2[-1])
INF <- grepl("Inf", TEXT)
if (any(INF))
    TEXT[length(TEXT)] <- paste0(">", br2[length(br2)-1])
TITLE <- paste0("Prediction Std. Error\n(mean = ", round(mean(crvegm,na.rm=TRUE), 3), ")")
legend("bottomleft", border=rev(Col), fill=rev(Col), bty="n", legend=rev(TEXT),
    title=TITLE, cex=legcex*0.8)
par(op)
dev.off()
}
|
/projects/josm-shf/predictions-wLegends.R
|
no_license
|
psolymos/abmianalytics
|
R
| false
| false
| 37,598
|
r
|
## Setup: paths, prediction-grid flags, and bird data/taxonomy objects.
library(mefa4)
ROOT <- "e:/peter/AB_data_v2016"
#OUTDIR1 <- "e:/peter/AB_data_v2016/out/birds/pred1-josmshf"
#OUTDIRB <- "e:/peter/AB_data_v2016/out/birds/predB-josmshf"
STAGE <- list(veg = 6) # hab=5, hab+clim=6, hab+clim+shf=7
OUTDIR1 <- paste0("e:/peter/josm/2017/stage", STAGE$veg, "/pred1")
OUTDIRB <- paste0("e:/peter/josm/2017/stage", STAGE$veg, "/predB")
## 1-km prediction grid attribute table (kgrid).
load(file.path(ROOT, "out", "kgrid", "kgrid_table.Rdata"))
#source("~/repos/bragging/R/glm_skeleton.R")
#source("~/repos/abmianalytics/R/results_functions.R")
#source("~/repos/bamanalytics/R/makingsense_functions.R")
source("~/repos/abmianalytics/R/maps_functions.R")
## Prediction regions = LUF x natural subregion combinations.
regs <- levels(kgrid$LUFxNSR)
## North model domain: outside Grassland/Parkland, with the northern part
## of Dry Mixedwood (above 56.7 deg latitude) added back in.
kgrid$useN <- !(kgrid$NRNAME %in% c("Grassland", "Parkland") | kgrid$NSRNAME == "Dry Mixedwood")
kgrid$useN[kgrid$NSRNAME == "Dry Mixedwood" & kgrid$POINT_Y > 56.7] <- TRUE
kgrid$useS <- kgrid$NRNAME == "Grassland"
## NOTE: the BCR code string carries a leading space in the source data.
kgrid$useBCR6 <- kgrid$BCRCODE == " 6-BOREAL_TAIGA_PLAINS"
## Taxonomy loaded into a scratch environment to avoid name clashes.
e <- new.env()
#load(file.path(ROOT, "data", "data-full-withrevisit.Rdata"), envir=e)
load(file.path(ROOT, "out", "birds", "data", "data-wrsi.Rdata"), envir=e)
TAX <- droplevels(e$TAX)
TAX$Fn <- droplevels(TAX$English_Name)
## File-name-safe species labels (alphanumeric, MixedCase).
levels(TAX$Fn) <- nameAlnum(levels(TAX$Fn), capitalize="mixed", collapse="")
en <- new.env()
load(file.path(ROOT, "out", "birds", "data", "data-josmshf.Rdata"), envir=en)
xnn <- en$DAT
modsn <- en$mods
yyn <- en$YY
BBn <- en$BB
## Restrict the taxonomy to species present in the response matrix.
tax <- droplevels(TAX[colnames(yyn),])
rm(e, en)
## Regional landscape composition: one row per region, one column per
## veg/soil transition class. The first region is loaded to learn the
## column layout, then the matrices are preallocated and filled row by
## row. This replaces the previous rbind-in-a-loop pattern, which grew
## the matrices (O(n^2) copying) and re-assigned all rownames on every
## iteration.
load(file.path(ROOT, "out", "transitions", paste0(regs[1], ".Rdata")))
Aveg <- matrix(0, length(regs), ncol(trVeg),
    dimnames = list(regs, colnames(trVeg)))
Asoil <- matrix(0, length(regs), ncol(trSoil),
    dimnames = list(regs, colnames(trSoil)))
Aveg[1, ] <- colSums(trVeg)
Asoil[1, ] <- colSums(trSoil)
## seq_along(regs)[-1] is safe even for a single region (2:length(regs)
## would wrongly yield c(2, 1) when length(regs) == 1).
for (i in seq_along(regs)[-1]) {
    cat(regs[i], "\n"); flush.console()
    ## Each load brings fresh trVeg/trSoil for region i into scope.
    load(file.path(ROOT, "out", "transitions", paste0(regs[i], ".Rdata")))
    Aveg[i, ] <- colSums(trVeg)
    Asoil[i, ] <- colSums(trSoil)
}
## Rescale by 10^4 (presumably m^2 -> ha -- TODO confirm units of trVeg).
Aveg <- Aveg / 10^4
Asoil <- Asoil / 10^4
## Map rendering setup: city labels (projected to the provincial 10TM
## system used by the kgrid X/Y coordinates), colour ramps, and sizes.
library(raster)
library(sp)
library(rgdal)
## City coordinates entered as degrees + minutes (west longitudes negative).
city <-data.frame(x = -c(114,113,112,111,117,118)-c(5,30,49,23,8,48)/60,
    y = c(51,53,49,56,58,55)+c(3,33,42,44,31,10)/60)
rownames(city) <- c("Calgary","Edmonton","Lethbridge","Fort McMurray",
    "High Level","Grande Prairie")
coordinates(city) <- ~ x + y
proj4string(city) <- CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
## Transform lat/long to the same transverse Mercator projection as kgrid.
city <- spTransform(city, CRS("+proj=tmerc +lat_0=0 +lon_0=-115 +k=0.9992 +x_0=500000 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"))
city <- as.data.frame(city)
cex <- 0.25
legcex <- 1.5
Col1 <- rev(c("#D73027","#FC8D59","#FEE090","#E0F3F8","#91BFDB","#4575B4")) # Colour gradient for reference and current
Col1fun <- colorRampPalette(Col1, space = "rgb") # Function to interpolate among these colours for reference and current
C1 <- Col1fun(100)
Col2 <- c("#C51B7D","#E9A3C9","#FDE0EF","#E6F5D0","#A1D76A","#4D9221") # Colour gradient for difference map
Col2fun <- colorRampPalette(Col2, space = "rgb") # Function to interpolate among these colours for difference map
C2 <- Col2fun(200)
CW <- rgb(0.4,0.3,0.8) # water
CE <- "lightcyan4" # exclude
CSI <- colorRampPalette(c("red","yellow","green"), space = "rgb")(100) # intactness ramp
q <- 0.99 # upper quantile used to truncate predictions before mapping
H <- 1000 # PNG height (px)
W <- 600 # PNG width (px)
## csv
#spp <- "ALFL"
## Species set and empty current/reference prediction containers for the
## BCR 6 (Boreal Taiga Plains) subset of the grid.
SPP <- rownames(tax)
#SPP <- c("BOCH","ALFL","BTNW","CAWA","OVEN","OSFL")
PRED_DIR_IN <- "pred1-josmshf"
#PRED_DIR_IN <- "pred1"
#PRED_DIR_OUT <- "pred1cmb"
PREDS <- matrix(0, sum(kgrid$useBCR6), length(SPP))
rownames(PREDS) <- rownames(kgrid)[kgrid$useBCR6]
colnames(PREDS) <- SPP
## PREDS = current, PREDS0 = reference predictions (filled in the loop below).
PREDS0 <- PREDS
## Terrestrial area per cell: km^2 -> ha, with the water fraction removed.
AREA_ha <- (1-kgrid$pWater) * kgrid$Area_km2 * 100
AREA_ha <- AREA_ha[kgrid$useBCR6]
## Fill PREDS/PREDS0 with per-cell current (pxNcr1) and reference (pxNrf1)
## predictions assembled across all regions. Region chunks are collected
## in preallocated lists and bound once at the end, replacing the previous
## rbind-in-a-loop growth (O(n^2) copying).
for (spp in SPP) {
    cat(spp, "--------------------------------------\n"); flush.console()
    ncr_parts <- vector("list", length(regs))
    nrf_parts <- vector("list", length(regs))
    for (i in seq_along(regs)) {
        if (i > 1) {
            cat(spp, regs[i], "\n"); flush.console()
        }
        ## Each load brings pxNcr1, pxNrf1, and Cells for region i.
        load(file.path(ROOT, "out", "birds", PRED_DIR_IN, spp, paste0(regs[i], ".Rdata")))
        rownames(pxNcr1) <- rownames(pxNrf1) <- names(Cells)
        ncr_parts[[i]] <- pxNcr1
        nrf_parts[[i]] <- pxNrf1
    }
    pxNcr <- do.call(rbind, ncr_parts)
    pxNrf <- do.call(rbind, nrf_parts)
    ## Align to the BCR6 cells of the containers by row name.
    PREDS[, spp] <- pxNcr[rownames(PREDS), ]
    PREDS0[, spp] <- pxNrf[rownames(PREDS0), ]
}
#save(PREDS, PREDS0, file=file.path(ROOT, "out", "birds", "josmshf", "predictions.Rdata"))
load(file.path(ROOT, "out", "birds", "josmshf", "predictions.Rdata"))
## Provincial BCR6 population size in millions (density x area in ha).
N <- colSums(PREDS*AREA_ha) / 10^6
#N <- N[N < max(N)]
summary(N)
## PIF table
## Partners in Flight population estimates, matched to the taxonomy by
## English name (unmatched species become NA rows).
pif <- read.csv("~/Dropbox/bam/PIF-AB/popBCR-6AB_v2_22-May-2013.csv")
mefa4::compare_sets(tax$English_Name, pif$Common_Name)
setdiff(tax$English_Name, pif$Common_Name)
pif <- pif[match(tax$English_Name, pif$Common_Name),]
## roadside_bias
load(file.path(ROOT, "out", "birds", "josmshf", "roadside_bias.Rdata"))
load(file.path(ROOT, "out", "birds", "data", "mean-qpad-estimates.Rdata"))
qpad_vals <- qpad_vals[rownames(tax),]
## roadside avoidance
library(mefa4)
load(file.path(ROOT, "out", "birds", "josmshf", "roadside_avoidance.Rdata"))
## Habitat-level overlap between predicted densities and road presence:
## RAI = 1 - sum over habitats of (road share x species share), so larger
## values indicate stronger avoidance of roadside habitats.
tmp <- cbind(ROAD=rai_data$ROAD, rai_pred)
rai <- groupSums(tmp[BBn[,1],], 1, rai_data$HAB[BBn[,1]], TRUE)
rai <- t(t(rai) / colSums(rai))
RAI <- 1 - colSums(rai[,1] * rai)
summary(RAI)
## Avoidance centred on the ROAD column's own value.
RAIc <- RAI-RAI["ROAD"]
#yy <- cbind(ALL=1, ROAD=xnn[BBn[,1],"ROAD01"],
#    ifelse(as.matrix(yyn[BBn[,1],]) > 0, 1, 0))
#rai <- groupSums(yy, 1, xnn$hab1[BBn[,1]], TRUE)
#n <- rai[,"ALL"]
#rai <- rai[,-1]
#rai <- t(t(rai) / colSums(rai))
#sai <- groupSums(yy, 1, xnn$hab1[BBn[,1]], TRUE)
#RAI <- 1 - colSums(rai[,1] * rai)
## QPAD vs PIF comparison table: one row per species, with roadside,
## time, and distance adjustment factors decomposing the observed ratio
## of the two population estimates.
pop <- tax[,c("Species_ID", "English_Name", "Scientific_Name", "Spp")]
pop$RAI <- RAI[match(rownames(pop), names(RAI))]
pop$RAIc <- RAIc[match(rownames(pop), names(RAIc))]
pop$RAIroad <- RAI["ROAD"]
pop$Don <- roadside_bias[rownames(pop), "on"]
pop$Doff <- roadside_bias[rownames(pop), "off"]
pop$DeltaRoad <- roadside_bias[rownames(pop), "onoff"]
pop$Nqpad <- colSums(PREDS*AREA_ha) / 10^6 # M males
## Implausibly large totals flagged as missing.
pop$Nqpad[pop$Nqpad > 1000] <- NA
pop$Npif <- (pif$Pop_Est / pif$Pair_Adjust) / 10^6 # M males
pop$DeltaObs <- pop$Nqpad / pop$Npif
pop$TimeAdj <- pif$Time_Adjust
pop$MDD <- pif$Detection_Distance_m
## NOTE(review): both p3 (availability after a 3-min count) and EDR are
## derived from qpad_vals$phi0 here; in QPAD the availability rate (phi)
## and the distance parameter (tau, giving EDR) are distinct -- confirm
## that phi0 is the intended column for both lines.
pop$p3 <- 1-exp(-3 * qpad_vals$phi0)
pop$EDR <- qpad_vals$phi0 * 100
pop$DeltaTime <- (1/pop$p3)/pop$TimeAdj
pop$DeltaDist <- pop$MDD^2 / pop$EDR^2
## Expected ratio = product of the component adjustments; residual ratio
## is what the components fail to explain.
pop$DeltaExp <- pop$DeltaRoad * pop$DeltaTime * pop$DeltaDist
pop$DeltaRes <- pop$DeltaObs / pop$DeltaExp
## Keep complete cases only.
pop <- pop[rowSums(is.na(pop))==0,]
#write.csv(pop, row.names=FALSE, file="~/Dropbox/bam/PIF-AB/qpad-pif-results.csv")
## Exploratory plots of the log-scale adjustment factors.
boxplot(log(pop[,c("DeltaRoad", "DeltaTime", "DeltaDist", "DeltaRes")]))
abline(h=0, col=2)
boxplot(log(pop[,c("DeltaObs", "DeltaExp")]))
abline(h=0, col=2)
mat <- log(pop[,c("DeltaObs", "DeltaExp", "DeltaRoad", "DeltaTime", "DeltaDist", "DeltaRes")])
## Horizontal jitter so per-species trajectories are distinguishable.
rnd <- runif(nrow(pop), -0.1, 0.1)
boxplot(mat, range=0)
for (i in 2:ncol(mat))
    segments(x0=i+rnd-1, x1=i+rnd, y0=mat[,i-1], y1=mat[,i], col="lightgrey")
for (i in 1:ncol(mat))
    points(i+rnd, mat[,i], col="darkgrey", pch=19)
abline(h=0, col=2, lwd=2)
boxplot(mat, range=0, add=TRUE)
## Residual ratio against roadside avoidance, labelled by species code.
with(pop, plot(RAI, log(DeltaRes), type="n"))
abline(h=0, v=RAI["ROAD"], col=2, lwd=2)
with(pop, text(RAI, log(DeltaRes), rownames(pop), cex=0.75))
boxplot(pop[,c("Npif", "Nqpad")], ylim=c(0,10))
## plots
## Main map-rendering loop: for each species, blend north/south current
## and reference predictions, accumulate regional abundance summaries
## (res_luf by land-use framework region, res_nsr by natural subregion),
## and render intactness (SI), reference, current, and difference maps.
res_luf <- list()
res_nsr <- list()
#SPP <- as.character(slt$AOU[slt$map.pred])
for (spp in SPP) {
cat(spp, "\t");flush.console()
load(file.path(ROOT, "out", "birds", "pred1cmb", paste0(spp, ".Rdata")))
km <- data.frame(km)
## Model type: C = north/south combo, S = south only, N = north only
## (fln/fls hold the species with north/south models, defined earlier).
TYPE <- "C" # combo
#if (!slt[spp, "veghf.north"])
if (!(spp %in% fln))
    TYPE <- "S"
#if (!slt[spp, "soilhf.south"])
if (!(spp %in% fls))
    TYPE <- "N"
## South weight per cell (aspen-poor cells lean south), with hard
## overrides where only one model applies.
wS <- 1-kgrid$pAspen
if (TYPE == "S")
    wS[] <- 1
if (TYPE == "N")
    wS[] <- 0
wS[kgrid$useS] <- 1
wS[kgrid$useN] <- 0
cr <- wS * km$CurrS + (1-wS) * km$CurrN
rf <- wS * km$RefS + (1-wS) * km$RefN
#km2 <- as.matrix(cbind(Curr=cr, Ref=rf))
#rownames(km2) <- rownames(km)
#save(km2, file=file.path(ROOT, "out", "birds", "pred1combined", paste0(spp, ".Rdata")))
#cat("\n")
if (FALSE) {
ndat <- normalize_data(rf=rf, cr=cr)
}
# cr <- km$CurrN
# rf <- km$RefN
# cr <- km$CurrS
# rf <- km$RefS
## Truncate both surfaces at the q-th quantile to tame outliers.
qcr <- quantile(cr, q)
cr[cr>qcr] <- qcr
qrf <- quantile(rf, q)
rf[rf>qrf] <- qrf
## Regional abundance summaries (before the mapping rescale below).
if (TRUE) {
mat <- 100 * cbind(Ncurrent=cr, Nreference=rf) # ha to km^2
rownames(mat) <- rownames(kgrid)
res_luf[[spp]] <- groupSums(mat, 1, kgrid$LUF_NAME)
res_nsr[[spp]] <- groupSums(mat, 1, kgrid$NSRNAME)
}
if (TRUE) {
## Intactness: 100 * min/max of current vs reference per cell.
SI <- round(100 * pmin(cr, rf) / pmax(cr, rf))
SI[is.na(SI)] <- 100 # 0/0 is defined as 100 intact
# SI <- 100*as.matrix(dd1km_pred[[4]])[,"UNK"]/rowSums(dd1km_pred[[2]])
# SI <- 100-SI
cr0 <- cr
rf0 <- rf
SI0 <- SI
SI[SI < 1] <- 1 # this is only for mapping
if (FALSE) {
library(raster)
source("~/repos/abmianalytics/R/maps_functions.R")
rt <- raster(file.path(ROOT, "data", "kgrid", "AHM1k.asc"))
r_si <- as_Raster(as.factor(kgrid$Row), as.factor(kgrid$Col), SI0, rt)
plot(r_si)
writeRaster(r_si, paste0(spp, "-intactness_2016-08-12.tif"), overwrite=TRUE)
}
## Rescale cr/rf/df to colour indices: sqrt stretch into 1..100 for the
## abundance ramps, signed sqrt into 1..200 for the difference ramp.
Max <- max(qcr, qrf)
df <- (cr-rf) / Max
df <- sign(df) * abs(df)^0.5
df <- pmin(200, ceiling(99 * df)+100)
df[df==0] <- 1
cr <- pmin(100, ceiling(99 * sqrt(cr / Max))+1)
rf <- pmin(100, ceiling(99 * sqrt(rf / Max))+1)
range(cr)
range(rf)
range(df)
NAM <- as.character(tax[spp, "English_Name"])
TAG <- ""
## ---- Intactness (SI) map ----
cat("si\t");flush.console()
fname <- file.path(ROOT, "out", "birds", "figs", "map-si",
    paste0(as.character(tax[spp, "Spp"]), TAG, ".png"))
png(fname, width=W, height=H)
op <- par(mar=c(0, 0, 4, 0) + 0.1)
plot(kgrid$X, kgrid$Y, col=CSI[SI], pch=15, cex=cex, ann=FALSE, axes=FALSE)
with(kgrid[kgrid$pWater > 0.99,], points(X, Y, col=CW, pch=15, cex=cex))
# with(kgrid[kgrid$NRNAME == "Rocky Mountain" & kgrid$POINT_X < -112,],
#    points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "N")
    with(kgrid[kgrid$useS,], points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "S")
    with(kgrid[kgrid$useN,], points(X, Y, col=CE, pch=15, cex=cex))
mtext(side=3,paste(NAM, "\nIntactness"),col="grey30", cex=legcex)
points(city, pch=18, cex=cex*2)
text(city[,1], city[,2], rownames(city), cex=0.8, adj=-0.1, col="grey10")
# text(378826,5774802,"Insufficient \n data",col="white",cex=0.9)
## Hand-drawn vertical colour-bar legend.
for (i in 1:100) {
#lines(c(190000, 220000), c(5450000, 5700000), col=CSI[i], lwd=2)
j <- i * abs(diff(c(5450000, 5700000)))/100
segments(190000, 5450000+j, 220000, 5450000+j, col=CSI[i], lwd=2, lend=2)
}
text(240000, 5450000, "0%")
text(240000, 0.5*(5450000 + 5700000), "50%")
text(240000, 5700000, "100%")
## test NAs
# with(kgrid[is.na(SI) & kgrid$pWater <= 0.99,], points(X, Y, col="black", pch=15, cex=cex))
par(op)
dev.off()
if (FALSE) {
load(file.path(ROOT, "out", "kgrid", "veg-hf_1kmgrid_fix-fire_fix-age0.Rdata")) # dd1km_pred
m0 <- as.matrix(dd1km_pred[[2]])
m0 <- 100*m0/rowSums(m0)
m0 <- m0[rf0==0 & m0[,"Water"] <= 99 & m0[,"NonVeg"] <= 99 & kgrid$NRNAME == "Grassland",]
m0 <- m0[,colSums(m0)>0]
#summary(m0)
round(colMeans(m0))
m0 <- as.matrix(dd1km_pred[[4]])
m0 <- 100*m0/rowSums(m0)
m0 <- m0[rf0==0 & m0[,"Water"] <= 99 & kgrid$NRNAME == "Grassland",]
m0 <- m0[,colSums(m0)>0]
round(colMeans(m0))
aggregate(100*as.matrix(dd1km_pred[[2]])[,"NonVeg"]/rowSums(dd1km_pred[[2]]),
    list(nr=kgrid$NRNAME, rf0=rf0==0 & kgrid$pWater < 0.9), mean)
}
## ---- Reference abundance map ----
cat("rf\t");flush.console()
fname <- file.path(ROOT, "out", "birds", "figs", "map-rf",
    paste0(as.character(tax[spp, "Spp"]), TAG, ".png"))
png(fname, width=W, height=H)
op <- par(mar=c(0, 0, 4, 0) + 0.1)
plot(kgrid$X, kgrid$Y, col=C1[rf], pch=15, cex=cex, ann=FALSE, axes=FALSE)
with(kgrid[kgrid$pWater > 0.99,], points(X, Y, col=CW, pch=15, cex=cex))
# with(kgrid[kgrid$NRNAME == "Rocky Mountain" & kgrid$POINT_X < -112,],
#    points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "N")
    with(kgrid[kgrid$useS,], points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "S")
    with(kgrid[kgrid$useN,], points(X, Y, col=CE, pch=15, cex=cex))
mtext(side=3,paste(NAM, "\nReference abundance"),col="grey30", cex=legcex)
points(city, pch=18, cex=cex*2)
text(city[,1], city[,2], rownames(city), cex=0.8, adj=-0.1, col="grey10")
# text(378826,5774802,"Insufficient \n data",col="white",cex=0.9)
for (i in 1:100) {
#lines(c(190000, 220000), c(5450000, 5700000), col=C1[i], lwd=2)
j <- i * abs(diff(c(5450000, 5700000)))/100
segments(190000, 5450000+j, 220000, 5450000+j, col=C1[i], lwd=2, lend=2)
}
text(240000, 5450000, "0%")
text(240000, 0.5*(5450000 + 5700000), "50%")
text(240000, 5700000, "100%")
par(op)
dev.off()
## ---- Current abundance map ----
cat("cr\t");flush.console()
fname <- file.path(ROOT, "out", "birds", "figs", "map-cr",
    paste0(as.character(tax[spp, "Spp"]), TAG, ".png"))
png(fname, width=W, height=H)
op <- par(mar=c(0, 0, 4, 0) + 0.1)
plot(kgrid$X, kgrid$Y, col=C1[cr], pch=15, cex=cex, ann=FALSE, axes=FALSE)
with(kgrid[kgrid$pWater > 0.99,], points(X, Y, col=CW, pch=15, cex=cex))
# with(kgrid[kgrid$NRNAME == "Rocky Mountain" & kgrid$POINT_X < -112,],
#    points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "N")
    with(kgrid[kgrid$useS,], points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "S")
    with(kgrid[kgrid$useN,], points(X, Y, col=CE, pch=15, cex=cex))
mtext(side=3,paste(NAM, "\nCurrent abundance"),col="grey30", cex=legcex)
points(city, pch=18, cex=cex*2)
text(city[,1], city[,2], rownames(city), cex=0.8, adj=-0.1, col="grey10")
# text(378826,5774802,"Insufficient \n data",col="white",cex=0.9)
for (i in 1:100) {
#lines(c(190000, 220000), c(5450000, 5700000), col=C1[i], lwd=2)
j <- i * abs(diff(c(5450000, 5700000)))/100
segments(190000, 5450000+j, 220000, 5450000+j, col=C1[i], lwd=2, lend=2)
}
text(240000, 5450000, "0%")
text(240000, 0.5*(5450000 + 5700000), "50%")
text(240000, 5700000, "100%")
par(op)
dev.off()
## ---- Current-minus-reference difference map (200-step ramp) ----
cat("df\n");flush.console()
fname <- file.path(ROOT, "out", "birds", "figs", "map-df",
    paste0(as.character(tax[spp, "Spp"]), TAG, ".png"))
png(fname, width=W, height=H)
op <- par(mar=c(0, 0, 4, 0) + 0.1)
plot(kgrid$X, kgrid$Y, col=C2[df], pch=15, cex=cex, ann=FALSE, axes=FALSE)
with(kgrid[kgrid$pWater > 0.99,], points(X, Y, col=CW, pch=15, cex=cex))
# with(kgrid[kgrid$NRNAME == "Rocky Mountain" & kgrid$POINT_X < -112,],
#    points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "N")
    with(kgrid[kgrid$useS,], points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "S")
    with(kgrid[kgrid$useN,], points(X, Y, col=CE, pch=15, cex=cex))
mtext(side=3,paste(NAM, "\nDifference"),col="grey30", cex=legcex)
points(city, pch=18, cex=cex*2)
text(city[,1], city[,2], rownames(city), cex=0.8, adj=-0.1, col="grey10")
# text(378826,5774802,"Insufficient \n data",col="white",cex=0.9)
for (i in 1:200) {
#lines(c(190000, 220000), c(5450000, 5700000), col=C2[i], lwd=2)
j <- i * abs(diff(c(5450000, 5700000)))/200
segments(190000, 5450000+j, 220000, 5450000+j, col=C2[i], lwd=2, lend=2)
}
text(245000, 5450000, "-100%")
text(245000, 0.5*(5450000 + 5700000), "0%")
text(245000, 5700000, "+100%")
par(op)
dev.off()
}
}
## Export per-species current/reference abundance summaries by LUF region
## and by natural subregion. The two result lists share the same layout,
## so the flattening is factored into one helper.
## Flatten a list of (region x {Ncurrent, Nreference}) matrices into one
## wide table: one row per species, one column per scenario-by-region
## combination, values in millions of males.
flatten_Nsummary <- function(res) {
    out <- list()
    for (spp in names(res)) {
        tmp <- res[[spp]] / 10^6 # M males
        ## Column-major flatten: all Ncurrent regions, then all Nreference.
        tmp <- t(matrix(tmp, 2 * nrow(tmp), 1))
        colnames(tmp) <- paste(rep(colnames(res[[1]]), each = ncol(tmp) / 2),
            rep(rownames(res[[1]]), 2))
        out[[spp]] <- data.frame(Species = tax[spp, "English_Name"], tmp)
    }
    do.call(rbind, out)
}
#save(res_nsr, res_luf, file=file.path(ROOT, "out", "birds", "tables", "luf_Nsummaries.Rdata"))
load(file.path(ROOT, "out", "birds", "tables", "luf_Nsummaries.Rdata"))
LUF <- flatten_Nsummary(res_luf)
NSR <- flatten_Nsummary(res_nsr)
write.csv(LUF, row.names=FALSE,
    file=file.path(ROOT, "out", "birds", "tables", "Birds_Abundance_by_LUFregions.csv"))
write.csv(NSR, row.names=FALSE,
    file=file.path(ROOT, "out", "birds", "tables", "Birds_Abundance_by_NaturalSubregions.csv"))
## sector effects
## Containers and settings for the sector-effect computation loop below.
seff_res <- list()
tr_res <- list()
#seff_luf <- list()
#seff_ns <- list()
#uplow <- list()
#uplow_full <- list()
#uplow_luf <- list()
## stuff to exclude
## add col to lxn
## subset counter for loop
PRED_DIR_IN <- "pred1-shf" # "pred1-seismic-as-ES" # "pred1"
## If TRUE, effects are expressed relative to the footprint-only totals.
restrict_to_HF <- FALSE
## NOTE(review): this reassigns TAX -- it previously held the taxonomy
## data frame loaded above, and from here on holds the species lookup CSV.
TAX <- read.csv("~/repos/abmispecies/_data/birds.csv")
SPP <- as.character(TAX$AOU)[TAX$map.pred]
for (spp in SPP) {
cat(spp, "------------------------\n");flush.console()
#load(file.path(OUTDIR1, spp, paste0(regs[1], ".Rdata")))
load(file.path(ROOT, "out", "birds", PRED_DIR_IN, spp, paste0(regs[1], ".Rdata")))
hbNcr <- hbNcr1[,1]
hbNrf <- hbNrf1[,1]
hbScr <- hbScr1[,1]
hbSrf <- hbSrf1[,1]
for (i in 2:length(regs)) {
cat(spp, regs[i], "\n");flush.console()
#load(file.path(OUTDIR1, spp, paste0(regs[i], ".Rdata")))
load(file.path(ROOT, "out", "birds", PRED_DIR_IN, spp, paste0(regs[i], ".Rdata")))
hbNcr <- rbind(hbNcr, hbNcr1[,1])
hbNrf <- rbind(hbNrf, hbNrf1[,1])
hbScr <- rbind(hbScr, hbScr1[,1])
hbSrf <- rbind(hbSrf, hbSrf1[,1])
}
if (!NSest["north"]) {
hbNcr[] <- 0
hbNrf[] <- 0
}
if (!NSest["south"]) {
hbScr[] <- 0
hbSrf[] <- 0
}
dimnames(hbNcr) <- dimnames(hbNrf) <- list(regs, colnames(Aveg))
hbNcr[is.na(hbNcr)] <- 0
hbNrf[is.na(hbNrf)] <- 0
hbNcr <- hbNcr * Aveg
hbNrf <- hbNrf * Aveg
dimnames(hbScr) <- dimnames(hbSrf) <- list(regs, colnames(Asoil))
hbScr[is.na(hbScr)] <- 0
hbSrf[is.na(hbSrf)] <- 0
hbScr <- hbScr * Asoil
hbSrf <- hbSrf * Asoil
## combined upland/lowland N/S
if (FALSE) {
crN <- groupSums(hbNcr, 2, ch2veg$uplow)
rfN <- groupSums(hbNrf, 2, ch2veg$uplow)
crN[lxn$NRNAME=="Grassland","lowland"] <- 0
crN[lxn$NRNAME=="Grassland","upland"] <- rowSums(hbScr[lxn$NRNAME=="Grassland",])
rfN[lxn$NRNAME=="Grassland","lowland"] <- 0
rfN[lxn$NRNAME=="Grassland","upland"] <- rowSums(hbSrf[lxn$NRNAME=="Grassland",])
uplo <- data.frame(Current=crN, Reference=rfN)
uplow_full[[spp]] <- data.frame(sppid=spp, lxn[,1:3], uplo)
## Exclude stuff here
r0 <- lxn$NSRNAME %in% c("Alpine","Lower Foothills",
"Montane","Subalpine","Upper Foothills")
crN[r0,] <- 0
rfN[r0,] <- 0
## upland/lowland
cr <- colSums(crN)
rf <- colSums(rfN)
cr <- c(total=sum(cr), cr)
rf <- c(total=sum(rf), rf)
si <- 100 * pmin(cr, rf) / pmax(cr, rf)
si2 <- ifelse(cr > rf, 200-si, si)
uplow[[spp]] <- c(Ref=rf, Cur=cr, SI=si, SI200=si2)
cr <- groupSums(groupSums(hbNcr, 2, ch2veg$uplow), 1, lxn$LUF_NAME)
rf <- groupSums(groupSums(hbNrf, 2, ch2veg$uplow), 1, lxn$LUF_NAME)
cr <- cbind(total=rowSums(cr), cr)
rf <- cbind(total=rowSums(rf), rf)
si <- sapply(1:3, function(i) 100 * pmin(cr[,i], rf[,i]) / pmax(cr[,i], rf[,i]))
colnames(si) <- colnames(cr)
si2 <- ifelse(cr > rf, 200-si, si)
uplow_luf[[spp]] <- data.frame(ID=spp, Ref=round(rf), Cur=round(cr),
SI=round(si, 2), SI200=round(si2, 2))
}
## for HF-only sector effects, only need to adjust the total
## i.e. sum only where ch2veg$cr is HF
keep <- rownames(ch2veg) %in% rownames(ch2veg)[ch2veg$isHF]
hbNcr_HFonly <- hbNcr
hbNrf_HFonly <- hbNrf
hbNcr_HFonly[,!keep] <- 0
hbNrf_HFonly[,!keep] <- 0
ThbNcr_HFonly <- colSums(hbNcr_HFonly[lxn$N,])
ThbNrf_HFonly <- colSums(hbNrf_HFonly[lxn$N,])
ThbNcr <- colSums(hbNcr[lxn$N,])
ThbNrf <- colSums(hbNrf[lxn$N,])
Ntot_HFonly <- sum(ThbNrf_HFonly)
Ntot_All <- sum(ThbNrf)
Ntot_Use <- if (restrict_to_HF)
Ntot_HFonly else Ntot_All
df <- (ThbNcr - ThbNrf) / Ntot_Use
dA <- Xtab(AvegN ~ rf + cr, ch2veg)
if (FALSE) {
tv <- read.csv("~/repos/abmianalytics/lookup/lookup-veg-hf-age.csv")
tv2 <- nonDuplicated(tv,Combined,TRUE)
dA2 <- as.matrix(groupSums(dA[,rownames(tv2)], 2, tv2$Sector3))
tv3 <- tv2[rownames(dA2),]
dA2 <- as.matrix(groupSums(dA2, 1, tv3$Sector3))
dA3 <- dA2[,c(c("Agriculture","Forestry","Energy","RuralUrban","Transportation"))]
dA3 <- round(100*t(t(dA3) / colSums(dA3)), 1)
dA3[c("Decid", "Mixwood", "UpConif", "LoConif", "Wet", "OpenOther"),]
}
dN <- Xtab(df ~ rf + cr, ch2veg)
#dA <- colSums(as.matrix(groupSums(dA[,rownames(tv)], 2, tv$Sector2)))
#dN <- colSums(as.matrix(groupSums(dN[,rownames(tv)], 2, tv$Sector2)))
dA <- colSums(as.matrix(groupSums(dA[,rownames(tv)], 2, tv$Sector)))
dN <- colSums(as.matrix(groupSums(dN[,rownames(tv)], 2, tv$Sector)))
U <- dN/dA
seffN <- cbind(dA=dA, dN=dN, U=U)[c("Agriculture","Forestry",
"Energy",#"EnergySoftLin","MineWell",
"RuralUrban","Transportation"),]
keep <- rownames(ch2soil) %in% rownames(ch2soil)[ch2soil$isHF]
hbScr_HFonly <- hbScr
hbSrf_HFonly <- hbSrf
hbScr_HFonly[,!keep] <- 0
hbSrf_HFonly[,!keep] <- 0
ThbScr_HFonly <- colSums(hbScr_HFonly[lxn$S,])
ThbSrf_HFonly <- colSums(hbSrf_HFonly[lxn$S,])
ThbScr <- colSums(hbScr[lxn$S,])
ThbSrf <- colSums(hbSrf[lxn$S,])
Stot_HFonly <- sum(ThbSrf_HFonly)
Stot_All <- sum(ThbSrf)
Stot_Use <- if (restrict_to_HF)
Stot_HFonly else Stot_All
df <- (ThbScr - ThbSrf) / Stot_Use
dA <- Xtab(AsoilS ~ rf + cr, ch2soil)
dN <- Xtab(df ~ rf + cr, ch2soil)
#dA <- colSums(as.matrix(groupSums(dA[,rownames(ts)], 2, ts$Sector2)))
#dN <- colSums(as.matrix(groupSums(dN[,rownames(ts)], 2, ts$Sector2)))
dA <- colSums(as.matrix(groupSums(dA[,rownames(ts)], 2, ts$Sector)))
dN <- colSums(as.matrix(groupSums(dN[,rownames(ts)], 2, ts$Sector)))
U <- dN/dA
seffS <- cbind(dA=dA, dN=dN, U=U)[c("Agriculture","Forestry",
"Energy",#"EnergySoftLin","MineWell",
"RuralUrban","Transportation"),]
seff_res[[spp]] <- list(N=seffN, S=seffS)
tr_res[[spp]] <- list(N=cbind(rf=ThbNrf, cr=ThbNcr), S=cbind(rf=ThbSrf, cr=ThbScr),
NSest=NSest, total=c(Ntot_All=Ntot_All, Stot_All=Stot_All,
Ntot_HFonly=Ntot_HFonly, Stot_HFonly=Stot_HFonly))
#(sum(hbNcr)-sum(hbNrf))/sum(hbNrf)
#(sum(km$CurrN)-sum(km$RefN))/sum(km$RefN)
#100*seff
}
## Sector-effect summaries and distribution plots -----------------------------
## Re-loads the saved per-species sector effects (seff_res, tr_res) and
## derives two views of population change across species:
##   seff1 -- % change relative to the whole regional population
##   seff2 -- % change relative to the population inside human footprint (HF)
## then plots their distributions and writes summary CSVs.
## Depends on ch2veg, tv, groupSums, tax and ROOT defined earlier -- TODO confirm.
## -new version has the HFonly pop sizes saved
## can be used to retro-fit the effects
#save(seff_res, tr_res, file=file.path(ROOT, "out", "birds", "tables", "sector-effects-new-seismic-as-bf.Rdata"))
#save(seff_res, tr_res, file=file.path(ROOT, "out", "birds", "tables", "sector-effects-new-seismic-as-ES.Rdata"))
#save(seff_res, tr_res, file=file.path(ROOT, "out", "birds", "tables", "sector-effects-new-shf.Rdata"))
load(file.path(ROOT, "out", "birds", "tables", "sector-effects-new-seismic-as-bf.Rdata"))
#load(file.path(ROOT, "out", "birds", "tables", "sector-effects-new-seismic-as-ES.Rdata"))
#load(file.path(ROOT, "out", "birds", "tables", "sector-effects-new-shf.Rdata"))
#spp <- "ALFL"
seff_loc <- list()
seff_lfull <- list()
for (spp in names(tr_res)) {
    ## totals within HF, aggregated first to current veg class, then to sector
    seff_lfull[[spp]] <- groupSums(as.matrix(tr_res[[spp]]$N)[ch2veg$isHF,], 1,
        as.character(ch2veg$cr[ch2veg$isHF]))
    seff_loc[[spp]] <- groupSums(seff_lfull[[spp]], 1, tv[rownames(seff_lfull[[spp]]), "Sector"])
}
seff2 <- t(sapply(seff_loc, function(z) (z[,"cr"]-z[,"rf"])/z[,"rf"]))
seff2 <- seff2[,rownames(seff_res[[1]]$N)]
seff1 <- t(sapply(seff_res, function(z) z$N[,"dN"]))
seff2 <- cbind(seff2,
    Total=sapply(seff_loc, function(z) (sum(z[,"cr"])-sum(z[,"rf"]))/sum(z[,"rf"])))
seff2 <- seff2[!is.na(seff2[,1]),]
seff2 <- seff2[order(rownames(seff2)),]
seff2[seff2>2] <- 2 # cap at +200% so extreme species do not dominate the plots
round(100*seff2,1)
#AA <- 100*seff_res[[1]]$N[,"dA"]
#a <- round(100*seff2,1)
#aa <- round(t(t(100*seff2) / AA), 1)
## per-sector density curves across species, scaled to a common max of 1
d1 <- apply(100*seff1, 2, density, na.rm=TRUE)
d2 <- apply(100*seff2, 2, density, na.rm=TRUE)
for (i in 1:5) {
    d1[[i]]$y <- d1[[i]]$y/max(d1[[i]]$y)
    d2[[i]]$y <- d2[[i]]$y/max(d2[[i]]$y)
}
par(mfrow=c(1,2))
plot(d1[[1]], xlim=c(-50,50), main="% change inside region", lwd=2)
for (i in 2:5) lines(d1[[i]], col=i, lwd=2)
abline(v=0)
plot(d2[[1]], xlim=c(-100,200), main="% change inside HF", lwd=2)
## BUG FIX: this panel shows the inside-HF densities, so overlay d2 curves
## (the original overlaid d1 again, duplicating the left panel's lines)
for (i in 2:5) lines(d2[[i]], col=i, lwd=2)
abline(v=0)
legend("topright", lty=1, col=1:5, bty="n", legend=colnames(seff2), lwd=2)
par(mfrow=c(2,3))
for (i in 1:6) {
    hist(100*seff2[,i], main=colnames(seff2)[i], col="lightblue",
        xlab="% population change inside HF", border=NA)
    abline(v=0, col=4, lty=2)
}
write.csv(round(100*seff2,1), file="sector-effects-birds-early-seral-seismic.csv")
## wide per-species tables of population and unit effects (in %), N and S
nres <- list()
sres <- list()
for (spp in names(seff_res)) {
    nres[[spp]] <- 100*c(PopEffect=seff_res[[spp]]$N[,2], UnitEffect=seff_res[[spp]]$N[,3])
    sres[[spp]] <- 100*c(PopEffect=seff_res[[spp]]$S[,2], UnitEffect=seff_res[[spp]]$S[,3])
}
nres <- do.call(rbind, nres)
sres <- do.call(rbind, sres)
nres <- data.frame(Species=tax[rownames(nres), "English_Name"], nres)
sres <- data.frame(Species=tax[rownames(sres), "English_Name"], sres)
## keep only spp that are OK
write.csv(nres, row.names=FALSE,
    file=file.path(ROOT, "out", "birds", "tables", "Birds_SectorEffects_North.csv"))
write.csv(sres, row.names=FALSE,
    file=file.path(ROOT, "out", "birds", "tables", "Birds_SectorEffects_South.csv"))
## Per-species sector-effect barplots (one PNG per species x region) ----------
## Bar width encodes sector footprint area (% of region), bar height the
## unit effect (%), and the blue label above/below each bar the total
## population effect (%). Depends on seff_res, tr_res, restrict_to_HF,
## tax, SPP and ROOT from earlier sections -- TODO confirm.
for (spp in SPP) {
cat(spp, "\n");flush.console()
for (WHERE in c("north", "south")) {
SEFF <- seff_res[[spp]][[ifelse(WHERE=="north", "N", "S")]]
## Sector effect plot from Dave
## Sectors to plot and their order
sectors <- c("Agriculture","Forestry",
"Energy",#"EnergySoftLin","MineWell",
"RuralUrban","Transportation")
## Names that will fit without overlap
sector.names <- c("Agriculture","Forestry",
"Energy",#"EnergySL","EnergyEX",
"RuralUrban","Transport")
## The colours for each sector above
c1 <- c("tan3","palegreen4","indianred3",#"hotpink4",
"skyblue3","slateblue2")
## rescale to HF-only population if requested (SCALING > 1)
TOTALS <- if (WHERE=="north")
tr_res[[spp]]$total[c("Ntot_All", "Ntot_HFonly")] else tr_res[[spp]]$total[c("Stot_All", "Stot_HFonly")]
SCALING <- if (restrict_to_HF)
TOTALS[1]/TOTALS[2] else 1
total.effect <- 100 * SCALING * SEFF[sectors,"dN"]
#unit.effect <- 100 * SEFF[sectors,"U"]
unit.effect <- 100 * SCALING * SEFF[sectors,"dN"] / SEFF[sectors,"dA"]
## Max y-axis at 20%, 50% or 100% increments
## (made to be symmetrical with y-min, except if y-max is >100
ymax <- ifelse(max(abs(unit.effect))<20,20,
ifelse(max(abs(unit.effect))<50,50,round(max(abs(unit.effect))+50,-2)))
ymin <- ifelse(ymax>50,min(-100,round(min(unit.effect)-50,-2)),-ymax)
## This is to leave enough space at the top of bars for the text giving the % population change
ymax <- max(ymax,max(unit.effect)+0.08*(max(unit.effect)-min(unit.effect,0)))
## This is to leave enough space at the bottom of negative bars for the
## text giving the % population change
ymin <- min(ymin,min(unit.effect)-0.08*(max(unit.effect,0)-min(unit.effect)))
NAM <- as.character(tax[spp, "English_Name"])
TAG <- ""
png(file.path(ROOT, "out", "birds", "figs",
paste0("sector-", if (restrict_to_HF) "HFonly-" else "", WHERE),
paste0(as.character(tax[spp, "Spp"]), TAG, ".png")),
width=600, height=600)
## first barplot pass establishes the coordinate system for the backdrop
q <- barplot(unit.effect,
width=100 * SEFF[sectors,"dA"],
space=0,col=c1,border=c1,ylim=c(ymin,ymax),
ylab="Unit effect (%)",xlab="Area (% of region)",
xaxt="n",cex.lab=1.3,cex.axis=1.2,tcl=0.3,
xlim=c(0,round(sum(100 * SEFF[,"dA"])+1,0)),
bty="n",col.axis="grey40",col.lab="grey40",las=2)
## grey backdrop with white gridlines, then redraw the bars on top
rect(par("usr")[1],par("usr")[3],par("usr")[2],par("usr")[4],col = "gray88",border="gray88")
x.at<-pretty(c(0,sum(100 * SEFF[,"dA"])))
axis(side=1,tck=1,at=x.at,lab=rep("",length(x.at)),col="grey95")
y.at<-pretty(c(ymin,ymax),n=6)
axis(side=2,tck=1,at=y.at,lab=rep("",length(y.at)),col="grey95")
q <- barplot(unit.effect,
width=100 * SEFF[sectors,"dA"],
space=0,col=c1,border=c1,ylim=c(ymin,ymax),
ylab="Unit effect (%)",xlab="Area (% of region)",
xaxt="n",cex.lab=1.3,cex.axis=1.2,tcl=0.3,
xlim=c(0,round(sum(100 * SEFF[,"dA"])+1,0)),
bty="n",col.axis="grey40",col.lab="grey40",las=2,add=TRUE)
box(bty="l",col="grey40")
mtext(side=1,line=2,at=x.at,x.at,col="grey40",cex=1.2)
axis(side=1,at=x.at,tcl=0.3,lab=rep("",length(x.at)),col="grey40",
col.axis="grey40",cex.axis=1.2,las=1)
abline(h=0,lwd=2,col="grey40")
## Set the lines so that nearby labels don't overlap
## (hard-coded x offsets assume exactly 5 sectors in this order)
mtext(side=1,at=q+c(0,0,-1,0,+1),sector.names,col=c1,cex=1.3,
adj=0.5,line=c(0.1,0.1,1.1,0.1,1.1))
## Just above positive bars, just below negative ones
y <- unit.effect+0.025*(ymax-ymin)*sign(unit.effect)
## Make sure there is no y-axis overlap in % change labels of
## sectors that are close together on x-axis
if (abs(y[3]-y[4])<0.05*(ymax-ymin))
y[3:4]<-mean(y[3:4])+(c(-0.015,0.015)*(ymax-ymin))[rank(y[3:4])]
## Make sure there is no y-axis overlap in % change labels of sectors
## that are close together on x-axis
if (abs(y[4]-y[5])<0.05*(ymax-ymin))
y[4:5]<-mean(y[4:5])+(c(-0.015,0.015)*(ymax-ymin))[rank(y[4:5])]
#if (abs(y[5]-y[6])<0.05*(ymax-ymin))
# y[5:6]<-mean(y[5:6])+(c(-0.015,0.015)*(ymax-ymin))[rank(y[5:6])]
text(q,y,paste(ifelse(total.effect>0,"+",""),
sprintf("%.1f",total.effect),"%",sep=""),col="darkblue",cex=1.4)
mtext(side=3,line=1,at=0,adj=0,
paste0(NAM, " - ", ifelse(WHERE=="north", "North", "South")),
cex=1.4,col="grey40")
dev.off()
}
}
## CoV ------------------------------------------------------------------------
## Coefficient-of-variation inputs: stacks per-pixel bootstrap predictions
## (pxNcrB north / pxScrB south) across regions, truncates extreme values,
## blends north/south by pAspen weight, and aggregates to 10x10 km cells.
## Depends on SPP, regs, ks, fln, fls, tax, groupMeans and ROOT -- TODO confirm.
results10km_list <- list()
for (spp in SPP) {
load(file.path(ROOT, "out", "birds", "predB", spp, paste0(regs[1], ".Rdata")))
rownames(pxNcrB) <- rownames(pxNrfB) <- names(Cells)[Cells == 1]
rownames(pxScrB) <- rownames(pxSrfB) <- names(Cells)[Cells == 1]
pxNcr0 <- pxNcrB
#pxNrf0 <- pxNrfB
pxScr0 <- pxScrB
#pxSrf0 <- pxSrfB
## append remaining regions row-wise
for (i in 2:length(regs)) {
cat(spp, regs[i], "\n");flush.console()
load(file.path(ROOT, "out", "birds", "predB", spp, paste0(regs[i], ".Rdata")))
rownames(pxNcrB) <- rownames(pxNrfB) <- names(Cells)[Cells == 1]
rownames(pxScrB) <- rownames(pxSrfB) <- names(Cells)[Cells == 1]
pxNcr0 <- rbind(pxNcr0, pxNcrB)
# pxNrf0 <- rbind(pxNrf0, pxNrfB)
pxScr0 <- rbind(pxScr0, pxScrB)
# pxSrf0 <- rbind(pxSrf0, pxSrfB)
}
## align to the ks grid and replace missing pixels with 0
pxNcr <- pxNcr0[rownames(ks),]
pxNcr[is.na(pxNcr)] <- 0
#pxNrf <- pxNrf0[rownames(ks),]
pxScr <- pxScr0[rownames(ks),]
pxScr[is.na(pxScr)] <- 0
#pxSrf <- pxSrf0[rownames(ks),]
## truncate each bootstrap column at its `q` quantile.
## NOTE(review): `q` is not set in this section, and the plotting loop above
## assigns `q <- barplot(...)` -- confirm `q` holds a quantile probability
## (e.g. 0.99) when this runs.
for (k in 1:ncol(pxNcr)) {
qN <- quantile(pxNcr[is.finite(pxNcr[,k]),k], q, na.rm=TRUE)
pxNcr[pxNcr[,k] > qN,k] <- qN
qS <- quantile(pxScr[is.finite(pxScr[,k]),k], q, na.rm=TRUE)
pxScr[pxScr[,k] > qS,k] <- qS
}
TR <- FALSE # transform to prob scale
TYPE <- "C" # combo
#if (!slt[spp, "veghf.north"])
if (!(spp %in% fln))
TYPE <- "S"
#if (!slt[spp, "soilhf.south"])
if (!(spp %in% fls))
TYPE <- "N"
## south weight: 1-pAspen in the overlap, forced to 0/1 for single models
wS <- 1-ks$pAspen
if (TYPE == "S")
wS[] <- 1
if (TYPE == "N")
wS[] <- 0
wS[ks$useS] <- 1
wS[ks$useN] <- 0
cr <- wS * pxScr + (1-wS) * pxNcr
cr <- 100*cr
# if (TR)
# cr <- 1-exp(-cr)
## average pixels into 10x10 km cells, one matrix per species
crveg <- groupMeans(cr, 1, ks$Row10_Col10, na.rm=TRUE)
results10km_list[[as.character(tax[spp,"Spp"])]] <- crveg
}
xy10km <- ks[,c("POINT_X","POINT_Y","Row10_Col10")]
save(xy10km, results10km_list, file=file.path(ROOT, "out", "birds", "tables", "km10results.Rdata"))
## per-cell SD across bootstrap runs, one column per mapped species
slt <- read.csv("~/repos/abmispecies/_data/birds.csv")
rownames(slt) <- slt$AOU
slt$comments <- NULL
mcrvegsd <- matrix(0, nrow(results10km_list[[1]]), sum(slt$map.pred))
rownames(mcrvegsd) <- rownames(results10km_list[[1]])
colnames(mcrvegsd) <- as.character(tax[rownames(slt)[slt$map.pred],"Spp"])
for (spp in rownames(slt)[slt$map.pred]) {
crveg <- results10km_list[[as.character(tax[spp,"Spp"])]]
mcrvegsd[,as.character(tax[spp,"Spp"])] <- apply(crveg, 1, sd)
}
write.csv(mcrvegsd, file=file.path(ROOT, "out", "birds", "tables", "birds-10x10km-SD-summary.csv"))
write.csv(xy10km, file=file.path(ROOT, "out", "birds", "tables", "xy-for-10x10km-SD-summary.csv"))
## CoV and SD maps per species ------------------------------------------------
## Renders one CoV map and one SD map per species from the 10x10 km
## bootstrap summaries. Depends on results10km_list, kgrid, br, Col, C1,
## CW, CE, W, H, cex, legcex, city from earlier sections -- TODO confirm.
## NOTE(review): TR and TYPE are reused here but are only (re)assigned inside
## the previous loop, so their values here come from its LAST iteration --
## confirm this is intentional for the TYPE-based masking below.
TAG <- ""
for (spp in SPP) {
crveg <- results10km_list[[as.character(tax[spp,"Spp"])]]
crvegm <- rowMeans(crveg)
crvegsd <- apply(crveg, 1, sd)
crvegsd[crvegsd==0] <- 0.000001 # avoid divide-by-zero in CoV
#crvegm <- apply(crveg, 1, median)
#crvegsd <- apply(crveg, 1, IQR)
covC <- crvegsd / crvegm
covC[crvegm==0] <- mean(covC[crvegm!=0], na.rm=TRUE) # will not stick out...
#covN[is.na(covN)] <- 1
#crsoil <- groupMeans(pxScr, 1, ks$Row10_Col10)
#crsoilm <- rowMeans(crsoil)
#crsoilsd <- apply(crsoil, 1, sd)
#crsoilm <- apply(crsoil, 1, median)
#crsoilsd <- apply(crsoil, 1, IQR)
#covS <- crsoilsd / crsoilm
#covS[is.na(covS)] <- 1
#px <- crveg[order(crvegm),]
#matplot(crvegm[order(crvegm)], crveg, type="l", lty=1)
NAM <- as.character(tax[spp, "English_Name"])
cat(spp)
## disabled: map of the bootstrap mean (kept for reference)
if (FALSE) {
cat("\tMean");flush.console()
zval <- as.integer(cut(covC, breaks=br))
zval <- pmin(100, ceiling(99 * (crvegm / max(crvegm, na.rm=TRUE)))+1)
zval <- zval[match(kgrid$Row10_Col10, rownames(crveg))]
fname <- file.path(ROOT, "out", "birds", "figs", "map-test",
paste0(as.character(tax[spp, "Spp"]), TAG, ".png"))
png(fname, width=W*3, height=H)
op <- par(mar=c(0, 0, 4, 0) + 0.1, mfrow=c(1,3))
plot(kgrid$X, kgrid$Y, col=C1[zval], pch=15, cex=cex, ann=FALSE, axes=FALSE)
with(kgrid[kgrid$pWater > 0.99,], points(X, Y, col=CW, pch=15, cex=cex))
# with(kgrid[kgrid$NRNAME == "Rocky Mountain" & kgrid$POINT_X < -112,],
# points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "N")
with(kgrid[kgrid$useS,], points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "S")
with(kgrid[kgrid$useN,], points(X, Y, col=CE, pch=15, cex=cex))
mtext(side=3,paste(NAM, TAG, "Mean"),col="grey30", cex=legcex)
points(city, pch=18, cex=cex*2)
text(city[,1], city[,2], rownames(city), cex=0.8, adj=-0.1, col="grey10")
# text(378826,5774802,"Insufficient \n data",col="white",cex=0.9)
#par(op)
#dev.off()
}
## CoV map: bin covC into the `br` breaks and colour cells accordingly
cat("\tCoV");flush.console()
zval <- as.integer(cut(covC, breaks=br))
zval <- zval[match(kgrid$Row10_Col10, rownames(crveg))]
fname <- file.path(ROOT, "out", "birds", "figs", "map-cov-cr",
paste0(as.character(tax[spp, "Spp"]), TAG, ".png"))
if (TR)
fname <- file.path(ROOT, "out", "birds", "figs", "map-test",
paste0(as.character(tax[spp, "Spp"]), TAG, "-CoV", ".png"))
png(fname, width=W, height=H)
op <- par(mar=c(0, 0, 4, 0) + 0.1)
plot(kgrid$X, kgrid$Y, col=Col[zval], pch=15, cex=cex, ann=FALSE, axes=FALSE)
## overlay water cells, then grey out the region not covered by the model
with(kgrid[kgrid$pWater > 0.99,], points(X, Y, col=CW, pch=15, cex=cex))
# with(kgrid[kgrid$NRNAME == "Rocky Mountain" & kgrid$POINT_X < -112,],
# points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "N")
with(kgrid[kgrid$useS,], points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "S")
with(kgrid[kgrid$useN,], points(X, Y, col=CE, pch=15, cex=cex))
mtext(side=3,paste(NAM, TAG, "CoV"),col="grey30", cex=legcex)
points(city, pch=18, cex=cex*2)
text(city[,1], city[,2], rownames(city), cex=0.8, adj=-0.1, col="grey10")
# text(378826,5774802,"Insufficient \n data",col="white",cex=0.9)
## legend labels from the break points; open-ended last bin becomes ">x"
TEXT <- paste0(100*br[-length(br)], "-", 100*br[-1])
INF <- grepl("Inf", TEXT)
if (any(INF))
TEXT[length(TEXT)] <- paste0(">", 100*br[length(br)-1])
TITLE <- "Coefficient of variation"
legend("bottomleft", border=rev(Col), fill=rev(Col), bty="n", legend=rev(TEXT),
title=TITLE, cex=legcex*0.8)
par(op)
dev.off()
## SD map: same layout, SD rescaled by the provincial mean for binning
cat("\tSD\n");flush.console()
zval <- as.integer(cut(crvegsd/mean(crvegm,na.rm=TRUE), breaks=br))
zval <- zval[match(kgrid$Row10_Col10, rownames(crveg))]
fname <- file.path(ROOT, "out", "birds", "figs", "map-sd-cr",
paste0(as.character(tax[spp, "Spp"]), TAG, ".png"))
if (TR)
fname <- file.path(ROOT, "out", "birds", "figs", "map-test",
paste0(as.character(tax[spp, "Spp"]), TAG, "-SD", ".png"))
png(fname, width=W, height=H)
op <- par(mar=c(0, 0, 4, 0) + 0.1)
plot(kgrid$X, kgrid$Y, col=Col[zval], pch=15, cex=cex, ann=FALSE, axes=FALSE)
with(kgrid[kgrid$pWater > 0.99,], points(X, Y, col=CW, pch=15, cex=cex))
# with(kgrid[kgrid$NRNAME == "Rocky Mountain" & kgrid$POINT_X < -112,],
# points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "N")
with(kgrid[kgrid$useS,], points(X, Y, col=CE, pch=15, cex=cex))
if (TYPE == "S")
with(kgrid[kgrid$useN,], points(X, Y, col=CE, pch=15, cex=cex))
mtext(side=3,paste(NAM, TAG, "SE"),col="grey30", cex=legcex)
points(city, pch=18, cex=cex*2)
text(city[,1], city[,2], rownames(city), cex=0.8, adj=-0.1, col="grey10")
# text(378826,5774802,"Insufficient \n data",col="white",cex=0.9)
br2 <- round(br * mean(crvegm,na.rm=TRUE), 3)
TEXT <- paste0(br2[-length(br2)], "-", br2[-1])
INF <- grepl("Inf", TEXT)
if (any(INF))
TEXT[length(TEXT)] <- paste0(">", br2[length(br2)-1])
TITLE <- paste0("Prediction Std. Error\n(mean = ", round(mean(crvegm,na.rm=TRUE), 3), ")")
legend("bottomleft", border=rev(Col), fill=rev(Col), bty="n", legend=rev(TEXT),
title=TITLE, cex=legcex*0.8)
par(op)
dev.off()
}
|
## Create a matrix wrapper that can cache its inverse.
##
## Returns a list of four closures that share one environment:
##   set(y)           -- replace the stored matrix and drop any cached inverse
##   get()            -- return the stored matrix
##   setinverse(inv)  -- store a computed inverse in the cache
##   getinverse()     -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # invalidate the stale inverse
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## Return the inverse of the matrix held in a makeCacheMatrix object `x`.
## The cached inverse is reused when available (with a message); otherwise
## the inverse is computed with solve(), stored in the cache, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
/cachematrix.R
|
no_license
|
heikalm/ProgrammingAssignment2
|
R
| false
| false
| 1,098
|
r
|
## Build a cache-aware container for a matrix and its inverse.
## The four accessors below close over `x` and `inv`, so the matrix and
## its cached inverse persist between calls on the returned list.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set_matrix <- function(y) {
    x <<- y
    inv <<- NULL  # a new matrix invalidates the cached inverse
  }
  get_matrix <- function() x
  store_inverse <- function(inverse) inv <<- inverse
  fetch_inverse <- function() inv
  list(set = set_matrix, get = get_matrix,
       setinverse = store_inverse, getinverse = fetch_inverse)
}
## Compute (or fetch) the inverse of the matrix wrapped by `x`, a list
## produced by makeCacheMatrix. A cached result short-circuits the
## computation and emits a message; `...` is passed through to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    return(inv)
  }
  message("getting cached data")
  inv
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cur_dir.R
\name{setwd_cur}
\alias{setwd_cur}
\title{Set the wd to the current directory of the file}
\usage{
setwd_cur()
}
\description{
Set the wd to the current directory of the file
}
\examples{
setwd_cur()
}
\seealso{
\code{\link{cur_dir}}, \code{\link{cur_dir_source}}
}
\author{
Kelli-Jean Chun, \email{kjchunz@gmail.com}
}
|
/man/setwd_cur.Rd
|
no_license
|
kelli-jean/easyR
|
R
| false
| true
| 408
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cur_dir.R
\name{setwd_cur}
\alias{setwd_cur}
\title{Set the wd to the current directory of the file}
\usage{
setwd_cur()
}
\description{
Set the wd to the current directory of the file
}
\examples{
setwd_cur()
}
\seealso{
\code{\link{cur_dir}}, \code{\link{cur_dir_source}}
}
\author{
Kelli-Jean Chun, \email{kjchunz@gmail.com}
}
|
library(ggpubr)
## Boxplots of gene expression by immunity subtype ----------------------------
## Reads an expression matrix and a cluster assignment, builds a long-format
## table (written to data.txt), then plots MHC-I expression by subtype from
## a separately prepared MHCIdata.txt. Requires the input files to exist in
## the working directory.
rt=read.table("PDL1CTLA4exp.txt",sep="\t",header=T,row.names=1,check.names=F)
Type=read.table("cluster.Immunity.txt",sep="\t",check.names=F,row.names=1,header=F)
## order samples by subtype, then align expression columns to them
Type=Type[order(Type[,2]),]
rt=t(rt[,row.names(Type)])
## reshape to long format: one row per (sample, gene) with log2(FPKM+1)
data=data.frame()
for(i in colnames(rt)){
data=rbind(data,cbind(expression=log2(rt[,i]+1),gene=i,Subtype=as.vector(Type[,2])))
}
write.table(data,file="data.txt",sep="\t",row.names=F,quote=F)
## NOTE(review): `data` is immediately replaced here, so the loop above only
## feeds data.txt -- confirm MHCIdata.txt is derived from it externally
data=read.table("MHCIdata.txt",sep="\t",header=T,check.names=F)
data$Subtype=factor(data$Subtype, levels=c("Immunity_L","Immunity_M","Immunity_H"))
p=ggboxplot(data, x="gene", y="expression", color = "Subtype",
ylab="Gene expression (log2(FPKM+1))",
xlab="",
palette = c("chartreuse4","blue","red") )
#p=p+rotate_x_text(100)
## NOTE(review): this assignment is never added to the plot (dead code);
## it was probably meant to be p = p + theme(axis.title.x=element_text(size=80))
axis.title.x=element_text(size=80)
pdf(file="MHCIboxplot.pdf",width=5,height=4)
p+stat_compare_means(aes(group=Subtype),symnum.args=list(cutpoints = c(0, 0.001, 0.01, 0.05, 1), symbols = c("***", "**", "*", "ns")),label = "p.format")
dev.off()
|
/boxplot.R
|
no_license
|
262062/Li-Ma
|
R
| false
| false
| 1,070
|
r
|
library(ggpubr)
## Boxplots of gene expression by immunity subtype ----------------------------
## Reads an expression matrix and a cluster assignment, builds a long-format
## table (written to data.txt), then plots MHC-I expression by subtype from
## a separately prepared MHCIdata.txt. Requires the input files to exist in
## the working directory.
rt=read.table("PDL1CTLA4exp.txt",sep="\t",header=T,row.names=1,check.names=F)
Type=read.table("cluster.Immunity.txt",sep="\t",check.names=F,row.names=1,header=F)
## order samples by subtype, then align expression columns to them
Type=Type[order(Type[,2]),]
rt=t(rt[,row.names(Type)])
## reshape to long format: one row per (sample, gene) with log2(FPKM+1)
data=data.frame()
for(i in colnames(rt)){
data=rbind(data,cbind(expression=log2(rt[,i]+1),gene=i,Subtype=as.vector(Type[,2])))
}
write.table(data,file="data.txt",sep="\t",row.names=F,quote=F)
## NOTE(review): `data` is immediately replaced here, so the loop above only
## feeds data.txt -- confirm MHCIdata.txt is derived from it externally
data=read.table("MHCIdata.txt",sep="\t",header=T,check.names=F)
data$Subtype=factor(data$Subtype, levels=c("Immunity_L","Immunity_M","Immunity_H"))
p=ggboxplot(data, x="gene", y="expression", color = "Subtype",
ylab="Gene expression (log2(FPKM+1))",
xlab="",
palette = c("chartreuse4","blue","red") )
#p=p+rotate_x_text(100)
## NOTE(review): this assignment is never added to the plot (dead code);
## it was probably meant to be p = p + theme(axis.title.x=element_text(size=80))
axis.title.x=element_text(size=80)
pdf(file="MHCIboxplot.pdf",width=5,height=4)
p+stat_compare_means(aes(group=Subtype),symnum.args=list(cutpoints = c(0, 0.001, 0.01, 0.05, 1), symbols = c("***", "**", "*", "ns")),label = "p.format")
dev.off()
|
## Numerical verification of Parseval's theorem for the DFT:
## the time-domain energy sum(x^2) equals sum(|X|^2)/N for X = fft(x).
xk <- c(1, 0, 1, 2, 3, 2) # N = 6
N <- 6
tk <- fft(xk)
sum1 <- sum(xk * xk)                 # time-domain energy: 19
sum2 <- (sum(abs(tk) * abs(tk))) / N # frequency-domain energy: 114/6 = 19
## sum1 is equal to sum2, hence we verified Parseval's theorem
|
/assignment 3/1/3c.R
|
no_license
|
trungnnguyen/time-series-analysis
|
R
| false
| false
| 161
|
r
|
## Check Parseval's theorem on a small signal: energy computed in the time
## domain must match the DFT energy divided by the sequence length N.
xk <- c(1, 0, 1, 2, 3, 2) # N = 6
N <- 6
tk <- fft(xk)
sum1 <- sum(xk * xk)                 # 19
sum2 <- (sum(abs(tk) * abs(tk))) / N # 114/6 = 19
## sum1 equals sum2, verifying Parseval's theorem
|
## Fire "escape" step for a SpaDES scfm simulation: runs a single spread
## iteration from the current ignition loci over the flammable map and
## stores the resulting cell indices in sim$spreadStateE.
## Assumes `sim` carries flammableMap, ignitionLoci, nNbrs and optionally
## scfmPars (otherwise p0 comes from module parameters P(sim)) -- module
## context, not verifiable from this file alone.
Escape <- function(sim){
#browser()
sim$spreadStateE <- data.table(NULL) #ensure always in a determinate state
if (length(sim$ignitionLoci)> 0){
#note that ifelse won't work once these things are nonscalars.
p0 <- if ("scfmPars" %in% names(objs(sim)))
sim$scfmPars$p0
else
P(sim)$p0
# browser()
#print(paste("Year",time(sim), "loci = ", length(sim$ignitionLoci)))
## per-cell escape probability: p0 on flammable cells, 0 elsewhere
## (assumes flammableMap codes flammable cells as 0 -- TODO confirm)
pMap <- sim$flammableMap
pMap <- (!pMap) * p0
## one iteration only: cells that "escape" the ignition point this step
sim$spreadStateE <- SpaDES.tools::spread(landscape=sim$flammableMap,
loci=sim$ignitionLoci,
iterations=1,
spreadProb=pMap,
mask=sim$flammableMap,
directions=sim$nNbrs,
returnIndices=TRUE,
id=TRUE)
}
return(invisible(sim))
}
|
/modules/scfmEscape/R/Escape.R
|
no_license
|
chlorophilia/scfmModules
|
R
| false
| false
| 1,014
|
r
|
## Fire "escape" step for a SpaDES scfm simulation: runs a single spread
## iteration from the current ignition loci over the flammable map and
## stores the resulting cell indices in sim$spreadStateE.
## Assumes `sim` carries flammableMap, ignitionLoci, nNbrs and optionally
## scfmPars (otherwise p0 comes from module parameters P(sim)) -- module
## context, not verifiable from this file alone.
Escape <- function(sim){
#browser()
sim$spreadStateE <- data.table(NULL) #ensure always in a determinate state
if (length(sim$ignitionLoci)> 0){
#note that ifelse won't work once these things are nonscalars.
p0 <- if ("scfmPars" %in% names(objs(sim)))
sim$scfmPars$p0
else
P(sim)$p0
# browser()
#print(paste("Year",time(sim), "loci = ", length(sim$ignitionLoci)))
## per-cell escape probability: p0 on flammable cells, 0 elsewhere
## (assumes flammableMap codes flammable cells as 0 -- TODO confirm)
pMap <- sim$flammableMap
pMap <- (!pMap) * p0
## one iteration only: cells that "escape" the ignition point this step
sim$spreadStateE <- SpaDES.tools::spread(landscape=sim$flammableMap,
loci=sim$ignitionLoci,
iterations=1,
spreadProb=pMap,
mask=sim$flammableMap,
directions=sim$nNbrs,
returnIndices=TRUE,
id=TRUE)
}
return(invisible(sim))
}
|
## Exploratory Data Analysis - exdata-013 Project 1 - Plot 1
## April 2015 RJ Christensen
## Project 1 - Read in Household Power Consumption Data Feb 1-2, 2007
## Construct Plot 1 and save as plot1.png
## Data URL -- https://d396qusza40orc.cloudfront.net/
## exdata%2Fdata%2Fhousehold_power_consumption.zip
## data saved to working directory (wd)
## wd: "C:/Users/Renee/Documents/Coursera/Exploratory Data Analysis/Data"
## specify classes to make read.table run faster. Read in first two columns
## (Date and Time) as "character" and the last 7 columns as "numeric"

## Read household_power_consumption.txt from the working directory, subset
## to 2007-02-01 and 2007-02-02, and write a red histogram of Global Active
## Power to plot1.png. No arguments; called for its side effects only.
plot1 <- function() {
    classes <- c(rep("character", 2), rep("numeric", 7))
    power_data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                             colClasses = classes, na.strings = "?")
    ## date column - convert to date
    power_data$Date <- as.Date(power_data$Date, "%d/%m/%Y")
    ## extract data for 01 FEB 2007 and 02 FEB 2007
    data_ext1 <- grep("2007-02-01", power_data[,1], fixed = TRUE)
    data_ext2 <- grep("2007-02-02", power_data[,1], fixed = TRUE)
    data_extract <- c(data_ext1, data_ext2)
    power_data <- power_data[data_extract, ]
    ## Assume data collected in France time zone is "Europe/Berlin"
    ## Need to join date and time together
    power_data$Date <- paste(power_data$Date, power_data$Time, sep = " ")
    ## turn column into date/time format
    power_data$Date <- strptime(power_data$Date, format = "%Y-%m-%d %H:%M:%S",
                                tz = "Europe/Berlin")
    ## Create and save Plot 1 - Histogram of Global Active Power
    ## (the original wrapped this in with(power_data, ...) but still used
    ## power_data$ inside, making the with() redundant -- removed)
    png(file = "plot1.png")
    hist(power_data$Global_active_power, main = "Global Active Power",
         col = "red", xlab = "Global Active Power (kilowatts)")
    dev.off()
}
|
/plot1.R
|
no_license
|
rrjesse/ExData_Plotting1
|
R
| false
| false
| 1,908
|
r
|
## Exploratory Data Analysis - exdata-013 Project 1 - Plot 1
## April 2015 RJ Christensen
## Project 1 - Read in Household Power Consumption Data Feb 1-2, 2007
## Construct Plot 1 and save as plot1.png
## Data URL -- https://d396qusza40orc.cloudfront.net/
## exdata%2Fdata%2Fhousehold_power_consumption.zip
## data saved to working directory (wd)
## wd: "C:/Users/Renee/Documents/Coursera/Exploratory Data Analysis/Data"
## specify classes to make read.table run faster. Read in first two columns
## (Date and Time) as "character" and the last 7 columns as "numeric"
## Called for side effects only: reads the data file from the working
## directory and writes plot1.png.
plot1 <- function() {
classes <- c(rep("character", 2), rep("numeric", 7))
power_data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
colClasses = classes, na.strings = "?")
## date column - convert to date
power_data$Date <- as.Date(power_data$Date, "%d/%m/%Y")
## extract data for 01 FEB 2007 and 02 FEB 2007
data_ext1 <- grep("2007-02-01", power_data[,1], fixed = TRUE)
data_ext2 <- grep("2007-02-02", power_data[,1], fixed = TRUE)
data_extract <- c(data_ext1, data_ext2)
power_data <- power_data[data_extract, ]
## Assume data collected in France time zone is "Europe/Berlin"
## Need to join date and time together
power_data$Date <- paste(power_data$Date, power_data$Time, sep = " ")
## turn column into date/time format
power_data$Date <- strptime(power_data$Date, format = "%Y-%m-%d %H:%M:%S",
tz = "Europe/Berlin")
## Create and save Plot 1 - Histogram of Global Active Power
## NOTE(review): the with() is redundant since power_data$ is used inside it
png(file = "plot1.png")
with(power_data, hist(power_data$Global_active_power, main = "Global Active Power",
col = "red", xlab = "Global Active Power (kilowatts)"))
dev.off()
}
|
## Politico story scraper -----------------------------------------------------
## Walks pages 1..10 of politico.com/story/, follows each article link and
## extracts date, blog name, author, title and body text into politico_df,
## then writes it out as RDS. Requires rvest/xml2 (read_html, html_nodes,
## html_attr, html_text), the magrittr pipe, readr::write_rds, taRifx, the
## helper vector_is_empty() and `data_path` -- all assumed loaded/defined
## elsewhere; network access is required at run time.
# selecting page
page = seq(1, 10, 1)
politico_df <- c()
for (p in 1:length(page)){
# creating url
url <- paste0("https://www.politico.com/story/", p)
# reading url
webpage <- read_html(url)
# pulling down links
links <- webpage %>%
html_nodes("h3 a") %>%
html_attr("href")
# removing any node with magazine
links <- links[!grepl("magazine", links)]
# creating empty vector
page_df <- c()
for (i in 1:length(links)){
# reading link
politico_html <- read_html(links[[i]])
# reading text nodes
politico_text <- politico_html %>%
html_nodes("div p")
# pulling down titles from link url
politico_title <- politico_text[grepl("story-meta__credit", politico_text)]
politico_title <- politico_title %>%
html_text()
# selecting everything between \n and |
title <- trimws(gsub(".*\\n (.+) \\|.*", "\\1", politico_title))
# if there is no title, then add blank so the code doesn't error out
if (vector_is_empty(title)) { #vector_is_empty is self created function
title <- "blank"
}
# pulling author from title after |
author <- trimws(gsub(".*\\|", "", politico_title))
author <- gsub("\\/.*", "", author)
# if author is na, then add blank so it doesn't error out
if (vector_is_empty(author)) {
author <- "blank"
}
# extracting blog date
politico_date <- politico_text[grepl("story-meta__timestamp", politico_text)]
politico_date <- politico_date %>%
html_text()
# formatting date
date <- as.Date(politico_date, '%m/%d/%Y')
# extracting blog text
politico_text <- politico_text[grepl("story-text__paragraph", politico_text)]
politico_text <- politico_text %>%
html_text()
# collapsing text
politico_text <- paste0(politico_text, collapse = " ")
# creating text from scraped data
tmp_df <- data.frame(date = date, blog = "Politico", author = author, title = title, text = politico_text)
# appending new row to data.frame
# NOTE(review): rbind in a loop is O(n^2); consider collecting rows in a
# list and binding once after the loop
page_df <- rbind(page_df, tmp_df)
}
politico_df <- rbind(politico_df, page_df)
}
# unfactoring data.frame
politico_df <- taRifx::unfactor.data.frame(politico_df)
# writing out data
write_rds(politico_df, paste0(data_path,"politico_df.rds"))
|
/rcode/webscraping/old_code/pt05_politico.R
|
no_license
|
wohlfeila/political_text
|
R
| false
| false
| 2,353
|
r
|
## Politico story scraper -----------------------------------------------------
## Walks pages 1..10 of politico.com/story/, follows each article link and
## extracts date, blog name, author, title and body text into politico_df,
## then writes it out as RDS. Requires rvest/xml2 (read_html, html_nodes,
## html_attr, html_text), the magrittr pipe, readr::write_rds, taRifx, the
## helper vector_is_empty() and `data_path` -- all assumed loaded/defined
## elsewhere; network access is required at run time.
# selecting page
page = seq(1, 10, 1)
politico_df <- c()
for (p in 1:length(page)){
# creating url
url <- paste0("https://www.politico.com/story/", p)
# reading url
webpage <- read_html(url)
# pulling down links
links <- webpage %>%
html_nodes("h3 a") %>%
html_attr("href")
# removing any node with magazine
links <- links[!grepl("magazine", links)]
# creating empty vector
page_df <- c()
for (i in 1:length(links)){
# reading link
politico_html <- read_html(links[[i]])
# reading text nodes
politico_text <- politico_html %>%
html_nodes("div p")
# pulling down titles from link url
politico_title <- politico_text[grepl("story-meta__credit", politico_text)]
politico_title <- politico_title %>%
html_text()
# selecting everything between \n and |
title <- trimws(gsub(".*\\n (.+) \\|.*", "\\1", politico_title))
# if there is no title, then add blank so the code doesn't error out
if (vector_is_empty(title)) { #vector_is_empty is self created function
title <- "blank"
}
# pulling author from title after |
author <- trimws(gsub(".*\\|", "", politico_title))
author <- gsub("\\/.*", "", author)
# if author is na, then add blank so it doesn't error out
if (vector_is_empty(author)) {
author <- "blank"
}
# extracting blog date
politico_date <- politico_text[grepl("story-meta__timestamp", politico_text)]
politico_date <- politico_date %>%
html_text()
# formatting date
date <- as.Date(politico_date, '%m/%d/%Y')
# extracting blog text
politico_text <- politico_text[grepl("story-text__paragraph", politico_text)]
politico_text <- politico_text %>%
html_text()
# collapsing text
politico_text <- paste0(politico_text, collapse = " ")
# creating text from scraped data
tmp_df <- data.frame(date = date, blog = "Politico", author = author, title = title, text = politico_text)
# appending new row to data.frame
# NOTE(review): rbind in a loop is O(n^2); consider collecting rows in a
# list and binding once after the loop
page_df <- rbind(page_df, tmp_df)
}
politico_df <- rbind(politico_df, page_df)
}
# unfactoring data.frame
politico_df <- taRifx::unfactor.data.frame(politico_df)
# writing out data
write_rds(politico_df, paste0(data_path,"politico_df.rds"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mc.r
\name{initialize,tgMCCov-method}
\alias{initialize,tgMCCov-method}
\title{Construct a meta cell object}
\usage{
\S4method{initialize}{tgMCCov}(.Object, mc, outliers = c(), scmat)
}
\arguments{
\item{mc}{assignment of metacell id to cell}
\item{scmat}{a single cell RNA matrix object}
}
\description{
This constructs a meta cell cover object. It gets an MC assignment (cell->MC_ID), and a matrix, and call standard api of this class to compute the footprints.
}
|
/man/initialize-tgMCCov-method.Rd
|
permissive
|
echomsky/metacell
|
R
| false
| true
| 545
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mc.r
\name{initialize,tgMCCov-method}
\alias{initialize,tgMCCov-method}
\title{Construct a meta cell object}
\usage{
\S4method{initialize}{tgMCCov}(.Object, mc, outliers = c(), scmat)
}
\arguments{
\item{mc}{assignment of metacell id to cell}
\item{scmat}{a single cell RNA matrix object}
}
\description{
This constructs a meta cell cover object. It gets an MC assignment (cell->MC_ID), and a matrix, and call standard api of this class to compute the footprints.
}
|
#This routine is from https://benjjneb.github.io/dada2/ITS_workflow.html with modifications
#itsxpress is run after primer removal and quality filtering but before dada2 core algorithm
#itsxpress is run outside of R
#this script is the core dada2 algorithm run after itsxpress
library(dada2)
packageVersion("dada2")
library(ShortRead)
packageVersion("ShortRead")
library(Biostrings)
packageVersion("Biostrings")
#input directory of ITS-extracted paired reads
seqDir = "R1_R2_switched/itsxpress"
list.files(seqDir)
#parse and sort file names, adjust regex as needed
itsFs <- sort(list.files(seqDir, pattern = "_R1_001.fastq.gz", full.names = TRUE))
itsRs <- sort(list.files(seqDir, pattern = "_R2_001.fastq.gz", full.names = TRUE))
#itsxpress outputs 0 len reads. filter for len
#make files
path.len <- file.path(seqDir, "gt_10")
if(!dir.exists(path.len)) dir.create(path.len)
itsFs.len <- file.path(path.len, basename(itsFs))
itsRs.len <- file.path(path.len, basename(itsRs))
#filter (minLen = 10 discards the zero-length reads produced by itsxpress)
out2 <- filterAndTrim(itsFs, itsFs.len, itsRs, itsRs.len, maxN = 0, maxEE = c(2, 2),
truncQ = 2, minLen = 10, rm.phix = TRUE, compress = TRUE, multithread = 24) # on windows, set multithread = FALSE
head(out2)
saveRDS(out2, "intermediate_RDS/read_filtering_read_counts_2.rds")
# sort filtered read files
itsFs.len <- sort(list.files(path.len, pattern = "_R1_001.fastq.gz", full.names = TRUE))
itsRs.len <- sort(list.files(path.len, pattern = "_R2_001.fastq.gz", full.names = TRUE))
#Vis read quality of its-extracted reads
#If running on a large sample set should index the filename object to [1:25] otherwise will be unreadable
pdf("dada2_processing_tables_figs/read_quality_post_its_extraction.pdf")
print(plotQualityProfile(itsFs.len[1:25]))
print(plotQualityProfile(itsRs.len[1:25]))
dev.off()
#This is the start of the core algorithm pipeline
#At this point the tutorial at https://benjjneb.github.io/dada2/tutorial.html is likely more informative than the ITS specific tutorial
#Learn the error rates
errF <- learnErrors(itsFs.len, multithread = 24)
errR <- learnErrors(itsRs.len, multithread = 24)
#Viz
pdf("dada2_processing_tables_figs/error_rate_graphs.pdf")
print(plotErrors(errF, nominalQ = TRUE))
print(plotErrors(errR, nominalQ = TRUE))
dev.off()
#derep
#it seems like the derep step is not strictly necessary and is wrapped in the dada2 alg. (it's no longer a separate step in the main tutorial)
derepFs <- derepFastq(itsFs.len, verbose = TRUE)
derepRs <- derepFastq(itsRs.len, verbose = TRUE)
#sample name = first underscore-delimited token of the file name
get.sample.name <- function(fname) strsplit(basename(fname), "_")[[1]][1]
sample.names <- unname(sapply(itsFs.len, get.sample.name))
names(derepFs) <- sample.names
names(derepRs) <- sample.names
#DADA2 algorithm
#pooling samples is not default, but increases sensitivity to low abundance sequences shared across samples
#pool = FALSE here: samples are denoised independently (use TRUE/FALSE, not T/F)
dadaFs <- dada(derepFs, err = errF, multithread = 24, pool = FALSE)
dadaRs <- dada(derepRs, err = errR, multithread = 24, pool = FALSE)
#merge pairs; maxMismatch = 0 requires perfect overlap agreement
mergers <- mergePairs(dadaFs, derepFs, dadaRs, derepRs, verbose=TRUE, maxMismatch = 0)
#make seq table
seqtab <- makeSequenceTable(mergers)
dim(seqtab)
#remove chimeras
seqtab.nochim <- removeBimeraDenovo(seqtab, method="consensus", multithread=24, verbose=TRUE)
saveRDS(seqtab.nochim, file = "intermediate_RDS/dada2_seq_table_no_chim.rds")
#These files saved only for sequence counting purposes in "dada2_tables_to_file"
getN <- function(x) sum(getUniques(x))
denoisedF.getN = data.frame(denoisedF = sapply(dadaFs, getN), sample = rownames(data.frame(sapply(dadaFs, getN))))
saveRDS(denoisedF.getN, "intermediate_RDS/denoisedF.getN.df.rds")
denoisedR.getN = data.frame(denoisedR = sapply(dadaRs, getN), sample = rownames(data.frame(sapply(dadaRs, getN))))
saveRDS(denoisedR.getN, "intermediate_RDS/denoisedR.getN.df.rds")
mergers.getN = data.frame(merged = sapply(mergers, getN), sample = rownames(data.frame(sapply(mergers, getN))))
saveRDS(mergers.getN, "intermediate_RDS/mergers.getN.df.rds")
|
/processing_methods_comparison/dada2-slurm_files_sep_pool_F.r
|
no_license
|
ewmorr/neonectria_barcoding_012220
|
R
| false
| false
| 3,926
|
r
|
#This routine is from https://benjjneb.github.io/dada2/ITS_workflow.html with modifications
#itsxpress is run after primer removal and quality filtering but before dada2 core algorithm
#itsxpress is run outside of R
#this script is the core dada2 algorithm run after itsxpress
library(dada2)
packageVersion("dada2")
library(ShortRead)
packageVersion("ShortRead")
library(Biostrings)
packageVersion("Biostrings")
#input directory of ITS-extracted paired reads
seqDir = "R1_R2_switched/itsxpress"
list.files(seqDir)
#parse and sort file names, adjust regex as needed
itsFs <- sort(list.files(seqDir, pattern = "_R1_001.fastq.gz", full.names = TRUE))
itsRs <- sort(list.files(seqDir, pattern = "_R2_001.fastq.gz", full.names = TRUE))
#itsxpress outputs 0 len reads. filter for len
#make files
path.len <- file.path(seqDir, "gt_10")
if(!dir.exists(path.len)) dir.create(path.len)
itsFs.len <- file.path(path.len, basename(itsFs))
itsRs.len <- file.path(path.len, basename(itsRs))
#filter (minLen = 10 discards the zero-length reads produced by itsxpress)
out2 <- filterAndTrim(itsFs, itsFs.len, itsRs, itsRs.len, maxN = 0, maxEE = c(2, 2),
truncQ = 2, minLen = 10, rm.phix = TRUE, compress = TRUE, multithread = 24) # on windows, set multithread = FALSE
head(out2)
saveRDS(out2, "intermediate_RDS/read_filtering_read_counts_2.rds")
# sort filtered read files
itsFs.len <- sort(list.files(path.len, pattern = "_R1_001.fastq.gz", full.names = TRUE))
itsRs.len <- sort(list.files(path.len, pattern = "_R2_001.fastq.gz", full.names = TRUE))
#Vis read quality of its-extracted reads
#If running on a large sample set should index the filename object to [1:25] otherwise will be unreadable
pdf("dada2_processing_tables_figs/read_quality_post_its_extraction.pdf")
print(plotQualityProfile(itsFs.len[1:25]))
print(plotQualityProfile(itsRs.len[1:25]))
dev.off()
#This is the start of the core algorithm pipeline
#At this point the tutorial at https://benjjneb.github.io/dada2/tutorial.html is likely more informative than the ITS specific tutorial
#Learn the error rates
errF <- learnErrors(itsFs.len, multithread = 24)
errR <- learnErrors(itsRs.len, multithread = 24)
#Viz
pdf("dada2_processing_tables_figs/error_rate_graphs.pdf")
print(plotErrors(errF, nominalQ = TRUE))
print(plotErrors(errR, nominalQ = TRUE))
dev.off()
#derep
#it seems like the derep step is not strictly necessary and is wrapped in the dada2 alg. (it's no longer a separate step in the main tutorial)
derepFs <- derepFastq(itsFs.len, verbose = TRUE)
derepRs <- derepFastq(itsRs.len, verbose = TRUE)
#sample name = first underscore-delimited token of the file name
get.sample.name <- function(fname) strsplit(basename(fname), "_")[[1]][1]
sample.names <- unname(sapply(itsFs.len, get.sample.name))
names(derepFs) <- sample.names
names(derepRs) <- sample.names
#DADA2 algorithm
#pooling samples is not default, but increases sensitivity to low abundance sequences shared across samples
#pool = FALSE here: samples are denoised independently (use TRUE/FALSE, not T/F)
dadaFs <- dada(derepFs, err = errF, multithread = 24, pool = FALSE)
dadaRs <- dada(derepRs, err = errR, multithread = 24, pool = FALSE)
#merge pairs; maxMismatch = 0 requires perfect overlap agreement
mergers <- mergePairs(dadaFs, derepFs, dadaRs, derepRs, verbose=TRUE, maxMismatch = 0)
#make seq table
seqtab <- makeSequenceTable(mergers)
dim(seqtab)
#remove chimeras
seqtab.nochim <- removeBimeraDenovo(seqtab, method="consensus", multithread=24, verbose=TRUE)
saveRDS(seqtab.nochim, file = "intermediate_RDS/dada2_seq_table_no_chim.rds")
#These files saved only for sequence counting purposes in "dada2_tables_to_file"
getN <- function(x) sum(getUniques(x))
denoisedF.getN = data.frame(denoisedF = sapply(dadaFs, getN), sample = rownames(data.frame(sapply(dadaFs, getN))))
saveRDS(denoisedF.getN, "intermediate_RDS/denoisedF.getN.df.rds")
denoisedR.getN = data.frame(denoisedR = sapply(dadaRs, getN), sample = rownames(data.frame(sapply(dadaRs, getN))))
saveRDS(denoisedR.getN, "intermediate_RDS/denoisedR.getN.df.rds")
mergers.getN = data.frame(merged = sapply(mergers, getN), sample = rownames(data.frame(sapply(mergers, getN))))
saveRDS(mergers.getN, "intermediate_RDS/mergers.getN.df.rds")
|
library(readxl)
library(dplyr)
# Arrivals data for Nordic visitors to Spain and Catalonia.
model <- read_xlsx("/Users/wemigliari/Documents/R/tabelas/tour_gov_health_barna.xlsx",
                   sheet = "esp_cat_swedes")
library(GGally)
ggpairs(model) # Plot all the correlations + normal distributions
## Model 1: arrivals in Spain vs. total Nordic arrivals in Catalonia
model_mlr_ar <- lm(model$Arrivals_n_s ~ model$Arrivals_cat_total_nordics)
summary(model_mlr_ar)
confint(model_mlr_ar)
## Model 2: Catalan arrivals vs. per-country Nordic arrivals
model_mlr_ar_n <- lm(model$Arrivals_cat ~ model$Arrivals_cat_dk +
                       model$Arrivals_cat_nor + model$Arrivals_cat_se +
                       model$Arrivals_cat_fin, data = model)
summary(model_mlr_ar_n)
confint(model_mlr_ar_n)
####
library(extrafont)
par(mfrow = c(2,3), family= "Arial", cex = 0.5, oma = c(4, 1, 1, 4))
# qqnorm()/qqline() are base graphics functions: they draw as a side effect
# and do not compose with "+" (that is ggplot2 syntax). Chaining them with
# "+" raised "non-numeric argument to binary operator" after each panel, so
# each pair is now two separate statements.
qqnorm(model$Arrivals_n_s/1000, pch = 1, frame = FALSE, main = "Normal Q-Q Plot Arrivals of Nordics in Spain")
qqline(model$Arrivals_n_s/1000, col = "darkgreen", lwd = 2)
qqnorm(model$Arrivals_cat_total_nordics/1000, pch = 1, frame = FALSE, main = "Normal Q-Q Plot Arrivals of Nordics in Catalonia")
qqline(model$Arrivals_cat_total_nordics/1000, col = "darkgreen", lwd = 2)
qqnorm(model$Arrivals_cat_dk/1000, pch = 1, frame = FALSE, main = "Normal Q-Q Plot Arrivals of Danish in Catalonia")
qqline(model$Arrivals_cat_dk/1000, col = "darkgreen", lwd = 2)
qqnorm(model$Arrivals_cat_fin/1000, pch = 1, frame = FALSE, main = "Normal Q-Q Plot Arrivals of Finnish in Catalonia")
qqline(model$Arrivals_cat_fin/1000, col = "darkgreen", lwd = 2)
qqnorm(model$Arrivals_cat_nor/1000, pch = 1, frame = FALSE, main = "Normal Q-Q Plot Arrivals of Norwegians in Catalonia")
qqline(model$Arrivals_cat_nor/1000, col = "darkgreen", lwd = 2)
qqnorm(model$Arrivals_cat_se/1000, pch = 1, frame = FALSE, main = "Normal Q-Q Plot Arrivals of Swedes in Catalonia")
qqline(model$Arrivals_cat_se/1000, col = "darkgreen", lwd = 2)
####
# Summary statistics of accumulated arrivals (millions)
max(model$Accumulated_arrivals_n_s/1000000)
#[1] 5.826548
min(model$Accumulated_arrivals_n_s/1000000)
#[1] 0
mean(model$Accumulated_arrivals_n_s/1000000)
#[1] 2.81716
sd(model$Accumulated_arrivals_n_s/1000000)
#[1] 1.803582
x <- seq(0, 5.826548, by = 0.05)
y <- dnorm(x, mean = 2.81716, sd = 1.803582)
# NOTE(review): qnorm() expects probabilities in [0, 1]; for x > 1 it returns
# NaN, so most of z is NaN here. pnorm() (the CDF) was probably intended --
# confirm before relying on this plot.
z <- qnorm(x, mean = 2.81716, sd = 1.803582)
par(family= "Arial", cex = 0.6, oma = c(4, 1, 1, 4))
plot(x, z, col = "gray", type = "l", xlim = c(0,1))
|
/public_health_spain_cat_nordic.R
|
no_license
|
wemigliari/governance
|
R
| false
| false
| 2,340
|
r
|
library(readxl)
library(dplyr)
# Arrivals data for Nordic visitors to Spain and Catalonia.
model <- read_xlsx("/Users/wemigliari/Documents/R/tabelas/tour_gov_health_barna.xlsx",
                   sheet = "esp_cat_swedes")
library(GGally)
ggpairs(model) # Plot all the correlations + normal distributions
## Model 1: arrivals in Spain vs. total Nordic arrivals in Catalonia
model_mlr_ar <- lm(model$Arrivals_n_s ~ model$Arrivals_cat_total_nordics)
summary(model_mlr_ar)
confint(model_mlr_ar)
## Model 2: Catalan arrivals vs. per-country Nordic arrivals
model_mlr_ar_n <- lm(model$Arrivals_cat ~ model$Arrivals_cat_dk +
                       model$Arrivals_cat_nor + model$Arrivals_cat_se +
                       model$Arrivals_cat_fin, data = model)
summary(model_mlr_ar_n)
confint(model_mlr_ar_n)
####
library(extrafont)
par(mfrow = c(2,3), family= "Arial", cex = 0.5, oma = c(4, 1, 1, 4))
# qqnorm()/qqline() are base graphics functions: they draw as a side effect
# and do not compose with "+" (that is ggplot2 syntax). Chaining them with
# "+" raised "non-numeric argument to binary operator" after each panel, so
# each pair is now two separate statements.
qqnorm(model$Arrivals_n_s/1000, pch = 1, frame = FALSE, main = "Normal Q-Q Plot Arrivals of Nordics in Spain")
qqline(model$Arrivals_n_s/1000, col = "darkgreen", lwd = 2)
qqnorm(model$Arrivals_cat_total_nordics/1000, pch = 1, frame = FALSE, main = "Normal Q-Q Plot Arrivals of Nordics in Catalonia")
qqline(model$Arrivals_cat_total_nordics/1000, col = "darkgreen", lwd = 2)
qqnorm(model$Arrivals_cat_dk/1000, pch = 1, frame = FALSE, main = "Normal Q-Q Plot Arrivals of Danish in Catalonia")
qqline(model$Arrivals_cat_dk/1000, col = "darkgreen", lwd = 2)
qqnorm(model$Arrivals_cat_fin/1000, pch = 1, frame = FALSE, main = "Normal Q-Q Plot Arrivals of Finnish in Catalonia")
qqline(model$Arrivals_cat_fin/1000, col = "darkgreen", lwd = 2)
qqnorm(model$Arrivals_cat_nor/1000, pch = 1, frame = FALSE, main = "Normal Q-Q Plot Arrivals of Norwegians in Catalonia")
qqline(model$Arrivals_cat_nor/1000, col = "darkgreen", lwd = 2)
qqnorm(model$Arrivals_cat_se/1000, pch = 1, frame = FALSE, main = "Normal Q-Q Plot Arrivals of Swedes in Catalonia")
qqline(model$Arrivals_cat_se/1000, col = "darkgreen", lwd = 2)
####
# Summary statistics of accumulated arrivals (millions)
max(model$Accumulated_arrivals_n_s/1000000)
#[1] 5.826548
min(model$Accumulated_arrivals_n_s/1000000)
#[1] 0
mean(model$Accumulated_arrivals_n_s/1000000)
#[1] 2.81716
sd(model$Accumulated_arrivals_n_s/1000000)
#[1] 1.803582
x <- seq(0, 5.826548, by = 0.05)
y <- dnorm(x, mean = 2.81716, sd = 1.803582)
# NOTE(review): qnorm() expects probabilities in [0, 1]; for x > 1 it returns
# NaN, so most of z is NaN here. pnorm() (the CDF) was probably intended --
# confirm before relying on this plot.
z <- qnorm(x, mean = 2.81716, sd = 1.803582)
par(family= "Arial", cex = 0.6, oma = c(4, 1, 1, 4))
plot(x, z, col = "gray", type = "l", xlim = c(0,1))
|
# line_plots <- function()
filter_alignments <- function(alignments, regions, regions_filter="both", minimum=10, maximum=30, cutoff=.001){
  # Apply the three standard alignment filters in order and return the
  # surviving alignments, sorted: region overlap, read-length window, and
  # removal of overrepresented sequences.
  by_region <- filter_by_regions(alignments = alignments, regions = regions, type = regions_filter)
  by_size <- filter_alignments_by_size_range(alignments = by_region, minimum = minimum, maximum = maximum)
  cleaned <- remove_overrepresented_sequences(alignments = by_size, cutoff = cutoff)
  sort.GenomicRanges(cleaned)
}
set_dataset_names <- function(input_dir, output_dir, dataset_info){
  # Build the bookkeeping list (file name, basename, display name, output and
  # figure directories) for one sequencing dataset.
  #
  # input_dir:    unused here; kept for interface compatibility.
  # output_dir:   root output directory; a per-dataset subdirectory is created.
  # dataset_info: character vector whose first element is the fastq file name;
  #               when the element is named, that name becomes the dataset's
  #               descriptive name.
  dataset_names <- list(fastq_file=NA, # The full name of the file (basename.ext)
                        basename=NA,   # The basename of the file (basename)
                        name=NA,       # The descriptive name of the file (basename or other)
                        output_dir=NA  # The output directory (.../outputdir/basename/)
                        )
  dataset_names["fastq_file"] <- dataset_info[[1]]
  dataset_names["basename"] <- strsplit(dataset_info, "\\.")[[1]][1]
  # names() is NULL for an unnamed vector, so the old check
  # is.na(names(dataset_info)) produced logical(0) and made if() fail with
  # "argument is of length zero". Fall back to the basename whenever no
  # usable name is supplied.
  info_name <- names(dataset_info)
  if (is.null(info_name) || is.na(info_name[1]) || info_name[1] == ""){
    dataset_names["name"] <- dataset_names[["basename"]]
  } else{
    dataset_names["name"] <- info_name[1]}
  dataset_names["output_dir"] <- create_output_dirs(out_dir = output_dir, name = dataset_names[["name"]])
  dataset_names["figure_dir"] <- create_output_dirs(out_dir = dataset_names[["output_dir"]], name =c("figures"))
  return(dataset_names)
}
#
# main_workflow <- function()
# {
# incProgress(amount = .2, detail = "Loading intervals")
# values$genome_data <- load_genome_data(path = values$genomes_dir,
# genome = values$selected_genome[["Version"]])
# print(values$genome_data)
# print(values$selected_genome[["Gene.Sets"]])
# incProgress(amount = .2, detail = "Loading gene sets")
# values$gene_sets <- load_gene_sets(gene_sets = values$selected_genome[["Gene.Sets"]])
#
# ### Load alignment
# incProgress(amount = .2, detail = "Loading alignments")
# values$alignments <- load_alignments(path = values$bam_path)
#
# incProgress(amount = .2, detail = "Filtering regions")
# values$alignments <- filter_by_regions(alignments = values$alignments,
# regions = values$genome_data[["gene_intervals"]],
# type = "both")
# incProgress(amount = .1, detail = "Filtering alignment sizes")
# values$alignments <- filter_alignments_by_size(alignments = values$alignments,
# minimum = input$get_range[[1]],
# maximum = input$get_range[[2]])
# incProgress(amount = .1, detail = "Filtering overrpreseented reads")
# values$alignments <- remove_overrepresented_sequences(alignments = values$alignments,
# cutoff = input$read_cutoff)
#
# values$alignments <- get_genome_sequence(gr = values$alignments,
# genome_sequence = load_fasta_genome(
# path = values$selected_genome[['Genome.FASTA']]
# ))
#
# print('print(values$alignments)')
# print(values$alignments)
#
# incProgress(amount = .1, detail = "Filtering mismatches")
# mismatch_indexes <- filter_BAM_tags(values$alignments)
# values$two_mm <- values$alignments[mismatch_indexes$two_mm]
# values$no_mm <- values$alignments[mismatch_indexes$no_mm]
# values$no_mm_in_seed <- values$alignments[mismatch_indexes$no_mm_seed]
# values$shuffled <- shuffle_alignments(alignments = values$two_mm,
# intervals = values$genome_data[["gene_intervals"]],
# antisense = TRUE)
# #values$two_mm <- filter_alignments(alignments = values$two_mm, regions = )
#
# print('The length of values$two_mm is: ')
# print(length(x = print(values$two_mm)))
# #print(values$two_mm)
# #print(values$no_mm)
# #print(values$no_mm_in_seed)
# print('making the plots')
# print('making the plots two_mm')
# print(width(values$two_mm))
# print(strand(values$two_mm))
# print(mcols(values$two_mm))
#
# create_output_dirs(out_dir = values$output_dir,
# name = 'five_prime')
# p <- make_length_plots(gr = values$two_mm,
# path = paste(values$output_dir,
# '/five_prime',
# sep = ''),
# label = "two_mm__all_genes")
# output$fivepp_all <- renderPlot({p})
# print('making the plots no_mm')
# make_length_plots(gr = values$no_mm,
# path = paste(values$output_dir,
# '/five_prime',
# sep = ''),
# label = "no_mm__all_genes")
# print('making the plots no_mm_in_seed')
# make_length_plots(gr = values$no_mm_in_seed,
# path = paste(values$output_dir,
# '/five_prime',
# sep = ''),
# label = "no_mm_in_seed__all_genes")
# }
# process_fastq <- function(input_dir, output_dir, dataset_info, adapter_file, genome, alignment_settings){
#
# # Trim the adapter sequences
# run_cutadapt()
#
# #Align the reads using bowtie
# for(j in 1:length(alignment_settings)){
# dataset_names[names(alignment_settings[j])] <- run_bowtie(
# alignment_settings[j], # The options for this alignment file
# names(alignment_settings[j]), # The name of the sam file configuration
# genome # ID of genome
# )
# }
# system(command = paste("gzip -f",
# paste(dataset_names["output_dir"],"/", dataset_names["basename"], ".trimmed.fastq", sep = ""),
# wait = TRUE)
# )
# return(dataset_names)
# }
|
/R/workflows.R
|
no_license
|
alb202/PACER
|
R
| false
| false
| 5,956
|
r
|
# line_plots <- function()
filter_alignments <- function(alignments, regions, regions_filter="both", minimum=10, maximum=30, cutoff=.001){
  # Apply the three standard alignment filters in order and return the
  # surviving alignments, sorted: region overlap, read-length window, and
  # removal of overrepresented sequences.
  by_region <- filter_by_regions(alignments = alignments, regions = regions, type = regions_filter)
  by_size <- filter_alignments_by_size_range(alignments = by_region, minimum = minimum, maximum = maximum)
  cleaned <- remove_overrepresented_sequences(alignments = by_size, cutoff = cutoff)
  sort.GenomicRanges(cleaned)
}
set_dataset_names <- function(input_dir, output_dir, dataset_info){
  # Build the bookkeeping list (file name, basename, display name, output and
  # figure directories) for one sequencing dataset.
  #
  # input_dir:    unused here; kept for interface compatibility.
  # output_dir:   root output directory; a per-dataset subdirectory is created.
  # dataset_info: character vector whose first element is the fastq file name;
  #               when the element is named, that name becomes the dataset's
  #               descriptive name.
  dataset_names <- list(fastq_file=NA, # The full name of the file (basename.ext)
                        basename=NA,   # The basename of the file (basename)
                        name=NA,       # The descriptive name of the file (basename or other)
                        output_dir=NA  # The output directory (.../outputdir/basename/)
                        )
  dataset_names["fastq_file"] <- dataset_info[[1]]
  dataset_names["basename"] <- strsplit(dataset_info, "\\.")[[1]][1]
  # names() is NULL for an unnamed vector, so the old check
  # is.na(names(dataset_info)) produced logical(0) and made if() fail with
  # "argument is of length zero". Fall back to the basename whenever no
  # usable name is supplied.
  info_name <- names(dataset_info)
  if (is.null(info_name) || is.na(info_name[1]) || info_name[1] == ""){
    dataset_names["name"] <- dataset_names[["basename"]]
  } else{
    dataset_names["name"] <- info_name[1]}
  dataset_names["output_dir"] <- create_output_dirs(out_dir = output_dir, name = dataset_names[["name"]])
  dataset_names["figure_dir"] <- create_output_dirs(out_dir = dataset_names[["output_dir"]], name =c("figures"))
  return(dataset_names)
}
#
# main_workflow <- function()
# {
# incProgress(amount = .2, detail = "Loading intervals")
# values$genome_data <- load_genome_data(path = values$genomes_dir,
# genome = values$selected_genome[["Version"]])
# print(values$genome_data)
# print(values$selected_genome[["Gene.Sets"]])
# incProgress(amount = .2, detail = "Loading gene sets")
# values$gene_sets <- load_gene_sets(gene_sets = values$selected_genome[["Gene.Sets"]])
#
# ### Load alignment
# incProgress(amount = .2, detail = "Loading alignments")
# values$alignments <- load_alignments(path = values$bam_path)
#
# incProgress(amount = .2, detail = "Filtering regions")
# values$alignments <- filter_by_regions(alignments = values$alignments,
# regions = values$genome_data[["gene_intervals"]],
# type = "both")
# incProgress(amount = .1, detail = "Filtering alignment sizes")
# values$alignments <- filter_alignments_by_size(alignments = values$alignments,
# minimum = input$get_range[[1]],
# maximum = input$get_range[[2]])
# incProgress(amount = .1, detail = "Filtering overrpreseented reads")
# values$alignments <- remove_overrepresented_sequences(alignments = values$alignments,
# cutoff = input$read_cutoff)
#
# values$alignments <- get_genome_sequence(gr = values$alignments,
# genome_sequence = load_fasta_genome(
# path = values$selected_genome[['Genome.FASTA']]
# ))
#
# print('print(values$alignments)')
# print(values$alignments)
#
# incProgress(amount = .1, detail = "Filtering mismatches")
# mismatch_indexes <- filter_BAM_tags(values$alignments)
# values$two_mm <- values$alignments[mismatch_indexes$two_mm]
# values$no_mm <- values$alignments[mismatch_indexes$no_mm]
# values$no_mm_in_seed <- values$alignments[mismatch_indexes$no_mm_seed]
# values$shuffled <- shuffle_alignments(alignments = values$two_mm,
# intervals = values$genome_data[["gene_intervals"]],
# antisense = TRUE)
# #values$two_mm <- filter_alignments(alignments = values$two_mm, regions = )
#
# print('The length of values$two_mm is: ')
# print(length(x = print(values$two_mm)))
# #print(values$two_mm)
# #print(values$no_mm)
# #print(values$no_mm_in_seed)
# print('making the plots')
# print('making the plots two_mm')
# print(width(values$two_mm))
# print(strand(values$two_mm))
# print(mcols(values$two_mm))
#
# create_output_dirs(out_dir = values$output_dir,
# name = 'five_prime')
# p <- make_length_plots(gr = values$two_mm,
# path = paste(values$output_dir,
# '/five_prime',
# sep = ''),
# label = "two_mm__all_genes")
# output$fivepp_all <- renderPlot({p})
# print('making the plots no_mm')
# make_length_plots(gr = values$no_mm,
# path = paste(values$output_dir,
# '/five_prime',
# sep = ''),
# label = "no_mm__all_genes")
# print('making the plots no_mm_in_seed')
# make_length_plots(gr = values$no_mm_in_seed,
# path = paste(values$output_dir,
# '/five_prime',
# sep = ''),
# label = "no_mm_in_seed__all_genes")
# }
# process_fastq <- function(input_dir, output_dir, dataset_info, adapter_file, genome, alignment_settings){
#
# # Trim the adapter sequences
# run_cutadapt()
#
# #Align the reads using bowtie
# for(j in 1:length(alignment_settings)){
# dataset_names[names(alignment_settings[j])] <- run_bowtie(
# alignment_settings[j], # The options for this alignment file
# names(alignment_settings[j]), # The name of the sam file configuration
# genome # ID of genome
# )
# }
# system(command = paste("gzip -f",
# paste(dataset_names["output_dir"],"/", dataset_names["basename"], ".trimmed.fastq", sep = ""),
# wait = TRUE)
# )
# return(dataset_names)
# }
|
########################################################################
# #
# Analysis of the Scholtzia Data Set as in Hahn & Jensen (2013) #
# #
########################################################################
# load the scholtzia data set and the intensity estimates used in H&J(2013) from sostatpp
data(scholtzia)
data(intensities)
# "home made" quadrats:
#
# the data show higher intensity at the bottom than at the top of the plot,
# therefore we divide it into two halves according to y-coordinate
testset <- twoquadsets(scholtzia, nx = 1, ny = 2, grady = TRUE)
# now the subsets are subdivided into 2x2 and 4x1 quadrats, respectively
# new lo: low intensity, the upper part (was hi), 2x2 hi: lower part, 4x1
quadsets <- list(lo = quadrats(testset$hi[[1]], nx = 2, ny = 2),
hi = quadrats(testset$lo[[1]], nx = 4, ny = 1))
# colors for plotting, and a plot of the quadrats (Figure 19)
styles <- list(hi = simplist(col="red", alpha=.4, col.win="red", alpha.win=.4),
lo = simplist(col="blue", alpha=.4, col.win="blue"))
quadratsplot(scholtzia, quadsets, styles, pch=16, cex=.5)
##### ---------- analysis as locally rescaled second-order stationary --------
scholtzia_s <- rescaled(scholtzia, intensity = scholtzia.intens)
test_s <- sos.test(scholtzia_s, quadsets, rmax = 1.25, use.tbar = TRUE)
print(test_s)
# visualisation: Figure 20, upper left
plot(test_s, styles)
##### ----------- adjusting intensity on quadrats by "normpower" ----------
# argument name fixed: "normpowe" was a typo (compare the reweighted call
# below, which spells "normpower")
test_s2 <- sos.test(scholtzia_s, quadsets, rmax = 1.25, use.tbar = TRUE, normpower = 2)
print(test_s2)
# visualisation: Figure 20, upper right
plot(test_s2, styles)
##### ---------- analysis as reweighted second-order stationary --------
scholtzia_w <- reweighted(scholtzia, intensity = scholtzia.intens)
test_w <- sos.test(scholtzia_w, quadsets, rmax = 2, use.tbar = TRUE)
print(test_w)
# visualisation: Figure 20, lower left
plot(test_w, styles)
##### ----------- adjusting intensity on quadrats by "normpower" ----------
test_w2 <- sos.test(scholtzia_w, quadsets, rmax = 2, use.tbar = TRUE, normpower = 2)
print(test_w2)
# visualisation: Figure 20, lower right
plot(test_w2, styles)
|
/demo/scholtzia.R
|
no_license
|
ute/hidden2statspat
|
R
| false
| false
| 2,352
|
r
|
########################################################################
# #
# Analysis of the Scholtzia Data Set as in Hahn & Jensen (2013) #
# #
########################################################################
# load the scholtzia data set and the intensity estimates used in H&J(2013) from sostatpp
data(scholtzia)
data(intensities)
# "home made" quadrats:
#
# the data show higher intensity at the bottom than at the top of the plot,
# therefore we divide it into two halves according to y-coordinate
testset <- twoquadsets(scholtzia, nx = 1, ny = 2, grady = TRUE)
# now the subsets are subdivided into 2x2 and 4x1 quadrats, respectively
# new lo: low intensity, the upper part (was hi), 2x2 hi: lower part, 4x1
quadsets <- list(lo = quadrats(testset$hi[[1]], nx = 2, ny = 2),
hi = quadrats(testset$lo[[1]], nx = 4, ny = 1))
# colors for plotting, and a plot of the quadrats (Figure 19)
styles <- list(hi = simplist(col="red", alpha=.4, col.win="red", alpha.win=.4),
lo = simplist(col="blue", alpha=.4, col.win="blue"))
quadratsplot(scholtzia, quadsets, styles, pch=16, cex=.5)
##### ---------- analysis as locally rescaled second-order stationary --------
scholtzia_s <- rescaled(scholtzia, intensity = scholtzia.intens)
test_s <- sos.test(scholtzia_s, quadsets, rmax = 1.25, use.tbar = TRUE)
print(test_s)
# visualisation: Figure 20, upper left
plot(test_s, styles)
##### ----------- adjusting intensity on quadrats by "normpower" ----------
# argument name fixed: "normpowe" was a typo (compare the reweighted call
# below, which spells "normpower")
test_s2 <- sos.test(scholtzia_s, quadsets, rmax = 1.25, use.tbar = TRUE, normpower = 2)
print(test_s2)
# visualisation: Figure 20, upper right
plot(test_s2, styles)
##### ---------- analysis as reweighted second-order stationary --------
scholtzia_w <- reweighted(scholtzia, intensity = scholtzia.intens)
test_w <- sos.test(scholtzia_w, quadsets, rmax = 2, use.tbar = TRUE)
print(test_w)
# visualisation: Figure 20, lower left
plot(test_w, styles)
##### ----------- adjusting intensity on quadrats by "normpower" ----------
test_w2 <- sos.test(scholtzia_w, quadsets, rmax = 2, use.tbar = TRUE, normpower = 2)
print(test_w2)
# visualisation: Figure 20, lower right
plot(test_w2, styles)
|
Cox.plot <-
function(x, y, file, var.label.x, var.label.y, ...)
{
  # Scatter "time plot" of covariate x versus survival time, colouring points
  # by event status (black = censored, red = observed event).
  #
  # x            numeric covariate.
  # y            survival outcome; column 1 = time, column 2 = status (0/1).
  #              Assumed to be a survival::Surv object so that is.na() and
  #              "[" operate row-wise -- TODO confirm against callers.
  # file         output file name; its extension selects the graphics device
  #              (bmp/png/tif/tiff/jpg/jpeg/pdf). NULL opens a new on-screen
  #              device instead.
  # var.label.x  label for the covariate (y axis and title).
  # var.label.y  label for the outcome (title).
  # ...          passed on to the graphics device (e.g. width, height,
  #              onefile for pdf).
  # drop observations with missing covariate or outcome
  kk<-!is.na(x) & !is.na(y)
  x<-x[kk]
  y<-y[kk]
  # capture unevaluated ... to inspect the pdf-specific onefile flag
  dots.args <- eval(substitute(alist(...)))
  onefile <- FALSE
  if (!is.null(dots.args$onefile))
    onefile<- dots.args$onefile
  if (is.null(file))
    dev.new()
  else {
    # open the device matching the file extension; grepl() yields the scalar
    # logical directly, and else-if guarantees a single device is opened
    if (grepl("bmp$", file))
      bmp(file,...)
    else if (grepl("png$", file))
      png(file,...)
    else if (grepl("tiff?$", file))  # accepts .tif and .tiff
      tiff(file,...)
    else if (grepl("jpe?g$", file))  # accepts .jpg and .jpeg
      jpeg(file,...)
    else if (grepl("pdf$", file)) {
      # with onefile = TRUE the caller manages a shared multi-page pdf device
      if (!onefile)
        pdf(file,...)
    }
  }
  plot(y[,1],x,type="n",xlab="time",ylab=var.label.x)
  points(y[y[,2]==0,1],x[y[,2]==0],pch=21,cex=1,bg="black",col="black")
  points(y[y[,2]==1,1],x[y[,2]==1],pch=21,cex=1,bg="red",col="red")
  title(main = paste("Time plot of '",var.label.y,"' by '",var.label.x,"'", sep=""))
  legend("topright",c("censored","observed"),pch=19,col=c("black","red"),pt.bg=c("black","red"))
  # close the device unless drawing into a caller-managed multi-page pdf
  if (!is.null(file) && (!grepl("pdf$", file) || !onefile))
    dev.off()
}
|
/compareGroups/R/Cox.plot.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 1,155
|
r
|
Cox.plot <-
function(x, y, file, var.label.x, var.label.y, ...)
{
  # Scatter "time plot" of covariate x versus survival time, colouring points
  # by event status (black = censored, red = observed event).
  #
  # x            numeric covariate.
  # y            survival outcome; column 1 = time, column 2 = status (0/1).
  #              Assumed to be a survival::Surv object so that is.na() and
  #              "[" operate row-wise -- TODO confirm against callers.
  # file         output file name; its extension selects the graphics device
  #              (bmp/png/tif/tiff/jpg/jpeg/pdf). NULL opens a new on-screen
  #              device instead.
  # var.label.x  label for the covariate (y axis and title).
  # var.label.y  label for the outcome (title).
  # ...          passed on to the graphics device (e.g. width, height,
  #              onefile for pdf).
  # drop observations with missing covariate or outcome
  kk<-!is.na(x) & !is.na(y)
  x<-x[kk]
  y<-y[kk]
  # capture unevaluated ... to inspect the pdf-specific onefile flag
  dots.args <- eval(substitute(alist(...)))
  onefile <- FALSE
  if (!is.null(dots.args$onefile))
    onefile<- dots.args$onefile
  if (is.null(file))
    dev.new()
  else {
    # open the device matching the file extension; grepl() yields the scalar
    # logical directly, and else-if guarantees a single device is opened
    if (grepl("bmp$", file))
      bmp(file,...)
    else if (grepl("png$", file))
      png(file,...)
    else if (grepl("tiff?$", file))  # accepts .tif and .tiff
      tiff(file,...)
    else if (grepl("jpe?g$", file))  # accepts .jpg and .jpeg
      jpeg(file,...)
    else if (grepl("pdf$", file)) {
      # with onefile = TRUE the caller manages a shared multi-page pdf device
      if (!onefile)
        pdf(file,...)
    }
  }
  plot(y[,1],x,type="n",xlab="time",ylab=var.label.x)
  points(y[y[,2]==0,1],x[y[,2]==0],pch=21,cex=1,bg="black",col="black")
  points(y[y[,2]==1,1],x[y[,2]==1],pch=21,cex=1,bg="red",col="red")
  title(main = paste("Time plot of '",var.label.y,"' by '",var.label.x,"'", sep=""))
  legend("topright",c("censored","observed"),pch=19,col=c("black","red"),pt.bg=c("black","red"))
  # close the device unless drawing into a caller-managed multi-page pdf
  if (!is.null(file) && (!grepl("pdf$", file) || !onefile))
    dev.off()
}
|
heatmapUI = function(id) {
  # Module UI: gene-list text box, normalisation/colour options, species
  # filter, CSV download button and the heatmap plot area.
  ns = NS(id)
  gene_box <- textAreaInput(ns('genes'), 'Enter a list of orthoIDs', height = '200px', width = '600px')
  example_btn <- actionButton(ns('example'), 'Example')
  clear_btn <- actionButton(ns('clear'), 'Clear')
  norm_cols <- checkboxInput(ns('normalizeCols'), 'Normalize columns?')
  norm_rows <- checkboxInput(ns('normalizeRows'), 'Normalize rows?')
  red_green <- checkboxInput(ns('redGreen'), 'Red-black-green colors?')
  species_sel <- selectInput(ns('species'), 'Species', multiple = T, choices = c(), width = '600px')
  download_btn <- downloadButton(ns('downloadData'), 'Download')
  heatmap_out <- plotOutput(ns('heatmap'), height = '700px', width = '1000px')
  tagList(
    gene_box,
    example_btn,
    clear_btn,
    p('Plot log10-scaled gene expressions values across species'),
    p('Optionally normalize columns (individual samples)'),
    norm_cols,
    norm_rows,
    red_green,
    species_sel,
    p('Download as CSV'),
    download_btn,
    h2('Heatmaps'),
    heatmap_out
  )
}
heatmapServer = function(input, output, session) {
# Module server: fetches expression values for the user-supplied ortholog
# IDs from the database, reshapes them into a (gene x species/tissue)
# matrix, renders the heatmap, and serves the matrix as a download.
# Query the database for all expression rows matching the entered IDs.
heatmapData = reactive({
# nothing to do until the user has entered at least one ID
if (is.null(input$genes) | input$genes == '') {
return()
}
# borrow a connection from the pool; on.exit guarantees it is returned
conn = pool::poolCheckout(pool)
on.exit(pool::poolReturn(conn))
# one ID per line; trim whitespace and SQL-quote each value
gen = strsplit(input$genes, '\n')
gen = sapply(gen, trimws)
gen = sapply(gen, function(elt) {
dbQuoteString(conn, elt)
})
# build the "(id1,id2,...)" literal for the IN clause
mylist = paste0('(', do.call(paste, c(as.list(gen), sep = ',')), ')')
query = sprintf('SELECT o.ortholog_id, o.species_id, od.symbol, o.gene_id, e.value, e.tissue FROM orthologs o JOIN species s on o.species_id=s.species_id JOIN orthodescriptions od on o.ortholog_id = od.ortholog_id JOIN expression e on e.gene_id = o.gene_id WHERE o.ortholog_id IN %s', mylist)
rs = DBI::dbSendQuery(conn, query)
ret = DBI::dbFetch(rs)
# refresh the species selector with the species present in the result
v = unique(ret$species_id)
updateSelectInput(session, "species", choices=v, selected=v)
ret
})
# Reshape the query result into a matrix: rows = ortholog_id + symbol,
# columns = species_id + tissue; missing combinations become 0.
matrixData = reactive({
ret = heatmapData()
if (is.null(input$genes) | input$genes == '') {
return()
}
if (is.null(input$species)) {
return()
}
# keep only the species currently selected in the UI
ret = subset(ret, species_id %in% input$species)
h = reshape2::acast(ret, ortholog_id + symbol ~ species_id + tissue)
h[is.na(h)] = 0
h
})
# Render the heatmap: log-scale values, optionally z-score columns and/or
# rows, and pick the colour palette from the redGreen checkbox.
output$heatmap = renderPlot({
h = matrixData()
if(is.null(h)) {
return()
}
# log(x + 1) so zero expression stays finite
d = log(h + 1)
if(input$normalizeCols) {
# scale() z-scores columns; index to drop the attached attributes
d = scale(d)[1:nrow(d),1:ncol(d)]
}
if(input$normalizeRows) {
# transpose, z-score (now the original rows), transpose back
e = t(d)
d = t(scale(e)[1:nrow(e),1:ncol(e)])
}
pal = colorRampPalette(rev(RColorBrewer::brewer.pal(n = 7, name = "RdYlBu")))(200)
if(input$redGreen) {
pal = colorRampPalette(c("green", "black", "red"))(200)
}
pheatmap::pheatmap(d, color=pal)
})
# Serve the current matrix as a tab-separated file (note: despite the UI
# label and file name, the separator is a tab, not a comma).
output$downloadData <- downloadHandler(
filename = 'heatmap.csv',
content = function(file) {
write.table(matrixData(), file, quote = F, sep = '\t')
}
)
# fill the text box with the configured example gene list
observeEvent(input$example, {
updateTextAreaInput(session, 'genes', value = config$sample_heatmap)
})
# bookmark the app state whenever the gene list changes
observe({
input$genes
session$doBookmark()
})
observeEvent(input$normalizeRows, {
session$doBookmark()
})
observeEvent(input$normalizeCols, {
session$doBookmark()
})
# clear the gene list text box
observeEvent(input$clear, {
updateTextAreaInput(session, 'genes', value='')
})
# transient buttons should not be part of the bookmarked state
setBookmarkExclude(c('example', 'clear'))
}
|
/page/heatmap.R
|
no_license
|
msuefishlab/shinyorthologs
|
R
| false
| false
| 3,542
|
r
|
heatmapUI = function(id) {
  # Module UI: gene-list text box, normalisation/colour options, species
  # filter, CSV download button and the heatmap plot area.
  ns = NS(id)
  gene_box <- textAreaInput(ns('genes'), 'Enter a list of orthoIDs', height = '200px', width = '600px')
  example_btn <- actionButton(ns('example'), 'Example')
  clear_btn <- actionButton(ns('clear'), 'Clear')
  norm_cols <- checkboxInput(ns('normalizeCols'), 'Normalize columns?')
  norm_rows <- checkboxInput(ns('normalizeRows'), 'Normalize rows?')
  red_green <- checkboxInput(ns('redGreen'), 'Red-black-green colors?')
  species_sel <- selectInput(ns('species'), 'Species', multiple = T, choices = c(), width = '600px')
  download_btn <- downloadButton(ns('downloadData'), 'Download')
  heatmap_out <- plotOutput(ns('heatmap'), height = '700px', width = '1000px')
  tagList(
    gene_box,
    example_btn,
    clear_btn,
    p('Plot log10-scaled gene expressions values across species'),
    p('Optionally normalize columns (individual samples)'),
    norm_cols,
    norm_rows,
    red_green,
    species_sel,
    p('Download as CSV'),
    download_btn,
    h2('Heatmaps'),
    heatmap_out
  )
}
heatmapServer = function(input, output, session) {
  # Server half of the heatmap module: fetches expression values for the
  # orthoIDs typed by the user, reshapes them into an (ortholog x
  # species/tissue) matrix, and renders/downloads a heatmap.

  heatmapData = reactive({
    # || short-circuits, so input$genes == '' is never evaluated while
    # input$genes is still NULL (elementwise | would hand if() a logical(0)
    # and error).
    if (is.null(input$genes) || input$genes == '') {
      return()
    }
    # Borrow a connection from the pool; always hand it back on exit.
    conn = pool::poolCheckout(pool)
    on.exit(pool::poolReturn(conn))
    gen = strsplit(input$genes, '\n')
    gen = sapply(gen, trimws)
    # Quote every id through the DBI driver to prevent SQL injection.
    gen = sapply(gen, function(elt) {
      dbQuoteString(conn, elt)
    })
    mylist = paste0('(', do.call(paste, c(as.list(gen), sep = ',')), ')')
    query = sprintf('SELECT o.ortholog_id, o.species_id, od.symbol, o.gene_id, e.value, e.tissue FROM orthologs o JOIN species s on o.species_id=s.species_id JOIN orthodescriptions od on o.ortholog_id = od.ortholog_id JOIN expression e on e.gene_id = o.gene_id WHERE o.ortholog_id IN %s', mylist)
    rs = DBI::dbSendQuery(conn, query)
    ret = DBI::dbFetch(rs)
    # Free the result set before the connection goes back to the pool;
    # leaving it open leaks a pending result on the pooled connection.
    DBI::dbClearResult(rs)
    # Offer only the species present in the result, all selected by default.
    v = unique(ret$species_id)
    updateSelectInput(session, "species", choices=v, selected=v)
    ret
  })

  matrixData = reactive({
    ret = heatmapData()
    if (is.null(input$genes) || input$genes == '') {
      return()
    }
    if (is.null(input$species)) {
      return()
    }
    ret = subset(ret, species_id %in% input$species)
    # Wide matrix: rows = ortholog_id/symbol, cols = species_id/tissue;
    # missing combinations become 0.
    h = reshape2::acast(ret, ortholog_id + symbol ~ species_id + tissue)
    h[is.na(h)] = 0
    h
  })

  output$heatmap = renderPlot({
    h = matrixData()
    if(is.null(h)) {
      return()
    }
    # log(x + 1) keeps zero counts finite.
    d = log(h + 1)
    if(input$normalizeCols) {
      # scale() centers/scales columns; the explicit indexing strips the
      # attributes scale() attaches.
      d = scale(d)[1:nrow(d),1:ncol(d)]
    }
    if(input$normalizeRows) {
      # Row normalization via transpose -> column scale -> transpose back.
      e = t(d)
      d = t(scale(e)[1:nrow(e),1:ncol(e)])
    }
    pal = colorRampPalette(rev(RColorBrewer::brewer.pal(n = 7, name = "RdYlBu")))(200)
    if(input$redGreen) {
      pal = colorRampPalette(c("green", "black", "red"))(200)
    }
    pheatmap::pheatmap(d, color=pal)
  })

  output$downloadData <- downloadHandler(
    # NOTE(review): file is named .csv but written tab-separated -- confirm.
    filename = 'heatmap.csv',
    content = function(file) {
      write.table(matrixData(), file, quote = FALSE, sep = '\t')
    }
  )

  # Fill the gene box with the configured example ids.
  observeEvent(input$example, {
    updateTextAreaInput(session, 'genes', value = config$sample_heatmap)
  })
  # Re-bookmark whenever the gene list or a normalization toggle changes.
  observe({
    input$genes
    session$doBookmark()
  })
  observeEvent(input$normalizeRows, {
    session$doBookmark()
  })
  observeEvent(input$normalizeCols, {
    session$doBookmark()
  })
  observeEvent(input$clear, {
    updateTextAreaInput(session, 'genes', value='')
  })
  # Buttons are transient actions; keep them out of bookmark URLs.
  setBookmarkExclude(c('example', 'clear'))
}
|
## Plot 4
## R code file
# Force English month/day names on the time axis regardless of system locale.
Sys.setlocale("LC_TIME", "English")
# Read the full dataset; "?" marks missing values.
all_data<-read.csv("household_power_consumption.txt",header=TRUE,sep=";",na.strings="?",nrows=2075259,check.names=F,stringsAsFactors=F,comment.char="",quote="\"")
all_data$Date<-as.Date(all_data$Date,format="%d/%m/%Y")
# Keep only 2007-02-01 and 2007-02-02 (the strings are coerced to Date
# by the comparison).
data<-subset(all_data,subset=(Date>="2007-02-01"&Date<="2007-02-02"))
head(data)
# Combine date and time into a single POSIXlt timestamp column.
dateTime<-paste(as.Date(data$Date),data$Time)
data$DateTime<-strptime(dateTime,format="%Y-%m-%d %H:%M:%S")
# 2x2 grid of panels.
par(mfrow=c(2,2))
with(data,{
plot(data$DateTime,data$Global_active_power,type="l",xlab="",ylab="Global Active Power",cex.lab=0.8,cex.axis=0.8)
plot(data$DateTime,data$Voltage,type="l",xlab="datetime",ylab="Voltage",cex.lab=0.8,cex.axis=0.8)
plot(data$DateTime,data$Sub_metering_1,type="l",col="black",xlab="",ylab="Energy sub metering",cex.lab=0.8,cex.axis=0.8)
lines(data$DateTime,data$Sub_metering_2,col="red")
lines(data$DateTime,data$Sub_metering_3,col="blue")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lwd=1,col=c("black","red","blue"),cex=0.7,bty="n")
plot(data$DateTime,data$Global_reactive_power,type="l",xlab="datetime",ylab="Global_Reactive_Power",cex.lab=0.8,cex.axis=0.8)
})
# NOTE(review): dev.copy() snapshots the on-screen device; opening png()
# before plotting would give more predictable output -- confirm intent.
dev.copy(png,height=480,width=480,units="px",file="plot4.png")
dev.off()
|
/plot4.R
|
no_license
|
ladg340/ExData_Plotting1
|
R
| false
| false
| 1,316
|
r
|
## Plot 4
## R code file
# Force English month/day names on the time axis regardless of system locale.
Sys.setlocale("LC_TIME", "English")
# Read the full dataset; "?" marks missing values.
all_data<-read.csv("household_power_consumption.txt",header=TRUE,sep=";",na.strings="?",nrows=2075259,check.names=F,stringsAsFactors=F,comment.char="",quote="\"")
all_data$Date<-as.Date(all_data$Date,format="%d/%m/%Y")
# Keep only 2007-02-01 and 2007-02-02 (the strings are coerced to Date
# by the comparison).
data<-subset(all_data,subset=(Date>="2007-02-01"&Date<="2007-02-02"))
head(data)
# Combine date and time into a single POSIXlt timestamp column.
dateTime<-paste(as.Date(data$Date),data$Time)
data$DateTime<-strptime(dateTime,format="%Y-%m-%d %H:%M:%S")
# 2x2 grid of panels.
par(mfrow=c(2,2))
with(data,{
plot(data$DateTime,data$Global_active_power,type="l",xlab="",ylab="Global Active Power",cex.lab=0.8,cex.axis=0.8)
plot(data$DateTime,data$Voltage,type="l",xlab="datetime",ylab="Voltage",cex.lab=0.8,cex.axis=0.8)
plot(data$DateTime,data$Sub_metering_1,type="l",col="black",xlab="",ylab="Energy sub metering",cex.lab=0.8,cex.axis=0.8)
lines(data$DateTime,data$Sub_metering_2,col="red")
lines(data$DateTime,data$Sub_metering_3,col="blue")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lwd=1,col=c("black","red","blue"),cex=0.7,bty="n")
plot(data$DateTime,data$Global_reactive_power,type="l",xlab="datetime",ylab="Global_Reactive_Power",cex.lab=0.8,cex.axis=0.8)
})
# NOTE(review): dev.copy() snapshots the on-screen device; opening png()
# before plotting would give more predictable output -- confirm intent.
dev.copy(png,height=480,width=480,units="px",file="plot4.png")
dev.off()
|
# Plot 2: Global Active Power over the two target days, saved as PNG.
dataFile <- "./data/household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Keep only the two target days; dates are stored as d/m/Y strings.
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#str(subSetData)
# Build POSIXlt timestamps from the separate Date and Time columns.
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Coercion to numeric turns any "?" placeholders into NA (with a warning).
globalActivePower <- as.numeric(subSetData$Global_active_power)
# Render the line plot straight to a 480x480 PNG device.
png("plot2.png", width=480, height=480)
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
/plot2.R
|
no_license
|
Sisimulder/ExData_Plotting1
|
R
| false
| false
| 545
|
r
|
# Plot 2: Global Active Power over the two target days, saved as PNG.
dataFile <- "./data/household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Keep only the two target days; dates are stored as d/m/Y strings.
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#str(subSetData)
# Build POSIXlt timestamps from the separate Date and Time columns.
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Coercion to numeric turns any "?" placeholders into NA (with a warning).
globalActivePower <- as.numeric(subSetData$Global_active_power)
# Render the line plot straight to a 480x480 PNG device.
png("plot2.png", width=480, height=480)
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
# Print `msg` over the previous console message: emit a carriage return
# first whenever the previous message had any characters, so the new text
# overwrites it in place. Returns the number of characters written (to be
# passed back as `prevmsglength` on the next call).
.overcat <- function(msg, prevmsglength) {
  overwrite <- prevmsglength > 0
  if (overwrite) {
    cat("\r")
  }
  cat(msg)
  nchar(msg)
}
# Unique-location index: map each row of cbind(...) to an integer that
# identifies its distinct value combination. Equal rows receive equal
# indices; first occurrences are numbered in order of appearance. Returns
# a named integer vector of length nrow(cbind(...)).
.ULI <- function(...) {
 redondGeo <- cbind(...) ## always a matrix
 if (ncol(redondGeo)==0L) return(rep(1L,nrow(redondGeo))) ## .generateInitPhi with constant predictor led here
 if (nrow(redondGeo)==1L) return(1L) ## trivial case where the forllowingcode fails
 # redondFac <- apply(redondGeo,1,paste,collapse=" ") ## always characters whatever the number of columns
 # redondFac <- as.integer(as.factor(redondFac)) ## as.factor effectively distinguishes unique character strings
 # uniqueFac <- unique(redondFac) ## seems to preserve order ## unique(<integer>) has unambiguous behaviour
 # sapply(lapply(redondFac, `==`, uniqueFac ), which)
 # Per-column factor coding with empty labels, then row-wise paste, gives a
 # compact per-row key; "_" separates columns to avoid collisions.
 redondFac <- apply(redondGeo,2L,factor,labels="") # not cute use of labels...
 redondFac <- apply(redondFac,1L,paste,collapse="_") ## paste factors
 redondFac <- as.character(factor(redondFac))
 uniqueFac <- unique(redondFac) ## seems to preserve order ## unique(<integer>) has unambiguous behaviour
 # Name the running indices by their key so the final lookup expands them
 # back to one index per input row.
 uniqueIdx <- seq(length(uniqueFac))
 names(uniqueIdx) <- uniqueFac
 return(uniqueIdx[redondFac])
}
# Return the path of the active RStudio project. The function is looked up
# inside the rstudioapi namespace at call time, so the package is only
# required when this helper is actually invoked.
projpath <- function() {
  api_ns <- asNamespace("rstudioapi")
  get("getActiveProject", envir = api_ns)()
}
|
/fuzzedpackages/blackbox/R/blackbox_internals.R
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 1,306
|
r
|
# Print `msg` over the previous console message: emit a carriage return
# first whenever the previous message had any characters, so the new text
# overwrites it in place. Returns the number of characters written (to be
# passed back as `prevmsglength` on the next call).
.overcat <- function(msg, prevmsglength) {
  overwrite <- prevmsglength > 0
  if (overwrite) {
    cat("\r")
  }
  cat(msg)
  nchar(msg)
}
# Unique-location index: map each row of cbind(...) to an integer that
# identifies its distinct value combination. Equal rows receive equal
# indices; first occurrences are numbered in order of appearance. Returns
# a named integer vector of length nrow(cbind(...)).
.ULI <- function(...) {
 redondGeo <- cbind(...) ## always a matrix
 if (ncol(redondGeo)==0L) return(rep(1L,nrow(redondGeo))) ## .generateInitPhi with constant predictor led here
 if (nrow(redondGeo)==1L) return(1L) ## trivial case where the forllowingcode fails
 # redondFac <- apply(redondGeo,1,paste,collapse=" ") ## always characters whatever the number of columns
 # redondFac <- as.integer(as.factor(redondFac)) ## as.factor effectively distinguishes unique character strings
 # uniqueFac <- unique(redondFac) ## seems to preserve order ## unique(<integer>) has unambiguous behaviour
 # sapply(lapply(redondFac, `==`, uniqueFac ), which)
 # Per-column factor coding with empty labels, then row-wise paste, gives a
 # compact per-row key; "_" separates columns to avoid collisions.
 redondFac <- apply(redondGeo,2L,factor,labels="") # not cute use of labels...
 redondFac <- apply(redondFac,1L,paste,collapse="_") ## paste factors
 redondFac <- as.character(factor(redondFac))
 uniqueFac <- unique(redondFac) ## seems to preserve order ## unique(<integer>) has unambiguous behaviour
 # Name the running indices by their key so the final lookup expands them
 # back to one index per input row.
 uniqueIdx <- seq(length(uniqueFac))
 names(uniqueIdx) <- uniqueFac
 return(uniqueIdx[redondFac])
}
# Return the path of the active RStudio project. The function is looked up
# inside the rstudioapi namespace at call time, so the package is only
# required when this helper is actually invoked.
projpath <- function() {
  api_ns <- asNamespace("rstudioapi")
  get("getActiveProject", envir = api_ns)()
}
|
#' Gauss-Seidel based Optimization & estimation
#'
#' @description Function utilizes the Gauss-Seidel optimization to solve equation Ax=b
#' @param A : Input matrix
#' @param b : Response
#' @param x : Initial solutions
#' @param iter : Number of Iterations
#' @param tol : Convergence tolerance
#' @param w : Relaxation parameter used to compute weighted avg. of previous solution. w=1 represents no relaxation
#' @param witr : Iteration after which relaxation parameter becomes active
#' @return optimal : Optimal solutions
#' @return initial : initial solution
#' @return relaxationFactor : relaxation factor
#' @examples
#' A<-matrix(c(4,-1,1, -1,4,-2,1,-2,4), nrow=3,ncol=3, byrow = TRUE)
#' b<-matrix(c(12,-1, 5), nrow=3,ncol=1,byrow=TRUE)
#' Z<-optR(A, b, method="gaussseidel", iter=500, tol=1e-7)
gaussSeidel<-function(A, b, x=NULL, iter=500, tol=1e-7, w=1, witr=NULL){
  # Relaxation activates at iteration `witr`; by default it never triggers
  # within the requested number of iterations.
  if(is.null(witr)) witr<-iter
  witrp<-1 # iteration gap used to estimate the optimal relaxation factor
  nROW<-nrow(A)
  # Start from the zero vector when no initial guess is supplied
  # (TRUE spelled out rather than the reassignable shorthand T).
  if(is.null(x)) x<-matrix(rep(0, each=nROW), nrow = nROW, byrow=TRUE)
  xini<-x
  for(i in 1:iter){
    xold<-x
    for(k in 1:nROW){
      # Gauss-Seidel sweep: already-updated entries of x feed rows below k.
      x[k]<-(w/A[k,k])*(b[k]-sapply(k, FUN=nonDiagMultipication, A, x))
    }
    # Euclidean norm of the step (a 1x1 matrix, fine for scalar comparison).
    dx<-sqrt(t(x-xold)%*%(x-xold))
    if(dx<=tol){
      return(list("optimal"=x, "initial"=xini, "relaxationFactor"=w, "itr.conv"=i))
    } else
    {
      if(i==witr){
        dx1<-dx
      }
      if(i==(witr+witrp)){
        dx2<-dx
        # SOR relaxation factor estimated from the ratio of successive
        # step norms.
        w<-2/(1+sqrt(1-((dx2/dx1)^(1/witrp))))
      }
    }
  }
  print("Optimization Failed to Converge...")
  return(list("x"=x, "xini"=xini, "relaxationFactor"=w, "itr.conv"=i))
}
#' Non-diagonal multiplication
#'
#' @description Off-diagonal contribution sum(A[i,j]*beta[j], j != i) for one row
#' @param i : Row index of Matrix A (the original doc said column; the code indexes A[i, j!=i])
#' @param A : Input matrix
#' @param beta : Current solution vector
#' @return asum: Non-diagonal contribution
nonDiagMultipication<-function(i, A, beta){
  a<-seq(1, length(beta), by=1)
  asum<-sum(A[i,a!=i]*beta[a!=i])
  return(asum)
}
|
/R/gaussSeidel.R
|
no_license
|
cran/optR
|
R
| false
| false
| 2,089
|
r
|
#' Gauss-Seidel based Optimization & estimation
#'
#' @description Function utilizes the Gauss-Seidel optimization to solve equation Ax=b
#' @param A : Input matrix
#' @param b : Response
#' @param x : Initial solutions
#' @param iter : Number of Iterations
#' @param tol : Convergence tolerance
#' @param w : Relaxation parameter used to compute weighted avg. of previous solution. w=1 represents no relaxation
#' @param witr : Iteration after which relaxation parameter becomes active
#' @return optimal : Optimal solutions
#' @return initial : initial solution
#' @return relaxationFactor : relaxation factor
#' @examples
#' A<-matrix(c(4,-1,1, -1,4,-2,1,-2,4), nrow=3,ncol=3, byrow = TRUE)
#' b<-matrix(c(12,-1, 5), nrow=3,ncol=1,byrow=TRUE)
#' Z<-optR(A, b, method="gaussseidel", iter=500, tol=1e-7)
gaussSeidel<-function(A, b, x=NULL, iter=500, tol=1e-7, w=1, witr=NULL){
  # Relaxation activates at iteration `witr`; by default it never triggers
  # within the requested number of iterations.
  if(is.null(witr)) witr<-iter
  witrp<-1 # iteration gap used to estimate the optimal relaxation factor
  nROW<-nrow(A)
  # Start from the zero vector when no initial guess is supplied
  # (TRUE spelled out rather than the reassignable shorthand T).
  if(is.null(x)) x<-matrix(rep(0, each=nROW), nrow = nROW, byrow=TRUE)
  xini<-x
  for(i in 1:iter){
    xold<-x
    for(k in 1:nROW){
      # Gauss-Seidel sweep: already-updated entries of x feed rows below k.
      x[k]<-(w/A[k,k])*(b[k]-sapply(k, FUN=nonDiagMultipication, A, x))
    }
    # Euclidean norm of the step (a 1x1 matrix, fine for scalar comparison).
    dx<-sqrt(t(x-xold)%*%(x-xold))
    if(dx<=tol){
      return(list("optimal"=x, "initial"=xini, "relaxationFactor"=w, "itr.conv"=i))
    } else
    {
      if(i==witr){
        dx1<-dx
      }
      if(i==(witr+witrp)){
        dx2<-dx
        # SOR relaxation factor estimated from the ratio of successive
        # step norms.
        w<-2/(1+sqrt(1-((dx2/dx1)^(1/witrp))))
      }
    }
  }
  print("Optimization Failed to Converge...")
  return(list("x"=x, "xini"=xini, "relaxationFactor"=w, "itr.conv"=i))
}
#' Non-diagonal multiplication
#'
#' @description Off-diagonal contribution sum(A[i,j]*beta[j], j != i) for one row
#' @param i : Row index of Matrix A (the original doc said column; the code indexes A[i, j!=i])
#' @param A : Input matrix
#' @param beta : Current solution vector
#' @return asum: Non-diagonal contribution
nonDiagMultipication<-function(i, A, beta){
  a<-seq(1, length(beta), by=1)
  asum<-sum(A[i,a!=i]*beta[a!=i])
  return(asum)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/capa.R
\name{capa.uv}
\alias{capa.uv}
\title{Detection of univariate anomalous segments and points using CAPA.}
\usage{
capa.uv(x, beta = NULL, beta_tilde = NULL, type = "meanvar",
min_seg_len = 10, max_seg_len = Inf, transform = robustscale)
}
\arguments{
\item{x}{A numeric vector containing the data which is to be inspected.}
\item{beta}{A numeric constant indicating the penalty for adding an additional epidemic changepoint. It defaults to a BIC style penalty if no argument is provided.}
\item{beta_tilde}{A numeric constant indicating the penalty for adding an additional point anomaly. It defaults to a BIC style penalty if no argument is provided.}
\item{type}{A string indicating which type of deviations from the baseline are considered. Can be "meanvar" for collective anomalies characterised by joint changes in mean and
variance (the default) or "mean" for collective anomalies characterised by changes in mean only.}
\item{min_seg_len}{An integer indicating the minimum length of epidemic changes. It must be at least 2 and defaults to 10.}
\item{max_seg_len}{An integer indicating the maximum length of epidemic changes. It must be at least the min_seg_len and defaults to Inf.}
\item{transform}{A function used to transform the data prior to analysis by \code{\link{capa.uv}}. This can, for example, be used to compensate for the effects of autocorrelation
in the data. Importantly, the untransformed data remains available for post processing results obtained using \code{\link{capa.uv}}. The package includes several methods that are commonly used for
the transform (see \code{\link{robustscale}} and \code{\link{ac_corrected}}), but a user-defined function can be specified. The default value is \code{transform=robustscale}.}
}
\value{
An S4 class of type capa.uv.class.
}
\description{
A technique for detecting anomalous segments and points in univariate time series data based on CAPA (Collective And Point Anomalies) by Fisch et al. (2018). CAPA assumes that the data has a certain mean and variance for most
time points and detects segments in which the mean and/or variance deviates from the typical mean and variance as collective anomalies. It also detects point
outliers and returns a measure of strength for the changes in mean and variance. If the number of anomalous windows scales linearly with the number of
data points, CAPA scales linearly with the number of data points. At
worst, if there are no anomalies at all and \code{max_seg_len} is unspecified, the computational cost of CAPA scales quadratically with the number of data points.
}
\examples{
library(anomaly)
# Simulated data example
set.seed(2018)
# Generate data typically following a normal distribution with mean 0 and variance 1.
# Then introduce 3 anomaly windows and 4 point outliers.
x = rnorm(5000)
x[401:500] = rnorm(100,4,1)
x[1601:1800] = rnorm(200,0,0.01)
x[3201:3500] = rnorm(300,0,10)
x[c(1000,2000,3000,4000)] = rnorm(4,0,100)
res<-capa.uv(x)
res
plot(res)
}
\references{
\insertRef{2018arXiv180601947F}{anomaly}
}
|
/man/capa.uv.Rd
|
no_license
|
Fisch-Alex/anomaly
|
R
| false
| true
| 3,121
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/capa.R
\name{capa.uv}
\alias{capa.uv}
\title{Detection of univariate anomalous segments and points using CAPA.}
\usage{
capa.uv(x, beta = NULL, beta_tilde = NULL, type = "meanvar",
min_seg_len = 10, max_seg_len = Inf, transform = robustscale)
}
\arguments{
\item{x}{A numeric vector containing the data which is to be inspected.}
\item{beta}{A numeric constant indicating the penalty for adding an additional epidemic changepoint. It defaults to a BIC style penalty if no argument is provided.}
\item{beta_tilde}{A numeric constant indicating the penalty for adding an additional point anomaly. It defaults to a BIC style penalty if no argument is provided.}
\item{type}{A string indicating which type of deviations from the baseline are considered. Can be "meanvar" for collective anomalies characterised by joint changes in mean and
variance (the default) or "mean" for collective anomalies characterised by changes in mean only.}
\item{min_seg_len}{An integer indicating the minimum length of epidemic changes. It must be at least 2 and defaults to 10.}
\item{max_seg_len}{An integer indicating the maximum length of epidemic changes. It must be at least the min_seg_len and defaults to Inf.}
\item{transform}{A function used to transform the data prior to analysis by \code{\link{capa.uv}}. This can, for example, be used to compensate for the effects of autocorrelation
in the data. Importantly, the untransformed data remains available for post processing results obtained using \code{\link{capa.uv}}. The package includes several methods that are commonly used for
the transform (see \code{\link{robustscale}} and \code{\link{ac_corrected}}), but a user-defined function can be specified. The default value is \code{transform=robustscale}.}
}
\value{
An S4 class of type capa.uv.class.
}
\description{
A technique for detecting anomalous segments and points in univariate time series data based on CAPA (Collective And Point Anomalies) by Fisch et al. (2018). CAPA assumes that the data has a certain mean and variance for most
time points and detects segments in which the mean and/or variance deviates from the typical mean and variance as collective anomalies. It also detects point
outliers and returns a measure of strength for the changes in mean and variance. If the number of anomalous windows scales linearly with the number of
data points, CAPA scales linearly with the number of data points. At
worst, if there are no anomalies at all and \code{max_seg_len} is unspecified, the computational cost of CAPA scales quadratically with the number of data points.
}
\examples{
library(anomaly)
# Simulated data example
set.seed(2018)
# Generate data typically following a normal distribution with mean 0 and variance 1.
# Then introduce 3 anomaly windows and 4 point outliers.
x = rnorm(5000)
x[401:500] = rnorm(100,4,1)
x[1601:1800] = rnorm(200,0,0.01)
x[3201:3500] = rnorm(300,0,10)
x[c(1000,2000,3000,4000)] = rnorm(4,0,100)
res<-capa.uv(x)
res
plot(res)
}
\references{
\insertRef{2018arXiv180601947F}{anomaly}
}
|
# k-nearest-neighbour classification of the Wisconsin breast cancer data.
library(gmodels)  # CrossTable()
library(class)    # knn()

# Load the data and drop the first column (record id).
wbcd <- read.csv("wisc_bc_data.csv", stringsAsFactors = FALSE)
wbcd <- wbcd[-1]

# Recode the diagnosis as a labelled factor and inspect the class counts.
# NOTE(review): "Malovelent" reproduces the original label spelling exactly.
wbcd$diagnosis <- factor(wbcd$diagnosis,
                         levels = c("B", "M"),
                         labels = c("Benign", "Malovelent"))
table(wbcd$diagnosis)

# Min-max scale every feature column to [0, 1].
normalize <- function(x) {
  (x - min(x)) / (max(x) - min(x))
}
wbcd_n <- as.data.frame(lapply(wbcd[2:31], normalize))

# Hold out the final 100 rows as the test set.
wbcd_train <- wbcd_n[1:469, ]
wbcd_test <- wbcd_n[470:569, ]
wbcd_train_labels <- wbcd[1:469, 1]
wbcd_test_labels <- wbcd[470:569, 1]

# Classify with k = 21 and cross-tabulate predictions against the truth.
wbcd_test_pred <- knn(wbcd_train, wbcd_test, wbcd_train_labels, k = 21)
CrossTable(x = wbcd_test_labels, y = wbcd_test_pred, prop.chisq = FALSE)
|
/Cancer_detection.R
|
no_license
|
Shashwat05/KNN--Classifier-
|
R
| false
| false
| 631
|
r
|
# k-nearest-neighbour classification of the Wisconsin breast cancer data.
library(gmodels)  # CrossTable()
library(class)    # knn()

# Load the data and drop the first column (record id).
wbcd <- read.csv("wisc_bc_data.csv", stringsAsFactors = FALSE)
wbcd <- wbcd[-1]

# Recode the diagnosis as a labelled factor and inspect the class counts.
# NOTE(review): "Malovelent" reproduces the original label spelling exactly.
wbcd$diagnosis <- factor(wbcd$diagnosis,
                         levels = c("B", "M"),
                         labels = c("Benign", "Malovelent"))
table(wbcd$diagnosis)

# Min-max scale every feature column to [0, 1].
normalize <- function(x) {
  (x - min(x)) / (max(x) - min(x))
}
wbcd_n <- as.data.frame(lapply(wbcd[2:31], normalize))

# Hold out the final 100 rows as the test set.
wbcd_train <- wbcd_n[1:469, ]
wbcd_test <- wbcd_n[470:569, ]
wbcd_train_labels <- wbcd[1:469, 1]
wbcd_test_labels <- wbcd[470:569, 1]

# Classify with k = 21 and cross-tabulate predictions against the truth.
wbcd_test_pred <- knn(wbcd_train, wbcd_test, wbcd_train_labels, k = 21)
CrossTable(x = wbcd_test_labels, y = wbcd_test_pred, prop.chisq = FALSE)
|
library(ccube)
library(dplyr)
source('SVclone/write_output.R')

args <- commandArgs(trailingOnly = TRUE)
# || short-circuits, so args[1] is only compared once we know args is
# non-empty (the original | form relied on TRUE | NA evaluating to TRUE).
if(length(args) == 0 || args[1] == "-h") {
  cat("
Perform post-assignment of SVs.
Usage:
Rscript post_assign.R <SV RData results> <SNV RData results> <results folder> <sample> [--joint]\n\n
The --joint flag indicates that variants will be post-assigned to
a joint SV + SNV model. By default, SVs will be assigned to the
provided SNV model.
")
  q(save="no")
}

svres <- args[1]
snvres <- args[2]
resultFolder <- args[3]
sample <- args[4]
# The --joint flag may appear anywhere on the command line.
joint_model <- length(grep("--joint", args)) > 0

if (!file.exists(svres) || !file.exists(snvres)) {
  print('SV or SNV data file do not exist. Exiting.')
  q(save="no")
}

# Each .RData file populates the workspace: doubleBreakPtsRes (SV model)
# and snvRes (SNV model).
load(svres)
load(snvres)
system(paste('mkdir -p', resultFolder))
svdata <- doubleBreakPtsRes$ssm
snvdata <- snvRes$ssm

# NOTE(review): paste0(resultFolder, sample, ...) assumes resultFolder ends
# with a path separator -- confirm against how callers pass it.
if (joint_model) {
  print('Post-assigning using a joint SV + SNV model...')
  # Assign SVs under the joint model.
  postAssignResSVs <- RunPostAssignPipeline(snvRes = snvRes$res,
                                            svRes = doubleBreakPtsRes$res,
                                            mydata = svdata)
  save(postAssignResSVs, file=paste0(resultFolder, sample, "_ccube_postAssign_sv_results.RData"))
  write_sv_output(postAssignResSVs, resultFolder, sample)
  MakeCcubeStdPlot_sv(res = postAssignResSVs$res, ssm = postAssignResSVs$ssm,
                      printPlot = TRUE, fn = paste0(resultFolder, sample, "_ccube_sv_postAssign_results.pdf"))
  # Assign SNVs under the same joint model, into a snvs/ subfolder.
  system(paste0('mkdir -p ', resultFolder, '/snvs'))
  postAssignResSNVs <- RunPostAssignPipeline(snvRes = snvRes$res,
                                             svRes = doubleBreakPtsRes$res,
                                             mydata = snvdata)
  save(postAssignResSNVs, file=paste0(resultFolder, '/snvs/', sample, "_ccube_postAssign_snv_results.RData"))
  write_snv_output(postAssignResSNVs, paste0(resultFolder, '/snvs/'), sample)
  MakeCcubeStdPlot(res = postAssignResSNVs$res, ssm = postAssignResSNVs$ssm,
                   printPlot = TRUE, fn = paste0(resultFolder, '/snvs/', sample, "_ccube_snv_postAssign_results.pdf"))
} else {
  print('Post-assigning SVs using SNV results...')
  # Default mode: assign SVs to the SNV-derived clusters.
  postAssignRes <- RunPostAssignPipeline(svRes = snvRes$res,
                                         mydata = svdata)
  save(postAssignRes, file=paste0(resultFolder, sample, "_ccube_sv_postAssign_results.RData"))
  write_sv_output(postAssignRes, resultFolder, sample)
  MakeCcubeStdPlot_sv(res = postAssignRes$res, ssm = postAssignRes$ssm,
                      printPlot = TRUE, fn = paste0(resultFolder, sample, "_ccube_sv_postAssign_results.pdf"))
}
|
/SVclone/post_assign.R
|
permissive
|
qindan2008/SVclone
|
R
| false
| false
| 2,722
|
r
|
library(ccube)
library(dplyr)
source('SVclone/write_output.R')

args <- commandArgs(trailingOnly = TRUE)
# || short-circuits, so args[1] is only compared once we know args is
# non-empty (the original | form relied on TRUE | NA evaluating to TRUE).
if(length(args) == 0 || args[1] == "-h") {
  cat("
Perform post-assignment of SVs.
Usage:
Rscript post_assign.R <SV RData results> <SNV RData results> <results folder> <sample> [--joint]\n\n
The --joint flag indicates that variants will be post-assigned to
a joint SV + SNV model. By default, SVs will be assigned to the
provided SNV model.
")
  q(save="no")
}

svres <- args[1]
snvres <- args[2]
resultFolder <- args[3]
sample <- args[4]
# The --joint flag may appear anywhere on the command line.
joint_model <- length(grep("--joint", args)) > 0

if (!file.exists(svres) || !file.exists(snvres)) {
  print('SV or SNV data file do not exist. Exiting.')
  q(save="no")
}

# Each .RData file populates the workspace: doubleBreakPtsRes (SV model)
# and snvRes (SNV model).
load(svres)
load(snvres)
system(paste('mkdir -p', resultFolder))
svdata <- doubleBreakPtsRes$ssm
snvdata <- snvRes$ssm

# NOTE(review): paste0(resultFolder, sample, ...) assumes resultFolder ends
# with a path separator -- confirm against how callers pass it.
if (joint_model) {
  print('Post-assigning using a joint SV + SNV model...')
  # Assign SVs under the joint model.
  postAssignResSVs <- RunPostAssignPipeline(snvRes = snvRes$res,
                                            svRes = doubleBreakPtsRes$res,
                                            mydata = svdata)
  save(postAssignResSVs, file=paste0(resultFolder, sample, "_ccube_postAssign_sv_results.RData"))
  write_sv_output(postAssignResSVs, resultFolder, sample)
  MakeCcubeStdPlot_sv(res = postAssignResSVs$res, ssm = postAssignResSVs$ssm,
                      printPlot = TRUE, fn = paste0(resultFolder, sample, "_ccube_sv_postAssign_results.pdf"))
  # Assign SNVs under the same joint model, into a snvs/ subfolder.
  system(paste0('mkdir -p ', resultFolder, '/snvs'))
  postAssignResSNVs <- RunPostAssignPipeline(snvRes = snvRes$res,
                                             svRes = doubleBreakPtsRes$res,
                                             mydata = snvdata)
  save(postAssignResSNVs, file=paste0(resultFolder, '/snvs/', sample, "_ccube_postAssign_snv_results.RData"))
  write_snv_output(postAssignResSNVs, paste0(resultFolder, '/snvs/'), sample)
  MakeCcubeStdPlot(res = postAssignResSNVs$res, ssm = postAssignResSNVs$ssm,
                   printPlot = TRUE, fn = paste0(resultFolder, '/snvs/', sample, "_ccube_snv_postAssign_results.pdf"))
} else {
  print('Post-assigning SVs using SNV results...')
  # Default mode: assign SVs to the SNV-derived clusters.
  postAssignRes <- RunPostAssignPipeline(svRes = snvRes$res,
                                         mydata = svdata)
  save(postAssignRes, file=paste0(resultFolder, sample, "_ccube_sv_postAssign_results.RData"))
  write_sv_output(postAssignRes, resultFolder, sample)
  MakeCcubeStdPlot_sv(res = postAssignRes$res, ssm = postAssignRes$ssm,
                      printPlot = TRUE, fn = paste0(resultFolder, sample, "_ccube_sv_postAssign_results.pdf"))
}
|
## Loads the data (set load to 1)
# NOTE(review): `load` shadows base::load() for the rest of this script.
load <- 1
if (load){
  # Feature names and the activity-code -> label lookup table.
  features <- read.table("features.txt",sep=" ",col.names=c("num","feat"))
  actlab <-read.table("activity_labels.txt",sep=" ",col.names=c("label","activity"))
  subject_test <-read.table("./test/subject_test.txt",col.names="subject")
  subject_train <-read.table("./train/subject_train.txt",col.names="subject")
  xtrain <- read.table("./train/X_train.txt")
  ytrain <- read.table("./train/y_train.txt",col.names="activity")
  xtest <- read.table("./test/X_test.txt")
  ytest <- read.table("./test/y_test.txt",col.names="activity")
  #names(xtest)<-as.vector(as.character(features[,2]))
  #names(xtrain)<-as.vector(as.character(features[,2]))
}
## Merges the data in one dataset
# Use nrow(): the y tables are data frames, so length() returns the number
# of columns (1) and the original only worked through silent recycling
# inside cbind().
group <- factor(rep("test", nrow(ytest)))
datatest <- cbind(subject_test,group,ytest, xtest)
group <- factor(rep("train", nrow(ytrain)))
datatrain <- cbind(subject_train,group,ytrain,xtrain)
data<- rbind(datatest,datatrain)
# Row-name lookup: actlab rows are named "1".."6", matching activity codes.
data$actlab <- actlab[as.character(data$activity),2]
##
## Aggregates mean and sd by subject and activity
means<-aggregate(data,by=list(act=data$activity,sub=data$subject),FUN=mean)
stds<-aggregate(data,by=list(act=data$activity,sub=data$subject),FUN=sd)
means$actlab <-actlab[as.character(means$act),2]
# Suffix every column name with "mean" / "sd" so the two halves stay
# distinguishable after the final data.frame() merge.
names(means)<-paste(names(means),rep("mean",length(means)),sep="")
stds$actlab <-actlab[as.character(stds$act),2]
names(stds)<-paste(names(stds),rep("sd",length(stds)),sep="")
results <- data.frame(means,stds)
write.table(results, "dataset.txt")
|
/run_analysis.R
|
no_license
|
ebalp/cleaningproject
|
R
| false
| false
| 1,582
|
r
|
## Loads the data (set load to 1)
# NOTE(review): `load` shadows base::load() for the rest of this script.
load <- 1
if (load){
  # Feature names and the activity-code -> label lookup table.
  features <- read.table("features.txt",sep=" ",col.names=c("num","feat"))
  actlab <-read.table("activity_labels.txt",sep=" ",col.names=c("label","activity"))
  subject_test <-read.table("./test/subject_test.txt",col.names="subject")
  subject_train <-read.table("./train/subject_train.txt",col.names="subject")
  xtrain <- read.table("./train/X_train.txt")
  ytrain <- read.table("./train/y_train.txt",col.names="activity")
  xtest <- read.table("./test/X_test.txt")
  ytest <- read.table("./test/y_test.txt",col.names="activity")
  #names(xtest)<-as.vector(as.character(features[,2]))
  #names(xtrain)<-as.vector(as.character(features[,2]))
}
## Merges the data in one dataset
# Use nrow(): the y tables are data frames, so length() returns the number
# of columns (1) and the original only worked through silent recycling
# inside cbind().
group <- factor(rep("test", nrow(ytest)))
datatest <- cbind(subject_test,group,ytest, xtest)
group <- factor(rep("train", nrow(ytrain)))
datatrain <- cbind(subject_train,group,ytrain,xtrain)
data<- rbind(datatest,datatrain)
# Row-name lookup: actlab rows are named "1".."6", matching activity codes.
data$actlab <- actlab[as.character(data$activity),2]
##
## Aggregates mean and sd by subject and activity
means<-aggregate(data,by=list(act=data$activity,sub=data$subject),FUN=mean)
stds<-aggregate(data,by=list(act=data$activity,sub=data$subject),FUN=sd)
means$actlab <-actlab[as.character(means$act),2]
# Suffix every column name with "mean" / "sd" so the two halves stay
# distinguishable after the final data.frame() merge.
names(means)<-paste(names(means),rep("mean",length(means)),sep="")
stds$actlab <-actlab[as.character(stds$act),2]
names(stds)<-paste(names(stds),rep("sd",length(stds)),sep="")
results <- data.frame(means,stds)
write.table(results, "dataset.txt")
|
#The dataset set be loaded using MASS library
#Structure of code:
#Loading data
#Data prep
#EDA
#Model building and accuracy analysis
#Final Analysis
## DATA LOADING
library(MASS)
housing <- Boston
library(corrplot) #seeing correlation
library(ggplot2) #visualization
library(caTools) #splitting into test and train
library(dplyr) #data manipulation
library(plotly) #converting ggplot2 to interactive viz
## DATA PREP
colSums(is.na(housing)) # no NA
## EDA
str(housing)
#rad and chas are int
head(housing)
summary(housing)
#too much difference between mean and median of a variable indicates outliers
#finding correlation
corrplot(cor(housing))
## Data splitting into test and train
#sample.split should be used for classification since
#it splits data from vector Y into 2 bins in pre-defined
#ration while preserving ratios of different labels
#for regression sample fucntion should work
set.seed(100)
sample <- sample.int(n= nrow(housing), size= floor(0.75*nrow(housing)), replace =F)
train <- housing[sample,]
test <- housing[-sample,]
#Fitting Simple Linear regression
model <- lm(medv ~., data= train)
summary(model) #Multiple R square = 0.77, F= 98.74
#Null hypothesis: Coeff associated with var = 0
#Alternate hypotheis: Coef associated with var != 0
# Rsquare = 0.7786, F= 98.74
# P value < 0.05: Statistically significant
# P value > 0.05: Not statistically significant and indicates strong evidence of null hypothesis
#Improving the model
#removing the var which are not statistically significant
model1 <- lm(medv~.-age-indus, data = train)
summary(model1) #Multiple R square = 0.77, F= 117.1
# PART 2: Variable selection using best subset regression
model2<- regsubsets(medv~., data = train, nvmax= 13)
reg_summary= summary(model2)
#regsubset() has built in plot function
#finding model with largest Adjusted R^2
which.max(reg_summary$adjr2) #model with 11 variables
plot(model2, scale = "r2") #r^2 becomes 0.78 when we include 11 variables and does not
#change as we increase the number of variables as per the output.
plot(model2, scale ="bic")
# PART 3: Variable selection using stepwise regression
nullmodel <- lm(medv~1, data= train)
# PART 3 (cont.): stepwise variable selection bounded by the null/full models.
fullmodel <- lm(medv~., data = train)
#forward selection: start from the intercept-only model and add one predictor
#at a time while AIC keeps improving
model3 <- step(nullmodel, scope = list(lower= nullmodel, upper= fullmodel), direction = "forward")
#forward selection gives model with least AIC with 11 variables
#medv ~ lstat + rm + ptratio + dis + black + chas + zn + crim +
#nox + rad + tax
#These are the same variables which were given by best subset regression model with max Adjusted R2 value
#backward selection: start from the full model and drop predictors one at a time
model4 <- step(fullmodel, direction = "backward")
#model with least AIC:
#medv ~ crim + zn + chas + nox + rm + dis + rad + tax + ptratio +
#black + lstat
#Hence same variables are covered
#STEPWISE SELECTION: allow both adding and dropping a predictor at each step
model5 <- step(nullmodel, scope = list(lower = nullmodel, upper = fullmodel), direction="both")
AIC(model5)
BIC(model5)
summary(model5)
#MODEL ASSESSMENT
# 2x2 grid for the four standard lm diagnostic plots
par(mfrow=c(2,2))
plot(model5)
#Residuals vs Fitted plot shows that the relationship between medv and predictors is not completely linear. Also, normal qq plot is skewed implying that residuals are not normally distributed. A different functional form may be required.
#Models are compared based on adjusted r square, AIC, BIC criteria for in-sample performance and mean square prediction error (MSPE) for out-of-sample performance
#In-sample performance
#MSE (residual variance sigma^2), R^2, adjusted R^2, AIC and BIC per candidate
model3.sum= summary(model3)
(model3.sum$sigma)^2
model3.sum$r.squared
model3.sum$adj.r.squared
AIC(model3)
BIC(model3)
#17.20317 sigma^2
#0.7782574 r^2
# 0.7716111 Adjusted R^2
# 2167.652 AIC
# 2218.84 BIC
model4.sum= summary(model4)
(model4.sum$sigma)^2
model4.sum$r.squared
model4.sum$adj.r.squared
AIC(model4)
BIC(model4)
#17.20317
#0.7782574
#0.7716111
#2167.652
#2218.84
model5.sum= summary(model5)
(model5.sum$sigma)^2
model5.sum$r.squared
model5.sum$adj.r.squared
AIC(model5)
BIC(model5)
#17.20317
#0.7782574
#0.7716111
#2167.652
#2218.84
#Hence same values for all the models (all three searches selected the same predictors)
#Out-of-sample Prediction or test error (MSPE) on the held-out 25% split
model3.pred.test <- predict(model3, newdata = test)
model3.mspe <- mean((model3.pred.test - test$medv) ^ 2)
model3.mspe
#39.63285
model4.pred.test <- predict(model4, newdata = test)
model4.mspe <- mean((model4.pred.test - test$medv) ^ 2)
model4.mspe
#39.63285
model5.pred.test <- predict(model5, newdata = test)
model5.mspe <- mean((model5.pred.test - test$medv) ^ 2)
model5.mspe
#39.63285
#All the statistics are same for all models using `forward`, `backward` and `both` variable selection techniques
#Cross Validation
# BUG FIX: cv.glm() lives in the 'boot' package, which is never loaded in this
# script -- without it this section errors out.
library(boot)
model.glm <- glm(medv ~ . -indus -age, data = housing)
# 5-fold CV on the full data; delta[1] is the raw cross-validated MSE
# (the value the #23.17014 comment refers to), so display it explicitly.
x <- cv.glm(data = housing, glmfit = model.glm, K = 5)
x$delta[1]
#23.17014
#We did not split the dataset into a separate validation set, as validation
#was not the purpose here. Purpose was variable selection
##################### MSPE FOR TEST WAS = 39.63285###################
#We noted in above exercise that there might be some non linearity between medv and "x" variables
#Let's explore that
plot(medv~., data = housing) #non linearity between lstat and medv
#Implementing a Generalized Additive Model (GAM)
# NOTE(review): gam() with s() smooths plus the edf diagnostics quoted below
# match the 'mgcv' package; no GAM library is loaded in the visible part of
# this script, so attach it here (re-attaching is harmless if already loaded).
library(mgcv)
#Splines can only be included for continuous variables
#Hence cannot be applied on chas and rad
housing.gam <- gam(medv ~ s(crim) + s(zn) + s(indus) + s(nox) + s(rm) + s(age) + s(dis) +
                     s(tax) + s(ptratio) + s(black) + s(lstat) + chas + rad, data = train)
summ <- summary(housing.gam)
#Variables with edf = 1 have a linear relation with medv (age and black)
#Plotting the fitted smooth (non-linear) terms
plot(housing.gam)
# BUG FIX: predict() takes 'newdata', not 'data'. The original call
# predict(housing.gam, data = test) silently ignored the argument and returned
# TRAINING fitted values; additionally mean(pred - test$medv)^2 squared the
# MEAN error instead of averaging squared errors. The reported 6.397296 (and
# the conclusion drawn from it) was therefore invalid.
pred <- predict(housing.gam, newdata = test)
mspe <- mean((pred - test$medv)^2)
mspe
# Compare this corrected out-of-sample MSPE against the linear models' 39.63
# before preferring the GAM.
### COMPARING AGAINST TREE BASED MODEL
#2 CART: grow a deep regression tree (cp = 0.001), then prune it back
library(rpart)
#NOTE(review): ROCR provides classification performance curves and is not used
#anywhere below -- confirm whether it is needed.
library(ROCR)
housing.rpart <- rpart(formula = medv ~ ., data = train, cp = 0.001)
housing.rpart
plot(housing.rpart)
text(housing.rpart, pretty=2)
plotcp(housing.rpart) #choosing cp = 0.029 and pruning the tree
#NOTE(review): the comment above says cp = 0.029 but prune() below uses
#cp = 0.0077 -- confirm which complexity parameter was intended.
text(housing.rpart)
pruned <- prune(housing.rpart, cp = 0.0077)
pruned
plot(pruned)
text(pruned, pretty = 2)
#Comparing MSE
#Large tree: training data (in-sample fit of the unpruned tree)
mean((predict(housing.rpart)- train$medv)^2)
#6.827475
#Pruned tree: training data (higher in-sample error, as expected after pruning)
mean((predict(pruned)- train$medv)^2)
#10.4313
#Out of sample MSE
mean((predict(pruned, newdata = test)- test$medv)^2)
#44.30111
#Worse than Generalized linear regression model
####################### NEURAL NET #####################
########################################################
# BUG FIX: neuralnet() and compute() come from the 'neuralnet' package, not
# 'nnet', and installed.packages("nnet") was a typo (it merely queries a
# library path named "nnet" and does nothing useful). Install once if needed,
# then load the required package.
# install.packages("neuralnet")
library(nnet)
library(neuralnet)
# Neural nets need comparably-scaled inputs; min-max scale every column to [0,1].
maxs <- apply(housing, 2, max)
mins <- apply(housing, 2, min)
# as.data.frame() because scale() returns a matrix
housing_scaled <- as.data.frame(scale(housing, center = mins, scale = maxs - mins))
# Split with the same seed as earlier so the train/test partition matches.
set.seed(100)
sample_scaled <- sample.int(n = nrow(housing_scaled), size = floor(0.75 * nrow(housing_scaled)), replace = FALSE)
train_scaled <- housing_scaled[sample_scaled, ]
test_scaled <- housing_scaled[-sample_scaled, ]
# neuralnet() does not accept y ~ ., so build the formula explicitly from the
# column names. Hidden layout c(5, 3): rule of thumb is a size between the
# input and output layers, roughly 2/3 of the input size.
n <- names(train_scaled)
f <- as.formula(paste("medv ~", paste(n[!n %in% "medv"], collapse = "+")))
# linear.output = TRUE => regression (FALSE would apply the activation to the
# output node, i.e. classification).
net <- neuralnet(f, data = train_scaled, hidden = c(5, 3), linear.output = TRUE)
plot(net)
# Predict using the 13 scaled predictor columns (medv is the 14th).
predict_nn <- compute(net, test_scaled[, 1:13])
# Undo the min-max scaling so errors are reported on the original medv scale.
predicted_nn_unscale <- predict_nn$net.result * (max(housing$medv) - min(housing$medv)) + min(housing$medv)
test_original <- (test_scaled$medv) * (max(housing$medv) - min(housing$medv)) + min(housing$medv)
# Out-of-sample MSE on the original scale.
MSE_nn <- sum((test_original - predicted_nn_unscale)^2) / nrow(test_scaled)
MSE_nn
#22.26195
|
/basic_housing.R
|
no_license
|
aiim-lab/BasicML
|
R
| false
| false
| 8,326
|
r
|
#The dataset can be loaded using the MASS library
#Structure of code:
#Loading data
#Data prep
#EDA
#Model building and accuracy analysis
#Final Analysis
## DATA LOADING
library(MASS)
housing <- Boston
library(corrplot) #seeing correlation
library(ggplot2) #visualization
library(caTools) #splitting into test and train
library(dplyr) #data manipulation
library(plotly) #converting ggplot2 to interactive viz
## DATA PREP
colSums(is.na(housing)) # no NA
## EDA
str(housing)
#rad and chas are int
head(housing)
summary(housing)
#too much difference between mean and median of a variable indicates outliers
#finding correlation
corrplot(cor(housing))
## Data splitting into test and train (75% / 25%)
#sample.split should be used for classification since
#it splits data from vector Y into 2 bins in a pre-defined
#ratio while preserving ratios of different labels
#for regression the sample function should work
#NOTE(review): prefer replace = FALSE over the reassignable shorthand F
set.seed(100)
sample <- sample.int(n= nrow(housing), size= floor(0.75*nrow(housing)), replace =F)
train <- housing[sample,]
test <- housing[-sample,]
#Fitting Simple Linear regression on all 13 predictors
model <- lm(medv ~., data= train)
summary(model) #Multiple R square = 0.77, F= 98.74
#Null hypothesis: Coeff associated with var = 0
#Alternate hypothesis: Coeff associated with var != 0
# Rsquare = 0.7786, F= 98.74
# P value < 0.05: Statistically significant
# P value > 0.05: Not statistically significant (insufficient evidence against the null)
#Improving the model
#removing the variables which are not statistically significant
model1 <- lm(medv~.-age-indus, data = train)
summary(model1) #Multiple R square = 0.77, F= 117.1
# PART 2: Variable selection using best subset regression
# BUG FIX: regsubsets() is in the 'leaps' package, which is never loaded in
# this script -- without it this section fails with "could not find function".
library(leaps)
model2 <- regsubsets(medv ~ ., data = train, nvmax = 13)  # consider up to all 13 predictors
reg_summary <- summary(model2)
# regsubsets() has a built-in plot() method.
# Model with the largest adjusted R^2:
which.max(reg_summary$adjr2) #model with 11 variables
plot(model2, scale = "r2") #r^2 becomes 0.78 when we include 11 variables and does not
#change as we increase the number of variables as per the output.
plot(model2, scale = "bic")
# PART 3: Variable selection using stepwise regression
# Intercept-only model: the lower bound of the stepwise search scope.
nullmodel <- lm(medv ~ 1, data = train)
# PART 3 (cont.): stepwise variable selection bounded by the null/full models.
fullmodel <- lm(medv~., data = train)
#forward selection: start from the intercept-only model and add one predictor
#at a time while AIC keeps improving
model3 <- step(nullmodel, scope = list(lower= nullmodel, upper= fullmodel), direction = "forward")
#forward selection gives model with least AIC with 11 variables
#medv ~ lstat + rm + ptratio + dis + black + chas + zn + crim +
#nox + rad + tax
#These are the same variables which were given by best subset regression model with max Adjusted R2 value
#backward selection: start from the full model and drop predictors one at a time
model4 <- step(fullmodel, direction = "backward")
#model with least AIC:
#medv ~ crim + zn + chas + nox + rm + dis + rad + tax + ptratio +
#black + lstat
#Hence same variables are covered
#STEPWISE SELECTION: allow both adding and dropping a predictor at each step
model5 <- step(nullmodel, scope = list(lower = nullmodel, upper = fullmodel), direction="both")
AIC(model5)
BIC(model5)
summary(model5)
#MODEL ASSESSMENT
# 2x2 grid for the four standard lm diagnostic plots
par(mfrow=c(2,2))
plot(model5)
#Residuals vs Fitted plot shows that the relationship between medv and predictors is not completely linear. Also, normal qq plot is skewed implying that residuals are not normally distributed. A different functional form may be required.
#Models are compared based on adjusted r square, AIC, BIC criteria for in-sample performance and mean square prediction error (MSPE) for out-of-sample performance
#In-sample performance
#MSE (residual variance sigma^2), R^2, adjusted R^2, AIC and BIC per candidate
model3.sum= summary(model3)
(model3.sum$sigma)^2
model3.sum$r.squared
model3.sum$adj.r.squared
AIC(model3)
BIC(model3)
#17.20317 sigma^2
#0.7782574 r^2
# 0.7716111 Adjusted R^2
# 2167.652 AIC
# 2218.84 BIC
model4.sum= summary(model4)
(model4.sum$sigma)^2
model4.sum$r.squared
model4.sum$adj.r.squared
AIC(model4)
BIC(model4)
#17.20317
#0.7782574
#0.7716111
#2167.652
#2218.84
model5.sum= summary(model5)
(model5.sum$sigma)^2
model5.sum$r.squared
model5.sum$adj.r.squared
AIC(model5)
BIC(model5)
#17.20317
#0.7782574
#0.7716111
#2167.652
#2218.84
#Hence same values for all the models (all three searches selected the same predictors)
#Out-of-sample Prediction or test error (MSPE) on the held-out 25% split
model3.pred.test <- predict(model3, newdata = test)
model3.mspe <- mean((model3.pred.test - test$medv) ^ 2)
model3.mspe
#39.63285
model4.pred.test <- predict(model4, newdata = test)
model4.mspe <- mean((model4.pred.test - test$medv) ^ 2)
model4.mspe
#39.63285
model5.pred.test <- predict(model5, newdata = test)
model5.mspe <- mean((model5.pred.test - test$medv) ^ 2)
model5.mspe
#39.63285
#All the statistics are same for all models using `forward`, `backward` and `both` variable selection techniques
#Cross Validation
# BUG FIX: cv.glm() lives in the 'boot' package, which is never loaded in this
# script -- without it this section errors out.
library(boot)
model.glm <- glm(medv ~ . -indus -age, data = housing)
# 5-fold CV on the full data; delta[1] is the raw cross-validated MSE
# (the value the #23.17014 comment refers to), so display it explicitly.
x <- cv.glm(data = housing, glmfit = model.glm, K = 5)
x$delta[1]
#23.17014
#We did not split the dataset into a separate validation set, as validation
#was not the purpose here. Purpose was variable selection
##################### MSPE FOR TEST WAS = 39.63285###################
#We noted in the exercise above that there might be some non-linearity between medv and the "x" variables
#Let's explore that
plot(medv~., data = housing) #non linearity between lstat and medv
#Implementing a Generalized Additive Model (GAM)
# BUG FIX: no GAM package is loaded anywhere in this script, so gam() below
# would fail. The s() smooths and the edf diagnostics quoted in the comments
# match the 'mgcv' package.
library(mgcv)
#Splines can only be included for continuous variables
#Hence cannot be applied on chas and rad
housing.gam <- gam(medv ~ s(crim) + s(zn) + s(indus) + s(nox) + s(rm) + s(age) + s(dis) +
                     s(tax) + s(ptratio) + s(black) + s(lstat) + chas + rad, data = train)
summ <- summary(housing.gam)
#Variables with edf = 1 have a linear relation with medv (age and black)
#Plotting the fitted smooth (non-linear) terms
plot(housing.gam)
# BUG FIX: predict() takes 'newdata', not 'data'. The original call
# predict(housing.gam, data = test) silently ignored the argument and returned
# TRAINING fitted values; additionally mean(pred - test$medv)^2 squared the
# MEAN error instead of averaging squared errors. The reported 6.397296 (and
# the conclusion drawn from it) was therefore invalid.
pred <- predict(housing.gam, newdata = test)
mspe <- mean((pred - test$medv)^2)
mspe
# Compare this corrected out-of-sample MSPE against the linear models' 39.63
# before preferring the GAM.
### COMPARING AGAINST TREE BASED MODEL
#2 CART: grow a deep regression tree (cp = 0.001), then prune it back
library(rpart)
#NOTE(review): ROCR provides classification performance curves and is not used
#anywhere below -- confirm whether it is needed.
library(ROCR)
housing.rpart <- rpart(formula = medv ~ ., data = train, cp = 0.001)
housing.rpart
plot(housing.rpart)
text(housing.rpart, pretty=2)
plotcp(housing.rpart) #choosing cp = 0.029 and pruning the tree
#NOTE(review): the comment above says cp = 0.029 but prune() below uses
#cp = 0.0077 -- confirm which complexity parameter was intended.
text(housing.rpart)
pruned <- prune(housing.rpart, cp = 0.0077)
pruned
plot(pruned)
text(pruned, pretty = 2)
#Comparing MSE
#Large tree: training data (in-sample fit of the unpruned tree)
mean((predict(housing.rpart)- train$medv)^2)
#6.827475
#Pruned tree: training data (higher in-sample error, as expected after pruning)
mean((predict(pruned)- train$medv)^2)
#10.4313
#Out of sample MSE
mean((predict(pruned, newdata = test)- test$medv)^2)
#44.30111
#Worse than Generalized linear regression model
####################### NEURAL NET #####################
########################################################
# BUG FIX: neuralnet() and compute() come from the 'neuralnet' package, not
# 'nnet', and installed.packages("nnet") was a typo (it merely queries a
# library path named "nnet" and does nothing useful). Install once if needed,
# then load the required package.
# install.packages("neuralnet")
library(nnet)
library(neuralnet)
# Neural nets need comparably-scaled inputs; min-max scale every column to [0,1].
maxs <- apply(housing, 2, max)
mins <- apply(housing, 2, min)
# as.data.frame() because scale() returns a matrix
housing_scaled <- as.data.frame(scale(housing, center = mins, scale = maxs - mins))
# Split with the same seed as earlier so the train/test partition matches.
set.seed(100)
sample_scaled <- sample.int(n = nrow(housing_scaled), size = floor(0.75 * nrow(housing_scaled)), replace = FALSE)
train_scaled <- housing_scaled[sample_scaled, ]
test_scaled <- housing_scaled[-sample_scaled, ]
# neuralnet() does not accept y ~ ., so build the formula explicitly from the
# column names. Hidden layout c(5, 3): rule of thumb is a size between the
# input and output layers, roughly 2/3 of the input size.
n <- names(train_scaled)
f <- as.formula(paste("medv ~", paste(n[!n %in% "medv"], collapse = "+")))
# linear.output = TRUE => regression (FALSE would apply the activation to the
# output node, i.e. classification).
net <- neuralnet(f, data = train_scaled, hidden = c(5, 3), linear.output = TRUE)
plot(net)
# Predict using the 13 scaled predictor columns (medv is the 14th).
predict_nn <- compute(net, test_scaled[, 1:13])
# Undo the min-max scaling so errors are reported on the original medv scale.
predicted_nn_unscale <- predict_nn$net.result * (max(housing$medv) - min(housing$medv)) + min(housing$medv)
test_original <- (test_scaled$medv) * (max(housing$medv) - min(housing$medv)) + min(housing$medv)
# Out-of-sample MSE on the original scale.
MSE_nn <- sum((test_original - predicted_nn_unscale)^2) / nrow(test_scaled)
MSE_nn
#22.26195
|
# ----------- Examples --------------
#' Code Examples
#'
#' Learn by example - copy/paste code from Examples below.\cr
#' This code collection is to demonstrate various concepts of
#' data preparation, conversion, grouping,
#' parameter setting, visual fine-tuning,
#' custom rendering, plugins attachment,
#' Shiny plots & interactions through Shiny proxy.\cr
#'
#' @return No return value, used only for help
#'
#' @seealso \href{https://helgasoft.github.io/echarty/}{website} has many more examples
#'
#' @examples
#' \donttest{
#'
#' #------ Basic scatter chart, instant display
#' cars %>% ec.init()
#'
#' #------ Same chart, change theme and save for further processing
#' p <- cars %>% ec.init() %>% ec.theme('dark')
#' p
#'
#'
#' #------ JSON back and forth
#' tmp <- cars %>% ec.init()
#' tmp
#' json <- tmp %>% ec.inspect()
#' ec.fromJson(json) %>% ec.theme("dark")
#'
#'
#' #------ Data grouping
#' library(dplyr)
#' iris %>% group_by(Species) %>% ec.init() # by factor column
#' iris %>% mutate(Species=as.character(Species)) %>%
#' group_by(Species) %>% ec.init() # by non-factor column
#'
#' p <- Orange %>% group_by(Tree) %>% ec.init() # by factor column
#' p$x$opts$series <- lapply(p$x$opts$series, function(x) {
#' x$symbolSize=10; x$encode=list(x='age', y='circumference'); x } )
#' p
#'
#'
#' #------ Pie
#' is <- sort(islands); is <- is[is>60]
#' is <- data.frame(name=names(is), value=as.character(unname(is)))
#' data <- ec.data(is, 'names')
#' p <- ec.init()
#' p$x$opts <- list(
#' title = list(text = "Landmasses over 60,000 mi\u00B2", left = 'center'),
#' tooltip = list(trigger='item'),
#' series = list(type='pie', radius='50%', data=data, name='mi\u00B2'))
#' p
#'
#'
#' #------ Liquidfill plugin
#' if (interactive()) {
#' p <- ec.init(load=c('liquid'), preset=FALSE)
#' p$x$opts$series[[1]] <- list(
#' type='liquidFill', data=c(0.6, 0.5, 0.4, 0.3), # amplitude=0,
#' waveAnimation=FALSE, animationDuration=0, animationDurationUpdate=0
#' )
#' p
#' }
#'
#'
#' #------ Heatmap
#' times <- c(5,1,0,0,0,0,0,0,0,0,0,2,4,1,1,3,4,6,4,4,3,3,2,5,7,0,0,0,0,0,
#' 0,0,0,0,5,2,2,6,9,11,6,7,8,12,5,5,7,2,1,1,0,0,0,0,0,0,0,0,3,2,
#' 1,9,8,10,6,5,5,5,7,4,2,4,7,3,0,0,0,0,0,0,1,0,5,4,7,14,13,12,9,5,
#' 5,10,6,4,4,1,1,3,0,0,0,1,0,0,0,2,4,4,2,4,4,14,12,1,8,5,3,7,3,0,
#' 2,1,0,3,0,0,0,0,2,0,4,1,5,10,5,7,11,6,0,5,3,4,2,0,1,0,0,0,0,0,
#' 0,0,0,0,1,0,2,1,3,4,0,0,0,0,1,2,2,6)
#' df <- NULL; n <- 1;
#' for(i in 0:6) { df <- rbind(df, data.frame(0:23, rep(i,24), times[n:(n+23)])); n<-n+24 }
#' hours <- ec.data(df); hours <- hours[-1] # remove columns row
#' times <- c('12a',paste0(1:11,'a'),'12p',paste0(1:11,'p'))
#' days <- c('Saturday','Friday','Thursday','Wednesday','Tuesday','Monday','Sunday')
#' p <- ec.init(preset=FALSE)
#' p$x$opts <- list( title = list(text='Punch Card Heatmap'),
#' tooltip = list(position='top'),grid=list(height='50%',top='10%'),
#' xAxis = list(type='category', data=times, splitArea=list(show=TRUE)),
#' yAxis = list(type='category', data=days, splitArea=list(show=TRUE)),
#' visualMap = list(min=0,max=10,calculable=TRUE,orient='horizontal',left='center',bottom='15%'),
#' series = list(list(name='Hours', type = 'heatmap', data= hours,label=list(show=TRUE),
#' emphasis=list(itemStyle=list(shadowBlur=10,shadowColor='rgba(0,0,0,0.5)'))))
#' )
#' p
#'
#'
#' #------ Plugin leaflet
#' tmp <- quakes %>% dplyr::relocate('long') # set in lon,lat order
#' p <- tmp %>% ec.init(load='leaflet')
#' p$x$opts$legend = list(data=list(list(name='quakes')))
#' p$x$opts$series[[1]]$name = 'quakes'
#' p$x$opts$series[[1]]$symbolSize = ec.clmn(4)
#' p
#'
#'
#' #------ Plugin 'world' with lines and color coding
#' flights <- NULL
#' flights <- try(read.csv(paste0('https://raw.githubusercontent.com/plotly/datasets/master/',
#' '2011_february_aa_flight_paths.csv')), silent = TRUE)
#' if (!is.null(flights)) {
#' tmp <- data.frame(airport1 = unique(head(flights,10)$airport1),
#' color = c("#387e78","#eeb422","#d9534f",'magenta'))
#' tmp <- head(flights,10) %>% inner_join(tmp) # add color by airport
#' p <- tmp %>% ec.init(load='world')
#' p$x$opts$geo$center= c(mean(flights$start_lon), mean(flights$start_lat))
#' p$x$opts$series <- list(list(
#' type='lines', coordinateSystem='geo',
#' data = lapply(ec.data(tmp, 'names'), function(x)
#' list(coords = list(c(x$start_lon,x$start_lat),
#' c(x$end_lon,x$end_lat)),
#' colr = x$color)
#' )
#' ,lineStyle = list(curveness=0.3, width=3, color=ec.clmn('colr'))
#' ))
#' p
#' }
#'
#'
#' #------ Plugin 3D
#' if (interactive()) {
#' data <- list()
#' for(y in 1:dim(volcano)[2]) for(x in 1:dim(volcano)[1])
#' data <- append(data, list(c(x, y, volcano[x,y])))
#' p <- ec.init(load = '3D')
#' p$x$opts$series <- list(type = 'surface', data = data)
#' p
#' }
#'
#'
#' #------ 3D chart with custom coloring
#' if (interactive()) {
#' df <- data.frame(Species = unique(iris$Species),
#' color = c("#387e78","#eeb422","#d9534f"))
#' df <- iris %>% dplyr::inner_join(df) # add 6th column 'color' for Species
#' p <- df %>% ec.init(load = '3D')
#' p$x$opts$xAxis3D <- list(name='Petal.Length')
#' p$x$opts$yAxis3D <- list(name='Sepal.Width')
#' p$x$opts$zAxis3D <- list(name='Sepal.Length')
#' p$x$opts$series <- list(list(
#' type='scatter3D', symbolSize=7,
#' encode=list(x='Petal.Length', y='Sepal.Width', z='Sepal.Length'),
#' itemStyle=list(color = ec.clmn(6) ) # index of column 'color'
#' ))
#' p
#' }
#'
#'
#' #------ Surface data equation with JS code
#' if (interactive()) {
#' p <- ec.init(load='3D')
#' p$x$opts$series[[1]] <- list(
#' type = 'surface',
#' equation = list(
#' x = list(min=-3,max=4,step=0.05),
#' y = list(min=-3,max=3,step=0.05),
#' z = htmlwidgets::JS("function (x, y) {
#' return Math.sin(x * x + y * y) * x / Math.PI; }")
#' )
#' )
#' p
#' }
#'
#'
#' #------ Surface with data from a data.frame
#' if (interactive()) {
#' library(dplyr)
#' data <- expand.grid(
#' x = seq(0, 2, by = 0.1),
#' y = seq(0, 1, by = 0.1)
#' ) %>% mutate(z = x * (y ^ 2)) %>% select(x,y,z)
#' p <- ec.init(load='3D')
#' p$x$opts$series[[1]] <- list(
#' type = 'surface',
#' data = ec.data(data, 'values'))
#' p
#' }
#'
#'
#' #------ Band series with customization
#' # first column ('day') usually goes to the X-axis
#' # try also alternative data setting - replace lines *1 with *2
#' library(dplyr)
#' dats <- as.data.frame(EuStockMarkets) %>% mutate(day=1:n()) %>%
#' relocate(day) %>% slice_head(n=100)
#' p <- ec.init(load='custom') # *1 = unnamed data
#' #p <- dats %>% ec.init(load='custom') # *2 = dataset
#' p$x$opts$series = append(
#' ecr.band(dats, 'DAX','FTSE', name='Ftse-Dax', color='lemonchiffon'),
#' list(list(type='line', name='CAC', color='red', symbolSize=1,
#' data = ec.data(dats %>% select(day,CAC), 'values') # *1
#' #encode=list(x='day', y='CAC') # *2
#' ))
#' )
#' p$x$opts$legend <- list(ey='')
#' p$x$opts$dataZoom <- list(type='slider', end=50)
#' p
#'
#'
#' #------ Timeline animation
#' p <- Orange %>% dplyr::group_by(age) %>% ec.init(
#' tl.series=list(type='bar', encode=list(x='Tree', y='circumference'))
#' )
#' p$x$opts$timeline <- append(p$x$opts$timeline, list(autoPlay=TRUE))
#' p$x$opts$options <- lapply(p$x$opts$options,
#' function(o) { o$title$text <- paste('age',o$title$text,'days'); o })
#' p$x$opts$xAxis <- list(type='category', name='tree')
#' p$x$opts$yAxis <- list(max=max(Orange$circumference))
#' p
#'
#'
#' #------ Boxplot
#' bdf <- data.frame(vx = sample(LETTERS[1:3], size=20, replace=TRUE),
#' vy = rnorm(20)) %>% group_by(vx) %>% group_split()
#' dats <- lapply(bdf, function(x) boxplot.stats(x$vy)$stats )
#' p <- ec.init()
#' p$x$opts <- list( # overwrite presets
#' xAxis = list(ey=''),
#' yAxis = list(type = 'category', data = unique(unlist(lapply(bdf, `[`, , 1))) ),
#' series = list(list(type = 'boxplot', data = dats))
#' )
#' p
#'
#'
#' #------ EChartsJS v.5 feature custom transform - a regression line
#' # presets for xAxis,yAxis,dataset and series are used
#' dset <- data.frame(x=1:10, y=sample(1:100,10))
#' p <- dset %>% ec.init(js='echarts.registerTransform(ecStat.transform.regression)')
#' p$x$opts$dataset[[2]] <- list(transform = list(type='ecStat:regression'))
#' p$x$opts$series[[2]] <- list(
#' type='line', itemStyle=list(color='red'), datasetIndex=1)
#' p
#'
#'
#' #------ EChartsJS v.5 features transform and sort
#' datset <- list(
#' list(source=list(
#' list('name', 'age', 'profession', 'score', 'date'),
#' list('Hannah Krause', 41, 'Engineer', 314, '2011-02-12'),
#' list('Zhao Qian', 20, 'Teacher', 351, '2011-03-01'),
#' list('Jasmin Krause', 52, 'Musician', 287, '2011-02-14'),
#' list('Li Lei', 37, 'Teacher', 219, '2011-02-18'),
#' list('Karle Neumann', 25, 'Engineer', 253, '2011-04-02'),
#' list('Adrian Groß', 19, 'Teacher', NULL, '2011-01-16'),
#' list('Mia Neumann', 71, 'Engineer', 165, '2011-03-19'),
#' list('Böhm Fuchs', 36, 'Musician', 318, '2011-02-24'),
#' list('Han Meimei', 67, 'Engineer', 366, '2011-03-12'))),
#' list(transform = list(type= 'sort', config=list(
#' list(dimension='profession', order='desc'),
#' list(dimension='score', order='desc'))
#' )))
#' p <- ec.init(title = list(
#' text='Data transform, multiple-sort bar',
#' subtext='JS source',
#' sublink=paste0('https://echarts.apache.org/next/examples/en/editor.html',
#' '?c=doc-example/data-transform-multiple-sort-bar'),
#' left='center'))
#' p$x$opts$dataset <- datset
#' p$x$opts$xAxis <- list(type = 'category', axisLabel=list(interval=0, rotate=30))
#' p$x$opts$yAxis <- list(name='score')
#' p$x$opts$series[[1]] <- list(
#' type='bar',
#' label=list( show=TRUE, rotate=90, position='insideBottom',
#' align='left', verticalAlign='middle'
#' ),
#' itemStyle=list(
#' color = htmlwidgets::JS("function (params) {
#' return ({
#' Engineer: '#5470c6',
#' Teacher: '#91cc75',
#' Musician: '#fac858'
#' })[params.data[2]]
#' }")
#' ),
#' encode=list( x='name', y='score', label=list('profession') ),
#' datasetIndex = 1
#' )
#' p$x$opts$tooltip <- list(trigger='item', axisPointer=list(type='shadow'))
#' p
#'
#'
#' #------ Sunburst
#' # see website for different ways to set hierarchical data
#' data = list(list(name='Grandpa',children=list(list(name='Uncle Leo',value=15,
#' children=list(list(name='Cousin Jack',value=2), list(name='Cousin Mary',value=5,
#' children=list(list(name='Jackson',value=2))), list(name='Cousin Ben',value=4))),
#' list(name='Father',value=10,children=list(list(name='Me',value=5),
#' list(name='Brother Peter',value=1))))), list(name='Nancy',children=list(
#' list(name='Uncle Nike',children=list(list(name='Cousin Betty',value=1),
#' list(name='Cousin Jenny',value=2))))))
#' p <- ec.init()
#' p$x$opts <- list(
#' series = list(list(type='sunburst', data=data,
#' radius=list(0, '90%'), label=list(rotate='radial')
#' )))
#' p
#'
#'
#' #------ registerMap JSON
#' json <- jsonlite::read_json("https://echarts.apache.org/examples/data/asset/geo/USA.json")
#' dusa <- USArrests %>% dplyr::mutate(states = row.names(.))
#' p <- ec.init(preset=FALSE)
#' p$x$registerMap <- list(list(mapName = 'USA', geoJSON = json))
#' # registerMap supports also maps in SVG format, see website gallery
#' p$x$opts <- list(
#' visualMap = list(type='continuous', calculable=TRUE,
#' min=min(dusa$UrbanPop), max=max(dusa$UrbanPop))
#' ,series = list( list(type='map', map='USA', name='UrbanPop', roam=TRUE,
#' data = lapply(ec.data(dusa,'names'), function(x) list(name=x$states, value=x$UrbanPop))
#' ))
#' )
#' p
#'
#'
#' #------ Error Bars on grouped data
#' library(dplyr)
#' df <- mtcars %>% group_by(cyl,gear) %>% summarise(yy=round(mean(mpg),2)) %>%
#' mutate(low=round(yy-cyl*runif(1),2), high=round(yy+cyl*runif(1),2)) %>%
#' relocate(cyl, .after = last_col()) # move group column behind first four cols
#' p <- df %>% ec.init(group1='bar', load='custom') %>%
#' ecr.ebars(df, name = 'eb'
#' ,tooltip = list(formatter=ec.clmn('high <b>%d</b><br>low <b>%d</b>', 4,3)))
#' p$x$opts$tooltip <- list(ey='')
#' p
#'
#'
#' #------ Gauge
#' p <- ec.init(preset=FALSE);
#' p$x$opts$series <- list(list(
#' type = 'gauge', max = 160, min=40,
#' detail = list(formatter='\U1F9E0={value}'),
#' data = list(list(value=85, name='IQ test')) ))
#' p
#'
#'
#' #------ Custom gauge with animation
#' p <- ec.init(js = "setInterval(function () {
#' opts.series[0].data[0].value = (Math.random() * 100).toFixed(2) - 0;
#' chart.setOption(opts, true);}, 2000);")
#' p$x$opts <- list(series=list(list(type = 'gauge',
#' axisLine = list(lineStyle=list(width=30,
#' color = list(c(0.3, '#67e0e3'),c(0.7, '#37a2da'),c(1, '#fd666d')))),
#' pointer = list(itemStyle=list(color='auto')),
#' axisTick = list(distance=-30,length=8, lineStyle=list(color='#fff',width=2)),
#' splitLine = list(distance=-30,length=30, lineStyle=list(color='#fff',width=4)),
#' axisLabel = list(color='auto',distance=40,fontSize=20),
#' detail = list(valueAnimation=TRUE, formatter='{value} km/h',color='auto'),
#' data = list(list(value=70))
#' )))
#' p
#'
#'
#' #------ Sankey and graph plots
#' # prepare data
#' sankey <- data.frame(
#' node = c("a","b", "c", "d", "e"),
#' source = c("a", "b", "c", "d", "c"),
#' target = c("b", "c", "d", "e", "e"),
#' value = c(5, 6, 2, 8, 13),
#' stringsAsFactors = FALSE
#' )
#'
#' p <- ec.init(preset=FALSE)
#' p$x$opts$series[[1]] <- list( type='sankey',
#' data = lapply(ec.data(sankey,'names'), function(x) list(name=x$node)),
#' edges = ec.data(sankey,'names')
#' )
#' p
#'
#'
#' # graph plot with same data ---------------
#' p <- ec.init(preset=FALSE, title=list(text="Graph"))
#' p$x$opts$series[[1]] <- list( type='graph',
#' layout = 'force', # try 'circular' too
#' data = lapply(ec.data(sankey,'names'),
#' function(x) list(name=x$node, tooltip = list(show=FALSE))),
#' edges = lapply(ec.data(sankey,'names'),
#' function(x) { x$lineStyle <- list(width=x$value); x }),
#' emphasis = list(focus='adjacency',
#' label=list( position='right', show=TRUE)),
#' label = list(show=TRUE), roam = TRUE, zoom = 4,
#' tooltip=list(textStyle=list(color='blue')),
#' lineStyle = list(curveness=0.3)
#' )
#' p$x$opts$tooltip <- list(trigger='item')
#' p
#'
#'
#' #------ group connect
#' main <- mtcars %>% ec.init(height = 200)
#' main$x$opts$series[[1]]$name <- "this legend is shared"
#' main$x$opts$legend <- list(show=FALSE)
#' main$x$group <- 'group1' # same group name for all charts
#'
#' q1 <- main; q1$x$opts$series[[1]]$encode <- list(y='hp', x='mpg');
#' q1$x$opts$legend <- list(show=TRUE) # show first legend to share
#' q2 <- main; q2$x$opts$series[[1]]$encode <- list(y='wt', x='mpg');
#' q3 <- main; q3$x$opts$series[[1]]$encode <- list(y='drat', x='mpg');
#' q4 <- main; q4$x$opts$series[[1]]$encode <- list(y='qsec', x='mpg');
#' q4$x$connect <- 'group1'
#' # q4$x$disconnect <- 'group1' # ok too
#' if (interactive()) {
#' ec.layout(list(q1,q2,q3,q4), cols=2, title='group connect')
#' }
#'
#' #------------- Shiny interactive charts demo ---------------
#' if (interactive()) {
#' demo(eshiny, package='echarty')
#' }
#'
#' } # donttest
#' @export
# Placeholder function: its only job is to anchor the example collection in
# the roxygen block above. Emits a usage hint instead of running anything.
ec.examples <- function() {
  hint <- "copy/paste code from ?ec.examples Help\n Or run all examples at once with example('ec.examples') and they'll show up in the Viewer."
  cat(hint)
}
|
/R/examples.R
|
permissive
|
statunizaga/echarty
|
R
| false
| false
| 16,027
|
r
|
# ----------- Examples --------------
#' Code Examples
#'
#' Learn by example - copy/paste code from Examples below.\cr
#' This code collection is to demonstrate various concepts of
#' data preparation, conversion, grouping,
#' parameter setting, visual fine-tuning,
#' custom rendering, plugins attachment,
#' Shiny plots & interactions through Shiny proxy.\cr
#'
#' @return No return value, used only for help
#'
#' @seealso \href{https://helgasoft.github.io/echarty/}{website} has many more examples
#'
#' @examples
#' \donttest{
#'
#' #------ Basic scatter chart, instant display
#' cars %>% ec.init()
#'
#' #------ Same chart, change theme and save for further processing
#' p <- cars %>% ec.init() %>% ec.theme('dark')
#' p
#'
#'
#' #------ JSON back and forth
#' tmp <- cars %>% ec.init()
#' tmp
#' json <- tmp %>% ec.inspect()
#' ec.fromJson(json) %>% ec.theme("dark")
#'
#'
#' #------ Data grouping
#' library(dplyr)
#' iris %>% group_by(Species) %>% ec.init() # by factor column
#' iris %>% mutate(Species=as.character(Species)) %>%
#' group_by(Species) %>% ec.init() # by non-factor column
#'
#' p <- Orange %>% group_by(Tree) %>% ec.init() # by factor column
#' p$x$opts$series <- lapply(p$x$opts$series, function(x) {
#' x$symbolSize=10; x$encode=list(x='age', y='circumference'); x } )
#' p
#'
#'
#' #------ Pie
#' is <- sort(islands); is <- is[is>60]
#' is <- data.frame(name=names(is), value=as.character(unname(is)))
#' data <- ec.data(is, 'names')
#' p <- ec.init()
#' p$x$opts <- list(
#' title = list(text = "Landmasses over 60,000 mi\u00B2", left = 'center'),
#' tooltip = list(trigger='item'),
#' series = list(type='pie', radius='50%', data=data, name='mi\u00B2'))
#' p
#'
#'
#' #------ Liquidfill plugin
#' if (interactive()) {
#' p <- ec.init(load=c('liquid'), preset=FALSE)
#' p$x$opts$series[[1]] <- list(
#' type='liquidFill', data=c(0.6, 0.5, 0.4, 0.3), # amplitude=0,
#' waveAnimation=FALSE, animationDuration=0, animationDurationUpdate=0
#' )
#' p
#' }
#'
#'
#' #------ Heatmap
#' times <- c(5,1,0,0,0,0,0,0,0,0,0,2,4,1,1,3,4,6,4,4,3,3,2,5,7,0,0,0,0,0,
#' 0,0,0,0,5,2,2,6,9,11,6,7,8,12,5,5,7,2,1,1,0,0,0,0,0,0,0,0,3,2,
#' 1,9,8,10,6,5,5,5,7,4,2,4,7,3,0,0,0,0,0,0,1,0,5,4,7,14,13,12,9,5,
#' 5,10,6,4,4,1,1,3,0,0,0,1,0,0,0,2,4,4,2,4,4,14,12,1,8,5,3,7,3,0,
#' 2,1,0,3,0,0,0,0,2,0,4,1,5,10,5,7,11,6,0,5,3,4,2,0,1,0,0,0,0,0,
#' 0,0,0,0,1,0,2,1,3,4,0,0,0,0,1,2,2,6)
#' df <- NULL; n <- 1;
#' for(i in 0:6) { df <- rbind(df, data.frame(0:23, rep(i,24), times[n:(n+23)])); n<-n+24 }
#' hours <- ec.data(df); hours <- hours[-1] # remove columns row
#' times <- c('12a',paste0(1:11,'a'),'12p',paste0(1:11,'p'))
#' days <- c('Saturday','Friday','Thursday','Wednesday','Tuesday','Monday','Sunday')
#' p <- ec.init(preset=FALSE)
#' p$x$opts <- list( title = list(text='Punch Card Heatmap'),
#' tooltip = list(position='top'),grid=list(height='50%',top='10%'),
#' xAxis = list(type='category', data=times, splitArea=list(show=TRUE)),
#' yAxis = list(type='category', data=days, splitArea=list(show=TRUE)),
#' visualMap = list(min=0,max=10,calculable=TRUE,orient='horizontal',left='center',bottom='15%'),
#' series = list(list(name='Hours', type = 'heatmap', data= hours,label=list(show=TRUE),
#' emphasis=list(itemStyle=list(shadowBlur=10,shadowColor='rgba(0,0,0,0.5)'))))
#' )
#' p
#'
#'
#' #------ Plugin leaflet
#' tmp <- quakes %>% dplyr::relocate('long') # set in lon,lat order
#' p <- tmp %>% ec.init(load='leaflet')
#' p$x$opts$legend = list(data=list(list(name='quakes')))
#' p$x$opts$series[[1]]$name = 'quakes'
#' p$x$opts$series[[1]]$symbolSize = ec.clmn(4)
#' p
#'
#'
#' #------ Plugin 'world' with lines and color coding
#' flights <- NULL
#' flights <- try(read.csv(paste0('https://raw.githubusercontent.com/plotly/datasets/master/',
#' '2011_february_aa_flight_paths.csv')), silent = TRUE)
#' if (!is.null(flights)) {
#' tmp <- data.frame(airport1 = unique(head(flights,10)$airport1),
#' color = c("#387e78","#eeb422","#d9534f",'magenta'))
#' tmp <- head(flights,10) %>% inner_join(tmp) # add color by airport
#' p <- tmp %>% ec.init(load='world')
#' p$x$opts$geo$center= c(mean(flights$start_lon), mean(flights$start_lat))
#' p$x$opts$series <- list(list(
#' type='lines', coordinateSystem='geo',
#' data = lapply(ec.data(tmp, 'names'), function(x)
#' list(coords = list(c(x$start_lon,x$start_lat),
#' c(x$end_lon,x$end_lat)),
#' colr = x$color)
#' )
#' ,lineStyle = list(curveness=0.3, width=3, color=ec.clmn('colr'))
#' ))
#' p
#' }
#'
#'
#' #------ Plugin 3D
#' if (interactive()) {
#' data <- list()
#' for(y in 1:dim(volcano)[2]) for(x in 1:dim(volcano)[1])
#' data <- append(data, list(c(x, y, volcano[x,y])))
#' p <- ec.init(load = '3D')
#' p$x$opts$series <- list(type = 'surface', data = data)
#' p
#' }
#'
#'
#' #------ 3D chart with custom coloring
#' if (interactive()) {
#' df <- data.frame(Species = unique(iris$Species),
#' color = c("#387e78","#eeb422","#d9534f"))
#' df <- iris %>% dplyr::inner_join(df) # add 6th column 'color' for Species
#' p <- df %>% ec.init(load = '3D')
#' p$x$opts$xAxis3D <- list(name='Petal.Length')
#' p$x$opts$yAxis3D <- list(name='Sepal.Width')
#' p$x$opts$zAxis3D <- list(name='Sepal.Length')
#' p$x$opts$series <- list(list(
#' type='scatter3D', symbolSize=7,
#' encode=list(x='Petal.Length', y='Sepal.Width', z='Sepal.Length'),
#' itemStyle=list(color = ec.clmn(6) ) # index of column 'color'
#' ))
#' p
#' }
#'
#'
#' #------ Surface data equation with JS code
#' if (interactive()) {
#' p <- ec.init(load='3D')
#' p$x$opts$series[[1]] <- list(
#' type = 'surface',
#' equation = list(
#' x = list(min=-3,max=4,step=0.05),
#' y = list(min=-3,max=3,step=0.05),
#' z = htmlwidgets::JS("function (x, y) {
#' return Math.sin(x * x + y * y) * x / Math.PI; }")
#' )
#' )
#' p
#' }
#'
#'
#' #------ Surface with data from a data.frame
#' if (interactive()) {
#' library(dplyr)
#' data <- expand.grid(
#' x = seq(0, 2, by = 0.1),
#' y = seq(0, 1, by = 0.1)
#' ) %>% mutate(z = x * (y ^ 2)) %>% select(x,y,z)
#' p <- ec.init(load='3D')
#' p$x$opts$series[[1]] <- list(
#' type = 'surface',
#' data = ec.data(data, 'values'))
#' p
#' }
#'
#'
#' #------ Band series with customization
#' # first column ('day') usually goes to the X-axis
#' # try also alternative data setting - replace lines *1 with *2
#' library(dplyr)
#' dats <- as.data.frame(EuStockMarkets) %>% mutate(day=1:n()) %>%
#' relocate(day) %>% slice_head(n=100)
#' p <- ec.init(load='custom') # *1 = unnamed data
#' #p <- dats %>% ec.init(load='custom') # *2 = dataset
#' p$x$opts$series = append(
#' ecr.band(dats, 'DAX','FTSE', name='Ftse-Dax', color='lemonchiffon'),
#' list(list(type='line', name='CAC', color='red', symbolSize=1,
#' data = ec.data(dats %>% select(day,CAC), 'values') # *1
#' #encode=list(x='day', y='CAC') # *2
#' ))
#' )
#' p$x$opts$legend <- list(ey='')
#' p$x$opts$dataZoom <- list(type='slider', end=50)
#' p
#'
#'
#' #------ Timeline animation
#' p <- Orange %>% dplyr::group_by(age) %>% ec.init(
#' tl.series=list(type='bar', encode=list(x='Tree', y='circumference'))
#' )
#' p$x$opts$timeline <- append(p$x$opts$timeline, list(autoPlay=TRUE))
#' p$x$opts$options <- lapply(p$x$opts$options,
#' function(o) { o$title$text <- paste('age',o$title$text,'days'); o })
#' p$x$opts$xAxis <- list(type='category', name='tree')
#' p$x$opts$yAxis <- list(max=max(Orange$circumference))
#' p
#'
#'
#' #------ Boxplot
#' bdf <- data.frame(vx = sample(LETTERS[1:3], size=20, replace=TRUE),
#' vy = rnorm(20)) %>% group_by(vx) %>% group_split()
#' dats <- lapply(bdf, function(x) boxplot.stats(x$vy)$stats )
#' p <- ec.init()
#' p$x$opts <- list( # overwrite presets
#' xAxis = list(ey=''),
#' yAxis = list(type = 'category', data = unique(unlist(lapply(bdf, `[`, , 1))) ),
#' series = list(list(type = 'boxplot', data = dats))
#' )
#' p
#'
#'
#' #------ EChartsJS v.5 feature custom transform - a regression line
#' # presets for xAxis,yAxis,dataset and series are used
#' dset <- data.frame(x=1:10, y=sample(1:100,10))
#' p <- dset %>% ec.init(js='echarts.registerTransform(ecStat.transform.regression)')
#' p$x$opts$dataset[[2]] <- list(transform = list(type='ecStat:regression'))
#' p$x$opts$series[[2]] <- list(
#' type='line', itemStyle=list(color='red'), datasetIndex=1)
#' p
#'
#'
#' #------ EChartsJS v.5 features transform and sort
#' datset <- list(
#' list(source=list(
#' list('name', 'age', 'profession', 'score', 'date'),
#' list('Hannah Krause', 41, 'Engineer', 314, '2011-02-12'),
#' list('Zhao Qian', 20, 'Teacher', 351, '2011-03-01'),
#' list('Jasmin Krause', 52, 'Musician', 287, '2011-02-14'),
#' list('Li Lei', 37, 'Teacher', 219, '2011-02-18'),
#' list('Karle Neumann', 25, 'Engineer', 253, '2011-04-02'),
#' list('Adrian Groß', 19, 'Teacher', NULL, '2011-01-16'),
#' list('Mia Neumann', 71, 'Engineer', 165, '2011-03-19'),
#' list('Böhm Fuchs', 36, 'Musician', 318, '2011-02-24'),
#' list('Han Meimei', 67, 'Engineer', 366, '2011-03-12'))),
#' list(transform = list(type= 'sort', config=list(
#' list(dimension='profession', order='desc'),
#' list(dimension='score', order='desc'))
#' )))
#' p <- ec.init(title = list(
#' text='Data transform, multiple-sort bar',
#' subtext='JS source',
#' sublink=paste0('https://echarts.apache.org/next/examples/en/editor.html',
#' '?c=doc-example/data-transform-multiple-sort-bar'),
#' left='center'))
#' p$x$opts$dataset <- datset
#' p$x$opts$xAxis <- list(type = 'category', axisLabel=list(interval=0, rotate=30))
#' p$x$opts$yAxis <- list(name='score')
#' p$x$opts$series[[1]] <- list(
#' type='bar',
#' label=list( show=TRUE, rotate=90, position='insideBottom',
#' align='left', verticalAlign='middle'
#' ),
#' itemStyle=list(
#' color = htmlwidgets::JS("function (params) {
#' return ({
#' Engineer: '#5470c6',
#' Teacher: '#91cc75',
#' Musician: '#fac858'
#' })[params.data[2]]
#' }")
#' ),
#' encode=list( x='name', y='score', label=list('profession') ),
#' datasetIndex = 1
#' )
#' p$x$opts$tooltip <- list(trigger='item', axisPointer=list(type='shadow'))
#' p
#'
#'
#' #------ Sunburst
#' # see website for different ways to set hierarchical data
#' data = list(list(name='Grandpa',children=list(list(name='Uncle Leo',value=15,
#' children=list(list(name='Cousin Jack',value=2), list(name='Cousin Mary',value=5,
#' children=list(list(name='Jackson',value=2))), list(name='Cousin Ben',value=4))),
#' list(name='Father',value=10,children=list(list(name='Me',value=5),
#' list(name='Brother Peter',value=1))))), list(name='Nancy',children=list(
#' list(name='Uncle Nike',children=list(list(name='Cousin Betty',value=1),
#' list(name='Cousin Jenny',value=2))))))
#' p <- ec.init()
#' p$x$opts <- list(
#' series = list(list(type='sunburst', data=data,
#' radius=list(0, '90%'), label=list(rotate='radial')
#' )))
#' p
#'
#'
#' #------ registerMap JSON
#' json <- jsonlite::read_json("https://echarts.apache.org/examples/data/asset/geo/USA.json")
#' dusa <- USArrests %>% dplyr::mutate(states = row.names(.))
#' p <- ec.init(preset=FALSE)
#' p$x$registerMap <- list(list(mapName = 'USA', geoJSON = json))
#' # registerMap supports also maps in SVG format, see website gallery
#' p$x$opts <- list(
#' visualMap = list(type='continuous', calculable=TRUE,
#' min=min(dusa$UrbanPop), max=max(dusa$UrbanPop))
#' ,series = list( list(type='map', map='USA', name='UrbanPop', roam=TRUE,
#' data = lapply(ec.data(dusa,'names'), function(x) list(name=x$states, value=x$UrbanPop))
#' ))
#' )
#' p
#'
#'
#' #------ Error Bars on grouped data
#' library(dplyr)
#' df <- mtcars %>% group_by(cyl,gear) %>% summarise(yy=round(mean(mpg),2)) %>%
#' mutate(low=round(yy-cyl*runif(1),2), high=round(yy+cyl*runif(1),2)) %>%
#' relocate(cyl, .after = last_col()) # move group column behind first four cols
#' p <- df %>% ec.init(group1='bar', load='custom') %>%
#' ecr.ebars(df, name = 'eb'
#' ,tooltip = list(formatter=ec.clmn('high <b>%d</b><br>low <b>%d</b>', 4,3)))
#' p$x$opts$tooltip <- list(ey='')
#' p
#'
#'
#' #------ Gauge
#' p <- ec.init(preset=FALSE);
#' p$x$opts$series <- list(list(
#' type = 'gauge', max = 160, min=40,
#' detail = list(formatter='\U1F9E0={value}'),
#' data = list(list(value=85, name='IQ test')) ))
#' p
#'
#'
#' #------ Custom gauge with animation
#' p <- ec.init(js = "setInterval(function () {
#' opts.series[0].data[0].value = (Math.random() * 100).toFixed(2) - 0;
#' chart.setOption(opts, true);}, 2000);")
#' p$x$opts <- list(series=list(list(type = 'gauge',
#' axisLine = list(lineStyle=list(width=30,
#' color = list(c(0.3, '#67e0e3'),c(0.7, '#37a2da'),c(1, '#fd666d')))),
#' pointer = list(itemStyle=list(color='auto')),
#' axisTick = list(distance=-30,length=8, lineStyle=list(color='#fff',width=2)),
#' splitLine = list(distance=-30,length=30, lineStyle=list(color='#fff',width=4)),
#' axisLabel = list(color='auto',distance=40,fontSize=20),
#' detail = list(valueAnimation=TRUE, formatter='{value} km/h',color='auto'),
#' data = list(list(value=70))
#' )))
#' p
#'
#'
#' #------ Sankey and graph plots
#' # prepare data
#' sankey <- data.frame(
#' node = c("a","b", "c", "d", "e"),
#' source = c("a", "b", "c", "d", "c"),
#' target = c("b", "c", "d", "e", "e"),
#' value = c(5, 6, 2, 8, 13),
#' stringsAsFactors = FALSE
#' )
#'
#' p <- ec.init(preset=FALSE)
#' p$x$opts$series[[1]] <- list( type='sankey',
#' data = lapply(ec.data(sankey,'names'), function(x) list(name=x$node)),
#' edges = ec.data(sankey,'names')
#' )
#' p
#'
#'
#' # graph plot with same data ---------------
#' p <- ec.init(preset=FALSE, title=list(text="Graph"))
#' p$x$opts$series[[1]] <- list( type='graph',
#' layout = 'force', # try 'circular' too
#' data = lapply(ec.data(sankey,'names'),
#' function(x) list(name=x$node, tooltip = list(show=FALSE))),
#' edges = lapply(ec.data(sankey,'names'),
#' function(x) { x$lineStyle <- list(width=x$value); x }),
#' emphasis = list(focus='adjacency',
#' label=list( position='right', show=TRUE)),
#' label = list(show=TRUE), roam = TRUE, zoom = 4,
#' tooltip=list(textStyle=list(color='blue')),
#' lineStyle = list(curveness=0.3)
#' )
#' p$x$opts$tooltip <- list(trigger='item')
#' p
#'
#'
#' #------ group connect
#' main <- mtcars %>% ec.init(height = 200)
#' main$x$opts$series[[1]]$name <- "this legend is shared"
#' main$x$opts$legend <- list(show=FALSE)
#' main$x$group <- 'group1' # same group name for all charts
#'
#' q1 <- main; q1$x$opts$series[[1]]$encode <- list(y='hp', x='mpg');
#' q1$x$opts$legend <- list(show=TRUE) # show first legend to share
#' q2 <- main; q2$x$opts$series[[1]]$encode <- list(y='wt', x='mpg');
#' q3 <- main; q3$x$opts$series[[1]]$encode <- list(y='drat', x='mpg');
#' q4 <- main; q4$x$opts$series[[1]]$encode <- list(y='qsec', x='mpg');
#' q4$x$connect <- 'group1'
#' # q4$x$disconnect <- 'group1' # ok too
#' if (interactive()) {
#' ec.layout(list(q1,q2,q3,q4), cols=2, title='group connect')
#' }
#'
#' #------------- Shiny interactive charts demo ---------------
#' if (interactive()) {
#' demo(eshiny, package='echarty')
#' }
#'
#' } # donttest
#' @export
ec.examples <- function(){
  # Interactive-help stub: the runnable examples live in the roxygen block
  # above; this function only points the user at the help page.
  msg <- "copy/paste code from ?ec.examples Help\n Or run all examples at once with example('ec.examples') and they'll show up in the Viewer."
  cat(msg)
}
|
/STA401/TP/TP4/TP4.R
|
no_license
|
collomsu/L2_INFO
|
R
| false
| false
| 1,486
|
r
| ||
#' @export
hydro_hitamaelar <- function(con) {
  # Lazy query on hydro.hitamaelar: drop bookkeeping columns snt:sbn and
  # derive the year ('ar') from the timestamp -- to_char/to_number are left
  # untranslated by dbplyr and presumably run as SQL on the server; confirm.
  q <- tbl_mar(con, "hydro.hitamaelar")
  q <- dplyr::select(q, -c(snt:sbn))
  dplyr::mutate(q, ar = to_number(to_char(timi, "YYYY")))
}
#' @export
hydro_stadir <- function(con) {
  # hydro.stadir without the bookkeeping columns snt:sbn.
  q <- tbl_mar(con, "hydro.stadir")
  dplyr::select(q, -c(snt:sbn))
}
#' @export
hydro_stodvanofn <- function(con) {
  # Scale coordinates by 100 (longitude also negated, i.e. western
  # hemisphere) before mar:::geoconvert -- presumably the converter expects
  # DDMM-style values; confirm against geoconvert().
  q <- tbl_mar(con, "hydro.stodvanofn")
  q <- dplyr::mutate(q,
                     lengd = -lengd * 100,
                     breidd = breidd * 100)
  mar:::geoconvert(q)
}
#' @export
hydro_observation <- function(con) {
  # hydro.observation without the bookkeeping columns snt:sbn.
  q <- tbl_mar(con, "hydro.observation")
  dplyr::select(q, -c(snt:sbn))
}
#' @export
hydro_trolltog <- function(con) {
  # hydro.trolltog without the bookkeeping columns snt:sbn.
  q <- tbl_mar(con, "hydro.trolltog")
  dplyr::select(q, -c(snt:sbn))
}
#' @export
hydro_station <- function(con) {
  # hydro.station with lon/lat assembled from the degree and seconds parts,
  # converted via mar:::geoconvert and signed by the hemisphere flags.
  #
  # Bug fix: the NA guards were crossed -- `lon` tested is.na(la_sec) while
  # adding lo_sec (and `lat` the reverse), so a missing seconds value on one
  # axis corrupted the other coordinate. Each coordinate now checks its own
  # seconds column.
  tbl_mar(con, "hydro.station") %>%
    dplyr::select(-c(snt:sbn)) %>%
    dplyr::mutate(lon = as.integer(longitude) * 100 +
                    ifelse(is.na(lo_sec), 0, as.integer(lo_sec)),
                  lat = as.integer(latitude) * 100 +
                    ifelse(is.na(la_sec), 0, as.integer(la_sec))) %>%
    mar:::geoconvert(col.names = c("lon", "lat")) %>%
    dplyr::mutate(lon = ifelse(lo_id == "W", -lon, lon),
                  lat = ifelse(la_id == "N", lat, -lat)) %>%
    dplyr::select(t_id:id, lon, lat, q_cont:name)
}
#' @export
hydro_sonda <- function(con) {
  # hydro.sonda without the bookkeeping columns snt:sbg.
  q <- tbl_mar(con, "hydro.sonda")
  dplyr::select(q, -c(snt:sbg))
}
|
/R/hydro.R
|
no_license
|
einarhjorleifsson/mar
|
R
| false
| false
| 1,359
|
r
|
#' @export
hydro_hitamaelar <- function(con) {
  # Lazy query on hydro.hitamaelar: drop bookkeeping columns snt:sbn and
  # derive the year ('ar') from the timestamp -- to_char/to_number are left
  # untranslated by dbplyr and presumably run as SQL on the server; confirm.
  q <- tbl_mar(con, "hydro.hitamaelar")
  q <- dplyr::select(q, -c(snt:sbn))
  dplyr::mutate(q, ar = to_number(to_char(timi, "YYYY")))
}
#' @export
hydro_stadir <- function(con) {
  # hydro.stadir without the bookkeeping columns snt:sbn.
  q <- tbl_mar(con, "hydro.stadir")
  dplyr::select(q, -c(snt:sbn))
}
#' @export
hydro_stodvanofn <- function(con) {
  # Scale coordinates by 100 (longitude also negated, i.e. western
  # hemisphere) before mar:::geoconvert -- presumably the converter expects
  # DDMM-style values; confirm against geoconvert().
  q <- tbl_mar(con, "hydro.stodvanofn")
  q <- dplyr::mutate(q,
                     lengd = -lengd * 100,
                     breidd = breidd * 100)
  mar:::geoconvert(q)
}
#' @export
hydro_observation <- function(con) {
  # hydro.observation without the bookkeeping columns snt:sbn.
  q <- tbl_mar(con, "hydro.observation")
  dplyr::select(q, -c(snt:sbn))
}
#' @export
hydro_trolltog <- function(con) {
  # hydro.trolltog without the bookkeeping columns snt:sbn.
  q <- tbl_mar(con, "hydro.trolltog")
  dplyr::select(q, -c(snt:sbn))
}
#' @export
hydro_station <- function(con) {
  # hydro.station with lon/lat assembled from the degree and seconds parts,
  # converted via mar:::geoconvert and signed by the hemisphere flags.
  #
  # Bug fix: the NA guards were crossed -- `lon` tested is.na(la_sec) while
  # adding lo_sec (and `lat` the reverse), so a missing seconds value on one
  # axis corrupted the other coordinate. Each coordinate now checks its own
  # seconds column.
  tbl_mar(con, "hydro.station") %>%
    dplyr::select(-c(snt:sbn)) %>%
    dplyr::mutate(lon = as.integer(longitude) * 100 +
                    ifelse(is.na(lo_sec), 0, as.integer(lo_sec)),
                  lat = as.integer(latitude) * 100 +
                    ifelse(is.na(la_sec), 0, as.integer(la_sec))) %>%
    mar:::geoconvert(col.names = c("lon", "lat")) %>%
    dplyr::mutate(lon = ifelse(lo_id == "W", -lon, lon),
                  lat = ifelse(la_id == "N", lat, -lat)) %>%
    dplyr::select(t_id:id, lon, lat, q_cont:name)
}
#' @export
hydro_sonda <- function(con) {
  # hydro.sonda without the bookkeeping columns snt:sbg.
  q <- tbl_mar(con, "hydro.sonda")
  dplyr::select(q, -c(snt:sbg))
}
|
# lowPass.443()
# First-order Butterworth low-pass filter used to reduce high-frequency
# noise in the pneumo data (and optionally the raw EDA data), improving the
# diagnostic coefficient. Callers append the filtered upper and lower pneumo
# channels as two extra columns of the time-series data frames.
#
###
# define the low pass .443 function
lowPass.443 <- function(x, GAIN = 2.254050840e+01, zplane = 0.9112708567) {
  # x      : numeric vector (one column of the time series)
  # GAIN   : filter gain divisor; zplane : recursive (pole) coefficient
  # Returns a filtered numeric vector the same length as x.
  #
  # NOTE(review): the input state is seeded with the UN-scaled first sample
  # (x[1], not x[1]/GAIN) -- quirky, but preserved exactly as the original.
  prev_in <- x[1]
  prev_out <- 0
  filtered <- rep(NA, length(x))
  for (k in seq_along(x)) {
    cur_in <- x[k] / GAIN
    cur_out <- (cur_in + prev_in) + (zplane * prev_out)
    filtered[k] <- cur_out
    prev_in <- cur_in
    prev_out <- cur_out
  }
  return(filtered)
} # end lowpass .443 function
# Demo: repeatedly apply the filter to the cardio channel (column 7 of
# myCardioData, which must already be loaded) and eyeball the smoothing.
plot.ts(myCardioData[,7][1:1000])
smoothedCardio <- lowPass.443(myCardioData[,7])
plot.ts(smoothedCardio[1:1000])
smoothedCardio2 <- lowPass.443(smoothedCardio)
plot.ts(smoothedCardio2[1:1000])
smoothedCardio3 <- lowPass.443(smoothedCardio2)
plot.ts(smoothedCardio3[1:1000])
# 30-sample moving average of the filtered signal.
# Fixes relative to the original snippet:
#  * smoothedCardio is a vector, not a data frame, so `[,7]` is invalid and
#    nrow() would return NULL;
#  * `i:i+29` parses as (i:i) + 29, i.e. a single index -- use i:(i + 29);
#  * assigning a `for` loop (whose value is NULL) and calling return()
#    outside a function are errors -- build the result with vapply instead.
smoothCardioA1 <- vapply(
  seq_len(max(length(smoothedCardio) - 29, 0)),
  function(i) mean(smoothedCardio[i:(i + 29)]),
  numeric(1)
)
|
/lowPass.443.1stOrder.R
|
no_license
|
raymondnelson/NCCA_ASCII_Parse
|
R
| false
| false
| 1,379
|
r
|
# lowPass.443()
# First-order Butterworth low-pass filter used to reduce high-frequency
# noise in the pneumo data (and optionally the raw EDA data), improving the
# diagnostic coefficient. Callers append the filtered upper and lower pneumo
# channels as two extra columns of the time-series data frames.
#
###
# define the low pass .443 function
lowPass.443 <- function(x, GAIN = 2.254050840e+01, zplane = 0.9112708567) {
  # x      : numeric vector (one column of the time series)
  # GAIN   : filter gain divisor; zplane : recursive (pole) coefficient
  # Returns a filtered numeric vector the same length as x.
  #
  # NOTE(review): the input state is seeded with the UN-scaled first sample
  # (x[1], not x[1]/GAIN) -- quirky, but preserved exactly as the original.
  prev_in <- x[1]
  prev_out <- 0
  filtered <- rep(NA, length(x))
  for (k in seq_along(x)) {
    cur_in <- x[k] / GAIN
    cur_out <- (cur_in + prev_in) + (zplane * prev_out)
    filtered[k] <- cur_out
    prev_in <- cur_in
    prev_out <- cur_out
  }
  return(filtered)
} # end lowpass .443 function
# Demo: repeatedly apply the filter to the cardio channel (column 7 of
# myCardioData, which must already be loaded) and eyeball the smoothing.
plot.ts(myCardioData[,7][1:1000])
smoothedCardio <- lowPass.443(myCardioData[,7])
plot.ts(smoothedCardio[1:1000])
smoothedCardio2 <- lowPass.443(smoothedCardio)
plot.ts(smoothedCardio2[1:1000])
smoothedCardio3 <- lowPass.443(smoothedCardio2)
plot.ts(smoothedCardio3[1:1000])
# 30-sample moving average of the filtered signal.
# Fixes relative to the original snippet:
#  * smoothedCardio is a vector, not a data frame, so `[,7]` is invalid and
#    nrow() would return NULL;
#  * `i:i+29` parses as (i:i) + 29, i.e. a single index -- use i:(i + 29);
#  * assigning a `for` loop (whose value is NULL) and calling return()
#    outside a function are errors -- build the result with vapply instead.
smoothCardioA1 <- vapply(
  seq_len(max(length(smoothedCardio) - 29, 0)),
  function(i) mean(smoothedCardio[i:(i + 29)]),
  numeric(1)
)
|
## Constructor for a matrix wrapper that caches its inverse.
## Returns a list of four closures sharing state through the enclosing
## environment:
##   set(y)      -- replace the matrix and invalidate the cached inverse
##   get()       -- return the stored matrix
##   setinv(inv) -- store a computed inverse
##   getinv()    -- return the cached inverse (NULL until computed)
## The returned list is the input expected by the cacheSolve function below.
makeCacheVector <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # New data invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## Return the inverse of the special "matrix" created above.
## A cached inverse is returned immediately (with a message); otherwise the
## inverse is computed with solve(), stored via setinv, and returned.
## Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## Cache miss: compute a matrix that is the inverse of 'x' and remember it
  mat <- x$get()
  computed <- solve(mat, ...)
  x$setinv(computed)
  computed
}
|
/cachematrix.R
|
no_license
|
cmccracken/ProgrammingAssignment2
|
R
| false
| false
| 1,173
|
r
|
## Constructor for a matrix wrapper that caches its inverse.
## Returns a list of four closures sharing state through the enclosing
## environment:
##   set(y)      -- replace the matrix and invalidate the cached inverse
##   get()       -- return the stored matrix
##   setinv(inv) -- store a computed inverse
##   getinv()    -- return the cached inverse (NULL until computed)
## The returned list is the input expected by the cacheSolve function below.
makeCacheVector <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # New data invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## Return the inverse of the special "matrix" created above.
## A cached inverse is returned immediately (with a message); otherwise the
## inverse is computed with solve(), stored via setinv, and returned.
## Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## Cache miss: compute a matrix that is the inverse of 'x' and remember it
  mat <- x$get()
  computed <- solve(mat, ...)
  x$setinv(computed)
  computed
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/r2_helpers.R
\name{get_var_comps}
\alias{get_var_comps}
\title{Extract variance components from merMod.}
\usage{
get_var_comps(mod, expct, overdisp_name)
}
\arguments{
\item{mod}{A merMod object.}
\item{expct}{expectation.}
\item{overdisp_name}{name of overdispersion term}
}
\value{
Fixed, random and residual variance
}
\description{
Extract variance components from merMod.
}
\keyword{internal}
|
/man/get_var_comps.Rd
|
no_license
|
mastoffel/partR2
|
R
| false
| true
| 478
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/r2_helpers.R
\name{get_var_comps}
\alias{get_var_comps}
\title{Extract variance components from merMod.}
\usage{
get_var_comps(mod, expct, overdisp_name)
}
\arguments{
\item{mod}{A merMod object.}
\item{expct}{expectation.}
\item{overdisp_name}{name of overdispersion term}
}
\value{
Fixed, random and residual variance
}
\description{
Extract variance components from merMod.
}
\keyword{internal}
|
load("antibiotics_2010.Rdata")

# Four scatterplots of antibiotic consumption against candidate predictors,
# written to a 6in x 6in PDF (myPDF and COL come from the surrounding
# project). Fix: the original passed mfrow = c(1, 3) to myPDF and then
# immediately overrode it with par(mfrow = c(2, 2)); the 2 x 2 layout that
# is actually used is now set once.
myPDF("antibioticsPlots.pdf", 6, 6,
      mfrow = c(2, 2),
      mgp = c(1.9, 0.5, 0),
      mar = c(3, 3, 1, .5) + 0.1)
plot(antibiotics.2010$consumption ~ antibiotics.2010$female,
     main = "Consumption vs Female",
     ylab = "Consumption (DID)", xlab = "Percentage Female Pop",
     pch = 21, col = COL[1], bg = COL[1, 3], cex.main = 0.8, cex.lab = 0.9)
plot(antibiotics.2010$consumption ~ antibiotics.2010$lifeexp,
     main = "Consumption vs Life Expectancy",
     ylab = "Consumption (DID)", xlab = "Life Expectancy (yrs)",
     pch = 21, col = COL[1], bg = COL[1, 3], cex.main = 0.8, cex.lab = 0.9)
plot(antibiotics.2010$consumption ~ antibiotics.2010$illiteracy,
     main = "Consumption vs Percent Illiterate", cex = 0.75,
     ylab = "Consumption (DID)", xlab = "Percentage Illiterate",
     pch = 21, col = COL[1], bg = COL[1, 3], cex.main = 0.8, cex.lab = 0.9)
plot(antibiotics.2010$consumption ~ antibiotics.2010$popdensity,
     main = "Consumption vs Pop Density", cex = 0.75,
     ylab = "Consumption (DID)", xlab = "Density (1,000 people per sq km)",
     pch = 21, col = COL[1], bg = COL[1, 3], cex.main = 0.8, cex.lab = 0.9)
dev.off()
|
/ch_multiple_linear_regression_oi_biostat/figures/eoce/antibiotic/antibiotic.R
|
no_license
|
OI-Biostat/oi_biostat_text
|
R
| false
| false
| 1,217
|
r
|
load("antibiotics_2010.Rdata")

# Four scatterplots of antibiotic consumption against candidate predictors,
# written to a 6in x 6in PDF (myPDF and COL come from the surrounding
# project). Fix: the original passed mfrow = c(1, 3) to myPDF and then
# immediately overrode it with par(mfrow = c(2, 2)); the 2 x 2 layout that
# is actually used is now set once.
myPDF("antibioticsPlots.pdf", 6, 6,
      mfrow = c(2, 2),
      mgp = c(1.9, 0.5, 0),
      mar = c(3, 3, 1, .5) + 0.1)
plot(antibiotics.2010$consumption ~ antibiotics.2010$female,
     main = "Consumption vs Female",
     ylab = "Consumption (DID)", xlab = "Percentage Female Pop",
     pch = 21, col = COL[1], bg = COL[1, 3], cex.main = 0.8, cex.lab = 0.9)
plot(antibiotics.2010$consumption ~ antibiotics.2010$lifeexp,
     main = "Consumption vs Life Expectancy",
     ylab = "Consumption (DID)", xlab = "Life Expectancy (yrs)",
     pch = 21, col = COL[1], bg = COL[1, 3], cex.main = 0.8, cex.lab = 0.9)
plot(antibiotics.2010$consumption ~ antibiotics.2010$illiteracy,
     main = "Consumption vs Percent Illiterate", cex = 0.75,
     ylab = "Consumption (DID)", xlab = "Percentage Illiterate",
     pch = 21, col = COL[1], bg = COL[1, 3], cex.main = 0.8, cex.lab = 0.9)
plot(antibiotics.2010$consumption ~ antibiotics.2010$popdensity,
     main = "Consumption vs Pop Density", cex = 0.75,
     ylab = "Consumption (DID)", xlab = "Density (1,000 people per sq km)",
     pch = 21, col = COL[1], bg = COL[1, 3], cex.main = 0.8, cex.lab = 0.9)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/param_merge.r
\name{param.merge}
\alias{param.merge}
\title{Merge fitted height-diameter parameters with tree-by-tree data. Deprecated.}
\usage{
param.merge(data, wparm, dbh = "D4")
}
\arguments{
\item{data}{Object returned by mergefp}
\item{wparm}{Object returned by \code{fit.weib}}
\item{dbh}{Name of column containing diameter data. Default is "D4". Used when estimating height.}
}
\description{
Function to merge fitted parameters of Weibull height-diameter equations to tree data returned by \code{mergefp}
}
\author{
Martin Sullivan, Gabriela Lopez-Gonzalez
}
|
/man/param.merge.Rd
|
no_license
|
ForestPlots/BiomasaFP_old
|
R
| false
| true
| 648
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/param_merge.r
\name{param.merge}
\alias{param.merge}
\title{Merge fitted height-diameter parameters with tree-by-tree data. Deprecated.}
\usage{
param.merge(data, wparm, dbh = "D4")
}
\arguments{
\item{data}{Object returned by mergefp}
\item{wparm}{Object returned by \code{fit.weib}}
\item{dbh}{Name of column containing diameter data. Default is "D4". Used when estimating height.}
}
\description{
Function to merge fitted parameters of Weibull height-diameter equations to tree data returned by \code{mergefp}
}
\author{
Martin Sullivan, Gabriela Lopez-Gonzalez
}
|
library(shiny)
library(ggplot2)
library(robustbase)
library(reshape)
library(xlsx)
DataScrubbing <- function(file_name)
{
  # Read an Excel spreadsheet (first row = column labels), coerce each column
  # to numeric where possible and to a factor otherwise, and drop factor
  # columns with more than 6 levels. Columns beyond the first 100 are cut
  # (the original comment said "rows", but the code cuts columns).
  # Returns list(dataTypes, df) where df has the original first column
  # re-attached as 'row_names'.
  file_name <- paste(file_name, ".xlsx", sep = "")  # add excel extension
  library(xlsx)  # import excel library for reading
  df <- read.xlsx(file_name, 1, header = FALSE, stringsAsFactors = FALSE)
  columns <- df[1, ]       # store the column names for future reference
  df <- df[2:dim(df)[1], ] # remove the label row
  row_names <- df[, 1]
  names(df) <- columns
  # cut columns beyond 100
  if (dim(df)[2] > 100)
  {
    df <- df[, 1:100]
  }
  dataTypes <- vector(mode = "character", length = dim(df)[2])
  # loop through each column and determine its type
  for (i in seq_len(dim(df)[2]))
  {
    # scrub: strip spaces, lower-case, and normalise "na"/"n/a" to real NA
    df[, i] <- gsub(" ", "", df[, i])
    df[, i] <- tolower(df[, i])
    na_indi <- which(df[, i] == "na" | df[, i] == "n/a")
    if (length(na_indi) > 0)
    {
      df[na_indi, i] <- NA
    }
    na_indi <- sum(is.na(df[, i]))  # NA count before numeric coercion
    test <- as.numeric(df[, i])
    na_indi2 <- sum(is.na(test))
    if (na_indi2 > na_indi)  # coercion created NAs -> must be characters
    {
      dataTypes[i] <- "character"
    } else
    {
      dataTypes[i] <- "double"
      df[, i] <- test
    }
  }
  # convert character columns to factors; mark high-cardinality columns
  # (> 6 levels) with "0" so they can be removed below
  for (i in seq_len(dim(df)[2]))
  {
    if (dataTypes[i] == "character")
    {
      dataTypes[i] <- "factor"
      df[, i] <- as.factor(df[, i])
      if (nlevels(df[, i]) > 6)
      {
        dataTypes[i] <- 0
      }
    }
  }
  # Bug fix: when nothing is marked for removal, `dataTypes[-integer(0)]`
  # returns an EMPTY vector, silently discarding every recorded type.
  # Only drop columns/types when there is actually something to drop.
  r_indi <- which(dataTypes == 0)
  if (length(r_indi) > 0)
  {
    df[, r_indi] <- NULL
    dataTypes <- dataTypes[-r_indi]
  }
  df <- cbind(row_names, df)
  return(list(dataTypes, df))
}
# Load and scrub the demo spreadsheet once at app start-up; DataScrubbing()
# returns list(dataTypes, cleaned data.frame).
# NOTE(review): the path has no leading "/" -- presumably it should be
# "/home/ec2-user/..."; confirm against the deployment layout.
input <- DataScrubbing("home/ec2-user/big-dog/public/data/bbqpizza")
input_data <- input[[2]]
# First column holds row labels; detach it so `data` (a global used by both
# the UI and the server below) contains only analysis columns.
row_names <- input_data[,1]
input_data[,1] <- NULL
data <- input_data
# Define UI for application that plots random distributions.
# Navbar app with one tab per analysis; every outputId here must match an
# output slot defined in the server (MarginalPlot, Outliers, Corr, Mean_o,
# hover_info).
shinyUI(navbarPage("Big Dog Analytics", id = "tabs",
  tabPanel("Marginal Distributions", value = "MD",
    # Column picker plus display-type selector for the marginal plot
    sidebarPanel(
      selectInput(inputId = "col_names",
                  label = "Select",
                  colnames(data)),
      selectInput(inputId = "show_type",
                  label = "Select",
                  list("Histogram" = "hist",
                       "Kernel Density" = "kd",
                       "Combined" = "comb"))
    ),
    # Show a plot of the generated distribution
    mainPanel(
      plotOutput("MarginalPlot")
    )
  ),
  tabPanel("Outlier Analysis", value = "OA",
    sidebarPanel(
      sliderInput(inputId = "pval", label = "Rejection P-Value", min=0, max=10, value=5, step = 1)
    ),
    mainPanel(
      plotOutput("Outliers")
    )
  ),
  tabPanel("Correlation Analysis", value = "CA",
    sidebarPanel(),
    mainPanel(
      plotOutput("Corr", hover = "plot_hover"),
      # Bug fix: the outputId was "hover$info", which can never match the
      # server's output$hover_info slot -- outputIds are plain strings, not
      # R expressions.
      verbatimTextOutput("hover_info")
    )
  ),
  tabPanel("Mean Vector", value = "MV",
    sidebarPanel(),
    mainPanel(
      plotOutput("Mean_o")
    )
  )
))
shinyServer(function(input, output) {

  # Marginal distribution of one selected column, drawn as a histogram,
  # a kernel density, or both overlaid.
  # Bug fix: the chain previously added base-graphics title(), which
  # executes eagerly inside the `+` expression and errors with
  # "plot.new has not been called yet"; ggplot titles need ggtitle().
  Marginals <- function(data,name,type){
    if (type == "hist"){
      p <- ggplot(data, aes_q(x = as.name(name))) + geom_histogram(fill = "deepskyblue2", alpha = 0.2, color = "white") + ggtitle("Marginal Distribution") + ylab('Counts')
    } else if (type == "kd"){
      p <- ggplot(data, aes_q(x = as.name(name))) + geom_density(fill = "blue" , alpha = 0.2) + ggtitle("Marginal Distribution") + ylab('Density')
    }
    else{
      p <- ggplot(data, aes_q(x = as.name(name))) + geom_histogram(aes(y = ..density..), fill = "deepskyblue2", color = "white", alpha = 0.2) + geom_density(fill = "blue" , alpha = 0.2) + ggtitle("Marginal Distribution") + ylab('Density')
    }
    p <- p + theme(text = element_text(size=20))
  }

  # Mahalanobis-distance outlier plot; cutoff_in is a rejection level in
  # percent, turned into a chi-square quantile with p = ncol(data) df.
  Outliers <- function(data,cutoff_in){
    num_cols <- dim(data)[1]
    mahalanobis_dist <- mahalanobis(data,colMeans(data),cov(data), ,tol=1e-20)
    cutoff <- qchisq(1 - cutoff_in / 100, dim(data)[2], ncp = 0, lower.tail = TRUE, log.p = FALSE)
    outlier <- mahalanobis_dist > cutoff
    df_outliers <- data.frame(x = c(1:dim(data)[1]), y = log(sqrt(mahalanobis_dist)), z = outlier)
    p <- ggplot(df_outliers,aes(x = x,y = y))
    p <- p + geom_point(aes(colour = z)) + geom_abline(intercept = log(sqrt(cutoff)), slope = 0,linetype="dashed",colour = "red") + labs(x = "Observation Number",y = "log(Mahalanobis Distances)", title = paste("Outlier Plot")) + scale_colour_manual(name="Type", values = c("FALSE" = "blue","TRUE" = "#FF0080"), breaks=c("TRUE", "FALSE"), labels=c("Outlier", "Inlier"))
    p <- p + theme(plot.title = element_text(vjust=2), text = element_text(size=20))
  }

  # Upper-triangle Pearson correlation heatmap of the alphabetically
  # ordered columns (X1/X2 come from reshape::melt on the matrix).
  Correlation <- function(data){
    data_t <- data[,order(colnames(data))]
    result <- cor(data_t)
    temp <- result
    temp[lower.tri(temp)] <- NA
    temp <- melt(temp)
    temp <- na.omit(temp)
    p <- ggplot(temp, aes(X2, X1, fill = value)) + geom_tile(alpha = 0.5, colour = "white") + scale_fill_gradient2(low = "steelblue", high = "red", mid = "violet", midpoint = 0, limit = c(-1,1), name = "Pearson\ncorrelation\n")
    base_size <- 14
    p <- p + theme_grey(base_size = base_size) + labs(x = "", y = "") + scale_x_discrete(expand = c(0, 0)) + scale_y_discrete(expand = c(0, 0)) + ggtitle("Correlation Heatmap")
    p <- p + theme(axis.ticks = element_blank(), plot.title = element_text(vjust=2), axis.text.x = element_blank(), axis.text.y = element_blank(), text = element_text(size=20), legend.text=element_text(size=20), legend.title = element_text(size = 20)) + guides(fill = guide_colorbar(barwidth = 2, barheight = 10, title.position = "top", title.vjust = 10))
    #+ geom_text(aes(X2, X1, label = round(value,2)), color = "black", size = 10)
  }

  # Column means with +/- one standard-error bars.
  # Bug fix: the SE denominator was hard-coded to column 3
  # (length(data[,3][!is.na(data[,3])])) for EVERY column; it now counts
  # the non-missing values of column i itself.
  Mean_Vectors <- function(data){
    num_vars <- dim(data)[2]
    output_mean <- vector(,num_vars)
    output_se <- vector(,num_vars)
    for (i in c(1:num_vars)){
      name <- colnames(data)[i]
      output_mean[i] <- mean(data[,i],na.rm = TRUE)
      output_se[i] <- sd(data[,i],na.rm = TRUE) / sqrt(sum(!is.na(data[,i])))
    }
    df <- data.frame(names = colnames(data), means = output_mean)
    limits <- aes(ymax = output_mean + output_se, ymin=output_mean - output_se)
    p <- ggplot(df, aes(x = names, y = means))
    p <- p + geom_point() + geom_errorbar(limits, width=0.3) + ylab("Mean") + xlab("")
  }

  # Output bindings: `data` is the global prepared at app start-up.
  output$MarginalPlot <- renderPlot({
    p <- Marginals(data,input$col_names,input$show_type)
    print(p)
  })
  output$Outliers <- renderPlot({
    p <- Outliers(data,input$pval)
    print(p)
  })
  output$Corr <- renderPlot({
    p <- Correlation(data)
    print(p)
  })
  output$Mean_o <- renderPlot({
    p <- Mean_Vectors(data)
    print(p)
  })
  # Rendered into the "hover_info" verbatimTextOutput in the UI.
  output$hover_info <- renderPrint({
    cat("input$plot_hover:\n")
    str('hi')
  })
})
|
/functions/App.R
|
no_license
|
schew/big-dog
|
R
| false
| false
| 7,308
|
r
|
library(shiny)
library(ggplot2)
library(robustbase)
library(reshape)
library(xlsx)
DataScrubbing <- function(file_name)
{
  # Read an Excel spreadsheet (first row = column labels), coerce each column
  # to numeric where possible and to a factor otherwise, and drop factor
  # columns with more than 6 levels. Columns beyond the first 100 are cut
  # (the original comment said "rows", but the code cuts columns).
  # Returns list(dataTypes, df) where df has the original first column
  # re-attached as 'row_names'.
  file_name <- paste(file_name, ".xlsx", sep = "")  # add excel extension
  library(xlsx)  # import excel library for reading
  df <- read.xlsx(file_name, 1, header = FALSE, stringsAsFactors = FALSE)
  columns <- df[1, ]       # store the column names for future reference
  df <- df[2:dim(df)[1], ] # remove the label row
  row_names <- df[, 1]
  names(df) <- columns
  # cut columns beyond 100
  if (dim(df)[2] > 100)
  {
    df <- df[, 1:100]
  }
  dataTypes <- vector(mode = "character", length = dim(df)[2])
  # loop through each column and determine its type
  for (i in seq_len(dim(df)[2]))
  {
    # scrub: strip spaces, lower-case, and normalise "na"/"n/a" to real NA
    df[, i] <- gsub(" ", "", df[, i])
    df[, i] <- tolower(df[, i])
    na_indi <- which(df[, i] == "na" | df[, i] == "n/a")
    if (length(na_indi) > 0)
    {
      df[na_indi, i] <- NA
    }
    na_indi <- sum(is.na(df[, i]))  # NA count before numeric coercion
    test <- as.numeric(df[, i])
    na_indi2 <- sum(is.na(test))
    if (na_indi2 > na_indi)  # coercion created NAs -> must be characters
    {
      dataTypes[i] <- "character"
    } else
    {
      dataTypes[i] <- "double"
      df[, i] <- test
    }
  }
  # convert character columns to factors; mark high-cardinality columns
  # (> 6 levels) with "0" so they can be removed below
  for (i in seq_len(dim(df)[2]))
  {
    if (dataTypes[i] == "character")
    {
      dataTypes[i] <- "factor"
      df[, i] <- as.factor(df[, i])
      if (nlevels(df[, i]) > 6)
      {
        dataTypes[i] <- 0
      }
    }
  }
  # Bug fix: when nothing is marked for removal, `dataTypes[-integer(0)]`
  # returns an EMPTY vector, silently discarding every recorded type.
  # Only drop columns/types when there is actually something to drop.
  r_indi <- which(dataTypes == 0)
  if (length(r_indi) > 0)
  {
    df[, r_indi] <- NULL
    dataTypes <- dataTypes[-r_indi]
  }
  df <- cbind(row_names, df)
  return(list(dataTypes, df))
}
# Load and scrub the demo spreadsheet once at app start-up; DataScrubbing()
# returns list(dataTypes, cleaned data.frame).
# NOTE(review): the path has no leading "/" -- presumably it should be
# "/home/ec2-user/..."; confirm against the deployment layout.
input <- DataScrubbing("home/ec2-user/big-dog/public/data/bbqpizza")
input_data <- input[[2]]
# First column holds row labels; detach it so `data` (a global used by both
# the UI and the server below) contains only analysis columns.
row_names <- input_data[,1]
input_data[,1] <- NULL
data <- input_data
# Define UI for application that plots random distributions.
# Navbar app with one tab per analysis; every outputId here must match an
# output slot defined in the server (MarginalPlot, Outliers, Corr, Mean_o,
# hover_info).
shinyUI(navbarPage("Big Dog Analytics", id = "tabs",
  tabPanel("Marginal Distributions", value = "MD",
    # Column picker plus display-type selector for the marginal plot
    sidebarPanel(
      selectInput(inputId = "col_names",
                  label = "Select",
                  colnames(data)),
      selectInput(inputId = "show_type",
                  label = "Select",
                  list("Histogram" = "hist",
                       "Kernel Density" = "kd",
                       "Combined" = "comb"))
    ),
    # Show a plot of the generated distribution
    mainPanel(
      plotOutput("MarginalPlot")
    )
  ),
  tabPanel("Outlier Analysis", value = "OA",
    sidebarPanel(
      sliderInput(inputId = "pval", label = "Rejection P-Value", min=0, max=10, value=5, step = 1)
    ),
    mainPanel(
      plotOutput("Outliers")
    )
  ),
  tabPanel("Correlation Analysis", value = "CA",
    sidebarPanel(),
    mainPanel(
      plotOutput("Corr", hover = "plot_hover"),
      # Bug fix: the outputId was "hover$info", which can never match the
      # server's output$hover_info slot -- outputIds are plain strings, not
      # R expressions.
      verbatimTextOutput("hover_info")
    )
  ),
  tabPanel("Mean Vector", value = "MV",
    sidebarPanel(),
    mainPanel(
      plotOutput("Mean_o")
    )
  )
))
shinyServer(function(input, output) {
  # Build a ggplot of the marginal distribution of column `name` in `data`,
  # rendered as a histogram ("hist"), kernel density ("kd") or both ("comb").
  Marginals <- function(data, name, type){
    if (type == "hist"){
      # BUG FIX: title() is a base-graphics function and cannot be added to a
      # ggplot with `+` (it errors at render time); ggtitle() is the ggplot2
      # equivalent. Fixed in all three branches.
      p <- ggplot(data, aes_q(x = as.name(name))) + geom_histogram(fill = "deepskyblue2", alpha = 0.2, color = "white") + ggtitle("Marginal Distribution") + ylab('Counts')
    } else if (type == "kd"){
      p <- ggplot(data, aes_q(x = as.name(name))) + geom_density(fill = "blue" , alpha = 0.2) + ggtitle("Marginal Distribution") + ylab('Density')
    } else {
      p <- ggplot(data, aes_q(x = as.name(name))) + geom_histogram(aes(y = ..density..), fill = "deepskyblue2", color = "white", alpha = 0.2) + geom_density(fill = "blue" , alpha = 0.2) + ggtitle("Marginal Distribution") + ylab('Density')
    }
    p <- p + theme(text = element_text(size = 20))
  }
  # Flag multivariate outliers: Mahalanobis distance of each row against a
  # chi-square cutoff at probability (1 - cutoff_in / 100).
  Outliers <- function(data, cutoff_in){
    mahalanobis_dist <- mahalanobis(data, colMeans(data), cov(data), tol = 1e-20)
    cutoff <- qchisq(1 - cutoff_in / 100, dim(data)[2], ncp = 0, lower.tail = TRUE, log.p = FALSE)
    outlier <- mahalanobis_dist > cutoff
    df_outliers <- data.frame(x = seq_len(dim(data)[1]), y = log(sqrt(mahalanobis_dist)), z = outlier)
    p <- ggplot(df_outliers, aes(x = x, y = y))
    p <- p + geom_point(aes(colour = z)) + geom_abline(intercept = log(sqrt(cutoff)), slope = 0, linetype = "dashed", colour = "red") + labs(x = "Observation Number", y = "log(Mahalanobis Distances)", title = paste("Outlier Plot")) + scale_colour_manual(name = "Type", values = c("FALSE" = "blue", "TRUE" = "#FF0080"), breaks = c("TRUE", "FALSE"), labels = c("Outlier", "Inlier"))
    p <- p + theme(plot.title = element_text(vjust = 2), text = element_text(size = 20))
  }
  # Upper-triangle Pearson correlation heatmap over all columns.
  Correlation <- function(data){
    data_t <- data[, order(colnames(data))]   # alphabetical column order
    result <- cor(data_t)
    temp <- result
    temp[lower.tri(temp)] <- NA               # keep the upper triangle only
    temp <- melt(temp)
    temp <- na.omit(temp)
    p <- ggplot(temp, aes(X2, X1, fill = value)) + geom_tile(alpha = 0.5, colour = "white") + scale_fill_gradient2(low = "steelblue", high = "red", mid = "violet", midpoint = 0, limit = c(-1,1), name = "Pearson\ncorrelation\n")
    base_size <- 14
    p <- p + theme_grey(base_size = base_size) + labs(x = "", y = "") + scale_x_discrete(expand = c(0, 0)) + scale_y_discrete(expand = c(0, 0)) + ggtitle("Correlation Heatmap")
    p <- p + theme(axis.ticks = element_blank(), plot.title = element_text(vjust = 2), axis.text.x = element_blank(), axis.text.y = element_blank(), text = element_text(size = 20), legend.text = element_text(size = 20), legend.title = element_text(size = 20)) + guides(fill = guide_colorbar(barwidth = 2, barheight = 10, title.position = "top", title.vjust = 10))
    #+ geom_text(aes(X2, X1, label = round(value,2)), color = "black", size = 10)
  }
  # Column means with +/- one standard-error bars.
  Mean_Vectors <- function(data){
    num_vars <- dim(data)[2]
    output_mean <- vector(, num_vars)
    output_se <- vector(, num_vars)
    for (i in seq_len(num_vars)){
      output_mean[i] <- mean(data[, i], na.rm = TRUE)
      # BUG FIX: the standard error previously divided by the non-NA count of
      # column 3 for every variable; it must use the current column's count.
      output_se[i] <- sd(data[, i], na.rm = TRUE) / sqrt(sum(!is.na(data[, i])))
    }
    df <- data.frame(names = colnames(data), means = output_mean)
    limits <- aes(ymax = output_mean + output_se, ymin = output_mean - output_se)
    p <- ggplot(df, aes(x = names, y = means))
    p <- p + geom_point() + geom_errorbar(limits, width = 0.3) + ylab("Mean") + xlab("")
  }
  output$MarginalPlot <- renderPlot({
    p <- Marginals(data, input$col_names, input$show_type)
    print(p)
  })
  output$Outliers <- renderPlot({
    p <- Outliers(data, input$pval)
    print(p)
  })
  output$Corr <- renderPlot({
    p <- Correlation(data)
    print(p)
  })
  output$Mean_o <- renderPlot({
    p <- Mean_Vectors(data)
    print(p)
  })
  # Placeholder hover text for the correlation tab.
  output$hover_info <- renderPrint({
    cat("input$plot_hover:\n")
    str('hi')
  })
})
|
# Demo app: build a random graph and stream its nodes/edges into a sigmajs
# widget with per-element delays when the button is clicked.
library(shiny)
library(sigmajs)
ui <- fluidPage(
  fluidRow(
    column(3, actionButton("add", "add nodes & edges"))
  ),
  sigmajsOutput("sg", height = "100vh")
)
server <- function(input, output) {
  ids <- as.character(1:100) # pool of 100 node ids
  n <- 150 # number of edges
  # create edges with random delay FIRST; created_at is a cumulative
  # millisecond offset so edges appear one after another
  edges <- data.frame(
    id = 1:n,
    source = sample(ids, n, replace = TRUE),
    target = sample(ids, n, replace = TRUE),
    created_at = cumsum(ceiling(rnorm(n, 500, 50))),
    stringsAsFactors = FALSE
  )
  # get source and target endpoints, each with its edge's appearance time
  src <- dplyr::select(edges, id = source, created_at)
  tgt <- dplyr::select(edges, id = target, created_at)
  # nodes appear at their first edge appearance
  nodes <- src %>%
    dplyr::bind_rows(tgt) %>% # stack source/target endpoints into one table of "nodes"
    dplyr::group_by(id) %>% # earliest time each id is referenced by any edge
    dplyr::summarise(
      appear_at = min(created_at) - 1 # minus one millisecond so the node exists BEFORE any edge connecting to it
    ) %>%
    dplyr::ungroup() %>%
    dplyr::mutate( # add display attributes: labels, color and size
      label = sample(LETTERS, n(), replace = TRUE),
      size = runif(n(), 1, 5),
      color = colorRampPalette(c("#B1E2A3", "#98D3A5", "#328983", "#1C5C70", "#24C96B"))(n())
    )
  # initialise "empty" visualisation
  output$sg <- renderSigmajs({
    sigmajs(type = "webgl") %>% # use webgl renderer
      sg_force()
  })
  # on click, add nodes and edges according to their scheduled delays
  observeEvent(input$add, {
    sigmajsProxy("sg") %>%
      sg_add_nodes_delay_p(nodes, appear_at, id, label, size, color, cumsum = FALSE, refresh = TRUE) %>%
      sg_add_edges_delay_p(edges, created_at, id, source, target, cumsum = FALSE, refresh = TRUE)
  })
}
shinyApp(ui, server)
|
/demo/add-delay.R
|
permissive
|
takewiki/sigmajs
|
R
| false
| false
| 1,682
|
r
|
# Demo app: build a random graph and stream its nodes/edges into a sigmajs
# widget with per-element delays when the button is clicked.
library(shiny)
library(sigmajs)
ui <- fluidPage(
  fluidRow(
    column(3, actionButton("add", "add nodes & edges"))
  ),
  sigmajsOutput("sg", height = "100vh")
)
server <- function(input, output) {
  ids <- as.character(1:100) # pool of 100 node ids
  n <- 150 # number of edges
  # create edges with random delay FIRST; created_at is a cumulative
  # millisecond offset so edges appear one after another
  edges <- data.frame(
    id = 1:n,
    source = sample(ids, n, replace = TRUE),
    target = sample(ids, n, replace = TRUE),
    created_at = cumsum(ceiling(rnorm(n, 500, 50))),
    stringsAsFactors = FALSE
  )
  # get source and target endpoints, each with its edge's appearance time
  src <- dplyr::select(edges, id = source, created_at)
  tgt <- dplyr::select(edges, id = target, created_at)
  # nodes appear at their first edge appearance
  nodes <- src %>%
    dplyr::bind_rows(tgt) %>% # stack source/target endpoints into one table of "nodes"
    dplyr::group_by(id) %>% # earliest time each id is referenced by any edge
    dplyr::summarise(
      appear_at = min(created_at) - 1 # minus one millisecond so the node exists BEFORE any edge connecting to it
    ) %>%
    dplyr::ungroup() %>%
    dplyr::mutate( # add display attributes: labels, color and size
      label = sample(LETTERS, n(), replace = TRUE),
      size = runif(n(), 1, 5),
      color = colorRampPalette(c("#B1E2A3", "#98D3A5", "#328983", "#1C5C70", "#24C96B"))(n())
    )
  # initialise "empty" visualisation
  output$sg <- renderSigmajs({
    sigmajs(type = "webgl") %>% # use webgl renderer
      sg_force()
  })
  # on click, add nodes and edges according to their scheduled delays
  observeEvent(input$add, {
    sigmajsProxy("sg") %>%
      sg_add_nodes_delay_p(nodes, appear_at, id, label, size, color, cumsum = FALSE, refresh = TRUE) %>%
      sg_add_edges_delay_p(edges, created_at, id, source, target, cumsum = FALSE, refresh = TRUE)
  })
}
shinyApp(ui, server)
|
#' Real-time Rt Estimation, Forecasting and Reporting
#'
#' @description `r lifecycle::badge("maturing")`
#' This function wraps the functionality of `estimate_infections()` and `forecast_infections()` in order
#' to estimate Rt and cases by date of infection, and forecast these infections into the future. It also contains
#' additional functionality to convert forecasts to date of report and produce summary output useful for reporting
#' results and interpreting them. See [here](https://gist.github.com/seabbs/163d0f195892cde685c70473e1f5e867) for an
#' example of using `epinow` to estimate Rt for Covid-19 in a country from the ECDC data source.
#' @param output A character vector of optional output to return. Supported options are samples ("samples"),
#' plots ("plots"), the run time ("timing"), copying the dated folder into a latest folder (if `target_folder` is not null,
#' set using "latest"), and the stan fit ("fit"). The default is to return all options. This argument uses partial matching
#' so for example passing "sam" will lead to samples being reported.
#' @param return_output Logical, defaults to FALSE. Should output be returned, this automatically updates to TRUE
#' if no directory for saving is specified.
#' @param forecast_args A list of arguments to pass to `forecast_infections()`. Unless at a minimum a `forecast_model` is passed
#' then `forecast_infections` will be bypassed.
#' @return A list of output from estimate_infections, forecast_infections, report_cases, and report_summary.
#' @export
#' @seealso estimate_infections simulate_infections forecast_infections regional_epinow
#' @inheritParams setup_target_folder
#' @inheritParams estimate_infections
#' @inheritParams forecast_infections
#' @inheritParams setup_default_logging
#' @importFrom data.table setDT
#' @importFrom lubridate days
#' @importFrom futile.logger flog.fatal flog.warn flog.error flog.debug ftry
#' @importFrom rlang cnd_muffle
#' @examples
#' \donttest{
#' # set number of cores to use
#' options(mc.cores = ifelse(interactive(), 4, 1))
#' # construct example distributions
#' generation_time <- get_generation_time(disease = "SARS-CoV-2", source = "ganyani")
#' incubation_period <- get_incubation_period(disease = "SARS-CoV-2", source = "lauer")
#' reporting_delay <- list(
#' mean = convert_to_logmean(3, 1),
#' mean_sd = 0.1,
#' sd = convert_to_logsd(3, 1),
#' sd_sd = 0.1,
#' max = 10
#' )
#'
#' # example case data
#' reported_cases <- example_confirmed[1:40]
#'
#' # estimate Rt and nowcast/forecast cases by date of infection
#' out <- epinow(
#' reported_cases = reported_cases, generation_time = generation_time,
#' rt = rt_opts(prior = list(mean = 2, sd = 0.1)),
#' delays = delay_opts(incubation_period, reporting_delay)
#' )
#' # summary of the latest estimates
#' summary(out)
#' # plot estimates
#' plot(out)
#'
#' # summary of R estimates
#' summary(out, type = "parameters", params = "R")
#' }
epinow <- function(reported_cases,
                   generation_time,
                   delays = delay_opts(),
                   truncation = trunc_opts(),
                   rt = rt_opts(),
                   backcalc = backcalc_opts(),
                   gp = gp_opts(),
                   obs = obs_opts(),
                   stan = stan_opts(),
                   horizon = 7,
                   CrIs = c(0.2, 0.5, 0.9),
                   zero_threshold = 50,
                   return_output = FALSE,
                   output = c("samples", "plots", "latest", "fit", "timing"),
                   target_folder = NULL, target_date,
                   forecast_args = NULL, logs = tempdir(),
                   id = "epinow", verbose = interactive()) {
  # with no folder to save into, results must be returned to the caller
  if (is.null(target_folder)) {
    return_output <- TRUE
  }
  # validate credible intervals early. BUG FIX: these are scalar conditions,
  # so use the short-circuiting || rather than the vectorised | (which would
  # also error on zero-length operands in this chain).
  if (is.null(CrIs) || length(CrIs) == 0 || !is.numeric(CrIs)) {
    futile.logger::flog.fatal("At least one credible interval must be specified",
      name = "EpiNow2.epinow"
    )
    stop("At least one credible interval must be specified")
  }
  # check verbose settings and set logger to match---------------------------
  if (verbose) {
    futile.logger::flog.threshold(futile.logger::DEBUG,
      name = "EpiNow2.epinow"
    )
  }
  # target date ------------------------------------------------------------
  if (missing(target_date)) {
    target_date <- max(reported_cases$date, na.rm = TRUE)
  }
  # setup logging -----------------------------------------------------------
  setup_default_logging(
    logs = logs,
    target_date = target_date,
    mirror_epinow = TRUE
  )
  # setup input: resolve the requested outputs (partial matching) ------------
  output <- match_output_arguments(output,
    supported_args = c(
      "plots", "samples",
      "fit", "timing",
      "latest"
    ),
    logger = "EpiNow2.epinow",
    level = "debug"
  )
  # set up folders ----------------------------------------------------------
  target_folders <- setup_target_folder(target_folder, target_date)
  target_folder <- target_folders$date
  latest_folder <- target_folders$latest
  # internal worker, wrapped below for timing and error handling
  epinow_internal <- function() {
    # check verbose settings and set logger to match-------------------------
    if (verbose) {
      futile.logger::flog.threshold(futile.logger::DEBUG,
        name = "EpiNow2.epinow"
      )
    }
    # convert input to DT ---------------------------------------------------
    reported_cases <- setup_dt(reported_cases)
    # save input data -------------------------------------------------------
    save_input(reported_cases, target_folder)
    # make sure the horizon is as specified from the target date ------------
    horizon <- update_horizon(horizon, target_date, reported_cases)
    # estimate infections and reproduction number ---------------------------
    estimates <- estimate_infections(
      reported_cases = reported_cases,
      generation_time = generation_time,
      delays = delays,
      truncation = truncation,
      rt = rt,
      backcalc = backcalc,
      gp = gp,
      obs = obs,
      stan = stan,
      CrIs = CrIs,
      zero_threshold = zero_threshold,
      horizon = horizon,
      verbose = verbose,
      id = id
    )
    # drop the (large) stan fit unless explicitly requested
    if (!output["fit"]) {
      estimates$fit <- NULL
      estimates$args <- NULL
    }
    save_estimate_infections(estimates, target_folder,
      samples = output["samples"],
      return_fit = output["fit"]
    )
    # forecast infections and reproduction number (only when forecast_args
    # is supplied) ----------------------------------------------------------
    if (!is.null(forecast_args)) {
      forecast <- do.call(
        forecast_infections,
        c(
          list(
            infections = estimates$summarised[variable == "infections"][type != "forecast"][, type := NULL],
            rts = estimates$summarised[variable == "R"][type != "forecast"][, type := NULL],
            gt_mean = estimates$summarised[variable == "gt_mean"]$mean,
            gt_sd = estimates$summarised[variable == "gt_sd"]$mean,
            gt_max = generation_time$max,
            horizon = horizon,
            CrIs = CrIs
          ),
          forecast_args
        )
      )
      save_forecast_infections(forecast, target_folder, samples = output["samples"])
    } else {
      forecast <- NULL
    }
    # report forecasts by date of report ------------------------------------
    estimated_reported_cases <- estimates_by_report_date(estimates,
      forecast,
      delays = delays,
      target_folder = target_folder,
      samples = output["samples"],
      CrIs = CrIs
    )
    # report estimates ------------------------------------------------------
    summary <- summary.estimate_infections(estimates,
      return_numeric = TRUE,
      target_folder = target_folder
    )
    # plot ------------------------------------------------------------------
    if (output["plots"]) {
      plots <- plot.estimate_infections(estimates,
        type = "all",
        target_folder = target_folder
      )
    } else {
      plots <- NULL
    }
    if (return_output) {
      out <- construct_output(estimates,
        forecast,
        estimated_reported_cases,
        plots = plots,
        summary,
        samples = output["samples"]
      )
      return(out)
    } else {
      return(invisible(NULL))
    }
  }
  # run the worker with timing; warnings are logged and muffled, errors are
  # re-thrown for the top-level id but captured and returned otherwise
  start_time <- Sys.time()
  out <- tryCatch(withCallingHandlers(
    epinow_internal(),
    warning = function(w) {
      futile.logger::flog.warn("%s: %s - %s", id, w$message, toString(w$call),
        name = "EpiNow2.epinow"
      )
      rlang::cnd_muffle(w)
    }
  ),
  error = function(e) {
    if (id %in% "epinow") {
      stop(e)
    } else {
      error_text <- sprintf("%s: %s - %s", id, e$message, toString(e$call))
      futile.logger::flog.error(error_text,
        name = "EpiNow2.epinow"
      )
      rlang::cnd_muffle(e)
      return(list(error = error_text))
    }
  }
  )
  end_time <- Sys.time()
  if (!is.null(out$error)) {
    out$trace <- rlang::trace_back()
  }
  # persist error details when a folder is available. BUG FIX: scalar
  # condition, so use && rather than the vectorised &.
  if (!is.null(target_folder) && !is.null(out$error)) {
    saveRDS(out$error, paste0(target_folder, "/error.rds"))
    saveRDS(out$trace, paste0(target_folder, "/trace.rds"))
  }
  # log timing if specified
  if (output["timing"]) {
    out$timing <- round(as.numeric(end_time - start_time), 1)
    if (!is.null(target_folder)) {
      saveRDS(out$timing, paste0(target_folder, "/runtime.rds"))
    }
  }
  # copy all results to latest folder
  if (output["latest"]) {
    copy_results_to_latest(target_folder, latest_folder)
  }
  # return output
  if (return_output) {
    class(out) <- c("epinow", class(out))
    return(out)
  } else {
    return(invisible(NULL))
  }
}
|
/R/epinow.R
|
permissive
|
medewitt/EpiNow2
|
R
| false
| false
| 9,707
|
r
|
#' Real-time Rt Estimation, Forecasting and Reporting
#'
#' @description `r lifecycle::badge("maturing")`
#' This function wraps the functionality of `estimate_infections()` and `forecast_infections()` in order
#' to estimate Rt and cases by date of infection, and forecast these infections into the future. It also contains
#' additional functionality to convert forecasts to date of report and produce summary output useful for reporting
#' results and interpreting them. See [here](https://gist.github.com/seabbs/163d0f195892cde685c70473e1f5e867) for an
#' example of using `epinow` to estimate Rt for Covid-19 in a country from the ECDC data source.
#' @param output A character vector of optional output to return. Supported options are samples ("samples"),
#' plots ("plots"), the run time ("timing"), copying the dated folder into a latest folder (if `target_folder` is not null,
#' set using "latest"), and the stan fit ("fit"). The default is to return all options. This argument uses partial matching
#' so for example passing "sam" will lead to samples being reported.
#' @param return_output Logical, defaults to FALSE. Should output be returned, this automatically updates to TRUE
#' if no directory for saving is specified.
#' @param forecast_args A list of arguments to pass to `forecast_infections()`. Unless at a minimum a `forecast_model` is passed
#' then `forecast_infections` will be bypassed.
#' @return A list of output from estimate_infections, forecast_infections, report_cases, and report_summary.
#' @export
#' @seealso estimate_infections simulate_infections forecast_infections regional_epinow
#' @inheritParams setup_target_folder
#' @inheritParams estimate_infections
#' @inheritParams forecast_infections
#' @inheritParams setup_default_logging
#' @importFrom data.table setDT
#' @importFrom lubridate days
#' @importFrom futile.logger flog.fatal flog.warn flog.error flog.debug ftry
#' @importFrom rlang cnd_muffle
#' @examples
#' \donttest{
#' # set number of cores to use
#' options(mc.cores = ifelse(interactive(), 4, 1))
#' # construct example distributions
#' generation_time <- get_generation_time(disease = "SARS-CoV-2", source = "ganyani")
#' incubation_period <- get_incubation_period(disease = "SARS-CoV-2", source = "lauer")
#' reporting_delay <- list(
#' mean = convert_to_logmean(3, 1),
#' mean_sd = 0.1,
#' sd = convert_to_logsd(3, 1),
#' sd_sd = 0.1,
#' max = 10
#' )
#'
#' # example case data
#' reported_cases <- example_confirmed[1:40]
#'
#' # estimate Rt and nowcast/forecast cases by date of infection
#' out <- epinow(
#' reported_cases = reported_cases, generation_time = generation_time,
#' rt = rt_opts(prior = list(mean = 2, sd = 0.1)),
#' delays = delay_opts(incubation_period, reporting_delay)
#' )
#' # summary of the latest estimates
#' summary(out)
#' # plot estimates
#' plot(out)
#'
#' # summary of R estimates
#' summary(out, type = "parameters", params = "R")
#' }
epinow <- function(reported_cases,
                   generation_time,
                   delays = delay_opts(),
                   truncation = trunc_opts(),
                   rt = rt_opts(),
                   backcalc = backcalc_opts(),
                   gp = gp_opts(),
                   obs = obs_opts(),
                   stan = stan_opts(),
                   horizon = 7,
                   CrIs = c(0.2, 0.5, 0.9),
                   zero_threshold = 50,
                   return_output = FALSE,
                   output = c("samples", "plots", "latest", "fit", "timing"),
                   target_folder = NULL, target_date,
                   forecast_args = NULL, logs = tempdir(),
                   id = "epinow", verbose = interactive()) {
  # with no folder to save into, results must be returned to the caller
  if (is.null(target_folder)) {
    return_output <- TRUE
  }
  # validate credible intervals early. BUG FIX: these are scalar conditions,
  # so use the short-circuiting || rather than the vectorised | (which would
  # also error on zero-length operands in this chain).
  if (is.null(CrIs) || length(CrIs) == 0 || !is.numeric(CrIs)) {
    futile.logger::flog.fatal("At least one credible interval must be specified",
      name = "EpiNow2.epinow"
    )
    stop("At least one credible interval must be specified")
  }
  # check verbose settings and set logger to match---------------------------
  if (verbose) {
    futile.logger::flog.threshold(futile.logger::DEBUG,
      name = "EpiNow2.epinow"
    )
  }
  # target date ------------------------------------------------------------
  if (missing(target_date)) {
    target_date <- max(reported_cases$date, na.rm = TRUE)
  }
  # setup logging -----------------------------------------------------------
  setup_default_logging(
    logs = logs,
    target_date = target_date,
    mirror_epinow = TRUE
  )
  # setup input: resolve the requested outputs (partial matching) ------------
  output <- match_output_arguments(output,
    supported_args = c(
      "plots", "samples",
      "fit", "timing",
      "latest"
    ),
    logger = "EpiNow2.epinow",
    level = "debug"
  )
  # set up folders ----------------------------------------------------------
  target_folders <- setup_target_folder(target_folder, target_date)
  target_folder <- target_folders$date
  latest_folder <- target_folders$latest
  # internal worker, wrapped below for timing and error handling
  epinow_internal <- function() {
    # check verbose settings and set logger to match-------------------------
    if (verbose) {
      futile.logger::flog.threshold(futile.logger::DEBUG,
        name = "EpiNow2.epinow"
      )
    }
    # convert input to DT ---------------------------------------------------
    reported_cases <- setup_dt(reported_cases)
    # save input data -------------------------------------------------------
    save_input(reported_cases, target_folder)
    # make sure the horizon is as specified from the target date ------------
    horizon <- update_horizon(horizon, target_date, reported_cases)
    # estimate infections and reproduction number ---------------------------
    estimates <- estimate_infections(
      reported_cases = reported_cases,
      generation_time = generation_time,
      delays = delays,
      truncation = truncation,
      rt = rt,
      backcalc = backcalc,
      gp = gp,
      obs = obs,
      stan = stan,
      CrIs = CrIs,
      zero_threshold = zero_threshold,
      horizon = horizon,
      verbose = verbose,
      id = id
    )
    # drop the (large) stan fit unless explicitly requested
    if (!output["fit"]) {
      estimates$fit <- NULL
      estimates$args <- NULL
    }
    save_estimate_infections(estimates, target_folder,
      samples = output["samples"],
      return_fit = output["fit"]
    )
    # forecast infections and reproduction number (only when forecast_args
    # is supplied) ----------------------------------------------------------
    if (!is.null(forecast_args)) {
      forecast <- do.call(
        forecast_infections,
        c(
          list(
            infections = estimates$summarised[variable == "infections"][type != "forecast"][, type := NULL],
            rts = estimates$summarised[variable == "R"][type != "forecast"][, type := NULL],
            gt_mean = estimates$summarised[variable == "gt_mean"]$mean,
            gt_sd = estimates$summarised[variable == "gt_sd"]$mean,
            gt_max = generation_time$max,
            horizon = horizon,
            CrIs = CrIs
          ),
          forecast_args
        )
      )
      save_forecast_infections(forecast, target_folder, samples = output["samples"])
    } else {
      forecast <- NULL
    }
    # report forecasts by date of report ------------------------------------
    estimated_reported_cases <- estimates_by_report_date(estimates,
      forecast,
      delays = delays,
      target_folder = target_folder,
      samples = output["samples"],
      CrIs = CrIs
    )
    # report estimates ------------------------------------------------------
    summary <- summary.estimate_infections(estimates,
      return_numeric = TRUE,
      target_folder = target_folder
    )
    # plot ------------------------------------------------------------------
    if (output["plots"]) {
      plots <- plot.estimate_infections(estimates,
        type = "all",
        target_folder = target_folder
      )
    } else {
      plots <- NULL
    }
    if (return_output) {
      out <- construct_output(estimates,
        forecast,
        estimated_reported_cases,
        plots = plots,
        summary,
        samples = output["samples"]
      )
      return(out)
    } else {
      return(invisible(NULL))
    }
  }
  # run the worker with timing; warnings are logged and muffled, errors are
  # re-thrown for the top-level id but captured and returned otherwise
  start_time <- Sys.time()
  out <- tryCatch(withCallingHandlers(
    epinow_internal(),
    warning = function(w) {
      futile.logger::flog.warn("%s: %s - %s", id, w$message, toString(w$call),
        name = "EpiNow2.epinow"
      )
      rlang::cnd_muffle(w)
    }
  ),
  error = function(e) {
    if (id %in% "epinow") {
      stop(e)
    } else {
      error_text <- sprintf("%s: %s - %s", id, e$message, toString(e$call))
      futile.logger::flog.error(error_text,
        name = "EpiNow2.epinow"
      )
      rlang::cnd_muffle(e)
      return(list(error = error_text))
    }
  }
  )
  end_time <- Sys.time()
  if (!is.null(out$error)) {
    out$trace <- rlang::trace_back()
  }
  # persist error details when a folder is available. BUG FIX: scalar
  # condition, so use && rather than the vectorised &.
  if (!is.null(target_folder) && !is.null(out$error)) {
    saveRDS(out$error, paste0(target_folder, "/error.rds"))
    saveRDS(out$trace, paste0(target_folder, "/trace.rds"))
  }
  # log timing if specified
  if (output["timing"]) {
    out$timing <- round(as.numeric(end_time - start_time), 1)
    if (!is.null(target_folder)) {
      saveRDS(out$timing, paste0(target_folder, "/runtime.rds"))
    }
  }
  # copy all results to latest folder
  if (output["latest"]) {
    copy_results_to_latest(target_folder, latest_folder)
  }
  # return output
  if (return_output) {
    class(out) <- c("epinow", class(out))
    return(out)
  } else {
    return(invisible(NULL))
  }
}
|
# Read a Newick tree, remove its root, and write the unrooted tree back out.
library(ape)
testtree <- read.tree("9449_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9449_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/9449_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 135
|
r
|
# Read a Newick tree, remove its root, and write the unrooted tree back out.
library(ape)
testtree <- read.tree("9449_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9449_0_unrooted.txt")
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/readAnnotate.R
\docType{methods}
\name{getFlanks}
\alias{getFlanks}
\alias{getFlanks,GRanges-method}
\title{Function to get upstream and downstream adjacent regions to a genomic feature such as CpG islands}
\usage{
getFlanks(grange,flank=2000,clean=TRUE)
\S4method{getFlanks}{GRanges}(grange, flank = 2000, clean = TRUE)
}
\arguments{
\item{grange}{GRanges object for the feature}
\item{flank}{number of basepairs for the flanking regions}
\item{clean}{If set to TRUE, flanks overlapping with other main features
will be trimmed, and overlapping flanks will be removed.
This will remove multiple counts when other features overlap
with flanks}
}
\value{
GRanges object for flanking regions
}
\description{
Function to get upstream and downstream adjacent regions to a genomic feature such as CpG islands
}
\examples{
data(cpgi)
cpgi.flanks = getFlanks(cpgi)
head(cpgi.flanks)
}
|
/man/getFlanks-methods.Rd
|
no_license
|
al2na/genomation
|
R
| false
| false
| 1,010
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/readAnnotate.R
\docType{methods}
\name{getFlanks}
\alias{getFlanks}
\alias{getFlanks,GRanges-method}
\title{Function to get upstream and downstream adjacent regions to a genomic feature such as CpG islands}
\usage{
getFlanks(grange,flank=2000,clean=TRUE)
\S4method{getFlanks}{GRanges}(grange, flank = 2000, clean = TRUE)
}
\arguments{
\item{grange}{GRanges object for the feature}
\item{flank}{number of basepairs for the flanking regions}
\item{clean}{If set to TRUE, flanks overlapping with other main features
will be trimmed, and overlapping flanks will be removed.
This will remove multiple counts when other features overlap
with flanks}
}
\value{
GRanges object for flanking regions
}
\description{
Function to get upstream and downstream adjacent regions to a genomic feature such as CpG islands
}
\examples{
data(cpgi)
cpgi.flanks = getFlanks(cpgi)
head(cpgi.flanks)
}
|
#' Plot the W~Q curve at a measurement station (pegel)
#'
#' This function takes an mID and draws the waterlevel-discharge curve at its
#' location
#'
#' @param pegel Name of the measurement station (for the plot title)
#' @param ... other parameters to pass to the his_from_case function
#'
#' @return a ggplot2 graphic
#' @export
w_q_pegel <- function(
  pegel = NULL,
  ...
  # mID = NULL,
  # qID = NULL,
  # wID = NULL,
  # case.name = NULL
  # sobek.project = NULL,
){
  # read discharge and waterlevel time series for the same location/case
  qt <- his_from_case(..., param = 'discharge')
  wt <- his_from_case(..., param = 'waterlevel')
  colnames(qt) <- c('ts', 'Q', 'Legende')
  colnames(wt) <- c('ts', 'W', 'Legende')
  # pair W and Q values by timestamp and case legend
  qwt <- merge(qt, wt, by = c('ts', 'Legende'), sort = FALSE)
  qwt[, Legende := str_extract(Legende, 'HW\\d{4}_.{6}')]
  # drop near-baseflow points (within 5% of the minimum discharge)
  qwt <- qwt[Q > min(Q, na.rm = TRUE) * 1.05]
  q_min <- min(qwt$Q, na.rm = TRUE)
  q_max <- max(qwt$Q, na.rm = TRUE)
  g <- ggplot(data = qwt, aes(x = Q, y = W,
                              color = Legende,
                              shape = Legende)) +
    theme_bw() + theme(legend.position = 'bottom') +
    labs(title = paste('Wasserstand-Abfluss Kurven am Pegel:', pegel),
         # NOTE(review): qwt has no 'case' column after the merge above, so
         # this caption evaluates to NULL — confirm the intended column name.
         caption = unique(qwt$case)) +
    xlab('Abfluss (m³/s)') + ylab('Wasserstand (m+NHN)') +
    geom_point(size = 1) +
    # stat_smooth(method = 'glm', formula = y ~ poly(x, 2)) +
    scale_x_continuous(
      # BUG FIX: pretty() only needs the range endpoints; q_min:q_max built a
      # step-1 sequence over the whole discharge range (a large allocation for
      # wide ranges, and it truncates the fractional part of q_min).
      breaks = pretty(c(q_min, q_max), n = 10, min.n = 10)
    )
  g
}
|
/R/w_q_curve.R
|
no_license
|
dquang/sobekioNHWSP
|
R
| false
| false
| 1,545
|
r
|
#' Plot the W~Q curve at a measurement station (pegel)
#'
#' This function takes an mID and draws the waterlevel-discharge curve at its
#' location
#'
#' @param pegel Name of the measurement station (for the plot title)
#' @param ... other parameters to pass to the his_from_case function
#'
#' @return a ggplot2 graphic
#' @export
w_q_pegel <- function(
  pegel = NULL,
  ...
  # mID = NULL,
  # qID = NULL,
  # wID = NULL,
  # case.name = NULL
  # sobek.project = NULL,
){
  # read discharge and waterlevel time series for the same location/case
  qt <- his_from_case(..., param = 'discharge')
  wt <- his_from_case(..., param = 'waterlevel')
  colnames(qt) <- c('ts', 'Q', 'Legende')
  colnames(wt) <- c('ts', 'W', 'Legende')
  # pair W and Q values by timestamp and case legend
  qwt <- merge(qt, wt, by = c('ts', 'Legende'), sort = FALSE)
  qwt[, Legende := str_extract(Legende, 'HW\\d{4}_.{6}')]
  # drop near-baseflow points (within 5% of the minimum discharge)
  qwt <- qwt[Q > min(Q, na.rm = TRUE) * 1.05]
  q_min <- min(qwt$Q, na.rm = TRUE)
  q_max <- max(qwt$Q, na.rm = TRUE)
  g <- ggplot(data = qwt, aes(x = Q, y = W,
                              color = Legende,
                              shape = Legende)) +
    theme_bw() + theme(legend.position = 'bottom') +
    labs(title = paste('Wasserstand-Abfluss Kurven am Pegel:', pegel),
         # NOTE(review): qwt has no 'case' column after the merge above, so
         # this caption evaluates to NULL — confirm the intended column name.
         caption = unique(qwt$case)) +
    xlab('Abfluss (m³/s)') + ylab('Wasserstand (m+NHN)') +
    geom_point(size = 1) +
    # stat_smooth(method = 'glm', formula = y ~ poly(x, 2)) +
    scale_x_continuous(
      # BUG FIX: pretty() only needs the range endpoints; q_min:q_max built a
      # step-1 sequence over the whole discharge range (a large allocation for
      # wide ranges, and it truncates the fractional part of q_min).
      breaks = pretty(c(q_min, q_max), n = 10, min.n = 10)
    )
  g
}
|
# Install rjson only when it is missing; calling install.packages() on every
# run is slow, needs network access, and may fail in non-interactive sessions.
if (!requireNamespace("rjson", quietly = TRUE)) {
  install.packages("rjson")
}
library("rjson")
# Parse two JSON files into nested R lists.
result <- fromJSON(file = "test2.json")
result2 <- fromJSON(file = "freeb.json")
# Print the result.
print(result2)
# Extract the id of the 10th business from the first file.
result$businesses[[10]]$id
|
/project.R
|
no_license
|
TokyoExpress/kickstarter-analysis
|
R
| false
| false
| 203
|
r
|
# Install rjson only when it is missing; calling install.packages() on every
# run is slow, needs network access, and may fail in non-interactive sessions.
if (!requireNamespace("rjson", quietly = TRUE)) {
  install.packages("rjson")
}
library("rjson")
# Parse two JSON files into nested R lists.
result <- fromJSON(file = "test2.json")
result2 <- fromJSON(file = "freeb.json")
# Print the result.
print(result2)
# Extract the id of the 10th business from the first file.
result$businesses[[10]]$id
|
library(downloader)
# BUG FIX: filter()/select() and %>% below come from dplyr, which was never
# attached (only downloader and, later, rafalib are); load it explicitly so
# the script runs on its own.
library(dplyr)
url <- "https://raw.githubusercontent.com/genomicsclass/dagdata/master/inst/extdata/mice_pheno.csv"
filename <- basename(url)
download(url, destfile=filename)
dat <- na.omit( read.csv(filename) )
############################################################
## If a list of numbers has a distribution that is well
## approximated by the normal distribution, what proportion
## of these numbers are within one standard deviation away
## from the list's average?
## Hint: Use the pnorm() function.
answer <- 68
############################################################
## What proportion of these numbers are within two
## standard deviations away from the list's average?
95
############################################################
## What proportion of these numbers are within three
## standard deviations away from the list's average?
99
############################################################
## Define y to be the weights of males on the control diet.
## What proportion of the mice are within one standard
## deviation away from the average weight?
## Remember to use popsd() from rafalib for the
## population standard deviation.
library(rafalib)
y <- filter(dat, Sex == "M") %>% select(Bodyweight, Diet)
y <- filter(y, Diet == "chow")
ymean <- mean(y$Bodyweight)
ysd <- popsd(y$Bodyweight)
ylow <- ymean - ysd
yhigh <- ymean + ysd
yprop <- (nrow(filter(y, Bodyweight <= yhigh,
                      Bodyweight >= ylow))
          / nrow(y))
############################################################
## What proportion of these numbers are within two standard
## deviations away from the list's average?
ylow <- ymean - 2*ysd
yhigh <- ymean + 2*ysd
yprop <- (nrow(filter(y, Bodyweight <= yhigh,
                      Bodyweight >= ylow))
          / nrow(y))
############################################################
## What proportion of these numbers are within three
## standard deviations away from the list's average?
ylow <- ymean - 3*ysd
yhigh <- ymean + 3*ysd
yprop <- (nrow(filter(y, Bodyweight <= yhigh,
                      Bodyweight >= ylow))
          / nrow(y))
############################################################
## Note that the numbers for the normal distribution and
## our weights are relatively close. Also, notice that we
## are indirectly comparing quantiles of the normal
## distribution to quantiles of the mouse weight
## distribution. We can actually compare all quantiles
## using a qqplot.
y <- filter(dat, Sex=="M" & Diet=="chow") %>% select(Bodyweight) %>% unlist
z <- ( y - mean(y) ) / popsd(y)
## ^^ should have been used for above answers, FYI
qqnorm(z)
abline(0,1)
############################################################
## Here we are going to use the function replicate() to
## learn about the distribution of random variables.
## All the above exercises relate to the normal distribution
## as an approximation of the distribution of a fixed list
## of numbers or a population. We have not yet discussed
## probability in these exercises. If the distribution of
## a list of numbers is approximately normal, then if we
## pick a number at random from this distribution, it will
## follow a normal distribution. However, it is important
## to remember that stating that some quantity has a
## distribution does not necessarily imply this quantity
## is random. Also, keep in mind that this is not related
## to the central limit theorem. The central limit applies
## to averages of random variables. Let's explore this
## concept.
## We will now take a sample of size 25 from the population
## of males on the chow diet. The average of this sample
## is our random variable. We will use the replicate()
## function to observe 10,000 realizations of this random
## variable. Set the seed at 1, then generate these 10,000
## averages. Make a histogram and qq-plot of these 10,000
## numbers against the normal distribution.
## We can see that, as predicted by the CLT, the
## distribution of the random variable is very well
## approximated by the normal distribution.
y <- filter(dat, Sex=="M" & Diet=="chow") %>% select(Bodyweight) %>% unlist
set.seed(1)
avgs <- replicate(10000, mean( sample(y, 25)))
mypar(1,2)
hist(avgs)
qqnorm(avgs)
qqline(avgs)
## What is the average of the distribution of the sample average?
mean(avgs)
############################################################
## What is the standard deviation of the distribution of
## sample averages (use popsd())?
popsd(avgs)
|
/statsandr/cltexercises.r
|
no_license
|
whatisakeagan/learnin
|
R
| false
| false
| 4,536
|
r
|
## Central Limit Theorem exercises on the dagdata mouse phenotype data.
## Packages used: downloader (download), dplyr (filter/select/%>%),
## rafalib (popsd, mypar).
library(downloader)
library(dplyr)  # FIX: filter()/select()/%>% below are dplyr verbs; without
                # attaching dplyr, filter() dispatches to stats::filter(),
                # which does not know the Sex/Diet columns and errors.
url <- "https://raw.githubusercontent.com/genomicsclass/dagdata/master/inst/extdata/mice_pheno.csv"
filename <- basename(url)
download(url, destfile = filename)
# Drop rows with missing values so mean()/popsd() need no na.rm handling.
dat <- na.omit(read.csv(filename))
############################################################
## If a list of numbers has a distribution that is well
## approximated by the normal distribution, what proportion
## of these numbers are within one standard deviation away
## from the list's average?
## Hint: Use the pnorm() function.
answer <- 68  # percent: pnorm(1) - pnorm(-1) ~= 0.683
############################################################
## What proportion of these numbers are within two
## standard deviations away from the list's average?
95  # percent: pnorm(2) - pnorm(-2) ~= 0.954
############################################################
## What proportion of these numbers are within three
## standard deviations away from the list's average?
99  # percent: pnorm(3) - pnorm(-3) ~= 0.997
############################################################
## Define y to be the weights of males on the control diet.
## What proportion of the mice are within one standard
## deviation away from the average weight?
## Remember to use popsd() from rafalib for the
## population standard deviation.
library(rafalib)
y <- filter(dat, Sex == "M") %>% select(Bodyweight, Diet)
y <- filter(y, Diet == "chow")
ymean <- mean(y$Bodyweight)
ysd <- popsd(y$Bodyweight)
# One-standard-deviation band around the mean.
ylow <- ymean - ysd
yhigh <- ymean + ysd
# Fraction of mice whose weight falls inside the band.
yprop <- (nrow(filter(y, Bodyweight <= yhigh,
                      Bodyweight >= ylow))
          / nrow(y))
############################################################
## What proportion of these numbers are within two standard
## deviations away from the list's average?
ylow <- ymean - 2 * ysd
yhigh <- ymean + 2 * ysd
yprop <- (nrow(filter(y, Bodyweight <= yhigh,
                      Bodyweight >= ylow))
          / nrow(y))
############################################################
## What proportion of these numbers are within three
## standard deviations away from the list's average?
ylow <- ymean - 3 * ysd
yhigh <- ymean + 3 * ysd
yprop <- (nrow(filter(y, Bodyweight <= yhigh,
                      Bodyweight >= ylow))
          / nrow(y))
############################################################
## Note that the numbers for the normal distribution and
## our weights are relatively close. Also, notice that we
## are indirectly comparing quantiles of the normal
## distribution to quantiles of the mouse weight
## distribution. We can actually compare all quantiles
## using a qqplot.
y <- filter(dat, Sex == "M" & Diet == "chow") %>% select(Bodyweight) %>% unlist
# Standardize with the population SD so z is comparable to N(0, 1).
z <- (y - mean(y)) / popsd(y)
## ^^ should have been used for above answers, FYI
qqnorm(z)
abline(0, 1)
############################################################
## Here we are going to use the function replicate() to
## learn about the distribution of random variables.
## All the above exercises relate to the normal distribution
## as an approximation of the distribution of a fixed list
## of numbers or a population. We have not yet discussed
## probability in these exercises. If the distribution of
## a list of numbers is approximately normal, then if we
## pick a number at random from this distribution, it will
## follow a normal distribution. However, it is important
## to remember that stating that some quantity has a
## distribution does not necessarily imply this quantity
## is random. Also, keep in mind that this is not related
## to the central limit theorem. The central limit applies
## to averages of random variables. Let's explore this
## concept.
## We will now take a sample of size 25 from the population
## of males on the chow diet. The average of this sample
## is our random variable. We will use the replicate()
## function to observe 10,000 realizations of this random
## variable. Set the seed at 1, then generate these 10,000
## averages. Make a histogram and qq-plot of these 10,000
## numbers against the normal distribution.
## We can see that, as predicted by the CLT, the
## distribution of the random variable is very well
## approximated by the normal distribution.
y <- filter(dat, Sex == "M" & Diet == "chow") %>% select(Bodyweight) %>% unlist
set.seed(1)  # fixed seed so the 10,000 sample means are reproducible
avgs <- replicate(10000, mean(sample(y, 25)))
mypar(1, 2)  # rafalib layout helper: 1 row, 2 columns
hist(avgs)
qqnorm(avgs)
qqline(avgs)
## What is the average of the distribution of the sample average?
mean(avgs)
############################################################
## What is the standard deviation of the distribution of
## sample averages (use popsd())?
popsd(avgs)
|
## Annotate the infection phosphoproteome (Perseus output) with
## PhosphoSitePlus disease-associated / regulatory site information,
## then save the merged table as an Rdata file.
library(data.table)
# PhosphoSitePlus reference table; put the residue id column first.
phosphosites_final = fread("../../Data/External/Phosphosite/Phopshosites_diseases_and_regulatory.txt", sep="\t")
setorder(phosphosites_final, prot_residue_id)
setcolorder(phosphosites_final, c(ncol(phosphosites_final), 1:(ncol(phosphosites_final)-1)) )
# Raw Perseus export, gzip-compressed; stream-decompress through fread().
full_data_file = "../../Data/Raw/Proteome/Perseus_rep2-3_20150330.txt.gz"
data = fread(paste("gzip -dc",full_data_file),sep="\t",header=TRUE)
# Columns from 100 on are kept aside (gene-name columns); the main table
# then drops duplicated column names.
tmp = data[,100:ncol(data),with=FALSE]
data = data[,!duplicated(colnames(data)), with=FALSE]
# Count in how many of the four phospho experiments the site was detected.
data$phosphorylation_detected_cnt = apply(data[,c("Nuc_rep2_phospho", "Nuc_rep3_phospho", "Total_rep2_phospho", "Total_rep3_phospho"),with=FALSE], 1, function(x) sum(!is.nan(x)) )
# Collapse the per-row gene names into a unique ";"-separated string.
data$Gene_names = apply(tmp, 1, function(x) paste(unique(x[which((!is.na(x) & x!=""))]), collapse=";") )
setorder(data, "Protein", "Sequence window","Modification window")
# we usually find several protein identifiers per identified site. Only one or few will match Phopshosite plus data base IDs
# We therefore assume that the group of possible proteins for that site can be represented by the matching proteins
# from Phosphosite plus (which is usually the reviewed protein variant for a gene from SwissProt).
# so here we first expand data so that each row contains a single protein accession
prot_and_pos = data[,c("Proteins","Positions within proteins","PhosphoSitePlus window","Unique identifier"),with=FALSE]
uid = prot_and_pos$"Unique identifier"
prot_split = strsplit(data$Proteins,";")
pos_split = strsplit(data$"Positions within proteins",";")
# FIX: use lengths() instead of sapply(..., length) (type-stable), and
# compare the logical vector directly instead of any(which(...)), which
# coerced integer indices to logical.
prot_cnts = lengths(prot_split)
pos_cnts = lengths(pos_split)
if (any(prot_cnts != pos_cnts)) stop("Conflicting protein ID and position numbers")
# One row per (uid, protein accession, position); the PhosphoSitePlus
# window is replicated across the expanded protein ids.
expanded_prot_ids = data.table(uid=rep(uid, times=prot_cnts), prot_id = unlist(prot_split), pos = unlist(pos_split), ppsite_window = rep(prot_and_pos$"PhosphoSitePlus window", times=prot_cnts) )
# unfortunately, there might also be more than one phosphosite IDs - will have to split them, too
# Empty splits become "" so rep()/unlist() below keep one row per entry.
ppsite_ids_split = lapply(strsplit(expanded_prot_ids$ppsite_window,";"), function(x) if(length(x)==0) {""} else {x} )
ppsite_ids_len = lengths(ppsite_ids_split)
expanded_prots_ids_and_site_windows = data.table(uid = rep(expanded_prot_ids$uid, times=ppsite_ids_len),
prot_id = rep(expanded_prot_ids$prot_id, times=ppsite_ids_len ),
pos = rep(expanded_prot_ids$pos, times=ppsite_ids_len ),
ppsite_window = unlist(ppsite_ids_split) )
setkey(expanded_prots_ids_and_site_windows, "prot_id","ppsite_window", "pos")
# Residue position = MOD_RSD without the leading amino-acid letter.
phosphosites_final$pos = substr(phosphosites_final$MOD_RSD,2,nchar(phosphosites_final$MOD_RSD))
setkey(phosphosites_final, "ACC_ID","SITE_ID", "pos")
# merge protein site IDs with phosphosite plus data base
pp = expanded_prots_ids_and_site_windows[phosphosites_final, allow.cartesian=TRUE]
# uids matching more than one PhosphoSitePlus entry (inspection aid).
mpp = pp[uid %in% names(which(table(pp$uid)>1))]
setorder(mpp, "uid","prot_id","pos")
# some UIDs match to several entries in phosphosite. Aggregate them all on one line
pp_ts = melt(pp, id.vars = "uid")
pp_sw = dcast.data.table(pp_ts, uid ~ variable, fun.aggregate=function(x) paste(unique(x[which(!is.na(x))]), collapse=";"), value.var="value")
# now finally merge the original data table
setkey(pp_sw, "uid")
setkey(data, "Unique identifier")
data_merged = pp_sw[data]
setcolorder(data_merged, c(colnames(data)[!colnames(data)=="Unique identifier"], colnames(pp_sw)))
setorder(data_merged, "Leading proteins", "pos", "Charge")
save(data_merged, file= "../../Data/Processed/Proteome/Phosphoproteome_infection_annotated_with_phopsphosite_plus.Rdata")
|
/Code/Phophoproteome/PreparePhosphoproteomeData.R
|
permissive
|
MPIIB-Department-TFMeyer/Zadora_et_al_Phosphoproteome
|
R
| false
| false
| 3,769
|
r
|
## Annotate the infection phosphoproteome (Perseus output) with
## PhosphoSitePlus disease-associated / regulatory site information,
## then save the merged table as an Rdata file.
library(data.table)
# PhosphoSitePlus reference table; put the residue id column first.
phosphosites_final = fread("../../Data/External/Phosphosite/Phopshosites_diseases_and_regulatory.txt", sep="\t")
setorder(phosphosites_final, prot_residue_id)
setcolorder(phosphosites_final, c(ncol(phosphosites_final), 1:(ncol(phosphosites_final)-1)) )
# Raw Perseus export, gzip-compressed; stream-decompress through fread().
full_data_file = "../../Data/Raw/Proteome/Perseus_rep2-3_20150330.txt.gz"
data = fread(paste("gzip -dc",full_data_file),sep="\t",header=TRUE)
# Columns from 100 on are kept aside (gene-name columns); the main table
# then drops duplicated column names.
tmp = data[,100:ncol(data),with=FALSE]
data = data[,!duplicated(colnames(data)), with=FALSE]
# Count in how many of the four phospho experiments the site was detected.
data$phosphorylation_detected_cnt = apply(data[,c("Nuc_rep2_phospho", "Nuc_rep3_phospho", "Total_rep2_phospho", "Total_rep3_phospho"),with=FALSE], 1, function(x) sum(!is.nan(x)) )
# Collapse the per-row gene names into a unique ";"-separated string.
data$Gene_names = apply(tmp, 1, function(x) paste(unique(x[which((!is.na(x) & x!=""))]), collapse=";") )
setorder(data, "Protein", "Sequence window","Modification window")
# we usually find several protein identifiers per identified site. Only one or few will match Phopshosite plus data base IDs
# We therefore assume that the group of possible proteins for that site can be represented by the matching proteins
# from Phosphosite plus (which is usually the reviewed protein variant for a gene from SwissProt).
# so here we first expand data so that each row contains a single protein accession
prot_and_pos = data[,c("Proteins","Positions within proteins","PhosphoSitePlus window","Unique identifier"),with=FALSE]
uid = prot_and_pos$"Unique identifier"
prot_split = strsplit(data$Proteins,";")
pos_split = strsplit(data$"Positions within proteins",";")
# FIX: use lengths() instead of sapply(..., length) (type-stable), and
# compare the logical vector directly instead of any(which(...)), which
# coerced integer indices to logical.
prot_cnts = lengths(prot_split)
pos_cnts = lengths(pos_split)
if (any(prot_cnts != pos_cnts)) stop("Conflicting protein ID and position numbers")
# One row per (uid, protein accession, position); the PhosphoSitePlus
# window is replicated across the expanded protein ids.
expanded_prot_ids = data.table(uid=rep(uid, times=prot_cnts), prot_id = unlist(prot_split), pos = unlist(pos_split), ppsite_window = rep(prot_and_pos$"PhosphoSitePlus window", times=prot_cnts) )
# unfortunately, there might also be more than one phosphosite IDs - will have to split them, too
# Empty splits become "" so rep()/unlist() below keep one row per entry.
ppsite_ids_split = lapply(strsplit(expanded_prot_ids$ppsite_window,";"), function(x) if(length(x)==0) {""} else {x} )
ppsite_ids_len = lengths(ppsite_ids_split)
expanded_prots_ids_and_site_windows = data.table(uid = rep(expanded_prot_ids$uid, times=ppsite_ids_len),
prot_id = rep(expanded_prot_ids$prot_id, times=ppsite_ids_len ),
pos = rep(expanded_prot_ids$pos, times=ppsite_ids_len ),
ppsite_window = unlist(ppsite_ids_split) )
setkey(expanded_prots_ids_and_site_windows, "prot_id","ppsite_window", "pos")
# Residue position = MOD_RSD without the leading amino-acid letter.
phosphosites_final$pos = substr(phosphosites_final$MOD_RSD,2,nchar(phosphosites_final$MOD_RSD))
setkey(phosphosites_final, "ACC_ID","SITE_ID", "pos")
# merge protein site IDs with phosphosite plus data base
pp = expanded_prots_ids_and_site_windows[phosphosites_final, allow.cartesian=TRUE]
# uids matching more than one PhosphoSitePlus entry (inspection aid).
mpp = pp[uid %in% names(which(table(pp$uid)>1))]
setorder(mpp, "uid","prot_id","pos")
# some UIDs match to several entries in phosphosite. Aggregate them all on one line
pp_ts = melt(pp, id.vars = "uid")
pp_sw = dcast.data.table(pp_ts, uid ~ variable, fun.aggregate=function(x) paste(unique(x[which(!is.na(x))]), collapse=";"), value.var="value")
# now finally merge the original data table
setkey(pp_sw, "uid")
setkey(data, "Unique identifier")
data_merged = pp_sw[data]
setcolorder(data_merged, c(colnames(data)[!colnames(data)=="Unique identifier"], colnames(pp_sw)))
setorder(data_merged, "Leading proteins", "pos", "Charge")
save(data_merged, file= "../../Data/Processed/Proteome/Phosphoproteome_infection_annotated_with_phopsphosite_plus.Rdata")
|
# Tests for the plotting helpers (mid_breaks, plot_ts).
# Fixtures: small RDS files stored next to the tests, filtered to one id.
context("plotting")
daily_data <- readRDS("daily_averages.rds")
daily_data <- daily_data[daily_data$id == "a", , drop = FALSE]
annual_data <- readRDS("annual_98_percentiles.rds")
annual_data <- annual_data[annual_data$id == "a", , drop = FALSE]
test_that("mid_breaks works", {
  expect_is(mid_breaks(), "function")
  fn <- mid_breaks()
  # Mid-year breaks for a five-year span.
  expect_equal(fn(as.Date(c("2005-01-01", "2010-01-01"))),
               as.Date(c("2005-07-02", "2006-07-02", "2007-07-02", "2008-07-02",
                         "2009-07-02", "2010-07-02")))
  # More than two endpoints is an error.
  expect_error(fn(as.Date(c("2005-01-01", "2010-01-01", "2015-01-01"))))
  fn <- mid_breaks(1)
  # Non-Date input is an error.
  expect_error(fn(1:3))
})
test_that("plot_ts fails correctly", {
  names(daily_data)[2:3] <- c("date", "avg_24h")
  # Invalid parameter name
  expect_error(plot_ts(daily_data, caaqs_data = NULL, parameter = "pm2.524h",
                       rep_yr = 2013, plot_exceedances = FALSE))
  # Wrong name for date column
  names(daily_data)[2:3] <- c("foo", "avg_24h")
  expect_error(plot_ts(daily_data, caaqs_data = NULL, parameter = "pm2.5_24h",
                       rep_yr = 2013, plot_exceedances = FALSE))
  # Wrong name for parameter column
  names(daily_data)[2:3] <- c("date", "foo")
  expect_error(plot_ts(daily_data, caaqs_data = NULL, parameter = "pm2.5_24h",
                       rep_yr = 2013, plot_exceedances = FALSE))
  # Wrong data formats
  names(daily_data) <- c("date", "foo", "avg_24h", "n_readings") # date is character
  expect_error(plot_ts(daily_data, caaqs_data = NULL, parameter = "pm2.5_24h",
                       rep_yr = 2013, plot_exceedances = FALSE))
  names(daily_data) <- c("avg_24h", "date", "foo", "n_readings") # avg_24h is character
  expect_error(plot_ts(daily_data, caaqs_data = NULL, parameter = "pm2.5_24h",
                       rep_yr = 2013, plot_exceedances = FALSE))
})
test_that("plot_ts works without caaqs_data (ozone)", {
  names(daily_data)[2:3] <- c("date", "max8hr")
  p <- plot_ts(daily_data, caaqs_data = NULL, parameter = "o3",
               rep_yr = 2013, plot_exceedances = FALSE)
  expect_is(p, "ggplot")
  expect_is(ggplot2::ggplot_build(p), "list")
})
# test_that("works with caaqs_data (ozone)", {
#   names(daily_data) <- c("id", "date", "max8hr", "n_readings")
#   caaqs_data <- pm_24h_caaq(annual_data)
#   p <- plot_ts(daily_data, caaqs_data = caaqs_data, parameter = "o3",
#                rep_yr = 2013, plot_exceedances = FALSE)
#   expect_is(p, "ggplot")
#   expect_is(ggplot2::ggplot_build(p), "list")
# })
test_that("plot_ts works without caaqs_data (pm_24h)", {
  names(daily_data)[2:3] <- c("date", "avg_24h")
  p <- plot_ts(daily_data, caaqs_data = NULL, parameter = "pm2.5_24h",
               rep_yr = 2013, plot_exceedances = FALSE)
  expect_is(p, "ggplot")
  expect_is(ggplot2::ggplot_build(p), "list")
})
test_that("works with caaqs_data (pm24h)", {
  names(daily_data) <- c("id", "date", "avg_24h", "n_readings")
  caaqs_data <- pm_24h_caaq(annual_data)
  p <- plot_ts(daily_data, caaqs_data = caaqs_data, parameter = "pm2.5_24h",
               rep_yr = 2013, plot_exceedances = FALSE)
  expect_is(p, "ggplot")
  expect_is(ggplot2::ggplot_build(p), "list")
})
test_that("plot_ts works without caaqs_data (pm_annual)", {
  names(daily_data)[2:3] <- c("date", "avg_24h")
  p <- plot_ts(daily_data, caaqs_data = NULL, parameter = "pm2.5_annual",
               rep_yr = 2013, plot_exceedances = FALSE)
  expect_is(p, "ggplot")
  expect_is(ggplot2::ggplot_build(p), "list")
})
test_that("works with caaqs_data (pm_annual)", {
  names(daily_data) <- c("id", "date", "avg_24h", "n_readings")
  names(annual_data)[3] <- "ann_avg"
  # FIX: removed stray debugonce(pm_annual_caaq) left over from interactive
  # debugging -- it would drop automated test runs into the browser.
  caaqs_data <- pm_annual_caaq(annual_data)
  p <- plot_ts(daily_data, caaqs_data = caaqs_data,
               parameter = "pm2.5_annual", rep_yr = 2013, plot_exceedances = FALSE)
  expect_is(p, "ggplot")
  expect_is(ggplot2::ggplot_build(p), "list")
})
|
/tests/testthat/test-plot.R
|
permissive
|
nograpes/rcaaqs
|
R
| false
| false
| 3,943
|
r
|
# Tests for the plotting helpers (mid_breaks, plot_ts).
# Fixtures: small RDS files stored next to the tests, filtered to one id.
context("plotting")
daily_data <- readRDS("daily_averages.rds")
daily_data <- daily_data[daily_data$id == "a", , drop = FALSE]
annual_data <- readRDS("annual_98_percentiles.rds")
annual_data <- annual_data[annual_data$id == "a", , drop = FALSE]
test_that("mid_breaks works", {
  expect_is(mid_breaks(), "function")
  fn <- mid_breaks()
  # Mid-year breaks for a five-year span.
  expect_equal(fn(as.Date(c("2005-01-01", "2010-01-01"))),
               as.Date(c("2005-07-02", "2006-07-02", "2007-07-02", "2008-07-02",
                         "2009-07-02", "2010-07-02")))
  # More than two endpoints is an error.
  expect_error(fn(as.Date(c("2005-01-01", "2010-01-01", "2015-01-01"))))
  fn <- mid_breaks(1)
  # Non-Date input is an error.
  expect_error(fn(1:3))
})
test_that("plot_ts fails correctly", {
  names(daily_data)[2:3] <- c("date", "avg_24h")
  # Invalid parameter name
  expect_error(plot_ts(daily_data, caaqs_data = NULL, parameter = "pm2.524h",
                       rep_yr = 2013, plot_exceedances = FALSE))
  # Wrong name for date column
  names(daily_data)[2:3] <- c("foo", "avg_24h")
  expect_error(plot_ts(daily_data, caaqs_data = NULL, parameter = "pm2.5_24h",
                       rep_yr = 2013, plot_exceedances = FALSE))
  # Wrong name for parameter column
  names(daily_data)[2:3] <- c("date", "foo")
  expect_error(plot_ts(daily_data, caaqs_data = NULL, parameter = "pm2.5_24h",
                       rep_yr = 2013, plot_exceedances = FALSE))
  # Wrong data formats
  names(daily_data) <- c("date", "foo", "avg_24h", "n_readings") # date is character
  expect_error(plot_ts(daily_data, caaqs_data = NULL, parameter = "pm2.5_24h",
                       rep_yr = 2013, plot_exceedances = FALSE))
  names(daily_data) <- c("avg_24h", "date", "foo", "n_readings") # avg_24h is character
  expect_error(plot_ts(daily_data, caaqs_data = NULL, parameter = "pm2.5_24h",
                       rep_yr = 2013, plot_exceedances = FALSE))
})
test_that("plot_ts works without caaqs_data (ozone)", {
  names(daily_data)[2:3] <- c("date", "max8hr")
  p <- plot_ts(daily_data, caaqs_data = NULL, parameter = "o3",
               rep_yr = 2013, plot_exceedances = FALSE)
  expect_is(p, "ggplot")
  expect_is(ggplot2::ggplot_build(p), "list")
})
# test_that("works with caaqs_data (ozone)", {
#   names(daily_data) <- c("id", "date", "max8hr", "n_readings")
#   caaqs_data <- pm_24h_caaq(annual_data)
#   p <- plot_ts(daily_data, caaqs_data = caaqs_data, parameter = "o3",
#                rep_yr = 2013, plot_exceedances = FALSE)
#   expect_is(p, "ggplot")
#   expect_is(ggplot2::ggplot_build(p), "list")
# })
test_that("plot_ts works without caaqs_data (pm_24h)", {
  names(daily_data)[2:3] <- c("date", "avg_24h")
  p <- plot_ts(daily_data, caaqs_data = NULL, parameter = "pm2.5_24h",
               rep_yr = 2013, plot_exceedances = FALSE)
  expect_is(p, "ggplot")
  expect_is(ggplot2::ggplot_build(p), "list")
})
test_that("works with caaqs_data (pm24h)", {
  names(daily_data) <- c("id", "date", "avg_24h", "n_readings")
  caaqs_data <- pm_24h_caaq(annual_data)
  p <- plot_ts(daily_data, caaqs_data = caaqs_data, parameter = "pm2.5_24h",
               rep_yr = 2013, plot_exceedances = FALSE)
  expect_is(p, "ggplot")
  expect_is(ggplot2::ggplot_build(p), "list")
})
test_that("plot_ts works without caaqs_data (pm_annual)", {
  names(daily_data)[2:3] <- c("date", "avg_24h")
  p <- plot_ts(daily_data, caaqs_data = NULL, parameter = "pm2.5_annual",
               rep_yr = 2013, plot_exceedances = FALSE)
  expect_is(p, "ggplot")
  expect_is(ggplot2::ggplot_build(p), "list")
})
test_that("works with caaqs_data (pm_annual)", {
  names(daily_data) <- c("id", "date", "avg_24h", "n_readings")
  names(annual_data)[3] <- "ann_avg"
  # FIX: removed stray debugonce(pm_annual_caaq) left over from interactive
  # debugging -- it would drop automated test runs into the browser.
  caaqs_data <- pm_annual_caaq(annual_data)
  p <- plot_ts(daily_data, caaqs_data = caaqs_data,
               parameter = "pm2.5_annual", rep_yr = 2013, plot_exceedances = FALSE)
  expect_is(p, "ggplot")
  expect_is(ggplot2::ggplot_build(p), "list")
})
|
#' Fit the phenotyping algorithm PheNorm using EHR features
#'
#' @description
#' The function requires as input:
#' * a surrogate, such as the ICD code
#' * the healthcare utilization
#' It can leverage other EHR features (optional) to assist risk prediction.
#'
#' @param nm.logS.ori name of the surrogates (log(ICD+1), log(NLP+1) and log(ICD+NLP+1))
#' @param nm.utl name of healthcare utilization (e.g. note count, encounter_num etc)
#' @param dat all data columns need to be log-transformed and need column names
#' @param nm.X additional features other than the main ICD and NLP
#' @param corrupt.rate rate for random corruption denoising, between 0 and 1, default value=0.3
#' @param train.size size of training sample, default value 10 * nrow(dat)
#' @return list containing probability and beta coefficient
#' @examples
#' \dontrun{
#' set.seed(1234)
#' fit.dat <- read.csv("https://raw.githubusercontent.com/celehs/PheNorm/master/data-raw/data.csv")
#' fit.phenorm=PheNorm.Prob("ICD", "utl", fit.dat, nm.X = NULL,
#' corrupt.rate=0.3, train.size=nrow(fit.dat));
#' head(fit.phenorm$probs)
#' }
#' @export
PheNorm.Prob <- function(nm.logS.ori, nm.utl, dat, nm.X = NULL, corrupt.rate = 0.3, train.size = 10 * nrow(dat)) {
dat <- as.matrix(dat)
# Surrogate columns (kept as a matrix) and the utilization column.
S.ori <- dat[, nm.logS.ori, drop = FALSE]
utl <- dat[, nm.utl]
# Per-surrogate utilization coefficient from the project helper
# findMagicNumber(); used to normalize each surrogate against utilization.
a.hat <- apply(S.ori, 2, function(S) {findMagicNumber(S, utl)$coef})
# VTM() (project helper) replicates a.hat across rows so the product
# subtracts a.hat[j] * utl from column j.
S.norm <- S.ori - VTM(a.hat, nrow(dat)) * utl
if (!is.null(nm.X)) {
# Optional extra features: fit denoised regressions of each normalized
# surrogate on the corrupted feature matrix, then replace the surrogates
# with the fitted combination.
X <- as.matrix(dat[, nm.X])
SX.norm <- cbind(S.norm, X, utl)
# Bootstrap a training sample of size train.size.
id <- sample(1:nrow(dat), train.size, replace = TRUE)
# Random-corruption denoising: each entry is replaced by its column mean
# with probability corrupt.rate (rbinom() draws 0/1 per entry).
SX.norm.corrupt <- apply(SX.norm[id, ], 2,
function(x) {ifelse(rbinom(length(id), 1, corrupt.rate), mean(x), x)}
)
# No-intercept regression of each surrogate on the corrupted features.
b.all <- apply(S.norm, 2, function(ss) {lm(ss[id] ~ SX.norm.corrupt - 1)$coef})
b.all[is.na(b.all)] <- 0
S.norm <- as.matrix(SX.norm) %*% b.all
# Drop the utilization coefficient (last row) from the returned betas.
b.all <- b.all[-dim(b.all)[1], ]
} else {
b.all <- NULL
}
if (length(nm.logS.ori) > 1) {
# Several surrogates: fit a two-component normal mixture per surrogate
# (normalmixEM2comp2 is a project helper) and average the posterior
# probabilities of the upper component.
postprob <- apply(S.norm, 2,
function(x) {
fit = normalmixEM2comp2(x, lambda = 0.5,
mu = quantile(x, probs=c(1/3, 2/3)), sigsqrd = 1
)
fit$posterior[, 2]
}
)
list("probs" = rowMeans(postprob, na.rm = TRUE), "betas" = b.all)
} else {
# Single surrogate: one mixture fit; posterior of the upper component.
fit <- normalmixEM2comp2(unlist(S.norm), lambda = 0.5,
mu = quantile(S.norm, probs=c(1/3, 2/3)), sigsqrd = 1
)
list("probs" = fit$posterior[,2], "betas" = b.all)
}
}
|
/R/PheNorm_Prob.R
|
no_license
|
celehs/PheNorm
|
R
| false
| false
| 2,743
|
r
|
#' Fit the phenotyping algorithm PheNorm using EHR features
#'
#' @description
#' The function requires as input:
#' * a surrogate, such as the ICD code
#' * the healthcare utilization
#' It can leverage other EHR features (optional) to assist risk prediction.
#'
#' @param nm.logS.ori name of the surrogates (log(ICD+1), log(NLP+1) and log(ICD+NLP+1))
#' @param nm.utl name of healthcare utilization (e.g. note count, encounter_num etc)
#' @param dat all data columns need to be log-transformed and need column names
#' @param nm.X additional features other than the main ICD and NLP
#' @param corrupt.rate rate for random corruption denoising, between 0 and 1, default value=0.3
#' @param train.size size of training sample, default value 10 * nrow(dat)
#' @return list containing probability and beta coefficient
#' @examples
#' \dontrun{
#' set.seed(1234)
#' fit.dat <- read.csv("https://raw.githubusercontent.com/celehs/PheNorm/master/data-raw/data.csv")
#' fit.phenorm=PheNorm.Prob("ICD", "utl", fit.dat, nm.X = NULL,
#' corrupt.rate=0.3, train.size=nrow(fit.dat));
#' head(fit.phenorm$probs)
#' }
#' @export
PheNorm.Prob <- function(nm.logS.ori, nm.utl, dat, nm.X = NULL, corrupt.rate = 0.3, train.size = 10 * nrow(dat)) {
dat <- as.matrix(dat)
# Surrogate columns (kept as a matrix) and the utilization column.
S.ori <- dat[, nm.logS.ori, drop = FALSE]
utl <- dat[, nm.utl]
# Per-surrogate utilization coefficient from the project helper
# findMagicNumber(); used to normalize each surrogate against utilization.
a.hat <- apply(S.ori, 2, function(S) {findMagicNumber(S, utl)$coef})
# VTM() (project helper) replicates a.hat across rows so the product
# subtracts a.hat[j] * utl from column j.
S.norm <- S.ori - VTM(a.hat, nrow(dat)) * utl
if (!is.null(nm.X)) {
# Optional extra features: fit denoised regressions of each normalized
# surrogate on the corrupted feature matrix, then replace the surrogates
# with the fitted combination.
X <- as.matrix(dat[, nm.X])
SX.norm <- cbind(S.norm, X, utl)
# Bootstrap a training sample of size train.size.
id <- sample(1:nrow(dat), train.size, replace = TRUE)
# Random-corruption denoising: each entry is replaced by its column mean
# with probability corrupt.rate (rbinom() draws 0/1 per entry).
SX.norm.corrupt <- apply(SX.norm[id, ], 2,
function(x) {ifelse(rbinom(length(id), 1, corrupt.rate), mean(x), x)}
)
# No-intercept regression of each surrogate on the corrupted features.
b.all <- apply(S.norm, 2, function(ss) {lm(ss[id] ~ SX.norm.corrupt - 1)$coef})
b.all[is.na(b.all)] <- 0
S.norm <- as.matrix(SX.norm) %*% b.all
# Drop the utilization coefficient (last row) from the returned betas.
b.all <- b.all[-dim(b.all)[1], ]
} else {
b.all <- NULL
}
if (length(nm.logS.ori) > 1) {
# Several surrogates: fit a two-component normal mixture per surrogate
# (normalmixEM2comp2 is a project helper) and average the posterior
# probabilities of the upper component.
postprob <- apply(S.norm, 2,
function(x) {
fit = normalmixEM2comp2(x, lambda = 0.5,
mu = quantile(x, probs=c(1/3, 2/3)), sigsqrd = 1
)
fit$posterior[, 2]
}
)
list("probs" = rowMeans(postprob, na.rm = TRUE), "betas" = b.all)
} else {
# Single surrogate: one mixture fit; posterior of the upper component.
fit <- normalmixEM2comp2(unlist(S.norm), lambda = 0.5,
mu = quantile(S.norm, probs=c(1/3, 2/3)), sigsqrd = 1
)
list("probs" = fit$posterior[,2], "betas" = b.all)
}
}
|
# Swirl Dates & Times Practice
# str() of an unclassed POSIXlt object shows its 11 underlying components.
t2 <- as.POSIXlt(Sys.time())
str(unclass(t2))
# FIX: the console output below was pasted in as bare text, which is a parse
# error when the file is sourced; kept for reference but commented out.
# List of 11
#  $ sec   : num 4.05
#  $ min   : int 14
#  $ hour  : int 16
#  $ mday  : int 5
#  $ mon   : int 5
#  $ year  : int 119
#  $ wday  : int 3
#  $ yday  : int 155
#  $ isdst : int 1
#  $ zone  : chr "PDT"
#  $ gmtoff: int -25200
#  - attr(*, "tzone")= chr [1:3] "" "PST" "PDT"
|
/Swirl Dates and Times Practice.R
|
no_license
|
BeaRam/datasciencecoursera
|
R
| false
| false
| 329
|
r
|
# Swirl Dates & Times Practice
# str() of an unclassed POSIXlt object shows its 11 underlying components.
t2 <- as.POSIXlt(Sys.time())
str(unclass(t2))
# FIX: the console output below was pasted in as bare text, which is a parse
# error when the file is sourced; kept for reference but commented out.
# List of 11
#  $ sec   : num 4.05
#  $ min   : int 14
#  $ hour  : int 16
#  $ mday  : int 5
#  $ mon   : int 5
#  $ year  : int 119
#  $ wday  : int 3
#  $ yday  : int 155
#  $ isdst : int 1
#  $ zone  : chr "PDT"
#  $ gmtoff: int -25200
#  - attr(*, "tzone")= chr [1:3] "" "PST" "PDT"
|
#' Train a model using Cloud ML
#'
#' Upload a TensorFlow application to Google Cloud, and use that application to
#' train a model.
#'
#' @param application
#'   The path to a TensorFlow application. Defaults to
#'   the current working directory.
#'
#' @param config
#'   The name of the configuration to be used. Defaults to
#'   the `"cloudml"` configuration.
#'
#' @param entrypoint
#'   The file within `application` executed to start training.
#'   Defaults to `"train.R"`.
#'
#' @param ...
#'   Named arguments, used to supply runtime configuration
#'   settings to your TensorFlow application.
#'
#' @seealso [job_describe()], [job_collect()], [job_cancel()]
#'
#' @export
cloudml_train <- function(application = getwd(),
config = "cloudml",
entrypoint = "train.R",
...)
{
# prepare application for deployment
id <- unique_job_name(application, config)
overlay <- list(...)
deployment <- scope_deployment(
id = id,
application = application,
context = "cloudml",
config = config,
overlay = overlay,
entrypoint = entrypoint
)
# read configuration
gcloud <- gcloud_config()
cloudml <- cloudml_config()
# move to deployment parent directory and spray __init__.py
directory <- deployment$directory
scope_setup_py(directory)
setwd(dirname(directory))
# generate deployment script
# (the builder is a chained-call helper: each call appends an argument and
# returns the builder; calling it with no arguments yields the final list)
arguments <- (MLArgumentsBuilder()
("jobs")
("submit")
("training")
(id)
("--job-dir=%s", file.path(cloudml[["storage"]], "staging"))
("--package-path=%s", basename(directory))
("--module-name=%s.cloudml.deploy", basename(directory))
("--staging-bucket=%s", gcloud[["staging-bucket"]])
("--runtime-version=%s", gcloud[["runtime-version"]])
("--region=%s", gcloud[["region"]])
("--")
("Rscript"))
# TODO: re-enable these
# ("--job-dir=%s", overlay$job_dir)
# ("--staging-bucket=%s", overlay$staging_bucket)
# ("--region=%s", overlay$region)
# ("--runtime-version=%s", overlay$runtime_version)
# ("--config=%s/%s", basename(application), overlay$hypertune)
# submit job through command line interface
output <- gexec(gcloud(), arguments(), stdout = TRUE, stderr = TRUE)
# inform user of successful job submission
template <- c(
"Job '%1$s' successfully submitted.",
"",
"Check status and collect output with:",
"- job_status(\"%1$s\")",
"- job_collect(\"%1$s\")"
)
rendered <- sprintf(paste(template, collapse = "\n"), id)
message(rendered)
# call 'describe' to discover additional information related to
# the job, and generate a 'job' object from that
#
# print stderr output from a 'describe' call (this gives the
# user URLs that can be navigated to for more information)
arguments <- (MLArgumentsBuilder()
("jobs")
("describe")
(id))
sofile <- tempfile("stdout-")
sefile <- tempfile("stderr-")
output <- gexec(gcloud(), arguments(), stdout = sofile, stderr = sefile)
stdout <- readChar(sofile, file.info(sofile)$size, TRUE)
stderr <- readChar(sefile, file.info(sefile)$size, TRUE)
# write stderr to the console
message(stderr)
# create job object
# (stdout of 'gcloud ... jobs describe' is YAML; parse it into an R list)
description <- yaml::yaml.load(stdout)
job <- cloudml_job("train", id, description)
register_job(job)
invisible(job)
}
#' Cancel a job
#'
#' Cancel a running Cloud ML job.
#'
#' @inheritParams job_status
#'
#' @family job management
#'
#' @export
job_cancel <- function(job) {
  # normalise input to a 'cloudml_job' object (accepts a job name too)
  job <- as.cloudml_job(job)

  # 'gcloud ml-engine jobs cancel <job>'
  builder <- MLArgumentsBuilder()("jobs")("cancel")(job)
  gexec(gcloud(), builder())
}
#' Describe a job
#'
#' Retrieve the description of a Cloud ML job as an \R list.
#'
#' @inheritParams job_status
#'
#' @family job management
#'
#' @export
job_describe <- function(job) {
  job <- as.cloudml_job(job)

  # 'gcloud ml-engine jobs describe <job>'
  builder <- MLArgumentsBuilder()("jobs")("describe")(job)
  raw <- gexec(gcloud(), builder(), stdout = TRUE)

  # the description is printed as YAML; parse it into an R list
  yaml::yaml.load(paste(raw, collapse = "\n"))
}
#' List all jobs
#'
#' List existing Google Cloud ML jobs.
#'
#' @param filter
#'   Filter the set of jobs to be returned.
#'
#' @param limit
#'   The maximum number of resources to list. By default,
#'   all jobs will be listed.
#'
#' @param page_size
#'   Some services group resource list output into pages.
#'   This flag specifies the maximum number of resources per
#'   page. The default is determined by the service if it
#'   supports paging, otherwise it is unlimited (no paging).
#'
#' @param sort_by
#'   A comma-separated list of resource field key names to
#'   sort by. The default order is ascending. Prefix a field
#'   with `~` for descending order on that field.
#'
#' @param uri
#'   Print a list of resource URIs instead of the default
#'   output.
#'
#' @family job management
#'
#' @export
job_list <- function(filter = NULL,
                     limit = NULL,
                     page_size = NULL,
                     sort_by = NULL,
                     uri = FALSE)
{
  # build the 'gcloud ml-engine jobs list' argument list
  # NOTE(review): flags built from NULL values (e.g. filter = NULL) are
  # presumably dropped by MLArgumentsBuilder -- confirm in its
  # implementation before relying on this behaviour.
  arguments <- (
    MLArgumentsBuilder()
    ("jobs")
    ("list")
    ("--filter=%s", filter)
    ("--limit=%i", as.integer(limit))
    ("--page-size=%i", as.integer(page_size))
    ("--sort-by=%s", sort_by)
    (if (uri) "--uri"))

  output <- gexec(gcloud(), arguments(), stdout = TRUE, stderr = TRUE)

  # with the default (tabular) output, parse the printed table into a
  # data frame; with '--uri' return the raw lines unchanged
  # NOTE(review): readr::read_table2() is deprecated in readr >= 2.0;
  # readr::read_table() is the documented replacement.
  if (!uri) {
    pasted <- paste(output, collapse = "\n")
    output <- readr::read_table2(pasted)
  }

  output
}
#' Show job log stream
#'
#' Show logs from a running Cloud ML Engine job.
#'
#' @inheritParams job_status
#'
#' @param polling_interval
#'   Number of seconds to wait between efforts to fetch the
#'   latest log messages.
#'
#' @param task_name
#'   If set, display only the logs for this particular task.
#'
#' @param allow_multiline_logs
#'   Output multiline log messages as single records.
#'
#' @family job management
#'
#' @export
job_stream <- function(job,
                       polling_interval = 60,
                       task_name = NULL,
                       allow_multiline_logs = FALSE)
{
  job <- as.cloudml_job(job)

  # build the 'gcloud ml-engine jobs stream-logs <job>' argument list.
  # fix: the job id was previously never added to the argument list,
  # although 'gcloud ... jobs stream-logs' requires the job name
  # (matching how job_cancel() / job_status() pass the job).
  arguments <- (
    MLArgumentsBuilder()
    ("jobs")
    ("stream-logs")
    (job)
    ("--polling-interval=%i", as.integer(polling_interval))
    ("--task-name=%s", task_name))

  # the builder accumulates state internally, so this call appends the
  # flag even though its result is discarded
  if (allow_multiline_logs)
    arguments("--allow-multiline-logs")

  # stream until interrupted (blocking call)
  gexec(gcloud(), arguments())
}
#' Current status of a job
#'
#' Get the status of a job, as an \R list.
#'
#' @param job Job name or job object.
#'
#' @family job management
#'
#' @export
job_status <- function(job) {
  job <- as.cloudml_job(job)

  # 'gcloud ml-engine jobs describe <job>' prints the job status as YAML
  builder <- MLArgumentsBuilder()("jobs")("describe")(job)
  raw <- gexec(gcloud(), builder(), stdout = TRUE, stderr = FALSE)

  # parse the YAML description into an R list and return it
  yaml::yaml.load(paste(raw, collapse = "\n"))
}
#' Collect job output
#'
#' Collect the job outputs (e.g. fitted model) from a job.
#' If the job has not yet finished running, `job_collect()`
#' will block and wait until the job has finished.
#'
#' @inheritParams job_status
#'
#' @param destination
#'   The destination directory in which model outputs should
#'   be downloaded. Defaults to `jobs/cloudml`.
#'
#' @family job management
#'
#' @export
job_collect <- function(job, destination = "jobs/cloudml") {
  job <- as.cloudml_job(job)
  id <- job$id

  # helper function for writing job status to console; the message is
  # padded to the console width so a shorter status fully overwrites a
  # longer previous one (output is rewritten in place with '\r')
  write_status <- function(status, time) {
    fmt <- ">>> [state: %s; last updated %s]"
    msg <- sprintf(fmt, status$state, time)

    # fix: the previous padding used paste(rep("", n), collapse = " "),
    # which produces only n - 1 spaces; strrep() yields exactly n spaces
    whitespace <- ""
    width <- getOption("width")
    if (nchar(msg) < width)
      whitespace <- strrep(" ", width - nchar(msg))

    # generate and write console text (overwrite old output)
    output <- paste0("\r", msg, whitespace)
    cat(output, sep = "")
  }

  # get the current job status
  status <- job_status(job)
  time <- Sys.time()

  # if we're already done, attempt download of outputs
  if (status$state == "SUCCEEDED")
    return(job_download(job, destination))

  # if the job has failed, report error
  if (status$state == "FAILED") {
    fmt <- "job '%s' failed [state: %s]"
    stopf(fmt, id, status$state)
  }

  # otherwise, notify the user and begin polling
  fmt <- ">>> Job '%s' is currently running -- please wait...\n"
  printf(fmt, id)
  write_status(status, time)

  # TODO: should we give up after a while? (user can always interrupt)
  repeat {

    # refresh the job status
    status <- job_status(job)
    time <- Sys.time()
    write_status(status, time)

    # download outputs on success
    if (status$state == "SUCCEEDED") {
      printf("\n")
      return(job_download(job, destination))
    }

    # if the job has failed, report error
    if (status$state == "FAILED") {
      printf("\n")
      fmt <- "job '%s' failed [state: %s]"
      stopf(fmt, id, status$state)
    }

    # job isn't ready yet; sleep for a while and try again
    Sys.sleep(30)
  }

  # not reachable: the repeat loop exits only via return() or stopf();
  # kept as a defensive backstop
  stop("failed to receive job outputs")
}
# Download the outputs of a completed job from its Google Storage
# job directory into 'destination'.
job_download <- function(job, destination = "jobs/cloudml") {
  job <- as.cloudml_job(job)
  source <- job_dir(job)

  if (!is_gs_uri(source))
    stopf("job directory '%s' is not a Google Storage URI", source)

  # 'gsutil ls' returns a non-zero status when attempting to query a
  # non-existent gs:// URL, so use it to verify that an output folder
  # is associated with this job
  ls_args <- ShellArgumentsBuilder()("ls")(source)
  if (gexec(gsutil(), ls_args()))
    stopf("no directory at path '%s'", source)

  ensure_directory(destination)
  gs_copy(source, destination)
}
|
/R/jobs.R
|
no_license
|
Geoany/cloudml
|
R
| false
| false
| 9,931
|
r
|
#' Train a model using Cloud ML
#'
#' Upload a TensorFlow application to Google Cloud, and use that application to
#' train a model.
#'
#' @param application
#'   The path to a TensorFlow application. Defaults to
#'   the current working directory.
#'
#' @param config
#'   The name of the configuration to be used. Defaults to
#'   the `"cloudml"` configuration.
#'
#' @param entrypoint
#'   The file used as the entrypoint of the deployed application.
#'   Defaults to `"train.R"`.
#'
#' @param ...
#'   Named arguments, used to supply runtime configuration
#'   settings to your TensorFlow application.
#'
#' @seealso [job_describe()], [job_collect()], [job_cancel()]
#'
#' @export
cloudml_train <- function(application = getwd(),
                          config = "cloudml",
                          entrypoint = "train.R",
                          ...)
{
  # prepare application for deployment: generate a unique job id and
  # stage the application (plus any runtime overlay settings)
  id <- unique_job_name(application, config)
  overlay <- list(...)
  deployment <- scope_deployment(
    id = id,
    application = application,
    context = "cloudml",
    config = config,
    overlay = overlay,
    entrypoint = entrypoint
  )

  # read gcloud / cloudml configuration
  gcloud <- gcloud_config()
  cloudml <- cloudml_config()

  # move to deployment parent directory and spray __init__.py
  # NOTE(review): setwd() is not restored via on.exit(), so the caller's
  # working directory changes as a side effect -- confirm this is intended
  # (scope_deployment() may handle restoration).
  directory <- deployment$directory
  scope_setup_py(directory)
  setwd(dirname(directory))

  # generate the 'gcloud ml-engine jobs submit training' argument list;
  # each call to the builder closure appends one argument
  arguments <- (MLArgumentsBuilder()
                ("jobs")
                ("submit")
                ("training")
                (id)
                ("--job-dir=%s", file.path(cloudml[["storage"]], "staging"))
                ("--package-path=%s", basename(directory))
                ("--module-name=%s.cloudml.deploy", basename(directory))
                ("--staging-bucket=%s", gcloud[["staging-bucket"]])
                ("--runtime-version=%s", gcloud[["runtime-version"]])
                ("--region=%s", gcloud[["region"]])
                ("--")
                ("Rscript"))

  # TODO: re-enable these
  # ("--job-dir=%s", overlay$job_dir)
  # ("--staging-bucket=%s", overlay$staging_bucket)
  # ("--region=%s", overlay$region)
  # ("--runtime-version=%s", overlay$runtime_version)
  # ("--config=%s/%s", basename(application), overlay$hypertune)

  # submit job through command line interface
  output <- gexec(gcloud(), arguments(), stdout = TRUE, stderr = TRUE)

  # inform user of successful job submission
  template <- c(
    "Job '%1$s' successfully submitted.",
    "",
    "Check status and collect output with:",
    "- job_status(\"%1$s\")",
    "- job_collect(\"%1$s\")"
  )
  rendered <- sprintf(paste(template, collapse = "\n"), id)
  message(rendered)

  # call 'describe' to discover additional information related to
  # the job, and generate a 'job' object from that
  #
  # print stderr output from a 'describe' call (this gives the
  # user URLs that can be navigated to for more information)
  arguments <- (MLArgumentsBuilder()
                ("jobs")
                ("describe")
                (id))

  # capture stdout and stderr into temporary files so each stream can
  # be processed separately
  sofile <- tempfile("stdout-")
  sefile <- tempfile("stderr-")
  output <- gexec(gcloud(), arguments(), stdout = sofile, stderr = sefile)
  stdout <- readChar(sofile, file.info(sofile)$size, TRUE)
  stderr <- readChar(sefile, file.info(sefile)$size, TRUE)

  # write stderr to the console
  message(stderr)

  # create a job object from the YAML description, register it in the
  # local job registry, and return it invisibly
  description <- yaml::yaml.load(stdout)
  job <- cloudml_job("train", id, description)
  register_job(job)
  invisible(job)
}
#' Cancel a job
#'
#' Cancel a running Cloud ML job.
#'
#' @inheritParams job_status
#'
#' @family job management
#'
#' @export
job_cancel <- function(job) {
  # normalise input to a 'cloudml_job' object (accepts a job name too)
  job <- as.cloudml_job(job)

  # 'gcloud ml-engine jobs cancel <job>'
  builder <- MLArgumentsBuilder()("jobs")("cancel")(job)
  gexec(gcloud(), builder())
}
#' Describe a job
#'
#' Retrieve the description of a Cloud ML job as an \R list.
#'
#' @inheritParams job_status
#'
#' @family job management
#'
#' @export
job_describe <- function(job) {
  job <- as.cloudml_job(job)

  # 'gcloud ml-engine jobs describe <job>'
  builder <- MLArgumentsBuilder()("jobs")("describe")(job)
  raw <- gexec(gcloud(), builder(), stdout = TRUE)

  # the description is printed as YAML; parse it into an R list
  yaml::yaml.load(paste(raw, collapse = "\n"))
}
#' List all jobs
#'
#' List existing Google Cloud ML jobs.
#'
#' @param filter
#'   Filter the set of jobs to be returned.
#'
#' @param limit
#'   The maximum number of resources to list. By default,
#'   all jobs will be listed.
#'
#' @param page_size
#'   Some services group resource list output into pages.
#'   This flag specifies the maximum number of resources per
#'   page. The default is determined by the service if it
#'   supports paging, otherwise it is unlimited (no paging).
#'
#' @param sort_by
#'   A comma-separated list of resource field key names to
#'   sort by. The default order is ascending. Prefix a field
#'   with `~` for descending order on that field.
#'
#' @param uri
#'   Print a list of resource URIs instead of the default
#'   output.
#'
#' @family job management
#'
#' @export
job_list <- function(filter = NULL,
                     limit = NULL,
                     page_size = NULL,
                     sort_by = NULL,
                     uri = FALSE)
{
  # build the 'gcloud ml-engine jobs list' argument list
  # NOTE(review): flags built from NULL values (e.g. filter = NULL) are
  # presumably dropped by MLArgumentsBuilder -- confirm in its
  # implementation before relying on this behaviour.
  arguments <- (
    MLArgumentsBuilder()
    ("jobs")
    ("list")
    ("--filter=%s", filter)
    ("--limit=%i", as.integer(limit))
    ("--page-size=%i", as.integer(page_size))
    ("--sort-by=%s", sort_by)
    (if (uri) "--uri"))

  output <- gexec(gcloud(), arguments(), stdout = TRUE, stderr = TRUE)

  # with the default (tabular) output, parse the printed table into a
  # data frame; with '--uri' return the raw lines unchanged
  # NOTE(review): readr::read_table2() is deprecated in readr >= 2.0;
  # readr::read_table() is the documented replacement.
  if (!uri) {
    pasted <- paste(output, collapse = "\n")
    output <- readr::read_table2(pasted)
  }

  output
}
#' Show job log stream
#'
#' Show logs from a running Cloud ML Engine job.
#'
#' @inheritParams job_status
#'
#' @param polling_interval
#'   Number of seconds to wait between efforts to fetch the
#'   latest log messages.
#'
#' @param task_name
#'   If set, display only the logs for this particular task.
#'
#' @param allow_multiline_logs
#'   Output multiline log messages as single records.
#'
#' @family job management
#'
#' @export
job_stream <- function(job,
                       polling_interval = 60,
                       task_name = NULL,
                       allow_multiline_logs = FALSE)
{
  job <- as.cloudml_job(job)

  # build the 'gcloud ml-engine jobs stream-logs <job>' argument list.
  # fix: the job id was previously never added to the argument list,
  # although 'gcloud ... jobs stream-logs' requires the job name
  # (matching how job_cancel() / job_status() pass the job).
  arguments <- (
    MLArgumentsBuilder()
    ("jobs")
    ("stream-logs")
    (job)
    ("--polling-interval=%i", as.integer(polling_interval))
    ("--task-name=%s", task_name))

  # the builder accumulates state internally, so this call appends the
  # flag even though its result is discarded
  if (allow_multiline_logs)
    arguments("--allow-multiline-logs")

  # stream until interrupted (blocking call)
  gexec(gcloud(), arguments())
}
#' Current status of a job
#'
#' Get the status of a job, as an \R list.
#'
#' @param job Job name or job object.
#'
#' @family job management
#'
#' @export
job_status <- function(job) {
  job <- as.cloudml_job(job)

  # 'gcloud ml-engine jobs describe <job>' prints the job status as YAML
  builder <- MLArgumentsBuilder()("jobs")("describe")(job)
  raw <- gexec(gcloud(), builder(), stdout = TRUE, stderr = FALSE)

  # parse the YAML description into an R list and return it
  yaml::yaml.load(paste(raw, collapse = "\n"))
}
#' Collect job output
#'
#' Collect the job outputs (e.g. fitted model) from a job.
#' If the job has not yet finished running, `job_collect()`
#' will block and wait until the job has finished.
#'
#' @inheritParams job_status
#'
#' @param destination
#'   The destination directory in which model outputs should
#'   be downloaded. Defaults to `jobs/cloudml`.
#'
#' @family job management
#'
#' @export
job_collect <- function(job, destination = "jobs/cloudml") {
  job <- as.cloudml_job(job)
  id <- job$id

  # helper function for writing job status to console; the message is
  # padded to the console width so a shorter status fully overwrites a
  # longer previous one (output is rewritten in place with '\r')
  write_status <- function(status, time) {
    fmt <- ">>> [state: %s; last updated %s]"
    msg <- sprintf(fmt, status$state, time)

    # fix: the previous padding used paste(rep("", n), collapse = " "),
    # which produces only n - 1 spaces; strrep() yields exactly n spaces
    whitespace <- ""
    width <- getOption("width")
    if (nchar(msg) < width)
      whitespace <- strrep(" ", width - nchar(msg))

    # generate and write console text (overwrite old output)
    output <- paste0("\r", msg, whitespace)
    cat(output, sep = "")
  }

  # get the current job status
  status <- job_status(job)
  time <- Sys.time()

  # if we're already done, attempt download of outputs
  if (status$state == "SUCCEEDED")
    return(job_download(job, destination))

  # if the job has failed, report error
  if (status$state == "FAILED") {
    fmt <- "job '%s' failed [state: %s]"
    stopf(fmt, id, status$state)
  }

  # otherwise, notify the user and begin polling
  fmt <- ">>> Job '%s' is currently running -- please wait...\n"
  printf(fmt, id)
  write_status(status, time)

  # TODO: should we give up after a while? (user can always interrupt)
  repeat {

    # refresh the job status
    status <- job_status(job)
    time <- Sys.time()
    write_status(status, time)

    # download outputs on success
    if (status$state == "SUCCEEDED") {
      printf("\n")
      return(job_download(job, destination))
    }

    # if the job has failed, report error
    if (status$state == "FAILED") {
      printf("\n")
      fmt <- "job '%s' failed [state: %s]"
      stopf(fmt, id, status$state)
    }

    # job isn't ready yet; sleep for a while and try again
    Sys.sleep(30)
  }

  # not reachable: the repeat loop exits only via return() or stopf();
  # kept as a defensive backstop
  stop("failed to receive job outputs")
}
# Download the outputs of a completed job from its Google Storage
# job directory into 'destination'.
job_download <- function(job, destination = "jobs/cloudml") {
  job <- as.cloudml_job(job)
  source <- job_dir(job)

  if (!is_gs_uri(source))
    stopf("job directory '%s' is not a Google Storage URI", source)

  # 'gsutil ls' returns a non-zero status when attempting to query a
  # non-existent gs:// URL, so use it to verify that an output folder
  # is associated with this job
  ls_args <- ShellArgumentsBuilder()("ls")(source)
  if (gexec(gsutil(), ls_args()))
    stopf("no directory at path '%s'", source)

  ensure_directory(destination)
  gs_copy(source, destination)
}
|
#' Prepare reads for plotting with Rcircos
#'
#' Input data frame should have column names ReadID, Chr, RefStart and RefEnd.
#' Consecutive fragments of each read are paired into interacting loci.
#' Reads with fewer than two fragments are skipped, since a single fragment
#' defines no link.
#' @param readData - data frame of fragments detected in MC-HiC
#' @param readsToDraw - vector of read IDs
#' @return data frame with 6 columns for pairs of interacting loci
#' @export
prepareLinkData <- function(readData, readsToDraw) {
  firstChr <- c()
  firstStart <- c()
  firstEnd <- c()
  secondChr <- c()
  secondStart <- c()
  secondEnd <- c()
  for (rd in readsToDraw) {
    currentRead <- readData[readData$ReadID == rd, ]
    n <- nrow(currentRead)
    # fix: a read needs at least two fragments to form a link; the former
    # 1:(n-1) indexing evaluated to c(1, 0) (and 2:n to c(2, 1)) for
    # single-fragment reads, producing an invalid NA link row
    if (n < 2) next
    firstChr    <- c(firstChr,    currentRead$Chr[seq_len(n - 1)])
    firstStart  <- c(firstStart,  currentRead$RefStart[seq_len(n - 1)])
    firstEnd    <- c(firstEnd,    currentRead$RefEnd[seq_len(n - 1)])
    secondChr   <- c(secondChr,   currentRead$Chr[2:n])
    secondStart <- c(secondStart, currentRead$RefStart[2:n])
    secondEnd   <- c(secondEnd,   currentRead$RefEnd[2:n])
  }
  RCircosLink <- data.frame(firstChr = firstChr, firstStart = firstStart, firstEnd = firstEnd,
                            secondChr = secondChr, secondStart = secondStart, secondEnd = secondEnd,
                            stringsAsFactors = FALSE)
  return(RCircosLink)
}
#' Prepare the core Rcircos plot for C. elegans data
#'
#' Sets up the RCircos core components with a ce11 ideogram, overrides a few
#' plot parameters, and draws the chromosome ideogram.
#' @param base.per.unit - integer for the size of the units that are plotted
#' @param chr.exclude - vector of names of chromosomes to exclude
#' @param highlight.width - width of the chromosome highlight line
#' @param tracks.inside - number of tracks to have inside the circle
#' @param tracks.outside - number of tracks to have outside the circle
#' @return plots ideogram
#' @export
baseRcircosCE<-function(base.per.unit=3000, chr.exclude=NULL, highlight.width=10, tracks.inside=1, tracks.outside=0){
  Chrnames<-c("chrI","chrII","chrIII","chrIV","chrV","chrX","MtDNA") # used to get rid of mtDNA
  # ce11 chromosome lengths in bp
  ce11 <- list( "chrI" = 15072434,
                "chrII" = 15279421,
                "chrIII" = 13783801,
                "chrIV" = 17493829,
                "chrV" = 20924180,
                "chrX" = 17718942,
                "MtDNA" = 13794)
  # build a cytoband-style ideogram table for RCircos
  # NOTE(review): "Choromsome" is misspelled ("Chromosome") -- verify whether
  # RCircos matches this column by name or by position before renaming it.
  ce11.ideo<-data.frame(Choromsome=Chrnames,ChromStart=0,ChromEnd=unlist(ce11),Band=1,Stain="gvar")
  cyto.info <- ce11.ideo
  RCircos.Set.Core.Components(cyto.info, chr.exclude,tracks.inside, tracks.outside)
  # override selected plot parameters before drawing
  rcircos.params <- RCircos.Get.Plot.Parameters()
  rcircos.params$base.per.unit<-base.per.unit
  rcircos.params$chrom.width=0 #0.1
  rcircos.params$highlight.width=highlight.width #1
  RCircos.Reset.Plot.Parameters(rcircos.params)
  # set up the plotting area and draw the ideogram
  RCircos.Set.Plot.Area()
  par(mai=c(0.25, 0.25, 0.25, 0.25))
  plot.window(c(-1.5,1.5), c(-1.5, 1.5))
  RCircos.Chromosome.Ideogram.Plot()
}
#' Prepare a list of points of view for 4C
#'
#' Uses each chromosome's length to place points of view at 20%, 50% and 80%
#' of the chromosome (left arm, center, right arm), rounded to the nearest kb.
#' @param chrLengthList - a named list with lengths of chromsomes
#' @param winSize - the size of the window around the POV for selecting interactions (must be an even number)
#' @return data.frame with points of view
#' @export
generatePOV <- function(chrLengthList = NULL, winSize = 10000) {
  # default to the ce11 nuclear chromosome lengths
  if (is.null(chrLengthList)) {
    chrLengthList <- c("chrI"   = 15072434,
                       "chrII"  = 15279421,
                       "chrIII" = 13783801,
                       "chrIV"  = 17493829,
                       "chrV"   = 20924180,
                       "chrX"   = 17718942)
  }

  # position at a given fraction of each chromosome, rounded to the
  # nearest kb, named "<chr>_<suffix>"
  povAt <- function(fraction, suffix) {
    pos <- round(fraction * chrLengthList / 1000, 0) * 1000
    names(pos) <- paste(names(pos), suffix, sep = "_")
    pos
  }

  left   <- povAt(0.2, "left")
  center <- povAt(0.5, "center")
  right  <- povAt(0.8, "right")

  POV <- data.frame(POVname = c(names(left), names(center), names(right)),
                    POVpos  = c(left, center, right),
                    row.names = NULL)
  POV$chr   <- sub("_.*", "", POV$POVname)
  POV$start <- POV$POVpos - winSize / 2
  POV$end   <- POV$POVpos + winSize / 2

  # order by chromosome, then by position along the chromosome
  POV[order(POV$chr, POV$start), ]
}
|
/js_RcircosPlotting.R
|
no_license
|
CellFateNucOrg/afterMC-HiCplots
|
R
| false
| false
| 4,044
|
r
|
#' Prepare reads for plotting with Rcircos
#'
#' Input data frame should have column names ReadID, Chr, RefStart and RefEnd.
#' Consecutive fragments of each read are paired into interacting loci.
#' Reads with fewer than two fragments are skipped, since a single fragment
#' defines no link.
#' @param readData - data frame of fragments detected in MC-HiC
#' @param readsToDraw - vector of read IDs
#' @return data frame with 6 columns for pairs of interacting loci
#' @export
prepareLinkData <- function(readData, readsToDraw) {
  firstChr <- c()
  firstStart <- c()
  firstEnd <- c()
  secondChr <- c()
  secondStart <- c()
  secondEnd <- c()
  for (rd in readsToDraw) {
    currentRead <- readData[readData$ReadID == rd, ]
    n <- nrow(currentRead)
    # fix: a read needs at least two fragments to form a link; the former
    # 1:(n-1) indexing evaluated to c(1, 0) (and 2:n to c(2, 1)) for
    # single-fragment reads, producing an invalid NA link row
    if (n < 2) next
    firstChr    <- c(firstChr,    currentRead$Chr[seq_len(n - 1)])
    firstStart  <- c(firstStart,  currentRead$RefStart[seq_len(n - 1)])
    firstEnd    <- c(firstEnd,    currentRead$RefEnd[seq_len(n - 1)])
    secondChr   <- c(secondChr,   currentRead$Chr[2:n])
    secondStart <- c(secondStart, currentRead$RefStart[2:n])
    secondEnd   <- c(secondEnd,   currentRead$RefEnd[2:n])
  }
  RCircosLink <- data.frame(firstChr = firstChr, firstStart = firstStart, firstEnd = firstEnd,
                            secondChr = secondChr, secondStart = secondStart, secondEnd = secondEnd,
                            stringsAsFactors = FALSE)
  return(RCircosLink)
}
#' Prepare the core Rcircos plot for C. elegans data
#'
#' Sets up the RCircos core components with a ce11 ideogram, overrides a few
#' plot parameters, and draws the chromosome ideogram.
#' @param base.per.unit - integer for the size of the units that are plotted
#' @param chr.exclude - vector of names of chromosomes to exclude
#' @param highlight.width - width of the chromosome highlight line
#' @param tracks.inside - number of tracks to have inside the circle
#' @param tracks.outside - number of tracks to have outside the circle
#' @return plots ideogram
#' @export
baseRcircosCE<-function(base.per.unit=3000, chr.exclude=NULL, highlight.width=10, tracks.inside=1, tracks.outside=0){
  Chrnames<-c("chrI","chrII","chrIII","chrIV","chrV","chrX","MtDNA") # used to get rid of mtDNA
  # ce11 chromosome lengths in bp
  ce11 <- list( "chrI" = 15072434,
                "chrII" = 15279421,
                "chrIII" = 13783801,
                "chrIV" = 17493829,
                "chrV" = 20924180,
                "chrX" = 17718942,
                "MtDNA" = 13794)
  # build a cytoband-style ideogram table for RCircos
  # NOTE(review): "Choromsome" is misspelled ("Chromosome") -- verify whether
  # RCircos matches this column by name or by position before renaming it.
  ce11.ideo<-data.frame(Choromsome=Chrnames,ChromStart=0,ChromEnd=unlist(ce11),Band=1,Stain="gvar")
  cyto.info <- ce11.ideo
  RCircos.Set.Core.Components(cyto.info, chr.exclude,tracks.inside, tracks.outside)
  # override selected plot parameters before drawing
  rcircos.params <- RCircos.Get.Plot.Parameters()
  rcircos.params$base.per.unit<-base.per.unit
  rcircos.params$chrom.width=0 #0.1
  rcircos.params$highlight.width=highlight.width #1
  RCircos.Reset.Plot.Parameters(rcircos.params)
  # set up the plotting area and draw the ideogram
  RCircos.Set.Plot.Area()
  par(mai=c(0.25, 0.25, 0.25, 0.25))
  plot.window(c(-1.5,1.5), c(-1.5, 1.5))
  RCircos.Chromosome.Ideogram.Plot()
}
#' Prepare a list of points of view for 4C
#'
#' Uses each chromosome's length to place points of view at 20%, 50% and 80%
#' of the chromosome (left arm, center, right arm), rounded to the nearest kb.
#' @param chrLengthList - a named list with lengths of chromsomes
#' @param winSize - the size of the window around the POV for selecting interactions (must be an even number)
#' @return data.frame with points of view
#' @export
generatePOV <- function(chrLengthList = NULL, winSize = 10000) {
  # default to the ce11 nuclear chromosome lengths
  if (is.null(chrLengthList)) {
    chrLengthList <- c("chrI"   = 15072434,
                       "chrII"  = 15279421,
                       "chrIII" = 13783801,
                       "chrIV"  = 17493829,
                       "chrV"   = 20924180,
                       "chrX"   = 17718942)
  }

  # position at a given fraction of each chromosome, rounded to the
  # nearest kb, named "<chr>_<suffix>"
  povAt <- function(fraction, suffix) {
    pos <- round(fraction * chrLengthList / 1000, 0) * 1000
    names(pos) <- paste(names(pos), suffix, sep = "_")
    pos
  }

  left   <- povAt(0.2, "left")
  center <- povAt(0.5, "center")
  right  <- povAt(0.8, "right")

  POV <- data.frame(POVname = c(names(left), names(center), names(right)),
                    POVpos  = c(left, center, right),
                    row.names = NULL)
  POV$chr   <- sub("_.*", "", POV$POVname)
  POV$start <- POV$POVpos - winSize / 2
  POV$end   <- POV$POVpos + winSize / 2

  # order by chromosome, then by position along the chromosome
  POV[order(POV$chr, POV$start), ]
}
|
# Drug-response prediction with oncoPredict: train ridge models on GDSC
# tamoxifen IC50 data and predict sensitivity for 77 test samples.
# NOTE(review): hard-coded home-directory path makes the script non-portable.
setwd("~/Documents/GSDC")
library(oncoPredict)
library(tidyverse)
# test expression data: gene count matrix, one row per gene
countdata_predict <- read_tsv("countdata77.txt", comment="#")
# keep only the first occurrence of each gene id
countdata_predict <- countdata_predict[!duplicated(countdata_predict$Geneid), ]
testExpr <- countdata_predict %>%
  as.data.frame() %>%
  column_to_rownames("Geneid") %>% # turn the geneid column into rownames
  as.matrix()
# training data: expression counts and matching tamoxifen IC50 values
seqdata <- read_tsv("tamoxifen_countdata.txt")
sample_ic50 <- read_tsv("IC50_tamoxifen.txt")
trainingExpr <- seqdata %>%
  as.data.frame() %>%
  column_to_rownames("GeneID") %>% # turn the geneid column into rownames
  as.matrix()
sample_ic50_training <- sample_ic50 %>%
  as.data.frame() %>%
  column_to_rownames("CosmicID") %>% # turn the cosmic id column into rownames
  as.matrix()
# fit and predict; results are written to disk by calcPhenotype
# NOTE(review): 'removeLowVaringGenesFrom' (sic) is presumably the argument
# name as spelled in the oncoPredict API -- do not "correct" it without
# checking the package documentation.
calcPhenotype(trainingExprData = trainingExpr,
              trainingPtype = sample_ic50_training,
              testExprData = testExpr,
              batchCorrect = 'eb', # "eb" for ComBat
              powerTransformPhenotype = TRUE,
              removeLowVaryingGenes = 0.2,
              minNumSamples = 10,
              printOutput = TRUE,
              removeLowVaringGenesFrom='rawData',
              cc=TRUE,
              rsq=TRUE)
|
/GDSC_oncopredict.R
|
no_license
|
fang2065/FangBioinfo
|
R
| false
| false
| 1,197
|
r
|
# Drug-response prediction with oncoPredict: train ridge models on GDSC
# tamoxifen IC50 data and predict sensitivity for 77 test samples.
# NOTE(review): hard-coded home-directory path makes the script non-portable.
setwd("~/Documents/GSDC")
library(oncoPredict)
library(tidyverse)
# test expression data: gene count matrix, one row per gene
countdata_predict <- read_tsv("countdata77.txt", comment="#")
# keep only the first occurrence of each gene id
countdata_predict <- countdata_predict[!duplicated(countdata_predict$Geneid), ]
testExpr <- countdata_predict %>%
  as.data.frame() %>%
  column_to_rownames("Geneid") %>% # turn the geneid column into rownames
  as.matrix()
# training data: expression counts and matching tamoxifen IC50 values
seqdata <- read_tsv("tamoxifen_countdata.txt")
sample_ic50 <- read_tsv("IC50_tamoxifen.txt")
trainingExpr <- seqdata %>%
  as.data.frame() %>%
  column_to_rownames("GeneID") %>% # turn the geneid column into rownames
  as.matrix()
sample_ic50_training <- sample_ic50 %>%
  as.data.frame() %>%
  column_to_rownames("CosmicID") %>% # turn the cosmic id column into rownames
  as.matrix()
# fit and predict; results are written to disk by calcPhenotype
# NOTE(review): 'removeLowVaringGenesFrom' (sic) is presumably the argument
# name as spelled in the oncoPredict API -- do not "correct" it without
# checking the package documentation.
calcPhenotype(trainingExprData = trainingExpr,
              trainingPtype = sample_ic50_training,
              testExprData = testExpr,
              batchCorrect = 'eb', # "eb" for ComBat
              powerTransformPhenotype = TRUE,
              removeLowVaryingGenes = 0.2,
              minNumSamples = 10,
              printOutput = TRUE,
              removeLowVaringGenesFrom='rawData',
              cc=TRUE,
              rsq=TRUE)
|
\encoding{UTF-8}
\name{kf}
\alias{kf}
\title{
Discrete associated kernel function
}
\description{
This function computes the discrete associated kernel function.
}
\usage{
kf(x, t, h, ker, a = 1, c = 2)
}
\arguments{
\item{x}{
The target.
}
\item{t}{
A single value or the grid where the discrete associated kernel function is computed.
}
\item{h}{
The bandwidth or smoothing parameter.
}
\item{ker}{
The associated kernel: "dirDU" DiracDU,"bino" Binomial, "triang" Discrete Triangular kernel.
}
\item{a}{
The arm in Discrete Triangular kernel. The default value is 1.
}
\item{c}{
The number of categories in DiracDU kernel. The default value is 2.
}
}
\details{
The associated kernel is one of the three which have been defined in the sections above : DiracDU, Binomial and Discrete Triangular; see Kokonendji and Senga Kiessé (2011), and also Kokonendji et al. (2007).
}
\value{
Returns the value of the discrete associated kernel function at t according to the target and the bandwidth.
}
\references{
Kokonendji, C.C. and Senga Kiessé, T. (2011). Discrete associated kernel method and extensions,
\emph{ Statistical Methodology} \bold{8}, 497 - 516.
Kokonendji, C.C., Senga Kiessé, T. and Zocchi, S.S. (2007). Discrete triangular distributions and non-parametric estimation for
probability mass function,
\emph{ Journal of Nonparametric Statistics } \bold{19}, 241 - 254.
}
\author{
W. E. Wansouwé, C. C. Kokonendji and D. T. Kolyang
}
\examples{
x<-4
h<-0.1
t<-0:10
kf(x,t,h,"bino")
}
|
/man/kf.Rd
|
no_license
|
cran/Disake
|
R
| false
| false
| 1,540
|
rd
|
\encoding{UTF-8}
\name{kf}
\alias{kf}
\title{
Discrete associated kernel function
}
\description{
This function computes the discrete associated kernel function.
}
\usage{
kf(x, t, h, ker, a = 1, c = 2)
}
\arguments{
\item{x}{
The target.
}
\item{t}{
A single value or the grid where the discrete associated kernel function is computed.
}
\item{h}{
The bandwidth or smoothing parameter.
}
\item{ker}{
The associated kernel: "dirDU" DiracDU,"bino" Binomial, "triang" Discrete Triangular kernel.
}
\item{a}{
The arm in Discrete Triangular kernel. The default value is 1.
}
\item{c}{
The number of categories in DiracDU kernel. The default value is 2.
}
}
\details{
The associated kernel is one of the three which have been defined in the sections above : DiracDU, Binomial and Discrete Triangular; see Kokonendji and Senga Kiessé (2011), and also Kokonendji et al. (2007).
}
\value{
Returns the value of the discrete associated kernel function at t according to the target and the bandwidth.
}
\references{
Kokonendji, C.C. and Senga Kiessé, T. (2011). Discrete associated kernel method and extensions,
\emph{ Statistical Methodology} \bold{8}, 497 - 516.
Kokonendji, C.C., Senga Kiessé, T. and Zocchi, S.S. (2007). Discrete triangular distributions and non-parametric estimation for
probability mass function,
\emph{ Journal of Nonparametric Statistics } \bold{19}, 241 - 254.
}
\author{
W. E. Wansouwé, C. C. Kokonendji and D. T. Kolyang
}
\examples{
x<-4
h<-0.1
t<-0:10
kf(x,t,h,"bino")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotGeneral.R
\name{sysToBMatrixDf}
\alias{sysToBMatrixDf}
\title{sysToBMatrixDf}
\usage{
sysToBMatrixDf(mySys, applyLabels = TRUE)
}
\arguments{
\item{mySys}{An R object of class ConQuestSys, returned by the function conquestr::ConQuestSys}
\item{applyLabels}{A bool indicating whether labels (e.g., dimension labels) should be appended.}
}
\value{
A data frame containing the labelled B matrix.
}
\description{
Read an R object of class ConQuestSys and create a labelled representation of the B matrix (scoring matrix). This maps item response categories to items and dimensions. Returns a long data frame, where items are duplicated if they are in many dimensions.
}
\examples{
myBMatrix<- sysToBMatrixDf(ConQuestSys())
\dontrun{
# if you run the above example you will have the B Matrix from the example system file.
str(myBMatrix)
}
}
|
/conquestr/man/sysToBMatrixDf.Rd
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false
| true
| 918
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotGeneral.R
\name{sysToBMatrixDf}
\alias{sysToBMatrixDf}
\title{sysToBMatrixDf}
\usage{
sysToBMatrixDf(mySys, applyLabels = TRUE)
}
\arguments{
\item{mySys}{An R object of class ConQuestSys, returned by the function conquestr::ConQuestSys}
\item{applyLabels}{A bool indicating whether labels (e.g., dimension labels) should be appended.}
}
\value{
A data frame containing the labelled B matrix.
}
\description{
Read an R object of class ConQuestSys and create a labelled representation of the B matrix (scoring matrix). This maps item response categories to items and dimensions. Returns a long data frame, where items are duplicated if they are in many dimensions.
}
\examples{
myBMatrix<- sysToBMatrixDf(ConQuestSys())
\dontrun{
# if you run the above example you will have the B Matrix from the example system file.
str(myBMatrix)
}
}
|
context("rpart")

# fix: the old description referenced a non-existent data_tree(); the
# function under test is dendro_data()
test_that("dendro_data() returns the correct classes for rpart fits", {
  if (!require("rpart", quietly = TRUE)) skip("package rpart not available")
  fit <- rpart(Kyphosis ~ Age + Number + Start, method = "class", data = kyphosis)
  tdata <- dendro_data(fit)
  expect_is(tdata, "dendro")
  expect_is(segment(tdata), "data.frame")
  expect_is(label(tdata), "data.frame")
  expect_is(leaf_label(tdata), "data.frame")
})
|
/data/genthat_extracted_code/ggdendro/tests/test-3-rpart.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 442
|
r
|
context("rpart")

# fix: the old description referenced a non-existent data_tree(); the
# function under test is dendro_data()
test_that("dendro_data() returns the correct classes for rpart fits", {
  if (!require("rpart", quietly = TRUE)) skip("package rpart not available")
  fit <- rpart(Kyphosis ~ Age + Number + Start, method = "class", data = kyphosis)
  tdata <- dendro_data(fit)
  expect_is(tdata, "dendro")
  expect_is(segment(tdata), "data.frame")
  expect_is(label(tdata), "data.frame")
  expect_is(leaf_label(tdata), "data.frame")
})
|
### How many Patients are there stratified by BMI?###
# Fetch height and weight Observations from a FHIR test server, join them
# per patient, compute BMI and tabulate/plot WHO weight classes.

#Install and load fhircrackr
#install.packages("fhircrackr") #do this only once
library(fhircrackr)
library(dplyr)
#check out vignette for how to use package
# NOTE(review): opens the vignette viewer as a side effect when run;
# intended for interactive use only
vignette(topic = "fhircrackr", package="fhircrackr")
#Download Observations coding for body height (LOINC 8302-2)
search_request_height <- paste0("https://mii-agiop-3p.life.uni-leipzig.de/fhir/Observation?code=8302-2") #define search request
height_bundles <- fhir_search(search_request_height) #download bundles
#Download Observations coding for body weight (LOINC 29463-7)
search_request_weight <- paste0("https://mii-agiop-3p.life.uni-leipzig.de/fhir/Observation?code=29463-7") #define search request
weight_bundles <- fhir_search(search_request_weight) #download bundles
#data frame design: which XPath elements of each Observation to extract
design_height <- list(
  Observation = list(
    resource = "//Observation",
    cols = list(
      patient = "subject/reference",
      height = "valueQuantity/value",
      height_unit = "valueQuantity/code"
    )
  )
)
design_weight <- list(
  Observation = list(
    resource = "//Observation",
    cols = list(
      patient = "subject/reference",
      weight = "valueQuantity/value",
      weight_unit = "valueQuantity/code"
    )
  )
)
#flatten resources into data frames
dfs_height <- fhir_crack(height_bundles, design_height)
heights <- dfs_height$Observation
dfs_weight <- fhir_crack(weight_bundles, design_weight)
weights <- dfs_weight$Observation
#check that all values have the same unit
unique(heights$height_unit) #one unit is kg, there seems to be a mistake
# interactive inspection only -- the result of this line is discarded
heights[heights$height_unit=="kg",] #check which one is wrong
heights <- heights[heights$height_unit=="cm",]#only keep valid heights
unique(weights$weight_unit)#everything is fine
#merge height and weight per patient (full join keeps patients with only one)
data <- full_join(heights, weights, by="patient")
View(data)
#convert to correct data type (values are extracted from XML as character)
data$height <- as.numeric(data$height)
data$weight <- as.numeric(data$weight)
#compute BMI (kg / m^2) and weight classes (WHO cut-offs)
data$BMI <- data$weight/((data$height/100))^2
data$BMI_class <- cut(data$BMI,
                      breaks = c(0, 18.5, 25, 30, 35, 40, Inf),
                      labels = c("Underweight", "Normal weight", "Overweight", "Obesity Class 1", "Obesity Class 2", "Obesity Class 3"),
                      right = FALSE)
#display in numbers and plot
table(data$BMI_class)
plot(data$BMI_class)
|
/CountBMIGroups.R
|
no_license
|
JePrzybilla/Projectathon2020
|
R
| false
| false
| 2,336
|
r
|
### How many Patients are there stratified by BMI?###
# Reads body height and body weight Observations from a FHIR test server,
# joins them per patient, computes the BMI and tabulates/plots BMI classes.
#Install and load fhircrackr
#install.packages("fhircrackr") #do this only once
library(fhircrackr)
library(dplyr)
#check out vignette for how to use package
vignette(topic = "fhircrackr", package="fhircrackr")
#Download Observations coding for body height
# LOINC code 8302-2 = body height
search_request_height <- paste0("https://mii-agiop-3p.life.uni-leipzig.de/fhir/Observation?code=8302-2") #define search request
height_bundles <- fhir_search(search_request_height) #download bundles
#Download Observations coding for body weight
# LOINC code 29463-7 = body weight
search_request_weight <- paste0("https://mii-agiop-3p.life.uni-leipzig.de/fhir/Observation?code=29463-7") #define search request
weight_bundles <- fhir_search(search_request_weight) #download bundles
#data frame design
# Each design maps XPath expressions inside an Observation resource to
# column names of the flattened data frame produced by fhir_crack().
design_height <- list(
  Observation = list(
    resource = "//Observation",
    cols = list(
      patient = "subject/reference",
      height = "valueQuantity/value",
      height_unit = "valueQuantity/code"
    )
  )
)
design_weight <- list(
  Observation = list(
    resource = "//Observation",
    cols = list(
      patient = "subject/reference",
      weight = "valueQuantity/value",
      weight_unit = "valueQuantity/code"
    )
  )
)
#flatten resources
dfs_height <- fhir_crack(height_bundles, design_height)
heights <- dfs_height$Observation
dfs_weight <- fhir_crack(weight_bundles, design_weight)
weights <- dfs_weight$Observation
#check that all values have the same unit
unique(heights$height_unit) #one unit is kg, there seems to be a mistake
heights[heights$height_unit=="kg",] #check which one is wrong
heights <- heights[heights$height_unit=="cm",]#only keep valid heights
unique(weights$weight_unit)#everything is fine
#merge data
# full_join keeps patients that have only one of the two measurements;
# such patients get NA in the missing column and an NA BMI below.
data <- full_join(heights, weights, by="patient")
View(data)
#convert to correct data type
# fhir_crack() returns character columns, so cast the values to numeric.
data$height <- as.numeric(data$height)
data$weight <- as.numeric(data$weight)
#compute BMI and weight classes
# BMI = weight [kg] / (height [m])^2; height is stored in cm, hence /100.
data$BMI <- data$weight/((data$height/100))^2
# Standard BMI classes; intervals are left-closed (right = FALSE), e.g.
# a BMI of exactly 25 falls into "Overweight".
data$BMI_class <- cut(data$BMI,
                      breaks = c(0, 18.5, 25, 30, 35, 40, Inf),
                      labels = c("Underweight", "Normal weight", "Overweight", "Obesity Class 1", "Obesity Class 2", "Obesity Class 3"),
                      right = FALSE)
#display in numbers and plot
table(data$BMI_class)
plot(data$BMI_class)
|
# This file provides a read_data() function, which:
# - Reads in the .csv datasource in the /data subfolder
# - Performs any necessary transformations on the data
# - Returns the data as a data.table
library(data.table)
library(lubridate)
library(magrittr)
library(stringr)
# Define the columns we expect to find when reading the data
# (used by read_scales_data() to validate the .csv header).
expected_cols <- c("Time of Measurement", "Weight", "BMI", "Body Fat",
                   "Fat-free Body Weight", "Subcutaneous Fat", "Visceral Fat",
                   "Body Water", "Skeletal Muscle", "Muscle Mass", "Bone Mass",
                   "Protein", "BMR", "Metabolic Age")
# Define the columns that contain numeric data
# (every expected column except the measurement timestamp).
data_cols <- c("Weight", "BMI", "Body Fat", "Fat-free Body Weight",
               "Subcutaneous Fat", "Visceral Fat", "Body Water",
               "Skeletal Muscle", "Muscle Mass", "Bone Mass",
               "Protein", "BMR", "Metabolic Age")
scales_char_to_numeric <- function(string){
  #' Takes a character value from the scales data, removes any description
  #' of the unit (e.g. kgs, kcal, %) and returns the numeric value.
  #' Percentage values are returned as decimals (e.g. "25%" -> 0.25).
  #'
  #' @param string the character vector to be turned into numeric values
  # Remove numeric values and decimal point to identify the character suffix
  suffix <- str_remove_all(string, "[[[:digit:]]\\.]")
  # If the suffix is unrecognised, throw an error
  expected_suffixes <- c("%", "kg", "kcal", "")
  if(!all(suffix %in% expected_suffixes)){
    # BUGFIX: derive the offending suffixes from expected_suffixes instead
    # of a second hard-coded copy of the list, so the two cannot drift
    # apart; unique() avoids repeating the same suffix in the message.
    error_suffixes <- unique(suffix[!(suffix %in% expected_suffixes)])
    stop(
      paste0(
        c("Unrecognised character suffixes: ", error_suffixes),
        collapse=" "
      ),
      call. = FALSE
    )
  }
  # Remove suffix from string if it exists and isn't blank
  string[suffix != ""] <- str_remove_all(
    string[suffix != ""],
    paste(expected_suffixes[expected_suffixes != ""], collapse="|")
  )
  # Convert to numeric
  numeric_out <- as.numeric(string)
  # If value was a percent, return as a decimal
  numeric_out[suffix == "%"] <- numeric_out[suffix == "%"]/100
  return(numeric_out)
}
read_scales_data <- function(directory="data", expected_cols_=expected_cols){
  #' 1) Looks for any .csv files in the data directory, and reads in the first
  #'    instance.
  #' 2) Checks whether the read data matches the expected columns.
  #' 3) Returns data as a data.table
  #'
  #' @param directory The directory in which to look for data
  #' @param expected_cols_ A vector of column names the data should have
  # Find all .csv files in the specified directory
  files <- list.files(directory, pattern="csv")
  # Throw error if none are found
  if(length(files) <= 0){stop(paste0("No .csv files found in /", directory))}
  # Read first .csv file; file.path() joins directory and file portably
  data <- fread(file.path(directory, files[1]))
  # Check the column names are correct, if not throw an error.
  # BUGFIX: the error message previously indexed the global expected_cols
  # rather than the expected_cols_ argument, so a caller-supplied column
  # list would report the wrong missing columns.
  if(!all(expected_cols_ %in% colnames(data))){
    stop(
      paste0(
        c(
          "The following columns are missing in the data:",
          expected_cols_[!(expected_cols_ %in% colnames(data))]
        ), collapse=" "
      )
    )
  }
  return(data)
}
clean_scales_data <- function(data, data_cols_=data_cols){
  #' Takes the result of the read_scales_data() function and applies necessary
  #' transformations for the dashboard.
  #'
  #' @param data The output of the read_scales_data() function
  #' @param data_cols_ A vector of data columns to convert to numeric; the
  #'   values may carry a unit suffix, e.g. kg
  # Throw away rows with NA values, this can happen if the scales don't
  # successfully read impedence data
  data <- na.omit(data)
  # Turn the time into a proper datetime format, and rename as "Time"
  data[, `Time of Measurement` := as_datetime(
    `Time of Measurement`,
    format="%b %d, %Y %I:%M:%S %p"
  )] %>% setnames(., old = "Time of Measurement", new = "Time")
  # Remove suffixes from data columns and transform to numeric.
  # BUGFIX: assign back to data_cols_ (the argument), not the global
  # data_cols; previously a caller-supplied subset was read via .SDcols
  # but the results were written to the full global column list.
  data[,
    (data_cols_) := lapply(.SD, scales_char_to_numeric),
    .SDcols = data_cols_
  ]
  return(data[])
}
add_metrics <- function(data){
  #' Takes cleaned scales data and adds additional metrics which are functions
  #' of existing metrics
  #'
  #' @param data The output of clean_scales_data()
  # Derive absolute body fat plus relative muscle and bone mass; values are
  # rounded to 7 decimal places, matching the precision of the source data.
  data[, `Body Fat kg` := round(`Body Fat` * Weight, 7)]
  data[, `Muscle Mass %` := round(`Muscle Mass` / Weight, 7)]
  data[, `Bone Mass %` := round(`Bone Mass` / Weight, 7)]
  return(data[])
}
get_scales_data <- function(directory="data"){
  #' Reads the scales data, applys the cleaning function, adds metrics,
  #' then returns the resulting data.table
  raw <- read_scales_data(directory=directory)
  cleaned <- clean_scales_data(raw)
  add_metrics(cleaned)
}
|
/health/read_scales_data.R
|
no_license
|
thecasper2/health_dashboard
|
R
| false
| false
| 4,902
|
r
|
# This file provides a read_data() function, which:
# - Reads in the .csv datasource in the /data subfolder
# - Performs any necessary transformations on the data
# - Returns the data as a data.table
library(data.table)
library(lubridate)
library(magrittr)
library(stringr)
# Define the columns we expect to find when reading the data
# (used by read_scales_data() to validate the .csv header).
expected_cols <- c("Time of Measurement", "Weight", "BMI", "Body Fat",
                   "Fat-free Body Weight", "Subcutaneous Fat", "Visceral Fat",
                   "Body Water", "Skeletal Muscle", "Muscle Mass", "Bone Mass",
                   "Protein", "BMR", "Metabolic Age")
# Define the columns that contain numeric data
# (every expected column except the measurement timestamp).
data_cols <- c("Weight", "BMI", "Body Fat", "Fat-free Body Weight",
               "Subcutaneous Fat", "Visceral Fat", "Body Water",
               "Skeletal Muscle", "Muscle Mass", "Bone Mass",
               "Protein", "BMR", "Metabolic Age")
scales_char_to_numeric <- function(string){
  #' Takes a character value from the scales data, removes any description
  #' of the unit (e.g. kgs, kcal, %) and returns the numeric value.
  #' Percentage values are returned as decimals (e.g. "25%" -> 0.25).
  #'
  #' @param string the character vector to be turned into numeric values
  # Remove numeric values and decimal point to identify the character suffix
  suffix <- str_remove_all(string, "[[[:digit:]]\\.]")
  # If the suffix is unrecognised, throw an error
  expected_suffixes <- c("%", "kg", "kcal", "")
  if(!all(suffix %in% expected_suffixes)){
    # BUGFIX: derive the offending suffixes from expected_suffixes instead
    # of a second hard-coded copy of the list, so the two cannot drift
    # apart; unique() avoids repeating the same suffix in the message.
    error_suffixes <- unique(suffix[!(suffix %in% expected_suffixes)])
    stop(
      paste0(
        c("Unrecognised character suffixes: ", error_suffixes),
        collapse=" "
      ),
      call. = FALSE
    )
  }
  # Remove suffix from string if it exists and isn't blank
  string[suffix != ""] <- str_remove_all(
    string[suffix != ""],
    paste(expected_suffixes[expected_suffixes != ""], collapse="|")
  )
  # Convert to numeric
  numeric_out <- as.numeric(string)
  # If value was a percent, return as a decimal
  numeric_out[suffix == "%"] <- numeric_out[suffix == "%"]/100
  return(numeric_out)
}
read_scales_data <- function(directory="data", expected_cols_=expected_cols){
  #' 1) Looks for any .csv files in the data directory, and reads in the first
  #'    instance.
  #' 2) Checks whether the read data matches the expected columns.
  #' 3) Returns data as a data.table
  #'
  #' @param directory The directory in which to look for data
  #' @param expected_cols_ A vector of column names the data should have
  # Find all .csv files in the specified directory
  files <- list.files(directory, pattern="csv")
  # Throw error if none are found
  if(length(files) <= 0){stop(paste0("No .csv files found in /", directory))}
  # Read first .csv file; file.path() joins directory and file portably
  data <- fread(file.path(directory, files[1]))
  # Check the column names are correct, if not throw an error.
  # BUGFIX: the error message previously indexed the global expected_cols
  # rather than the expected_cols_ argument, so a caller-supplied column
  # list would report the wrong missing columns.
  if(!all(expected_cols_ %in% colnames(data))){
    stop(
      paste0(
        c(
          "The following columns are missing in the data:",
          expected_cols_[!(expected_cols_ %in% colnames(data))]
        ), collapse=" "
      )
    )
  }
  return(data)
}
clean_scales_data <- function(data, data_cols_=data_cols){
  #' Takes the result of the read_scales_data() function and applies necessary
  #' transformations for the dashboard.
  #'
  #' @param data The output of the read_scales_data() function
  #' @param data_cols_ A vector of data columns to convert to numeric; the
  #'   values may carry a unit suffix, e.g. kg
  # Throw away rows with NA values, this can happen if the scales don't
  # successfully read impedence data
  data <- na.omit(data)
  # Turn the time into a proper datetime format, and rename as "Time"
  data[, `Time of Measurement` := as_datetime(
    `Time of Measurement`,
    format="%b %d, %Y %I:%M:%S %p"
  )] %>% setnames(., old = "Time of Measurement", new = "Time")
  # Remove suffixes from data columns and transform to numeric.
  # BUGFIX: assign back to data_cols_ (the argument), not the global
  # data_cols; previously a caller-supplied subset was read via .SDcols
  # but the results were written to the full global column list.
  data[,
    (data_cols_) := lapply(.SD, scales_char_to_numeric),
    .SDcols = data_cols_
  ]
  return(data[])
}
add_metrics <- function(data){
  #' Takes cleaned scales data and adds additional metrics which are functions
  #' of existing metrics
  #'
  #' @param data The output of clean_scales_data()
  # Derive absolute body fat plus relative muscle and bone mass; values are
  # rounded to 7 decimal places, matching the precision of the source data.
  data[, `Body Fat kg` := round(`Body Fat` * Weight, 7)]
  data[, `Muscle Mass %` := round(`Muscle Mass` / Weight, 7)]
  data[, `Bone Mass %` := round(`Bone Mass` / Weight, 7)]
  return(data[])
}
get_scales_data <- function(directory="data"){
  #' Reads the scales data, applys the cleaning function, adds metrics,
  #' then returns the resulting data.table
  raw <- read_scales_data(directory=directory)
  cleaned <- clean_scales_data(raw)
  add_metrics(cleaned)
}
|
fitPlot <- function(country="France", file=NULL, file.test=NULL, nb.day=2, plot.tau=F, plot.pred=T, plot.pred0=T,
                    plot.day=T, log.scale=F, level=0.9, pred.int=F, conf.int=F) {
  # Build two ggplot objects for one country's epidemic model fit:
  #   pl.R0  -- effective reproduction number (Reff) over time
  #   pl.fit -- observed counts vs model predictions (confirmed / deaths)
  # The loaded .RData file is expected to supply the objects used below:
  # R0, d (observed data), dc (predictions) and tau.est (change-point days)
  # -- NOTE(review): inferred from usage here; confirm against the data files.
  #
  # Arguments:
  #   country    country name, used to build the default data file path
  #   file       explicit .RData path (default: data/<country>.RData)
  #   file.test  optional .RData with held-out data, drawn as red points
  #   nb.day     number of forecast days beyond the observed period
  #   plot.tau   draw vertical dashed lines at estimated change points
  #   plot.pred, plot.pred0  which prediction series to draw
  #   plot.day   daily counts ("dy") if TRUE, cumulated counts ("y") if FALSE
  #   log.scale  use a log10 y-axis on the fit plot
  #   level      coverage level of the normal-approximation prediction band
  #   pred.int   draw the prediction interval ribbon
  #   conf.int   currently unused (the corresponding code is commented out)
  if (is.null(file))
    file <- gsub(" ","",paste0("data/",country,".RData"))
  # if (!is.null(file.train))
  #   load(file.train)
  # else
  load(file)
  # Keep Reff only up to the end of the forecast window.
  R0 <- subset(R0, day<=max(d$day)+nb.day)
  # Reff over time, with the epidemic threshold Reff = 1 dashed in blue.
  pl1 <- ggplot(R0) +
    xlab("date") + geom_hline(aes(yintercept=1), colour="blue", linetype="dashed") +
    geom_line(aes(date, r0), color="red", size=1) +
    # scale_y_continuous(name="Reff", breaks=c(1))
    scale_y_continuous(name="Reff")
  M.est <- length(tau.est)-1
  if (plot.day) {
    # Daily increments: observed (Dmo) and predicted (Dmc) series.
    Dmo <- subset(d, variable=="dy")
    Dmc <- subset(dc, variable=="dy" & day<=max(d$day)+nb.day)
    pl2 <- ggplot(Dmo) +
      xlab("") + ylab("") + expand_limits(y=0) +
      facet_wrap( ~type, scales = "free_y", ncol=1)
    if (pred.int) {
      # Normal-approximation prediction band: pred +/- z * mean(sd),
      # computed separately for confirmed cases and deaths.
      sd <- Dmo %>% group_by(type) %>% summarize(sd=mean(sd))
      Dmc$lwr <- Dmc$pred + qnorm((1-level)/2)*subset(sd, type=="confirmed")$sd
      Dmc$upr <- Dmc$pred + qnorm((1+level)/2)*subset(sd, type=="confirmed")$sd
      id <- which(Dmc$type=="deaths")
      Dmc$lwr[id] <- Dmc$pred[id] + qnorm((1-level)/2)*subset(sd, type=="deaths")$sd
      Dmc$upr[id] <- Dmc$pred[id] + qnorm((1+level)/2)*subset(sd, type=="deaths")$sd
      # Dmc$lwr <- subset(Qu[['pred.pred']], variable=="dy" & day<=max(d$day)+nb.day)[,2]
      # Dmc$upr <- subset(Qu[['pred.pred']], variable=="dy" & day<=max(d$day)+nb.day)[,4]
      pl2 <- pl2 + geom_ribbon(data=Dmc, aes(x=date, ymin = lwr, ymax = upr),
                               alpha=0.1, inherit.aes=F, fill="blue")
    }
    # if (conf.int) {
    #   Dmc$lwr <- subset(Qu[['pred0.conf']], variable=="dy" & day<=max(d$day)+nb.day)[,2]
    #   Dmc$upr <- subset(Qu[['pred0.conf']], variable=="dy" & day<=max(d$day)+nb.day)[,4]
    #   pl2 <- pl2 + geom_ribbon(data=Dmc, aes(x=date, ymin = lwr, ymax = upr),
    #                            alpha=0.4, inherit.aes=F, fill="#339900")
    # }
    if (plot.pred0) {
      # Green prediction line: the "pred" series if plot.pred, else "pred0".
      if (plot.pred)
        pl2 <- pl2 + geom_line(data=Dmc, aes(date,pred, color="#339900"), size=0.75)
      else
        pl2 <- pl2 + geom_line(data=Dmc, aes(date,pred0, color="#339900"), size=0.75)
    }
    if (plot.pred0)
      # With predictions shown, add a manual legend distinguishing the
      # prediction line from the observed data points.
      pl2 <- pl2 + geom_point( aes(date, value, color="#993399"), size=2) +
        scale_colour_manual(values = c( "#339900", "#993399"),
                            labels= c("prediction", "data"),
                            guide = guide_legend(override.aes = list(linetype = c( "solid", "blank"), shape = c(NA, 16)))) +
        theme(legend.title = element_blank(), legend.position=c(0.1, 0.9))
    else
      pl2 <- pl2 + geom_point( aes(date, value), color="#993399", size=2)
  } else {
    # Cumulated counts ("y"): simpler plot, no intervals and no legend.
    Dmo <- subset(d, variable=="y")
    Dmc <- subset(dc, variable=="y" & day<=max(d$day)+nb.day)
    pl2 <- ggplot(Dmo) +
      geom_line(data=Dmc, aes(date,pred), color="#339900", size=0.75) +
      geom_point( aes(date, value), color="#993399", size=2) +
      xlab("") + ylab("") + expand_limits(y=0) +
      facet_wrap( ~type, scales = "free_y")
  }
  if (plot.tau & M.est>1) {
    # Drop the boundary change points, keep only the interior ones, and
    # replicate them across all type/variable facets for geom_vline.
    tau.est <- tau.est[2: (length(tau.est)-1)]
    Tau.est1 <- data.frame(day=tau.est, type="confirmed", variable="y")
    Tau.est1 <- rbind(Tau.est1, data.frame(day=tau.est, type="confirmed", variable="dy"))
    Tau.est2 <- data.frame(day=tau.est, type="deaths", variable="y")
    Tau.est2 <- rbind(Tau.est2, data.frame(day=tau.est, type="deaths", variable="dy"))
    Tau.est <- rbind(Tau.est1, Tau.est2)
    Tau.est$group <- as.factor(with(Tau.est, paste(type, variable)))
    levels(Tau.est$group) <- c("Daily number of confirmed cases", "Cumulated number of confirmed cases",
                               "Daily number of deaths", "Cumulated number of deaths")
    Tau.est$group = factor(Tau.est$group,levels(Tau.est$group)[c(2, 1, 4, 3)])
    # Day 0 of the day counter corresponds to 2020-01-21.
    Tau.est$date <- as.Date(Tau.est$day, "2020-01-21")
    pl1 <- pl1 + geom_vline(data=Tau.est, aes(xintercept=date), color="red", linetype="dashed")
    pl2 <- pl2 + geom_vline(data=Tau.est, aes(xintercept=date), color="red", linetype="dashed")
  }
  if (log.scale)
    pl2 <- pl2 + scale_y_log10()
  if (!is.null(file.test)) {
    # Overlay held-out (test period) observations as red points; day.max is
    # captured before load() overwrites d with the test data set.
    day.max <- max(d$day)
    load(file.test)
    if (plot.day)
      pl2 <- pl2 + geom_point(data=subset(d, variable=='dy' & day>day.max), aes(date, value), color="red", size=3)
    else
      pl2 <- pl2 + geom_point(data=subset(d, variable=='y' & day>day.max), aes(date, value), color="red", size=3)
  }
  return(list(pl.R0=pl1, pl.fit=pl2))
}
|
/R/fitPlot.R
|
no_license
|
MarcLavielle/covidix
|
R
| false
| false
| 4,676
|
r
|
fitPlot <- function(country="France", file=NULL, file.test=NULL, nb.day=2, plot.tau=F, plot.pred=T, plot.pred0=T,
                    plot.day=T, log.scale=F, level=0.9, pred.int=F, conf.int=F) {
  # Build two ggplot objects for one country's epidemic model fit:
  #   pl.R0  -- effective reproduction number (Reff) over time
  #   pl.fit -- observed counts vs model predictions (confirmed / deaths)
  # The loaded .RData file is expected to supply the objects used below:
  # R0, d (observed data), dc (predictions) and tau.est (change-point days)
  # -- NOTE(review): inferred from usage here; confirm against the data files.
  #
  # Arguments:
  #   country    country name, used to build the default data file path
  #   file       explicit .RData path (default: data/<country>.RData)
  #   file.test  optional .RData with held-out data, drawn as red points
  #   nb.day     number of forecast days beyond the observed period
  #   plot.tau   draw vertical dashed lines at estimated change points
  #   plot.pred, plot.pred0  which prediction series to draw
  #   plot.day   daily counts ("dy") if TRUE, cumulated counts ("y") if FALSE
  #   log.scale  use a log10 y-axis on the fit plot
  #   level      coverage level of the normal-approximation prediction band
  #   pred.int   draw the prediction interval ribbon
  #   conf.int   currently unused (the corresponding code is commented out)
  if (is.null(file))
    file <- gsub(" ","",paste0("data/",country,".RData"))
  # if (!is.null(file.train))
  #   load(file.train)
  # else
  load(file)
  # Keep Reff only up to the end of the forecast window.
  R0 <- subset(R0, day<=max(d$day)+nb.day)
  # Reff over time, with the epidemic threshold Reff = 1 dashed in blue.
  pl1 <- ggplot(R0) +
    xlab("date") + geom_hline(aes(yintercept=1), colour="blue", linetype="dashed") +
    geom_line(aes(date, r0), color="red", size=1) +
    # scale_y_continuous(name="Reff", breaks=c(1))
    scale_y_continuous(name="Reff")
  M.est <- length(tau.est)-1
  if (plot.day) {
    # Daily increments: observed (Dmo) and predicted (Dmc) series.
    Dmo <- subset(d, variable=="dy")
    Dmc <- subset(dc, variable=="dy" & day<=max(d$day)+nb.day)
    pl2 <- ggplot(Dmo) +
      xlab("") + ylab("") + expand_limits(y=0) +
      facet_wrap( ~type, scales = "free_y", ncol=1)
    if (pred.int) {
      # Normal-approximation prediction band: pred +/- z * mean(sd),
      # computed separately for confirmed cases and deaths.
      sd <- Dmo %>% group_by(type) %>% summarize(sd=mean(sd))
      Dmc$lwr <- Dmc$pred + qnorm((1-level)/2)*subset(sd, type=="confirmed")$sd
      Dmc$upr <- Dmc$pred + qnorm((1+level)/2)*subset(sd, type=="confirmed")$sd
      id <- which(Dmc$type=="deaths")
      Dmc$lwr[id] <- Dmc$pred[id] + qnorm((1-level)/2)*subset(sd, type=="deaths")$sd
      Dmc$upr[id] <- Dmc$pred[id] + qnorm((1+level)/2)*subset(sd, type=="deaths")$sd
      # Dmc$lwr <- subset(Qu[['pred.pred']], variable=="dy" & day<=max(d$day)+nb.day)[,2]
      # Dmc$upr <- subset(Qu[['pred.pred']], variable=="dy" & day<=max(d$day)+nb.day)[,4]
      pl2 <- pl2 + geom_ribbon(data=Dmc, aes(x=date, ymin = lwr, ymax = upr),
                               alpha=0.1, inherit.aes=F, fill="blue")
    }
    # if (conf.int) {
    #   Dmc$lwr <- subset(Qu[['pred0.conf']], variable=="dy" & day<=max(d$day)+nb.day)[,2]
    #   Dmc$upr <- subset(Qu[['pred0.conf']], variable=="dy" & day<=max(d$day)+nb.day)[,4]
    #   pl2 <- pl2 + geom_ribbon(data=Dmc, aes(x=date, ymin = lwr, ymax = upr),
    #                            alpha=0.4, inherit.aes=F, fill="#339900")
    # }
    if (plot.pred0) {
      # Green prediction line: the "pred" series if plot.pred, else "pred0".
      if (plot.pred)
        pl2 <- pl2 + geom_line(data=Dmc, aes(date,pred, color="#339900"), size=0.75)
      else
        pl2 <- pl2 + geom_line(data=Dmc, aes(date,pred0, color="#339900"), size=0.75)
    }
    if (plot.pred0)
      # With predictions shown, add a manual legend distinguishing the
      # prediction line from the observed data points.
      pl2 <- pl2 + geom_point( aes(date, value, color="#993399"), size=2) +
        scale_colour_manual(values = c( "#339900", "#993399"),
                            labels= c("prediction", "data"),
                            guide = guide_legend(override.aes = list(linetype = c( "solid", "blank"), shape = c(NA, 16)))) +
        theme(legend.title = element_blank(), legend.position=c(0.1, 0.9))
    else
      pl2 <- pl2 + geom_point( aes(date, value), color="#993399", size=2)
  } else {
    # Cumulated counts ("y"): simpler plot, no intervals and no legend.
    Dmo <- subset(d, variable=="y")
    Dmc <- subset(dc, variable=="y" & day<=max(d$day)+nb.day)
    pl2 <- ggplot(Dmo) +
      geom_line(data=Dmc, aes(date,pred), color="#339900", size=0.75) +
      geom_point( aes(date, value), color="#993399", size=2) +
      xlab("") + ylab("") + expand_limits(y=0) +
      facet_wrap( ~type, scales = "free_y")
  }
  if (plot.tau & M.est>1) {
    # Drop the boundary change points, keep only the interior ones, and
    # replicate them across all type/variable facets for geom_vline.
    tau.est <- tau.est[2: (length(tau.est)-1)]
    Tau.est1 <- data.frame(day=tau.est, type="confirmed", variable="y")
    Tau.est1 <- rbind(Tau.est1, data.frame(day=tau.est, type="confirmed", variable="dy"))
    Tau.est2 <- data.frame(day=tau.est, type="deaths", variable="y")
    Tau.est2 <- rbind(Tau.est2, data.frame(day=tau.est, type="deaths", variable="dy"))
    Tau.est <- rbind(Tau.est1, Tau.est2)
    Tau.est$group <- as.factor(with(Tau.est, paste(type, variable)))
    levels(Tau.est$group) <- c("Daily number of confirmed cases", "Cumulated number of confirmed cases",
                               "Daily number of deaths", "Cumulated number of deaths")
    Tau.est$group = factor(Tau.est$group,levels(Tau.est$group)[c(2, 1, 4, 3)])
    # Day 0 of the day counter corresponds to 2020-01-21.
    Tau.est$date <- as.Date(Tau.est$day, "2020-01-21")
    pl1 <- pl1 + geom_vline(data=Tau.est, aes(xintercept=date), color="red", linetype="dashed")
    pl2 <- pl2 + geom_vline(data=Tau.est, aes(xintercept=date), color="red", linetype="dashed")
  }
  if (log.scale)
    pl2 <- pl2 + scale_y_log10()
  if (!is.null(file.test)) {
    # Overlay held-out (test period) observations as red points; day.max is
    # captured before load() overwrites d with the test data set.
    day.max <- max(d$day)
    load(file.test)
    if (plot.day)
      pl2 <- pl2 + geom_point(data=subset(d, variable=='dy' & day>day.max), aes(date, value), color="red", size=3)
    else
      pl2 <- pl2 + geom_point(data=subset(d, variable=='y' & day>day.max), aes(date, value), color="red", size=3)
  }
  return(list(pl.R0=pl1, pl.fit=pl2))
}
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{annotate_assembly}
\alias{annotate_assembly}
\title{match assembled transcripts to annotated transcripts}
\usage{
annotate_assembly(assembled, annotated)
}
\arguments{
\item{assembled}{\code{GRangesList} object representing assembled transcripts}
\item{annotated}{\code{GRangesList} object representing annotated transcripts}
}
\value{
data frame, where each row contains \code{assembledInd} and \code{annotatedInd} (indexes of overlapping transcripts in \code{assembled} and \code{annotated}), and the percent overlap between the two transcripts.
}
\description{
match assembled transcripts to annotated transcripts
}
\details{
If \code{gown} is a \code{ballgown} object, \code{assembled} can be \code{structure(gown)$trans} (or any subset). You can generate a \code{GRangesList} object containing annotated transcripts from a gtf file using the \code{\link{gffReadGR}} function and setting \code{splitByTranscripts=TRUE}.
}
\author{
Alyssa Frazee
}
|
/man/annotate_assembly.Rd
|
no_license
|
gpertea/ballgown
|
R
| false
| false
| 1,012
|
rd
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{annotate_assembly}
\alias{annotate_assembly}
\title{match assembled transcripts to annotated transcripts}
\usage{
annotate_assembly(assembled, annotated)
}
\arguments{
\item{assembled}{\code{GRangesList} object representing assembled transcripts}
\item{annotated}{\code{GRangesList} object representing annotated transcripts}
}
\value{
data frame, where each row contains \code{assembledInd} and \code{annotatedInd} (indexes of overlapping transcripts in \code{assembled} and \code{annotated}), and the percent overlap between the two transcripts.
}
\description{
match assembled transcripts to annotated transcripts
}
\details{
If \code{gown} is a \code{ballgown} object, \code{assembled} can be \code{structure(gown)$trans} (or any subset). You can generate a \code{GRangesList} object containing annotated transcripts from a gtf file using the \code{\link{gffReadGR}} function and setting \code{splitByTranscripts=TRUE}.
}
\author{
Alyssa Frazee
}
|
#!/usr/bin/env R
# Draw a three-set Venn diagram from a tab-separated file whose three
# columns hold the members of each category.
args <- commandArgs(TRUE)
library(grid)
library(futile.logger)
library(VennDiagram)
#usage: Rscript venn_3categories.R commonDown commonDown.svg svg 15 15 2.5
venn_plot <- function(data_file, out_name, out_type, out_height, out_width, text_cex){
  #' Render a three-set Venn diagram to an image file.
  #'
  #' @param data_file data frame with three columns, one per category
  #' @param out_name output file name
  #' @param out_type output image type, e.g. "svg" or "png"
  #' @param out_height image height (units depend on imagetype)
  #' @param out_width image width (units depend on imagetype)
  #' @param text_cex size of the region labels and category names
  col_names <- colnames(data_file)
  # BUGFIX: levels() returns NULL for character columns, which is what
  # read.table() produces by default since R 4.0 (stringsAsFactors=FALSE),
  # yielding empty sets. Sorting the unique values reproduces the old
  # factor-level behaviour and works for both factor and character columns.
  A = sort(unique(as.character(data_file[, col_names[1]])))
  B = sort(unique(as.character(data_file[, col_names[2]])))
  C = sort(unique(as.character(data_file[, col_names[3]])))
  venn.diagram(
    x = list(A = A, B = B, C = C),
    category.names = col_names,
    filename = out_name,
    imagetype = out_type, #"svg", "png"
    height = out_height, #15, 3000
    width = out_width, #15, 3000
    col = "transparent",
    fill = c("cornflowerblue", "seagreen3", "orange"),
    alpha = 0.50,
    label.col = c("white", "white", "white", "white",
                  "white", "white", "white"),
    cex = text_cex, #2.5, 1
    fontfamily = "serif",
    fontface = "bold",
    cat.col = c("cornflowerblue", "darkgreen", "orange"),
    cat.pos = c(-60,60,180),
    cat.dist = c(0.1,0.1,0.1),
    cat.fontfamily = "serif",
    cat.cex = text_cex, #2.5, 1
    rotation.degree = 0,
    margin = 0.2,
    force.unique = TRUE)
}
# Read the tab-separated input; quote="" treats quote characters literally.
data_file <- read.table(args[1],header=TRUE,sep="\t",quote="")
#venn_plot(data_file, "noncoding.svg", "svg", 15, 15, 2.5)
#venn_plot(data_file, "noncoding.png", "png", 3000, 3000, 1)
venn_plot(data_file, args[2], args[3], as.numeric(args[4]), as.numeric(args[5]), as.numeric(args[6]))
|
/scripts/vennCategory/venn_3categories.R
|
no_license
|
ZhikunWu/Bioinformatic-resources
|
R
| false
| false
| 1,453
|
r
|
#!/usr/bin/env R
# Draw a three-set Venn diagram from a tab-separated file whose three
# columns hold the members of each category.
args <- commandArgs(TRUE)
library(grid)
library(futile.logger)
library(VennDiagram)
#usage: Rscript venn_3categories.R commonDown commonDown.svg svg 15 15 2.5
venn_plot <- function(data_file, out_name, out_type, out_height, out_width, text_cex){
  #' Render a three-set Venn diagram to an image file.
  #'
  #' @param data_file data frame with three columns, one per category
  #' @param out_name output file name
  #' @param out_type output image type, e.g. "svg" or "png"
  #' @param out_height image height (units depend on imagetype)
  #' @param out_width image width (units depend on imagetype)
  #' @param text_cex size of the region labels and category names
  col_names <- colnames(data_file)
  # BUGFIX: levels() returns NULL for character columns, which is what
  # read.table() produces by default since R 4.0 (stringsAsFactors=FALSE),
  # yielding empty sets. Sorting the unique values reproduces the old
  # factor-level behaviour and works for both factor and character columns.
  A = sort(unique(as.character(data_file[, col_names[1]])))
  B = sort(unique(as.character(data_file[, col_names[2]])))
  C = sort(unique(as.character(data_file[, col_names[3]])))
  venn.diagram(
    x = list(A = A, B = B, C = C),
    category.names = col_names,
    filename = out_name,
    imagetype = out_type, #"svg", "png"
    height = out_height, #15, 3000
    width = out_width, #15, 3000
    col = "transparent",
    fill = c("cornflowerblue", "seagreen3", "orange"),
    alpha = 0.50,
    label.col = c("white", "white", "white", "white",
                  "white", "white", "white"),
    cex = text_cex, #2.5, 1
    fontfamily = "serif",
    fontface = "bold",
    cat.col = c("cornflowerblue", "darkgreen", "orange"),
    cat.pos = c(-60,60,180),
    cat.dist = c(0.1,0.1,0.1),
    cat.fontfamily = "serif",
    cat.cex = text_cex, #2.5, 1
    rotation.degree = 0,
    margin = 0.2,
    force.unique = TRUE)
}
# Read the tab-separated input; quote="" treats quote characters literally.
data_file <- read.table(args[1],header=TRUE,sep="\t",quote="")
#venn_plot(data_file, "noncoding.svg", "svg", 15, 15, 2.5)
#venn_plot(data_file, "noncoding.png", "png", 3000, 3000, 1)
venn_plot(data_file, args[2], args[3], as.numeric(args[4]), as.numeric(args[5]), as.numeric(args[6]))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{getHzhist}
\alias{getHzhist}
\title{Projected death risks}
\usage{
getHzhist(yr0, age0, yr1, M)
}
\arguments{
\item{yr0}{initial year}
\item{age0}{initial age}
\item{yr1}{final year}
\item{M}{matrix to extrapolate}
}
\value{
A vector of hazards, one for each year
}
\description{
Get diagonal hazard
}
\details{
TODO
}
\author{
Pete Dodd
}
|
/man/getHzhist.Rd
|
no_license
|
petedodd/discly
|
R
| false
| true
| 424
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{getHzhist}
\alias{getHzhist}
\title{Projected death risks}
\usage{
getHzhist(yr0, age0, yr1, M)
}
\arguments{
\item{yr0}{initial year}
\item{age0}{initial age}
\item{yr1}{final year}
\item{M}{matrix to extrapolate}
}
\value{
A vector of hazards, one for each year
}
\description{
Get diagonal hazard
}
\details{
TODO
}
\author{
Pete Dodd
}
|
# --- Effect of German govt spending shock on Italian exports
# Data period: 1980q1-2018q4
# 95% and 68% confidence intervals
# h = 4, 8 and 12
# OLS with left-hand side in growth rates and 4 lags of x(t-1)
# Builds data2/data3 (shock series and control variables) used below.
source('~/Studie/MSc ECO/Period 5-6 MSc thesis/MSc thesis RStudio project/Scripts/Spillovers IT and DE v4 1.R')
# Load packages
library(ggplot2)
library(gridExtra)
library(dplyr)
library(data.table)
library(lmtest)
library(sandwich)
# Leads t+1..t+12 of Italian real imports; data.table::shift() pads with NA.
rmIT.l <- data.frame(shift(data2$rmIT, n = 1:12, type = "lead"))
names(rmIT.l) = c("rmIT.l1", "rmIT.l2", "rmIT.l3", "rmIT.l4", "rmIT.l5", "rmIT.l6",
                  "rmIT.l7", "rmIT.l8", "rmIT.l9", "rmIT.l10", "rmIT.l11", "rmIT.l12")
# Leads t+1..t+12 of Italian real exports.
rxIT.l <- data.frame(shift(data2$rxIT, n = 1:12, type = "lead"))
names(rxIT.l) = c("rxIT.l1", "rxIT.l2", "rxIT.l3", "rxIT.l4", "rxIT.l5", "rxIT.l6",
                  "rxIT.l7", "rxIT.l8", "rxIT.l9", "rxIT.l10", "rxIT.l11", "rxIT.l12")
# Lags t-1..t-4 of Italian real imports and exports.
l.rmIT <- data.frame(shift(data2$rmIT, n = 1:4, type = "lag"))
names(l.rmIT) = c("l1.rmIT", "l2.rmIT", "l3.rmIT", "l4.rmIT")
l.rxIT <- data.frame(shift(data2$rxIT, n = 1:4, type = "lag"))
names(l.rxIT) = c("l1.rxIT", "l2.rxIT", "l3.rxIT", "l4.rxIT")
# Same lead/lag construction for the German series.
rmDE.l <- data.frame(shift(data2$rmDE, n = 1:12, type = "lead"))
names(rmDE.l) = c("rmDE.l1", "rmDE.l2", "rmDE.l3", "rmDE.l4", "rmDE.l5", "rmDE.l6",
                  "rmDE.l7", "rmDE.l8", "rmDE.l9", "rmDE.l10", "rmDE.l11", "rmDE.l12")
rxDE.l <- data.frame(shift(data2$rxDE, n = 1:12, type = "lead"))
names(rxDE.l) = c("rxDE.l1", "rxDE.l2", "rxDE.l3", "rxDE.l4", "rxDE.l5", "rxDE.l6",
                  "rxDE.l7", "rxDE.l8", "rxDE.l9", "rxDE.l10", "rxDE.l11", "rxDE.l12")
l.rmDE <- data.frame(shift(data2$rmDE, n = 1:4, type = "lag"))
names(l.rmDE) = c("l1.rmDE", "l2.rmDE", "l3.rmDE", "l4.rmDE")
l.rxDE <- data.frame(shift(data2$rxDE, n = 1:4, type = "lag"))
names(l.rxDE) = c("l1.rxDE", "l2.rxDE", "l3.rxDE", "l4.rxDE")
# Scale each country's shock by its one-quarter-lagged real exports, then
# standardise to unit standard deviation; the "*3" variants are the same
# series divided by 100.
data3$shockDE2 <- (data3$shockDE / unlist(l.rxDE[1])) / sd((data3$shockDE / unlist(l.rxDE[1])), na.rm = TRUE)
data3$shockDE3 <- data3$shockDE2 / 100
data3$shockIT2 <- (data3$shockIT / unlist(l.rxIT[1])) / sd((data3$shockIT / unlist(l.rxIT[1])), na.rm = TRUE)
data3$shockIT3 <- data3$shockIT2 / 100
# Combine shocks/controls with the leads and lags built above.
data4 <- cbind(data3, l.rmIT, l.rxIT, rmIT.l, rxIT.l, l.rmDE, l.rxDE, rmDE.l, rxDE.l)
# Drop columns by position.
# NOTE(review): positional column indices (here and in data6[, 209+x] etc.
# below) silently break if any upstream column changes — consider selecting
# by name instead.
data5 <- subset(data4, select = -c(30:32, 35:37, 152:203))
# Impulse-response horizon in quarters.
h <- 12
# -- OLS regressions
# -- Equation 5
# Local-projection LHS at horizon 0: Italian export growth.
# NOTE(review): the denominator is rxIT here but l1.rxIT at every other
# horizon (and in the ITDE analogue below, lhsITDE50) — presumably a typo;
# confirm the intended growth-rate base.
lhsDEIT50 <- (data5$rxIT - data5$l1.rxIT) / data5$rxIT
# Horizons 1..h: (rxIT(t+x) - rxIT(t-1)) / rxIT(t-1); columns 165+x hold
# rxIT.l1..rxIT.l12 (positional — see note above).
lhsDEIT5 <- lapply(1:h, function(x) (data5[, 165+x] - data5$l1.rxIT) / data5$l1.rxIT)
lhsDEIT5 <- data.frame(lhsDEIT5)
names(lhsDEIT5) = paste("lhsDEIT5", 1:h, sep = "")
data6 <- cbind(data5, lhsDEIT50, lhsDEIT5)
# One regression per horizon (0..12): the horizon-x LHS on the German shock
# plus four lags of Italian fiscal/macro controls and the other countries'
# contemporaneous shocks.
DEIT5 <- lapply(1:13, function(x) lm(data6[, 209+x] ~ shockDE2 + l1.debtIT + l1.intIT + l1.lrtrIT + l1.lrgIT + l1.lryITc + l2.debtIT + l2.intIT + l2.lrtrIT + l2.lrgIT + l2.lryITc + l3.debtIT + l3.intIT + l3.lrtrIT + l3.lrgIT + l3.lryITc + l4.debtIT + l4.intIT + l4.lrtrIT + l4.lrgIT + l4.lryITc + shockNL2 + shockIT2 + shockES2 + shockFR2, data = data6))
summariesDEIT5 <- lapply(DEIT5, summary)
# Newey-West (HAC) confidence intervals for each horizon.
DEIT5conf95 <- lapply(DEIT5, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.95)
DEIT5conf68 <- lapply(DEIT5, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.68)
# Row 2 of each interval matrix is the shock coefficient.
DEIT5up95 <- lapply(1:13, function(x) DEIT5conf95[[x]][2,2])
DEIT5low95 <- lapply(1:13, function(x) DEIT5conf95[[x]][2,1])
DEIT5up68 <- lapply(1:13, function(x) DEIT5conf68[[x]][2,2])
DEIT5low68 <- lapply(1:13, function(x) DEIT5conf68[[x]][2,1])
# beta: point estimate of the shock coefficient at each horizon.
betaDEITt <- lapply(summariesDEIT5, function(x) x$coefficients[2,1])
names(betaDEITt) <- paste("betaDEITt", 0:h, sep = "")
# -- Equation 6
# LHS: change in German government spending, scaled by lagged Italian exports.
lhsDEIT60 <- (data6$rgDE - data6$l1.rgDE) / data6$l1.rxIT
lhsDEIT6 <- lapply(1:h, function(x) (data6[, 84+x] - data6$l1.rgDE) / data6$l1.rxIT)
lhsDEIT6 <- data.frame(lhsDEIT6)
names(lhsDEIT6) = paste("lhsDEIT6", 1:h, sep = "")
data6 <- cbind(data6, lhsDEIT60, lhsDEIT6)
# Same regression structure as Equation 5, with German controls and the
# /100-scaled shocks.
DEIT6 <- lapply(1:13, function(x) lm(data6[, 222+x] ~ shockDE3 + l1.debtDE + l1.intDE + l1.lrtrDE + l1.lrgDE + l1.lryDEc + l2.debtDE + l2.intDE + l2.lrtrDE + l2.lrgDE + l2.lryDEc + l3.debtDE + l3.intDE + l3.lrtrDE + l3.lrgDE + l3.lryDEc + l4.debtDE + l4.intDE + l4.lrtrDE + l4.lrgDE + l4.lryDEc + shockNL3 + shockFR3 + shockES3 + shockIT3, data = data6))
summariesDEIT6 <- lapply(DEIT6, summary)
DEIT6conf95 <- lapply(DEIT6, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.95)
DEIT6conf68 <- lapply(DEIT6, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.68)
DEIT6up95 <- lapply(1:13, function(x) DEIT6conf95[[x]][2,2])
DEIT6low95 <- lapply(1:13, function(x) DEIT6conf95[[x]][2,1])
DEIT6up68 <- lapply(1:13, function(x) DEIT6conf68[[x]][2,2])
DEIT6low68 <- lapply(1:13, function(x) DEIT6conf68[[x]][2,1])
# gamma: spending-response coefficient at each horizon.
gammaDEITt <- lapply(summariesDEIT6, function(x) x$coefficients[2,1])
names(gammaDEITt) <- paste("gammaDEITt", 0:h, sep = "")
# -- Cumulative multiplier
# Ratio of cumulated export responses to cumulated spending responses.
# NOTE(review): betaDEITt is a list; cumsum() on a list errors in base R —
# it likely needs as.numeric() like gammaDEITt does. Confirm this line runs.
mDEITtc <- cumsum(betaDEITt) / cumsum(as.numeric(gammaDEITt)); as.numeric(mDEITtc)
# --- Effect of Italian govt spending shock on German exports
# Mirror of the previous section with the country roles swapped.
# -- Equation 5
# LHS at horizon 0: German export growth (note: denominator l1.rxDE here,
# unlike the rxIT used in lhsDEIT50 above).
lhsITDE50 <- (data6$rxDE - data6$l1.rxDE) / data6$l1.rxDE
lhsITDE5 <- lapply(1:h, function(x) (data6[, 197+x] - data6$l1.rxDE) / data6$l1.rxDE)
lhsITDE5 <- data.frame(lhsITDE5)
names(lhsITDE5) = paste("lhsITDE5", 1:h, sep = "")
data6 <- cbind(data6, lhsITDE50, lhsITDE5)
# German export response to the Italian shock, with German controls.
ITDE5 <- lapply(1:13, function(x) lm(data6[, 235+x] ~ shockIT2 + l1.debtDE + l1.intDE + l1.lrtrDE + l1.lrgDE + l1.lryDEc + l2.debtDE + l2.intDE + l2.lrtrDE + l2.lrgDE + l2.lryDEc + l3.debtDE + l3.intDE + l3.lrtrDE + l3.lrgDE + l3.lryDEc + l4.debtDE + l4.intDE + l4.lrtrDE + l4.lrgDE + l4.lryDEc + shockNL2 + shockDE2 + shockFR2 + shockES2, data = data6))
summariesITDE5 <- lapply(ITDE5, summary)
# Newey-West (HAC) confidence intervals; row 2 is the shock coefficient.
ITDE5conf95 <- lapply(ITDE5, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.95)
ITDE5conf68 <- lapply(ITDE5, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.68)
ITDE5up95 <- lapply(1:13, function(x) ITDE5conf95[[x]][2,2])
ITDE5low95 <- lapply(1:13, function(x) ITDE5conf95[[x]][2,1])
ITDE5up68 <- lapply(1:13, function(x) ITDE5conf68[[x]][2,2])
ITDE5low68 <- lapply(1:13, function(x) ITDE5conf68[[x]][2,1])
betaITDEt <- lapply(summariesITDE5, function(x) x$coefficients[2,1])
names(betaITDEt) <- paste("betaITDEt", 0:h, sep = "")
# -- Equation 6
# LHS: change in Italian government spending scaled by lagged German exports.
lhsITDE60 <- (data6$rgIT - data6$l1.rgIT) / data6$l1.rxDE
lhsITDE6 <- lapply(1:h, function(x) (data6[, 96+x] - data6$l1.rgIT) / data6$l1.rxDE)
lhsITDE6 <- data.frame(lhsITDE6)
names(lhsITDE6) = paste("lhsITDE6", 1:h, sep = "")
data6 <- cbind(data6, lhsITDE60, lhsITDE6)
ITDE6 <- lapply(1:13, function(x) lm(data6[, 248+x] ~ shockIT3 + l1.debtIT + l1.intIT + l1.lrtrIT + l1.lrgIT + l1.lryITc + l2.debtIT + l2.intIT + l2.lrtrIT + l2.lrgIT + l2.lryITc + l3.debtIT + l3.intIT + l3.lrtrIT + l3.lrgIT + l3.lryITc + l4.debtIT + l4.intIT + l4.lrtrIT + l4.lrgIT + l4.lryITc + shockDE3 + shockNL3 + shockFR3 + shockES3, data = data6))
summariesITDE6 <- lapply(ITDE6, summary)
ITDE6conf95 <- lapply(ITDE6, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.95)
ITDE6conf68 <- lapply(ITDE6, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.68)
ITDE6up95 <- lapply(1:13, function(x) ITDE6conf95[[x]][2,2])
ITDE6low95 <- lapply(1:13, function(x) ITDE6conf95[[x]][2,1])
ITDE6up68 <- lapply(1:13, function(x) ITDE6conf68[[x]][2,2])
ITDE6low68 <- lapply(1:13, function(x) ITDE6conf68[[x]][2,1])
gammaITDEt <- lapply(summariesITDE6, function(x) x$coefficients[2,1])
names(gammaITDEt) <- paste("gammaITDEt", 0:h, sep = "")
# -- Cumulative multiplier
# NOTE(review): cumsum() on the list betaITDEt errors in base R — likely
# needs as.numeric() like gammaITDEt. Confirm this line runs.
mITDEtc <- cumsum(betaITDEt) / cumsum(as.numeric(gammaITDEt)); as.numeric(mITDEtc)
|
/trade spillovers/Trade spillovers IT and DE v3 1.R
|
no_license
|
mdg9709/spilloversNL
|
R
| false
| false
| 7,759
|
r
|
# --- Effect of German govt spending shock on Italian exports
# Data period: 1980q1-2018q4
# 95% and 68% confidence intervals
# h = 4, 8 and 12
# OLS with left-hand side in growth rates and 4 lags of x(t-1)
# Builds data2/data3 (shock series and control variables) used below.
source('~/Studie/MSc ECO/Period 5-6 MSc thesis/MSc thesis RStudio project/Scripts/Spillovers IT and DE v4 1.R')
# Load packages
library(ggplot2)
library(gridExtra)
library(dplyr)
library(data.table)
library(lmtest)
library(sandwich)
# Leads t+1..t+12 of Italian real imports; data.table::shift() pads with NA.
rmIT.l <- data.frame(shift(data2$rmIT, n = 1:12, type = "lead"))
names(rmIT.l) = c("rmIT.l1", "rmIT.l2", "rmIT.l3", "rmIT.l4", "rmIT.l5", "rmIT.l6",
                  "rmIT.l7", "rmIT.l8", "rmIT.l9", "rmIT.l10", "rmIT.l11", "rmIT.l12")
# Leads t+1..t+12 of Italian real exports.
rxIT.l <- data.frame(shift(data2$rxIT, n = 1:12, type = "lead"))
names(rxIT.l) = c("rxIT.l1", "rxIT.l2", "rxIT.l3", "rxIT.l4", "rxIT.l5", "rxIT.l6",
                  "rxIT.l7", "rxIT.l8", "rxIT.l9", "rxIT.l10", "rxIT.l11", "rxIT.l12")
# Lags t-1..t-4 of Italian real imports and exports.
l.rmIT <- data.frame(shift(data2$rmIT, n = 1:4, type = "lag"))
names(l.rmIT) = c("l1.rmIT", "l2.rmIT", "l3.rmIT", "l4.rmIT")
l.rxIT <- data.frame(shift(data2$rxIT, n = 1:4, type = "lag"))
names(l.rxIT) = c("l1.rxIT", "l2.rxIT", "l3.rxIT", "l4.rxIT")
# Same lead/lag construction for the German series.
rmDE.l <- data.frame(shift(data2$rmDE, n = 1:12, type = "lead"))
names(rmDE.l) = c("rmDE.l1", "rmDE.l2", "rmDE.l3", "rmDE.l4", "rmDE.l5", "rmDE.l6",
                  "rmDE.l7", "rmDE.l8", "rmDE.l9", "rmDE.l10", "rmDE.l11", "rmDE.l12")
rxDE.l <- data.frame(shift(data2$rxDE, n = 1:12, type = "lead"))
names(rxDE.l) = c("rxDE.l1", "rxDE.l2", "rxDE.l3", "rxDE.l4", "rxDE.l5", "rxDE.l6",
                  "rxDE.l7", "rxDE.l8", "rxDE.l9", "rxDE.l10", "rxDE.l11", "rxDE.l12")
l.rmDE <- data.frame(shift(data2$rmDE, n = 1:4, type = "lag"))
names(l.rmDE) = c("l1.rmDE", "l2.rmDE", "l3.rmDE", "l4.rmDE")
l.rxDE <- data.frame(shift(data2$rxDE, n = 1:4, type = "lag"))
names(l.rxDE) = c("l1.rxDE", "l2.rxDE", "l3.rxDE", "l4.rxDE")
# Scale each country's shock by its one-quarter-lagged real exports, then
# standardise to unit standard deviation; the "*3" variants are the same
# series divided by 100.
data3$shockDE2 <- (data3$shockDE / unlist(l.rxDE[1])) / sd((data3$shockDE / unlist(l.rxDE[1])), na.rm = TRUE)
data3$shockDE3 <- data3$shockDE2 / 100
data3$shockIT2 <- (data3$shockIT / unlist(l.rxIT[1])) / sd((data3$shockIT / unlist(l.rxIT[1])), na.rm = TRUE)
data3$shockIT3 <- data3$shockIT2 / 100
# Combine shocks/controls with the leads and lags built above.
data4 <- cbind(data3, l.rmIT, l.rxIT, rmIT.l, rxIT.l, l.rmDE, l.rxDE, rmDE.l, rxDE.l)
# Drop columns by position.
# NOTE(review): positional column indices (here and in data6[, 209+x] etc.
# below) silently break if any upstream column changes — consider selecting
# by name instead.
data5 <- subset(data4, select = -c(30:32, 35:37, 152:203))
# Impulse-response horizon in quarters.
h <- 12
# -- OLS regressions
# -- Equation 5
# Local-projection LHS at horizon 0: Italian export growth.
# NOTE(review): the denominator is rxIT here but l1.rxIT at every other
# horizon (and in the ITDE analogue below, lhsITDE50) — presumably a typo;
# confirm the intended growth-rate base.
lhsDEIT50 <- (data5$rxIT - data5$l1.rxIT) / data5$rxIT
# Horizons 1..h: (rxIT(t+x) - rxIT(t-1)) / rxIT(t-1); columns 165+x hold
# rxIT.l1..rxIT.l12 (positional — see note above).
lhsDEIT5 <- lapply(1:h, function(x) (data5[, 165+x] - data5$l1.rxIT) / data5$l1.rxIT)
lhsDEIT5 <- data.frame(lhsDEIT5)
names(lhsDEIT5) = paste("lhsDEIT5", 1:h, sep = "")
data6 <- cbind(data5, lhsDEIT50, lhsDEIT5)
# One regression per horizon (0..12): the horizon-x LHS on the German shock
# plus four lags of Italian fiscal/macro controls and the other countries'
# contemporaneous shocks.
DEIT5 <- lapply(1:13, function(x) lm(data6[, 209+x] ~ shockDE2 + l1.debtIT + l1.intIT + l1.lrtrIT + l1.lrgIT + l1.lryITc + l2.debtIT + l2.intIT + l2.lrtrIT + l2.lrgIT + l2.lryITc + l3.debtIT + l3.intIT + l3.lrtrIT + l3.lrgIT + l3.lryITc + l4.debtIT + l4.intIT + l4.lrtrIT + l4.lrgIT + l4.lryITc + shockNL2 + shockIT2 + shockES2 + shockFR2, data = data6))
summariesDEIT5 <- lapply(DEIT5, summary)
# Newey-West (HAC) confidence intervals for each horizon.
DEIT5conf95 <- lapply(DEIT5, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.95)
DEIT5conf68 <- lapply(DEIT5, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.68)
# Row 2 of each interval matrix is the shock coefficient.
DEIT5up95 <- lapply(1:13, function(x) DEIT5conf95[[x]][2,2])
DEIT5low95 <- lapply(1:13, function(x) DEIT5conf95[[x]][2,1])
DEIT5up68 <- lapply(1:13, function(x) DEIT5conf68[[x]][2,2])
DEIT5low68 <- lapply(1:13, function(x) DEIT5conf68[[x]][2,1])
# beta: point estimate of the shock coefficient at each horizon.
betaDEITt <- lapply(summariesDEIT5, function(x) x$coefficients[2,1])
names(betaDEITt) <- paste("betaDEITt", 0:h, sep = "")
# -- Equation 6
# LHS: change in German government spending, scaled by lagged Italian exports.
lhsDEIT60 <- (data6$rgDE - data6$l1.rgDE) / data6$l1.rxIT
lhsDEIT6 <- lapply(1:h, function(x) (data6[, 84+x] - data6$l1.rgDE) / data6$l1.rxIT)
lhsDEIT6 <- data.frame(lhsDEIT6)
names(lhsDEIT6) = paste("lhsDEIT6", 1:h, sep = "")
data6 <- cbind(data6, lhsDEIT60, lhsDEIT6)
# Same regression structure as Equation 5, with German controls and the
# /100-scaled shocks.
DEIT6 <- lapply(1:13, function(x) lm(data6[, 222+x] ~ shockDE3 + l1.debtDE + l1.intDE + l1.lrtrDE + l1.lrgDE + l1.lryDEc + l2.debtDE + l2.intDE + l2.lrtrDE + l2.lrgDE + l2.lryDEc + l3.debtDE + l3.intDE + l3.lrtrDE + l3.lrgDE + l3.lryDEc + l4.debtDE + l4.intDE + l4.lrtrDE + l4.lrgDE + l4.lryDEc + shockNL3 + shockFR3 + shockES3 + shockIT3, data = data6))
summariesDEIT6 <- lapply(DEIT6, summary)
DEIT6conf95 <- lapply(DEIT6, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.95)
DEIT6conf68 <- lapply(DEIT6, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.68)
DEIT6up95 <- lapply(1:13, function(x) DEIT6conf95[[x]][2,2])
DEIT6low95 <- lapply(1:13, function(x) DEIT6conf95[[x]][2,1])
DEIT6up68 <- lapply(1:13, function(x) DEIT6conf68[[x]][2,2])
DEIT6low68 <- lapply(1:13, function(x) DEIT6conf68[[x]][2,1])
# gamma: spending-response coefficient at each horizon.
gammaDEITt <- lapply(summariesDEIT6, function(x) x$coefficients[2,1])
names(gammaDEITt) <- paste("gammaDEITt", 0:h, sep = "")
# -- Cumulative multiplier
# Ratio of cumulated export responses to cumulated spending responses.
# NOTE(review): betaDEITt is a list; cumsum() on a list errors in base R —
# it likely needs as.numeric() like gammaDEITt does. Confirm this line runs.
mDEITtc <- cumsum(betaDEITt) / cumsum(as.numeric(gammaDEITt)); as.numeric(mDEITtc)
# --- Effect of Italian govt spending shock on German exports
# Mirror of the previous section with the country roles swapped.
# -- Equation 5
# LHS at horizon 0: German export growth (note: denominator l1.rxDE here,
# unlike the rxIT used in lhsDEIT50 above).
lhsITDE50 <- (data6$rxDE - data6$l1.rxDE) / data6$l1.rxDE
lhsITDE5 <- lapply(1:h, function(x) (data6[, 197+x] - data6$l1.rxDE) / data6$l1.rxDE)
lhsITDE5 <- data.frame(lhsITDE5)
names(lhsITDE5) = paste("lhsITDE5", 1:h, sep = "")
data6 <- cbind(data6, lhsITDE50, lhsITDE5)
# German export response to the Italian shock, with German controls.
ITDE5 <- lapply(1:13, function(x) lm(data6[, 235+x] ~ shockIT2 + l1.debtDE + l1.intDE + l1.lrtrDE + l1.lrgDE + l1.lryDEc + l2.debtDE + l2.intDE + l2.lrtrDE + l2.lrgDE + l2.lryDEc + l3.debtDE + l3.intDE + l3.lrtrDE + l3.lrgDE + l3.lryDEc + l4.debtDE + l4.intDE + l4.lrtrDE + l4.lrgDE + l4.lryDEc + shockNL2 + shockDE2 + shockFR2 + shockES2, data = data6))
summariesITDE5 <- lapply(ITDE5, summary)
# Newey-West (HAC) confidence intervals; row 2 is the shock coefficient.
ITDE5conf95 <- lapply(ITDE5, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.95)
ITDE5conf68 <- lapply(ITDE5, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.68)
ITDE5up95 <- lapply(1:13, function(x) ITDE5conf95[[x]][2,2])
ITDE5low95 <- lapply(1:13, function(x) ITDE5conf95[[x]][2,1])
ITDE5up68 <- lapply(1:13, function(x) ITDE5conf68[[x]][2,2])
ITDE5low68 <- lapply(1:13, function(x) ITDE5conf68[[x]][2,1])
betaITDEt <- lapply(summariesITDE5, function(x) x$coefficients[2,1])
names(betaITDEt) <- paste("betaITDEt", 0:h, sep = "")
# -- Equation 6
# LHS: change in Italian government spending scaled by lagged German exports.
lhsITDE60 <- (data6$rgIT - data6$l1.rgIT) / data6$l1.rxDE
lhsITDE6 <- lapply(1:h, function(x) (data6[, 96+x] - data6$l1.rgIT) / data6$l1.rxDE)
lhsITDE6 <- data.frame(lhsITDE6)
names(lhsITDE6) = paste("lhsITDE6", 1:h, sep = "")
data6 <- cbind(data6, lhsITDE60, lhsITDE6)
ITDE6 <- lapply(1:13, function(x) lm(data6[, 248+x] ~ shockIT3 + l1.debtIT + l1.intIT + l1.lrtrIT + l1.lrgIT + l1.lryITc + l2.debtIT + l2.intIT + l2.lrtrIT + l2.lrgIT + l2.lryITc + l3.debtIT + l3.intIT + l3.lrtrIT + l3.lrgIT + l3.lryITc + l4.debtIT + l4.intIT + l4.lrtrIT + l4.lrgIT + l4.lryITc + shockDE3 + shockNL3 + shockFR3 + shockES3, data = data6))
summariesITDE6 <- lapply(ITDE6, summary)
ITDE6conf95 <- lapply(ITDE6, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.95)
ITDE6conf68 <- lapply(ITDE6, coefci, vcov = NeweyWest, lag = 0, prewhite = FALSE, level = 0.68)
ITDE6up95 <- lapply(1:13, function(x) ITDE6conf95[[x]][2,2])
ITDE6low95 <- lapply(1:13, function(x) ITDE6conf95[[x]][2,1])
ITDE6up68 <- lapply(1:13, function(x) ITDE6conf68[[x]][2,2])
ITDE6low68 <- lapply(1:13, function(x) ITDE6conf68[[x]][2,1])
gammaITDEt <- lapply(summariesITDE6, function(x) x$coefficients[2,1])
names(gammaITDEt) <- paste("gammaITDEt", 0:h, sep = "")
# -- Cumulative multiplier
# NOTE(review): cumsum() on the list betaITDEt errors in base R — likely
# needs as.numeric() like gammaITDEt. Confirm this line runs.
mITDEtc <- cumsum(betaITDEt) / cumsum(as.numeric(gammaITDEt)); as.numeric(mITDEtc)
|
# Created on
# Course work:
# @author:
# Source:
# Compute and print the five-number quartile summary of a small sample.
test <- c(25, 45, 66, 24, 66, 7, 24, 44, 66, 87, 2)
# quantile() defaults to probs = c(0, 0.25, 0.5, 0.75, 1), type-7 interpolation.
quant <- quantile(test)
print(quant)
|
/chaaya/list_quantile.r
|
no_license
|
tactlabs/r-samples
|
R
| false
| false
| 130
|
r
|
# Created on
# Course work:
# @author:
# Source:
# Compute and print the five-number quartile summary of a small sample.
test <- c(25, 45, 66, 24, 66, 7, 24, 44, 66, 87, 2)
# quantile() defaults to probs = c(0, 0.25, 0.5, 0.75, 1), type-7 interpolation.
quant <- quantile(test)
print(quant)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_clarity_plot.R
\name{create_clarity_plot}
\alias{create_clarity_plot}
\title{create_clarity_plot}
\usage{
create_clarity_plot(data_ptds)
}
\value{
}
\description{
create_clarity_plot
}
\author{
Aaron Conway
}
|
/man/create_clarity_plot.Rd
|
permissive
|
awconway/ptds
|
R
| false
| true
| 295
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_clarity_plot.R
\name{create_clarity_plot}
\alias{create_clarity_plot}
\title{create_clarity_plot}
\usage{
create_clarity_plot(data_ptds)
}
\value{
}
\description{
create_clarity_plot
}
\author{
Aaron Conway
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.