content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coord-munch.r
\name{dist_central_angle}
\alias{dist_central_angle}
\title{Compute central angle between two points.
Multiply by the radius of the sphere to get the great circle distance}
\usage{
dist_central_angle(lon, lat)
}
\arguments{
\item{lon}{longitude}
\item{lat}{latitude}
}
\description{
Compute central angle between two points.
Multiply by the radius of the sphere to get the great circle distance
}
| /man/dist_central_angle.Rd | no_license | vivekktiwari/animint2 | R | false | true | 466 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coord-munch.r
\name{dist_central_angle}
\alias{dist_central_angle}
\title{Compute central angle between two points.
Multiply by the radius of the sphere to get the great circle distance}
\usage{
dist_central_angle(lon, lat)
}
\arguments{
\item{lon}{longitude}
\item{lat}{latitude}
}
\description{
Compute central angle between two points.
Multiply by the radius of the sphere to get the great circle distance
}
|
# Shiny UI: a single page exposing three dygraph chart placeholders
# ('mygraph', 'mygraph2', 'mygraph3') rendered through rCharts.
require(shiny)
require(rCharts)
require(rChartsDygraphs)
shinyUI(fluidPage(
titlePanel('Dygraphs Test in Shiny with rCharts'),
h4("12 Column with Width 400"),
# add_lib=T presumably injects the dygraphs JS assets with the widget -- TODO confirm
chartOutput('mygraph', 'dygraph', package="rChartsDygraphs", add_lib=T)
,
h4("8 Column with Width 1000"),
chartOutput('mygraph2', 'dygraph', package="rChartsDygraphs", add_lib=T)
,
h4("8 Column with Width 400"),
chartOutput('mygraph3', 'dygraph', package="rChartsDygraphs", add_lib=T)
)
) | /ui.R | no_license | timelyportfolio/rCharts_dygraphs_shiny | R | false | false | 508 | r | require(shiny)
require(rCharts)
require(rChartsDygraphs)
shinyUI(fluidPage(
titlePanel('Dygraphs Test in Shiny with rCharts'),
h4("12 Column with Width 400"),
chartOutput('mygraph', 'dygraph', package="rChartsDygraphs", add_lib=T)
,
h4("8 Column with Width 1000"),
chartOutput('mygraph2', 'dygraph', package="rChartsDygraphs", add_lib=T)
,
h4("8 Column with Width 400"),
chartOutput('mygraph3', 'dygraph', package="rChartsDygraphs", add_lib=T)
)
) |
# The goal of this script is to analyze and plot colony growth data.
# NOTE(review): relies on tidyverse (as_tibble, left_join, mutate, %>%)
# being attached by the caller -- the library() call below is commented out.
# rm(list = ls())
#
# #load library
# library(tidyverse)

# Load plate map (well -> sample annotation).
plateMap = as_tibble(read.table("02_metadata/181101_ColonyGrowth36_validationOfKOs_reanalyzed/plateMaps/plate6.txt", header = TRUE))

# Baseline (pre-drug) cell counts; keep only well id and count.
baselineData = as_tibble(read.csv("01_rawData/181101_ColonyGrowth36_validationOfKOs_reanalyzed/summarizedResults/row6_baseline_plateSummary.csv"))
baselineData = baselineData[, 1:2]
colnames(baselineData) = c("well", "cellNumber_baseline")

# Colony growth measurements on PLX4032.
colonyGrowthData = as_tibble(read.csv("01_rawData/181101_ColonyGrowth36_validationOfKOs_reanalyzed/summarizedResults/row6_onPLX4032_plateSummary.csv"))
colnames(colonyGrowthData) = c("well", "cellNumber_onDrug", "colonyNumber_onDrug", "numCellsInsideColonies_onDrug", "avgCellsPerColony_onDrug", "cellOutsideColonies_onDrug")

# Merge the three tables by well.
mergedData = left_join(plateMap, baselineData, by = "well")
mergedData = left_join(mergedData, colonyGrowthData, by = "well")

# Resistance metrics, normalized to the baseline cell number.
mergedData = mergedData %>%
  mutate(totalRcells_norm = cellNumber_onDrug / cellNumber_baseline) %>%
  mutate(Rcolonies_norm = colonyNumber_onDrug * 10000 / cellNumber_baseline) %>%
  mutate(survivingCells_norm = cellOutsideColonies_onDrug / cellNumber_baseline)

# Save the output table. Uses file.path() instead of the original
# setwd() so the script has no working-directory side effect.
write.table(mergedData,
            file = file.path("03_extractedData/181101_ColonyGrowth36_validationOfKOs_reanalyzed",
                             "plate6_extractedData.txt"),
            row.names = FALSE, col.names = TRUE, sep = "\t", quote = FALSE)
| /05_scripts/181101_ColonyGrowth36_validationOfKOs_reanalyzed/190405_extractData_Plate6.R | no_license | edatorre/2020_TorreEtAl_data | R | false | false | 1,548 | r | # #The goal of this script is to analyze and plot colony gorwth data
# rm(list = ls())
#
# #load library
# library(tidyverse)
#Load plate map
plateMap = as.tibble(read.table("02_metadata/181101_ColonyGrowth36_validationOfKOs_reanalyzed/plateMaps/plate6.txt", header = T))
#Read in baseline plate
baselineData = as_tibble(read.csv("01_rawData/181101_ColonyGrowth36_validationOfKOs_reanalyzed/summarizedResults/row6_baseline_plateSummary.csv"))
baselineData = baselineData[,1:2]
colnames(baselineData) = c("well", "cellNumber_baseline")
#Read in colony growth data
colonyGrowthData = as_tibble(read.csv("01_rawData/181101_ColonyGrowth36_validationOfKOs_reanalyzed/summarizedResults/row6_onPLX4032_plateSummary.csv"))
colnames(colonyGrowthData) = c("well", "cellNumber_onDrug", "colonyNumber_onDrug", "numCellsInsideColonies_onDrug", "avgCellsPerColony_onDrug", "cellOutsideColonies_onDrug")
#merge data
mergedData = left_join(plateMap, baselineData, by = "well")
mergedData = left_join(mergedData, colonyGrowthData, by = "well")
#compute metrics of resistance
mergedData = mergedData %>%
mutate(totalRcells_norm = cellNumber_onDrug / cellNumber_baseline) %>%
mutate(Rcolonies_norm = colonyNumber_onDrug * 10000 / cellNumber_baseline) %>%
mutate(survivingCells_norm = cellOutsideColonies_onDrug / cellNumber_baseline)
#save output table
setwd("03_extractedData/181101_ColonyGrowth36_validationOfKOs_reanalyzed/")
write.table(mergedData, file ="plate6_extractedData.txt", row.names = FALSE, col.names = TRUE, sep = "\t", quote = FALSE)
|
tabPanel('Segment', value = 'tab_segment') | /inst/app-blorr/ui/ui_segment.R | permissive | benitezrcamilo/xplorerr | R | false | false | 42 | r | tabPanel('Segment', value = 'tab_segment') |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributionProfile.R
\name{smoother.distrProfile}
\alias{smoother.distrProfile}
\title{Smoother for distribution profiles.}
\usage{
\method{smoother}{distrProfile}(object, session = NULL, control = list(...),
...)
}
\arguments{
\item{object}{An object of class \code{distrProfile} as returned by \code{\link{distributionProfile}}.}
\item{session}{A numeric vector of the sessions to be selected and smoothed. Defaults to all sessions.}
\item{control}{A list of parameters for controlling the smoothing process.
This is passed to \code{\link{smootherControl.distrProfile}}.}
\item{...}{Arguments to be used to form the default \code{control} argument if it is not supplied directly.}
}
\description{
The distribution profiles are smoothed using a shape constrained additive model with Poisson
responses to ensure that the smoothed distribution profile is positive and monotone decreasing.
}
\references{
Kosmidis, I., and Passfield, L. (2015). Linking the Performance of
Endurance Runners to Training and Physiological Effects via Multi-Resolution
Elastic Net. \emph{ArXiv e-print} arXiv:1506.01388.
Pya, N. and Wood S. (2015). Shape Constrained Additive Models. Statistics and
Computing, 25(3), 543--559.
Frick, H., Kosmidis, I. (2017). trackeR: Infrastructure for Running and Cycling Data from GPS-Enabled Tracking Devices in R. \emph{Journal of Statistical Software}, \bold{82}(7), 1--29. doi:10.18637/jss.v082.i07
}
\seealso{
\code{\link{smootherControl.distrProfile}}
}
| /man/smoother.distrProfile.Rd | no_license | DrRoad/trackeR | R | false | true | 1,579 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributionProfile.R
\name{smoother.distrProfile}
\alias{smoother.distrProfile}
\title{Smoother for distribution profiles.}
\usage{
\method{smoother}{distrProfile}(object, session = NULL, control = list(...),
...)
}
\arguments{
\item{object}{An object of class \code{distrProfile} as returned by \code{\link{distributionProfile}}.}
\item{session}{A numeric vector of the sessions to be selected and smoothed. Defaults to all sessions.}
\item{control}{A list of parameters for controlling the smoothing process.
This is passed to \code{\link{smootherControl.distrProfile}}.}
\item{...}{Arguments to be used to form the default \code{control} argument if it is not supplied directly.}
}
\description{
The distribution profiles are smoothed using a shape constrained additive model with Poisson
responses to ensure that the smoothed distribution profile is positive and monotone decreasing.
}
\references{
Kosmidis, I., and Passfield, L. (2015). Linking the Performance of
Endurance Runners to Training and Physiological Effects via Multi-Resolution
Elastic Net. \emph{ArXiv e-print} arXiv:1506.01388.
Pya, N. and Wood S. (2015). Shape Constrained Additive Models. Statistics and
Computing, 25(3), 543--559.
Frick, H., Kosmidis, I. (2017). trackeR: Infrastructure for Running and Cycling Data from GPS-Enabled Tracking Devices in R. \emph{Journal of Statistical Software}, \bold{82}(7), 1--29. doi:10.18637/jss.v082.i07
}
\seealso{
\code{\link{smootherControl.distrProfile}}
}
|
# Rank sites by click-through rate for one campaign and report the set of
# sites whose cumulative CTR clears a threshold.
campaign_id <- "59905"
ctr_threshold <- .01
## 52673 51513

library(RODBC)
conn <- odbcConnect("modeling_db")
query_file <- 'C:/Users/john/Google Drive/R Directory/projects/High CTR Site List/sites_by_ctr_query.txt'
query <- readChar(query_file, file.info(query_file)$size)
sites_by_ctr <- sqlQuery(conn, query)

# Inline fallback query; %s is filled with campaign_id via sprintf below.
# Fixes vs original: alias was misspelled "impressiosn" (the sqldf step
# below selects "impressions", so it could never match) and the campaign
# filter lacked its WHERE clause even though sprintf() supplied an id.
sites_by_ctr_query <- ("select line_item_id
, period
, refresh_time
, description as site
, sum(views) as impressions
, sum(clicks) as clicks
from campaign_insights_new
where line_item_id = %s
and feature = 'site'
and description <> '_TOTAL_'
group by line_item_id
, period
, refresh_time
, description")

library(RODBC)
conn <- odbcConnect("modeling_db")  # was "modeling db" (invalid DSN name)
sites_by_ctr <- sqlQuery(conn, sprintf(sites_by_ctr_query, campaign_id))

## Aggregate to one row per site, 7-day period only, sorted by CTR ascending.
library(sqldf)
sites_by_ctr_formatted <- sqldf('select site, sum(impressions) as impressions, sum(clicks) as clicks,sum(clicks)/sum(impressions) as ctr from sites_by_ctr where period=7 group by site order by ctr asc')

# Rank sites from lowest to highest CTR; ctr_2 is CTR as a percentage.
sites_by_ctr_formatted$rank <- rank(sites_by_ctr_formatted$ctr, ties.method = "first")
sites_by_ctr_formatted$ctr_2 <- sites_by_ctr_formatted$ctr * 100

# Cumulative clicks / impressions / CTR over the CTR-sorted rows.
# Vectorized cumsum() replaces the original O(n^2) row-by-row loops.
sites_by_ctr_formatted$cum_clicks <- cumsum(sites_by_ctr_formatted$clicks)
sites_by_ctr_formatted$cum_imps <- cumsum(sites_by_ctr_formatted$impressions)
sites_by_ctr_formatted$cum_ctr <- sites_by_ctr_formatted$cum_clicks / sites_by_ctr_formatted$cum_imps

## Flag mobile-app placements by site-name pattern.
sites_by_ctr_formatted$is_mobile <- grepl('mob.app', sites_by_ctr_formatted$site)

# Plot cumulative CTR by rank.
library(ggplot2)
ggplot(data = sites_by_ctr_formatted, aes(x = rank, y = cum_ctr)) + geom_bar(stat = "identity")

# Sites above the cumulative-CTR threshold.
subset(sites_by_ctr_formatted, cum_ctr > ctr_threshold, select = c(is_mobile, site, ctr))
ggplot(data = subset(sites_by_ctr_formatted, cum_ctr > ctr_threshold, select = c(site, ctr, clicks)),
       aes(x = site, y = ctr, label = clicks)) + geom_bar(stat = "identity") + coord_flip() + geom_text(hjust = 0, vjust = 0)

write.csv(sites_by_ctr_formatted, file = "high_ctr_data.csv")
| /my R scripts/high ctr troubleshoot.R | no_license | SuperJohn/R-directory | R | false | false | 2,306 | r | campaign_id <- "59905"
ctr_threshold <- .01
## 52673 51513
library(RODBC)
conn <- odbcConnect("modeling_db")
query_file <- 'C:/Users/john/Google Drive/R Directory/projects/High CTR Site List/sites_by_ctr_query.txt'
query <- readChar(query_file, file.info(query_file)$size)
sites_by_ctr <- sqlQuery(conn,query)
sites_by_ctr_query <-("select line_item_id
, period
, refresh_time
, description as site
, sum(views) as impressiosn
, sum(clicks) as clicks
from campaign_insights_new
and feature = 'site'
and description <> '_TOTAL_'
group by line_item_id
, period
, refresh_time
, description")
library(RODBC)
conn <- odbcConnect("modeling db")
sites_by_ctr <- sqlQuery(conn, sprintf(sites_by_ctr_query, campaign_id))
## format query data
library(sqldf)
sites_by_ctr_formatted<-sqldf('select site, sum(impressions) as impressions, sum(clicks) as clicks,sum(clicks)/sum(impressions) as ctr from sites_by_ctr where period=7 group by site order by ctr asc')
# add rank column
sites_by_ctr_formatted$rank<-rank(sites_by_ctr_formatted$ctr, ties.method="first")
sites_by_ctr_formatted$ctr_2<-sites_by_ctr_formatted$ctr*100
# add cum clicks column
for (loop in (1:nrow(sites_by_ctr_formatted)))
{sites_by_ctr_formatted[loop,"cum_clicks"] <- sum(sites_by_ctr_formatted[1:loop,"clicks"])}
# add cum imps column
for (loop in (1:nrow(sites_by_ctr_formatted)))
{sites_by_ctr_formatted[loop,"cum_imps"] <- sum(sites_by_ctr_formatted[1:loop,"impressions"])}
# add cumulative ctr by rank column !! needs work
for (loop in (1:nrow(sites_by_ctr_formatted)))
{sites_by_ctr_formatted[loop,"cum_ctr"] <- sum(sites_by_ctr_formatted[1:loop,"clicks"])/sum(sites_by_ctr_formatted[1:loop,"impressions"])}
## add is_mobile column
sites_by_ctr_formatted$is_mobile<-grepl('mob.app',sites_by_ctr_formatted$site)
# plot cumulative ctr by rank
library(ggplot2)
ggplot(data=sites_by_ctr_formatted,aes(x=rank, y=cum_ctr))+geom_bar(stat="identity")
# Return sites above threshold
subset(sites_by_ctr_formatted,cum_ctr>ctr_threshold, select=c(is_mobile,site,ctr))
library(ggplot2)
ggplot(data=subset(sites_by_ctr_formatted,cum_ctr>ctr_threshold, select=c(site,ctr,clicks))
,aes(x=site, y=ctr,label=clicks))+geom_bar(stat="identity")+coord_flip()+geom_text(hjust=0, vjust=0)
write.csv(sites_by_ctr_formatted, file = "high_ctr_data.csv")
|
library(devtools)
# Install GitHub versions of project dependencies.
install_github('terraref/traits')
install_github('daattali/addinslist')
# Build the project skeleton: top-level folders first so the nested
# data/results subfolders in the second call have existing parents.
lapply(c('~/Team_1/doc', '~/Team_1/src', '~/Team_1/results', '~/Team_1/data'), dir.create)
lapply(c('~/Team_1/data/raw_data_csv', '~/Team_1/results/plots'), dir.create)
| /src/R/team1.r | no_license | dlebauer/team1-predict-swir | R | false | false | 263 | r | library(devtools)
install_github('terraref/traits')
install_github('daattali/addinslist')
lapply(c('~/Team_1/doc', '~/Team_1/src', '~/Team_1/results', '~/Team_1/data'), dir.create)
lapply(c('~/Team_1/data/raw_data_csv', '~/Team_1/results/plots'), dir.create)
|
testlist <- list(bytes1 = c(-704643287L, 1593835520L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), pmutation = 0)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result) | /mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612803287-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 239 | r | testlist <- list(bytes1 = c(-704643287L, 1593835520L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), pmutation = 0)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/export_config.R
\name{export_config}
\alias{export_config}
\title{Export model configuration setups}
\usage{
export_config(
config_file,
model = c("GOTM", "GLM", "Simstrat", "FLake"),
folder = "."
)
}
\arguments{
\item{config_file}{name of the master LakeEnsemblR config file}
\item{model}{vector; model to export configuration file. Options include c('GOTM', 'GLM', 'Simstrat', 'FLake')}
\item{folder}{path to the folder in which the model configuration setups are created}
}
\description{
Create directory with file setups for each model based on a master LakeEnsemblR config file
}
\examples{
}
\author{
Tadhg Moore, Jorrit Mesman
}
\keyword{methods}
| /man/export_config.Rd | permissive | rmpilla/LakeEnsemblR | R | false | true | 678 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/export_config.R
\name{export_config}
\alias{export_config}
\title{Export model configuration setups}
\usage{
export_config(
config_file,
model = c("GOTM", "GLM", "Simstrat", "FLake"),
folder = "."
)
}
\arguments{
\item{config_file}{name of the master LakeEnsemblR config file}
\item{model}{vector; model to export configuration file. Options include c('GOTM', 'GLM', 'Simstrat', 'FLake')}
\item{folder}{path to the folder in which the model configuration setups are created}
}
\description{
Create directory with file setups for each model based on a master LakeEnsemblR config file
}
\examples{
}
\author{
Tadhg Moore, Jorrit Mesman
}
\keyword{methods}
|
#' Color palettes from components.ai, ramped to a specified length
#'
#' @inheritParams components_pal
#' @param n Numeric. Number of colors to be displayed.
#'
#' @return A character vector of `n` hex colors.
#' @export
components <- function(palette = "lab",
                       level = 8,
                       n,
                       alpha = 1,
                       reverse = FALSE) {
  stopifnot(is.numeric(level))
  pal <- components_palettes[[palette]]
  if (is.null(pal)) stop("Palette not found.")
  if (level > nrow(pal)) stop("This palette only has ", nrow(pal), " levels.")
  pal <- pal[level, ]
  if (missing(n)) n <- length(pal)
  if (reverse) pal <- rev(pal)
  # Build the ramp, then apply transparency separately.  The original
  # called grDevices::colorRampPalette(pal, alpha): the second positional
  # argument there is `bias` (forwarded to colorRamp), so a non-default
  # alpha silently distorted the ramp instead of setting transparency.
  cols <- grDevices::colorRampPalette(pal)(n)
  if (alpha < 1) cols <- grDevices::adjustcolor(cols, alpha.f = alpha)
  cols
}
#' Color palettes from components.ai
#'
#' Returns a palette *function*: given a color count `n`, it produces the
#' ramped colors via [components].
#'
#' @param palette Character. A palette to display; one of "bootstrap", "lab"
#' (the default), "material", "open_color", "palx", or "tachyons".
#' @param level Numeric. The "level" of the palette to be displayed.
#' @param alpha Numeric. Transparency.
#' @param reverse Logical. Should the order of colors be reversed?
#'
#' @export
components_pal <- function(palette = "lab",
                           level = 8,
                           alpha = 1,
                           reverse = FALSE) {
  # Close over the settings; ggplot2 supplies `n` later.
  function(n) components(palette, level, n, alpha, reverse)
}
#' components.ai color scales for ggplot2
#'
#' @inheritParams components
#' @param ... Arguments passed to either [ggplot2::discrete_scale] or
#'   [ggplot2::scale_color_gradientn], as appropriate.
#'
#' @rdname scale_color_components
#' @export
scale_color_components <- function(palette = "lab",
                                   level = 8,
                                   discrete = TRUE,
                                   alpha = 1,
                                   reverse = FALSE,
                                   ...) {
  # Continuous data: interpolate the palette into a 256-step gradient.
  if (!discrete) {
    ramp <- components(palette,
                       level = level,
                       256,
                       alpha = alpha,
                       reverse = reverse)
    return(ggplot2::scale_color_gradientn(colours = ramp, ...))
  }
  # Discrete data: hand ggplot2 a palette-generating function.
  ggplot2::discrete_scale(
    "colour",
    "components",
    components_pal(palette, level = level, alpha = alpha, reverse = reverse),
    ...
  )
}

#' @rdname scale_color_components
#' @export
scale_colour_components <- scale_color_components
#' components.ai fill scales for ggplot2
#'
#' @inheritParams components
#' @param ... Arguments passed to either [ggplot2::discrete_scale] or
#'   [ggplot2::scale_fill_gradientn], as appropriate.
#'
#' @export
scale_fill_components <- function(palette = "lab",
                                  level = 8,
                                  discrete = TRUE,
                                  alpha = 1,
                                  reverse = FALSE,
                                  ...) {
  if (discrete) {
    # Discrete data: palette-generating function.
    ggplot2::discrete_scale(
      "fill",
      "components",
      components_pal(palette,
                     level = level,
                     alpha = alpha,
                     reverse = reverse),
      ...)
  }
  else {
    # Continuous data: 256-step gradient.
    # Bug fix: this branch previously hard-coded level = 5, silently
    # ignoring the user's `level` (scale_color_components passes it through).
    ggplot2::scale_fill_gradientn(
      colours = components(palette,
                           level = level,
                           256,
                           alpha = alpha,
                           reverse = reverse),
      ...)
  }
}
#' Display a color palette
#'
#' Given a palette name and level, render the ramped colors as a row of
#' tiles in the current graphics device, titled "<palette>: Level <level>".
#'
#' @param palette Character. Name of a components.ai palette (see [components]).
#' @param level Numeric. The "level" of the palette to be displayed.
#'
#' @export
components_show_palette <- function(palette, level) {
  name <- paste0(palette, ": Level ", level)
  palette <- components(palette, level)
  n <- length(palette)
  # Bug fix: the original tested `length(palette > 0)` -- the length of a
  # logical vector -- which only worked because if(0L) is FALSE. Test the
  # count explicitly.
  if (n > 0) {
    graphics::image(1:n, 1, as.matrix(1:n), col = palette,
                    xlab = "", ylab = "", xaxt = "n", yaxt = "n",
                    bty = "n")
    graphics::title(main = name)
  }
}
| /R/components.R | permissive | mikemahoney218/ggm218 | R | false | false | 4,007 | r | #' Color palettes from components.ai, ramped to a specified length
#'
#' @inheritParams components_pal
#' @param n Numeric. Number of colors to be displayed.
#'
#' @export
components <- function(palette = "lab",
level = 8,
n,
alpha = 1,
reverse = FALSE) {
stopifnot(is.numeric(level))
pal <- components_palettes[[palette]]
if (is.null(pal)) stop("Palette not found.")
if (level > nrow(pal)) stop("This palette only has ", nrow(pal), " levels.")
pal <- pal[level, ]
if (missing(n)) n <- length(pal)
if (reverse) pal <- rev(pal)
grDevices::colorRampPalette(pal, alpha)(n)
}
#' Color palettes from components.ai
#'
#' @param palette Character. A palette to display; one of "bootstrap", "lab"
#' (the default), "material", "open_color", "palx", or "tachyons".
#' @param level Numeric. The "level" of the palette to be displayed.
#' @param alpha Numeric. Transparency.
#' @param reverse Logical. Should the order of colors be reversed?
#'
#' @export
components_pal <- function(palette = "lab",
level = 8,
alpha = 1,
reverse = FALSE) {
function(n) {
components(palette, level, n, alpha, reverse)
}
}
#' components.ai color scales for ggplot2
#'
#' @inheritParams components
#' @param ... Arguments passed to either [ggplot2::discrete_scale] or
#' [ggplot2::scale_color_gradientn], as appropriate.
#'
#'
#' @rdname scale_color_components
#' @export
scale_color_components <- function(palette = "lab",
level = 8,
discrete = TRUE,
alpha = 1,
reverse = FALSE,
...) {
if (discrete) {
ggplot2::discrete_scale(
"colour",
"components",
components_pal(palette,
level = level,
alpha = alpha,
reverse = reverse),
...)
}
else {
ggplot2::scale_color_gradientn(
colours = components(palette,
level = level,
256,
alpha = alpha,
reverse = reverse),
...)
}
}
#' @rdname scale_color_components
#' @export
scale_colour_components <- scale_color_components
#' components.ai fill scales for ggplot2
#'
#' @inheritParams components
#' @param ... Arguments passed to either [ggplot2::discrete_scale] or
#' [ggplot2::scale_fill_gradientn], as appropriate.
#'
#' @export
scale_fill_components <- function(palette = "lab",
level = 8,
discrete = TRUE,
alpha = 1,
reverse = FALSE,
...) {
if (discrete) {
ggplot2::discrete_scale(
"fill",
"components",
components_pal(palette,
level = level,
alpha = alpha,
reverse = reverse),
...)
}
else {
ggplot2::scale_fill_gradientn(
colours = components(palette,
level = 5,
256,
alpha = alpha,
reverse = reverse),
...)
}
}
#' Display a color palette
#'
#' Given a character vector (hex RGB values), display palette in graphics window.
#'
#' @param palette vector of character hex RGB values
#'
#' @export
components_show_palette <- function(palette, level) {
name <- paste0(palette, ": Level ", level)
palette <- components(palette, level)
n <- length(palette)
if (length(palette > 0)) {
graphics::image(1:n, 1, as.matrix(1:n), col = palette,
xlab = "", ylab = "", xaxt = "n", yaxt = "n",
bty = "n")
graphics::title(main = name)
}
}
|
library(shiny)
shinyServer(function(input, output) {
numberlist <- reactive({
text <- input$textInput;
list <- strsplit(text, "\\s*,\\s*");
sapply(list, as.numeric);
})
avg <- reactive({
mean(numberlist())
})
output$textOutput <- renderText({
paste("Average:", avg());
})
output$plotOutput <- renderPlot({
if(input$plotButton > 0){
input$plotButton
isolate({
plot(numberlist());
abline(h = avg())
})
}
})
})
| /server.R | no_license | 381265947/DataProductsCourseProject | R | false | false | 500 | r |
library(shiny)
shinyServer(function(input, output) {
numberlist <- reactive({
text <- input$textInput;
list <- strsplit(text, "\\s*,\\s*");
sapply(list, as.numeric);
})
avg <- reactive({
mean(numberlist())
})
output$textOutput <- renderText({
paste("Average:", avg());
})
output$plotOutput <- renderPlot({
if(input$plotButton > 0){
input$plotButton
isolate({
plot(numberlist());
abline(h = avg())
})
}
})
})
|
read_enrichment_file=function(infile,SIG_THRESH,fillin,col_of_interest){
# Load an enrichment table and keep a single statistic column, indexed
# by transcription factor; values failing the BH cutoff are replaced
# with `fillin`.
tab <- read.table(infile, header = TRUE)
out <- data.frame(enrichment = tab[, col_of_interest])
rownames(out) <- tab$tf
# Identify rows whose BH-adjusted p-value exceeds the threshold.
bh <- as.numeric(as.character(tab$BH))
not_sig <- which(bh > SIG_THRESH)
# Keep only the significant ones; mask the rest with the fill value.
if (length(not_sig) > 0) {
out[not_sig, 'enrichment'] <- fillin
}
return(out)
}
# Dot-and-errorbar plot of TF enrichments, written to a PDF.
# outpdf: output PDF path; data: table with columns tf, BH, enrichment,
# confLow, confHigh; top_value: upper limit of the enrichment axis.
one_enrichment_plot=function(outpdf,data,top_value){
print('doing enrichment plot')
print(head(data))
require(ggplot2)
# Strip pipeline-specific prefixes/suffixes from TF names for display.
data[,'tf']=gsub('bed.OverlapChIPseq','',
		gsub('MotifMatch_','',
		gsub('MergedPeaks_ChIPseq_','',
		gsub('correlatedMotifs.motif.pouya.Motif.','',
		gsub('scanThresh0','',data$tf)))))
# Flag TFs significant at BH <= 0.05 for coloring.
significance=(data$BH<=0.05)
sig_vector=rep('Not significant',times=dim(data)[1])
sig_vector[which(significance==TRUE)]='Significant'
# TFname factor levels follow increasing enrichment so the flipped
# y-axis is ordered by effect size.
data=cbind(data,Significant=factor(sig_vector, levels=c('Significant','Not significant')),
	TFname=factor(data$tf,levels=data[order(data$enrichment),'tf']))
pdf(outpdf,width=5,height=12)
print(ggplot(data, aes(y=enrichment, x=TFname,col=Significant))+
	coord_flip()+geom_point()+scale_colour_manual(values=c("red","gray"))+geom_errorbar(aes(ymax = confLow,
	ymin=confHigh))+ylim(0,top_value)+theme_bw() + ylab('Enrichment of TF in QTL peaks') + xlab('TF')+theme(panel.border = element_blank(),
	panel.grid.minor = element_blank(), axis.line = element_line(colour = "black")))
dev.off()
}
optimal_ordering=function(m,meth){
# Reorder matrix rows via hierarchical clustering with optimal leaf
# ordering (cba::order.optimal) so similar rows end up adjacent.
# meth is the distance method passed to dist().
require(cba)
mat <- as.matrix(m)
dists <- dist(mat, method = meth)
tree <- hclust(dists)
best <- order.optimal(dists, tree$merge)
return(mat[best$order, ])
}
order_by_column=function(m,column_name,decreasing_desired){
# Sort the rows of `m` by the named column (ascending unless
# decreasing_desired is TRUE).
idx <- order(m[, column_name], decreasing = decreasing_desired)
return(m[idx, ])
}
# Write a fixed-order pheatmap of the enrichment matrix to a PDF.
# data: numeric matrix/data.frame of enrichments; out: output PDF path;
# meth: distance method (currently unused -- the optimal_ordering call is
# commented out); top_value: upper bound of the color scale.
heatmap_enrichments=function(data,out,meth,top_value){
#data.optimal=optimal_ordering(data,meth)
data.optimal=data
require(pheatmap)
pdf(out,width=10,height=20)
# 20 color bins from white (low) through red to black (= top_value).
pheatmap(as.matrix(data.optimal),cluster_rows=FALSE,cluster_cols=FALSE,fontsize=10,breaks=seq(from=1,to=top_value,by=(top_value-1)/20),
	color=colorRampPalette(c("white", "red",'black'))(n = 20),cellwidth=10,cellheight=10)#,legend_breaks=seq(from=1,to=top_value,by=1))
#pheatmap(-log(as.matrix(data.optimal)),cluster_rows=FALSE,cluster_cols=FALSE,fontsize=5,
#        cellwidth=5,cellheight=5,breaks=seq(from=1,to=600,by=1),
#        color=colorRampPalette(c("gray", "red","black"))(n = 600))
dev.off()
}
# For each enrichment type (TFBS / motif / locally-correlated motif) and
# each histone mark, read local- and distal-QTL overlap enrichments from
# hard-coded cluster paths, emit a per-condition dot plot, and a combined
# heatmap sorted by the H3K27AC local enrichments.
overlapEnrichment_distalQTL=function(){
# Path template; ENRICHDIR / ENRICHPREF are substituted per condition.
enrichfiles='/srv/gsfs0/projects/snyder/oursu/histoneQTL/motif_analysis/results/2015-01-13/OverlapEnrichment/ENRICHDIR/ENRICHPREF'
enrichments=c('TFBS_overlap_','Motif_overlap_','MotifCorrelatedLocal_overlap_') #add in disrupted motif overlaps, and hQTL overlaps
hmarks=c('H3K27AC','H3K4ME1','H3K4ME3')
for (enrich in enrichments){
# Per-enrichment-type axis/color-scale ceiling for the plots.
if (enrich=='TFBS_overlap_'){
top_value=3
}
if (enrich=='Motif_overlap_'){
top_value=5
}
if (enrich=='MotifCorrelatedLocal_overlap_'){
top_value=25
}
print(enrich)
first=TRUE
# Suffix selects local vs distal QTL peak sets; accumulate one column
# of `data` (and one block of `total`) per (suffix, hmark) pair.
for (suffix in c('HMARK.QTLpeaks','LocalPeakIsHMARK.QTLpeaks_affectingDistalPeaks')){
for (hmark in hmarks){
f=gsub('ENRICHDIR',paste(enrich,'QTLpeaks0kb',sep=''),
	gsub('ENRICHPREF',paste(enrich,hmark,'QTLpeaks0kb___.overlapEnrichIN',gsub('HMARK',hmark,suffix),sep=''),enrichfiles))
cur_data=read_enrichment_file(f,0.05,1,'enrichment')
cur_total=read.table(f,header=TRUE)
#cur_data=cur_data/max(cur_data[,1])
if (suffix=='HMARK.QTLpeaks'){
addon='Local: HMARK'
}
if (suffix=='LocalPeakIsHMARK.QTLpeaks_affectingDistalPeaks'){
addon='Distal: HMARK'
}
# Clean pipeline prefixes/suffixes off the TF row names for display.
rownames(cur_data)=gsub('bed.OverlapChIPseq','',
		gsub('MotifMatch_','',
		gsub('MergedPeaks_ChIPseq_','',
		gsub('correlatedMotifs.motif.pouya.Motif.','',
		gsub('scanThresh0','',rownames(cur_data))))))
#condition=paste(gsub('_',' ',enrich),hmark,' ',addon,sep='')
condition=gsub('HMARK',hmark,addon)
cur_total=cbind(cur_total,condition=condition)
one_enrichment_plot(paste(dirname(f),condition,'overlapEnrichmentPlot.pdf',sep=''),cur_total,top_value)
# First iteration seeds data/total; later ones append, aligning
# cur_data rows to the accumulated row order.
if (first==FALSE){
data=cbind(data,cur_data[rownames(data),])
total=rbind(total,cur_total)
colnames(data)[ncol(data)]=condition
}
if (first==TRUE){
data=cur_data
total=cur_total
first=FALSE
colnames(data)[1]=condition
}
}
}
###### heatmap ###################################################
#add in the k27AC again, to sort by it and its pvalue
# Re-read H3K27AC local enrichments with a permissive threshold (1.1
# keeps everything) purely to define the heatmap row order.
sortf=gsub('ENRICHDIR',paste(enrich,'QTLpeaks0kb',sep=''),
	gsub('ENRICHPREF',paste(enrich,'H3K27AC','QTLpeaks0kb___.overlapEnrichIN',gsub('HMARK','H3K27AC','HMARK.QTLpeaks'),sep=''),enrichfiles))
k27ac_data=read_enrichment_file(sortf,1.1,1,c('enrichment'))
rownames(k27ac_data)=gsub('bed.OverlapChIPseq','',
		gsub('MotifMatch_','',
		gsub('MergedPeaks_ChIPseq_','',
		gsub('correlatedMotifs.motif.pouya.Motif.','',
		gsub('scanThresh0','',rownames(k27ac_data))))))
k27ac_sorted_rows=rownames(k27ac_data)[order(k27ac_data[,1],decreasing=TRUE)]
data=data[k27ac_sorted_rows,]
heatmap_enrichments(data,paste(dirname(f),'overlapEnrichmentHeatmap.pdf',sep=''),'euclidean',top_value)
####################################################################
}
}
overlapEnrichment_distalQTL()
| /Features/TFs/overlapEnrichment/visualize_overlapEnrichment.R | no_license | oursu/genome_utils | R | false | false | 5,945 | r | read_enrichment_file=function(infile,SIG_THRESH,fillin,col_of_interest){
data=read.table(infile,header=TRUE)
data.enrich=data.frame(enrichment=data[,col_of_interest])
rownames(data.enrich)=data$tf
to_remove=which(as.numeric(as.character(data$BH))>SIG_THRESH)
#keep only the significant ones
if (length(to_remove)>0){
data.enrich[to_remove,'enrichment']=fillin
}
return(data.enrich)
}
one_enrichment_plot=function(outpdf,data,top_value){
print('doing enrichment plot')
print(head(data))
require(ggplot2)
data[,'tf']=gsub('bed.OverlapChIPseq','',
gsub('MotifMatch_','',
gsub('MergedPeaks_ChIPseq_','',
gsub('correlatedMotifs.motif.pouya.Motif.','',
gsub('scanThresh0','',data$tf)))))
significance=(data$BH<=0.05)
sig_vector=rep('Not significant',times=dim(data)[1])
sig_vector[which(significance==TRUE)]='Significant'
data=cbind(data,Significant=factor(sig_vector, levels=c('Significant','Not significant')),
TFname=factor(data$tf,levels=data[order(data$enrichment),'tf']))
pdf(outpdf,width=5,height=12)
print(ggplot(data, aes(y=enrichment, x=TFname,col=Significant))+
coord_flip()+geom_point()+scale_colour_manual(values=c("red","gray"))+geom_errorbar(aes(ymax = confLow,
ymin=confHigh))+ylim(0,top_value)+theme_bw() + ylab('Enrichment of TF in QTL peaks') + xlab('TF')+theme(panel.border = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black")))
dev.off()
}
optimal_ordering=function(m,meth){
require(cba)
d <- dist(as.matrix(m),method=meth)
hc <- hclust(d)
co <- order.optimal(d, hc$merge)
m.optimalRows=as.matrix(m)[co$order,]
return(m.optimalRows)
}
order_by_column=function(m,column_name,decreasing_desired){
return(m[order(m[,column_name],decreasing=decreasing_desired),])
}
heatmap_enrichments=function(data,out,meth,top_value){
#data.optimal=optimal_ordering(data,meth)
data.optimal=data
require(pheatmap)
pdf(out,width=10,height=20)
pheatmap(as.matrix(data.optimal),cluster_rows=FALSE,cluster_cols=FALSE,fontsize=10,breaks=seq(from=1,to=top_value,by=(top_value-1)/20),
color=colorRampPalette(c("white", "red",'black'))(n = 20),cellwidth=10,cellheight=10)#,legend_breaks=seq(from=1,to=top_value,by=1))
#pheatmap(-log(as.matrix(data.optimal)),cluster_rows=FALSE,cluster_cols=FALSE,fontsize=5,
# cellwidth=5,cellheight=5,breaks=seq(from=1,to=600,by=1),
# color=colorRampPalette(c("gray", "red","black"))(n = 600))
dev.off()
}
# Overlap-enrichment analysis of local vs distal histone QTLs.
# For each enrichment type (TFBS overlap, motif overlap, locally-correlated
# motif overlap) and each histone mark, this:
#   1. locates the per-condition enrichment table written upstream,
#   2. draws a per-condition barplot via one_enrichment_plot(),
#   3. column-binds the per-condition enrichment vectors into one
#      feature x condition matrix (rows aligned to the first table read),
#   4. re-sorts rows by the H3K27AC "Local" enrichment and writes a
#      combined heatmap via heatmap_enrichments().
# NOTE(review): paths are hard-coded to a specific cluster filesystem, and
# this depends on read_enrichment_file()/one_enrichment_plot() defined
# elsewhere in this file -- confirm those are sourced before calling.
overlapEnrichment_distalQTL=function(){
# Template path: ENRICHDIR and ENRICHPREF are placeholders substituted below.
enrichfiles='/srv/gsfs0/projects/snyder/oursu/histoneQTL/motif_analysis/results/2015-01-13/OverlapEnrichment/ENRICHDIR/ENRICHPREF'
enrichments=c('TFBS_overlap_','Motif_overlap_','MotifCorrelatedLocal_overlap_') #add in disrupted motif overlaps, and hQTL overlaps
hmarks=c('H3K27AC','H3K4ME1','H3K4ME3')
for (enrich in enrichments){
# top_value caps the colour scale of the heatmap; tuned per enrichment type.
if (enrich=='TFBS_overlap_'){
top_value=3
}
if (enrich=='Motif_overlap_'){
top_value=5
}
if (enrich=='MotifCorrelatedLocal_overlap_'){
top_value=25
}
print(enrich)
first=TRUE
# First suffix = QTLs acting on their own (local) peak; second = local QTLs
# that also affect a distal peak.
for (suffix in c('HMARK.QTLpeaks','LocalPeakIsHMARK.QTLpeaks_affectingDistalPeaks')){
for (hmark in hmarks){
# Substitute the placeholders to get the concrete enrichment-table path.
f=gsub('ENRICHDIR',paste(enrich,'QTLpeaks0kb',sep=''),
gsub('ENRICHPREF',paste(enrich,hmark,'QTLpeaks0kb___.overlapEnrichIN',gsub('HMARK',hmark,suffix),sep=''),enrichfiles))
# NOTE(review): 0.05 and 1 are presumably a significance cutoff and a
# column index for read_enrichment_file() -- confirm against its definition.
cur_data=read_enrichment_file(f,0.05,1,'enrichment')
cur_total=read.table(f,header=TRUE)
#cur_data=cur_data/max(cur_data[,1])
if (suffix=='HMARK.QTLpeaks'){
addon='Local: HMARK'
}
if (suffix=='LocalPeakIsHMARK.QTLpeaks_affectingDistalPeaks'){
addon='Distal: HMARK'
}
# Strip file-name boilerplate so row names are plain feature labels.
rownames(cur_data)=gsub('bed.OverlapChIPseq','',
gsub('MotifMatch_','',
gsub('MergedPeaks_ChIPseq_','',
gsub('correlatedMotifs.motif.pouya.Motif.','',
gsub('scanThresh0','',rownames(cur_data))))))
#condition=paste(gsub('_',' ',enrich),hmark,' ',addon,sep='')
# Condition label, e.g. "Local: H3K27AC".
condition=gsub('HMARK',hmark,addon)
cur_total=cbind(cur_total,condition=condition)
one_enrichment_plot(paste(dirname(f),condition,'overlapEnrichmentPlot.pdf',sep=''),cur_total,top_value)
# First table initialises the matrix; later tables are column-bound in
# the first table's row order (cur_data[rownames(data),]).
if (first==FALSE){
data=cbind(data,cur_data[rownames(data),])
total=rbind(total,cur_total)
colnames(data)[ncol(data)]=condition
}
if (first==TRUE){
data=cur_data
total=cur_total
first=FALSE
colnames(data)[1]=condition
}
}
}
###### heatmap ###################################################
#add in the k27AC again, to sort by it and its pvalue
# Re-read the H3K27AC "Local" table with threshold 1.1 (keeps every row)
# purely to define the heatmap row order.
sortf=gsub('ENRICHDIR',paste(enrich,'QTLpeaks0kb',sep=''),
gsub('ENRICHPREF',paste(enrich,'H3K27AC','QTLpeaks0kb___.overlapEnrichIN',gsub('HMARK','H3K27AC','HMARK.QTLpeaks'),sep=''),enrichfiles))
k27ac_data=read_enrichment_file(sortf,1.1,1,c('enrichment'))
rownames(k27ac_data)=gsub('bed.OverlapChIPseq','',
gsub('MotifMatch_','',
gsub('MergedPeaks_ChIPseq_','',
gsub('correlatedMotifs.motif.pouya.Motif.','',
gsub('scanThresh0','',rownames(k27ac_data))))))
k27ac_sorted_rows=rownames(k27ac_data)[order(k27ac_data[,1],decreasing=TRUE)]
data=data[k27ac_sorted_rows,]
# NOTE(review): dirname(f) here is the path from the *last* loop iteration;
# works because all conditions of one enrichment share a directory.
heatmap_enrichments(data,paste(dirname(f),'overlapEnrichmentHeatmap.pdf',sep=''),'euclidean',top_value)
####################################################################
}
}
overlapEnrichment_distalQTL()
|
#####################################################################
## Author: Joshua Cape (joshua.cape@pitt.edu)
## Script: DyNet paper code, preamble material and defined functions
#####################################################################
#####################################################################
library(igraph)
library(ggplot2)
library(irlba)
library(matrixcalc)
library(matpow)
library(mvtnorm)
library(plotrix)
library(xtable)
library(knitr)
library(distill)
library(patchwork)
library(mclust)
library(MASS)
library(lubridate)
library(readr)
library(dplyr)
#####################################################################
#####################################################################
# Symmetrise a square matrix by mirroring its upper triangle onto the
# lower triangle; the diagonal and upper triangle are returned unchanged.
sym <- function(s) {
  mirrored <- t(s)
  lower <- lower.tri(s)
  s[lower] <- mirrored[lower]
  s
}
# Two-to-infinity norm of a matrix: the largest Euclidean norm over rows.
ttinf <- function(mtx.data) {
  row_norms <- apply(mtx.data, 1, function(row) norm(row, "2"))
  max(row_norms)
}
#####################################################################
#####################################################################
# Points on a circle, for plotting.
# Returns a data.frame with columns x and y holding `npoints` points evenly
# spaced in angle around a circle of the given diameter centred at `center`.
# The first and last rows coincide (angles 0 and 2*pi) so the path closes.
circleFun <- function(center = c(0, 0), diameter = 1, npoints = 100) {
  radius <- diameter / 2
  angles <- seq(0, 2 * pi, length.out = npoints)
  data.frame(
    x = center[1] + radius * cos(angles),
    y = center[2] + radius * sin(angles)
  )
}
# 2x2 counter-clockwise rotation matrix for an angle theta in radians.
rot_mtx <- function(theta) {
  ct <- cos(theta)
  st <- sin(theta)
  # Column-major fill: first column (ct, st), second column (-st, ct).
  matrix(c(ct, st, -st, ct), nrow = 2, ncol = 2)
}
# Rescale a vector to unit Euclidean length.
unit_func <- function(vector) {
  len <- norm(vector, "2")
  vector / len
}
#####################################################################
#####################################################################
# Print a matrix as a LaTeX bmatrix via the xtable package: 7-digit entries,
# no row/column labels, no floating environment -- suitable for pasting
# directly into a manuscript. Depends on xtable being attached (see the
# library() calls at the top of this script).
mtx_print <- function(mtx){
return(
print(xtable(mtx, align=rep("",ncol(mtx)+1), digits=7),
tabular.environment="bmatrix",
include.rownames=FALSE,
include.colnames=FALSE,
floating=FALSE,
hline.after=NULL,
timestamp=NULL,
comment=FALSE)
)
}
func_slope <- function(vec) return(vec[2]/vec[1])
#####################################################################
##################################################################### | /code_dynet_preamble.R | permissive | jcape1/paper_dynet | R | false | false | 2,321 | r | #####################################################################
## Author: Joshua Cape (joshua.cape@pitt.edu)
## Script: DyNet paper code, preamble material and defined functions
#####################################################################
#####################################################################
library(igraph)
library(ggplot2)
library(irlba)
library(matrixcalc)
library(matpow)
library(mvtnorm)
library(plotrix)
library(xtable)
library(knitr)
library(distill)
library(patchwork)
library(mclust)
library(MASS)
library(lubridate)
library(readr)
library(dplyr)
#####################################################################
#####################################################################
# Mirror the upper triangle of a square matrix onto its lower triangle,
# returning a symmetric matrix (diagonal and upper triangle unchanged).
sym <- function(s){
s[lower.tri(s)] = t(s)[lower.tri(s)];
s
}
# Two-to-infinity norm: the largest Euclidean norm over the matrix rows.
ttinf <- function(mtx.data){
return(max(apply(mtx.data, 1, function(x) norm(x, "2"))))
}
#####################################################################
#####################################################################
# Data frame of npoints (x, y) points evenly spaced in angle around a
# circle of the given diameter centred at `center`; first and last rows
# coincide so the plotted path closes.
circleFun <- function(center = c(0,0), diameter = 1, npoints = 100){
r = diameter / 2
tt <- seq(0, 2*pi,length.out = npoints)
xx <- center[1] + r * cos(tt)
yy <- center[2] + r * sin(tt)
return(data.frame(x = xx, y = yy))
}
# 2x2 counter-clockwise rotation matrix for angle theta (radians).
rot_mtx <- function(theta){
return(rbind(c(cos(theta), -sin(theta)),
c(sin(theta), cos(theta))))
}
# Rescale a vector to unit Euclidean length.
unit_func <- function(vector){
return(vector/norm(vector,"2"))
}
#####################################################################
#####################################################################
# Print a matrix as a LaTeX bmatrix (via xtable): 7-digit entries,
# no labels, no floating environment -- for pasting into a manuscript.
mtx_print <- function(mtx){
return(
print(xtable(mtx, align=rep("",ncol(mtx)+1), digits=7),
tabular.environment="bmatrix",
include.rownames=FALSE,
include.colnames=FALSE,
floating=FALSE,
hline.after=NULL,
timestamp=NULL,
comment=FALSE)
)
}
# Slope (rise over run) of the point (vec[1], vec[2]) from the origin.
func_slope <- function(vec) return(vec[2]/vec[1])
#####################################################################
##################################################################### |
# NDFA Water Quality
# Purpose: Code to import, clean, and export I80 continuous water quality data
# downloaded from Hydstra
# Author: Dave Bosworth & Amanda Maguire
# Load packages
library(tidyverse)
library(lubridate)
# Import Data -------------------------------------------------------------
# Define path on SharePoint site for data - this works if you have the SharePoint site synced
# to your computer
# Resolve the synced SharePoint folder under the current user's profile.
# NOTE(review): USERPROFILE is Windows-specific; this path only resolves on
# machines with the SharePoint library synced locally.
sharepoint_path <- normalizePath(
file.path(
Sys.getenv("USERPROFILE"),
"California Department of Water Resources/Office of Water Quality and Estuarine Ecology - North Delta Flow Action/WQ_Subteam"
)
)
# Import data
# col_types keeps the datetime column plus six value/QAQC-flag pairs and
# skips the remaining Hydstra columns (see the parameter-code list below).
i80_orig <- read_csv(
file = paste0(sharepoint_path, "/Raw_Data/Continuous/RTM_RAW_DWR I80_2013-2019.csv"),
col_names = FALSE,
skip = 3,
col_types = "cdd-------dd-dd-dd-dd-dd-" # "c" = character, "d" = numeric, "-" = skip
)
glimpse(i80_orig)
# Clean Data --------------------------------------------------------------
# HYDSTRA PARAMETER CODES:
# 450 - Water Temperature (Celcius)
# 630 - Depth below water surface (meters)
# 806 - Salinity (ppt)
# 810 - Turbidity (NTU)
# 821 - Specific Conductance at 25 C (uS/cm)
# 860 - pH
# 865 - Dissolved Oxygen (% saturation)
# 2351 - Dissolved Oxygen (mg/L)
# 7004 - Chlorophyll (ug/L)
# Clean data
# Change variable names - using NDFA standardized names
# Each retained series contributes a value column and a QAQC flag column
# (suffix "_Qual"); order must match the col_types string above.
names(i80_orig) <- c(
"DateTime",
"WaterTemp",
"WaterTemp_Qual",
"Turbidity",
"Turbidity_Qual",
"SpCnd",
"SpCnd_Qual",
"pH",
"pH_Qual",
"DO",
"DO_Qual",
"Chla",
"Chla_Qual"
)
# Parse date time variable, and create StationCode variable
i80_clean <- i80_orig %>%
mutate(
DateTime = mdy_hm(DateTime),
StationCode = "I80"
)
glimpse(i80_clean)
# Export Data -------------------------------------------------------------
# Export formatted data as a .csv file
# write_excel_csv() adds a UTF-8 BOM so Excel opens the file correctly.
# NOTE(review): the `path` argument was renamed `file` in readr >= 1.4;
# update this call if readr is upgraded.
i80_clean %>%
write_excel_csv(
path = paste0(sharepoint_path, "/Processed_Data/Continuous/RTM_OUTPUT_I80_formatted.csv"),
na = ""
)
# For easier importing of this file in the future should either:
# 1) convert file to .xlsx file after exporting, or
# 2) manually format the 'DateTime' variable in the .csv file to "yyyy-mm-dd hh:mm:ss"
| /Water_Quality/Continuous_WQ/Data_Cleaning/Archive/Clean_RTM_Hydstra_I80.R | no_license | InteragencyEcologicalProgram/ND-FASTR | R | false | false | 2,204 | r | # NDFA Water Quality
# Purpose: Code to import, clean, and export I80 continuous water quality data
# downloaded from Hydstra
# Author: Dave Bosworth & Amanda Maguire
# Load packages
library(tidyverse)
library(lubridate)
# Import Data -------------------------------------------------------------
# Define path on SharePoint site for data - this works if you have the SharePoint site synced
# to your computer
sharepoint_path <- normalizePath(
file.path(
Sys.getenv("USERPROFILE"),
"California Department of Water Resources/Office of Water Quality and Estuarine Ecology - North Delta Flow Action/WQ_Subteam"
)
)
# Import data
i80_orig <- read_csv(
file = paste0(sharepoint_path, "/Raw_Data/Continuous/RTM_RAW_DWR I80_2013-2019.csv"),
col_names = FALSE,
skip = 3,
col_types = "cdd-------dd-dd-dd-dd-dd-" # "c" = character, "d" = numeric, "-" = skip
)
glimpse(i80_orig)
# Clean Data --------------------------------------------------------------
# HYDSTRA PARAMETER CODES:
# 450 - Water Temperature (Celcius)
# 630 - Depth below water surface (meters)
# 806 - Salinity (ppt)
# 810 - Turbidity (NTU)
# 821 - Specific Conductance at 25 C (uS/cm)
# 860 - pH
# 865 - Dissolved Oxygen (% saturation)
# 2351 - Dissolved Oxygen (mg/L)
# 7004 - Chlorophyll (ug/L)
# Clean data
# Change variable names - using NDFA standardized names
names(i80_orig) <- c(
"DateTime",
"WaterTemp",
"WaterTemp_Qual",
"Turbidity",
"Turbidity_Qual",
"SpCnd",
"SpCnd_Qual",
"pH",
"pH_Qual",
"DO",
"DO_Qual",
"Chla",
"Chla_Qual"
)
# Parse date time variable, and create StationCode variable
i80_clean <- i80_orig %>%
mutate(
DateTime = mdy_hm(DateTime),
StationCode = "I80"
)
glimpse(i80_clean)
# Export Data -------------------------------------------------------------
# Export formatted data as a .csv file
i80_clean %>%
write_excel_csv(
path = paste0(sharepoint_path, "/Processed_Data/Continuous/RTM_OUTPUT_I80_formatted.csv"),
na = ""
)
# For easier importing of this file in the future should either:
# 1) convert file to .xlsx file after exporting, or
# 2) manually format the 'DateTime' variable in the .csv file to "yyyy-mm-dd hh:mm:ss"
|
rm(list=ls())
##########################################################################################
### Functions
##########################################################################################
# Attach every package in neededVector, installing any that are missing.
#
# neededVector - character vector of package names.
# Side effects: installs absent packages and attaches all of them.
installIfAbsentAndLoad <- function(neededVector) {
  for (thispackage in neededVector) {
    # T/F replaced with TRUE/FALSE: T is an ordinary, reassignable binding.
    if (!require(thispackage, character.only = TRUE)) {
      install.packages(thispackage)
      # library() (unlike a second require()) errors loudly if the
      # install failed, instead of silently continuing.
      library(thispackage, character.only = TRUE)
    }
  }
}
##############################
### Load required packages ###
##############################
# ISLR supplies the Auto data; boot supplies cv.glm(), which this script
# calls at the end but previously never loaded (it would error at cv.glm).
needed <- c("ISLR", "boot")
installIfAbsentAndLoad(needed)
# Get data into a data frame
mydf <- Auto
n <- nrow(mydf)
# Randomly shuffle the data frame - this is a cautionary (and
# almost always necessary) step to prevent bias if the data
# is sorted somehow
set.seed(5072, sample.kind="Rejection")
mydf <- mydf[sample(n, n),]
# Create 10 equally size folds
# cut() assigns contiguous blocks of (already shuffled) row indices to folds.
# NOTE(review): labels=F uses the reassignable F shorthand; FALSE is safer.
numfolds <- 10
fold.indices <- cut(1:n, breaks=numfolds, labels=F)
#Perform 10 fold cross validation
mse <- rep(0, numfolds)
# Build the model with the full data frame (this is the
# point - don't need to withhold rows for validation/test)
my.final.model <- glm(mpg ~ poly(horsepower, 2), data=mydf)
summary(my.final.model)
# Estimate the expected value of the true MSE
for(i in 1:numfolds){
#Segement your data by fold using the which() function
test.indices <- which(fold.indices == i)
test.data <- mydf[test.indices, ]
train.data <- mydf[-test.indices, ]
glm.fit=glm(mpg ~ poly(horsepower,2),data=train.data)
# NOTE(review): calling the method predict.glm() directly works but the
# generic predict() is the conventional entry point.
mse[i] <- mean((predict.glm(glm.fit, test.data) - test.data$mpg) ^ 2)
}
# The following value is the final estimate the expected
# value of the true MSE
mean(mse)
# The following value is a measure of its variability
sd(mse)
#
# Now compare to cv.glm()...
# cv.glm() comes from the boot package -- ensure boot is attached above.
glm.fit <- glm(mpg ~ poly(horsepower,2), data=mydf)
cv <- cv.glm(mydf, glm.fit, K = 10)
cv$delta
| /07.manualCVExample.R | no_license | wqeqwqeq/aaa | R | false | false | 1,941 | r | rm(list=ls())
##########################################################################################
### Functions
##########################################################################################
# Attach every package in neededVector, installing any that are missing.
#
# neededVector - character vector of package names.
# Side effects: installs absent packages and attaches all of them.
installIfAbsentAndLoad <- function(neededVector) {
  for (thispackage in neededVector) {
    # T/F replaced with TRUE/FALSE: T is an ordinary, reassignable binding.
    if (!require(thispackage, character.only = TRUE)) {
      install.packages(thispackage)
      # library() (unlike a second require()) errors loudly if the
      # install failed, instead of silently continuing.
      library(thispackage, character.only = TRUE)
    }
  }
}
##############################
### Load required packages ###
##############################
# ISLR supplies the Auto data; boot supplies cv.glm(), which this script
# calls at the end but previously never loaded (it would error at cv.glm).
needed <- c("ISLR", "boot")
installIfAbsentAndLoad(needed)
# Get data into a data frame
mydf <- Auto
n <- nrow(mydf)
# Randomly shuffle the data frame - this is a cautionary (and
# almost always necessary) step to prevent bias if the data
# is sorted somehow
set.seed(5072, sample.kind="Rejection")
mydf <- mydf[sample(n, n),]
# Create 10 equally size folds
numfolds <- 10
fold.indices <- cut(1:n, breaks=numfolds, labels=F)
#Perform 10 fold cross validation
mse <- rep(0, numfolds)
# Build the model with the full data frame (this is the
# point - don't need to withhold rows for validation/test)
my.final.model <- glm(mpg ~ poly(horsepower, 2), data=mydf)
summary(my.final.model)
# Estimate the expected value of the true MSE
for(i in 1:numfolds){
#Segement your data by fold using the which() function
test.indices <- which(fold.indices == i)
test.data <- mydf[test.indices, ]
train.data <- mydf[-test.indices, ]
glm.fit=glm(mpg ~ poly(horsepower,2),data=train.data)
mse[i] <- mean((predict.glm(glm.fit, test.data) - test.data$mpg) ^ 2)
}
# The following value is the final estimate the expected
# value of the true MSE
mean(mse)
# The following value is a measure of its variability
sd(mse)
#
# Now compare to cv.glm()...
glm.fit <- glm(mpg ~ poly(horsepower,2), data=mydf)
cv <- cv.glm(mydf, glm.fit, K = 10)
cv$delta
|
# Build an exaggerated-relief mesh of selected countries draped on ETOPO2
# topography, render it on a globe, then project a copy to a planar
# Lambert azimuthal equal-area view.
library(rworldxtra)
library(raadtools)
# ETOPO2 global topography as an in-memory raster brick (raadtools reader).
top <- brick(readAll(readtopo("etopo2")))
data(countriesHigh)
sv <- c("New Zealand", "Antarctica", "Papua New Guinea", "Indonesia", "Malaysia", "Fiji", "Australia")
a <- subset(countriesHigh, SOVEREIGNT %in% sv)
# Triangulate the country polygons into a mesh; vertices are b$v with
# coordinate columns x_/y_ and (after the next line) elevation z_.
# NOTE(review): tri_mesh()/globe() are presumably from the package this
# example ships with (rangl) -- confirm it is attached when running.
b <- tri_mesh(a, max_area = 0.01)
# Bilinear sample of topography at each mesh vertex.
b$v$z_ <- extract(top, b$v[, c("x_", "y_")], method = "bilinear")
b$v$z_0 <- b$v$z_
# Exaggerate relief 5000x for the globe rendering.
b$v$z_ <- b$v$z_0 * 5000
globe(b, rad = 6378137.0, specular = "black");rgl::bg3d("grey")
# Second copy with milder (50x) exaggeration, reprojected to a planar
# equal-area view centred near south-eastern Australia.
b2 <- b
b2$v$z_ <- b2$v$z_0 * 50
xy <- rgdal::project(as.matrix(b2$v[, c("x_", "y_")]), "+proj=laea +lon_0=130 +lat_0=-42 +ellps=WGS84")
b2$v$x_ <- xy[,1]
b2$v$y_ <- xy[,2]
| /inst/examples/example_topo.r | no_license | nemochina2008/rangl | R | false | false | 663 | r | library(rworldxtra)
library(raadtools)
top <- brick(readAll(readtopo("etopo2")))
data(countriesHigh)
sv <- c("New Zealand", "Antarctica", "Papua New Guinea", "Indonesia", "Malaysia", "Fiji", "Australia")
a <- subset(countriesHigh, SOVEREIGNT %in% sv)
b <- tri_mesh(a, max_area = 0.01)
b$v$z_ <- extract(top, b$v[, c("x_", "y_")], method = "bilinear")
b$v$z_0 <- b$v$z_
b$v$z_ <- b$v$z_0 * 5000
globe(b, rad = 6378137.0, specular = "black");rgl::bg3d("grey")
b2 <- b
b2$v$z_ <- b2$v$z_0 * 50
xy <- rgdal::project(as.matrix(b2$v[, c("x_", "y_")]), "+proj=laea +lon_0=130 +lat_0=-42 +ellps=WGS84")
b2$v$x_ <- xy[,1]
b2$v$y_ <- xy[,2]
|
\name{svyglm}
\alias{svyglm}
\alias{svyglm.survey.design}
\alias{svyglm.svyrep.design}
\alias{summary.svyglm}
\alias{summary.svrepglm}
\alias{vcov.svyglm}
\alias{residuals.svyglm}
\alias{residuals.svrepglm}
\alias{predict.svyglm}
\alias{predict.svrepglm}
\alias{coef.svyglm}
%- Also NEED an `\alias' for EACH other topic documented here.
\title{Survey-weighted generalised linear models.}
\description{
Fit a generalised linear model to data from a complex survey design,
with inverse-probability weighting and design-based standard errors.
}
\usage{
\method{svyglm}{survey.design}(formula, design, subset=NULL, family=stats::gaussian(),start=NULL, ...)
\method{svyglm}{svyrep.design}(formula, design, subset=NULL, family=stats::gaussian(),start=NULL, ..., rho=NULL,
return.replicates=FALSE, na.action,multicore=getOption("survey.multicore"))
\method{summary}{svyglm}(object, correlation = FALSE, df.resid=NULL,
...)
\method{predict}{svyglm}(object,newdata=NULL,total=NULL,
type=c("link","response","terms"),
se.fit=(type != "terms"),vcov=FALSE,...)
\method{predict}{svrepglm}(object,newdata=NULL,total=NULL,
type=c("link","response","terms"),
se.fit=(type != "terms"),vcov=FALSE,
return.replicates=!is.null(object$replicates),...)
}
%- maybe also `usage' for other objects documented here.
\arguments{
\item{formula}{Model formula}
\item{design}{Survey design from \code{\link{svydesign}} or \code{\link{svrepdesign}}. Must contain all variables
in the formula}
\item{subset}{Expression to select a subpopulation}
\item{family}{\code{family} object for \code{glm}}
\item{start}{Starting values for the coefficients (needed for some uncommon link/family combinations)}
\item{\dots}{Other arguments passed to \code{glm} or
\code{summary.glm} }
\item{rho}{For replicate BRR designs, to specify the parameter for
Fay's variance method, giving weights of \code{rho} and \code{2-rho}}
\item{return.replicates}{Return the replicates as the \code{replicates} component of the
result? (for \code{predict}, only possible if they
were computed in the \code{svyglm} fit)}
\item{object}{A \code{svyglm} object}
\item{correlation}{Include the correlation matrix of parameters?}
\item{na.action}{Handling of NAs}
\item{multicore}{Use the \code{multicore} package to distribute
replicates across processors?}
\item{df.resid}{Optional denominator degrees of freedom for Wald
tests}
\item{newdata}{new data frame for prediction}
\item{total}{population size when predicting population total}
\item{type}{linear predictor (\code{link}) or response}
\item{se.fit}{if \code{TRUE}, return variances of predictions}
\item{vcov}{if \code{TRUE} and \code{se=TRUE} return full
variance-covariance matrix of predictions}
}
\details{
For binomial and Poisson families use \code{family=quasibinomial()}
and \code{family=quasipoisson()} to avoid a warning about non-integer
numbers of successes. The `quasi' versions of the family objects give
the same point estimates and standard errors and do not give the
warning.
If \code{df.resid} is not specified the df for the null model is
computed by \code{\link{degf}} and the residual df computed by
subtraction. This is recommended by Korn and Graubard and is correct
for PSU-level covariates but is potentially very conservative for
individual-level covariates. To get tests based on a Normal distribution
use \code{df.resid=Inf}, and to use number of PSUs-number of strata,
specify \code{df.resid=degf(design)}.
Parallel processing with \code{multicore=TRUE} is helpful only for
fairly large data sets and on computers with sufficient memory. It may
be incompatible with GUIs, although the Mac Aqua GUI appears to be safe.
\code{predict} gives fitted values and sampling variability for specific new
values of covariates. When \code{newdata} are the population mean it
gives the regression estimator of the mean, and when \code{newdata} are
the population totals and \code{total} is specified it gives the
regression estimator of the population total. Regression estimators of
mean and total can also be obtained with \code{\link{calibrate}}.
}
\value{ \code{svyglm} returns an object of class \code{svyglm}. The
\code{predict} method returns an object of class \code{svystat}}
\author{Thomas Lumley}
\seealso{
\code{\link{glm}}, which is used to do most of the work.
\code{\link{regTermTest}}, for multiparameter tests
\code{\link{calibrate}}, for an alternative way to specify regression
estimators of population totals or means
\code{\link{svyttest}} for one-sample and two-sample t-tests.
}
\references{
Lumley T, Scott A (2017) "Fitting Regression Models to Survey Data"
Statistical Science 32: 265-278
}
\examples{
data(api)
dstrat<-svydesign(id=~1,strata=~stype, weights=~pw, data=apistrat, fpc=~fpc)
dclus2<-svydesign(id=~dnum+snum, weights=~pw, data=apiclus2)
rstrat<-as.svrepdesign(dstrat)
rclus2<-as.svrepdesign(dclus2)
summary(svyglm(api00~ell+meals+mobility, design=dstrat))
summary(svyglm(api00~ell+meals+mobility, design=dclus2))
summary(svyglm(api00~ell+meals+mobility, design=rstrat))
summary(svyglm(api00~ell+meals+mobility, design=rclus2))
## use quasibinomial, quasipoisson to avoid warning messages
summary(svyglm(sch.wide~ell+meals+mobility, design=dstrat,
family=quasibinomial()))
## Compare regression and ratio estimation of totals
api.ratio <- svyratio(~api.stu,~enroll, design=dstrat)
pop<-data.frame(enroll=sum(apipop$enroll, na.rm=TRUE))
npop <- nrow(apipop)
predict(api.ratio, pop$enroll)
## regression estimator is less efficient
api.reg <- svyglm(api.stu~enroll, design=dstrat)
predict(api.reg, newdata=pop, total=npop)
## same as calibration estimator
svytotal(~api.stu, calibrate(dstrat, ~enroll, pop=c(npop, pop$enroll)))
## svyglm can also reproduce the ratio estimator
api.reg2 <- svyglm(api.stu~enroll-1, design=dstrat,
family=quasi(link="identity",var="mu"))
predict(api.reg2, newdata=pop, total=npop)
## higher efficiency by modelling variance better
api.reg3 <- svyglm(api.stu~enroll-1, design=dstrat,
family=quasi(link="identity",var="mu^3"))
predict(api.reg3, newdata=pop, total=npop)
## true value
sum(apipop$api.stu)
}
\keyword{regression}% at least one, from doc/KEYWORDS
\keyword{survey}% at least one, from doc/KEYWORDS
| /man/svyglm.Rd | no_license | jeffeaton/survey | R | false | false | 6,545 | rd | \name{svyglm}
\alias{svyglm}
\alias{svyglm.survey.design}
\alias{svyglm.svyrep.design}
\alias{summary.svyglm}
\alias{summary.svrepglm}
\alias{vcov.svyglm}
\alias{residuals.svyglm}
\alias{residuals.svrepglm}
\alias{predict.svyglm}
\alias{predict.svrepglm}
\alias{coef.svyglm}
%- Also NEED an `\alias' for EACH other topic documented here.
\title{Survey-weighted generalised linear models.}
\description{
Fit a generalised linear model to data from a complex survey design,
with inverse-probability weighting and design-based standard errors.
}
\usage{
\method{svyglm}{survey.design}(formula, design, subset=NULL, family=stats::gaussian(),start=NULL, ...)
\method{svyglm}{svyrep.design}(formula, design, subset=NULL, family=stats::gaussian(),start=NULL, ..., rho=NULL,
return.replicates=FALSE, na.action,multicore=getOption("survey.multicore"))
\method{summary}{svyglm}(object, correlation = FALSE, df.resid=NULL,
...)
\method{predict}{svyglm}(object,newdata=NULL,total=NULL,
type=c("link","response","terms"),
se.fit=(type != "terms"),vcov=FALSE,...)
\method{predict}{svrepglm}(object,newdata=NULL,total=NULL,
type=c("link","response","terms"),
se.fit=(type != "terms"),vcov=FALSE,
return.replicates=!is.null(object$replicates),...)
}
%- maybe also `usage' for other objects documented here.
\arguments{
\item{formula}{Model formula}
\item{design}{Survey design from \code{\link{svydesign}} or \code{\link{svrepdesign}}. Must contain all variables
in the formula}
\item{subset}{Expression to select a subpopulation}
\item{family}{\code{family} object for \code{glm}}
\item{start}{Starting values for the coefficients (needed for some uncommon link/family combinations)}
\item{\dots}{Other arguments passed to \code{glm} or
\code{summary.glm} }
\item{rho}{For replicate BRR designs, to specify the parameter for
Fay's variance method, giving weights of \code{rho} and \code{2-rho}}
\item{return.replicates}{Return the replicates as the \code{replicates} component of the
result? (for \code{predict}, only possible if they
were computed in the \code{svyglm} fit)}
\item{object}{A \code{svyglm} object}
\item{correlation}{Include the correlation matrix of parameters?}
\item{na.action}{Handling of NAs}
\item{multicore}{Use the \code{multicore} package to distribute
replicates across processors?}
\item{df.resid}{Optional denominator degrees of freedom for Wald
tests}
\item{newdata}{new data frame for prediction}
\item{total}{population size when predicting population total}
\item{type}{linear predictor (\code{link}) or response}
\item{se.fit}{if \code{TRUE}, return variances of predictions}
\item{vcov}{if \code{TRUE} and \code{se=TRUE} return full
variance-covariance matrix of predictions}
}
\details{
For binomial and Poisson families use \code{family=quasibinomial()}
and \code{family=quasipoisson()} to avoid a warning about non-integer
numbers of successes. The `quasi' versions of the family objects give
the same point estimates and standard errors and do not give the
warning.
If \code{df.resid} is not specified the df for the null model is
computed by \code{\link{degf}} and the residual df computed by
subtraction. This is recommended by Korn and Graubard and is correct
for PSU-level covariates but is potentially very conservative for
individual-level covariates. To get tests based on a Normal distribution
use \code{df.resid=Inf}, and to use number of PSUs-number of strata,
specify \code{df.resid=degf(design)}.
Parallel processing with \code{multicore=TRUE} is helpful only for
fairly large data sets and on computers with sufficient memory. It may
be incompatible with GUIs, although the Mac Aqua GUI appears to be safe.
\code{predict} gives fitted values and sampling variability for specific new
values of covariates. When \code{newdata} are the population mean it
gives the regression estimator of the mean, and when \code{newdata} are
the population totals and \code{total} is specified it gives the
regression estimator of the population total. Regression estimators of
mean and total can also be obtained with \code{\link{calibrate}}.
}
\value{ \code{svyglm} returns an object of class \code{svyglm}. The
\code{predict} method returns an object of class \code{svystat}}
\author{Thomas Lumley}
\seealso{
\code{\link{glm}}, which is used to do most of the work.
\code{\link{regTermTest}}, for multiparameter tests
\code{\link{calibrate}}, for an alternative way to specify regression
estimators of population totals or means
\code{\link{svyttest}} for one-sample and two-sample t-tests.
}
\references{
Lumley T, Scott A (2017) "Fitting Regression Models to Survey Data"
Statistical Science 32: 265-278
}
\examples{
data(api)
dstrat<-svydesign(id=~1,strata=~stype, weights=~pw, data=apistrat, fpc=~fpc)
dclus2<-svydesign(id=~dnum+snum, weights=~pw, data=apiclus2)
rstrat<-as.svrepdesign(dstrat)
rclus2<-as.svrepdesign(dclus2)
summary(svyglm(api00~ell+meals+mobility, design=dstrat))
summary(svyglm(api00~ell+meals+mobility, design=dclus2))
summary(svyglm(api00~ell+meals+mobility, design=rstrat))
summary(svyglm(api00~ell+meals+mobility, design=rclus2))
## use quasibinomial, quasipoisson to avoid warning messages
summary(svyglm(sch.wide~ell+meals+mobility, design=dstrat,
family=quasibinomial()))
## Compare regression and ratio estimation of totals
api.ratio <- svyratio(~api.stu,~enroll, design=dstrat)
pop<-data.frame(enroll=sum(apipop$enroll, na.rm=TRUE))
npop <- nrow(apipop)
predict(api.ratio, pop$enroll)
## regression estimator is less efficient
api.reg <- svyglm(api.stu~enroll, design=dstrat)
predict(api.reg, newdata=pop, total=npop)
## same as calibration estimator
svytotal(~api.stu, calibrate(dstrat, ~enroll, pop=c(npop, pop$enroll)))
## svyglm can also reproduce the ratio estimator
api.reg2 <- svyglm(api.stu~enroll-1, design=dstrat,
family=quasi(link="identity",var="mu"))
predict(api.reg2, newdata=pop, total=npop)
## higher efficiency by modelling variance better
api.reg3 <- svyglm(api.stu~enroll-1, design=dstrat,
family=quasi(link="identity",var="mu^3"))
predict(api.reg3, newdata=pop, total=npop)
## true value
sum(apipop$api.stu)
}
\keyword{regression}% at least one, from doc/KEYWORDS
\keyword{survey}% at least one, from doc/KEYWORDS
|
# Student's t distribution: confidence-interval bounds for a sample mean.
# (Comments translated from Spanish; variable names kept as-is.)
# Alternative: draw the sample from a normal distribution instead
# t <- rnorm(16, 13, 5.6)
# Generated values
# t
# Built-in one-sample t test, for comparison
# t.test(t)
#hist(t, col="blue")
# Random sample: 16 draws from Uniform(1, 10)
t <- runif(16, 1, 10)
t
# Sample size
n <- length(t)
n
# Sample mean
Promedio <- mean(t)
Promedio
# Assumed (known) standard deviation
# NOTE(review): assigning to sd masks base R's sd() for the rest of the session.
sd=5.6
Desviacion_Estandar <- sd
Desviacion_Estandar
# Assumed (known) mean
# NOTE(review): assigning to mean masks base R's mean() -- but mean(t) above
# already ran, so this script still works top-to-bottom.
mean=13
Media <- mean
Media
# Degrees of freedom
Grados_Libertad <- n-1
Grados_Libertad
# Confidence level (%)
Int_Confianza <- 99
Int_Confianza
# Risk (100 - confidence level, i.e. alpha as a percentage)
Riesgo <- 100 - Int_Confianza
Riesgo
# Cumulative probability for the two-sided critical value: 1 - alpha/2
Alfa <- 1- ((Riesgo/100)/2)
Alfa
# Two-sided critical t value
ValorCritico <- qt(Alfa, Grados_Libertad, lower.tail = TRUE)# <=
ValorCritico
qt(Alfa, Grados_Libertad, lower.tail = FALSE)# >
# Extreme values
# Upper and lower bounds of the critical region (confidence interval):
# mean +/- t* * sd / sqrt(n)
Region_Critica_Superior <- ((Promedio)+(ValorCritico*Desviacion_Estandar)/(sqrt(n)))
Region_Critica_Superior
Region_Critica_Inferior <- ((Promedio)-(ValorCritico*Desviacion_Estandar)/(sqrt(n)))
Region_Critica_Inferior
| /6_distibución_T-student/6.R | no_license | wallyHack/proyecto_final_de_estadistica | R | false | false | 1,061 | r | # Distribucion T Student
#Datos Aleatorios
# t <- rnorm(16, 13, 5.6)
#Valores Generados
# t
#Calculo T Student
# t.test(t)
#hist(t, col="blue")
#Distribucion T-Student
#Datos Aleatorios
t <- runif(16, 1, 10)
t
#Registros En Total
n <- length(t)
n
#Promedio
Promedio <- mean(t)
Promedio
#Desviaci?n Estandar
sd=5.6
Desviacion_Estandar <- sd
Desviacion_Estandar
#Media
mean=13
Media <- mean
Media
#Grados De Libertad
Grados_Libertad <- n-1
Grados_Libertad
#Intevalo De Confianza
Int_Confianza <- 99
Int_Confianza
#Riesgo
Riesgo <- 100 - Int_Confianza
Riesgo
#Alfa
Alfa <- 1- ((Riesgo/100)/2)
Alfa
#Valor Criticos(tCr?tico)
ValorCritico <- qt(Alfa, Grados_Libertad, lower.tail = TRUE)# <=
ValorCritico
qt(Alfa, Grados_Libertad, lower.tail = FALSE)# >
#Valores Extremos
#Obtener Regi?n Cr?tica Inferior y Superior
Region_Critica_Superior <- ((Promedio)+(ValorCritico*Desviacion_Estandar)/(sqrt(n)))
Region_Critica_Superior
Region_Critica_Inferior <- ((Promedio)-(ValorCritico*Desviacion_Estandar)/(sqrt(n)))
Region_Critica_Inferior
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zzz.R
\docType{package}
\name{mlr3learners.catboost-package}
\alias{mlr3learners.catboost}
\alias{mlr3learners.catboost-package}
\title{mlr3learners.catboost: Learners from catboost package for mlr3}
\description{
Adds Learner functionality from the catboost
package to mlr3.
}
\author{
\strong{Maintainer}: Lennart Schneider \email{lennart.sch@web.de} (\href{https://orcid.org/0000-0003-4152-5308}{ORCID})
}
| /man/mlr3learners.catboost-package.Rd | no_license | mlr3learners/mlr3learners.catboost | R | false | true | 492 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zzz.R
\docType{package}
\name{mlr3learners.catboost-package}
\alias{mlr3learners.catboost}
\alias{mlr3learners.catboost-package}
\title{mlr3learners.catboost: Learners from catboost package for mlr3}
\description{
Adds Learner functionality from the catboost
package to mlr3.
}
\author{
\strong{Maintainer}: Lennart Schneider \email{lennart.sch@web.de} (\href{https://orcid.org/0000-0003-4152-5308}{ORCID})
}
|
# plot6.R
# Compares emissions from motor-vehicle ("ON-ROAD") sources in Baltimore City
# (fips == "24510") with Los Angeles County (fips == "06037") over time.
# Assumes the NEI data frame and ggplot2 are already loaded in the workspace.
png("plot6.png")
# Keep only on-road sources in the two counties of interest
baltimore_la_data <- NEI[(NEI$fips=="24510" | NEI$fips=="06037") & NEI$type=="ON-ROAD", ]
# Sum emissions per year within each county
yearly_fips_emissions <- aggregate(Emissions ~ year + fips, baltimore_la_data, sum)
# Replace fips codes with readable facet labels
yearly_fips_emissions$fips[yearly_fips_emissions$fips=="24510"] <- "Baltimore"
yearly_fips_emissions$fips[yearly_fips_emissions$fips=="06037"] <- "Los Angeles"
# Build the faceted bar chart in a single chain (one panel per city)
plot <- ggplot(yearly_fips_emissions, aes(factor(year), Emissions)) +
  facet_grid(. ~ fips) +
  geom_bar(stat="identity") +
  xlab("Year") +
  ylab("Total Emissions") +
  ggtitle('Total Emissions in Baltimore and Los Angeles')
print(plot)
dev.off() | /plot6.R | no_license | Juhaninho/ExploratoryDataAnalysis | R | false | false | 626 | r | png("plot6.png")
baltimore_la_data <- NEI[(NEI$fips=="24510" | NEI$fips=="06037") & NEI$type=="ON-ROAD", ]
yearly_fips_emissions <- aggregate(Emissions ~ year + fips, baltimore_la_data, sum)
yearly_fips_emissions$fips[yearly_fips_emissions$fips=="24510"] <- "Baltimore"
yearly_fips_emissions$fips[yearly_fips_emissions$fips=="06037"] <- "Los Angeles"
plot <- ggplot(yearly_fips_emissions, aes(factor(year), Emissions))
plot <- plot + facet_grid(. ~ fips)
plot <- plot + geom_bar(stat="identity") +
xlab("Year") +
ylab("Total Emissions") +
ggtitle('Total Emissions in Baltimore and Los Angeles')
print(plot)
dev.off() |
# Script Description --------------------
# This R script generates barplots using the ggplot2 package to visualise the
# transition-level intensity analysis of changes in Mindoro Island,
# Philippines, derived from land cover classification of Landsat data for
# three time-intervals: 1988-2000, 2000-2010, and 2010-2015. Intensity
# analysis was calculated using an Excel spreadsheet with a VBA macro
# (see https://sites.google.com/site/intensityanalysis/). The domain of
# analysis is Mindoro Island.
#
# Script By:      Jose Don T De Alban
# Date Created:   13 Mar 2018
# Last Modified:  17 Aug 2019
# Set Working Directory -------------------
setwd("/Users/dondealban/Dropbox/Research/Mindoro/intensity analysis/")
# Load Libraries --------------------------
library(tidyverse)
library(readxl)
# Read Input Data -------------------------
# Read the transition-level sheets (grassland gain and loss) and convert the
# tibbles returned by read_excel() into plain data frames
rawG <- as.data.frame(read_excel("Transition_Level_Intensity_Analysis.xlsx", sheet="GRA_Gain"))
rawL <- as.data.frame(read_excel("Transition_Level_Intensity_Analysis.xlsx", sheet="GRA_Loss"))
# Clean and Subset Data -------------------
# 1. Tag each table with its change type (the appended column becomes column 12)
dfG <- cbind(rawG, type1 = rep("Gain", nrow(rawG)))
dfL <- cbind(rawL, type2 = rep("Loss", nrow(rawL)))
# 2. Move the change-type column to third position before renaming
dfGain <- dfG[, c(1:2, 12, 3:11)]
dfLoss <- dfL[, c(1:2, 12, 3:11)]
# 3. Change column names for easier reference
# Note the following description of the renamed columns
# ColA - Years of Time Interval
# ColB - Study Area/Site
# ColC - Change Type
# ColD - Category Name
# ColE - Observed Annual Loss/Gain [number of elements]
# ColF - Loss/Gain Intensity [percent of t1/t2 category]
# ColG - Uniform Intensity [percent of t1/t2 to/from category]
# ColH - Uniform Annual Loss/Gain [number of elements]
# ColI - Hypothesized Annual Error [number of elements]
# ColJ - Commission Intensity [percent of t1/t2 transition]
# ColK - Omission Intensity [percent of t1/t2 transition]
# ColL - Hypothesized t1/t2 Error [percent of interval domain]
# NOTE: the original bound these names to a variable called 'list', which
# masked base::list(); renamed to avoid the shadowing.
col_labels <- c("ColA","ColB","ColC","ColD","ColE","ColF","ColG","ColH","ColI","ColJ","ColK","ColL")
colnames(dfGain) <- col_labels
colnames(dfLoss) <- col_labels
# Generate Plots ------------------------
# Plot 1: To N (gain transitions), faceted by site (rows) and interval (columns)
plotG <- ggplot() +
  geom_bar(data=dfGain, aes(x=ColD, y=ColF, fill=ColC), stat="identity", position=position_dodge()) +
  geom_hline(data=dfGain, aes(yintercept=ColG, colour="#000000"), linetype="dashed") + # Uniform line
  facet_grid(ColB ~ ColA, scales="free_y") +
  labs(x="Losing Category", y="Annual Transition Intensity (% of Category at Initial Time)") +
  scale_fill_manual(values=c("#4472c4"), labels=c("Gain Intensity")) +
  scale_colour_manual(values=c("#000000"), labels=c("Uniform Intensity")) +
  theme(panel.grid.minor=element_blank()) +
  theme(legend.position="bottom", legend.box="horizontal", legend.title=element_blank())
# Plot 2: From M (loss transitions)
plotL <- ggplot() +
  geom_bar(data=dfLoss, aes(x=ColD, y=ColF, fill=ColC), stat="identity", position=position_dodge()) +
  geom_hline(data=dfLoss, aes(yintercept=ColG, colour="#000000"), linetype="dashed") + # Uniform line
  facet_grid(ColB ~ ColA, scales="free_y") +
  labs(x="Gaining Category", y="Annual Transition Intensity (% of Category at Final Time)") +
  scale_fill_manual(values=c("#4472c4"), labels=c("Loss Intensity")) +
  scale_colour_manual(values=c("#000000"), labels=c("Uniform Intensity")) +
  theme(panel.grid.minor=element_blank()) +
  theme(legend.position="bottom", legend.box="horizontal", legend.title=element_blank())
# Save Outputs --------------------------
# Output barplots to PDF files
ggsave(plotG, file="Transition-Level-Intensity-Analysis-Grassland-Gain.pdf", width=25, height=20, units="cm", dpi=300)
ggsave(plotL, file="Transition-Level-Intensity-Analysis-Grassland-Loss.pdf", width=25, height=20, units="cm", dpi=300)
| /scripts/R_Intensity-Analysis-Transition-Level_GRA_v1.R | no_license | dondealban/mindoro | R | false | false | 4,139 | r | # Script Description --------------------
# This R script generates barplots using ggplot2 package to visualise the transition-level
# intensity analysis of of changes in Mindoro Island, Philippines derived from land cover
# classification of Landsat data for three time-intervals: 1988-2000, 2000-2010, and
# 2010-2015. Intensity analysis was calculated using an Excel spreadsheet with a VBA macro
# (see https://sites.google.com/site/intensityanalysis/). The domain of analysis is
# Mindoro Island.
#
# Script By: Jose Don T De Alban
# Date Created: 13 Mar 2018
# Last Modified: 17 Aug 2019
# Set Working Directory -------------------
setwd("/Users/dondealban/Dropbox/Research/Mindoro/intensity analysis/")
# Load Libraries --------------------------
library(tidyverse)
library(readxl)
# Read Input Data -------------------------
# Read transition level XLSX data file, convert to data frame, and store into variable
rawG <- as.data.frame(read_excel("Transition_Level_Intensity_Analysis.xlsx", sheet="GRA_Gain"))
rawL <- as.data.frame(read_excel("Transition_Level_Intensity_Analysis.xlsx", sheet="GRA_Loss"))
# Clean and Subset Data -------------------
# 1. Add Change Type column
type1 <- rep("Gain", nrow(rawG))
type2 <- rep("Loss", nrow(rawL))
dfG <- cbind(rawG, type1)
dfL <- cbind(rawL, type2)
# 2. Reorder columns before renaming
dfGain <- dfG[,c(1:2,12,3:11)]
dfLoss <- dfL[,c(1:2,12,3:11)]
# 3. Change column names for easier reference
# Note the following description of category level column names
# ColA - Years of Time Interval
# ColB - Study Area/Site
# ColC - Change Type
# ColD - Category Name
# ColE - Observed Annual Loss/Gain [number of elements]
# ColF - Loss/Gain Intensity [percent of t1/t2 category]
# ColG - Uniform Intensity [percent of t1/t2 to/from category]
# ColH - Uniform Annual Loss/Gain [number of elements]
# ColI - Hypothesized Annual Error [number of elements]
# ColJ - Commission Intensity [percent of t1/t2 transition]
# ColK - Omission Intensity [percent of t1/t2 transition]
# ColL - Hypothesized t1/t2 Error [percent of interval domain]
list <- c("ColA","ColB","ColC","ColD","ColE","ColF","ColG","ColH","ColI","ColJ","ColK","ColL")
colnames(dfGain) <- c(list)
colnames(dfLoss) <- c(list)
# Generate Plots ------------------------
# Plot 1: To N (Gain Transition)
plotG <- ggplot() + geom_bar(data=dfGain, aes(x=ColD, y=ColF, fill=ColC), stat="identity", position=position_dodge())
plotG <- plotG + geom_hline(data=dfGain, aes(yintercept=ColG, colour="#000000"), linetype="dashed") # Uniform line
plotG <- plotG + facet_grid(ColB ~ ColA, scales="free_y")
plotG <- plotG + labs(x="Losing Category", y="Annual Transition Intensity (% of Category at Initial Time)")
plotG <- plotG + scale_fill_manual(values=c("#4472c4"), labels=c("Gain Intensity"))
plotG <- plotG + scale_colour_manual(values=c("#000000"), labels=c("Uniform Intensity"))
plotG <- plotG + theme(panel.grid.minor=element_blank())
plotG <- plotG + theme(legend.position="bottom", legend.box="horizontal", legend.title=element_blank())
# Plot 2: From M (Loss Transition)
plotL <- ggplot() + geom_bar(data=dfLoss, aes(x=ColD, y=ColF, fill=ColC), stat="identity", position=position_dodge())
plotL <- plotL + geom_hline(data=dfLoss, aes(yintercept=ColG, colour="#000000"), linetype="dashed") # Uniform line
plotL <- plotL + facet_grid(ColB ~ ColA, scales="free_y")
plotL <- plotL + labs(x="Gaining Category", y="Annual Transition Intensity (% of Category at Final Time)")
plotL <- plotL + scale_fill_manual(values=c("#4472c4"), labels=c("Loss Intensity"))
plotL <- plotL + scale_colour_manual(values=c("#000000"), labels=c("Uniform Intensity"))
plotL <- plotL + theme(panel.grid.minor=element_blank())
plotL <- plotL + theme(legend.position="bottom", legend.box="horizontal", legend.title=element_blank())
# Save Outputs --------------------------
# Output boxplots to a PDF file
ggsave(plotG, file="Transition-Level-Intensity-Analysis-Grassland-Gain.pdf", width=25, height=20, units="cm", dpi=300)
ggsave(plotL, file="Transition-Level-Intensity-Analysis-Grassland-Loss.pdf", width=25, height=20, units="cm", dpi=300)
|
# Exercise 2: working with data APIs
# Queries the New York Times Movie Reviews API for reviews of a chosen film
# and collects the headline, short summary, and article link of the results.
# load relevant libraries
library(httr)
library(jsonlite)
# Use `source()` to load your API key variable from the `apikey.R` file you made.
# Make sure you've set your working directory!
# (apikey.R is expected to define `nyt_api_key`, which is used below.)
source("apikey.R")
# Create a variable `movie.name` that is the name of a movie of your choice.
movie_name <- "Inception"
# Construct an HTTP request to search for reviews for the given movie.
# The base URI is `https://api.nytimes.com/svc/movies/v2/`
# The resource is `reviews/search.json`
# See the interactive console for parameter details:
# https://developer.nytimes.com/movie_reviews_v2.json
#
# You should use YOUR api key (as the `api-key` parameter)
# and your `movie.name` variable as the search query!
base_url <- "https://api.nytimes.com/svc/movies/v2/"
resource <- "reviews/search.json"
query_para <- list("api-key" = nyt_api_key, query = movie_name)
# Send the HTTP Request to download the data
# Extract the content and convert it from JSON
# (content(..., "text") yields the raw JSON string; fromJSON() parses it into
# R structures, and `body$results` is accessed as a data frame below.)
response <- GET(paste0(base_url, resource), query = query_para)
body <- fromJSON(content(response, "text"))
# What kind of data structure did this produce? A data frame? A list?
is.data.frame(body)
is.list(body)
# Manually inspect the returned data and identify the content of interest
# (which are the movie reviews).
# Use functions such as `names()`, `str()`, etc.
names(body)
names(body$results)
# Flatten the movie reviews content into a data structure called `reviews`
# (jsonlite's flatten() expands nested data-frame columns such as `link`
# into top-level columns like `link.url`, used below)
review<- flatten(body$results)
# From the most recent review, store the headline, short summary, and link to
# the full article, each in their own variables
# NOTE(review): these columns hold values for ALL returned reviews, not just
# the most recent one; if a single review is intended, subset with [1] --
# confirm against the assignment's expected output.
headline <- review$headline
short_summary <- review$summary_short
link <- review$link.url
# Create a list of the three pieces of information from above.
# Print out the list.
inception <- list(headline, short_summary, link)
print(inception)
| /exercise-2/exercise.R | permissive | andrew861003/ch11-apis | R | false | false | 1,866 | r | # Exercise 2: working with data APIs
# load relevant libraries
library(httr)
library(jsonlite)
# Use `source()` to load your API key variable from the `apikey.R` file you made.
# Make sure you've set your working directory!
source("apikey.R")
# Create a variable `movie.name` that is the name of a movie of your choice.
movie_name <- "Inception"
# Construct an HTTP request to search for reviews for the given movie.
# The base URI is `https://api.nytimes.com/svc/movies/v2/`
# The resource is `reviews/search.json`
# See the interactive console for parameter details:
# https://developer.nytimes.com/movie_reviews_v2.json
#
# You should use YOUR api key (as the `api-key` parameter)
# and your `movie.name` variable as the search query!
base_url <- "https://api.nytimes.com/svc/movies/v2/"
resource <- "reviews/search.json"
query_para <- list("api-key" = nyt_api_key, query = movie_name)
# Send the HTTP Request to download the data
# Extract the content and convert it from JSON
response <- GET(paste0(base_url, resource), query = query_para)
body <- fromJSON(content(response, "text"))
# What kind of data structure did this produce? A data frame? A list?
is.data.frame(body)
is.list(body)
# Manually inspect the returned data and identify the content of interest
# (which are the movie reviews).
# Use functions such as `names()`, `str()`, etc.
names(body)
names(body$results)
# Flatten the movie reviews content into a data structure called `reviews`
review<- flatten(body$results)
# From the most recent review, store the headline, short summary, and link to
# the full article, each in their own variables
headline <- review$headline
short_summary <- review$summary_short
link <- review$link.url
# Create a list of the three pieces of information from above.
# Print out the list.
inception <- list(headline, short_summary, link)
print(inception)
|
###################################################################################
# To apply the m-out-of-n bootstrap, we need an estimate of the "amount of
# irregularity" in the data.
# Irregularity occurs when the second-stage treatment has a very small effect
# on the treatment decision.
#
# Following Chakraborty et al (2013), this occurs when phi*x is close to zero,
# because then the function f(x) = Indicator(phi*x > 0) is not differentiable.
#
# Goal here: how to estimate p = (probability that phi*x will be close to zero)
# Idea:
#   - fit the dWOLS model to get estimates of phi. Then, using these estimates,
#     calculate \hat phi*x for each observation. Then, \hat p = proportion of
#     observations that have \hat phi*x "close" to zero.
#   - "close" to zero is subjective. Might want to vary the threshold.
# Other idea (as suggested by Wallace et al in JSS):
#   - "non-regularity occurs when optimal treatment is not unique. (...)
#     [estimating irregularity] involves identifying the proportion of subjects
#     for whom, when all possible blip parameter values within their respective
#     confidence sets are considered, both treatment and non-treatment could be
#     recommended."
#
###################################################################################
library(DTRreg)
# Inverse-logit (expit): maps a linear predictor to a probability in (0, 1)
expit <- function(x) exp(x)/(1+exp(x))
# gamma parameters following Chakraborty et al (2013) to control for irregularity in the generated data
# (one row per simulation scenario 1..9; columns are gamma_1..gamma_7 of the
# outcome model used in the scenario blocks below)
g <- matrix(NA, nrow = 9, ncol = 7)
g[1,] <- c(0,0,0,0,0,0,0)
g[2,] <- c(0,0,0,0,0.01,0,0)
g[3,] <- c(0,0,-0.5,0,0.5,0,-0.5)
g[4,] <- c(0,0,-0.5,0,0.99,0,-0.98)
g[5,] <- c(0,0,-0.5,0,1,0.5,-0.5)
g[6,] <- c(0,0,-0.5,0,0.25,0.5,0.5)
g[7,] <- c(0,0,-0.25,0,0.75,0.5,0.5)
g[8,] <- c(0,0,0,0,1,0,-1)
g[9,] <- c(0,0,0,0,0.25,0,-0.24)
# delta parameters following Chakraborty et al (2013) to control for irregularity in the generated data
# (one row per scenario; columns are delta_1, delta_2, which drive how the
# second-stage covariate O2 depends on O1 and A1)
d <- matrix(NA, nrow = 9, ncol = 2)
d[1,] <- c(0.5,0.5)
d[2,] <- c(0.5,0.5)
d[3,] <- c(0.5,0.5)
d[4,] <- c(0.5,0.5)
d[5,] <- c(1,0)
d[6,] <- c(0.1,0.1)
d[7,] <- c(0.1,0.1)
d[8,] <- c(0,0)
d[9,] <- c(0,0)
# scenario indices 1..9 (note sc[i] == i; kept for readable indexing of g and d)
sc <- seq(1,9)
################################### scenario 3 - nonregular ###################################
n <- 300
i <- 3
sc.i <- sc[i]   # parameter-matrix row for this scenario

# Randomised treatments at both stages: P(Aj = 1) = P(Aj = 0) = 0.5
A1 <- rbinom(n, size = 1, prob = 0.5)
A2 <- rbinom(n, size = 1, prob = 0.5)
# Recode A1 to {-1, 1} so the published (delta_1, delta_2) values apply unchanged
A1.min <- 2 * A1 - 1

# Covariates O1, O2 coded {-1, 1}; O2 depends on O1 and A1 through (delta_1, delta_2)
O1 <- 2 * rbinom(n, size = 1, prob = 0.5) - 1
O2 <- 2 * rbinom(n, size = 1, prob = expit(d[sc.i, 1] * O1 + d[sc.i, 2] * A1.min)) - 1

# Outcome Y2 (Y1 taken as 0), built from (gamma_1, ..., gamma_7) plus N(0, 1) noise
Y2 <- g[sc.i, 1] + g[sc.i, 2] * O1 + g[sc.i, 3] * A1 + g[sc.i, 4] * O1 * A1 +
  g[sc.i, 5] * A2 + g[sc.i, 6] * O2 * A2 + g[sc.i, 7] * A1 * A2 + rnorm(n)

# Model specification for dWOLS
blip.model <- list(~ O1, ~ O2 + A1)
proba <- list(rep(0.5, n))
treat.model <- list(A1 ~ 1, A2 ~ 1)
tf.model <- list(~ O1, ~ O1 + A1 + O1 * A1)

# Fit dWOLS to the generated dataset, using all n = 300 observations
s3 <- DTRreg(outcome = Y2, blip.mod = blip.model, treat.mod = treat.model, tf.mod = tf.model,
             treat.mod.man = rep(proba, 2), method = "dwols")
summary(s3)

# Second-stage blip estimates give \hat phi * x; its mass near zero estimates p
# (expected to be close to 0.5 in this scenario)
int1 <- s3$psi[[1]][1]
B.o1 <- s3$psi[[1]][2]
int2 <- s3$psi[[2]][1]
B.o2 <- s3$psi[[2]][2]
B.a1 <- s3$psi[[2]][3]
psi <- int2 + O2 * B.o2 + A1 * B.a1
psi

# \hat p = share of observations with \hat phi * x "close" to zero; the
# threshold is subjective, so two choices are shown. Estimates vary a lot
# across generated datasets.
length(which(abs(psi) < 0.1)) / n
length(which(abs(psi) < 0.15)) / n

# Probability of generating a history with g5*A2 + g6*O2*A2 + g7*A1*A2 == 0
# (follows the paper's {-1, 1} coding of A1, A2; with {0, 1} coding this
# specification of p is not relevant because p is always 0.5)
gg <- g[sc.i, 5] * A2 + g[sc.i, 6] * O2 * A2 + g[sc.i, 7] * A1 * A2
length(which(gg == 0)) / n
################################### scenario 5 - nonregular ###################################
i <- 5
# treatment A1, A2: P(Aj = 1) = P(Aj = 0) = 0.5  (n = 300 from the scenario-3 block)
A1 <- rbinom(n, size = 1, prob = 0.5)
A2 <- rbinom(n, size = 1, prob = 0.5)
# treatment A1 coded as -1,1 so I don't have to adapt the delta_1 and delta_2 parameters
A1.min <- 2*A1 - 1
# covariates O1, O2: coded as -1, 1, where O2 depends on A1, O1 and (delta_1,delta_2)
# BUG FIX: the original hard-coded d[3,] here (scenario 3's deltas, (0.5, 0.5));
# scenario 5 is defined with d[5,] = (1, 0), so index the parameter matrices
# via sc[i] exactly as the scenario-3 block does.
O1 <- 2*rbinom(n, size = 1, prob = 0.5) - 1
O2 <- 2*rbinom(n, size = 1, prob = expit(d[sc[i],1]*O1 + d[sc[i],2]*A1.min)) - 1
# generated outcome Y2, using parameters (gamma_1,...,gamma_7); Y1 is identically
# zero in this design and is not materialised (the unused `Y1 <- rep(0, n)` was removed)
Y2 <- g[sc[i],1] + g[sc[i],2]*O1 + g[sc[i],3]*A1 + g[sc[i],4]*O1*A1 + g[sc[i],5]*A2 + g[sc[i],6]*O2*A2 + g[sc[i],7]*A1*A2 + rnorm(n)
# model specification (same as scenario 3)
blip.model <- list(~ O1, ~ O2 + A1)
proba <- list(as.vector(rep(0.5,n)))
treat.model <- list(A1~1, A2~1)
tf.model <- list(~ O1, ~ O1 + A1 + O1*A1)
# fit dWOLS to the generated dataset, using all n=300 observations
s5 <- DTRreg(outcome = Y2, blip.mod = blip.model, treat.mod = treat.model, tf.mod = tf.model, treat.mod.man = rep(proba,2), method = "dwols")
summary(s5)
# calculate phi*x ~ p -- should be close to 0.25
int1 <- s5["psi"][[1]][[1]][1]
B.o1 <- s5["psi"][[1]][[1]][2]
int2 <- s5["psi"][[1]][[2]][1]
B.o2 <- s5["psi"][[1]][[2]][2]
B.a1 <- s5["psi"][[1]][[2]][3]
psi <- int2 + O2*B.o2 + A1*B.a1
psi
# estimate of p, the probability of generating data with gamma5 + gamma6*O2 + gamma7*A1 close to zero
# in scenario 5, this probability should be close to 0.25
# try different thresholds to quantify "close to zero";
# the estimates of p vary a lot depending on the data
length(psi[which(abs(psi) < 0.1)])/n
length(psi[which(abs(psi) < 0.15)])/n
| /simulations.R | no_license | gabriellesimoneau/DTR_bootstrap | R | false | false | 5,998 | r | ###################################################################################
# to apply the m-out-of-n bootstrap, we need to have an estimate of the "amount of irregularity"
# in the data
# Irregularity occurs when the second stage treatment has a very small effect on the treatment
# decision
#
# Following Chakraborty et al (2013), this occurs when phi*x is close to zero because then the function
# f(x) = Indicator(phi*x >0) is not differentiable
#
# Goal here: how to estimate p = (probability that phi*x will be close to zero)
# Idea:
# - fit the dWOLS model to get estimates of phi. Then, using these estimates, calculate
# \hat phi*x for each observations. Then, \hat p = proportion of observations that have
# \hat phi*x "close" to zero.
# - "close" to zero is subjective. Might want to vary the threshold.
# Other idea (as suggested by Wallace et al in JSS):
# - "non-regularity occurs when optimal treatment is not unique. (...) [estimating irregularity]
# involves identifying the proportion of subjects for whom, when all possible blip parameter
# values within their respective confidence sets are considered, both treatment and
# non-treatment could be recommended."
#
###################################################################################
library(DTRreg)
expit <- function(x) exp(x)/(1+exp(x))
# gamma parameters following Chakraborty et al (2013) to control for irregularity in the generated data
g <- matrix(NA, nrow = 9, ncol = 7)
g[1,] <- c(0,0,0,0,0,0,0)
g[2,] <- c(0,0,0,0,0.01,0,0)
g[3,] <- c(0,0,-0.5,0,0.5,0,-0.5)
g[4,] <- c(0,0,-0.5,0,0.99,0,-0.98)
g[5,] <- c(0,0,-0.5,0,1,0.5,-0.5)
g[6,] <- c(0,0,-0.5,0,0.25,0.5,0.5)
g[7,] <- c(0,0,-0.25,0,0.75,0.5,0.5)
g[8,] <- c(0,0,0,0,1,0,-1)
g[9,] <- c(0,0,0,0,0.25,0,-0.24)
# delta parameters following Chakraborty et al (2013) to control for irregularity in the generated data
d <- matrix(NA, nrow = 9, ncol = 2)
d[1,] <- c(0.5,0.5)
d[2,] <- c(0.5,0.5)
d[3,] <- c(0.5,0.5)
d[4,] <- c(0.5,0.5)
d[5,] <- c(1,0)
d[6,] <- c(0.1,0.1)
d[7,] <- c(0.1,0.1)
d[8,] <- c(0,0)
d[9,] <- c(0,0)
# scenario
sc <- seq(1,9)
################################### scenario 3 - nonregular ###################################
n <- 300
i <- 3
# treatment A1, A2: P(Aj = 1) = P(Aj = 0) = 0.5
A1 <- rbinom(n, size = 1, prob = 0.5)
A2 <- rbinom(n, size = 1, prob = 0.5)
# treatment A1 coded as -1,1 so I don't have to adapt the delta_1 and delta_2 parameters
A1.min <- 2*A1 - 1
# covariates O1, O2: coded as -1, 1, where O2 depends on A1, O1 and (delta_1,delta_2)
O1 <- 2*rbinom(n, size = 1, prob = 0.5) - 1
O2 <- 2*rbinom(n, size = 1, prob = expit(d[sc[i],1]*O1 + d[sc[i],2]*A1.min)) - 1
# generated outcome Y2 (Y1 set to 0), using parameters (gamma_1,...,gamma_7)
Y2 <- g[sc[i],1] + g[sc[i],2]*O1 + g[sc[i],3]*A1 + g[sc[i],4]*O1*A1 + g[sc[i],5]*A2 + g[sc[i],6]*O2*A2 + g[sc[i],7]*A1*A2 + rnorm(n)
# model specification
blip.model <- list(~ O1, ~ O2 + A1)
proba <- list(as.vector(rep(0.5,n)))
treat.model <- list(A1 ~ 1, A2 ~ 1)
tf.model <- list(~ O1, ~ O1 + A1 + O1*A1)
# fit dWOLS to the generated dataset, using all n=300 observations
s3 <- DTRreg(outcome = Y2, blip.mod = blip.model, treat.mod = treat.model, tf.mod = tf.model, treat.mod.man = rep(proba,2), method = "dwols")
summary(s3)
# calculate phi*x ~ p -- should be close to 0.5
int1 <- s3["psi"][[1]][[1]][1]
B.o1 <- s3["psi"][[1]][[1]][2]
int2 <- s3["psi"][[1]][[2]][1]
B.o2 <- s3["psi"][[1]][[2]][2]
B.a1 <- s3["psi"][[1]][[2]][3]
psi <- int2 + O2*B.o2 + A1*B.a1
psi
# estimate of p, the probability of generating data with gamma5 + gamma6*O2 + gamma7*A1 close to zero
# in scenario 3, this probability should be close to 0.5
# try different threshold to quantify "close to zero"
# the estimates of p varies a lot depending on the data
length(psi[which(abs(psi) < 0.1)])/n
length(psi[which(abs(psi) < 0.15)])/n
# probability of generating patient history such that g5*A2 + g6*O2*A2 + g7*A1*A2 = 0
# this is, following the paper where A1,A2 are coded {-1,1} but this specificiation of p
# is not relevant when A1,A2 are coded {0,1} because p will always be 0.5
gg <- g[sc[i],5]*A2 + g[sc[i],6]*O2*A2 + g[sc[i],7]*A1*A2
length(which(gg==0))/n
################################### scenario 5 - nonregular ###################################
i <- 5
# treatment A1, A2: P(Aj = 1) = P(Aj = 0) = 0.5
A1 <- rbinom(n, size = 1, prob = 0.5)
A2 <- rbinom(n, size = 1, prob = 0.5)
# treatment A1 coded as -1,1 so I don't have to adapt the delta_1 and delta_2 parameters
A1.min <- 2*A1 - 1
# covariates O1, O2: coded as -1, 1, where O2 depends on A1, O1 and (delta_1,delta_2)
O1 <- 2*rbinom(n, size = 1, prob = 0.5) - 1
O2 <- 2*rbinom(n, size = 1, prob = expit(d[3,1]*O1 + d[3,2]*A1.min)) - 1
# generated outcome Y2 (Y1 set to 0), using parameters (gamma_1,...,gamma_7)
Y1 <- rep(0, n)
Y2 <- g[5,1] + g[5,2]*O1 + g[5,3]*A1 + g[5,4]*O1*A1 + g[5,5]*A2 + g[5,6]*O2*A2 + g[5,7]*A1*A2 + rnorm(n)
# model specification
blip.model <- list(~ O1, ~ O2 + A1)
proba <- list(as.vector(rep(0.5,n)))
treat.model <- list(A1~1, A2~1)
tf.model <- list(~ O1, ~ O1 + A1 + O1*A1)
# fit dWOLS to the generated dataset, using all n=300 observations
s5 <- DTRreg(outcome = Y2, blip.mod = blip.model, treat.mod = treat.model, tf.mod = tf.model, treat.mod.man = rep(proba,2), method = "dwols")
summary(s5)
# calculate phi*x ~ p -- should be close to 0.25
int1 <- s5["psi"][[1]][[1]][1]
B.o1 <- s5["psi"][[1]][[1]][2]
int2 <- s5["psi"][[1]][[2]][1]
B.o2 <- s5["psi"][[1]][[2]][2]
B.a1 <- s5["psi"][[1]][[2]][3]
psi <- int2 + O2*B.o2 + A1*B.a1
psi
# estimate of p, the probability of generating data with gamma5 + gamma6*O2 + gamma7*A1 close to zero
# in scenario 5, this probability should be close to 0.25
# try different threshold to quantify "close to zero"
# the estimates of p varies a lot depending on the data
length(psi[which(abs(psi) < 0.1)])/n
length(psi[which(abs(psi) < 0.15)])/n
|
mdf <- melt(newdata)
ggplot(mdf) +
geom_density(aes(x = relvalues, color = Relationship))
#To extract intersect points off the density plot
intersect(newdata$Full, newdata$Half) #http://stackoverflow.com/questions/21212352/find-two-densities-point-of-intersection-in-r/21213177#21213177
#Compare estimators with each other
plot(output$relatedness[,5:11]) # the wang estimator seems to give best correlation with the ML estimators.
plot(output$inbreeding[,2:3]) # corelation between inbreeding estimators. Should include the ML methods but does not output them for some reason despite the doco saying it should.
hist(output$inbreeding$LH)
hist(output$inbreeding$LR)
#remove to free memory for simulation
rm(eltwlarvalPeeliSnps)
rm(goodDArTsnps)
rm(larv)
rm(larvalPeeliSnps)
rm(qslAllLarvaInfo)
rm(qslAllLarvaInfoApr2016)
rm(qslMPeeliiForRelated)
rm(Report.DMac15.1861)
rm(thlarvalPeeliSnps)
rm(twthlarvalPeeliSnps)
| /scratch.R | no_license | dnatheist/Ch5GenomicDiversityAndSpatialStructure | R | false | false | 932 | r |
mdf <- melt(newdata)
ggplot(mdf) +
geom_density(aes(x = relvalues, color = Relationship))
#To extract intersect points off the density plot
intersect(newdata$Full, newdata$Half) #http://stackoverflow.com/questions/21212352/find-two-densities-point-of-intersection-in-r/21213177#21213177
#Compare estimators with each other
plot(output$relatedness[,5:11]) # the wang estimator seems to give best correlation with the ML estimators.
plot(output$inbreeding[,2:3]) # corelation between inbreeding estimators. Should include the ML methods but does not output them for some reason despite the doco saying it should.
hist(output$inbreeding$LH)
hist(output$inbreeding$LR)
#remove to free memory for simulation
rm(eltwlarvalPeeliSnps)
rm(goodDArTsnps)
rm(larv)
rm(larvalPeeliSnps)
rm(qslAllLarvaInfo)
rm(qslAllLarvaInfoApr2016)
rm(qslMPeeliiForRelated)
rm(Report.DMac15.1861)
rm(thlarvalPeeliSnps)
rm(twthlarvalPeeliSnps)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/step-subset-slice.R
\name{slice.dtplyr_step}
\alias{slice.dtplyr_step}
\alias{slice_head.dtplyr_step}
\alias{slice_tail.dtplyr_step}
\alias{slice_min.dtplyr_step}
\alias{slice_max.dtplyr_step}
\title{Subset rows using their positions}
\usage{
\method{slice}{dtplyr_step}(.data, ..., .by = NULL)
\method{slice_head}{dtplyr_step}(.data, ..., n, prop, by = NULL)
\method{slice_tail}{dtplyr_step}(.data, ..., n, prop, by = NULL)
\method{slice_min}{dtplyr_step}(.data, order_by, ..., n, prop, by = NULL, with_ties = TRUE)
\method{slice_max}{dtplyr_step}(.data, order_by, ..., n, prop, by = NULL, with_ties = TRUE)
}
\arguments{
\item{.data}{A \code{\link[=lazy_dt]{lazy_dt()}}.}
\item{...}{For \code{slice()}: <\code{\link[dplyr:dplyr_data_masking]{data-masking}}> Integer row
values.
Provide either positive values to keep, or negative values to drop.
The values provided must be either all positive or all negative.
Indices beyond the number of rows in the input are silently ignored.
For \verb{slice_*()}, these arguments are passed on to methods.}
\item{.by, by}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}}
<\code{\link[dplyr:dplyr_tidy_select]{tidy-select}}> Optionally, a selection of columns to
group by for just this operation, functioning as an alternative to \code{\link[dplyr:group_by]{group_by()}}. For
details and examples, see \link[dplyr:dplyr_by]{?dplyr_by}.}
\item{n, prop}{Provide either \code{n}, the number of rows, or \code{prop}, the
proportion of rows to select. If neither are supplied, \code{n = 1} will be
used. If \code{n} is greater than the number of rows in the group
(or \code{prop > 1}), the result will be silently truncated to the group size.
\code{prop} will be rounded towards zero to generate an integer number of
rows.
A negative value of \code{n} or \code{prop} will be subtracted from the group
size. For example, \code{n = -2} with a group of 5 rows will select 5 - 2 = 3
rows; \code{prop = -0.25} with 8 rows will select 8 * (1 - 0.25) = 6 rows.}
\item{order_by}{<\code{\link[dplyr:dplyr_data_masking]{data-masking}}> Variable or function
of variables to order by. To order by multiple variables, wrap them in a
data frame or tibble.}
\item{with_ties}{Should ties be kept together? The default, \code{TRUE},
may return more rows than you request. Use \code{FALSE} to ignore ties,
and return the first \code{n} rows.}
}
\description{
These are methods for the dplyr \code{\link[=slice]{slice()}}, \code{slice_head()}, \code{slice_tail()},
\code{slice_min()}, \code{slice_max()} and \code{slice_sample()} generics. They are
translated to the \code{i} argument of \verb{[.data.table}.
Unlike dplyr, \code{slice()} (and \code{slice()} alone) returns the same number of
rows per group, regardless of whether or not the indices appear in each
group.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
dt <- lazy_dt(mtcars)
dt \%>\% slice(1, 5, 10)
dt \%>\% slice(-(1:4))
# First and last rows based on existing order
dt \%>\% slice_head(n = 5)
dt \%>\% slice_tail(n = 5)
# Rows with minimum and maximum values of a variable
dt \%>\% slice_min(mpg, n = 5)
dt \%>\% slice_max(mpg, n = 5)
# slice_min() and slice_max() may return more rows than requested
# in the presence of ties. Use with_ties = FALSE to suppress
dt \%>\% slice_min(cyl, n = 1)
dt \%>\% slice_min(cyl, n = 1, with_ties = FALSE)
# slice_sample() allows you to random select with or without replacement
dt \%>\% slice_sample(n = 5)
dt \%>\% slice_sample(n = 5, replace = TRUE)
# you can optionally weight by a variable - this code weights by the
# physical weight of the cars, so heavy cars are more likely to get
# selected
dt \%>\% slice_sample(weight_by = wt, n = 5)
}
| /man/slice.dtplyr_step.Rd | no_license | cran/dtplyr | R | false | true | 3,899 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/step-subset-slice.R
\name{slice.dtplyr_step}
\alias{slice.dtplyr_step}
\alias{slice_head.dtplyr_step}
\alias{slice_tail.dtplyr_step}
\alias{slice_min.dtplyr_step}
\alias{slice_max.dtplyr_step}
\title{Subset rows using their positions}
\usage{
\method{slice}{dtplyr_step}(.data, ..., .by = NULL)
\method{slice_head}{dtplyr_step}(.data, ..., n, prop, by = NULL)
\method{slice_tail}{dtplyr_step}(.data, ..., n, prop, by = NULL)
\method{slice_min}{dtplyr_step}(.data, order_by, ..., n, prop, by = NULL, with_ties = TRUE)
\method{slice_max}{dtplyr_step}(.data, order_by, ..., n, prop, by = NULL, with_ties = TRUE)
}
\arguments{
\item{.data}{A \code{\link[=lazy_dt]{lazy_dt()}}.}
\item{...}{For \code{slice()}: <\code{\link[dplyr:dplyr_data_masking]{data-masking}}> Integer row
values.
Provide either positive values to keep, or negative values to drop.
The values provided must be either all positive or all negative.
Indices beyond the number of rows in the input are silently ignored.
For \verb{slice_*()}, these arguments are passed on to methods.}
\item{.by, by}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}}
<\code{\link[dplyr:dplyr_tidy_select]{tidy-select}}> Optionally, a selection of columns to
group by for just this operation, functioning as an alternative to \code{\link[dplyr:group_by]{group_by()}}. For
details and examples, see \link[dplyr:dplyr_by]{?dplyr_by}.}
\item{n, prop}{Provide either \code{n}, the number of rows, or \code{prop}, the
proportion of rows to select. If neither are supplied, \code{n = 1} will be
used. If \code{n} is greater than the number of rows in the group
(or \code{prop > 1}), the result will be silently truncated to the group size.
\code{prop} will be rounded towards zero to generate an integer number of
rows.
A negative value of \code{n} or \code{prop} will be subtracted from the group
size. For example, \code{n = -2} with a group of 5 rows will select 5 - 2 = 3
rows; \code{prop = -0.25} with 8 rows will select 8 * (1 - 0.25) = 6 rows.}
\item{order_by}{<\code{\link[dplyr:dplyr_data_masking]{data-masking}}> Variable or function
of variables to order by. To order by multiple variables, wrap them in a
data frame or tibble.}
\item{with_ties}{Should ties be kept together? The default, \code{TRUE},
may return more rows than you request. Use \code{FALSE} to ignore ties,
and return the first \code{n} rows.}
}
\description{
These are methods for the dplyr \code{\link[=slice]{slice()}}, \code{slice_head()}, \code{slice_tail()},
\code{slice_min()}, \code{slice_max()} and \code{slice_sample()} generics. They are
translated to the \code{i} argument of \verb{[.data.table}.
Unlike dplyr, \code{slice()} (and \code{slice()} alone) returns the same number of
rows per group, regardless of whether or not the indices appear in each
group.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
dt <- lazy_dt(mtcars)
dt \%>\% slice(1, 5, 10)
dt \%>\% slice(-(1:4))
# First and last rows based on existing order
dt \%>\% slice_head(n = 5)
dt \%>\% slice_tail(n = 5)
# Rows with minimum and maximum values of a variable
dt \%>\% slice_min(mpg, n = 5)
dt \%>\% slice_max(mpg, n = 5)
# slice_min() and slice_max() may return more rows than requested
# in the presence of ties. Use with_ties = FALSE to suppress
dt \%>\% slice_min(cyl, n = 1)
dt \%>\% slice_min(cyl, n = 1, with_ties = FALSE)
# slice_sample() allows you to random select with or without replacement
dt \%>\% slice_sample(n = 5)
dt \%>\% slice_sample(n = 5, replace = TRUE)
# you can optionally weight by a variable - this code weights by the
# physical weight of the cars, so heavy cars are more likely to get
# selected
dt \%>\% slice_sample(weight_by = wt, n = 5)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SqrtLassoSolver.R
\docType{methods}
\name{run,SqrtLassoSolver-method}
\alias{run,SqrtLassoSolver-method}
\alias{run.SqrtLassoSolver}
\alias{solve.SqrtLasso}
\title{Run the Square Root LASSO Solver}
\usage{
\S4method{run}{SqrtLassoSolver}(obj)
}
\arguments{
\item{obj}{An object of class Solver with "sqrtlasso" as the solver string}
}
\value{
A data frame containing the coefficients relating the target gene to
each transcription factor, plus other fit parameters.
}
\description{
Given a TReNA object with Square Root LASSO as the solver,
use the \code{\link{slim}} function to estimate coefficients
for each transcription factor as a predictor of the target gene's expression level.
This method should be called using the \code{\link{solve}} method on an appropriate TReNA object.
}
\examples{
# Load included Alzheimer's data, create a TReNA object with Square Root LASSO as solver, and solve
load(system.file(package="TReNA", "extdata/ampAD.154genes.mef2cTFs.278samples.RData"))
target.gene <- "MEF2C"
tfs <- setdiff(rownames(mtx.sub), target.gene)
sqrt.solver <- SqrtLassoSolver(mtx.sub, target.gene, tfs)
tbl <- run(sqrt.solver)
# Solve the same problem but use 8 cores
sqrt.solver <- SqrtLassoSolver(mtx.sub, target.gene, tfs, nCores = 8)
tbl <- run(sqrt.solver)
}
\seealso{
\code{\link{slim}}, \code{\link{SqrtLassoSolver}}
Other solver methods: \code{\link{run,BayesSpikeSolver-method}},
\code{\link{run,EnsembleSolver-method}},
\code{\link{run,LassoPVSolver-method}},
\code{\link{run,LassoSolver-method}},
\code{\link{run,PearsonSolver-method}},
\code{\link{run,RandomForestSolver-method}},
\code{\link{run,RidgeSolver-method}},
\code{\link{run,SpearmanSolver-method}},
\code{\link{solve,TReNA-method}}
}
| /man/solve.SqrtLasso.Rd | no_license | noahmclean1/TReNA | R | false | true | 1,812 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SqrtLassoSolver.R
\docType{methods}
\name{run,SqrtLassoSolver-method}
\alias{run,SqrtLassoSolver-method}
\alias{run.SqrtLassoSolver}
\alias{solve.SqrtLasso}
\title{Run the Square Root LASSO Solver}
\usage{
\S4method{run}{SqrtLassoSolver}(obj)
}
\arguments{
\item{obj}{An object of class Solver with "sqrtlasso" as the solver string}
}
\value{
A data frame containing the coefficients relating the target gene to
each transcription factor, plus other fit parameters.
}
\description{
Given a TReNA object with Square Root LASSO as the solver,
use the \code{\link{slim}} function to estimate coefficients
for each transcription factor as a predictor of the target gene's expression level.
This method should be called using the \code{\link{solve}} method on an appropriate TReNA object.
}
\examples{
# Load included Alzheimer's data, create a TReNA object with Square Root LASSO as solver, and solve
load(system.file(package="TReNA", "extdata/ampAD.154genes.mef2cTFs.278samples.RData"))
target.gene <- "MEF2C"
tfs <- setdiff(rownames(mtx.sub), target.gene)
sqrt.solver <- SqrtLassoSolver(mtx.sub, target.gene, tfs)
tbl <- run(sqrt.solver)
# Solve the same problem but use 8 cores
sqrt.solver <- SqrtLassoSolver(mtx.sub, target.gene, tfs, nCores = 8)
tbl <- run(sqrt.solver)
}
\seealso{
\code{\link{slim}}, \code{\link{SqrtLassoSolver}}
Other solver methods: \code{\link{run,BayesSpikeSolver-method}},
\code{\link{run,EnsembleSolver-method}},
\code{\link{run,LassoPVSolver-method}},
\code{\link{run,LassoSolver-method}},
\code{\link{run,PearsonSolver-method}},
\code{\link{run,RandomForestSolver-method}},
\code{\link{run,RidgeSolver-method}},
\code{\link{run,SpearmanSolver-method}},
\code{\link{solve,TReNA-method}}
}
|
with(ac2c1017f47884555a0c28c052f730e36, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';rm(list=ls())}); | /80bb2a25-ac5d-47d0-abfc-b3f3811f0936/R/Temp/a9hXpIfWcjCx4.R | no_license | ayanmanna8/test | R | false | false | 212 | r | with(ac2c1017f47884555a0c28c052f730e36, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';rm(list=ls())}); |
#' @title Plot 2D scatter plot
#' @description Draws the 2D graphical representation of one or more sequences
#'   in a single scatter plot.  Each sequence gets its own colour and the point
#'   size grows with the square root of the point frequency; rows are shuffled
#'   before plotting so overlapping points of different sequences interleave.
#' @param seqs character vector of sequences, passed one by one to dGraph().
#' @param genbank logical, forwarded to dGraph() -- presumably fetches the
#'   sequence from GenBank when TRUE; confirm against dGraph().
#' @param xlab,ylab,main usual plot annotations.
#' @param colorset colours assigned to the sequences by position.
#' @param show.legend logical; add a legend naming the sequences?
#' @param legend.pos legend position keyword (see ?legend).
#' @return NULL, invisibly; called for its plotting side effect.
plot2DGraph <- function(seqs, genbank = FALSE, xlab = 'X', ylab = 'Y', main = '',
                        colorset = c('#da186f', '#681f1c', '#ffa600', '#bc5090', '#003f5c'),
                        show.legend = TRUE, legend.pos = 'topleft'){
  # Collect the graph points of every sequence, tagged with the sequence index.
  # An index loop (instead of `for (seq in seqs)` + `which(seq == seqs)`)
  # keeps the tag correct when the same sequence occurs more than once, and
  # binding the pieces once avoids growing the table inside the loop.
  pieces <- vector("list", length(seqs))
  for (i in seq_along(seqs)) {
    # assumes dGraph()$graph provides columns X, Y and freq (the names the
    # original code expected) -- confirm in dGraph()
    graph <- as.data.frame(dGraph(seq = seqs[i], dim = 2, genbank = genbank)$graph)
    graph$nr <- i
    pieces[[i]] <- graph
  }
  graphs <- do.call(rbind, pieces)
  # shuffle rows so overlapping points of different sequences interleave
  graphs_shuffled <- graphs[sample(nrow(graphs)), ] # for the overlaps
  # translucent colour per sequence so overplotting stays readable
  palette <- adjustcolor(colorset[graphs_shuffled$nr], alpha.f = 0.2)
  plot(graphs_shuffled$X, graphs_shuffled$Y, col = palette, pch = 20,
       cex = sqrt(graphs_shuffled$freq), xlab = xlab, ylab = ylab, main = main)
  if (show.legend){
    legend(legend.pos, legend = seqs, col = colorset[seq_along(seqs)],
           pch = 16, pt.cex = 2, cex = 1, bty = 'n')
  }
  invisible(NULL)
}
| /R/plot2DGraph.R | no_license | Kicer86/DynamicRepresentation | R | false | false | 1,121 | r | #' @title Plot 2D scatter plot
#' @description Creates a scatter plot of the 2D graphical representation of
#'   one or more sequences, one colour per sequence, with point size scaled by
#'   the square root of the point frequency.
#' @usage plot2DGraph(seqs, genbank = FALSE, xlab = 'X', ylab = 'Y', main = '',
#'   colorset, show.legend = TRUE, legend.pos = 'topleft')
#' @return Called for its plotting side effect; the return value (the value of
#'   the final legend() or if() expression) is not meaningful.
plot2DGraph <- function(seqs, genbank = FALSE, xlab = 'X', ylab = 'Y', main = '',
                        colorset = c('#da186f', '#681f1c', '#ffa600', '#bc5090', '#003f5c'),
                        show.legend = TRUE, legend.pos = 'topleft'){
  # accumulate one row per graph point; columns: coordinates, frequency,
  # and nr = index of the sequence the point belongs to
  graphs <- matrix(,0,4)
  colnames(graphs) <- c('X', 'Y', 'freq', 'nr')
  for (seq in seqs){
    # NOTE(review): if seqs contains duplicated entries, which() returns more
    # than one index and rep() below misbehaves -- assumes all entries distinct.
    number <- which(seq == seqs)
    # assumes dGraph()$graph yields columns X, Y and freq -- confirm in dGraph()
    graph <- as.data.frame(dGraph(seq = seq, dim = 2, genbank = genbank)$graph)
    graphs <- rbind(graphs, cbind(graph, list('nr' = rep(number, nrow(graph)))))
  }
  # shuffle rows so overlapping points of different sequences interleave
  graphs_shuffled <- graphs[sample(nrow(graphs)), ] # for the overlaps
  # translucent colour per sequence (alpha 0.2) so overplotting stays readable
  palette <- adjustcolor(colorset[graphs_shuffled$nr], alpha.f = 0.2)
  plot(graphs_shuffled$X, graphs_shuffled$Y, col = palette, pch = 20,
       cex = sqrt(graphs_shuffled$freq), xlab = xlab, ylab = ylab, main = main)
  if (show.legend){
    legend(legend.pos, legend = seqs, col = colorset[1:length(seqs)], pch=16, pt.cex = 2, cex=1, bty = 'n')
  }
}
|
#' @importFrom GenomeInfoDb seqlevels seqlevelsStyle
NULL
#' MultiOmicQC: Helper functions for checking the integrity of Multi-omics datasets
#'
#' MultiOmicQC allows the user to run common checks on MultiAssayExperiment objects
#' additional to the checks already established in MultiAssayExperiment.
#'
#' @aliases NULL
"_PACKAGE"
| /R/MultiOmicQC-pkg.R | permissive | ttriche/MultiOmicQC | R | false | false | 334 | r | #' @importFrom GenomeInfoDb seqlevels seqlevelsStyle
NULL
#' MultiOmicQC: Helper functions for checking the integrity of Multi-omics datasets
#'
#' MultiOmicQC allows the user to run common checks on MultiAssayExperiment objects
#' additional to the checks already established in MultiAssayExperiment.
#'
#' @aliases NULL
"_PACKAGE"
|
#'
#' @title Simulates the individual effect related to heterogeneity in baseline disease risk
#' @description The variation in baseline disease risk is assumed to be normally distributed
#' on a logistic (log-odds) scale. If \code{baseline.OR} is set to 10, the implication is
#' that a 'high risk' subject (someone at the upper 95 percent centile of population risk)
#' is, all else being equal, at 10 times the odds of developing disease compared to someone
#' who is at 'low risk' (at the lower 5 percent centile of population risk).
#' @param num.obs number of observations to simulate.
#' @param baseline.OR baseline odds ratio for a subject on the 95 percent population centile
#' versus the 5 percent centile. This parameter reflects the heterogeneity in disease risk
#' arising from determinants that have not been measured or have not been included in the model.
#' @return a numeric vector of length \code{num.obs} with the simulated per-subject
#' effects on the log-odds scale.
#' @keywords internal
#' @author Gaye A.
#'
sim.subject.data <- function (num.obs=10000, baseline.OR=12.36){
  # Convert the 5th-to-95th centile odds ratio into the variance of a normally
  # distributed random effect on the log-odds scale: those centiles of
  # N(0, v) are 2*qnorm(0.95)*sqrt(v) apart, so log(OR) = 2*qnorm(0.95)*sqrt(v).
  baseline.variance <- (log(baseline.OR)/(2*qnorm(0.95)))^2
  # Draw one normally distributed effect per simulated subject and return it
  # visibly (the original ended with an assignment, which returns invisibly).
  rnorm(num.obs, 0, sqrt(baseline.variance))
}
| /R/sim.subject.data.R | no_license | agaye/ESPRESSO.G | R | false | false | 1,473 | r | #'
#' @title Simulates the individual effect related to heterogeneity in baseline disease risk
#' @description The variation in baseline disease risk is assumed to be normally distributed
#' on a logistic (log-odds) scale. If this parameter is set to 10, the implication is that a
#' 'high risk' subject (someone at the upper 95 percent centile of population risk) is, all
#' else being equal, at 10 times the odds of developing disease compared to someone else who
#' is at 'low risk' (at the lower 5 percent centile of population risk).
#' @param num.obs number of observations to simulate.
#' @param baseline.OR baseline odds ratio for a subject on the 95 percent population centile
#' versus the 5 percent centile. This parameter reflects the heterogeneity in disease risk
#' arising from determinants that have not been measured or have not been included in the model.
#' @return a numeric vector of length \code{num.obs} (returned invisibly, because the
#' final statement is an assignment).
#' @keywords internal
#' @author Gaye A.
#'
sim.subject.data <- function (num.obs=10000, baseline.OR=12.36){
  numobs <- num.obs
  baseline.odds <- baseline.OR
  # Convert the 5th-to-95th centile odds ratio into the variance of a normal
  # random effect on the log-odds scale: log(OR) = 2*qnorm(0.95)*sqrt(variance).
  baseline.variance <- (log(baseline.odds)/(2*qnorm(0.95)))^2
  # Draw one normally distributed effect per subject, on the log-odds scale,
  # with the variance derived above.
  subject.effect <- rnorm(numobs,0,sqrt(baseline.variance))
  # NOTE(review): a trailing assignment returns its value invisibly; callers
  # relying on auto-printing will see nothing.
  output <- subject.effect
}
|
# Lab script for ISLR chapter 2 (Introduction to Statistical Learning):
# basic commands, graphics, indexing, loading data, and summaries.
# NOTE(review): hard-coded, user-specific working directory -- adjust before running.
setwd("C:/Users/wjssm/Desktop/0.graduate/3rd/Datamining/lab")
###1.Basic Commands###
x <- c(1,3,2,5)
x
x = c(1,6,2)
y <- c(1,4,3)
length(x); length(y)
x+y
#a list of all of the objects
ls()
#delete
rm(x,y)
ls()
#delete all
rm(list = ls())
#matrix
x <- matrix(data = 1:4, nrow = 2)
x
matrix(data = 1:4, 2,2, byrow = T)
sqrt(x)
x^2
x<- rnorm(50)
# y is x plus N(50, 0.1) noise, so cor(x, y) comes out close to 1
y <- x+rnorm(50, 50, sd = 0.1)
cor(x,y)
# set.seed makes the random draws reproducible
set.seed(1303)
rnorm(50)
set.seed(3)
y <- rnorm(100)
mean(y)
var(y)
sqrt(var(y))
sd(y)
###2.Graphics###
x<- rnorm(100); y <- rnorm(100)
plot(x,y)
plot(x,y, xlab = 'this is the x-axis', ylab = 'this is the y-axis',
     main = 'Plot of X vs Y')
#pdf('Figure.pdf')
plot(x,y, col = 'green')
#dev.off()
x <- seq(1,10); x
x <- 1:10; x
x <- seq(-pi, pi, length = 50);x
y<-x
# outer(): outer product of x and y -- a concise way to evaluate f on a grid
f <- outer(x,y, function(x,y) cos(y)/(1+x^2))
contour(x,y,f)
contour(x,y,f, nlevels = 45, add = T)
fa <- (f-t(f))/2
contour(x,y,fa, nlevels = 15)
#image : draw a heatmap
image(x,y,fa)
#persp : 3d plot
##theta, phi : control the angles at which the plot is viewed
persp(x,y,fa)
persp(x,y,fa, theta = 30)
persp(x,y,fa, theta = 30, phi = 20)
persp(x,y,fa, theta = 30, phi = 70)
persp(x,y,fa, theta = 30, phi = 40)
###3.Indexing Data###
A <- matrix(1:16, 4,4); A
A[2,3]
A[c(1,3), c(2,4)]
A[1:3,2:4]
A[1:2,]
A[,1:2]
A[1,]
# negative indices exclude the listed rows/columns
A[-c(1,3),]
A[-c(1,3), -c(1,3,4)]
dim(A)
###4.Loading Data####
Auto = read.table('http://www-bcf.usc.edu/~gareth/ISL/Auto.data',
                  header = T)
#fix : view data in a spreadsheet like window
# editing the cells in that window is possible too
fix(Auto)
dim(Auto)
Auto <- na.omit(Auto)
dim(Auto)
names(Auto)
###5.Additional Graphical and Numerical Summaries####
# This first call errors on purpose: cylinders is not visible until attach()
plot(cylinders, mpg)
attach(Auto)
plot(cylinders, mpg)
cylinders <- as.factor(cylinders)
# plotting a factor against a numeric automatically draws boxplots
plot(cylinders, mpg)
plot(cylinders, mpg, col = 'red')
plot(cylinders, mpg, col = 'red', varwidth = T)
plot(cylinders, mpg, col = 'red', varwidth = T, horizontal = T)
plot(cylinders, mpg, col = 'red', varwidth = T,
     xlab = 'cylinders', ylab = 'MPG')
hist(mpg)
hist(mpg, col = 2)
hist(mpg, col = 2, breaks = 15)
#pairs : a scatterplot matrix for every pair of variables
pairs(Auto)
pairs(~mpg + displacement + horsepower + weight +
        acceleration, Auto)
plot(horsepower, mpg)
# identify(): click points on the plot to label them with `name`
identify(horsepower, mpg, name)
summary(Auto)
summary(mpg)
| /Datamining/lab/lab_ch2.R | no_license | miniii222/study_in_graduate | R | false | false | 2,563 | r | setwd("C:/Users/wjssm/Desktop/0.graduate/3rd/Datamining/lab")
# Lab script for ISLR chapter 2 (Introduction to Statistical Learning):
# basic commands, graphics, indexing, loading data, and summaries.
###1.Basic Commands###
x <- c(1,3,2,5)
x
x = c(1,6,2)
y <- c(1,4,3)
length(x); length(y)
x+y
#a list of all of the objects
ls()
#delete
rm(x,y)
ls()
#delete all
rm(list = ls())
#matrix
x <- matrix(data = 1:4, nrow = 2)
x
matrix(data = 1:4, 2,2, byrow = T)
sqrt(x)
x^2
x<- rnorm(50)
# y is x plus N(50, 0.1) noise, so cor(x, y) comes out close to 1
y <- x+rnorm(50, 50, sd = 0.1)
cor(x,y)
# set.seed makes the random draws reproducible
set.seed(1303)
rnorm(50)
set.seed(3)
y <- rnorm(100)
mean(y)
var(y)
sqrt(var(y))
sd(y)
###2.Graphics###
x<- rnorm(100); y <- rnorm(100)
plot(x,y)
plot(x,y, xlab = 'this is the x-axis', ylab = 'this is the y-axis',
     main = 'Plot of X vs Y')
#pdf('Figure.pdf')
plot(x,y, col = 'green')
#dev.off()
x <- seq(1,10); x
x <- 1:10; x
x <- seq(-pi, pi, length = 50);x
y<-x
# outer(): outer product of x and y -- a concise way to evaluate f on a grid
f <- outer(x,y, function(x,y) cos(y)/(1+x^2))
contour(x,y,f)
contour(x,y,f, nlevels = 45, add = T)
fa <- (f-t(f))/2
contour(x,y,fa, nlevels = 15)
#image : draw a heatmap
image(x,y,fa)
#persp : 3d plot
##theta, phi : control the angles at which the plot is viewed
persp(x,y,fa)
persp(x,y,fa, theta = 30)
persp(x,y,fa, theta = 30, phi = 20)
persp(x,y,fa, theta = 30, phi = 70)
persp(x,y,fa, theta = 30, phi = 40)
###3.Indexing Data###
A <- matrix(1:16, 4,4); A
A[2,3]
A[c(1,3), c(2,4)]
A[1:3,2:4]
A[1:2,]
A[,1:2]
A[1,]
# negative indices exclude the listed rows/columns
A[-c(1,3),]
A[-c(1,3), -c(1,3,4)]
dim(A)
###4.Loading Data####
Auto = read.table('http://www-bcf.usc.edu/~gareth/ISL/Auto.data',
                  header = T)
#fix : view data in a spreadsheet like window
# editing the cells in that window is possible too
fix(Auto)
dim(Auto)
Auto <- na.omit(Auto)
dim(Auto)
names(Auto)
###5.Additional Graphical and Numerical Summaries####
# This first call errors on purpose: cylinders is not visible until attach()
plot(cylinders, mpg)
attach(Auto)
plot(cylinders, mpg)
cylinders <- as.factor(cylinders)
# plotting a factor against a numeric automatically draws boxplots
plot(cylinders, mpg)
plot(cylinders, mpg, col = 'red')
plot(cylinders, mpg, col = 'red', varwidth = T)
plot(cylinders, mpg, col = 'red', varwidth = T, horizontal = T)
plot(cylinders, mpg, col = 'red', varwidth = T,
     xlab = 'cylinders', ylab = 'MPG')
hist(mpg)
hist(mpg, col = 2)
hist(mpg, col = 2, breaks = 15)
#pairs : a scatterplot matrix for every pair of variables
pairs(Auto)
pairs(~mpg + displacement + horsepower + weight +
        acceleration, Auto)
plot(horsepower, mpg)
# identify(): click points on the plot to label them with `name`
identify(horsepower, mpg, name)
summary(Auto)
summary(mpg)
|
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General
# Public License along with this library; if not, write to the
# Free Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
################################################################################
# FUNCTION: DESCRIPTION:
# getModel Extract whole model slot
# getType Extract portfolio type from specification
# getOptimize Extract what to optimize from specification
# getEstimator Extract type of covariance estimator
# getTailRisk Extract list of tail dependency risk matrixes
# getParams Extract parameters from specification
# getAlpha Extracts target VaR-alpha specification
# getA Extracts quadratic LPM Exponent
# FUNCTION: DESCRIPTION:
# getPortfolio Extract whole portfolio slot
# getWeights Extracts weights from a portfolio object
# getTargetReturn Extracts target return from specification
# getTargetRisk Extracts target riks from specification
# getRiskFreeRate Extracts risk free rate from specification
# getNFrontierPoints Extracts number of frontier points
# getStatus Extracts portfolio status information
# FUNCTION: DESCRIPTION:
# getOptim Extract whole optim slot
# getSolver Extracts solver from specification
# getObjective Extracs name of objective function
# getOptions Extracs options
# getControl Extracs control list parameters
# getTrace Extracts solver's trace flag
# FUNCTION: DESCRIPTION:
# getMessages Extract whole messages slot
################################################################################
# fPFOLIOSPEC:
# model = list(
# type = "MV",
# optimize = "minRisk",
# estimator = "covEstimator",
# tailRisk = NULL,
# params = list(alpha = 0.05, a = 1))
# portfolio = list(
# weights = NULL,
# targetReturn = NULL,
# targetRisk = NULL,
# targetAlpha = NULL,
# riskFreeRate = 0,
# nFrontierPoints = 50,
# status = 0)
# optim = list(
# solver = "solveRquadprog",
# objective = NULL,
# options = list(meq=2),
# control = list(),
# trace = FALSE)
# messages = list(NULL)
# ------------------------------------------------------------------------------
# Accessors for the @model slot of an fPFOLIOSPEC object: portfolio type,
# optimization direction, covariance estimator name, tail-risk matrices and
# tuning parameters (VaR alpha and the quadratic LPM exponent a).
getModel.fPFOLIOSPEC <- function(object) object@model
getType.fPFOLIOSPEC <- function(object) object@model$type[1]
getOptimize.fPFOLIOSPEC <- function(object) object@model$optimize
getEstimator.fPFOLIOSPEC <- function(object) object@model$estimator
getTailRisk.fPFOLIOSPEC <- function(object) object@model$tailRisk
getParams.fPFOLIOSPEC <- function(object) object@model$params
getAlpha.fPFOLIOSPEC <- function(object) object@model$params$alpha
getA.fPFOLIOSPEC <- function(object) object@model$params$a
# Resolve the estimator name stored in the spec to the function it names.
.getEstimatorFun <- function(object) match.fun(getEstimator(object))
# ------------------------------------------------------------------------------
# Accessors for the @portfolio slot: weights, target return/risk, risk-free
# rate, number of efficient-frontier points and the status flag.
getPortfolio.fPFOLIOSPEC <- function(object) object@portfolio
getWeights.fPFOLIOSPEC <- function(object) object@portfolio$weights
getTargetReturn.fPFOLIOSPEC <- function(object) object@portfolio$targetReturn
getTargetRisk.fPFOLIOSPEC <- function(object) object@portfolio$targetRisk
getRiskFreeRate.fPFOLIOSPEC <- function(object) object@portfolio$riskFreeRate
getNFrontierPoints.fPFOLIOSPEC <- function(object) object@portfolio$nFrontierPoints
getStatus.fPFOLIOSPEC <- function(object) object@portfolio$status
# ------------------------------------------------------------------------------
# Accessors for the @optim slot: solver name, objective function name,
# solver options, control-parameter list and the trace flag.
getOptim.fPFOLIOSPEC <- function(object) object@optim
getSolver.fPFOLIOSPEC <- function(object) object@optim$solver
getObjective.fPFOLIOSPEC <- function(object) object@optim$objective
getOptions.fPFOLIOSPEC <- function(object) object@optim$options
getControl.fPFOLIOSPEC <- function(object) object@optim$control
getTrace.fPFOLIOSPEC <- function(object) object@optim$trace
# ------------------------------------------------------------------------------
# Accessor for the @messages slot.
getMessages.fPFOLIOSPEC <- function(object) object@messages
################################################################################
| /R/object-getSpec.R | no_license | cran/fPortfolio | R | false | false | 5,186 | r |
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General
# Public License along with this library; if not, write to the
# Free Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
################################################################################
# FUNCTION: DESCRIPTION:
# getModel Extract whole model slot
# getType Extract portfolio type from specification
# getOptimize Extract what to optimize from specification
# getEstimator Extract type of covariance estimator
# getTailRisk Extract list of tail dependency risk matrixes
# getParams Extract parameters from specification
# getAlpha Extracts target VaR-alpha specification
# getA Extracts quadratic LPM Exponent
# FUNCTION: DESCRIPTION:
# getPortfolio Extract whole portfolio slot
# getWeights Extracts weights from a portfolio object
# getTargetReturn Extracts target return from specification
# getTargetRisk Extracts target riks from specification
# getRiskFreeRate Extracts risk free rate from specification
# getNFrontierPoints Extracts number of frontier points
# getStatus Extracts portfolio status information
# FUNCTION: DESCRIPTION:
# getOptim Extract whole optim slot
# getSolver Extracts solver from specification
# getObjective Extracs name of objective function
# getOptions Extracs options
# getControl Extracs control list parameters
# getTrace Extracts solver's trace flag
# FUNCTION: DESCRIPTION:
# getMessages Extract whole messages slot
################################################################################
# fPFOLIOSPEC:
# model = list(
# type = "MV",
# optimize = "minRisk",
# estimator = "covEstimator",
# tailRisk = NULL,
# params = list(alpha = 0.05, a = 1))
# portfolio = list(
# weights = NULL,
# targetReturn = NULL,
# targetRisk = NULL,
# targetAlpha = NULL,
# riskFreeRate = 0,
# nFrontierPoints = 50,
# status = 0)
# optim = list(
# solver = "solveRquadprog",
# objective = NULL,
# options = list(meq=2),
# control = list(),
# trace = FALSE)
# messages = list(NULL)
# ------------------------------------------------------------------------------
# Accessors for the @model slot of an fPFOLIOSPEC object: portfolio type,
# optimization direction, covariance estimator name, tail-risk matrices and
# tuning parameters (VaR alpha and the quadratic LPM exponent a).
getModel.fPFOLIOSPEC <- function(object) object@model
getType.fPFOLIOSPEC <- function(object) object@model$type[1]
getOptimize.fPFOLIOSPEC <- function(object) object@model$optimize
getEstimator.fPFOLIOSPEC <- function(object) object@model$estimator
getTailRisk.fPFOLIOSPEC <- function(object) object@model$tailRisk
getParams.fPFOLIOSPEC <- function(object) object@model$params
getAlpha.fPFOLIOSPEC <- function(object) object@model$params$alpha
getA.fPFOLIOSPEC <- function(object) object@model$params$a
# Resolve the estimator name stored in the spec to the function it names.
.getEstimatorFun <- function(object) match.fun(getEstimator(object))
# ------------------------------------------------------------------------------
# Accessors for the @portfolio slot: weights, target return/risk, risk-free
# rate, number of efficient-frontier points and the status flag.
getPortfolio.fPFOLIOSPEC <- function(object) object@portfolio
getWeights.fPFOLIOSPEC <- function(object) object@portfolio$weights
getTargetReturn.fPFOLIOSPEC <- function(object) object@portfolio$targetReturn
getTargetRisk.fPFOLIOSPEC <- function(object) object@portfolio$targetRisk
getRiskFreeRate.fPFOLIOSPEC <- function(object) object@portfolio$riskFreeRate
getNFrontierPoints.fPFOLIOSPEC <- function(object) object@portfolio$nFrontierPoints
getStatus.fPFOLIOSPEC <- function(object) object@portfolio$status
# ------------------------------------------------------------------------------
# Accessors for the @optim slot: solver name, objective function name,
# solver options, control-parameter list and the trace flag.
getOptim.fPFOLIOSPEC <- function(object) object@optim
getSolver.fPFOLIOSPEC <- function(object) object@optim$solver
getObjective.fPFOLIOSPEC <- function(object) object@optim$objective
getOptions.fPFOLIOSPEC <- function(object) object@optim$options
getControl.fPFOLIOSPEC <- function(object) object@optim$control
getTrace.fPFOLIOSPEC <- function(object) object@optim$trace
# ------------------------------------------------------------------------------
# Accessor for the @messages slot.
getMessages.fPFOLIOSPEC <- function(object) object@messages
################################################################################
|
#########################################################################################################################
#
# R - function segm3Dkrv for simulating critical values in segm3D
#
# emaphazises on the propagation-separation approach
#
# Copyright (C) 2010-12 Weierstrass-Institut fuer
# Angewandte Analysis und Stochastik (WIAS)
#
# Author: Joerg Polzehl
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
# USA.
#
#
#   segm3Dkrv: simulate pure-noise data in order to obtain critical values
#   for the adaptive segmentation algorithm in segm3D (propagation-separation).
#
#   dy      - dimensions of the 3D volume, length-3 integer vector
#   df      - residual degrees of freedom; nt = df + 1 noise volumes are drawn
#   hmax    - maximal bandwidth (defaults to 5)
#   ladjust - multiplicative adjustment of the adaptation parameter lambda
#   beta    - accepted for interface compatibility; not used in this function
#   graph   - if TRUE, display intermediate images after every step
#   h0      - bandwidths of pre-existing spatial correlation; pre-smoothing
#             would require package aws, so here only a warning is issued
#
#   Returns (invisibly) a list with per-step mean absolute error (mae),
#   mean squared error (mse) and the extreme test statistics (maxvalue).
#
#   Changes vs. the previous version: removed the unused local helper
#   IQRdiff and the unused locals args/skern/steps, and the dead hakt0
#   assignments (including a wasted gethani() call per iteration).
#
segm3Dkrv <- function(dy,df,hmax=NULL,ladjust=1,beta=0,graph=FALSE,h0=c(0,0,0)) {
  nt <- df+1
  if (length(dy)!=3) {
    stop("dy has to be of length 3")
  }
  d <- 3
  n1 <- dy[1]
  n2 <- dy[2]
  n3 <- dy[3]
  n <- n1*n2*n3
  # nt independent N(0,1) volumes: the null (pure noise) situation
  res <- array(rnorm(prod(dy)*nt),c(nt,dy))
  if(any(h0>0)) {
    # pre-smoothing the noise (kernsm) would require package aws
    warning("for simulating critical values we need package aws")
  }
  ddim <- dim(res)
  # voxelwise mean over the nt volumes (Fortran kernel from package fmri)
  y <- .Fortran("mean3D",
                as.double(res),
                as.integer(n1),
                as.integer(n2),
                as.integer(n3),
                as.integer(nt),
                y=double(prod(dy)),
                PACKAGE="fmri",DUP=TRUE)$y
  dim(y) <- dy
  if (length(dy)==d+1) {
    # dead branch: length(dy) == 3 is guaranteed by the check above
    dim(y) <- dy[1:3]
  } else if (length(dy)!=d) {
    stop("y has to be 3 dimensional")
  }
  # localization kernel code; lambda calibrated for p_0 ~ 1e-6
  lkern <- 1
  lambda <- ladjust*(exp(2.6-3.17*log(df)+8.4*log(log(df)))+16)
  hinit <- 1
  if (is.null(hmax)) hmax <- 5 # uses a maximum of about 520 points
  if (lkern==3) {
    # hmax was given in FWHM units (Gaussian kernel truncated at 4)
    hmax <- fwhm2bw(hmax)*4
    hinit <- min(hinit,hmax)
  }
  if(is.null(h0)) h0 <- rep(0,3)
  # center the residuals voxelwise (homoskedastic Gaussian case)
  mask <- array(TRUE,dy[1:3])
  res <- .Fortran("sweepm",res=as.double(res),
                  as.logical(mask),
                  as.integer(n1),
                  as.integer(n2),
                  as.integer(n3),
                  as.integer(nt),
                  PACKAGE="fmri",DUP=TRUE)$res
  cat("\nfmri.smooth: first variance estimate","\n")
  vartheta0 <- .Fortran("ivar",as.double(res),
                        as.double(1),
                        as.logical(rep(TRUE,prod(dy))),
                        as.integer(n1),
                        as.integer(n2),
                        as.integer(n3),
                        as.integer(nt),
                        var = double(n1*n2*n3),
                        PACKAGE="fmri",DUP=TRUE)$var
  sigma2 <- vartheta0/df # variance of y (assuming zero mean)
  sigma2 <- 1/sigma2     # keep the inverse for easier computations
  dim(sigma2) <- dy
  # bandwidths are expressed relative to voxel size in the first dimension
  wghts <- c(1,1,1)
  hinit <- hinit/wghts[1]
  hmax <- hmax/wghts[1]
  wghts <- (wghts[2:3]/wghts[1])
  tobj <- list(bi= rep(1,n))
  theta <- y
  segm <- array(0,dy)
  varest <- 1/sigma2
  maxvol <- getvofh(hmax,lkern,wghts)
  fov <- prod(ddim[1:3])
  # number of steps: bandwidth sequence grows the kernel volume by 1.25/step
  kstar <- as.integer(log(maxvol)/log(1.25))
  cat("FOV",fov,"ladjust",ladjust,"lambda",lambda,"\n")
  k <- 1
  hakt <- hinit
  lambda0 <- lambda
  maxvalue <- matrix(0,2,kstar)
  mse <- numeric(kstar)
  mae <- numeric(kstar)
  if (hinit>1) lambda0 <- 1e50 # removes the stochastic term for the first step
  # spatial correlation implied by the pre-smoothing bandwidths h0
  # (computed for diagnostics; not used further in this function)
  scorr <- numeric(3)
  if(h0[1]>0) scorr[1] <- get.corr.gauss(h0[1],2)
  if(h0[2]>0) scorr[2] <- get.corr.gauss(h0[2],2)
  if(h0[3]>0) scorr[3] <- get.corr.gauss(h0[3],2)
  total <- cumsum(1.25^(1:kstar))/sum(1.25^(1:kstar))
  # run single steps to display intermediate results
  while (k<=kstar) {
    hakt <- gethani(1,10,lkern,1.25^k,wghts,1e-4)
    hakt.oscale <- if(lkern==3) bw2fwhm(hakt/4) else hakt
    cat("step",k,"bandwidth",signif(hakt.oscale,3),"  ")
    dlw <- (2*trunc(hakt/c(1,wghts))+1)[1:d]
    theta0 <- theta
    bi0 <- tobj$bi
    # one propagation-separation step of the segmentation kernel
    tobj <- .Fortran("segm3dkb",
                     as.double(y),
                     as.double(res),
                     as.double(sigma2),
                     as.integer(n1),
                     as.integer(n2),
                     as.integer(n3),
                     as.integer(nt),
                     as.double(df),
                     hakt=as.double(hakt),
                     as.double(lambda0),
                     as.double(theta0),
                     bi=as.double(bi0),
                     thnew=double(n1*n2*n3),
                     as.integer(lkern),
                     double(prod(dlw)),
                     as.double(wghts),
                     double(nt),#swres
                     as.double(fov),
                     varest=as.double(varest),
                     maxvalue=double(1),
                     minvalue=double(1),
                     PACKAGE="fmri",DUP=TRUE)[c("bi","thnew","hakt","varest","maxvalue","minvalue")]
    gc()
    theta <- array(tobj$thnew,dy)
    varest <- array(tobj$varest,dy)
    dim(tobj$bi) <- dy
    # record the extreme statistics of this step (basis for critical values)
    maxvalue[1,k] <- tobj$maxvalue
    maxvalue[2,k] <- -tobj$minvalue
    mae[k] <- mean(abs(theta))
    mse[k] <- mean(theta^2)
    if (graph) {
      par(mfrow=c(2,2),mar=c(1,1,3,.25),mgp=c(2,1,0))
      image(y[,,n3%/%2+1],col=gray((0:255)/255),xaxt="n",yaxt="n")
      title(paste("Observed Image  min=",signif(min(y),3)," max=",signif(max(y),3)))
      image(theta[,,n3%/%2+1],col=gray((0:255)/255),xaxt="n",yaxt="n")
      title(paste("Reconstruction h=",signif(hakt.oscale,3)," min=",signif(min(theta),3)," max=",signif(max(theta),3)))
      image(segm[,,n3%/%2+1]>0,col=gray((0:255)/255),xaxt="n",yaxt="n")
      title(paste("Segmentation h=",signif(hakt.oscale,3)," detected=",sum(segm>0)))
      image(tobj$bi[,,n3%/%2+1],col=gray((0:255)/255),xaxt="n",yaxt="n")
      title(paste("Sum of weights: min=",signif(min(tobj$bi),3)," mean=",signif(mean(tobj$bi),3)," max=",signif(max(tobj$bi),3)))
    }
    if (max(total) >0) {
      cat(signif(total[k],2)*100,"% \r",sep="")
    }
    k <- k+1
    # restore lambda after the (possibly non-adaptive) first step
    lambda0 <- lambda
    gc()
  }
  z <- list(mae=mae,mse=mse,maxvalue=maxvalue)
  invisible(z)
}
| /fmri/R/segmkrv.r | no_license | ingted/R-Examples | R | false | false | 7,666 | r | #########################################################################################################################
#
# R - function segm3Dkrv for simulating critical values in segm3D
#
# emaphazises on the propagation-separation approach
#
# Copyright (C) 2010-12 Weierstrass-Institut fuer
# Angewandte Analysis und Stochastik (WIAS)
#
# Author: Joerg Polzehl
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
# USA.
#
# Simulation-based helper: draws nt = df+1 pure-noise 3D volumes, runs the
# iterative adaptive-weights smoothing/segmentation kernel (Fortran routine
# "segm3dkb" from the fmri package) over an increasing bandwidth sequence,
# and records per-step extreme statistics and error summaries.
# NOTE(review): the setup (pure noise input, max/min tracking) suggests this
# calibrates critical values for segmentation -- confirm against package docs.
#
# Arguments:
#   dy      - integer vector of length 3: grid dimensions (n1, n2, n3)
#   df      - degrees of freedom; nt = df+1 noise replications are drawn
#   hmax    - maximal bandwidth (defaults to 5 below)
#   ladjust - multiplier applied to the adaptation parameter lambda
#   beta    - unused in this body (kept for interface compatibility?)
#   graph   - if TRUE, display intermediate images after each step
#   h0      - presmoothing bandwidths for the noise (needs package aws;
#             the presmoothing code below is commented out)
# Value (invisible): list(mae, mse, maxvalue) -- per-step mean absolute error,
#   mean squared error, and a 2 x kstar matrix of max / -min statistics.
segm3Dkrv <- function(dy,df,hmax=NULL,ladjust=1,beta=0,graph=FALSE,h0=c(0,0,0)) {
#
#
#  Auxilary functions
  # robust noise-sd estimate from lag-1 differences
  # NOTE(review): defined but never called in this function (dead code)
  IQRdiff <- function(y) IQR(diff(y))/1.908
#
#  first check arguments and initialize
#
  args <- match.call()   # captured call; not used further in this body
  nt <- df+1             # number of replicate noise volumes
  if (length(dy)!=3) {
    stop("dy has to be of length 3")
  }
  d <- 3
  n1 <- dy[1]
  n2 <- dy[2]
  n3 <- dy[3]
  n <- n1*n2*n3
  # nt independent N(0,1) volumes, stored as a (nt, n1, n2, n3) array
  res <- array(rnorm(prod(dy)*nt),c(nt,dy))
  if(any(h0>0)) {
    # presmoothing of the simulated noise is currently disabled
    # require(aws)
    warning("for simulating critical values we need package aws")
    # for(i in 1:nt) res[i,,,] <- kernsm(res[i,,,],h0)@yhat
  }
# test dimension of data (vector of 3D) and define dimension related stuff
  ddim <- dim(res)   # (nt, n1, n2, n3); captured BEFORE res loses its dim below
  # voxelwise mean over the nt replications (Fortran "mean3D")
  y <- .Fortran("mean3D",
                as.double(res),
                as.integer(n1),
                as.integer(n2),
                as.integer(n3),
                as.integer(nt),
                y=double(prod(dy)),
                PACKAGE="fmri",DUP=TRUE)$y
  dim(y) <- dy
  # NOTE(review): length(dy) is always 3 here (checked above), so both
  # branches below are effectively dead code
  if (length(dy)==d+1) {
    dim(y) <- dy[1:3]
  } else if (length(dy)!=d) {
    stop("y has to be 3 dimensional")
  }
# set the code for the kernel (used in lkern) and set lambda
  lkern <- 1   # location-kernel code passed to the Fortran routines
  skern <- 1   # NOTE(review): unused in this body
# define lambda
  # adaptation parameter; the df-dependent formula is stated (original
  # comment) to correspond to p_0 ~ 1e-6
  lambda <- ladjust*(exp(2.6-3.17*log(df)+8.4*log(log(df)))+16) # corresponding to p_0 ~ 1e-6
  hinit <- 1
# define hmax
  if (is.null(hmax)) hmax <- 5 # uses a maximum of about 520 points
# re-define bandwidth for Gaussian lkern!!!!
  # NOTE(review): dead branch -- lkern is hard-coded to 1 above
  if (lkern==3) {
    # assume hmax was given in FWHM units (Gaussian kernel will be truncated at 4)
    hmax <- fwhm2bw(hmax)*4
    hinit <- min(hinit,hmax)
  }
  if(is.null(h0)) h0 <- rep(0,3)
# estimate variance in the gaussian case if necessary
# deal with homoskedastic Gaussian case by extending sigma2
  mask <- array(TRUE,dy[1:3])   # all voxels active
  # Fortran "sweepm": presumably sweeps the voxelwise mean out of the
  # replications (inferred from the name -- TODO confirm); res comes back
  # as a plain vector (its dim attribute is dropped here)
  res <- .Fortran("sweepm",res=as.double(res),
                  as.logical(mask),
                  as.integer(n1),
                  as.integer(n2),
                  as.integer(n3),
                  as.integer(nt),
                  PACKAGE="fmri",DUP=TRUE)$res
  cat("\nfmri.smooth: first variance estimate","\n")
  # voxelwise variance of the (centered) replications (Fortran "ivar")
  vartheta0 <- .Fortran("ivar",as.double(res),
                        as.double(1),
                        as.logical(rep(TRUE,prod(dy))),
                        as.integer(n1),
                        as.integer(n2),
                        as.integer(n3),
                        as.integer(nt),
                        var = double(n1*n2*n3),
                        PACKAGE="fmri",DUP=TRUE)$var
  sigma2 <- vartheta0/df # thats the variance of y ... !!!! assuming zero mean
  sigma2 <- 1/sigma2 # need the inverse for easier computations
  dim(sigma2) <- dy
# Initialize list for bi and theta
  wghts <- c(1,1,1)   # isotropic voxel weights
  hinit <- hinit/wghts[1]
  hmax <- hmax/wghts[1]
  wghts <- (wghts[2:3]/wghts[1])
  tobj <- list(bi= rep(1,n))   # sum of weights per voxel, starts at 1
  theta <- y                   # current estimate
  segm <- array(0,dy)          # segmentation map (only displayed when graph=TRUE)
  varest <- 1/sigma2
  maxvol <- getvofh(hmax,lkern,wghts)   # kernel volume at hmax (fmri helper)
  # NOTE(review): ddim is the dim of the 4D res array, so ddim[1:3] is
  # (nt, n1, n2) rather than the spatial field of view (n1, n2, n3) --
  # possibly a bug; compare with segm3D in the fmri package.
  fov <- prod(ddim[1:3])
  kstar <- as.integer(log(maxvol)/log(1.25))   # number of bandwidth steps
  steps <- kstar+1   # NOTE(review): unused
  cat("FOV",fov,"ladjust",ladjust,"lambda",lambda,"\n")
  k <- 1
  hakt <- hinit
  hakt0 <- hinit
  lambda0 <- lambda
  maxvalue <- matrix(0,2,kstar)   # row 1: max statistic, row 2: -min statistic
  mse <- numeric(kstar)
  mae <- numeric(kstar)
  if (hinit>1) lambda0 <- 1e50 # that removes the stochstic term for the first step
  # correlation corrections for presmoothed noise (not used later in this body)
  scorr <- numeric(3)
  if(h0[1]>0) scorr[1] <- get.corr.gauss(h0[1],2)
  if(h0[2]>0) scorr[2] <- get.corr.gauss(h0[2],2)
  if(h0[3]>0) scorr[3] <- get.corr.gauss(h0[3],2)
  total <- cumsum(1.25^(1:kstar))/sum(1.25^(1:kstar))   # progress fractions
# run single steps to display intermediate results
  while (k<=kstar) {
    # bandwidths for the previous and current step (kernel volume grows by 1.25)
    hakt0 <- gethani(1,10,lkern,1.25^(k-1),wghts,1e-4)
    hakt <- gethani(1,10,lkern,1.25^k,wghts,1e-4)
    hakt.oscale <- if(lkern==3) bw2fwhm(hakt/4) else hakt
    cat("step",k,"bandwidth",signif(hakt.oscale,3)," ")
    dlw <- (2*trunc(hakt/c(1,wghts))+1)[1:d]   # local window dimensions
    hakt0 <- hakt   # NOTE(review): immediately overwrites the gethani value above
    theta0 <- theta
    bi0 <- tobj$bi
#
# need these values to compute variances after the last iteration
#
    # one adaptive smoothing/segmentation step (Fortran "segm3dkb")
    tobj <- .Fortran("segm3dkb",
                     as.double(y),
                     as.double(res),
                     as.double(sigma2),
                     as.integer(n1),
                     as.integer(n2),
                     as.integer(n3),
                     as.integer(nt),
                     as.double(df),
                     hakt=as.double(hakt),
                     as.double(lambda0),
                     as.double(theta0),
                     bi=as.double(bi0),
                     thnew=double(n1*n2*n3),
                     as.integer(lkern),
                     double(prod(dlw)),
                     as.double(wghts),
                     double(nt),#swres
                     as.double(fov),
                     varest=as.double(varest),
                     maxvalue=double(1),
                     minvalue=double(1),
                     PACKAGE="fmri",DUP=TRUE)[c("bi","thnew","hakt","varest","maxvalue","minvalue")]
    gc()
    theta <- array(tobj$thnew,dy)
    varest <- array(tobj$varest,dy)
    dim(tobj$bi) <- dy
    # record the extreme statistics of this step
    maxvalue[1,k] <- tobj$maxvalue
    maxvalue[2,k] <- -tobj$minvalue
    # error summaries (true signal is zero by construction)
    mae[k] <- mean(abs(theta))
    mse[k] <- mean(theta^2)
    if (graph) {
      # 2x2 panel: central slice of data, estimate, segmentation and weights
      par(mfrow=c(2,2),mar=c(1,1,3,.25),mgp=c(2,1,0))
      image(y[,,n3%/%2+1],col=gray((0:255)/255),xaxt="n",yaxt="n")
      title(paste("Observed Image min=",signif(min(y),3)," max=",signif(max(y),3)))
      image(theta[,,n3%/%2+1],col=gray((0:255)/255),xaxt="n",yaxt="n")
      title(paste("Reconstruction h=",signif(hakt.oscale,3)," min=",signif(min(theta),3)," max=",signif(max(theta),3)))
      image(segm[,,n3%/%2+1]>0,col=gray((0:255)/255),xaxt="n",yaxt="n")
      title(paste("Segmentation h=",signif(hakt.oscale,3)," detected=",sum(segm>0)))
      image(tobj$bi[,,n3%/%2+1],col=gray((0:255)/255),xaxt="n",yaxt="n")
      title(paste("Sum of weights: min=",signif(min(tobj$bi),3)," mean=",signif(mean(tobj$bi),3)," max=",signif(max(tobj$bi),3)))
    }
    if (max(total) >0) {
      # progress indicator
      cat(signif(total[k],2)*100,"% \r",sep="")
    }
    k <- k+1
# adjust lambda for the high intrinsic correlation between neighboring estimates
    lambda0 <- lambda
    gc()
  }
  z <- list(mae=mae,mse=mse,maxvalue=maxvalue)
  invisible(z)
}
|
testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1942759639L, -1815221204L, 601253144L, -790102194L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) | /dexterMST/inst/testfiles/is_person_booklet_sorted/AFL_is_person_booklet_sorted/is_person_booklet_sorted_valgrind_files/1615939210-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 826 | r | testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1942759639L, -1815221204L, 601253144L, -790102194L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) |
# Auto-generated fuzzer regression input: exercises metafolio's internal
# est_beta_params with a denormal mean and zero variance.
testlist <- list(mu = 1.71964488691504e-319, var = 0)
result <- do.call(metafolio:::est_beta_params,testlist)
str(result) | /metafolio/inst/testfiles/est_beta_params/libFuzzer_est_beta_params/est_beta_params_valgrind_files/1612987777-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 121 | r | testlist <- list(mu = 1.71964488691504e-319, var = 0)
result <- do.call(metafolio:::est_beta_params,testlist)
str(result) |
# S3 summary method for "fGWAS.scan" objects.
#
# Collects the SNPs whose fGWAS p-value (column 7 of object$fgwas) is at or
# below object$options$fgwas.cutoff, sorted by increasing p-value, into the
# `fgwas_sig` component of the returned "sum.fGWAS.scan" object.
#
# BUGFIX: the body previously referenced the undefined name `r.gls`
# (copy-paste from a gls-based variant) instead of `r.fgwas`, so the method
# failed whenever object$fgwas was present; also added drop=F so a single
# significant SNP stays a one-row matrix instead of collapsing to a vector.
summary.fGWAS.scan<-function(object, ...)
{
	r.fgwas <- object;

	r.sum.ret <- list();

	if(!is.null(r.fgwas$fgwas))
	{
		re7 <- r.fgwas$fgwas;
		# indices of SNPs passing the significance cutoff (p-value in column 7)
		fgwas.sig <- which( re7[,7] <= r.fgwas$options$fgwas.cutoff );
		if(length(fgwas.sig)>0)
		{
			fgwas_sigs <- re7[ fgwas.sig, , drop=F];
			# order significant SNPs by increasing p-value
			fgwas.sig.inc <- order(fgwas_sigs[,7]);
			r.sum.ret$fgwas_sig <- fgwas_sigs[fgwas.sig.inc, , drop=F];
		}

		# NOTE(review): r.sum.ret$varsel / r.sum.ret$refit are never assigned
		# above, so these two branches are currently dead code -- presumably
		# remnants of a larger summary; kept for fidelity.
		if(!is.null(r.sum.ret$varsel))
			r.sum.ret$varsel <- cbind(r.sum.ret$varsel, fgwas.pvalue=find_fgwas_pvalue( r.fgwas$fgwas, rownames(r.sum.ret$varsel) ) ) ;
		if(!is.null(r.sum.ret$refit))
			r.sum.ret$refit <- cbind(r.sum.ret$refit, fgwas.pvalue=find_fgwas_pvalue( r.fgwas$fgwas, rownames(r.sum.ret$refit) ) ) ;
	}

	class(r.sum.ret) <- "sum.fGWAS.scan";
	r.sum.ret
}
# S3 print method for "sum.fGWAS.scan" objects: reports how many significant
# SNPs the summary holds and shows at most the top 25 rows (the summary
# stores them ordered by increasing p-value).  Prints nothing when no
# significant SNPs were recorded.
print.sum.fGWAS.scan<-function(x, ...)
{
	sig <- x$fgwas_sig
	if ( is.null(sig) )
		return(invisible(NULL))

	n.sig <- NROW(sig)
	cat("--- Significant SNPs Estimate by fGWAS method:", n.sig, "SNPs\n")
	if ( n.sig > 25 ) {
		cat("Top 25 SNPs:\n")
		show( sig[1:25, , drop=F] )
	} else {
		show( sig )
	}
}
# S3 plot method for "fGWAS.scan" objects.  Draws up to three figures --
# the fGWAS-filter manhattan plot, the variable-selection manhattan plot
# and the refit curves -- saving each under `fig.prefix`; when no prefix
# is supplied the historical default "gls.plot" is used.  A message is
# printed for each result set that is absent.
plot.fGWAS.scan<-function( x, y=NULL, ... , fig.prefix=NULL )
{
	obj <- x
	if ( missing(fig.prefix) ) fig.prefix <- "gls.plot"

	# (1) manhattan plot of the fGWAS filter (columns 1, 2 and 7 --
	#     presumably chr/pos/p-value; confirm against draw_man_fgwas)
	if ( is.null(obj$fgwas) )
		cat("! No fGWAS filter results.\n")
	else
		draw_man_fgwas( obj$fgwas[, c(1,2,7), drop=F], fig.prefix, "fgwas" )

	# (2) variable-selection manhattan plot: positional columns come from
	#     the dominant table when present (it overwrote the additive one in
	#     the original), otherwise the additive table; the 7th column of
	#     each available table is then appended.
	has.add <- !is.null(obj$varsel_add)
	has.dom <- !is.null(obj$varsel_dom)
	if ( !has.add && !has.dom ) {
		cat("! No varible selection results.\n")
	} else {
		varsel <- if (has.dom) obj$varsel_dom[, c(1,2), drop=F] else obj$varsel_add[, c(1,2), drop=F]
		if (has.add) varsel <- cbind( varsel, obj$varsel_add[,7] )
		if (has.dom) varsel <- cbind( varsel, obj$varsel_dom[,7] )
		draw_man_adh2( varsel, fig.prefix, "varsel" )
	}

	# (3) refit curves from the merged additive/dominant refit tables
	if ( is.null(obj$refit_add) && is.null(obj$refit_dom) ) {
		cat("! No refit results.\n")
	} else {
		refit <- merge_add_dom( obj$refit_add, obj$refit_dom )
		draw_refit_curve( refit, fig.prefix, "curve" )
	}
}
# Placeholder S3 print method for "fGWAS.scan" objects; intentionally empty
# (returns NULL).
print.fGWAS.scan<-function(x, ...)
{
}
# Placeholder S3 methods: summary/print/plot for "fGWAS.dat" and
# "fGWAS.perm" objects are not implemented yet; all return NULL.
summary.fGWAS.dat<-function( x,..., fig.prefix=NULL )
{
}
summary.fGWAS.perm<-function( x,..., fig.prefix=NULL )
{
}
print.fGWAS.dat<-function( x,..., fig.prefix=NULL )
{
}
print.fGWAS.perm<-function( x,..., fig.prefix=NULL )
{
}
plot.fGWAS.dat<-function( x,..., fig.prefix=NULL )
{
}
plot.fGWAS.perm<-function( x,..., fig.prefix=NULL )
{
} | /fgwas/R/summary.r | no_license | wzhy2000/R | R | false | false | 2,747 | r | summary.fGWAS.scan<-function(object, ...)
{
r.fgwas <- object;
#fgwas
#filter
#options
#params
#curve
#covariance
#est.values
r.sum.ret <- list();
if(!is.null(r.gls$fgwas))
{
re7 <- r.gls$fgwas;
fgwas.sig <- which( re7[,7] <= r.gls$options$fgwas.cutoff );
if(length(fgwas.sig)>0)
{
fgwas_sigs <- re7[ fgwas.sig, , drop=F];
fgwas.sig.inc <- order(fgwas_sigs[,7]);
r.sum.ret$fgwas_sig <- fgwas_sigs[fgwas.sig.inc,];
}
if(!is.null(r.sum.ret$varsel))
r.sum.ret$varsel <- cbind(r.sum.ret$varsel, fgwas.pvalue=find_fgwas_pvalue( r.gls$fgwas, rownames(r.sum.ret$varsel) ) ) ;
if(!is.null(r.sum.ret$refit))
r.sum.ret$refit <- cbind(r.sum.ret$refit, fgwas.pvalue=find_fgwas_pvalue( r.gls$fgwas, rownames(r.sum.ret$refit) ) ) ;
}
class(r.sum.ret) <- "sum.fGWAS.scan";
r.sum.ret
}
print.sum.fGWAS.scan<-function(x, ...)
{
r.sum.ret <- x;
if(!is.null(r.sum.ret$fgwas_sig))
{
cat("--- Significant SNPs Estimate by fGWAS method:", NROW(r.sum.ret$fgwas_sig), "SNPs\n");
if( NROW(r.sum.ret$fgwas_sig)>25 )
{
cat("Top 25 SNPs:\n");
show(r.sum.ret$fgwas_sig[1:25,,drop=F]);
}
else
show(r.sum.ret$fgwas_sig);
}
}
plot.fGWAS.scan<-function( x, y=NULL, ... , fig.prefix=NULL )
{
r.gls <- x;
if( missing(fig.prefix)) fig.prefix <- "gls.plot";
if(!is.null(r.gls$fgwas))
{
filter.man <- r.gls$fgwas[, c(1,2,7), drop=F]
draw_man_fgwas( filter.man, fig.prefix, "fgwas" );
}
else
cat("! No fGWAS filter results.\n");
if( !is.null(r.gls$varsel_add) || !is.null(r.gls$varsel_dom))
{
if ( !is.null(r.gls$varsel_add) ) varsel <- r.gls$varsel_add[, c(1,2), drop=F]
if ( !is.null(r.gls$varsel_dom) ) varsel <- r.gls$varsel_dom[, c(1,2), drop=F]
if ( !is.null(r.gls$varsel_add) ) varsel<- cbind( varsel, r.gls$varsel_add[,7] );
if ( !is.null(r.gls$varsel_dom) ) varsel<- cbind( varsel, r.gls$varsel_dom[,7] );
draw_man_adh2( varsel, fig.prefix, "varsel" );
}
else
cat("! No varible selection results.\n");
if( !is.null(r.gls$refit_add) || !is.null(r.gls$refit_dom) )
{
refit<- merge_add_dom( r.gls$refit_add, r.gls$refit_dom);
draw_refit_curve( refit, fig.prefix, "curve" );
}
else
cat("! No refit results.\n");
}
print.fGWAS.scan<-function(x, ...)
{
}
summary.fGWAS.dat<-function( x,..., fig.prefix=NULL )
{
}
summary.fGWAS.perm<-function( x,..., fig.prefix=NULL )
{
}
print.fGWAS.dat<-function( x,..., fig.prefix=NULL )
{
}
print.fGWAS.perm<-function( x,..., fig.prefix=NULL )
{
}
plot.fGWAS.dat<-function( x,..., fig.prefix=NULL )
{
}
plot.fGWAS.perm<-function( x,..., fig.prefix=NULL )
{
} |
## external validation plots
## TERN landscapes
# back transformed
#
# For each of the six standard depth intervals (d1..d6) this script reads the
# back-transformed (BT) prediction realisations and the matching observations,
# averages the realisations per site, and writes an observed-vs-predicted
# scatter plot (with a red 1:1 line) to a tiff under fig.root.
# Refactored: the six copy-pasted D1..D6 stanzas collapsed into one helper.
root<- "Z:/projects/ternlandscapes_2019/soiltexture/outs/dsm_externalvalidation/BT/data/"
fig.root<- "Z:/projects/ternlandscapes_2019/soiltexture/outs/dsm_externalvalidation/"

# Draw the validation plot for one depth interval.
#   depth - integer 1..6; selects sand_d<depth>_pred_data_BT.rds and
#           d<depth>_observed_data_BT.rds under `root`, and writes
#           BT/tern_v2_val_sand_d<depth>.tiff under `fig.root`.
plot_sand_val <- function(depth) {
  # prediction data (one column per realisation)
  pred.data<- readRDS(file = paste0(root, "sand_d", depth, "_pred_data_BT.rds"))
  print(dim(pred.data))  # keep the console feedback of the old bare dim() calls

  # observation data; column 2 holds the observed sand (%)
  observation.data<- readRDS(file = paste0(root, "d", depth, "_observed_data_BT.rds"))

  # row means over the prediction realisations
  val.mean<- rowMeans(pred.data)

  ## fancy plotting
  xlimits= c(0,100)
  ylimits= c(0,100)
  tiff(file=paste0(fig.root,"BT/tern_v2_val_sand_d",depth,".tiff"),width=12,height=12,units="cm",res=300,pointsize=8)
  plot(observation.data[,2], val.mean,xlim= xlimits, ylim= ylimits, type= "n",axes=F,ylab="predicted sand (%)", xlab= "observed sand (%)",col="black", font.lab=2,cex.lab=1.5,font=2, font.axis=2, family="sans")
  axis(side=2,at=seq(from = 0,to = 100,by = 10),font=2, font.axis=2, family="sans",lty=1, lwd=1,cex.axis=1.2, col="black")
  axis(side=1,at=seq(from = 0,to = 100,by = 10),font=2, font.axis=2, family="sans",lty=1, lwd=1,cex.axis=1.2, col="black")
  points (observation.data[,2], val.mean,pch=1, col="black", cex=0.1)
  abline(0, 1, lwd=1.5, col="red")  # 1:1 line
  dev.off()
}

### sand, depths D1-D6
for (depth in 1:6) {
  plot_sand_val(depth)
}
| /Production/DSM/SoilTexture/digitalsoilmapping/validation/model_diognostics/external_val_work_sand_BT.R | permissive | AusSoilsDSM/SLGA | R | false | false | 6,137 | r | ## external validation plots
## TERN landscapes
# back transformed
root<- "Z:/projects/ternlandscapes_2019/soiltexture/outs/dsm_externalvalidation/BT/data/"
fig.root<- "Z:/projects/ternlandscapes_2019/soiltexture/outs/dsm_externalvalidation/"
### sand
## D1
# prediction data
pred.data<- readRDS(file = paste0(root,"sand_d1_pred_data_BT.rds"))
dim(pred.data)
# observation data
observation.data<- readRDS(file = paste0(root,"d1_observed_data_BT.rds"))
# row means
val.mean<- rowMeans(pred.data)
## fancy plotting
# sand
xlimits= c(0,100)
ylimits= c(0,100)
tiff(file=paste0(fig.root,"BT/tern_v2_val_sand_d1.tiff"),width=12,height=12,units="cm",res=300,pointsize=8)
plot(observation.data[,2], val.mean,xlim= xlimits, ylim= ylimits, type= "n",axes=F,ylab="predicted sand (%)", xlab= "observed sand (%)",col="black", font.lab=2,cex.lab=1.5,font=2, font.axis=2, family="sans")
axis(side=2,at=seq(from = 0,to = 100,by = 10),font=2, font.axis=2, family="sans",lty=1, lwd=1,cex.axis=1.2, col="black")
axis(side=1,at=seq(from = 0,to = 100,by = 10),font=2, font.axis=2, family="sans",lty=1, lwd=1,cex.axis=1.2, col="black")
points (observation.data[,2], val.mean,pch=1, col="black", cex=0.1)
abline(0, 1, lwd=1.5, col="red")
dev.off()
## D2
# prediction data
pred.data<- readRDS(file = paste0(root,"sand_d2_pred_data_BT.rds"))
dim(pred.data)
# observation data
observation.data<- readRDS(file = paste0(root,"d2_observed_data_BT.rds"))
# row means
val.mean<- rowMeans(pred.data)
## fancy plotting
# sand
xlimits= c(0,100)
ylimits= c(0,100)
tiff(file=paste0(fig.root,"BT/tern_v2_val_sand_d2.tiff"),width=12,height=12,units="cm",res=300,pointsize=8)
plot(observation.data[,2], val.mean,xlim= xlimits, ylim= ylimits, type= "n",axes=F,ylab="predicted sand (%)", xlab= "observed sand (%)",col="black", font.lab=2,cex.lab=1.5,font=2, font.axis=2, family="sans")
axis(side=2,at=seq(from = 0,to = 100,by = 10),font=2, font.axis=2, family="sans",lty=1, lwd=1,cex.axis=1.2, col="black")
axis(side=1,at=seq(from = 0,to = 100,by = 10),font=2, font.axis=2, family="sans",lty=1, lwd=1,cex.axis=1.2, col="black")
points (observation.data[,2], val.mean,pch=1, col="black", cex=0.1)
abline(0, 1, lwd=1.5, col="red")
dev.off()
## D3
# prediction data
pred.data<- readRDS(file = paste0(root,"sand_d3_pred_data_BT.rds"))
dim(pred.data)
# observation data
observation.data<- readRDS(file = paste0(root,"d3_observed_data_BT.rds"))
# row means
val.mean<- rowMeans(pred.data)
## fancy plotting
# sand
xlimits= c(0,100)
ylimits= c(0,100)
tiff(file=paste0(fig.root,"BT/tern_v2_val_sand_d3.tiff"),width=12,height=12,units="cm",res=300,pointsize=8)
plot(observation.data[,2], val.mean,xlim= xlimits, ylim= ylimits, type= "n",axes=F,ylab="predicted sand (%)", xlab= "observed sand (%)",col="black", font.lab=2,cex.lab=1.5,font=2, font.axis=2, family="sans")
axis(side=2,at=seq(from = 0,to = 100,by = 10),font=2, font.axis=2, family="sans",lty=1, lwd=1,cex.axis=1.2, col="black")
axis(side=1,at=seq(from = 0,to = 100,by = 10),font=2, font.axis=2, family="sans",lty=1, lwd=1,cex.axis=1.2, col="black")
points (observation.data[,2], val.mean,pch=1, col="black", cex=0.1)
abline(0, 1, lwd=1.5, col="red")
dev.off()
## D4
# prediction data
pred.data<- readRDS(file = paste0(root,"sand_d4_pred_data_BT.rds"))
dim(pred.data)
# observation data
observation.data<- readRDS(file = paste0(root,"d4_observed_data_BT.rds"))
# row means
val.mean<- rowMeans(pred.data)
## fancy plotting
# sand
xlimits= c(0,100)
ylimits= c(0,100)
tiff(file=paste0(fig.root,"BT/tern_v2_val_sand_d4.tiff"),width=12,height=12,units="cm",res=300,pointsize=8)
plot(observation.data[,2], val.mean,xlim= xlimits, ylim= ylimits, type= "n",axes=F,ylab="predicted sand (%)", xlab= "observed sand (%)",col="black", font.lab=2,cex.lab=1.5,font=2, font.axis=2, family="sans")
axis(side=2,at=seq(from = 0,to = 100,by = 10),font=2, font.axis=2, family="sans",lty=1, lwd=1,cex.axis=1.2, col="black")
axis(side=1,at=seq(from = 0,to = 100,by = 10),font=2, font.axis=2, family="sans",lty=1, lwd=1,cex.axis=1.2, col="black")
points (observation.data[,2], val.mean,pch=1, col="black", cex=0.1)
abline(0, 1, lwd=1.5, col="red")
dev.off()
## D5
# prediction data
pred.data<- readRDS(file = paste0(root,"sand_d5_pred_data_BT.rds"))
dim(pred.data)
# observation data
observation.data<- readRDS(file = paste0(root,"d5_observed_data_BT.rds"))
# row means
val.mean<- rowMeans(pred.data)
## fancy plotting
# sand
xlimits= c(0,100)
ylimits= c(0,100)
tiff(file=paste0(fig.root,"BT/tern_v2_val_sand_d5.tiff"),width=12,height=12,units="cm",res=300,pointsize=8)
plot(observation.data[,2], val.mean,xlim= xlimits, ylim= ylimits, type= "n",axes=F,ylab="predicted sand (%)", xlab= "observed sand (%)",col="black", font.lab=2,cex.lab=1.5,font=2, font.axis=2, family="sans")
axis(side=2,at=seq(from = 0,to = 100,by = 10),font=2, font.axis=2, family="sans",lty=1, lwd=1,cex.axis=1.2, col="black")
axis(side=1,at=seq(from = 0,to = 100,by = 10),font=2, font.axis=2, family="sans",lty=1, lwd=1,cex.axis=1.2, col="black")
points (observation.data[,2], val.mean,pch=1, col="black", cex=0.1)
abline(0, 1, lwd=1.5, col="red")
dev.off()
## D6
# prediction data
pred.data<- readRDS(file = paste0(root,"sand_d6_pred_data_BT.rds"))
dim(pred.data)
# observation data
observation.data<- readRDS(file = paste0(root,"d6_observed_data_BT.rds"))
# row means
val.mean<- rowMeans(pred.data)
## fancy plotting
# sand
xlimits= c(0,100)
ylimits= c(0,100)
tiff(file=paste0(fig.root,"BT/tern_v2_val_sand_d6.tiff"),width=12,height=12,units="cm",res=300,pointsize=8)
plot(observation.data[,2], val.mean,xlim= xlimits, ylim= ylimits, type= "n",axes=F,ylab="predicted sand (%)", xlab= "observed sand (%)",col="black", font.lab=2,cex.lab=1.5,font=2, font.axis=2, family="sans")
axis(side=2,at=seq(from = 0,to = 100,by = 10),font=2, font.axis=2, family="sans",lty=1, lwd=1,cex.axis=1.2, col="black")
axis(side=1,at=seq(from = 0,to = 100,by = 10),font=2, font.axis=2, family="sans",lty=1, lwd=1,cex.axis=1.2, col="black")
points (observation.data[,2], val.mean,pch=1, col="black", cex=0.1)
abline(0, 1, lwd=1.5, col="red")
dev.off()
|
# https://www.google.org/flutrends/about/data/flu/historic/us-historic-v2.txt
# Build the `fluTrends` package dataset from the Google Flu Trends US historic
# file: skip the 8 preamble lines, keep column 1 plus columns 3:53 (column 2
# -- presumably the national aggregate -- is dropped; TODO confirm against
# the raw file), then save it as data/fluTrends.rda via usethis.
fluTrends = readr::read_csv("us-historic-v2.txt",
                            skip = 8)[,c(1,3:53)]
usethis::use_data(fluTrends, overwrite = TRUE)
| /data-raw/fluTrends.R | no_license | jarad/MWBDSSworkshop | R | false | false | 227 | r | # https://www.google.org/flutrends/about/data/flu/historic/us-historic-v2.txt
fluTrends = readr::read_csv("us-historic-v2.txt",
skip = 8)[,c(1,3:53)]
usethis::use_data(fluTrends, overwrite = TRUE)
|
library(ape)
# read the Newick tree for alignment 9841_1
testtree <- read.tree("9841_1.txt")
# remove the root (repo layout suggests the unrooted topology feeds codeml,
# which requires unrooted trees -- NOTE(review): confirm)
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9841_1_unrooted.txt") | /codeml_files/newick_trees_processed/9841_1/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("9841_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9841_1_unrooted.txt") |
rm( list = ls())
source( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/RCode/hyads_to_pm25_functions.R')
#coordinate reference system projection string for spatial data
p4s <- "+proj=lcc +lat_1=33 +lat_2=45 +lat_0=40 +lon_0=-97 +a=6370000 +b=6370000"
#======================================================================#
## Load meteorology as list of months
#======================================================================#
#define the layer names, do the actual downloading
# Force UTC so NetCDF time axes decode consistently across machines.
Sys.setenv(TZ='UTC')
# NARR monthly-mean layers: 2 m air temperature, accumulated precipitation,
# 2 m relative humidity, and 10 m v/u wind components.
layer.names <- c( "air.2m.mon.mean.nc",
                  "apcp.mon.mean.nc",
                  "rhum.2m.mon.mean.nc",
                  "vwnd.10m.mon.mean.nc",
                  "uwnd.10m.mon.mean.nc")
names( layer.names) <- c( "temp", "apcp", "rhum", "vwnd", "uwnd")
# do the data downloading
# set destination parameter to where you want the data downloaded,
# for example, destination = '~/Desktop'
list.met <- lapply( layer.names,
                    downloader.fn, #destination = '~/Desktop'
                    dataset = 'NARR')
# take over US
# Annual-average meteorology rasters for each study year.
# NOTE(review): bare F is used for FALSE throughout this script — it works,
# but TRUE/FALSE is safer since T/F are reassignable.
mets2005 <- usa.functioner( 2005, list.met, dataset = 'NARR', return.usa.sub = F)
mets2006 <- usa.functioner( 2006, list.met, dataset = 'NARR', return.usa.sub = F)
mets2011 <- usa.functioner( 2011, list.met, dataset = 'NARR', return.usa.sub = F)
# Monthly-average meteorology (named lists of per-month rasters; these names,
# e.g. X2005.01.01, are reused as the month keys for every other dataset below).
mets2005.m <- usa.functioner( 2005, list.met, dataset = 'NARR', avg.period = 'month', return.usa.sub = F)
mets2006.m <- usa.functioner( 2006, list.met, dataset = 'NARR', avg.period = 'month', return.usa.sub = F)
mets2011.m <- usa.functioner( 2011, list.met, dataset = 'NARR', avg.period = 'month', return.usa.sub = F)
# combine monthly rasters into single list
mets.m.all <- append( append( mets2005.m, mets2006.m), mets2011.m)
#======================================================================#
## Load ddm as month
#======================================================================#
# CMAQ-DDM coal-impact PM2.5 surfaces, averaged by month (ddm_to_zip is from
# the sourced helper file).
ddm2005.m <- ddm_to_zip( ddm_coal_file = '~//Dropbox/Harvard/RFMeval_Local/CMAQ_DDM/COAL_impacts_2005_update.csv',
                         Year = 2005, avg.period = 'month')
ddm2006.m <- ddm_to_zip( ddm_coal_file = '~//Dropbox/Harvard/RFMeval_Local/CMAQ_DDM/COAL_impacts_2006_update.csv',
                         Year = 2006, avg.period = 'month')
# Reuse the met month keys so all monthly stacks are indexed identically.
names( ddm2005.m) <- names( mets2005.m)
names( ddm2006.m) <- names( mets2006.m)
# combine into single list
ddm.m.all <- stack( ddm2005.m, ddm2006.m)
#======================================================================#
## Load ddm as annual
#======================================================================#
# Annual averages (default avg.period); no 2011 DDM file is available.
ddm2005 <- ddm_to_zip( ddm_coal_file = '~//Dropbox/Harvard/RFMeval_Local/CMAQ_DDM/COAL_impacts_2005_update.csv',
                       Year = 2005)
ddm2006 <- ddm_to_zip( ddm_coal_file = '~//Dropbox/Harvard/RFMeval_Local/CMAQ_DDM/COAL_impacts_2006_update.csv',
                       Year = 2006)
names( ddm2005) <- 'cmaq.ddm'
names( ddm2006) <- 'cmaq.ddm'
#======================================================================#
## Load monthly hyads
#======================================================================#
# read monthly grid files
# HyADS exposure on the model grid, one row per cell-month ('yearmonth' key).
hyads2005.m.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/HyADS_grid/gridexposures/HyADS_grid_month_nopbl2005.csv', drop = 'V1')
hyads2006.m.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/HyADS_grid/gridexposures/HyADS_grid_month_nopbl2006.csv', drop = 'V1')
hyads2011.m.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/HyADS_grid/gridexposures/HyADS_grid_month_nopbl2011.csv', drop = 'V1')
# create lists from monthly grid objects
hyads2005.m.l <- split( hyads2005.m.dt, by = 'yearmonth')
hyads2006.m.l <- split( hyads2006.m.dt, by = 'yearmonth')
hyads2011.m.l <- split( hyads2011.m.dt, by = 'yearmonth')
# Rename list elements to the shared met month keys (assumes split() yields
# the months in calendar order — TODO confirm for each input file).
names( hyads2005.m.l) <- names( mets2005.m)
names( hyads2006.m.l) <- names( mets2006.m)
names( hyads2011.m.l) <- names( mets2011.m)
# create lists of monthly rasters
# Convert one month of gridded HyADS exposure (a data.table with x, y and
# hyads columns) into a raster layer on the project CRS.  Grid cells with no
# HyADS contribution come through as NA and are recoded to zero exposure.
HyADSrasterizer <- function( X){
  hyads.xyz  <- X[, .( x, y, hyads)]
  hyads.rast <- rasterFromXYZ( hyads.xyz, crs = p4s)
  # missing cells mean "no contribution", not "unknown"
  hyads.rast[is.na( hyads.rast)] <- 0
  hyads.rast
}
# Rasterize each month's HyADS grid.
hyads2005.m <- lapply( hyads2005.m.l, HyADSrasterizer)
hyads2006.m <- lapply( hyads2006.m.l, HyADSrasterizer)
hyads2011.m <- lapply( hyads2011.m.l, HyADSrasterizer)
# combine into single list
hyads.m.all <- stack( stack( hyads2005.m), stack( hyads2006.m), stack( hyads2011.m))
#======================================================================#
## Load annual hyads
#======================================================================#
# Annual HyADS exposure grids, rasterized the same way (no NA->0 recode here;
# presumably the annual files are complete — TODO confirm).
hyads2005.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/HyADS_grid/gridexposures/HyADS_grid_annual_nopbl_2005.csv', drop = 'V1')
hyads2006.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/HyADS_grid/gridexposures/HyADS_grid_annual_nopbl_2006.csv', drop = 'V1')
hyads2011.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/HyADS_grid/gridexposures/HyADS_grid_annual_nopbl_2011.csv', drop = 'V1')
hyads2005 <- rasterFromXYZ( hyads2005.dt[, .( x, y, hyads)], crs = p4s)
hyads2006 <- rasterFromXYZ( hyads2006.dt[, .( x, y, hyads)], crs = p4s)
hyads2011 <- rasterFromXYZ( hyads2011.dt[, .( x, y, hyads)], crs = p4s)
## ========================================================= ##
##  Read in emissions data
## ========================================================= ##
d_cems_cmaq.f <- "~/Dropbox/Harvard/RFMeval_Local/CMAQ_DDM/COAL IMPACTS/INVENTORY/CEM/2005_cemsum.txt"
d_nonegu.f <- "~/Dropbox/Harvard/RFMeval_Local/CMAQ_DDM/COAL IMPACTS/INVENTORY/NONEGU COAL/ptinv_ptnonipm_xportfrac_cap2005v2_2005cs_orl_06jan2011_v4_orl_COAL.txt"
d_cmaq <- fread( d_cems_cmaq.f)
# fread's string `skip` fast-forwards to the first line containing that text;
# here "06029" marks the first data row and "FIPS,PLANTID," the header row.
d_nonegu <- fread( d_nonegu.f, skip = "06029", header = F)[,1:63]
d_nonegu.names <- unlist( fread( d_nonegu.f, skip = 'FIPS,PLANTID,', header = F, nrows = 1))
names( d_nonegu) <- d_nonegu.names
# Keep only SO2 emissions: location (lon/lat) and annual tons.
d_nonegu.slim <- d_nonegu[ POLCODE == 'SO2', .( XLOC, YLOC, ANN_EMIS)]
## Convert to spatial object, take over CMAQ raster
d_nonegu.sp <- SpatialPointsDataFrame( d_nonegu.slim[, .( XLOC, YLOC)],
                                       data.frame( d_nonegu.slim[, ANN_EMIS]),
                                       proj4string = CRS( "+proj=longlat +datum=WGS84 +no_defs"))
d_nonegu.sp <- spTransform( d_nonegu.sp, CRS( p4s))
# Rasterize point emissions onto the CMAQ grid; the awkward layer name is the
# auto-mangled column name from the data.frame() call above.
d_nonegu.r <- rasterize( d_nonegu.sp, ddm.m.all)$d_nonegu.slim...ANN_EMIS.
d_nonegu.r[is.na(d_nonegu.r[])] <- 0
## ========================================================= ##
##  Source inverse distance weighted raster
## ========================================================= ##
# Monthly SOx inverse-distance-weighted exposure (IDWE) grid, keyed by yearmon.
idwe.m.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/RData/ampd_dists_sox_weighted.csv', drop = 'V1')
idwe.m.l <- split( idwe.m.dt, by = 'yearmon')
# create lists of monthly rasters
# Convert one month of SOx inverse-distance-weighted exposure (a data.table
# with x, y and tot.sum columns) into a raster layer on the project CRS,
# recoding empty cells (NA) to zero exposure.
IDWErasterizer <- function( X){
  idwe.xyz  <- X[, .( x, y, tot.sum)]
  idwe.rast <- rasterFromXYZ( idwe.xyz, crs = p4s)
  # empty cells mean "no contribution", not "unknown"
  idwe.rast[is.na( idwe.rast)] <- 0
  idwe.rast
}
idwe.m <- stack( lapply( idwe.m.l, IDWErasterizer))
# Share the HyADS month keys (requires the same 36-month coverage/order —
# TODO confirm the IDWE file spans the same months).
names( idwe.m) <- names( hyads.m.all)
## ========================================================= ##
##  SOx inverse distance by year
## ========================================================= ##
idwe2005.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/RData/ampd_dists_sox_weighted_2005_total.csv', drop = 'V1')
idwe2006.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/RData/ampd_dists_sox_weighted_2006_total.csv', drop = 'V1')
idwe2011.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/RData/ampd_dists_sox_weighted_2011_total.csv', drop = 'V1')
idwe2005 <- rasterFromXYZ( idwe2005.dt, crs = p4s)
idwe2006 <- rasterFromXYZ( idwe2006.dt, crs = p4s)
idwe2011 <- rasterFromXYZ( idwe2011.dt, crs = p4s)
names( idwe2005) <- 'idwe'
names( idwe2006) <- 'idwe'
names( idwe2011) <- 'idwe'
# Relative 2005 -> 2006 change for each exposure metric (sanity check).
summary(( hyads2006 - hyads2005) / hyads2005)
summary(( ddm2006 - ddm2005) / ddm2005)
summary(( idwe2006 - idwe2005) / idwe2005)
## ========================================================= ##
##  Plots
## ========================================================= ##
# get usa mask for masking
# download USA polygon from rnaturalearth
# Lower-48 state outlines projected to the grid CRS, used to clip all rasters.
us_states.names <- state.abb[!(state.abb %in% c( 'HI', 'AK'))]
us_states <- st_transform( USAboundaries::us_states(), p4s)
mask.usa <- sf::as_Spatial(us_states)[ us_states$state_abbr %in% us_states.names,]
# Quick-look maps of the year-over-year relative changes.
plot( ( hyads2006 - hyads2005) / hyads2005)
plot(mask.usa, add = T)
plot( (( ddm2006 - ddm2005) / ddm2005))
plot( (( idwe2006 - idwe2005) / idwe2005))
plot( hyads.m.all$X2005.07.01)
plot(mask.usa, add = T)
plot( idwe.m$X2005.07.01)
plot(mask.usa, add = T)
plot( ddm.m.all$X2005.06.01)
plot(mask.usa, add = T)
# plot( data.table( values( project_and_stack( hyads.m.all$X2005.08.01, ddm.m.all$X2005.08.01))))
# plot( data.table( values( project_and_stack( hyads.m.all$X2005.12.01, ddm.m.all$X2005.12.01))))
# plot( data.table( values( project_and_stack( idwe.m$X2005.12.01, ddm.m.all$X2005.12.01))))
#======================================================================#
# stack up and project annual data
#======================================================================#
# Align all annual layers onto one grid, masked to the lower 48.
dats2005.a <- project_and_stack( ddm2005, hyads2005, idwe2005,
                                 mets2005, d_nonegu.r, mask.use = mask.usa)
dats2006.a <- project_and_stack( ddm2006, hyads2006, idwe2006,
                                 mets2006, d_nonegu.r, mask.use = mask.usa)
# No 2011 CMAQ-DDM exists: the 2006 surface is used as a grid placeholder and
# the layer is immediately blanked out below.
dats2011.a <- project_and_stack( ddm2006, hyads2011, idwe2011,
                                 mets2011, d_nonegu.r, mask.use = mask.usa)
dats2011.a$cmaq.ddm <- NA
# Year-over-year differences and cross-metric correlations (sanity checks).
summary( dats2006.a - dats2005.a)
summary( dats2011.a - dats2005.a)
cor( values( dats2005.a), use = 'complete.obs')
cor( values( dats2006.a), use = 'complete.obs')
dats2005.v <- data.table( values( dats2005.a))
dats2006.v <- data.table( values( dats2006.a))
plot( dats2005.v[, .(cmaq.ddm, hyads, idwe)])
plot( dats2006.v[, .(cmaq.ddm, hyads, idwe)])
# Map cells where HyADS is very high but CMAQ-DDM is low (outlier region).
plot( dats2005.a$cmaq.ddm < 1.2 & dats2005.a$hyads > 1.5e8)
plot( dats2006.a$cmaq.ddm < 1.2 & dats2006.a$hyads > 1.5e8)
# Flag cells with low CMAQ-DDM but very high HyADS (apparent outliers) and
# re-examine correlations with them excluded.
d2005.red <- which( dats2005.v$cmaq.ddm < 1.2 & dats2005.v$hyads > 1.5e8)
plot( dats2005.v[d2005.red,.(cmaq.ddm, hyads, idwe)], col = 'red')
# BUGFIX: d2005.red holds positive row indices from which(); `!d2005.red`
# coerces them to logical (every nonzero index -> TRUE -> FALSE after `!`),
# so `dats2005.v[!d2005.red]` selected zero rows.  Negative indexing drops
# exactly the flagged rows.  (Edge case: if d2005.red is empty, both forms
# select zero rows — acceptable for this interactive check.)
cor( dats2005.v[-d2005.red], use = 'complete.obs')
plot( dats2005.v[-d2005.red,.(cmaq.ddm, hyads, idwe)])
#======================================================================#
## Combine into raster stack, train model
#======================================================================#
# Meteorological covariates used by every model fit below.
cov.names = c( "temp", "rhum", "vwnd", "uwnd", "wspd")
# predict each month in 2006 using model trained in 2005
# mapply pairs each 2005 month (training) with the same 2006 month
# (prediction); results are matrices whose columns are named by the FIRST
# argument, i.e. the X2005.* month keys.
preds.mon.hyads06w05 <- mapply( month.trainer, names( mets2005.m), names( mets2006.m),
                                MoreArgs = list( name.x = 'hyads', y.m = hyads.m.all,
                                                 ddm.m = ddm.m.all, mets.m = mets.m.all,
                                                 idwe.m = idwe.m, emiss.m = d_nonegu.r,
                                                 .mask.use = mask.usa, cov.names = cov.names))
preds.mon.idwe06w05 <- mapply( month.trainer, names( mets2005.m), names( mets2006.m),
                               MoreArgs = list( name.x = 'idwe', y.m = idwe.m,
                                                ddm.m = ddm.m.all, mets.m = mets.m.all,
                                                idwe.m = idwe.m, emiss.m = d_nonegu.r,
                                                .mask.use = mask.usa, cov.names = cov.names))
# predict each month in 2006 using model trained in 2005
# NOTE(review): the reverse-direction (05w06) fits are commented out here but
# several plotting sections below still reference them — those sections will
# error unless the objects exist from a previous run.
# preds.mon.hyads05w06 <- mapply( month.trainer, names( mets2006.m), names( mets2005.m),
#                                 MoreArgs = list( name.x = 'hyads', y.m = hyads.m.all,
#                                                  ddm.m = ddm.m.all, mets.m = mets.m.all,
#                                                  idwe.m = idwe.m, emiss.m = d_nonegu.r,
#                                                  .mask.use = mask.usa, cov.names = cov.names))
# preds.mon.idwe05w06 <- mapply( month.trainer, names( mets2006.m), names( mets2005.m),
#                                MoreArgs = list( name.x = 'idwe', y.m = idwe.m,
#                                                 ddm.m = ddm.m.all, mets.m = mets.m.all,
#                                                 idwe.m = idwe.m, emiss.m = d_nonegu.r,
#                                                 .mask.use = mask.usa, cov.names = cov.names))
# predict annual 2006 using model trained in 2005
preds.ann.hyads06w05 <- lm.hyads.ddm.holdout( dat.stack = dats2005.a, dat.stack.pred = dats2006.a, name.idwe = 'idwe', x.name = 'hyads',
                                              ho.frac = 0, covars.names = cov.names, return.mods = T)
preds.ann.idwe06w05 <- lm.hyads.ddm.holdout( dat.stack = dats2005.a, dat.stack.pred = dats2006.a, name.idwe = 'idwe', x.name = 'idwe',
                                             ho.frac = 0, covars.names = cov.names, return.mods = T)
# predict annual 2006 using model trained in 2005
# preds.ann.hyads05w06 <- lm.hyads.ddm.holdout( dat.stack = dats2006.a, dat.stack.pred = dats2005.a, name.idwe = 'idwe', x.name = 'hyads',
#                                               ho.frac = 0, covars.names = cov.names, return.mods = T)
# preds.ann.idwe05w06 <- lm.hyads.ddm.holdout( dat.stack = dats2006.a, dat.stack.pred = dats2005.a, name.idwe = 'idwe', x.name = 'idwe',
#                                              ho.frac = 0, covars.names = cov.names, return.mods = T)
# predict annual 2006 using model trained in 2005 - include inverse distance
# preds.ann.hyads06w05.i <- lm.hyads.ddm.holdout( dat.stack = dats2005.a, dat.stack.pred = dats2006.a,
#                                                 ho.frac = 0, covars.names = c( cov.names, 'idwe'), return.mods = T)
#======================================================================#
## Save data
#======================================================================#
# annual stacks,
# monthly stacks
# annual model
# monthly models
# Persist everything needed to rebuild figures without re-fitting (the 05w06
# objects stay commented out to match the commented-out fits above).
save( dats2005.a, dats2006.a, dats2011.a,
      hyads.m.all, ddm.m.all, mets.m.all,
      idwe.m, d_nonegu.r,
      preds.mon.hyads06w05, #preds.mon.hyads05w06,
      preds.mon.idwe06w05, #preds.mon.idwe05w06,
      preds.ann.hyads06w05, #preds.ann.hyads05w06,
      preds.ann.idwe06w05, #preds.ann.idwe05w06,
      file = '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/RData/hyads_to_cmaq_models3.RData')
# do correlation comparisons on quintiles
# scale all 3 on their Z score scale
#======================================================================#
## Annual plots
#======================================================================#
# Side-by-side annual predictions: linear model vs GAM, HyADS vs IDWE inputs.
ggplot.a.raster( preds.ann.hyads06w05$Y.ho.hat.raster$y.hat.lm.cv,
                 preds.ann.hyads06w05$Y.ho.hat.raster$y.hat.gam.cv,
                 preds.ann.idwe06w05$Y.ho.hat.raster$y.hat.lm.cv,
                 preds.ann.idwe06w05$Y.ho.hat.raster$y.hat.gam.cv,
                 ncol. = 2, facet.names = c( 'lm - hyads', 'gam - hyads',
                                             'lm - idwe', 'gam - idwe'),
                 mask.raster = mask.usa)
# preds.ann.hyads06w05$metrics
# preds.ann.idwe06w05$metrics
#======================================================================#
## Extract data, summarize, and plot
#======================================================================#
## things we should show by month
# r (or R^2)
# spatial map of error by month
# each month's holdout?
# plot contributions of inputs
# NOTE(review): everything in this section references preds.mon.hyads05w06 /
# preds.mon.idwe05w06, whose creation is commented out above — these plots
# error unless those objects are restored from an earlier session.
gg_out <- ggplot.a.raster( subset( ddm.m.all, 'X2005.07.01'),
                           preds.mon.hyads05w06 ['Y.ho.hat.raster','X2006.07.01'][[1]]$y.hat.gam.cv,
                           preds.mon.idwe05w06  ['Y.ho.hat.raster','X2006.07.01'][[1]]$y.hat.gam.cv,
                           mask.raster = mask.usa, facet.names = c( 'CMAQ', 'HyADS', 'IDWE'),
                           bounds = c( 0,8), ncol. = 1)
ggsave( '~/Dropbox/Harvard/Meetings_and_People/CMAS_2019/HyADS_pred_model_July.png', gg_out,
        height = 8, width = 3.5, scale = .7)
# plots of monthly predictions
# One facet per month of 2006 GAM predictions (HyADS-based model).
ggplot.a.raster( preds.mon.hyads05w06['Y.ho.hat.raster','X2006.01.01'][[1]]$y.hat.gam.cv,
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.02.01'][[1]]$y.hat.gam.cv,
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.03.01'][[1]]$y.hat.gam.cv,
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.04.01'][[1]]$y.hat.gam.cv,
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.05.01'][[1]]$y.hat.gam.cv,
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.06.01'][[1]]$y.hat.gam.cv,
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.07.01'][[1]]$y.hat.gam.cv,
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.08.01'][[1]]$y.hat.gam.cv,
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.09.01'][[1]]$y.hat.gam.cv,
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.10.01'][[1]]$y.hat.gam.cv,
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.11.01'][[1]]$y.hat.gam.cv,
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.12.01'][[1]]$y.hat.gam.cv,
                 bounds = c( 0,8), ncol. = 3, facet.names = month.name,
                 mask.raster = mask.usa)
# Monthly linear-model predictions from the IDWE-based model, one facet per
# 2005 month.
# BUGFIX: the original referenced `preds.mon.idwe`, which is never defined in
# this script.  The monthly IDWE prediction matrix is `preds.mon.idwe06w05`,
# whose columns are keyed by the 2005 month names (X2005.*) used here.
ggplot.a.raster( preds.mon.idwe06w05['Y.ho.hat.raster','X2005.01.01'][[1]]$y.hat.lm.cv,
                 preds.mon.idwe06w05['Y.ho.hat.raster','X2005.02.01'][[1]]$y.hat.lm.cv,
                 preds.mon.idwe06w05['Y.ho.hat.raster','X2005.03.01'][[1]]$y.hat.lm.cv,
                 preds.mon.idwe06w05['Y.ho.hat.raster','X2005.04.01'][[1]]$y.hat.lm.cv,
                 preds.mon.idwe06w05['Y.ho.hat.raster','X2005.05.01'][[1]]$y.hat.lm.cv,
                 preds.mon.idwe06w05['Y.ho.hat.raster','X2005.06.01'][[1]]$y.hat.lm.cv,
                 preds.mon.idwe06w05['Y.ho.hat.raster','X2005.07.01'][[1]]$y.hat.lm.cv,
                 preds.mon.idwe06w05['Y.ho.hat.raster','X2005.08.01'][[1]]$y.hat.lm.cv,
                 preds.mon.idwe06w05['Y.ho.hat.raster','X2005.09.01'][[1]]$y.hat.lm.cv,
                 preds.mon.idwe06w05['Y.ho.hat.raster','X2005.10.01'][[1]]$y.hat.lm.cv,
                 preds.mon.idwe06w05['Y.ho.hat.raster','X2005.11.01'][[1]]$y.hat.lm.cv,
                 preds.mon.idwe06w05['Y.ho.hat.raster','X2005.12.01'][[1]]$y.hat.lm.cv,
                 bounds = c( 0,6), ncol. = 3, facet.names = month.name,
                 mask.raster = mask.usa)
# plots of monthly error
# Ratio of predicted (GAM, HyADS model) to CMAQ-DDM by month.
# NOTE(review): preds.mon.hyads05w06 / preds.mon.idwe05w06 are referenced but
# commented out above; these plots require them from a prior session.
ggplot.a.raster( preds.mon.hyads05w06['Y.ho.hat.raster','X2006.01.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.01.01'),
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.02.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.02.01'),
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.03.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.03.01'),
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.04.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.04.01'),
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.05.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.05.01'),
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.06.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.06.01'),
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.07.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.07.01'),
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.08.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.08.01'),
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.09.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.09.01'),
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.10.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.10.01'),
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.11.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.11.01'),
                 preds.mon.hyads05w06['Y.ho.hat.raster','X2006.12.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.12.01'),
                 bounds = c( -2,2), ncol. = 3, facet.names = month.name,
                 mask.raster = mask.usa)
# Same ratio for the IDWE-based model.
ggplot.a.raster( preds.mon.idwe05w06['Y.ho.hat.raster','X2006.01.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.01.01'),
                 preds.mon.idwe05w06['Y.ho.hat.raster','X2006.02.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.02.01'),
                 preds.mon.idwe05w06['Y.ho.hat.raster','X2006.03.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.03.01'),
                 preds.mon.idwe05w06['Y.ho.hat.raster','X2006.04.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.04.01'),
                 preds.mon.idwe05w06['Y.ho.hat.raster','X2006.05.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.05.01'),
                 preds.mon.idwe05w06['Y.ho.hat.raster','X2006.06.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.06.01'),
                 preds.mon.idwe05w06['Y.ho.hat.raster','X2006.07.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.07.01'),
                 preds.mon.idwe05w06['Y.ho.hat.raster','X2006.08.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.08.01'),
                 preds.mon.idwe05w06['Y.ho.hat.raster','X2006.09.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.09.01'),
                 preds.mon.idwe05w06['Y.ho.hat.raster','X2006.10.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.10.01'),
                 preds.mon.idwe05w06['Y.ho.hat.raster','X2006.11.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.11.01'),
                 preds.mon.idwe05w06['Y.ho.hat.raster','X2006.12.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.12.01'),
                 bounds = c( -2,2), ncol. = 3, facet.names = month.name,
                 mask.raster = mask.usa)
# Bias relative to CMAQ-DDM (note the different element: Y.ho.hat.bias.raster).
ggplot.a.raster( preds.mon.hyads05w06['Y.ho.hat.bias.raster','X2006.01.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.01.01'),
                 preds.mon.hyads05w06['Y.ho.hat.bias.raster','X2006.02.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.02.01'),
                 preds.mon.hyads05w06['Y.ho.hat.bias.raster','X2006.03.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.03.01'),
                 preds.mon.hyads05w06['Y.ho.hat.bias.raster','X2006.04.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.04.01'),
                 preds.mon.hyads05w06['Y.ho.hat.bias.raster','X2006.05.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.05.01'),
                 preds.mon.hyads05w06['Y.ho.hat.bias.raster','X2006.06.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.06.01'),
                 preds.mon.hyads05w06['Y.ho.hat.bias.raster','X2006.07.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.07.01'),
                 preds.mon.hyads05w06['Y.ho.hat.bias.raster','X2006.08.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.08.01'),
                 preds.mon.hyads05w06['Y.ho.hat.bias.raster','X2006.09.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.09.01'),
                 preds.mon.hyads05w06['Y.ho.hat.bias.raster','X2006.10.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.10.01'),
                 preds.mon.hyads05w06['Y.ho.hat.bias.raster','X2006.11.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.11.01'),
                 preds.mon.hyads05w06['Y.ho.hat.bias.raster','X2006.12.01'][[1]]$y.hat.gam.cv / subset( ddm.m.all, 'X2005.12.01'),
                 bounds = c( -1,1), ncol. = 3, facet.names = month.name,
                 mask.raster = mask.usa)
#======================================================================#
## Check out the covariates
#======================================================================#
# plots of monthly covariate contributions
# average of each covariate over the year
# spatial plots of just HyADS/spline covariates over the year
# Names of the GAM term layers; the HyADS- (or tot.sum-) related terms plus
# the bivariate spatial spline s(x,y).
covs.all <- names( preds.ann.hyads06w05$Y.ho.terms.gam.raster)
covs.all.idwe <- names( preds.ann.idwe06w05$Y.ho.terms.gam.raster)
covs.hyads <- covs.all[grep( 'hyads', covs.all)]
covs.hyads.s <- c( covs.hyads, "s.x.y.")
covs.tot.sum <- covs.all.idwe[grep( 'tot.sum', covs.all.idwe)]
covs.tot.sum.s <- c( covs.tot.sum, "s.x.y.")
covs.idwe <- gsub( 'tot.sum', 'idwe', covs.tot.sum)
covs.idwe.s <- gsub( 'tot.sum', 'idwe', covs.tot.sum.s)
# plot hyads related covariates for different years
# NOTE(review): relies on preds.ann.*05w06, which are commented out above.
ggplot.a.raster( unstack( stack( subset( preds.ann.hyads06w05$Y.ho.terms.gam.raster, covs.hyads.s),
                                 subset( preds.ann.hyads05w06$Y.ho.terms.gam.raster, covs.hyads.s))),
                 bounds = c( -4,4), ncol. = 7, mask.raster = mask.usa,
                 facet.names = paste( covs.hyads.s, rep( c( '06w05', '05w06'), each = 7)))
ggplot.a.raster( unstack( stack( subset( preds.ann.idwe06w05$Y.ho.terms.gam.raster, covs.tot.sum.s),
                                 subset( preds.ann.idwe05w06$Y.ho.terms.gam.raster, covs.tot.sum.s))),
                 bounds = c( -4,4), ncol. = 7, mask.raster = mask.usa,
                 facet.names = paste( covs.tot.sum.s, rep( c( '06w05', '05w06'), each = 7)))
# sum all hyads/tot.sum contributions
# Total exposure-term contribution vs the spatial spline, for both directions
# of training and both exposure metrics.
hyads_gamters <- stack( sum( subset( preds.ann.hyads06w05$Y.ho.terms.gam.raster, covs.hyads)),
                        subset( preds.ann.hyads06w05$Y.ho.terms.gam.raster, 's.x.y.'),
                        sum( subset( preds.ann.hyads05w06$Y.ho.terms.gam.raster, covs.hyads)),
                        subset( preds.ann.hyads05w06$Y.ho.terms.gam.raster, 's.x.y.'),
                        sum( subset( preds.ann.idwe06w05$Y.ho.terms.gam.raster, covs.tot.sum)),
                        subset( preds.ann.idwe06w05$Y.ho.terms.gam.raster, 's.x.y.'),
                        sum( subset( preds.ann.idwe05w06$Y.ho.terms.gam.raster, covs.tot.sum)),
                        subset( preds.ann.idwe05w06$Y.ho.terms.gam.raster, 's.x.y.'))
ggplot.a.raster( unstack( hyads_gamters),
                 bounds = c( -4,4), ncol. = 2, mask.raster = mask.usa,
                 facet.names = c( paste( c( 'hyads', 'hyads s.x.y.'),
                                         rep( c( '06w05', '05w06'), each = 2)),
                                  paste( c( 'idwe', 'idwe s.x.y.'),
                                         rep( c( '06w05', '05w06'), each = 2))))
# plot hyads related covariates for different months
# Stack GAM term rasters across all months (columns of the prediction
# matrices are the month keys).
gamters.mon06w05 <- stack( lapply( colnames( preds.mon.hyads06w05),
                                   function( x) {
                                     subset( preds.mon.hyads06w05['Y.ho.terms.gam.raster', x][[1]], covs.hyads)
                                   }))
gamters.mon06w05.i <- stack( lapply( colnames( preds.mon.idwe06w05),
                                     function( x) {
                                       subset( preds.mon.idwe06w05['Y.ho.terms.gam.raster', x][[1]], covs.idwe)
                                     }))
# NOTE(review): preds.mon.hyads05w06 is commented out above — this errors
# unless it exists from an earlier run.
gamters.mon05w06 <- stack( lapply( colnames( preds.mon.hyads05w06),
                                   function( x) {
                                     subset( preds.mon.hyads05w06['Y.ho.terms.gam.raster', x][[1]], covs.hyads)
                                   }))
# Facet labels; `each = 7` assumes 7 exposure-related terms per month —
# TODO confirm length( covs.hyads) == 7.
names.gamters <- paste( covs.hyads, rep( month.abb, each = 7))
names.gamters.i <- paste( covs.idwe, rep( month.abb, each = 7))
ggplot.a.raster( unstack( gamters.mon06w05),
                 bounds = c( -4,4), ncol. = 7, mask.raster = mask.usa,
                 facet.names = names.gamters)
ggplot.a.raster( unstack( gamters.mon06w05.i),
                 bounds = c( -4,4), ncol. = 7, mask.raster = mask.usa,
                 facet.names = names.gamters.i)
ggplot.a.raster( unstack( gamters.mon05w06),
                 bounds = c( -4,4), ncol. = 7, mask.raster = mask.usa,
                 facet.names = names.gamters)
# sum all hyads/tot.sum contributions
# Per month: total exposure-term contribution plus the spatial spline term.
gamters.mon06w05.hyadssum <- stack( lapply( colnames( preds.mon.hyads06w05),
                                            function( x) {
                                              stack( sum( subset( preds.mon.hyads06w05['Y.ho.terms.gam.raster', x][[1]], covs.hyads)),
                                                     subset( preds.mon.hyads06w05['Y.ho.terms.gam.raster', x][[1]], 's.x.y.'))
                                            }))
gamters.mon06w05.idwesum <- stack( lapply( colnames( preds.mon.idwe06w05),
                                           function( x) {
                                             stack( sum( subset( preds.mon.idwe06w05['Y.ho.terms.gam.raster', x][[1]], covs.idwe)),
                                                    subset( preds.mon.idwe06w05['Y.ho.terms.gam.raster', x][[1]], 's.x.y.'))
                                           }))
names.gamters.hy <- paste( c( 'hyads', 's.x.y.'), rep( month.abb, each = 2))
names.gamters.is <- paste( c( 'idwe', 's.x.y.'), rep( month.abb, each = 2))
ggplot.a.raster( unstack( gamters.mon06w05.hyadssum),
                 bounds = c( -4,4), ncol. = 4, mask.raster = mask.usa,
                 facet.names = names.gamters.hy)
ggplot.a.raster( unstack( gamters.mon06w05.idwesum),
                 bounds = c( -4,4), ncol. = 4, mask.raster = mask.usa,
                 facet.names = names.gamters.is)
# Linear-model and GAM term maps, annual then selected months.
ggplot.a.raster( preds.ann.hyads06w05$Y.ho.terms.raster,
                 bounds = c( -4,4), ncol. = 5, mask.raster = mask.usa,
                 facet.names = names( preds.ann.hyads06w05$Y.ho.terms.raster))
ggplot.a.raster( preds.ann.hyads06w05$Y.ho.terms.gam.raster,
                 bounds = c( -4,4), ncol. = 5, mask.raster = mask.usa,
                 facet.names = names( preds.ann.hyads06w05$Y.ho.terms.gam.raster))
ggplot.a.raster( preds.mon.hyads06w05['Y.ho.terms.raster','X2005.01.01'][[1]],
                 bounds = c( -4,4), ncol. = 4, mask.raster = mask.usa,
                 facet.names = names( preds.mon.hyads06w05['Y.ho.terms.raster','X2005.01.01'][[1]]))
ggplot.a.raster( preds.mon.hyads06w05['Y.ho.terms.raster','X2005.07.01'][[1]],
                 bounds = c( -4,4), ncol. = 4, mask.raster = mask.usa,
                 facet.names = names( preds.mon.hyads06w05['Y.ho.terms.raster','X2005.07.01'][[1]]))
ggplot.a.raster( preds.mon.hyads06w05['Y.ho.terms.gam.raster','X2005.01.01'][[1]],
                 bounds = c( -4,4), ncol. = 4, mask.raster = mask.usa,
                 facet.names = names( preds.mon.hyads06w05['Y.ho.terms.gam.raster','X2005.01.01'][[1]]))
ggplot.a.raster( preds.mon.hyads05w06['Y.ho.terms.gam.raster','X2006.07.01'][[1]],
                 bounds = c( -4,4), ncol. = 4, mask.raster = mask.usa,
                 facet.names = names( preds.mon.hyads05w06['Y.ho.terms.gam.raster','X2006.07.01'][[1]]))
ggplot.a.raster( preds.mon.idwe05w06['Y.ho.terms.gam.raster','X2006.07.01'][[1]],
                 bounds = c( -4,4), ncol. = 4, mask.raster = mask.usa,
                 facet.names = names( preds.mon.idwe05w06['Y.ho.terms.gam.raster','X2006.07.01'][[1]]))
#======================================================================#
## Plot the metrics
#======================================================================#
## extract evaluation statistics
## IDWE gets big change from bivariate spline, HyADS does not
preds.metrics.hyads <- preds.mon.hyads06w05[ 'metrics',]
preds.metrics.idwe <- preds.mon.idwe06w05[ 'metrics',]
# Long table of monthly evaluation metrics for both models (HyADS vs IDWE)
# and both fit classes (gam vs lm).
# NOTE(review): `dt[...]$R^2` parses as (dt$R)^2 because $ binds tighter than
# ^ — i.e. the stored correlation column 'R' squared.  Confirm the metrics
# data.table actually has a column named 'R' (not 'R^2').
metrics <- data.table( month = c( as.Date( gsub( '\\.', '-', gsub( 'X', '', names( preds.metrics.hyads)))),
                                  as.Date( gsub( '\\.', '-', gsub( 'X', '', names( preds.metrics.idwe))))),
                       model = c( rep( 'hyads', length( names( preds.metrics.hyads))),
                                  rep( 'idwe', length( names( preds.metrics.idwe)))),
                       class = c( rep( 'gam', 2 * length( names( preds.metrics.hyads))),
                                  rep( 'lm', 2 * length( names( preds.metrics.idwe)))),
                       'R^2' = c( sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'gam.cv']$R^2),
                                  sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'gam.cv']$R^2),
                                  sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'lm.cv']$R^2),
                                  sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'lm.cv']$R^2)),
                       NMB = c( sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'gam.cv']$NMB),
                                sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'gam.cv']$NMB),
                                sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'lm.cv']$NMB),
                                sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'lm.cv']$NMB)),
                       NME = c( sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'gam.cv']$NME),
                                sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'gam.cv']$NME),
                                sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'lm.cv']$NME),
                                sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'lm.cv']$NME)),
                       RMSE = c( sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'gam.cv']$RMSE),
                                 sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'gam.cv']$RMSE),
                                 sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'lm.cv']$RMSE),
                                 sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'lm.cv']$RMSE)))
metrics.m <- melt( metrics, id.vars = c( 'model', 'month', 'class'), variable.name = 'metric')
# Metric time series by month, faceted by metric.
ggplot( data = metrics.m,
        aes( x = month, y = value, lty = class, color = model)) +
  geom_line() + geom_point() +
  facet_wrap( . ~ metric, scales = 'free_y', ncol = 1, labeller = label_parsed) +
  expand_limits( y = 0)
# metrics - adj. Z score, no model
# Same table for the no-model "adjusted Z score" baseline.
metrics.Z.only <- data.table( month = c( as.Date( gsub( '\\.', '-', gsub( 'X', '', names( preds.metrics.hyads)))),
                                         as.Date( gsub( '\\.', '-', gsub( 'X', '', names( preds.metrics.idwe))))),
                              model = c( rep( 'hyads', length( names( preds.metrics.hyads))),
                                         rep( 'idwe', length( names( preds.metrics.idwe)))),
                              'R^2' = c( sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'adj.Z.only']$R^2),
                                         sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'adj.Z.only']$R^2)),
                              NMB = c( sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'adj.Z.only']$NMB),
                                       sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'adj.Z.only']$NMB)),
                              NME = c( sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'adj.Z.only']$NME),
                                       sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'adj.Z.only']$NME)),
                              RMSE = c( sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'adj.Z.only']$RMSE),
                                        sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'adj.Z.only']$RMSE)))
metrics.Z.only.m <- melt( metrics.Z.only, id.vars = c( 'model', 'month'), variable.name = 'metric')
ggplot( data = metrics.Z.only.m,
        aes( x = month, y = value, group = model, color = model)) +
  geom_line() + geom_point() +
  facet_wrap( . ~ metric, scales = 'free_y', ncol = 1, labeller = label_parsed) +
  expand_limits( y = 0)
# extract linear model coefficients
#annual comparisons
#5 day avg time
#Check w/ sunni on month/annual etc
#======================================================================#
## Plot changes in evaluation in different areas
#======================================================================#
# Quantile-stratified evaluation statistics ('evals.q') from each monthly
# model, tagged by training direction.
# NOTE(review): the 05w06 objects are commented out above — this section
# errors unless they exist from an earlier run.
cors.keep.month.hyads.u05w06 <- rbindlist( preds.mon.hyads05w06['evals.q',], idcol = 'month')[, y := '05w06']
cors.keep.month.hyads.u06w05 <- rbindlist( preds.mon.hyads06w05['evals.q',], idcol = 'month')[, y := '06w05']
cors.keep.month.idwe.u05w06 <- rbindlist( preds.mon.idwe05w06['evals.q',], idcol = 'month')[, y := '05w06']
cors.keep.month.idwe.u06w05 <- rbindlist( preds.mon.idwe06w05['evals.q',], idcol = 'month')[, y := '06w05']
cors.keep.month <- rbind( cors.keep.month.hyads.u05w06, cors.keep.month.hyads.u06w05,
                          cors.keep.month.idwe.u05w06, cors.keep.month.idwe.u06w05)
cors.keep.m <- melt( cors.keep.month, id.vars = c( 'mod.name', 's', 'month', 'y'))
cors.keep.m[, month := month( as.Date( gsub( 'X', '', month), format = '%Y.%m.%d'))]
ggplot( data = cors.keep.m,
        aes( x = s, y = value, color = mod.name, lty = y)) +
  geom_hline( yintercept = 0) +
  facet_grid( variable ~ month, scales = 'free_y') + geom_line()
# plot annual evaluation across s
cors.keep.u06w05 <- rbind( preds.ann.hyads06w05$evals.q, preds.ann.idwe06w05$evals.q)[, y := '06w05']
cors.keep.u05w06 <- rbind( preds.ann.hyads05w06$evals.q, preds.ann.idwe05w06$evals.q)[, y := '05w06']
cors.keep.u <- rbind( cors.keep.u06w05, cors.keep.u05w06)
cors.keep.m <- melt( cors.keep.u, id.vars = c( 'mod.name', 's', 'y'))
ggplot( data = cors.keep.m,
        aes( x = s, y = value, color = mod.name, lty = y)) +
  geom_hline( yintercept = 0) +
  facet_wrap( . ~ variable, scales = 'free_y') + geom_line()
# need somehow to evaluate near vs far sources
# approximate this as high/low tot.sum
# says more about how emissions near sources are handled than
# anything else
# check out wind speed argument --- very key
# IDWE does better in years with slow windspeed?
# plot cmaq range at each s
# do MSE?
# Spearman correlation of CMAQ-DDM with HyADS and IDWE in the subset of cells
# below an increasing quantile cutoff s, for 2005 and 2006.
# Rewritten from rbind()-in-a-loop (quadratic copying) to rbindlist(lapply());
# output columns (s, hyads, idwe, year) and values are unchanged.
cors.keep <- rbindlist( lapply( 2005:2006, function( y){
  vals <- values( get( paste0( 'dats', y, '.a')))
  rbindlist( lapply( seq( 0.01, 1, .01), function( s){
    # NOTE(review): the cutoff q is a quantile of tot.sum but is applied to
    # cmaq.ddm below — confirm this cross-variable threshold is intentional.
    q <- quantile( vals[,'tot.sum'], s, na.rm = TRUE)
    cors <- cor( vals[vals[,'cmaq.ddm'] < q,], use = 'complete.obs', method = 'spearman')
    data.table( s = s, hyads = cors['cmaq.ddm', 'hyads'],
                idwe = cors['cmaq.ddm', 'tot.sum'], year = y)
  }))
}))
cors.keep.m <- melt( cors.keep, id.vars = c( 's', 'year'))
# Correlation-vs-cutoff curves for both metrics, one panel per year.
ggplot( data = cors.keep.m,
        aes( x = s, y = value, color = variable, group = variable)) +
  geom_line() + facet_wrap( year ~ ., ncol = 2)
# Cutoff at which the two metrics' correlations are closest.
cors.keep[which.min( abs( hyads - idwe))]
| /RCode/hyads_to_pm25_month.R | no_license | rcswiggy98/HyADS_to_pm25 | R | false | false | 38,273 | r | rm( list = ls())
# project helper functions (ddm_to_zip, usa.functioner, month.trainer,
# lm.hyads.ddm.holdout, ggplot.a.raster, project_and_stack, ...)
source( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/RCode/hyads_to_pm25_functions.R')
#coordinate reference system projection string for spatial data:
# Lambert conformal conic on a 6370-km sphere -- presumably the CMAQ/NARR
# grid definition; confirm against the model grid description
p4s <- "+proj=lcc +lat_1=33 +lat_2=45 +lat_0=40 +lon_0=-97 +a=6370000 +b=6370000"
#======================================================================#
## Load meteorology as list of months
#======================================================================#
#define the layer names, do the actual downloading
# NARR files use UTC timestamps; match the session time zone
Sys.setenv(TZ='UTC')
# NARR monthly means: 2-m air temperature, accumulated precipitation,
# 2-m relative humidity, and 10-m v/u wind components
layer.names <- c( "air.2m.mon.mean.nc",
                  "apcp.mon.mean.nc",
                  "rhum.2m.mon.mean.nc",
                  "vwnd.10m.mon.mean.nc",
                  "uwnd.10m.mon.mean.nc")
names( layer.names) <- c( "temp", "apcp", "rhum", "vwnd", "uwnd")
# do the data downloading
# set destination parameter to where you want the data downloaded,
# for example, destination = '~/Desktop'
list.met <- lapply( layer.names,
                    downloader.fn, #destination = '~/Desktop'
                    dataset = 'NARR')
# take over US
# annual averages for the three study years ...
mets2005 <- usa.functioner( 2005, list.met, dataset = 'NARR', return.usa.sub = F)
mets2006 <- usa.functioner( 2006, list.met, dataset = 'NARR', return.usa.sub = F)
mets2011 <- usa.functioner( 2011, list.met, dataset = 'NARR', return.usa.sub = F)
# ... and monthly averages for the same years
mets2005.m <- usa.functioner( 2005, list.met, dataset = 'NARR', avg.period = 'month', return.usa.sub = F)
mets2006.m <- usa.functioner( 2006, list.met, dataset = 'NARR', avg.period = 'month', return.usa.sub = F)
mets2011.m <- usa.functioner( 2011, list.met, dataset = 'NARR', avg.period = 'month', return.usa.sub = F)
# combine monthly rasters into single list
mets.m.all <- append( append( mets2005.m, mets2006.m), mets2011.m)
#======================================================================#
## Load ddm as month
#======================================================================#
# CMAQ-DDM coal impact surfaces (the evaluation target), monthly averages
ddm2005.m <- ddm_to_zip( ddm_coal_file = '~//Dropbox/Harvard/RFMeval_Local/CMAQ_DDM/COAL_impacts_2005_update.csv',
                         Year = 2005, avg.period = 'month')
ddm2006.m <- ddm_to_zip( ddm_coal_file = '~//Dropbox/Harvard/RFMeval_Local/CMAQ_DDM/COAL_impacts_2006_update.csv',
                         Year = 2006, avg.period = 'month')
# reuse the met layer names (X2005.01.01, ...) so all monthly stacks align
names( ddm2005.m) <- names( mets2005.m)
names( ddm2006.m) <- names( mets2006.m)
# combine into single list
ddm.m.all <- stack( ddm2005.m, ddm2006.m)
#======================================================================#
## Load ddm as annual
#======================================================================#
ddm2005 <- ddm_to_zip( ddm_coal_file = '~//Dropbox/Harvard/RFMeval_Local/CMAQ_DDM/COAL_impacts_2005_update.csv',
                       Year = 2005)
ddm2006 <- ddm_to_zip( ddm_coal_file = '~//Dropbox/Harvard/RFMeval_Local/CMAQ_DDM/COAL_impacts_2006_update.csv',
                       Year = 2006)
# single annual layer per year, named for use as the model response below
names( ddm2005) <- 'cmaq.ddm'
names( ddm2006) <- 'cmaq.ddm'
#======================================================================#
## Load monthly hyads
#======================================================================#
# read monthly grid files (V1 is a row-number column written by write.csv)
hyads2005.m.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/HyADS_grid/gridexposures/HyADS_grid_month_nopbl2005.csv', drop = 'V1')
hyads2006.m.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/HyADS_grid/gridexposures/HyADS_grid_month_nopbl2006.csv', drop = 'V1')
hyads2011.m.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/HyADS_grid/gridexposures/HyADS_grid_month_nopbl2011.csv', drop = 'V1')
# create lists from monthly grid objects, one data.table per year-month
hyads2005.m.l <- split( hyads2005.m.dt, by = 'yearmonth')
hyads2006.m.l <- split( hyads2006.m.dt, by = 'yearmonth')
hyads2011.m.l <- split( hyads2011.m.dt, by = 'yearmonth')
# align list names with the met stacks (X2005.01.01, ...)
names( hyads2005.m.l) <- names( mets2005.m)
names( hyads2006.m.l) <- names( mets2006.m)
names( hyads2011.m.l) <- names( mets2011.m)
# create lists of monthly rasters
# Turn one month of gridded HyADS exposure (a data.table with columns
# x, y, hyads) into a raster on the project CRS; cells that are missing
# from the table become zero exposure rather than NA.
HyADSrasterizer <- function(X) {
  hyads.r <- rasterFromXYZ(X[, .(x, y, hyads)], crs = p4s)
  hyads.r[is.na(hyads.r)] <- 0
  hyads.r
}
# rasterize every month's exposure table
hyads2005.m <- lapply( hyads2005.m.l, HyADSrasterizer)
hyads2006.m <- lapply( hyads2006.m.l, HyADSrasterizer)
hyads2011.m <- lapply( hyads2011.m.l, HyADSrasterizer)
# combine into a single raster stack (all months of 2005, 2006, 2011)
hyads.m.all <- stack( stack( hyads2005.m), stack( hyads2006.m), stack( hyads2011.m))
#======================================================================#
## Load annual hyads
#======================================================================#
hyads2005.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/HyADS_grid/gridexposures/HyADS_grid_annual_nopbl_2005.csv', drop = 'V1')
hyads2006.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/HyADS_grid/gridexposures/HyADS_grid_annual_nopbl_2006.csv', drop = 'V1')
hyads2011.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/HyADS_grid/gridexposures/HyADS_grid_annual_nopbl_2011.csv', drop = 'V1')
# annual exposure tables -> rasters on the project CRS (NAs left as-is here,
# unlike the monthly rasterizer which zero-fills)
hyads2005 <- rasterFromXYZ( hyads2005.dt[, .( x, y, hyads)], crs = p4s)
hyads2006 <- rasterFromXYZ( hyads2006.dt[, .( x, y, hyads)], crs = p4s)
hyads2011 <- rasterFromXYZ( hyads2011.dt[, .( x, y, hyads)], crs = p4s)
## ========================================================= ##
##  Read in emissions data
## ========================================================= ##
d_cems_cmaq.f <- "~/Dropbox/Harvard/RFMeval_Local/CMAQ_DDM/COAL IMPACTS/INVENTORY/CEM/2005_cemsum.txt"
d_nonegu.f <- "~/Dropbox/Harvard/RFMeval_Local/CMAQ_DDM/COAL IMPACTS/INVENTORY/NONEGU COAL/ptinv_ptnonipm_xportfrac_cap2005v2_2005cs_orl_06jan2011_v4_orl_COAL.txt"
d_cmaq <- fread( d_cems_cmaq.f)
# skip the ORL file's header block: start reading at the first line that
# contains "06029" (a FIPS code) and keep the first 63 columns
d_nonegu <- fread( d_nonegu.f, skip = "06029", header = F)[,1:63]
# column names live on the commented header line starting "FIPS,PLANTID,"
d_nonegu.names <- unlist( fread( d_nonegu.f, skip = 'FIPS,PLANTID,', header = F, nrows = 1))
names( d_nonegu) <- d_nonegu.names
# keep only SO2 records: longitude, latitude, annual emissions
d_nonegu.slim <- d_nonegu[ POLCODE == 'SO2', .( XLOC, YLOC, ANN_EMIS)]
## Convert to spatial object, take over CMAQ raster
d_nonegu.sp <- SpatialPointsDataFrame( d_nonegu.slim[, .( XLOC, YLOC)],
                                       data.frame( d_nonegu.slim[, ANN_EMIS]),
                                       proj4string = CRS( "+proj=longlat +datum=WGS84 +no_defs"))
d_nonegu.sp <- spTransform( d_nonegu.sp, CRS( p4s))
# rasterize point emissions onto the DDM grid; the awkward layer name is the
# auto-generated column name from the data.frame() call above
d_nonegu.r <- rasterize( d_nonegu.sp, ddm.m.all)$d_nonegu.slim...ANN_EMIS.
d_nonegu.r[is.na(d_nonegu.r[])] <- 0
## ========================================================= ##
##  Source inverse distance weighted raster
## ========================================================= ##
# monthly SOx emissions weighted by inverse distance to each grid cell
idwe.m.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/RData/ampd_dists_sox_weighted.csv', drop = 'V1')
idwe.m.l <- split( idwe.m.dt, by = 'yearmon')
# create lists of monthly rasters
# Turn one month of inverse-distance-weighted emissions (a data.table with
# columns x, y, tot.sum) into a raster on the project CRS; cells missing
# from the table are filled with zero.
IDWErasterizer <- function(X) {
  idwe.r <- rasterFromXYZ(X[, .(x, y, tot.sum)], crs = p4s)
  idwe.r[is.na(idwe.r)] <- 0
  idwe.r
}
idwe.m <- stack( lapply( idwe.m.l, IDWErasterizer))
# align with the monthly HyADS layer names (X2005.01.01, ...)
names( idwe.m) <- names( hyads.m.all)
## ========================================================= ##
##  SOx inverse distance by year
## ========================================================= ##
idwe2005.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/RData/ampd_dists_sox_weighted_2005_total.csv', drop = 'V1')
idwe2006.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/RData/ampd_dists_sox_weighted_2006_total.csv', drop = 'V1')
idwe2011.dt <- fread( '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/RData/ampd_dists_sox_weighted_2011_total.csv', drop = 'V1')
idwe2005 <- rasterFromXYZ( idwe2005.dt, crs = p4s)
idwe2006 <- rasterFromXYZ( idwe2006.dt, crs = p4s)
idwe2011 <- rasterFromXYZ( idwe2011.dt, crs = p4s)
# rename the annual layer 'idwe' -- downstream code (annual stacks, models)
# refers to this layer by that name
names( idwe2005) <- 'idwe'
names( idwe2006) <- 'idwe'
names( idwe2011) <- 'idwe'
# quick look at 2005->2006 relative change in each surface
summary(( hyads2006 - hyads2005) / hyads2005)
summary(( ddm2006 - ddm2005) / ddm2005)
summary(( idwe2006 - idwe2005) / idwe2005)
## ========================================================= ##
##  Plots
## ========================================================= ##
# get usa mask for masking
# download USA polygon from rnaturalearth
# continental US only: drop Hawaii and Alaska
us_states.names <- state.abb[!(state.abb %in% c( 'HI', 'AK'))]
us_states <- st_transform( USAboundaries::us_states(), p4s)
mask.usa <- sf::as_Spatial(us_states)[ us_states$state_abbr %in% us_states.names,]
# exploratory maps: year-over-year relative change, with state outlines
plot( ( hyads2006 - hyads2005) / hyads2005)
plot(mask.usa, add = T)
plot( (( ddm2006 - ddm2005) / ddm2005))
plot( (( idwe2006 - idwe2005) / idwe2005))
# single-month snapshots of each surface
plot( hyads.m.all$X2005.07.01)
plot(mask.usa, add = T)
plot( idwe.m$X2005.07.01)
plot(mask.usa, add = T)
plot( ddm.m.all$X2005.06.01)
plot(mask.usa, add = T)
# plot( data.table( values( project_and_stack( hyads.m.all$X2005.08.01, ddm.m.all$X2005.08.01))))
# plot( data.table( values( project_and_stack( hyads.m.all$X2005.12.01, ddm.m.all$X2005.12.01))))
# plot( data.table( values( project_and_stack( idwe.m$X2005.12.01, ddm.m.all$X2005.12.01))))
#======================================================================#
# stack up and project annual data
#======================================================================#
# one multi-layer stack per year: cmaq.ddm, hyads, idwe, met covariates,
# and non-EGU SO2 emissions, all masked to the continental US
dats2005.a <- project_and_stack( ddm2005, hyads2005, idwe2005,
                                 mets2005, d_nonegu.r, mask.use = mask.usa)
dats2006.a <- project_and_stack( ddm2006, hyads2006, idwe2006,
                                 mets2006, d_nonegu.r, mask.use = mask.usa)
# no 2011 CMAQ-DDM surface exists: the 2006 DDM layer is included only to
# keep the stack structure, then blanked to NA just below
dats2011.a <- project_and_stack( ddm2006, hyads2011, idwe2011,
                                 mets2011, d_nonegu.r, mask.use = mask.usa)
dats2011.a$cmaq.ddm <- NA
summary( dats2006.a - dats2005.a)
summary( dats2011.a - dats2005.a)
cor( values( dats2005.a), use = 'complete.obs')
cor( values( dats2006.a), use = 'complete.obs')
dats2005.v <- data.table( values( dats2005.a))
dats2006.v <- data.table( values( dats2006.a))
plot( dats2005.v[, .(cmaq.ddm, hyads, idwe)])
plot( dats2006.v[, .(cmaq.ddm, hyads, idwe)])
# inspect cells where HyADS is high but DDM is low (possible outliers)
plot( dats2005.a$cmaq.ddm < 1.2 & dats2005.a$hyads > 1.5e8)
plot( dats2006.a$cmaq.ddm < 1.2 & dats2006.a$hyads > 1.5e8)
d2005.red <- which( dats2005.v$cmaq.ddm < 1.2 & dats2005.v$hyads > 1.5e8)
plot( dats2005.v[d2005.red,.(cmaq.ddm, hyads, idwe)], col = 'red')
# correlations with those cells excluded
cor( dats2005.v[!d2005.red], use = 'complete.obs')
plot( dats2005.v[!d2005.red,.(cmaq.ddm, hyads, idwe)])
#======================================================================#
## Combine into raster stack, train model
#======================================================================#
# meteorological covariates used by the models; wspd is presumably derived
# from uwnd/vwnd inside the helper functions -- confirm in
# hyads_to_pm25_functions.R
cov.names = c( "temp", "rhum", "vwnd", "uwnd", "wspd")
# predict each month in 2006 using model trained in 2005
preds.mon.hyads06w05 <- mapply( month.trainer, names( mets2005.m), names( mets2006.m),
                                MoreArgs = list( name.x = 'hyads', y.m = hyads.m.all,
                                                 ddm.m = ddm.m.all, mets.m = mets.m.all,
                                                 idwe.m = idwe.m, emiss.m = d_nonegu.r,
                                                 .mask.use = mask.usa, cov.names = cov.names))
preds.mon.idwe06w05 <- mapply( month.trainer, names( mets2005.m), names( mets2006.m),
                               MoreArgs = list( name.x = 'idwe', y.m = idwe.m,
                                                ddm.m = ddm.m.all, mets.m = mets.m.all,
                                                idwe.m = idwe.m, emiss.m = d_nonegu.r,
                                                .mask.use = mask.usa, cov.names = cov.names))
# predict each month in 2005 using model trained in 2006 (disabled)
# preds.mon.hyads05w06 <- mapply( month.trainer, names( mets2006.m), names( mets2005.m),
#                                 MoreArgs = list( name.x = 'hyads', y.m = hyads.m.all,
#                                                  ddm.m = ddm.m.all, mets.m = mets.m.all,
#                                                  idwe.m = idwe.m, emiss.m = d_nonegu.r,
#                                                  .mask.use = mask.usa, cov.names = cov.names))
# preds.mon.idwe05w06 <- mapply( month.trainer, names( mets2006.m), names( mets2005.m),
#                                MoreArgs = list( name.x = 'idwe', y.m = idwe.m,
#                                                 ddm.m = ddm.m.all, mets.m = mets.m.all,
#                                                 idwe.m = idwe.m, emiss.m = d_nonegu.r,
#                                                 .mask.use = mask.usa, cov.names = cov.names))
# predict annual 2006 using model trained in 2005 (ho.frac = 0: no holdout)
preds.ann.hyads06w05 <- lm.hyads.ddm.holdout( dat.stack = dats2005.a, dat.stack.pred = dats2006.a, name.idwe = 'idwe', x.name = 'hyads',
                                              ho.frac = 0, covars.names = cov.names, return.mods = T)
preds.ann.idwe06w05 <- lm.hyads.ddm.holdout( dat.stack = dats2005.a, dat.stack.pred = dats2006.a, name.idwe = 'idwe', x.name = 'idwe',
                                             ho.frac = 0, covars.names = cov.names, return.mods = T)
# predict annual 2005 using model trained in 2006 (disabled)
# preds.ann.hyads05w06 <- lm.hyads.ddm.holdout( dat.stack = dats2006.a, dat.stack.pred = dats2005.a, name.idwe = 'idwe', x.name = 'hyads',
#                                               ho.frac = 0, covars.names = cov.names, return.mods = T)
# preds.ann.idwe05w06 <- lm.hyads.ddm.holdout( dat.stack = dats2006.a, dat.stack.pred = dats2005.a, name.idwe = 'idwe', x.name = 'idwe',
#                                              ho.frac = 0, covars.names = cov.names, return.mods = T)
# predict annual 2006 using model trained in 2005 - include inverse distance
# preds.ann.hyads06w05.i <- lm.hyads.ddm.holdout( dat.stack = dats2005.a, dat.stack.pred = dats2006.a,
#                                                 ho.frac = 0, covars.names = c( cov.names, 'idwe'), return.mods = T)
#======================================================================#
## Save data
#======================================================================#
# persist the inputs and fitted models so downstream scripts (and the
# evaluation sections below) can load them without re-training:
# annual stacks,
# monthly stacks
# annual model
# monthly models
save( dats2005.a, dats2006.a, dats2011.a,
      hyads.m.all, ddm.m.all, mets.m.all,
      idwe.m, d_nonegu.r,
      preds.mon.hyads06w05, #preds.mon.hyads05w06,
      preds.mon.idwe06w05, #preds.mon.idwe05w06,
      preds.ann.hyads06w05, #preds.ann.hyads05w06,
      preds.ann.idwe06w05, #preds.ann.idwe05w06,
      file = '~/Dropbox/Harvard/RFMeval_Local/HyADS_to_pm25/RData/hyads_to_cmaq_models3.RData')
# do correlation comparisons on quintiles
# scale all 3 on their Z score scale
#======================================================================#
## Annual plots
#======================================================================#
# side-by-side maps of the annual lm and gam predictions for both
# exposure surrogates
ggplot.a.raster( preds.ann.hyads06w05$Y.ho.hat.raster$y.hat.lm.cv,
                 preds.ann.hyads06w05$Y.ho.hat.raster$y.hat.gam.cv,
                 preds.ann.idwe06w05$Y.ho.hat.raster$y.hat.lm.cv,
                 preds.ann.idwe06w05$Y.ho.hat.raster$y.hat.gam.cv,
                 ncol. = 2, facet.names = c( 'lm - hyads', 'gam - hyads',
                                             'lm - idwe', 'gam - idwe'),
                 mask.raster = mask.usa)
# preds.ann.hyads06w05$metrics
# preds.ann.idwe06w05$metrics
#======================================================================#
## Extract data, summarize, and plot
#======================================================================#
## things we should show by month
# r (or R^2)
# spatial map of error by month
# each month's holdout?
# plot contributions of inputs
# July comparison figure: CMAQ-DDM truth vs HyADS- and IDWE-based predictions
# NOTE(review): preds.mon.hyads05w06 / preds.mon.idwe05w06 are commented out
# where defined above -- they must already exist in the workspace
gg_out <- ggplot.a.raster( subset( ddm.m.all, 'X2005.07.01'),
                           preds.mon.hyads05w06 ['Y.ho.hat.raster','X2006.07.01'][[1]]$y.hat.gam.cv,
                           preds.mon.idwe05w06  ['Y.ho.hat.raster','X2006.07.01'][[1]]$y.hat.gam.cv,
                           mask.raster = mask.usa, facet.names = c( 'CMAQ', 'HyADS', 'IDWE'),
                           bounds = c( 0,8), ncol. = 1)
ggsave( '~/Dropbox/Harvard/Meetings_and_People/CMAS_2019/HyADS_pred_model_July.png', gg_out,
        height = 8, width = 3.5, scale = .7)
# plots of monthly predictions
# Build the twelve per-month prediction rasters programmatically instead of
# enumerating them by hand, then pass them to ggplot.a.raster positionally
# (identical call to spelling out all twelve arguments).
pred.rasters.hyads <- lapply(
  sprintf('X2006.%02d.01', 1:12),
  function(m) preds.mon.hyads05w06['Y.ho.hat.raster', m][[1]]$y.hat.gam.cv)
do.call(ggplot.a.raster,
        c(pred.rasters.hyads,
          list(bounds = c(0, 8), ncol. = 3, facet.names = month.name,
               mask.raster = mask.usa)))
pred.rasters.idwe <- lapply(
  sprintf('X2005.%02d.01', 1:12),
  function(m) preds.mon.idwe['Y.ho.hat.raster', m][[1]]$y.hat.lm.cv)
do.call(ggplot.a.raster,
        c(pred.rasters.idwe,
          list(bounds = c(0, 6), ncol. = 3, facet.names = month.name,
               mask.raster = mask.usa)))
# plots of monthly error
# Each panel is one month's 2006 prediction divided by the matching 2005
# CMAQ-DDM month, i.e. a relative (ratio) map. The helper builds the twelve
# ratio rasters for a given prediction object / result field.
monthly.ratio.rasters <- function(preds, field) {
  mapply(
    function(m06, m05) preds[field, m06][[1]]$y.hat.gam.cv / subset(ddm.m.all, m05),
    sprintf('X2006.%02d.01', 1:12),
    sprintf('X2005.%02d.01', 1:12),
    SIMPLIFY = FALSE)
}
# HyADS-based predictions relative to DDM
do.call(ggplot.a.raster,
        c(monthly.ratio.rasters(preds.mon.hyads05w06, 'Y.ho.hat.raster'),
          list(bounds = c(-2, 2), ncol. = 3, facet.names = month.name,
               mask.raster = mask.usa)))
# IDWE-based predictions relative to DDM
do.call(ggplot.a.raster,
        c(monthly.ratio.rasters(preds.mon.idwe05w06, 'Y.ho.hat.raster'),
          list(bounds = c(-2, 2), ncol. = 3, facet.names = month.name,
               mask.raster = mask.usa)))
# HyADS prediction bias relative to DDM
do.call(ggplot.a.raster,
        c(monthly.ratio.rasters(preds.mon.hyads05w06, 'Y.ho.hat.bias.raster'),
          list(bounds = c(-1, 1), ncol. = 3, facet.names = month.name,
               mask.raster = mask.usa)))
#======================================================================#
## Check out the covariates
#======================================================================#
# plots of monthly covariate contributions
# average of each covariate over the year
# spatial plots of just HyADS/spline covariates over the year
# term names from the fitted GAMs; the idwe model's exposure terms are
# named 'tot.sum' internally (hence the grep/gsub below)
covs.all <- names( preds.ann.hyads06w05$Y.ho.terms.gam.raster)
covs.all.idwe <- names( preds.ann.idwe06w05$Y.ho.terms.gam.raster)
covs.hyads <- covs.all[grep( 'hyads', covs.all)]
# ".s" variants append the bivariate spatial spline term s(x,y)
covs.hyads.s <- c( covs.hyads, "s.x.y.")
covs.tot.sum <- covs.all.idwe[grep( 'tot.sum', covs.all.idwe)]
covs.tot.sum.s <- c( covs.tot.sum, "s.x.y.")
covs.idwe <- gsub( 'tot.sum', 'idwe', covs.tot.sum)
covs.idwe.s <- gsub( 'tot.sum', 'idwe', covs.tot.sum.s)
# plot hyads related covariates for different years
ggplot.a.raster( unstack( stack( subset( preds.ann.hyads06w05$Y.ho.terms.gam.raster, covs.hyads.s),
                                 subset( preds.ann.hyads05w06$Y.ho.terms.gam.raster, covs.hyads.s))),
                 bounds = c( -4,4), ncol. = 7, mask.raster = mask.usa,
                 facet.names = paste( covs.hyads.s, rep( c( '06w05', '05w06'), each = 7)))
ggplot.a.raster( unstack( stack( subset( preds.ann.idwe06w05$Y.ho.terms.gam.raster, covs.tot.sum.s),
                                 subset( preds.ann.idwe05w06$Y.ho.terms.gam.raster, covs.tot.sum.s))),
                 bounds = c( -4,4), ncol. = 7, mask.raster = mask.usa,
                 facet.names = paste( covs.tot.sum.s, rep( c( '06w05', '05w06'), each = 7)))
# sum all hyads/tot.sum contributions, keeping the spatial spline separate
hyads_gamters <- stack( sum( subset( preds.ann.hyads06w05$Y.ho.terms.gam.raster, covs.hyads)),
                        subset( preds.ann.hyads06w05$Y.ho.terms.gam.raster, 's.x.y.'),
                        sum( subset( preds.ann.hyads05w06$Y.ho.terms.gam.raster, covs.hyads)),
                        subset( preds.ann.hyads05w06$Y.ho.terms.gam.raster, 's.x.y.'),
                        sum( subset( preds.ann.idwe06w05$Y.ho.terms.gam.raster, covs.tot.sum)),
                        subset( preds.ann.idwe06w05$Y.ho.terms.gam.raster, 's.x.y.'),
                        sum( subset( preds.ann.idwe05w06$Y.ho.terms.gam.raster, covs.tot.sum)),
                        subset( preds.ann.idwe05w06$Y.ho.terms.gam.raster, 's.x.y.'))
ggplot.a.raster( unstack( hyads_gamters),
                 bounds = c( -4,4), ncol. = 2, mask.raster = mask.usa,
                 facet.names = c( paste( c( 'hyads', 'hyads s.x.y.'),
                                         rep( c( '06w05', '05w06'), each = 2)),
                                  paste( c( 'idwe', 'idwe s.x.y.'),
                                         rep( c( '06w05', '05w06'), each = 2))))
# plot hyads related covariates for different months
# pull the GAM term rasters for every month (one stack element per month)
gamters.mon06w05 <- stack( lapply( colnames( preds.mon.hyads06w05),
                                   function( x) {
                                     subset( preds.mon.hyads06w05['Y.ho.terms.gam.raster', x][[1]], covs.hyads)
                                   }))
gamters.mon06w05.i <- stack( lapply( colnames( preds.mon.idwe06w05),
                                     function( x) {
                                       subset( preds.mon.idwe06w05['Y.ho.terms.gam.raster', x][[1]], covs.idwe)
                                     }))
gamters.mon05w06 <- stack( lapply( colnames( preds.mon.hyads05w06),
                                   function( x) {
                                     subset( preds.mon.hyads05w06['Y.ho.terms.gam.raster', x][[1]], covs.hyads)
                                   }))
# facet labels: 7 covariate terms per month
names.gamters <- paste( covs.hyads, rep( month.abb, each = 7))
names.gamters.i <- paste( covs.idwe, rep( month.abb, each = 7))
ggplot.a.raster( unstack( gamters.mon06w05),
                 bounds = c( -4,4), ncol. = 7, mask.raster = mask.usa,
                 facet.names = names.gamters)
ggplot.a.raster( unstack( gamters.mon06w05.i),
                 bounds = c( -4,4), ncol. = 7, mask.raster = mask.usa,
                 facet.names = names.gamters.i)
ggplot.a.raster( unstack( gamters.mon05w06),
                 bounds = c( -4,4), ncol. = 7, mask.raster = mask.usa,
                 facet.names = names.gamters)
# sum all hyads/tot.sum contributions per month, keeping s(x,y) separate
gamters.mon06w05.hyadssum <- stack( lapply( colnames( preds.mon.hyads06w05),
                                            function( x) {
                                              stack( sum( subset( preds.mon.hyads06w05['Y.ho.terms.gam.raster', x][[1]], covs.hyads)),
                                                     subset( preds.mon.hyads06w05['Y.ho.terms.gam.raster', x][[1]], 's.x.y.'))
                                            }))
gamters.mon06w05.idwesum <- stack( lapply( colnames( preds.mon.idwe06w05),
                                           function( x) {
                                             stack( sum( subset( preds.mon.idwe06w05['Y.ho.terms.gam.raster', x][[1]], covs.idwe)),
                                                    subset( preds.mon.idwe06w05['Y.ho.terms.gam.raster', x][[1]], 's.x.y.'))
                                           }))
names.gamters.hy <- paste( c( 'hyads', 's.x.y.'), rep( month.abb, each = 2))
names.gamters.is <- paste( c( 'idwe', 's.x.y.'), rep( month.abb, each = 2))
ggplot.a.raster( unstack( gamters.mon06w05.hyadssum),
                 bounds = c( -4,4), ncol. = 4, mask.raster = mask.usa,
                 facet.names = names.gamters.hy)
ggplot.a.raster( unstack( gamters.mon06w05.idwesum),
                 bounds = c( -4,4), ncol. = 4, mask.raster = mask.usa,
                 facet.names = names.gamters.is)
# linear-model vs GAM term contributions: annual, then single example months
ggplot.a.raster( preds.ann.hyads06w05$Y.ho.terms.raster,
                 bounds = c( -4,4), ncol. = 5, mask.raster = mask.usa,
                 facet.names = names( preds.ann.hyads06w05$Y.ho.terms.raster))
ggplot.a.raster( preds.ann.hyads06w05$Y.ho.terms.gam.raster,
                 bounds = c( -4,4), ncol. = 5, mask.raster = mask.usa,
                 facet.names = names( preds.ann.hyads06w05$Y.ho.terms.gam.raster))
# January and July 2005, HyADS linear-model terms
ggplot.a.raster( preds.mon.hyads06w05['Y.ho.terms.raster','X2005.01.01'][[1]],
                 bounds = c( -4,4), ncol. = 4, mask.raster = mask.usa,
                 facet.names = names( preds.mon.hyads06w05['Y.ho.terms.raster','X2005.01.01'][[1]]))
ggplot.a.raster( preds.mon.hyads06w05['Y.ho.terms.raster','X2005.07.01'][[1]],
                 bounds = c( -4,4), ncol. = 4, mask.raster = mask.usa,
                 facet.names = names( preds.mon.hyads06w05['Y.ho.terms.raster','X2005.07.01'][[1]]))
# GAM terms for the same comparisons
ggplot.a.raster( preds.mon.hyads06w05['Y.ho.terms.gam.raster','X2005.01.01'][[1]],
                 bounds = c( -4,4), ncol. = 4, mask.raster = mask.usa,
                 facet.names = names( preds.mon.hyads06w05['Y.ho.terms.gam.raster','X2005.01.01'][[1]]))
ggplot.a.raster( preds.mon.hyads05w06['Y.ho.terms.gam.raster','X2006.07.01'][[1]],
                 bounds = c( -4,4), ncol. = 4, mask.raster = mask.usa,
                 facet.names = names( preds.mon.hyads05w06['Y.ho.terms.gam.raster','X2006.07.01'][[1]]))
ggplot.a.raster( preds.mon.idwe05w06['Y.ho.terms.gam.raster','X2006.07.01'][[1]],
                 bounds = c( -4,4), ncol. = 4, mask.raster = mask.usa,
                 facet.names = names( preds.mon.idwe05w06['Y.ho.terms.gam.raster','X2006.07.01'][[1]]))
#======================================================================#
## Plot the metrics
#======================================================================#
## extract evaluation statistics
## IDWE gets big change from bivariate spline, HyADS does not
preds.metrics.hyads <- preds.mon.hyads06w05[ 'metrics',]
preds.metrics.idwe <- preds.mon.idwe06w05[ 'metrics',]
# assemble one long table of monthly metrics for gam.cv and lm.cv fits.
# Note: `dt[...]$R^2` parses as (dt$R)^2 because `$` binds tighter than `^`,
# i.e. the correlation column R is squared to give R^2.
# Note also: month (length 2M) and model (length 2M) recycle over the 4M
# metric values; the gam/lm ordering of the metric vectors makes the
# recycled labels line up correctly.
metrics <- data.table( month = c( as.Date( gsub( '\\.', '-', gsub( 'X', '', names( preds.metrics.hyads)))),
                                  as.Date( gsub( '\\.', '-', gsub( 'X', '', names( preds.metrics.idwe))))),
                       model = c( rep( 'hyads', length( names( preds.metrics.hyads))),
                                  rep( 'idwe', length( names( preds.metrics.idwe)))),
                       class = c( rep( 'gam', 2 * length( names( preds.metrics.hyads))),
                                  rep( 'lm', 2 * length( names( preds.metrics.idwe)))),
                       'R^2' = c( sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'gam.cv']$R^2),
                                  sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'gam.cv']$R^2),
                                  sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'lm.cv']$R^2),
                                  sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'lm.cv']$R^2)),
                       NMB = c( sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'gam.cv']$NMB),
                                sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'gam.cv']$NMB),
                                sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'lm.cv']$NMB),
                                sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'lm.cv']$NMB)),
                       NME = c( sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'gam.cv']$NME),
                                sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'gam.cv']$NME),
                                sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'lm.cv']$NME),
                                sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'lm.cv']$NME)),
                       RMSE = c( sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'gam.cv']$RMSE),
                                 sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'gam.cv']$RMSE),
                                 sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'lm.cv']$RMSE),
                                 sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'lm.cv']$RMSE)))
metrics.m <- melt( metrics, id.vars = c( 'model', 'month', 'class'), variable.name = 'metric')
ggplot( data = metrics.m,
        aes( x = month, y = value, lty = class, color = model)) +
  geom_line() + geom_point() +
  facet_wrap( . ~ metric, scales = 'free_y', ncol = 1, labeller = label_parsed) +
  expand_limits( y = 0)
# metrics - adj. Z score, no model
# same metric table, but for the 'adj.Z.only' entry: the Z-scored exposure
# surrogate used directly, with no regression model
metrics.Z.only <- data.table( month = c( as.Date( gsub( '\\.', '-', gsub( 'X', '', names( preds.metrics.hyads)))),
                                         as.Date( gsub( '\\.', '-', gsub( 'X', '', names( preds.metrics.idwe))))),
                              model = c( rep( 'hyads', length( names( preds.metrics.hyads))),
                                         rep( 'idwe', length( names( preds.metrics.idwe)))),
                              'R^2' = c( sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'adj.Z.only']$R^2),
                                         sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'adj.Z.only']$R^2)),
                              NMB = c( sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'adj.Z.only']$NMB),
                                       sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'adj.Z.only']$NMB)),
                              NME = c( sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'adj.Z.only']$NME),
                                       sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'adj.Z.only']$NME)),
                              RMSE = c( sapply( preds.metrics.hyads, function( dt) dt[ mod.name == 'adj.Z.only']$RMSE),
                                        sapply( preds.metrics.idwe, function( dt) dt[ mod.name == 'adj.Z.only']$RMSE)))
metrics.Z.only.m <- melt( metrics.Z.only, id.vars = c( 'model', 'month'), variable.name = 'metric')
ggplot( data = metrics.Z.only.m,
        aes( x = month, y = value, group = model, color = model)) +
  geom_line() + geom_point() +
  facet_wrap( . ~ metric, scales = 'free_y', ncol = 1, labeller = label_parsed) +
  expand_limits( y = 0)
# extract linear model coefficients
#annual comparisons
#5 day avg time
#Check w/ sunni on month/annual etc
#======================================================================#
## Plot changes in evaluation in different areas
#======================================================================#
# bind each month's quantile-evaluation table into one data.table per
# (surrogate, train/test pairing); idcol keeps the month layer name
# NOTE(review): the 05w06 prediction objects are commented out where they
# are built above -- these lines need them in the workspace
cors.keep.month.hyads.u05w06 <- rbindlist( preds.mon.hyads05w06['evals.q',], idcol = 'month')[, y := '05w06']
cors.keep.month.hyads.u06w05 <- rbindlist( preds.mon.hyads06w05['evals.q',], idcol = 'month')[, y := '06w05']
cors.keep.month.idwe.u05w06 <- rbindlist( preds.mon.idwe05w06['evals.q',], idcol = 'month')[, y := '05w06']
cors.keep.month.idwe.u06w05 <- rbindlist( preds.mon.idwe06w05['evals.q',], idcol = 'month')[, y := '06w05']
cors.keep.month <- rbind( cors.keep.month.hyads.u05w06, cors.keep.month.hyads.u06w05,
                          cors.keep.month.idwe.u05w06, cors.keep.month.idwe.u06w05)
# melt monthly quantile-evaluation tables to long format: one row per
# (model, quantile cutoff s, month, train/test pairing y, metric)
cors.keep.m <- melt( cors.keep.month, id.vars = c( 'mod.name', 's', 'month', 'y'))
# 'month' arrives as a raster layer name like X2005.07.01 -- strip the X
# and keep only the calendar month number
cors.keep.m[, month := month( as.Date( gsub( 'X', '', month), format = '%Y.%m.%d'))]
ggplot( data = cors.keep.m,
       aes( x = s, y = value, color = mod.name, lty = y)) +
  geom_hline( yintercept = 0) +
  facet_grid( variable ~ month, scales = 'free_y') + geom_line()
# plot annual evaluation across s
# y labels the train/test pairing: '06w05' = 2006 predicted with the model
# trained on 2005; '05w06' = the reverse
cors.keep.u06w05 <- rbind( preds.ann.hyads06w05$evals.q, preds.ann.idwe06w05$evals.q)[, y := '06w05']
# NOTE(review): preds.ann.hyads05w06 / preds.ann.idwe05w06 are commented out
# where they are built earlier in this script -- they must already be in the
# workspace (e.g. loaded from the saved RData) for this line to run
cors.keep.u05w06 <- rbind( preds.ann.hyads05w06$evals.q, preds.ann.idwe05w06$evals.q)[, y := '05w06']
cors.keep.u <- rbind( cors.keep.u06w05, cors.keep.u05w06)
cors.keep.m <- melt( cors.keep.u, id.vars = c( 'mod.name', 's', 'y'))
ggplot( data = cors.keep.m,
       aes( x = s, y = value, color = mod.name, lty = y)) +
  geom_hline( yintercept = 0) +
  facet_wrap( . ~ variable, scales = 'free_y') + geom_line()
# need somehow to evaluate near vs far sources
# approximate this as high/low tot.sum
# says more about how emissions near sources are handled than
#   anything else
# check out wind speed argument --- very key
# IDWE does better in years with slow windspeed?
# plot cmaq range at each s
# do MSE?
# Spearman correlation of each exposure surrogate (HyADS, IDWE) with
# CMAQ-DDM, restricted to grid cells below successive quantiles q(s).
# FIX: the annual stacks name the inverse-distance layer 'idwe'
# (names( idwe2005) <- 'idwe' above), so the previous 'tot.sum' column
# lookups raised "subscript out of bounds" -- use 'idwe' here.
# NOTE(review): the quantile is computed on 'idwe' (per the tot.sum comment
# above) but the filter thresholds 'cmaq.ddm' -- confirm which variable the
# near/far split is meant to threshold.
cors.list <- list()
for (y in 2005:2006){
  vals <- values( get( paste0( 'dats', y, '.a')))
  for ( s in seq( 0.01, 1, .01)){
    q <- quantile( vals[,'idwe'], s, na.rm = TRUE)
    cors <- cor( vals[vals[,'cmaq.ddm'] < q,], use = 'complete.obs', method = 'spearman')
    # accumulate in a list and bind once at the end: rbind-ing a growing
    # data.table inside the loop copies the whole table every iteration
    cors.list[[length( cors.list) + 1]] <-
      data.table( s = s, hyads = cors['cmaq.ddm', 'hyads'],
                  idwe = cors['cmaq.ddm', 'idwe'], year = y)
  }
}
cors.keep <- rbindlist( cors.list)
cors.keep.m <- melt( cors.keep, id.vars = c( 's', 'year'))
ggplot( data = cors.keep.m,
       aes( x = s, y = value, color = variable, group = variable)) +
  geom_line() + facet_wrap( year ~ ., ncol = 2)
# cutoff s where the two surrogates' correlations agree most closely
cors.keep[which.min( abs( hyads - idwe))]
|
#' @export
#'
SDcheck.dist2_DT <- function(keeper, string, summarize = F){
spellSD(keeper, string, 2, summarize)
} | /TSTr/R/SDcheck.dist2_DT.R | no_license | ingted/R-Examples | R | false | false | 121 | r | #' @export
#' Thin wrapper around spellSD with the maximum edit distance fixed at 2.
SDcheck.dist2_DT <- function(keeper, string, summarize = FALSE){
	# FALSE rather than F: T/F are ordinary variables and can be reassigned
	spellSD(keeper, string, 2, summarize)
}
#######################################
###professor Choi, shizu@snu.ac.kr ####
#######################################
#######################################
########  R data structures  #########
#######################################
# topics: scalar, vector, list, array, data.frame, c, rbind, cbind, $, mean, na.rm=TRUE
# Ctrl+Enter sends the current line to the console; select several lines to run them all
# function names are highlighted (yellow); arguments go inside the parentheses
# start a line with # to write a comment
print("hello world!")
print("Hello world!")
# R works as a calculator
1*2
3*4
2/4
# creating variables
# the object goes on the left, the data to assign on the right (mind the order)
a<-2
a
a<-3
a
# c is short for concatenate, i.e. combining values
a<-c(3,5)
a
##R에서 쓰이는 변수 유형
#numeric (real or decimal): 2, 2.0, pi
#character : "a", "work", "1"
#complex : 1+4i
#logical : true of false
#integer : special case of numeric dat without decimals
#scalar, vector, array, list, dataframe의 이해
#scalar: 하나의 원소(element)
scalar<-1
scalar
scalar<-"bts"
scalar
#vector : 여러개의 원소들이나 하나의 row
vector <-c(1,2,3)
vector
vector <-c("v", "rm", "suga")
vector
#matrix : 2*2, 2*3의 행렬 (vector를 여러개의 row로 쌓은형태)
matrix <-matrix(c(1,2,3,4,5,6), nrow=3)
matrix
matrix <-matrix(c(1,2,3,4,5,6), nrow=2)
matrix
matrix <-matrix(c(1,2,3,4,5,6), nrow=2, byrow=TRUE)
matrix
matrix <-matrix(c(1:20), nrow=4, ncol=5, byrow=TRUE)
matrix
mat1 <-c(1:3)
mat2 <-c(4:6)
matrix<-c(mat1, mat2)
matrix
matrix <-cbind(mat1, mat2) #cbind : column을 기준으로 횡으로 붙이기
matrix
matrix <-rbind(mat1, mat2) #rbind : row을 기준으로 종으로 붙이기
matrix
#특정 위치의 요소 추출 및 치환
matrix[1,2]
matrix[1:2]
matrix[1,] #첫번째 row의 모든 원소를 추출
matrix[,1] #첫번째 col의 모든 원소를 추출
matrix[c(1,2),] #1,2번째 row의 모든 원소를 추출
matrix[1,2]=100
matrix
#array : matrix를 여러층으로 쌓은것
matrix1<- matrix(c(1:9), nrow=3)
matrix1
matrix2<- matrix(c(10:18), nrow=3)
matrix3<- matrix(c(19:27), nrow=3)
matrix2
matrix3
array <-array(c(matrix1, matrix2, matrix3), dim=c(3,3,3))
array
#지금까지 살펴본 vector, matrix, array는 모두 같은 특성의 데이터로만 구성되어 있음. 즉 character, logic, numeric의 한종류
#일반적으로 쓰는 데이터는 문자변수, 숫자변수 등이 하나의 데이터셋에 담겨있음. 이 경우 쓰는 것이 dataframe. 앞으로 우리가 쓰는 대부분의 데이터는 dataframe일 것임
btsname <-c("RM", "Jin", "suga","jhope", "jimin", "V", "JK")
btsyear <-c(1994, 1992, 1993, 1994, 1995, 1995, 1997)
btsposition <-c("rap", "vocal", "rap", "rap", "vocal", "vocal","vocal")
bts <-data.frame(btsname, btsyear, btsposition)
bts
str(bts)
bts <-data.frame(btsname, btsyear, btsposition, stringsAsFactors = TRUE)
str(bts)
#factor의 이해
#factor란 주로 categorical한 변수로서 "값"(일반벡터)에 "level"이라는 정보를 추가한 것
gender=factor(c("male", "female", "female", "male"))
gender
str(gender)
##level의 순서를 바꾸고 싶거나, referece group 설정을 위해서는 leves=c() 사용
gender=factor(gender,
levels=c("male", "female"))
gender
str(gender)
#dataframe 활용
#변수 선택 $표시 활용
bts$btsname
bts$btsposition
bts$btsposition=factor(btsposition, levels=c("vocal", "rap"))
bts$btsposition
bts$age <- 2021-bts$btsyear+1
bts
bts$null <-NULL
bts
bts$na <-NA
bts
dim(bts)
#na=not available의 약자. 결측치를 의미함
#NULL=존재하지 않는 값
#na와 null의 차이는 mean 산출시 확인 가능
#null은 자동으로 무시되어 mean 산출
#na는 평균에 영향을 미침. 따라서 na.rm=TRUE 옵션을 통해 na를 무시하고 평균을 구할 수 있음
bts
bts[1,5]<-3
bts[2,5]<-5
bts[3,5]<-1
mean(bts$na,na.rm = TRUE)
bts[1,4]<-NA #대괄호는 indexing, [row, column] 순서를 기억하자
mean(bts$age)
mean(bts$na, na.rm=TRUE)
bts
#작업 디렉토리 설정하기 -> r project를 쓰지않고 script를 개별 저장관리할 경우
getwd()
setwd("C:\\Users\\Owner\\Documents\\new") ##자료 저장 directory 설정
#package 불러오기(install)와 열기(library)
install.packages("readxl")
install.packages("foreign")
library(readxl)
library(foreign)
#자료 입력 및 출력
#외부자료 가져오기. excel은 csv 파일로 가져오기 추천
data_csv <- read.table("data_csv.csv", header = T, sep=",")
data_spss <- read.spss("data_sav.sav", use.value.labels=T, to.data.frame=T)
#외부자료 내보내기. excel은 csv 파일로 내보내기 추천
write.table(data_csv, "data_csv2.csv", sep=",", row.names = F, quote=F)
write.foreign(data_spss, "data_spss2.dat", "data_spss2.sav", package="SPSS")
#기초통계 (summary)
View(data_csv)
str(data_csv)
#score2가 character변수이므로 numeric으로 변경
data_csv$score2 <- as.numeric(data_csv$score2)
#쉼표때문에 missing이 생기는걸 확인했습니다. gsub 함수를 활용해 쉼표를 없애겠습니다
#gsub(“제거할 내용“, “제거방식”, 객체$변수)
data_csv$score2 <- gsub(",", "", data_csv$score2)
data_csv$score2 <- as.numeric(data_csv$score2)
#edu와 employment도 factor로 변환하겠습니다
data_csv$edu=factor(data_csv$edu,
levels=c("elementry", "middle", "high"))
data_csv$employment=factor(data_csv$employment,
levels=c("employed", "unemployed"))
summary(data_csv)
summary(data_csv$score)
table(data_csv$edu)
addmargins(table(data_csv$edu))
table(data_csv$edu, data_csv$employment)
addmargins(table(data_csv$edu, data_csv$employment)) | /1_data strucrue_choi.R | no_license | ChungSeok/2021_graduate | R | false | false | 5,711 | r | #######################################
#######################################
### professor Choi, shizu@snu.ac.kr ###
#######################################
#######################################
########   R data structures   ########
#######################################
#scalar#vector#list#array#data.frame#c#rbind#cbind#$#mean#na.rm=TRUE
# Ctrl+Enter sends the current line to the console; you can also select
# several lines and run them together.
# Functions are shown in yellow; what you pass to a function goes inside the parentheses.
# Start a line with # to write a comment.
print("hello world!")
print("Hello world!")
# R can do any arithmetic.
1*2
3*4
2/4
# Creating variables
# The object goes on the left, the data to assign on the right (mind the order).
a<-2
a
a<-3
a
# c is short for "concatenate": it combines values.
a<-c(3,5)
a
## Data types used in R
#numeric (real or decimal): 2, 2.0, pi
#character : "a", "work", "1"
#complex : 1+4i
#logical : TRUE or FALSE
#integer : special case of numeric data without decimals
# Understanding scalar, vector, array, list, and data frame
# scalar: a single element
scalar<-1
scalar
scalar<-"bts"
scalar
# vector: several elements, i.e. a single row of values
vector <-c(1,2,3)
vector
vector <-c("v", "rm", "suga")
vector
# matrix: a 2x2, 2x3, ... table (vectors stacked as rows)
matrix <-matrix(c(1,2,3,4,5,6), nrow=3)
matrix
matrix <-matrix(c(1,2,3,4,5,6), nrow=2)
matrix
matrix <-matrix(c(1,2,3,4,5,6), nrow=2, byrow=TRUE)
matrix
matrix <-matrix(c(1:20), nrow=4, ncol=5, byrow=TRUE)
matrix
mat1 <-c(1:3)
mat2 <-c(4:6)
matrix<-c(mat1, mat2)
matrix
matrix <-cbind(mat1, mat2) # cbind: bind side by side, by column
matrix
matrix <-rbind(mat1, mat2) # rbind: stack vertically, by row
matrix
# Extracting and replacing elements at specific positions
matrix[1,2]
matrix[1:2]
matrix[1,] # all elements of the first row
matrix[,1] # all elements of the first column
matrix[c(1,2),] # all elements of rows 1 and 2
matrix[1,2]=100
matrix
# array: matrices stacked in layers
matrix1<- matrix(c(1:9), nrow=3)
matrix1
matrix2<- matrix(c(10:18), nrow=3)
matrix3<- matrix(c(19:27), nrow=3)
matrix2
matrix3
array <-array(c(matrix1, matrix2, matrix3), dim=c(3,3,3))
array
# vector, matrix and array can each hold only ONE kind of data
# (character, logical, or numeric).
# Real data sets usually mix character and numeric variables in one object;
# for that we use a data frame -- most data we work with will be data frames.
btsname <-c("RM", "Jin", "suga","jhope", "jimin", "V", "JK")
btsyear <-c(1994, 1992, 1993, 1994, 1995, 1995, 1997)
btsposition <-c("rap", "vocal", "rap", "rap", "vocal", "vocal","vocal")
bts <-data.frame(btsname, btsyear, btsposition)
bts
str(bts)
bts <-data.frame(btsname, btsyear, btsposition, stringsAsFactors = TRUE)
str(bts)
# Understanding factors
# A factor is mainly for categorical variables: ordinary values plus
# extra "level" information.
gender=factor(c("male", "female", "female", "male"))
gender
str(gender)
## Use levels=c() to reorder levels or to set the reference group.
gender=factor(gender,
              levels=c("male", "female"))
gender
str(gender)
# Working with data frames
# Select a variable with the $ operator
bts$btsname
bts$btsposition
bts$btsposition=factor(btsposition, levels=c("vocal", "rap"))
bts$btsposition
bts$age <- 2021-bts$btsyear+1
bts
bts$null <-NULL
bts
bts$na <-NA
bts
dim(bts)
# NA = "not available", i.e. a missing value
# NULL = a value that does not exist at all
# The difference between NA and NULL shows up when computing a mean:
# NULL is simply dropped, so the mean is computed without it;
# NA propagates into the mean -- use na.rm=TRUE to ignore NAs when averaging.
bts
bts[1,5]<-3
bts[2,5]<-5
bts[3,5]<-1
mean(bts$na,na.rm = TRUE)
bts[1,4]<-NA # square brackets index as [row, column] -- remember the order
mean(bts$age)
mean(bts$na, na.rm=TRUE)
bts
# Setting the working directory (when managing scripts individually
# instead of using an R project)
getwd()
setwd("C:\\Users\\Owner\\Documents\\new") ## set the data directory
# Installing packages (install.packages) and loading them (library)
install.packages("readxl")
install.packages("foreign")
library(readxl)
library(foreign)
# Data import and export
# Importing external data; for Excel, going through a CSV file is recommended
# NOTE(review): header = T uses the reassignable shorthand; TRUE is safer style.
data_csv <- read.table("data_csv.csv", header = T, sep=",")
data_spss <- read.spss("data_sav.sav", use.value.labels=T, to.data.frame=T)
# Exporting data; again, CSV is recommended for Excel
write.table(data_csv, "data_csv2.csv", sep=",", row.names = F, quote=F)
write.foreign(data_spss, "data_spss2.dat", "data_spss2.sav", package="SPSS")
# Basic statistics (summary)
View(data_csv)
str(data_csv)
# score2 is a character variable, so convert it to numeric
data_csv$score2 <- as.numeric(data_csv$score2)
# Commas in the values produce NAs on conversion; strip them with gsub first.
# gsub("pattern to remove", "replacement", object$variable)
data_csv$score2 <- gsub(",", "", data_csv$score2)
data_csv$score2 <- as.numeric(data_csv$score2)
# Convert edu and employment to factors as well
data_csv$edu=factor(data_csv$edu,
                    levels=c("elementry", "middle", "high"))
data_csv$employment=factor(data_csv$employment,
                    levels=c("employed", "unemployed"))
summary(data_csv)
summary(data_csv$score)
table(data_csv$edu)
addmargins(table(data_csv$edu))
table(data_csv$edu, data_csv$employment)
addmargins(table(data_csv$edu, data_csv$employment))
options(shiny.maxRequestSize = 9*1024^2)
if (!require("pacman")) install.packages("pacman")
pacman::p_load(shiny,
shinydashboard)
ui <- dashboardPage(
skin = "yellow",
dashboardHeader(
title = "An intelligent application for Autism detection",
titleWidth = 600
),
dashboardSidebar(),
dashboardBody(
fluidRow(
tabBox(
width = 12,
#title = "First tabBox",
# The id lets us use input$tabset1 on the server to find the current tab
id = "tabset1",
height = "1000px",
tabPanel(h4("Predictions"),
"First tab content"),
tabPanel(h4("Explainable AI"),
"Second tab content")
)
)
)
) | /ui.R | no_license | satyakamacodes/autoSum2 | R | false | false | 770 | r | options(shiny.maxRequestSize = 9*1024^2)
# Bootstrap the package manager, then load the UI dependencies.
# NOTE(review): installing packages at load time is a side effect; consider
# documenting the required packages instead of auto-installing on first run.
if (!require("pacman")) install.packages("pacman")
pacman::p_load(shiny,
               shinydashboard)
# Dashboard layout assembled from named components: header + (empty) sidebar
# + a body holding one full-width, two-tab tabBox.
page_header <- dashboardHeader(
  title = "An intelligent application for Autism detection",
  titleWidth = 600
)

page_sidebar <- dashboardSidebar()

# The tabBox id makes the active tab available server-side as input$tabset1.
page_body <- dashboardBody(
  fluidRow(
    tabBox(
      id = "tabset1",
      width = 12,
      height = "1000px",
      tabPanel(h4("Predictions"), "First tab content"),
      tabPanel(h4("Explainable AI"), "Second tab content")
    )
  )
)

ui <- dashboardPage(page_header, page_sidebar, page_body, skin = "yellow")
# ========================================================== #
# hash_pw.R #
# ========================================================== #
#
#' Hashing functions
#'
#' Hashes passwords using a variety of algorithms
#'
#' Wrappers to the digest package, using algorithms made available via digest.
#' Specifically, does \emph{not} serialize the hash.
#'
#' @name hash_pw
#'
#X## ------------------------------- PARAMS ------------------------------- ##
#' @param pw string to hash. Cannot be blank or blank-like
#'
#' @param algo algorithm to use. Passed to the digest `algo` parameter
#' see `?digest::digest` for more.
#'
#X## ------------------------------------------------------------------------ ##
#'
#' @return
#' The hashed string.
#'
#' @examples
#'
#' \dontrun{
#' library(rcreds)
#'
#' hash_pw_md5("P4ssword!")
#' hash_pw("P4ssword!", algo="md5")
#'
#' }
#'
NULL
#' @rdname hash_pw
#' @export
hash_pw <- function(pw, algo) {
  # Fail loudly if the backend is missing: the original called
  # requireNamespace() and ignored its return value, so a missing 'digest'
  # only surfaced later as an obscure error at the digest::digest() call.
  if (!requireNamespace("digest", quietly = TRUE))
    stop("package 'digest' is required for hash_pw()", call. = FALSE)
  # A hashable password is exactly one non-NA, non-empty string. The original
  # check (!length(pw) || !nzchar(pw)) accepted multi-element vectors, which
  # makes nzchar() return a vector -- an error as an if() condition in
  # modern R -- and it did not reject NA_character_.
  if (!is.character(pw) || length(pw) != 1L || is.na(pw) || !nzchar(pw))
    stop("'pw' must be a non-empty string", call. = FALSE)
  # serialize = FALSE hashes the string's bytes directly rather than R's
  # serialized representation of the object.
  digest::digest(pw, algo = algo, serialize = FALSE, length = Inf, file = FALSE)
}
#' @rdname hash_pw
#' @importFrom magrittr %>%
#' @export
hash_pw_md5 <- function(pw) hash_pw(pw, algo = "md5")

#' @rdname hash_pw
#' @importFrom magrittr %>%
#' @export
hash_pw_sha1 <- function(pw) hash_pw(pw, algo = "sha1")

#' @rdname hash_pw
#' @importFrom magrittr %>%
#' @export
hash_pw_sha512 <- function(pw) hash_pw(pw, algo = "sha512")
| /R/hash_pw.R | no_license | rsaporta/rcreds | R | false | false | 1,614 | r | # ========================================================== #
# hash_pw.R #
# ========================================================== #
#
#' Hashing functions
#'
#' Hashes passwords using a variety of algorithms
#'
#' Wrappers to digest package, using algorithsm made available via digest.
#' Specifically, does \emph{not} serialize the hash.
#'
#' @name hash_pw
#'
#X## ------------------------------- PARAMS ------------------------------- ##
#' @param pw string to hash. Cannot be blank or blank-like
#'
#' @param algo algorithm to use. Passed to the digest `algo` parameter
#' see `?digest::digest` for more.
#'
#X## ------------------------------------------------------------------------ ##
#'
#' @return
#' The hashed string.
#'
#' @examples
#'
#' #' \dontrun{
#' library(rcreds)
#'
#' hash_pw_md5("P4ssword!")
#' hash_pw("P4ssword!", algo="md5")
#'
#' }
#'
NULL
#' @rdname hash_pw
#' @importFrom magrittr %>%
#' @export
hash_pw <- function(pw, algo) {
requireNamespace("digest")
if (!is.character(pw) || !length(pw) || !nzchar(pw))
stop("'pw' must non-empty string")
digest::digest(pw, algo=algo, serialize=FALSE, length=Inf, file=FALSE)
}
#' @rdname hash_pw
#' @importFrom magrittr %>%
#' @export
hash_pw_md5 <- function(pw) {
hash_pw(pw=pw, algo="md5")
}
#' @rdname hash_pw
#' @importFrom magrittr %>%
#' @export
hash_pw_sha1 <- function(pw) {
hash_pw(pw=pw, algo="sha1")
}
#' @rdname hash_pw
#' @importFrom magrittr %>%
#' @export
hash_pw_sha512 <- function(pw) {
hash_pw(pw=pw, algo="sha512")
}
|
####################################################################################################
####################################################################################################
## Read, manipulate and write spatial vector data, Get GADM data
## Contact remi.dannunzio@fao.org
## 2018/08/22
####################################################################################################
####################################################################################################
####################################################################################################
################################### PART I: GET GADM DATA
####################################################################################################
## Get the list of countries from getData: "getData"
(gadm_list <- data.frame(raster::getData('ISO3')))
## Get GADM data, check object properties
country <- raster::getData('GADM',path=gadm_dir , country= countrycode, level=1)
summary(country)
extent(country)
proj4string(country)
## Display the SPDF
plot(country)
country$OBJECTID <- row(country)[,1]
## Export the SpatialPolygonDataFrame as a ESRI Shapefile
# writeOGR(country,
# paste0(gadm_dir,"gadm_",countrycode,"_l1.shp"),
# paste0("gadm_",countrycode,"_l1"),
# "ESRI Shapefile",
# overwrite_layer = T)
####################################################################################################
################################### PART II: CREATE A TILING OVER AN AREA OF INTEREST
####################################################################################################
### What grid size do we need ?
grid_size <- 20000 ## in meters
grid_deg <- grid_size/111320 ## in degree
sqr_df <- generate_grid(country,grid_deg)
nrow(sqr_df)
### Select a vector from location of another vector
# aoi <- readOGR(paste0(phu_dir,"107_PHU_BOUNDARY.shp"))
aoi <- readOGR(paste0(phu_dir,"25_KHG_SEPAL.shp"))
#aoi_3phu <- aoi[aoi$KODE_KHG %in% c("KHG.16.02.01","KHG.16.02.08","KHG.16.02.02"),]
### Select a vector from location of another vector
sqr_df_selected <- sqr_df[aoi,]
nrow(sqr_df_selected)
### Plot the results
# plot(sqr_df_selected)
# plot(aoi,add=T,border="blue")
# plot(country,add=T,border="green")
### Give the output a decent name, with unique ID
names(sqr_df_selected@data) <- "tileID"
sqr_df_selected@data$tileID <- row(sqr_df_selected@data)[,1]
tiles <- sqr_df_selected
tiles <- readOGR(paste0(tile_dir,"tiling_all_phu_edit.shp"))
### Distribute samples among users
dt <- tiles@data
users <- read.csv(paste0(doc_dir,"participants_20190819.csv"))
head(users)
du <- data.frame(cbind(users$username,dt$tileID))
names(du) <- c("username","tileID")
du <- arrange(du,username)
df <- data.frame(cbind(du$username,dt$tileID))
names(df) <- c("username","tileID")
df$tileID <- as.numeric(df$tileID)
table(df$username)
tiles@data <- df
### Export ALL TILES as KML
export_name <- paste0("tiling_all_phu")
writeOGR(obj=tiles,
dsn=paste(tile_dir,export_name,".kml",sep=""),
layer= export_name,
driver = "KML",
overwrite_layer = T)
writeOGR(obj=tiles,
dsn=paste(tile_dir,export_name,".shp",sep=""),
layer= export_name,
driver = "ESRI Shapefile",
overwrite_layer = T)
### Create a final subset corresponding to your username
plot(my_tiles,add=T,col="red")
table(tiles@data$username)
plot(tiles)
# Export one KML and one shapefile per user, each containing only the tiles
# assigned to that user in `df`.
for (user in unique(df$username)) {
  print(user)
  export_name <- paste0("tiles_phu_", user)
  # Subset of the master tile grid assigned to this user.
  my_tiles <- tiles[tiles$tileID %in% df[df$username == user, "tileID"], ]
  plot(my_tiles, add = TRUE, col = "red")
  print(table(my_tiles$username))
  writeOGR(obj = my_tiles,
           dsn = paste0(tile_dir, export_name, ".kml"),
           layer = export_name,
           driver = "KML",
           overwrite_layer = TRUE)
  # BUG FIX: the original passed obj=tiles here, so every per-user shapefile
  # contained the WHOLE grid instead of the user's subset (the KML branch and
  # the commented-out template below both use my_tiles).
  writeOGR(obj = my_tiles,
           dsn = paste0(tile_dir, export_name, ".shp"),
           layer = export_name,
           driver = "ESRI Shapefile",
           overwrite_layer = TRUE)
}
# ### Export the final subset
# export_name <- paste0("tiles_phu_",username)
#
# writeOGR(obj=my_tiles,
# dsn=paste(tile_dir,export_name,".kml",sep=""),
# layer= export_name,
# driver = "KML",
# overwrite_layer = T)
#
| /scripts/tiling/s1_create_tiling_system.R | no_license | yfinegold/ws_idn_20190819 | R | false | false | 4,357 | r | ####################################################################################################
####################################################################################################
## Read, manipulate and write spatial vector data, Get GADM data
## Contact remi.dannunzio@fao.org
## 2018/08/22
####################################################################################################
####################################################################################################
####################################################################################################
################################### PART I: GET GADM DATA
####################################################################################################
## Get the list of countries from getData: "getData"
(gadm_list <- data.frame(raster::getData('ISO3')))
## Get GADM data, check object properties
country <- raster::getData('GADM',path=gadm_dir , country= countrycode, level=1)
summary(country)
extent(country)
proj4string(country)
## Display the SPDF
plot(country)
country$OBJECTID <- row(country)[,1]
## Export the SpatialPolygonDataFrame as a ESRI Shapefile
# writeOGR(country,
# paste0(gadm_dir,"gadm_",countrycode,"_l1.shp"),
# paste0("gadm_",countrycode,"_l1"),
# "ESRI Shapefile",
# overwrite_layer = T)
####################################################################################################
################################### PART II: CREATE A TILING OVER AN AREA OF INTEREST
####################################################################################################
### What grid size do we need ?
grid_size <- 20000 ## in meters
grid_deg <- grid_size/111320 ## in degree
sqr_df <- generate_grid(country,grid_deg)
nrow(sqr_df)
### Select a vector from location of another vector
# aoi <- readOGR(paste0(phu_dir,"107_PHU_BOUNDARY.shp"))
aoi <- readOGR(paste0(phu_dir,"25_KHG_SEPAL.shp"))
#aoi_3phu <- aoi[aoi$KODE_KHG %in% c("KHG.16.02.01","KHG.16.02.08","KHG.16.02.02"),]
### Select a vector from location of another vector
sqr_df_selected <- sqr_df[aoi,]
nrow(sqr_df_selected)
### Plot the results
# plot(sqr_df_selected)
# plot(aoi,add=T,border="blue")
# plot(country,add=T,border="green")
### Give the output a decent name, with unique ID
names(sqr_df_selected@data) <- "tileID"
sqr_df_selected@data$tileID <- row(sqr_df_selected@data)[,1]
tiles <- sqr_df_selected
tiles <- readOGR(paste0(tile_dir,"tiling_all_phu_edit.shp"))
### Distribute samples among users
dt <- tiles@data
users <- read.csv(paste0(doc_dir,"participants_20190819.csv"))
head(users)
du <- data.frame(cbind(users$username,dt$tileID))
names(du) <- c("username","tileID")
du <- arrange(du,username)
df <- data.frame(cbind(du$username,dt$tileID))
names(df) <- c("username","tileID")
df$tileID <- as.numeric(df$tileID)
table(df$username)
tiles@data <- df
### Export ALL TILES as KML
export_name <- paste0("tiling_all_phu")
writeOGR(obj=tiles,
dsn=paste(tile_dir,export_name,".kml",sep=""),
layer= export_name,
driver = "KML",
overwrite_layer = T)
writeOGR(obj=tiles,
dsn=paste(tile_dir,export_name,".shp",sep=""),
layer= export_name,
driver = "ESRI Shapefile",
overwrite_layer = T)
### Create a final subset corresponding to your username
plot(my_tiles,add=T,col="red")
table(tiles@data$username)
plot(tiles)
for (user in unique(df$username))
{
print(user)
export_name <- paste0("tiles_phu_",user)
my_tiles <- tiles[tiles$tileID %in% df[df$username == user,"tileID"],]
plot(my_tiles,add=T,col="red")
print(table(my_tiles$username))
writeOGR(obj=my_tiles,
dsn=paste(tile_dir,export_name,".kml",sep=""),
layer= export_name,
driver = "KML",
overwrite_layer = T)
writeOGR(obj=tiles,
dsn=paste(tile_dir,export_name,".shp",sep=""),
layer= export_name,
driver = "ESRI Shapefile",
overwrite_layer = T)
}
# ### Export the final subset
# export_name <- paste0("tiles_phu_",username)
#
# writeOGR(obj=my_tiles,
# dsn=paste(tile_dir,export_name,".kml",sep=""),
# layer= export_name,
# driver = "KML",
# overwrite_layer = T)
#
|
# Leaf-trait analysis setup: load plotting/manipulation packages, read the raw
# water-potential measurements, derive traits, and build the subsets used by
# the plots further down the script.
library("dplyr")
library("ggplot2")
library("gridExtra")
source("scripts/abbreviations.R")
# Empty strings and "NA" in the CSV are both treated as missing.
df <- read.csv("raw_data/water_potentials_170517.csv", na.strings=c("","NA"),header=TRUE)
df$h2o<-(1-(df$dry_weight/df$fresh_weight))  # water content as fraction of fresh weight
df$lma<-df$dry_weight/df$leaf_area           # leaf mass per area
df$phi<-df$water_potential*-1                # sign-flipped water potential
# Host-type and species subsets used by the later ggplot panels.
df_euc<-subset(df, type == 'eucalypt')
df_mis<-subset(df, type == 'mistletoe')
fib<-subset(df, species == 'fibrosa')
mol<-subset(df, species == 'moluccana')
mel<-subset(df, species == 'melaleuca')
# Quick exploratory boxplots (base graphics).
boxplot(water_potential~species,data=df)
boxplot(water_potential~infestation,data=df)
boxplot(water_potential~infestation,data=fib)
boxplot(water_potential~infestation,data=mol)
# NOTE(review): par(mfrow=...) affects base graphics only; it has no effect on
# the ggplot objects created below.
par(mfrow=c(3,1))
phi <- ggplot(data = df, aes(x=infestation, y=phi)) + ylim(-3.2,-1.4) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none")
h2o <- ggplot(data = df, aes(x=infestation, y=h2o)) + ylim(0.45,0.75) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none")
lma <- ggplot(data = df, aes(x=infestation, y=lma)) + ylim(0.015,0.03) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none")
traits<-grid.arrange(phi, h2o , lma, nrow=3, ncol=1)
ggsave("output/traits.png", plot = traits, width = 20, height = 20, units = "cm", dpi = 300)
phi_mis <- ggplot(data = df_mis, aes(x=infestation, y=phi)) + ylim(-3.2,-1.4) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none") +
labs(x="Mistletoe leaves")
h2o_mis <- ggplot(data = df_mis, aes(x=infestation, y=h2o)) + ylim(0.45,0.75) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none") +
labs(x="Mistletoe leaves")
lma_mis <- ggplot(data = df_mis, aes(x=infestation, y=lma)) + ylim(0.015,0.03) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none") +
labs(x="Mistletoe leaves")
traits_mis<-grid.arrange(phi_mis, h2o_mis , lma_mis, nrow=3, ncol=1)
ggsave("output/traits_mis.png", plot = traits_mis, width = 20, height = 20, units = "cm", dpi = 300)
phi_euc <- ggplot(data = df_euc, aes(x=infestation, y=phi)) + ylim(-3.2,-1.4) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none") +
labs(x="Eucalypt leaves")
h2o_euc <- ggplot(data = df_euc, aes(x=infestation, y=h2o)) + ylim(0.45,0.75) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none") +
labs(x="Eucalypt leaves")
lma_euc <- ggplot(data = df_euc, aes(x=infestation, y=lma)) + ylim(0.015,0.03) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free") + theme(legend.position="none") +
labs(x="Eucalypt leaves")
traits_euc<-grid.arrange(phi_euc, h2o_euc , lma_euc, nrow=3, ncol=1)
ggsave("output/traits_euc.png", plot = traits_euc, width = 20, height = 20, units = "cm", dpi = 300)
phi_mel <- ggplot(data = mel, aes(x=infestation, y=phi)) + ylim(-3.2,-1.4) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none") +
labs(x="melaleuca leaves")
h2o_mel <- ggplot(data = mel, aes(x=infestation, y=h2o)) + ylim(0.45,0.75) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none") +
labs(x="melaleuca leaves")
lma_mel <- ggplot(data = mel, aes(x=infestation, y=lma)) + ylim(0.015,0.03) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free") + theme(legend.position="none") +
labs(x="melaleuca leaves")
traits_mel<-grid.arrange(phi_mel, h2o_mel , lma_mel, nrow=3, ncol=1)
ggsave("output/traits_mel.png", plot = traits_mel, width = 20, height = 20, units = "cm", dpi = 300)
traits_all<-grid.arrange(phi_euc, phi_mis , phi_mel, h2o_euc , h2o_mis, h2o_mel, lma_euc,lma_mis, lma_mel, nrow=3, ncol=3)
ggsave("output/traits_all.png", plot = traits_all, width = 35, height = 20, units = "cm", dpi = 600)
| /scripts/traits.R | no_license | griebelchen/leaf_level | R | false | false | 4,101 | r | library("dplyr")
library("ggplot2")
library("gridExtra")
source("scripts/abbreviations.R")
df <- read.csv("raw_data/water_potentials_170517.csv", na.strings=c("","NA"),header=TRUE)
df$h2o<-(1-(df$dry_weight/df$fresh_weight))
df$lma<-df$dry_weight/df$leaf_area
df$phi<-df$water_potential*-1
df_euc<-subset(df, type == 'eucalypt')
df_mis<-subset(df, type == 'mistletoe')
fib<-subset(df, species == 'fibrosa')
mol<-subset(df, species == 'moluccana')
mel<-subset(df, species == 'melaleuca')
boxplot(water_potential~species,data=df)
boxplot(water_potential~infestation,data=df)
boxplot(water_potential~infestation,data=fib)
boxplot(water_potential~infestation,data=mol)
par(mfrow=c(3,1))
phi <- ggplot(data = df, aes(x=infestation, y=phi)) + ylim(-3.2,-1.4) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none")
h2o <- ggplot(data = df, aes(x=infestation, y=h2o)) + ylim(0.45,0.75) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none")
lma <- ggplot(data = df, aes(x=infestation, y=lma)) + ylim(0.015,0.03) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none")
traits<-grid.arrange(phi, h2o , lma, nrow=3, ncol=1)
ggsave("output/traits.png", plot = traits, width = 20, height = 20, units = "cm", dpi = 300)
phi_mis <- ggplot(data = df_mis, aes(x=infestation, y=phi)) + ylim(-3.2,-1.4) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none") +
labs(x="Mistletoe leaves")
h2o_mis <- ggplot(data = df_mis, aes(x=infestation, y=h2o)) + ylim(0.45,0.75) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none") +
labs(x="Mistletoe leaves")
lma_mis <- ggplot(data = df_mis, aes(x=infestation, y=lma)) + ylim(0.015,0.03) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none") +
labs(x="Mistletoe leaves")
traits_mis<-grid.arrange(phi_mis, h2o_mis , lma_mis, nrow=3, ncol=1)
ggsave("output/traits_mis.png", plot = traits_mis, width = 20, height = 20, units = "cm", dpi = 300)
phi_euc <- ggplot(data = df_euc, aes(x=infestation, y=phi)) + ylim(-3.2,-1.4) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none") +
labs(x="Eucalypt leaves")
h2o_euc <- ggplot(data = df_euc, aes(x=infestation, y=h2o)) + ylim(0.45,0.75) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none") +
labs(x="Eucalypt leaves")
lma_euc <- ggplot(data = df_euc, aes(x=infestation, y=lma)) + ylim(0.015,0.03) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free") + theme(legend.position="none") +
labs(x="Eucalypt leaves")
traits_euc<-grid.arrange(phi_euc, h2o_euc , lma_euc, nrow=3, ncol=1)
ggsave("output/traits_euc.png", plot = traits_euc, width = 20, height = 20, units = "cm", dpi = 300)
phi_mel <- ggplot(data = mel, aes(x=infestation, y=phi)) + ylim(-3.2,-1.4) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none") +
labs(x="melaleuca leaves")
h2o_mel <- ggplot(data = mel, aes(x=infestation, y=h2o)) + ylim(0.45,0.75) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free")+ theme(legend.position="none") +
labs(x="melaleuca leaves")
lma_mel <- ggplot(data = mel, aes(x=infestation, y=lma)) + ylim(0.015,0.03) +
theme_bw() + geom_boxplot(aes()) + facet_wrap( ~ species, scales="free") + theme(legend.position="none") +
labs(x="melaleuca leaves")
traits_mel<-grid.arrange(phi_mel, h2o_mel , lma_mel, nrow=3, ncol=1)
ggsave("output/traits_mel.png", plot = traits_mel, width = 20, height = 20, units = "cm", dpi = 300)
traits_all<-grid.arrange(phi_euc, phi_mis , phi_mel, h2o_euc , h2o_mis, h2o_mel, lma_euc,lma_mis, lma_mel, nrow=3, ncol=3)
ggsave("output/traits_all.png", plot = traits_all, width = 35, height = 20, units = "cm", dpi = 600)
|
run_analysis <- function (pref=getwd(), # Default is working directory
select_col="-mean\\()|-std\\()", # grep pattern to select columns to keep
summary_funs=mean, # desired summary function
nrows=-1) { # Option to limit the number of rows to proc
# getData(<subdir>)
# reads subject, activity, and measurement files and returns them in a dataframe after
# labeling the columns & converting the activity column to a text factor.
# NOTE: To speed up the loading & save memory, only the required columns are returned.
getData <- function(subdir) {
cbind( subject=read.table(paste(pref,"/",subdir,"/subject_",subdir,".txt",sep=""),
col.names=c("subject"),nrows=nrows,colClasses="numeric"),
activity=read.table(paste(pref,"/",subdir,"/y_",subdir,".txt",sep=""),
col.names=c("activity"),nrows=nrows,
colClasses="numeric"),
read.table(paste(pref,"/",subdir,"/X_",subdir,".txt",sep=""),
colClasses=selcols,col.names=collabels,nrows=nrows)
)
}
# Read features file. This will become the column headers once we read the measurement file.
# Also, at this point, select the columns that we need to keep so that the read goes faster
actlabels <- ((read.table(paste(pref,"/activity_labels.txt",sep=""),as.is=T))[,2])
collabels <- ((read.table(paste(pref,"/features.txt",sep=""),as.is=T))[,2])
selcols <- ifelse(grepl(select_col, collabels),"numeric","NULL")
# Make one big tidy table from the test & training data
getdata014_merged <<- rbind( getData("test"), getData("train") )
getdata014_merged$activity <<- factor(actlabels[getdata014_merged$activity],actlabels)
# group by activity & subject, and summerise each column
getdata014_report <- as.tbl(getdata014_merged) %>%
group_by(activity,subject) %>%
summarise_each(funs(summary_funs))
#
# write.table(getdata014_mean, file="getdata014_mean.txt", row.names=F)
# return summary table - just because.
getdata014_report
} | /run_analysis.R | no_license | cdesb/getdtata-014 | R | false | false | 2,587 | r | run_analysis <- function(pref = getwd(),                      # data directory (default: working directory)
                         select_col = "-mean\\()|-std\\()",   # grep pattern for measurement columns to keep
                         summary_funs = mean,                 # summary function applied per group
                         nrows = -1) {                        # row limit per file (-1 = all rows)
  # Builds a tidy per-activity / per-subject summary of the measurement files.
  # Side effects: assigns getdata014_merged (and relabels its activity column)
  # in the enclosing environment via <<-, as in the original implementation.
  # Read one split ("test" or "train"): subject ids, activity codes and the
  # selected measurement columns, combined column-wise into one data frame.
  read_split <- function(split) {
    subjects <- read.table(paste0(pref, "/", split, "/subject_", split, ".txt"),
                           col.names = c("subject"), nrows = nrows,
                           colClasses = "numeric")
    activities <- read.table(paste0(pref, "/", split, "/y_", split, ".txt"),
                             col.names = c("activity"), nrows = nrows,
                             colClasses = "numeric")
    measures <- read.table(paste0(pref, "/", split, "/X_", split, ".txt"),
                           colClasses = keep_types, col.names = feature_names,
                           nrows = nrows)
    cbind(subject = subjects, activity = activities, measures)
  }
  # Activity labels and feature names; keep_types marks which measurement
  # columns survive the read ("NULL" columns are skipped, which speeds it up).
  activity_labels <- read.table(paste0(pref, "/activity_labels.txt"), as.is = TRUE)[, 2]
  feature_names   <- read.table(paste0(pref, "/features.txt"), as.is = TRUE)[, 2]
  keep_types <- ifelse(grepl(select_col, feature_names), "numeric", "NULL")
  # One combined table for test + train, with activity as a labelled factor.
  getdata014_merged <<- rbind(read_split("test"), read_split("train"))
  getdata014_merged$activity <<- factor(activity_labels[getdata014_merged$activity],
                                        activity_labels)
  # Group by activity and subject and summarise every measurement column.
  as.tbl(getdata014_merged) %>%
    group_by(activity, subject) %>%
    summarise_each(funs(summary_funs))
}
# Builds the summary tables (Tables 4 and 5 of the paper) from pre-computed
# ATE simulation results.  NOTE(review): setwd() to a hard-coded path is kept
# because the readRDS() calls below use relative file names.
setwd('~/Documents/Github/paperOptBalGPPS/Simulations/ate-simresults/')
#-------------------------------------------------------------------------------
# Creating Table 4 -----
#-------------------------------------------------------------------------------
ateresults <- readRDS('2019-02-08-nonparametric_odd-atesim-results.rds')
# Monte Carlo estimate of the true ATE under effect modification: draw the
# covariates, simulate both potential-outcome surfaces, and average the
# unit-level treatment effects over many replications.
mc_est_sims <- 10000
n_obs <- 500
mc_res <- matrix(NA, nrow = mc_est_sims, ncol = 1)
for(mc in seq_len(mc_est_sims)){
  X1 <- rnorm(n_obs)
  # X2 is drawn but not used below -- presumably kept so the RNG stream
  # matches the simulation design; confirm before removing.
  X2 <- rbinom(n_obs, 1, prob = 0.4)
  Yt_em <- exp(X1) + 4 * X1 + 2 + rnorm(n_obs, sd = 0.5)
  Yc_em <- - X1^2 - exp(X1) + rnorm(n_obs, sd = 0.5)
  mc_res[mc,] <- mean(Yt_em - Yc_em)
}
true_ate_em <- mean(mc_res)
# True ATE for the linear-response design.
true_ate <- 3
# Proportion of replications where BOTH covariates are balanced within each
# absolute threshold (0.10 / 0.15 / 0.20), per method (columns).
meanbal1 <- apply(abs(ateresults$Cov1_Balance) < 0.1 & abs(ateresults$Cov2_Balance) < 0.1, 2, mean)
meanbal15 <- apply(abs(ateresults$Cov1_Balance) < 0.15 & abs(ateresults$Cov2_Balance) < 0.15, 2, mean)
meanbal2 <- apply(abs(ateresults$Cov1_Balance) < 0.2 & abs(ateresults$Cov2_Balance) < 0.2, 2, mean)
# Bias / variance / MSE summaries for the linear-response estimates.  Column 1
# is the naive estimator and serves as the reference for percent bias
# reduction.  ("niave" spelling kept to avoid renaming the existing global.)
biases_lin <- ateresults$LinearResults - true_ate
niave_bias <- matrix(rep(biases_lin[,1], 12), nrow=nrow(biases_lin), ncol=12)
lin_avg_bias <- apply(biases_lin, 2, mean)
lin_bias_red <- apply(1 - abs(biases_lin) / abs(niave_bias), 2, mean) * 100
lin_avg_absbias <- apply(abs(biases_lin), 2, mean)
lin_emp_std_err <- apply(ateresults$LinearResults, 2, sd)
lin_emp_mse <- apply(biases_lin^2, 2, mean)
# Same summaries for the effect-modification estimates.
biases_em <- ateresults$EffModResults - true_ate_em
niave_bias <- matrix(rep(biases_em[,1], 12), nrow=nrow(biases_em), ncol=12)
em_avg_bias <- apply(biases_em, 2, mean)
em_bias_red <- apply(1 - abs(biases_em) / abs(niave_bias), 2, mean) * 100
em_avg_absbias <- apply(abs(biases_em), 2, mean)
em_emp_std_err <- apply(ateresults$EffModResults, 2, sd)
em_emp_mse <- apply(biases_em^2, 2, mean)
# Stack all summaries (rows) and label the 12 methods (columns).
outro <- rbind(meanbal1, meanbal15, meanbal2,
               lin_avg_bias, lin_avg_absbias, lin_bias_red, lin_emp_std_err, lin_emp_mse,
               em_avg_bias, em_avg_absbias, em_bias_red, em_emp_std_err, em_emp_mse)
colnames(outro) <- c('NAIVE', 'TRUEPS', 'OBGPPS:NPSE', 'OBGPPS:SE',
                     'BART', 'GBM:KS.MEAN', 'GBM:ES.MEAN', 'GBM:ES.MAX',
                     'GLM:CORRECT', 'CBPS:CORRECT', 'GLM:MISSPECIFIED', 'CBPS:MISSPECIFIED')
xtable::xtable(t(outro), digits=3)
t(outro)
#-------------------------------------------------------------------------------
# Creating Table 5
#-------------------------------------------------------------------------------
# Same pipeline for the "even" design.  NOTE(review): true_ate_em computed for
# Table 4 is reused here -- confirm the effect-modification truth is identical
# across the odd/even designs.
ateresults <- readRDS('2019-02-08-nonparametric_even-atesim-results.rds')
true_ate <- 3
meanbal1 <- apply(abs(ateresults$Cov1_Balance) < 0.1 & abs(ateresults$Cov2_Balance) < 0.1, 2, mean)
meanbal15 <- apply(abs(ateresults$Cov1_Balance) < 0.15 & abs(ateresults$Cov2_Balance) < 0.15, 2, mean)
meanbal2 <- apply(abs(ateresults$Cov1_Balance) < 0.2 & abs(ateresults$Cov2_Balance) < 0.2, 2, mean)
biases_lin <- ateresults$LinearResults - true_ate
niave_bias <- matrix(rep(biases_lin[,1], 12), nrow=nrow(biases_lin), ncol=12)
lin_avg_bias <- apply(biases_lin, 2, mean)
lin_bias_red <- apply(1 - abs(biases_lin) / abs(niave_bias), 2, mean) * 100
lin_avg_absbias <- apply(abs(biases_lin), 2, mean)
lin_emp_std_err <- apply(ateresults$LinearResults, 2, sd)
lin_emp_mse <- apply(biases_lin^2, 2, mean)
biases_em <- ateresults$EffModResults - true_ate_em
niave_bias <- matrix(rep(biases_em[,1], 12), nrow=nrow(biases_em), ncol=12)
em_avg_bias <- apply(biases_em, 2, mean)
em_bias_red <- apply(1 - abs(biases_em) / abs(niave_bias), 2, mean) * 100
em_avg_absbias <- apply(abs(biases_em), 2, mean)
em_emp_std_err <- apply(ateresults$EffModResults, 2, sd)
em_emp_mse <- apply(biases_em^2, 2, mean)
outro <- rbind(meanbal1, meanbal15, meanbal2,
               lin_avg_bias, lin_avg_absbias, lin_bias_red, lin_emp_std_err, lin_emp_mse,
               em_avg_bias, em_avg_absbias, em_bias_red, em_emp_std_err, em_emp_mse)
colnames(outro) <- c('NAIVE', 'TRUEPS', 'OBGPPS:NPSE', 'OBGPPS:SE',
                     'BART', 'GBM:KS.MEAN', 'GBM:ES.MEAN', 'GBM:ES.MAX',
                     'GLM:CORRECT', 'CBPS:CORRECT', 'GLM:MISSPECIFIED', 'CBPS:MISSPECIFIED')
xtable::xtable(t(outro), digits=3)
t(outro)
| /Simulations/02-atesim-tablesforpaper.R | no_license | bvegetabile/paperOptBalGPPS | R | false | false | 4,176 | r | setwd('~/Documents/Github/paperOptBalGPPS/Simulations/ate-simresults/')
#-------------------------------------------------------------------------------
# Creating Table 4 -----
#-------------------------------------------------------------------------------
# Summaries for the "odd" nonparametric ATE simulation design.
ateresults <- readRDS('2019-02-08-nonparametric_odd-atesim-results.rds')
# Monte Carlo approximation of the true ATE under effect modification.
mc_est_sims <- 10000
n_obs <- 500
mc_res <- matrix(NA, nrow = mc_est_sims, ncol = 1)
for (mc in seq_len(mc_est_sims)) {
  X1 <- rnorm(n_obs)
  X2 <- rbinom(n_obs, 1, prob = 0.4)
  Yt_em <- exp(X1) + 4 * X1 + 2 + rnorm(n_obs, sd = 0.5)
  Yc_em <- - X1^2 - exp(X1) + rnorm(n_obs, sd = 0.5)
  mc_res[mc, ] <- mean(Yt_em - Yc_em)
}
true_ate_em <- mean(mc_res)
true_ate <- 3
# Share of replications with both covariates balanced within each threshold.
meanbal1 <- colMeans(abs(ateresults$Cov1_Balance) < 0.1 & abs(ateresults$Cov2_Balance) < 0.1)
meanbal15 <- colMeans(abs(ateresults$Cov1_Balance) < 0.15 & abs(ateresults$Cov2_Balance) < 0.15)
meanbal2 <- colMeans(abs(ateresults$Cov1_Balance) < 0.2 & abs(ateresults$Cov2_Balance) < 0.2)
# Bias summaries for the linear-response estimates; column 1 (naive) is the
# reference for bias reduction.
biases_lin <- ateresults$LinearResults - true_ate
niave_bias <- matrix(biases_lin[, 1], nrow = nrow(biases_lin), ncol = 12)
lin_avg_bias <- colMeans(biases_lin)
lin_bias_red <- colMeans(1 - abs(biases_lin) / abs(niave_bias)) * 100
lin_avg_absbias <- colMeans(abs(biases_lin))
lin_emp_std_err <- apply(ateresults$LinearResults, 2, sd)
lin_emp_mse <- colMeans(biases_lin^2)
# Bias summaries for the effect-modification estimates.
biases_em <- ateresults$EffModResults - true_ate_em
niave_bias <- matrix(biases_em[, 1], nrow = nrow(biases_em), ncol = 12)
em_avg_bias <- colMeans(biases_em)
em_bias_red <- colMeans(1 - abs(biases_em) / abs(niave_bias)) * 100
em_avg_absbias <- colMeans(abs(biases_em))
em_emp_std_err <- apply(ateresults$EffModResults, 2, sd)
em_emp_mse <- colMeans(biases_em^2)
# Assemble and label the summary table.
outro <- rbind(meanbal1, meanbal15, meanbal2,
               lin_avg_bias, lin_avg_absbias, lin_bias_red, lin_emp_std_err, lin_emp_mse,
               em_avg_bias, em_avg_absbias, em_bias_red, em_emp_std_err, em_emp_mse)
colnames(outro) <- c('NAIVE', 'TRUEPS', 'OBGPPS:NPSE', 'OBGPPS:SE',
                     'BART', 'GBM:KS.MEAN', 'GBM:ES.MEAN', 'GBM:ES.MAX',
                     'GLM:CORRECT', 'CBPS:CORRECT', 'GLM:MISSPECIFIED', 'CBPS:MISSPECIFIED')
xtable::xtable(t(outro), digits=3)
t(outro)
#-------------------------------------------------------------------------------
# Creating Table 5
#-------------------------------------------------------------------------------
# Same pipeline for the "even" design (true_ate_em reused from above).
ateresults <- readRDS('2019-02-08-nonparametric_even-atesim-results.rds')
true_ate <- 3
meanbal1 <- colMeans(abs(ateresults$Cov1_Balance) < 0.1 & abs(ateresults$Cov2_Balance) < 0.1)
meanbal15 <- colMeans(abs(ateresults$Cov1_Balance) < 0.15 & abs(ateresults$Cov2_Balance) < 0.15)
meanbal2 <- colMeans(abs(ateresults$Cov1_Balance) < 0.2 & abs(ateresults$Cov2_Balance) < 0.2)
biases_lin <- ateresults$LinearResults - true_ate
niave_bias <- matrix(biases_lin[, 1], nrow = nrow(biases_lin), ncol = 12)
lin_avg_bias <- colMeans(biases_lin)
lin_bias_red <- colMeans(1 - abs(biases_lin) / abs(niave_bias)) * 100
lin_avg_absbias <- colMeans(abs(biases_lin))
lin_emp_std_err <- apply(ateresults$LinearResults, 2, sd)
lin_emp_mse <- colMeans(biases_lin^2)
biases_em <- ateresults$EffModResults - true_ate_em
niave_bias <- matrix(biases_em[, 1], nrow = nrow(biases_em), ncol = 12)
em_avg_bias <- colMeans(biases_em)
em_bias_red <- colMeans(1 - abs(biases_em) / abs(niave_bias)) * 100
em_avg_absbias <- colMeans(abs(biases_em))
em_emp_std_err <- apply(ateresults$EffModResults, 2, sd)
em_emp_mse <- colMeans(biases_em^2)
outro <- rbind(meanbal1, meanbal15, meanbal2,
               lin_avg_bias, lin_avg_absbias, lin_bias_red, lin_emp_std_err, lin_emp_mse,
               em_avg_bias, em_avg_absbias, em_bias_red, em_emp_std_err, em_emp_mse)
colnames(outro) <- c('NAIVE', 'TRUEPS', 'OBGPPS:NPSE', 'OBGPPS:SE',
                     'BART', 'GBM:KS.MEAN', 'GBM:ES.MEAN', 'GBM:ES.MAX',
                     'GLM:CORRECT', 'CBPS:CORRECT', 'GLM:MISSPECIFIED', 'CBPS:MISSPECIFIED')
xtable::xtable(t(outro), digits=3)
t(outro)
|
prior_val_tbl <- reactive({
req(input$val_date_prior)
loss_run(input$val_date_prior) %>%
select(claim_num, paid, reported)
})
changes_prep <- reactive({
out <- val_tbl() %>%
select(claim_num, accident_date, paid, reported)
out <- left_join(out, prior_val_tbl(), by = "claim_num") %>%
mutate(paid_change = paid.x - paid.y,
reported_change = reported.x - reported.y) %>%
filter(paid_change != 0 | is.na(paid_change) | reported_change != 0) %>%
arrange(desc(paid_change)) %>%
mutate(new_claim = ifelse(is.na(paid.y), "New", "Existing"),
ay = year(accident_date)) %>%
filter(new_claim %in% input$changes_new,
ay %in% input$changes_ay) %>%
select(-new_claim, -ay)
out
})
output$changes_title <- renderText({
paste0(
"From ",
input$val_date_prior,
" to ",
input$val_date
)
})
output$changes_tbl <- DT::renderDataTable({
out <- changes_prep()
# for some reason I can't include these in the tags
t1 <- paste0("As of ", input$val_date)
t2 <- paste0("As of ", input$val_date_prior)
t3 <- paste0("Change from ", input$val_date_prior,
" to ", input$val_date)
col_headers <- htmltools::withTags(
table(
thead(
tr(
th(rowspan = 2, "Claim Number", class = "dt-border-left dt-border-right dt-border-top"),
th(rowspan = 2, "Accident Date", class = "dt-border-right dt-border-top"),
th(colspan = 2, t1, class = "dt-border-right dt-border-top"),
th(colspan = 2, t2, class = "dt-border-right dt-border-top"),
th(colspan = 2, t3, class = "dt-border-right dt-border-top")
),
tr(
th("Paid"),
th("Reported", class = "dt-border-right"),
th("Paid"),
th("Reported", class = "dt-border-right"),
th("Paid"),
th("Reported", class = "dt-border-right")
)
)
)
)
datatable(
out,
rownames = FALSE,
container = col_headers,
class = "stripe cell-border",
extensions = "Buttons",
options = list(
dom = 'Brtip',
#scrollX = TRUE,
buttons = list(
list(
extend = 'collection',
buttons = c('csv', 'excel', 'pdf'),
text = 'Download'
)
)
)
) %>%
formatCurrency(
columns = 3:8,
currency = "",
digits = 0
)
}, server = FALSE)
| /basic-insurer-dashboard/server/02-changes-srv.R | permissive | manniealfaro/shiny-insurance-examples | R | false | false | 2,439 | r |
prior_val_tbl <- reactive({
req(input$val_date_prior)
loss_run(input$val_date_prior) %>%
select(claim_num, paid, reported)
})
changes_prep <- reactive({
out <- val_tbl() %>%
select(claim_num, accident_date, paid, reported)
out <- left_join(out, prior_val_tbl(), by = "claim_num") %>%
mutate(paid_change = paid.x - paid.y,
reported_change = reported.x - reported.y) %>%
filter(paid_change != 0 | is.na(paid_change) | reported_change != 0) %>%
arrange(desc(paid_change)) %>%
mutate(new_claim = ifelse(is.na(paid.y), "New", "Existing"),
ay = year(accident_date)) %>%
filter(new_claim %in% input$changes_new,
ay %in% input$changes_ay) %>%
select(-new_claim, -ay)
out
})
output$changes_title <- renderText({
paste0(
"From ",
input$val_date_prior,
" to ",
input$val_date
)
})
output$changes_tbl <- DT::renderDataTable({
out <- changes_prep()
# for some reason I can't include these in the tags
t1 <- paste0("As of ", input$val_date)
t2 <- paste0("As of ", input$val_date_prior)
t3 <- paste0("Change from ", input$val_date_prior,
" to ", input$val_date)
col_headers <- htmltools::withTags(
table(
thead(
tr(
th(rowspan = 2, "Claim Number", class = "dt-border-left dt-border-right dt-border-top"),
th(rowspan = 2, "Accident Date", class = "dt-border-right dt-border-top"),
th(colspan = 2, t1, class = "dt-border-right dt-border-top"),
th(colspan = 2, t2, class = "dt-border-right dt-border-top"),
th(colspan = 2, t3, class = "dt-border-right dt-border-top")
),
tr(
th("Paid"),
th("Reported", class = "dt-border-right"),
th("Paid"),
th("Reported", class = "dt-border-right"),
th("Paid"),
th("Reported", class = "dt-border-right")
)
)
)
)
datatable(
out,
rownames = FALSE,
container = col_headers,
class = "stripe cell-border",
extensions = "Buttons",
options = list(
dom = 'Brtip',
#scrollX = TRUE,
buttons = list(
list(
extend = 'collection',
buttons = c('csv', 'excel', 'pdf'),
text = 'Download'
)
)
)
) %>%
formatCurrency(
columns = 3:8,
currency = "",
digits = 0
)
}, server = FALSE)
|
library(evd)
SimulateACD <- function(param, distrib, offset = 200, num.n = 1000, num.rep = 1000) {
  # Simulates num.rep replications of an ACD (autoregressive conditional
  # duration) time series, each of length num.n, after discarding `offset`
  # burn-in observations per replication.
  #
  # Args:
  #   param: numeric vector of true parameters: (w, a, b) for "exp",
  #          (r, w, a, b) for "weibull"/"frechet", where r is the innovation
  #          shape and (w, a, b) drive the recursion
  #          dur[t] = w + a * x[t-1] + b * dur[t-1].
  #   distrib: innovation distribution, one of "exp", "weibull", "frechet"
  #          ("frechet" requires the evd package loaded at the top of the file).
  #   offset: number of initial burn-in observations to discard.
  #   num.n: length of each simulated series.
  #   num.rep: number of replications.
  #
  # Returns:
  #   a num.n x num.rep numeric matrix; column j holds replication j.
  # Fail fast on an unsupported distribution instead of erroring later with
  # "object 'rand.vec' not found".
  distrib <- match.arg(distrib, c("exp", "weibull", "frechet"))
  if (distrib == "exp") {
    w <- param[1]
    a <- param[2]
    b <- param[3]
  } else {
    r <- param[1]
    w <- param[2]
    a <- param[3]
    b <- param[4]
  }
  num.rn <- (offset + num.n) * num.rep
  # Draw all innovations up front; the scale factors make each innovation
  # have mean one (for frechet this requires r > 1).
  rand.vec <- switch(distrib,
    exp     = rexp(num.rn, rate = 1),
    weibull = rweibull(num.rn, r, scale = 1 / gamma(1 + 1 / r)),
    frechet = rfrechet(num.rn, shape = r, scale = 1 / gamma(1 - 1 / r))
  )
  rand.mat <- matrix(rand.vec, nrow = offset + num.n, ncol = num.rep)
  # x.vec[1] and dur.vec[1] stay 0, so every replication starts from the
  # same initial condition.
  x.vec <- rep(0, 1 + offset + num.n)
  dur.vec <- rep(0, 1 + offset + num.n)
  x.mat <- matrix(nrow = num.n, ncol = num.rep)
  # Recursively compute conditional durations and observed durations.
  for (j in seq_len(num.rep)) {
    for (i in seq_len(offset + num.n)) {
      dur.vec[1 + i] <- w + a * x.vec[i] + b * dur.vec[i]
      x.vec[1 + i] <- dur.vec[1 + i] * rand.mat[i, j]
    }
    # Drop the burn-in; keep the last num.n observations.
    x.mat[, j] <- x.vec[(offset + 2):(1 + offset + num.n)]
  }
  x.mat
}
| /Real_data/Sim_code/SimulateACD.R | no_license | EricaZ/FACD-model | R | false | false | 1,622 | r | library(evd)
SimulateACD <- function(param, distrib, offset = 200, num.n = 1000, num.rep = 1000) {
  # Simulate num.rep independent ACD duration series of length num.n each,
  # discarding the first `offset` burn-in values of every replication.
  # param holds (w, a, b) for "exp" and (r, w, a, b) for "weibull"/"frechet";
  # the result is a num.n x num.rep matrix, one replication per column.
  if (distrib == "exp") {
    w <- param[1]
    a <- param[2]
    b <- param[3]
  }
  if (distrib %in% c("weibull", "frechet")) {
    r <- param[1]
    w <- param[2]
    a <- param[3]
    b <- param[4]
  }
  n.draws <- (offset + num.n) * num.rep
  # All innovations are drawn in a single call (scale set so the mean is one).
  if (distrib == "frechet") {
    innov <- rfrechet(n.draws, shape = r, scale = 1 / gamma(1 - 1 / r))
  }
  if (distrib == "exp") {
    innov <- rexp(n.draws, rate = 1)
  }
  if (distrib == "weibull") {
    innov <- rweibull(n.draws, r, scale = 1 / gamma(1 + 1 / r))
  }
  eps <- matrix(innov, nrow = offset + num.n, ncol = num.rep)
  # State vectors start at zero, so each replication shares the same
  # initial condition (index 1 is never overwritten).
  horizon <- offset + num.n
  x.path <- rep(0, 1 + horizon)
  psi <- rep(0, 1 + horizon)
  sims <- matrix(nrow = num.n, ncol = num.rep)
  # ACD recursion: conditional duration psi, observed duration x.path.
  for (k in seq_len(num.rep)) {
    for (t in seq_len(horizon)) {
      psi[t + 1] <- w + a * x.path[t] + b * psi[t]
      x.path[t + 1] <- psi[t + 1] * eps[t, k]
    }
    sims[, k] <- x.path[(offset + 2):(1 + horizon)]
  }
  return(sims)
}
|
#' Lorenz Curve Plot
#'
#' Outputs a Lorenz curve for each model column (and for the observed column):
#' rows are sorted by the column under evaluation, cumulative exposure is
#' binned into N.BKTS groups (for speed), and cumulative observed losses are
#' plotted against cumulative exposure.
#'
#' @param DATA Dataframe containing the predicted, observed and exposure
#' @param NAMES \itemize{
#'    \item{MODELS}{Vector of names of the columns with the model predictions}
#'    \item{OBSERVED}{Column name of the observed variable}
#'    \item{EXPOSURE}{Column name of the exposure variable}
#' }
#' @param PATH Path to which the graph will be exported to. (Default \code{NULL} will display the graph instead of exporting)
#' @param SAMPLE If the data is too large you may set what proportion of the data you want it to use. (E.g. .5 will use half the data)
#' \code{NULL} will not use a sample.
#' @param DATA.ONLY TRUE will simply return a table instead of the plot
#' @param N.BKTS Number of exposure buckets. Fewer buckets are faster but more approximate.
#'
#' @return Either a .png file in the path or output a graph in R
#'
#' @export
Plot.Lorenz<- function(DATA,
                       NAMES = list(MODELS = NULL,
                                    OBSERVED = NULL,
                                    EXPOSURE = NULL),
                       PATH = NULL,
                       SAMPLE = NULL,
                       DATA.ONLY = FALSE,
                       N.BKTS = 50){
  library(dplyr)
  library(ggplot2)
  # Optionally subsample the rows (much faster for large datasets).
  # floor() keeps sample() valid when nrow(DATA) * SAMPLE is fractional.
  if(is.null(SAMPLE)){
    DATA <- DATA[, c(NAMES$MODELS, NAMES$OBSERVED, NAMES$EXPOSURE)]
  }else{
    DATA <- DATA[sample(nrow(DATA), floor(nrow(DATA) * SAMPLE)), c(NAMES$MODELS, NAMES$OBSERVED, NAMES$EXPOSURE)]
  }
  # Calculate Earned Loss Cost (i.e. apply the exposure weights) so the
  # predictions are on the same basis as the observed losses.
  DATA[,NAMES$MODELS] <- DATA[,NAMES$MODELS] * DATA[,NAMES$EXPOSURE]
  # Calculate the Lorenz curve for each model and for the observed response.
  LORENZ.DT <- data.frame()
  for(i in c(NAMES$MODELS, NAMES$OBSERVED)){
    # Sort by column i (the series this curve evaluates).
    TMP<-DATA %>%
      arrange_(i)
    # Accumulate exposure share, then bucket it to reduce the number of
    # plotted points; levels.mean = TRUE labels each bucket with its mean.
    TMP$Cumulative_Xpo <- cumsum(TMP[,NAMES$EXPOSURE]) / sum(TMP[,NAMES$EXPOSURE])
    TMP$Cumulative_Xpo <- Hmisc::cut2(TMP$Cumulative_Xpo, g = N.BKTS, levels.mean = TRUE)
    TMP<-TMP %>%
      group_by(Cumulative_Xpo) %>%
      summarize_each(funs(sum))
    TMP<-as.data.frame(TMP)
    TMP$Cumulative_Xpo <- as.numeric(as.character(TMP$Cumulative_Xpo))
    TMP$Cumulative_Losses = cumsum(TMP[,NAMES$OBSERVED])/sum(TMP[,NAMES$OBSERVED])
    LORENZ.DT <- rbind(LORENZ.DT, data.frame( Model = i, TMP[,c("Cumulative_Xpo", "Cumulative_Losses") ]))
  }
  colnames(LORENZ.DT) <- c("Model","Cumulative_Xpo", "Cumulative_Losses")
  if(DATA.ONLY){
    return(LORENZ.DT)
  }
  Lorenz <- ggplot(LORENZ.DT, aes(y = Cumulative_Losses, x = Cumulative_Xpo, group = Model, color = Model) ) +
    geom_line() +
    scale_color_brewer(palette="Set2") +
    theme(axis.text.x = element_text(angle = 45, hjust = 1,size=12),
          axis.text.y = element_text(size=12),
          legend.position="bottom",
          legend.title=element_blank(),
          legend.text = element_text(size=12))
  ## Simply return the plot, or save it to PATH as a .png.
  if(is.null(PATH)){
    return(Lorenz)
  }else{
    cat(paste0("Lorenz plot ",NAMES$OBSERVED," outputted to ",PATH," \n") )
    png(paste0(PATH,"/Lorenz_",NAMES$OBSERVED,".png"))
    print(Lorenz)
    dev.off()
  }
}
#' Model Charts
#'
#' Outputs the double lift chart (loss ratio proof) or a predicted vs observed
#' plot, bucketed along a chosen variable and optionally split by a factor.
#'
#' @param DATA Dataframe containing the predicted, observed and exposure
#' @param NAMES \itemize{
#'    \item{MODELS}{Vector of names of the columns with the model predictions (non-earned)}
#'    \item{OBSERVED}{Column name of the observed variable}
#'    \item{EXPOSURE}{Column name of the exposure variable}
#'    \item{VARIABLE}{Column name of the variable with respect to which you want the LR proof (i.e. the x-axis)}
#'    \item{SPLIT.BY}{Column name of the factor variable by which you want to split the LR proofs by}
#' }
#' @param PATH Path to which the graph will be exported to. (Default \code{NULL} will display the graph instead of exporting)
#' @param CUTS Optional cut points for a numeric VARIABLE; leave \code{NULL} for automatic exposure-weighted quantile buckets.
#' @param MODE Can choose between \code{"LR"} to produce double lift chart (LR Proof) or \code{"PvO"} to produce a predicted vs observed plot.
#' @param DATA.ONLY TRUE will simply return a table instead of the plot
#' @param N.BKTS Number of buckets used when VARIABLE is numeric. Fewer buckets are faster but more approximate.
#'
#' @return Either a .png file in the path or output a graph in R
#'
#' @export
Plot.Chart <- function(DATA,
                       NAMES = list(MODELS = NULL,
                                    OBSERVED = NULL,
                                    EXPOSURE = NULL,
                                    VARIABLE = NULL,
                                    SPLIT.BY = NULL),
                       PATH = NULL,
                       CUTS = NULL,
                       MODE = "LR",
                       DATA.ONLY = FALSE,
                       N.BKTS = 20){
  library(Hmisc)
  library(dplyr)
  library(reshape2)
  library(RColorBrewer)
  # Keep only the columns the chart needs.
  DATA <- DATA[,c(NAMES$SPLIT.BY,
                  NAMES$EXPOSURE,
                  NAMES$VARIABLE,
                  NAMES$MODELS,
                  NAMES$OBSERVED)]
  ## Earn the fitted data so predictions are on the same basis as observed.
  DATA[,NAMES$MODELS] = DATA[,NAMES$MODELS]* DATA[,NAMES$EXPOSURE]
  # Bucket a numeric VARIABLE.  Fix: user-supplied CUTS used to be ignored
  # (they were unconditionally overwritten); now the exposure-weighted
  # quantile cuts are only computed when CUTS is NULL.
  if (is.numeric( DATA[,NAMES$VARIABLE] )){
    if(is.null(CUTS)){
      CUTS <- Hmisc::wtd.quantile(DATA[,NAMES$VARIABLE],
                                  weights = DATA[,NAMES$EXPOSURE],
                                  probs = seq(1/N.BKTS, 1 - 1/N.BKTS, by = 1/N.BKTS))
    }
    DATA[,NAMES$VARIABLE] <- as.factor( cut2(DATA[,NAMES$VARIABLE], cuts=CUTS))
  }else{
    DATA[,NAMES$VARIABLE] <- as.factor(DATA[,NAMES$VARIABLE])
  }
  # Aggregate: sum models, observed, exposure and observation counts within
  # each VARIABLE bucket (and within SPLIT.BY levels plus a "Combined" level
  # when SPLIT.BY is given).
  if(is.null(NAMES$SPLIT.BY)){
    DATA.AGG <- data.frame(NB.OBS = rep(1,nrow(DATA)), DATA)
    DATA.AGG <- DATA.AGG %>%
      group_by_(NAMES$VARIABLE) %>%
      summarise_each_(funs(sum), c(NAMES$MODELS,
                                   NAMES$OBSERVED,
                                   "NB.OBS",
                                   NAMES$EXPOSURE)) %>%
      mutate()
    DATA.AGG <- as.data.frame(DATA.AGG)
  }else{
    DATA.AGG <- DATA.AGG1 <- data.frame(NB.OBS = rep(1,nrow(DATA)), DATA)
    # Overall ("Combined") aggregation, ignoring SPLIT.BY.
    DATA.AGG1 <- DATA.AGG1[,!(names(DATA.AGG1) %in% NAMES$SPLIT.BY)] %>%
      group_by_(NAMES$VARIABLE) %>%
      summarise_each_(funs(sum), c(NAMES$MODELS,
                                   NAMES$OBSERVED,
                                   "NB.OBS",
                                   NAMES$EXPOSURE)) %>%
      mutate()
    DATA.AGG1 <- data.frame( TEMP="Combined", as.data.frame(DATA.AGG1))
    names(DATA.AGG1)[which(names(DATA.AGG1)=="TEMP")] = NAMES$SPLIT.BY
    # Per-level aggregation, then stack with the "Combined" rows.
    DATA.AGG <- DATA.AGG %>%
      group_by_(NAMES$SPLIT.BY,NAMES$VARIABLE) %>%
      summarise_each_(funs(sum), c(NAMES$MODELS,
                                   NAMES$OBSERVED,
                                   "NB.OBS",
                                   NAMES$EXPOSURE)) %>%
      mutate()
    DATA.AGG<- rbind(as.data.frame(DATA.AGG),
                     DATA.AGG1)
  }
  # Build the curve columns: loss ratios (capped at 5) for "LR", average
  # losses per unit of exposure for "PvO".
  if(MODE == "LR"){
    DATA.AGG = DATA.AGG %>%
      cbind(., DATA.AGG[,NAMES$OBSERVED] / DATA.AGG[,c(NAMES$MODELS)])
    NAMES$CURVES = paste0("C_", NAMES$MODELS)
    names(DATA.AGG)[(length(DATA.AGG)-length(NAMES$MODELS) + 1):length(DATA.AGG)] = NAMES$CURVES
    if(length(NAMES$CURVES) > 1){
      DATA.AGG[,NAMES$CURVES] = apply(DATA.AGG[,NAMES$CURVES],2,function(x){x[x>5] = 5; return(x)})
    }else{
      DATA.AGG[DATA.AGG[,NAMES$CURVES]>5, NAMES$CURVES] = 5
    }
  }else if(MODE == "PvO"){
    DATA.AGG = DATA.AGG %>%
      cbind(., DATA.AGG[,c(NAMES$OBSERVED,NAMES$MODELS)]/DATA.AGG[,NAMES$EXPOSURE])
    NAMES$CURVES = paste0("C_", c(NAMES$OBSERVED,NAMES$MODELS))
    names(DATA.AGG)[(length(DATA.AGG)-length(NAMES$MODELS)):length(DATA.AGG)] = NAMES$CURVES
  }else{
    cat("MODE not supported, only 'LR' & 'PvO' are supported! \n")
    return(0)
  }
  if(DATA.ONLY){
    return(DATA.AGG)
  }
  # Long format for plotting; variable2 identifies one curve per
  # (SPLIT.BY level, series) pair.
  if(is.null(NAMES$SPLIT.BY)){
    DATA.PL <- melt(DATA.AGG , id=NAMES$VARIABLE)
  }else{
    DATA.PL <- melt(DATA.AGG , id=c(NAMES$SPLIT.BY,NAMES$VARIABLE))
    # Fix: paste0() has no `sep` argument -- the old call pasted " - " onto
    # the end of the concatenated pair, so distinct (split, bucket) pairs
    # could collide.  paste(..., sep = " - ") builds the intended
    # "split - bucket" key.
    DATA.PL$variable2 = as.factor(paste(DATA.PL[,NAMES$SPLIT.BY], DATA.PL[,NAMES$VARIABLE], sep = " - "))
  }
  # Rescale exposure so its bars sit under the curves (25% of median height).
  DATA.PL[DATA.PL$variable == NAMES$EXPOSURE, "value"] <- DATA.PL[DATA.PL$variable == NAMES$EXPOSURE, "value"] /
    max(DATA.PL[DATA.PL$variable == NAMES$EXPOSURE, "value"]) *
    median(DATA.PL[DATA.PL$variable %in% NAMES$CURVES, "value"])*.25
  # Exposure as bars, one line + points per curve.
  if(is.null(NAMES$SPLIT.BY)){
    Final.Chart <- ggplot(DATA.PL, aes_string(x = NAMES$VARIABLE, y = "value", group= "variable", color="variable", shape="variable") ) +
      geom_bar(data= DATA.PL[DATA.PL$variable == NAMES$EXPOSURE,], stat="identity",alpha=.75,fill="gold2",colour=NA,show.legend=FALSE) +
      geom_line(data= DATA.PL[DATA.PL$variable %in% NAMES$CURVES,], size=1) +
      geom_point(data= DATA.PL[DATA.PL$variable %in% NAMES$CURVES,], aes(shape=variable,size=.5)) +
      scale_color_brewer(palette="Set2") +
      theme(axis.text.x = element_text(angle = 45, hjust = 1,size=12),
            axis.text.y = element_text(size=12),
            legend.position="bottom",
            legend.title=element_blank(),
            legend.text = element_text(size=12))
    if(MODE=="LR"){
      Final.Chart = Final.Chart + labs(x = NAMES$VARIABLE,
                                       y = "Loss Ratio (capped at 500%)")
    }else if(MODE=="PvO"){
      Final.Chart = Final.Chart + labs(x = NAMES$VARIABLE,
                                       y = "Average Loss")
    }
  }else{
    Final.Chart <- ggplot(DATA.PL, aes_string(x = NAMES$VARIABLE, y = "value", group="variable2", color=NAMES$SPLIT.BY, shape="variable", linetype="variable")) +
      geom_bar(data= DATA.PL[DATA.PL$variable == NAMES$EXPOSURE,], stat="identity",alpha=.75,fill="gold2",colour=NA,show.legend=FALSE) +
      geom_line(data= DATA.PL[DATA.PL$variable %in% NAMES$CURVES,], size=1) +
      geom_point(data= DATA.PL[DATA.PL$variable %in% NAMES$CURVES,], aes(shape=variable,size=.5)) +
      scale_color_brewer("YlGn") +
      theme(axis.text.x = element_text(angle = 45, hjust = 1,size=12),
            axis.text.y = element_text(size=12),
            legend.position="bottom",
            legend.title=element_blank(),
            legend.text = element_text(size=12))
    if(MODE=="LR"){
      Final.Chart = Final.Chart + labs(x = NAMES$VARIABLE,
                                       y = "Loss Ratio (capped at 500%)")
    }else if(MODE=="PvO"){
      Final.Chart = Final.Chart + labs(x = NAMES$VARIABLE,
                                       y = "Average Loss")
    }
  }
  # Return the plot, or write it to PATH as a .png.
  if(is.null(PATH)){
    return(Final.Chart)
  }else{
    cat(paste0("Plot ",NAMES$VARIABLE," outputted to ",PATH," \n") )
    png(paste0(PATH,"/",NAMES$VARIABLE,".png"))
    print(Final.Chart)
    dev.off()
  }
}
| /Plotting.R | no_license | JohnOkoth/pmlwriteup | R | false | false | 11,450 | r | #' Lorenz Curve Plot
#'
#' Draws a Lorenz curve for every model column (plus the observed column):
#' rows are ordered by the column under evaluation, exposure is accumulated
#' and binned into N.BKTS groups, and cumulative observed losses are plotted
#' against cumulative exposure.
#'
#' @param DATA Dataframe containing the predicted, observed and exposure
#' @param NAMES \itemize{
#'    \item{MODELS}{Vector of names of the columns with the model predictions}
#'    \item{OBSERVED}{Column name of the observed variable}
#'    \item{EXPOSURE}{Column name of the exposure variable}
#' }
#' @param PATH Path to which the graph will be exported to. (Default \code{NULL} will display the graph instead of exporting)
#' @param SAMPLE Optional proportion of rows to use (e.g. .5 uses half the data); \code{NULL} uses everything.
#' @param DATA.ONLY TRUE will simply return a table instead of the plot
#' @param N.BKTS Number of exposure buckets; fewer buckets are faster but more approximate
#'
#' @return Either a .png file in the path or output a graph in R
#'
#' @export
Plot.Lorenz<- function(DATA,
                       NAMES = list(MODELS = NULL,
                                    OBSERVED = NULL,
                                    EXPOSURE = NULL),
                       PATH = NULL,
                       SAMPLE = NULL,
                       DATA.ONLY = FALSE,
                       N.BKTS = 50){
  library(dplyr)
  library(ggplot2)
  # Keep only the needed columns, optionally on a row subsample
  # (subsampling is much faster for large datasets).
  keep_cols <- c(NAMES$MODELS, NAMES$OBSERVED, NAMES$EXPOSURE)
  if (is.null(SAMPLE)) {
    DATA <- DATA[, keep_cols]
  } else {
    DATA <- DATA[sample(nrow(DATA), nrow(DATA) * SAMPLE), keep_cols]
  }
  # Earn the predictions so they are on the same basis as the observed.
  DATA[, NAMES$MODELS] <- DATA[, NAMES$MODELS] * DATA[, NAMES$EXPOSURE]
  # One curve per model column, plus one for the observed response.
  lorenz_pts <- data.frame()
  for (curve_col in c(NAMES$MODELS, NAMES$OBSERVED)) {
    ordered_dt <- DATA %>%
      arrange_(curve_col)
    # Cumulative exposure share, binned so large datasets stay plottable.
    ordered_dt$Cumulative_Xpo <- cumsum(ordered_dt[, NAMES$EXPOSURE]) / sum(ordered_dt[, NAMES$EXPOSURE])
    ordered_dt$Cumulative_Xpo <- Hmisc::cut2(ordered_dt$Cumulative_Xpo, g = N.BKTS, levels.mean = TRUE)
    ordered_dt <- ordered_dt %>%
      group_by(Cumulative_Xpo) %>%
      summarize_each(funs(sum))
    ordered_dt <- as.data.frame(ordered_dt)
    ordered_dt$Cumulative_Xpo <- as.numeric(as.character(ordered_dt$Cumulative_Xpo))
    ordered_dt$Cumulative_Losses <- cumsum(ordered_dt[, NAMES$OBSERVED]) / sum(ordered_dt[, NAMES$OBSERVED])
    lorenz_pts <- rbind(lorenz_pts,
                        data.frame(Model = curve_col,
                                   ordered_dt[, c("Cumulative_Xpo", "Cumulative_Losses")]))
  }
  colnames(lorenz_pts) <- c("Model", "Cumulative_Xpo", "Cumulative_Losses")
  if (DATA.ONLY) {
    return(lorenz_pts)
  }
  lorenz_plot <- ggplot(lorenz_pts, aes(y = Cumulative_Losses, x = Cumulative_Xpo, group = Model, color = Model)) +
    geom_line() +
    scale_color_brewer(palette="Set2") +
    theme(axis.text.x = element_text(angle = 45, hjust = 1, size = 12),
          axis.text.y = element_text(size = 12),
          legend.position = "bottom",
          legend.title = element_blank(),
          legend.text = element_text(size = 12))
  # Either hand the plot back or write it out as a .png.
  if (is.null(PATH)) {
    return(lorenz_plot)
  } else {
    cat(paste0("Lorenz plot ",NAMES$OBSERVED," outputted to ",PATH," \n") )
    png(paste0(PATH,"/Lorenz_",NAMES$OBSERVED,".png"))
    print(lorenz_plot)
    dev.off()
  }
}
#' Model Charts
#'
#' Outputs the double lift chart or predicted vs observed plots
#'
#' @param DATA Dataframe containing the predicted, observed and exposure
#' @param NAMES \itemize{
#' \item{MODELS}{Vector of names of the columns with the model predictions (non-earned)}
#' \item{OBSERVED}{Column name of the observed variable}
#' \item{EXPOSURE}{Column name of the exposure variable}
#' \item{VARIABLE}{Column name of the variable with respect to which you want the LR proof (i.e. the x-axis)}
#' \item{SPLIT.BY}{Column name of the factor variable by which you want to split the LR proofs by}
#' }
#' @param PATH Path to which the graph will be exported to. (Default \code{NULL} will display the graph instead of exporting)
#' @param CUTS The cut points for the variable if the user wants to provide them. Leave \code{NULL} if you want auto bucket.
#' @param MODE Can choose between \code{"LR"} to produce double lift chart (LR Proof) or \code{"PvO"} to produce a predicted vs observed plot.
#' @param DATA.ONLY TRUE will simply return a table instead of the plot
#' @param N.BKTS Number of groupings to do for. Lower number of groupings offer faster performance but more approximate
#'
#' @return Either a .png file in the path or output a graph in R
#'
#' @export
Plot.Chart <- function(DATA,
NAMES = list(MODELS = NULL,
OBSERVED = NULL,
EXPOSURE = NULL,
VARIABLE = NULL,
SPLIT.BY = NULL),
PATH = NULL,
CUTS = NULL,
MODE = "LR",
DATA.ONLY = FALSE,
N.BKTS = 20){
library(Hmisc)
library(dplyr)
library(reshape2)
library(RColorBrewer)
DATA <- DATA[,c(NAMES$SPLIT.BY,
NAMES$EXPOSURE,
NAMES$VARIABLE,
NAMES$MODELS,
NAMES$OBSERVED)]
## Earn the fitted data
DATA[,NAMES$MODELS] = DATA[,NAMES$MODELS]* DATA[,NAMES$EXPOSURE]
#Model diff:
if (is.numeric( DATA[,NAMES$VARIABLE] )){
CUTS <- Hmisc::wtd.quantile(DATA[,NAMES$VARIABLE],
weights = DATA[,NAMES$EXPOSURE],
probs = seq(1/N.BKTS, 1 - 1/N.BKTS, by = 1/N.BKTS))
DATA[,NAMES$VARIABLE] <- as.factor( cut2(DATA[,NAMES$VARIABLE], cuts=CUTS))
}else{
DATA[,NAMES$VARIABLE] <- as.factor(DATA[,NAMES$VARIABLE])
}
#Aggregate dataset
if(is.null(NAMES$SPLIT.BY)){
DATA.AGG <- data.frame(NB.OBS = rep(1,nrow(DATA)), DATA)
DATA.AGG <- DATA.AGG %>%
group_by_(NAMES$VARIABLE) %>%
summarise_each_(funs(sum), c(NAMES$MODELS,
NAMES$OBSERVED,
"NB.OBS",
NAMES$EXPOSURE)) %>%
mutate()
DATA.AGG <- as.data.frame(DATA.AGG)
}else{
DATA.AGG <- DATA.AGG1 <- data.frame(NB.OBS = rep(1,nrow(DATA)), DATA)
DATA.AGG1 <- DATA.AGG1[,!(names(DATA.AGG1) %in% NAMES$SPLIT.BY)] %>%
group_by_(NAMES$VARIABLE) %>%
summarise_each_(funs(sum), c(NAMES$MODELS,
NAMES$OBSERVED,
"NB.OBS",
NAMES$EXPOSURE)) %>%
mutate()
DATA.AGG1 <- data.frame( TEMP="Combined", as.data.frame(DATA.AGG1))
names(DATA.AGG1)[which(names(DATA.AGG1)=="TEMP")] = NAMES$SPLIT.BY
DATA.AGG <- DATA.AGG %>%
group_by_(NAMES$SPLIT.BY,NAMES$VARIABLE) %>%
summarise_each_(funs(sum), c(NAMES$MODELS,
NAMES$OBSERVED,
"NB.OBS",
NAMES$EXPOSURE)) %>%
mutate()
DATA.AGG<- rbind(as.data.frame(DATA.AGG),
DATA.AGG1)
}
#LR
if(MODE == "LR"){
DATA.AGG = DATA.AGG %>%
cbind(., DATA.AGG[,NAMES$OBSERVED] / DATA.AGG[,c(NAMES$MODELS)])
NAMES$CURVES = paste0("C_", NAMES$MODELS)
names(DATA.AGG)[(length(DATA.AGG)-length(NAMES$MODELS) + 1):length(DATA.AGG)] = NAMES$CURVES
if(length(NAMES$CURVES) > 1){
DATA.AGG[,NAMES$CURVES] = apply(DATA.AGG[,NAMES$CURVES],2,function(x){x[x>5] = 5; return(x)})
}else{
DATA.AGG[DATA.AGG[,NAMES$CURVES]>5, NAMES$CURVES] = 5
}
}else if(MODE == "PvO"){
DATA.AGG = DATA.AGG %>%
cbind(., DATA.AGG[,c(NAMES$OBSERVED,NAMES$MODELS)]/DATA.AGG[,NAMES$EXPOSURE])
NAMES$CURVES = paste0("C_", c(NAMES$OBSERVED,NAMES$MODELS))
names(DATA.AGG)[(length(DATA.AGG)-length(NAMES$MODELS)):length(DATA.AGG)] = NAMES$CURVES
}else{
cat("MODE not supported, only 'LR' & 'PvO' are supported! \n")
return(0)
}
if(DATA.ONLY){
return(DATA.AGG)
}
#LR PROOF
if(is.null(NAMES$SPLIT.BY)){
DATA.PL <- melt(DATA.AGG , id=NAMES$VARIABLE)
}else{
DATA.PL <- melt(DATA.AGG , id=c(NAMES$SPLIT.BY,NAMES$VARIABLE))
DATA.PL$variable2 = as.factor(paste0(DATA.PL[,NAMES$SPLIT.BY], DATA.PL[,NAMES$VARIABLE],sep = " - "))
}
TOT.XPO <- sum(DATA.PL[DATA.PL$variable == NAMES$EXPOSURE, "value"])
DATA.PL[DATA.PL$variable == NAMES$EXPOSURE, "value"] <- DATA.PL[DATA.PL$variable == NAMES$EXPOSURE, "value"] /
max(DATA.PL[DATA.PL$variable == NAMES$EXPOSURE, "value"]) *
median(DATA.PL[DATA.PL$variable %in% NAMES$CURVES, "value"])*.25
if(is.null(NAMES$SPLIT.BY)){
Final.Chart <- ggplot(DATA.PL, aes_string(x = NAMES$VARIABLE, y = "value", group= "variable", color="variable", shape="variable") ) +
geom_bar(data= DATA.PL[DATA.PL$variable == NAMES$EXPOSURE,], stat="identity",alpha=.75,fill="gold2",colour=NA,show.legend=FALSE) +
geom_line(data= DATA.PL[DATA.PL$variable %in% NAMES$CURVES,], size=1) +
geom_point(data= DATA.PL[DATA.PL$variable %in% NAMES$CURVES,], aes(shape=variable,size=.5)) +
scale_color_brewer(palette="Set2") +
theme(axis.text.x = element_text(angle = 45, hjust = 1,size=12),
axis.text.y = element_text(size=12),
legend.position="bottom",
legend.title=element_blank(),
legend.text = element_text(size=12))
if(MODE=="LR"){
Final.Chart = Final.Chart + labs(x = NAMES$VARIABLE,
y = "Loss Ratio (capped at 500%)")
}else if(MODE=="PvO"){
Final.Chart = Final.Chart + labs(x = NAMES$VARIABLE,
y = "Average Loss")
}
}else{
Final.Chart <- ggplot(DATA.PL, aes_string(x = NAMES$VARIABLE, y = "value", group="variable2", color=NAMES$SPLIT.BY, shape="variable", linetype="variable")) +
geom_bar(data= DATA.PL[DATA.PL$variable == NAMES$EXPOSURE,], stat="identity",alpha=.75,fill="gold2",colour=NA,show.legend=FALSE) +
geom_line(data= DATA.PL[DATA.PL$variable %in% NAMES$CURVES,], size=1) +
geom_point(data= DATA.PL[DATA.PL$variable %in% NAMES$CURVES,], aes(shape=variable,size=.5)) +
scale_color_brewer("YlGn") +
theme(axis.text.x = element_text(angle = 45, hjust = 1,size=12),
axis.text.y = element_text(size=12),
legend.position="bottom",
legend.title=element_blank(),
legend.text = element_text(size=12))
if(MODE=="LR"){
Final.Chart = Final.Chart + labs(x = NAMES$VARIABLE,
y = "Loss Ratio (capped at 500%)")
}else if(MODE=="PvO"){
Final.Chart = Final.Chart + labs(x = NAMES$VARIABLE,
y = "Average Loss")
}
}
if(is.null(PATH)){
return(Final.Chart)
}else{
cat(paste0("Plot ",NAMES$VARIABLE," outputted to ",PATH," \n") )
png(paste0(PATH,"/",NAMES$VARIABLE,".png"))
print(Final.Chart)
dev.off()
}
}
|
library(ff)
### Name: fforder
### Title: Sorting: order from ff vectors
### Aliases: fforder
### Keywords: univar manip arith IO data
### ** Examples
x <- ff(sample(1e5, 1e6, TRUE))
y <- ff(sample(1e5, 1e6, TRUE))
d <- ffdf(x, y)
i <- fforder(y)
y[i]
i <- fforder(x, index=i)
x[i]
d[i,]
i <- fforder(x, y)
d[i,]
i <- ffdforder(d)
d[i,]
rm(x, y, d, i)
gc()
| /data/genthat_extracted_code/ff/examples/fforder.rd.R | no_license | surayaaramli/typeRrh | R | false | false | 409 | r | library(ff)
### Name: fforder
### Title: Sorting: order from ff vectors
### Aliases: fforder
### Keywords: univar manip arith IO data
### ** Examples
x <- ff(sample(1e5, 1e6, TRUE))
y <- ff(sample(1e5, 1e6, TRUE))
d <- ffdf(x, y)
i <- fforder(y)
y[i]
i <- fforder(x, index=i)
x[i]
d[i,]
i <- fforder(x, y)
d[i,]
i <- ffdforder(d)
d[i,]
rm(x, y, d, i)
gc()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makePBDBtaxonTree.R
\name{makePBDBtaxonTree}
\alias{makePBDBtaxonTree}
\alias{plotTaxaTreePBDB}
\title{Creating a Taxon-Tree from Taxonomic Data Downloaded from the Paleobiology Database}
\usage{
makePBDBtaxonTree(
taxaDataPBDB,
rankTaxon,
method = "parentChild",
tipSet = NULL,
cleanTree = TRUE,
annotatedDuplicateNames = TRUE,
APIversion = "1.2",
failIfNoInternet = TRUE
)
plotTaxaTreePBDB(taxaTree, edgeLength = 1)
}
\arguments{
\item{taxaDataPBDB}{A table of taxonomic data collected from
the Paleobiology Database, using the taxa list option
with \code{show = class}. Should work with versions 1.1-1.2 of
the API, with either the \code{pbdb} or \code{com} vocab. However,
as \code{accepted_name} is not available in API v1.1, the resulting
tree will have a taxon's *original* name and not
any formally updated name.}
\item{rankTaxon}{The selected taxon rank; must be one of \code{'species'},
\code{'genus'}, \code{'family'}, \code{'order'}, \code{'class'} or \code{'phylum'}.}
\item{method}{Controls which algorithm is used for calculating
the taxon-tree. The default option is \code{method = "parentChild"}
which converts the listed binary parent-child taxon relationships in
the Paleobiology Database- these parent-child relationships (if missing
from the input dataset) are autofilled using API calls to the
Paleobiology Database. Alternatively, users may use
\code{method = "Linnean"}, which converts the table of Linnean taxonomic
assignments (family, order, etc as provided by \code{show = class} in
PBDB API calls) into a taxon-tree. Two methods formerly both implemented
under \code{method = "parentChild"} are also available as
\code{method = "parentChildOldMergeRoot"} and \code{method = "parentChildOldQueryPBDB"}
respectively. Both of these use similar algorithms as the current
\code{method = "parentChild"} but differ in how they treat taxa with
parents missing from the input taxonomic dataset.
\code{method = "parentChildOldQueryPBDB"} behaves most similar
to \code{method = "parentChild"} in that it queries the Paleobiology
Database via the API , but repeatedly does so for information on parent
taxa of the 'floating' parents, and continues within a \code{while}
loop until only one such unassigned parent taxon remains. This latter
option may talk a long time or never finish, depending on the
linearity and taxonomic structures encountered in the PBDB taxonomic
data; i.e. if someone a taxon was ultimately its own indirect child
in some grand loop by mistake, then under this option
\code{makePBDBtaxonTree} might never finish. In cases where taxonomy
is bad due to weird and erroneous taxonomic assignments reported by
the PBDB, this routine may search all the way back to a very ancient
and deep taxon, such as the \emph{Eukaryota} taxon.
\code{method = "parentChildOldMergeRoot"} will combine these disparate
potential roots and link them to an artificially-constructed
pseudo-root, which at least allows for visualization of the taxonomic
structure in a limited dataset. This latter option will be fully
offline, as it does not do any additional API calls
of the Paleobiology Database, unlike other options.}
\item{tipSet}{This argument only impacts analyses where
\code{method = "parentChild"} is used. This \code{tipSet} argument controls
which taxa are selected as tip taxa for the output tree.
\code{tipSet = "nonParents"} selects all child taxa which
are not listed as parents in \code{parentChild}.
Alternatively, \code{tipSet = "all"} will add a tip to every
internal node with the parent-taxon name encapsulated in parentheses.
The default is \code{NULL} - if \code{tipSet = NULL} and \code{method = "parentChild"},
then \code{tipSet} will be set so \code{tipSet = "nonParents"}.}
\item{cleanTree}{When \code{TRUE} (the default), the tree is run through a series of
post-processing, including having singles collapsed,
nodes reordered and being written out as a Newick string and read
back in, to ensure functionality with ape functions
and ape-derived functions.
If \code{FALSE}, none of this post-processing is done and
users should beware, as such trees can lead to hard-crashes of R.}
\item{annotatedDuplicateNames}{A logical determining whether duplicate taxon names,
when found in the Paleobiology Database for taxa (presumably reflecting an issue with
taxa being obsolete but with incomplete seniority data), should be annotated to include
sequential numbers so to modify them, via function\code{base}'s
\code{\link[base]{make.unique}}. This only applies to
\code{method = "parentChild"}, with the default option being
\code{annotatedDuplicateNames = TRUE}. If more than 26 duplicates are found, an error
is issued. If this argument is \code{FALSE}, an error is issued if duplicate taxon
names are found.}
\item{APIversion}{Version of the Paleobiology Database API used by
\code{makePBDBtaxonTree} when \code{method = "parentChild"} or
\code{method = "parentChildOldQueryPBDB"} is used. The current default
is \code{APIversion = "1.2"}, the most recent API version as of 12/11/2018.}
\item{failIfNoInternet}{If the Paleobiology Database or another
needed internet resource cannot be accessed, perhaps because of
no internet connection, should the function fail (with an error)
or should the function return \code{NULL} and return an
informative message instead, thus meeting the CRAN policy
that such functionalities must 'fail gracefully'?
The default is \code{TRUE} but all examples that might be auto-run
use \code{FALSE} so they do not fail during R CHECK.}
\item{taxaTree}{A phylogeny of class \code{phylo}, presumably a taxon tree as output from
\code{makePBDBtaxonTree} with higher-taxon names as node labels.}
\item{edgeLength}{The edge length that the plotted tree should be plotted
with (\code{plotTaxaTreePBDB} plots phylogenies as non-ultrametric,
not as a cladogram with aligned tips).}
}
\value{
A phylogeny of class \code{phylo}, where each tip is a taxon of the given \code{rankTaxon}. See additional details
regarding branch lengths can be found in the sub-algorithms used to create the taxon-tree by this function:
\code{\link{parentChild2taxonTree}} and \code{\link{taxonTable2taxonTree}}.
Depending on the \code{method}
used, either the element \code{$parentChild} or \code{$taxonTable} is added to the list structure of
the output phylogeny object, which was used as input for one of the two algorithms mentioned above.
Please note that when applied to output from the taxa option of the API version 1.1, the taxon names
returned are the \emph{original} taxon names as 'accepted_name' is not available in API v1.1, while
under API v1.2, the returned taxon names should be the most up-to-date formal names for those taxa.
Similar issues also effect the identification of parent taxa, as the accepted name of the
parent ID number is only provided in version 1.2 of the API.
}
\description{
The function \code{makePBDBtaxonTree} creates phylogeny-like
object of class \code{phylo} from the taxonomic information
recorded in a taxonomy download from the PBDB for
a given group. Two different algorithms are provided,
the default being based on parent-child taxon relationships,
the other based on the nested Linnean hierarchy. The function
\code{plotTaxaTreePBDB} is also provided as a minor helper
function for optimally plotting the labeled topologies that are
output by \code{makePBDBtaxonTree}.
}
\details{
This function should not be taken too seriously.
Many groups in the Paleobiology Database have
out-of-date or very incomplete taxonomic information.
This function is meant to help visualize
what information is present, and by use of time-scaling
functions, allow us to visualize the intersection
of temporal and phylogenetic, mainly to look for incongruence
due to either incorrect taxonomic placements,
erroneous occurrence data or both.
Note however that, contrary to common opinion among some
paleontologists, taxon-trees may be just as useful for
macroevolutionary studies as reconstructed phylogenies
(Soul and Friedman, 2015).
}
\examples{
# Note that most examples here use argument
# failIfNoInternet = FALSE so that functions do
# not error out but simply return NULL if internet
# connection is not available, and thus
# fail gracefully rather than error out (required by CRAN).
# Remove this argument or set to TRUE so functions DO fail
# when internet resources (paleobiodb) is not available.
set.seed(1)
\donttest{
#get some example occurrence and taxonomic data
data(graptPBDB)
#get the taxon tree: Linnean method
graptTreeLinnean <- makePBDBtaxonTree(
taxaDataPBDB = graptTaxaPBDB,
rankTaxon = "genus",
method = "Linnean",
failIfNoInternet = FALSE)
#get the taxon tree: parentChild method
graptTreeParentChild <- makePBDBtaxonTree(
taxaDataPBDB = graptTaxaPBDB,
rankTaxon = "genus",
method = "parentChild",
failIfNoInternet = FALSE)
if(!is.null(graptTreeParentChild) &
!is.null(graptTreeLinnean)){
# if those functions worked...
# let's plot these and compare them!
plotTaxaTreePBDB(graptTreeParentChild)
plotTaxaTreePBDB(graptTreeLinnean)
}
# pause 3 seconds so we don't spam the API
Sys.sleep(3)
####################################################
# let's try some other groups
###################################
#conodonts
conoData <- getCladeTaxaPBDB("Conodonta",
failIfNoInternet = FALSE)
if(!is.null(conoData)){
conoTree <- makePBDBtaxonTree(
taxaDataPBDB = conoData,
rankTaxon = "genus",
method = "parentChild")
# if it worked, plot it!
plotTaxaTreePBDB(conoTree)
}
# pause 3 seconds so we don't spam the API
Sys.sleep(3)
#############################
#asaphid trilobites
asaData <- getCladeTaxaPBDB("Asaphida",
failIfNoInternet = FALSE)
if(!is.null(asaData)){
asaTree <- makePBDBtaxonTree(
taxaDataPBDB = asaData,
rankTaxon = "genus",
method = "parentChild")
# if it worked, plot it!
plotTaxaTreePBDB(asaTree)
}
# pause 3 seconds so we don't spam the API
Sys.sleep(3)
###############################
#Ornithischia
ornithData <- getCladeTaxaPBDB("Ornithischia",
failIfNoInternet = FALSE)
if(!is.null(ornithData)){
ornithTree <- makePBDBtaxonTree(
taxaDataPBDB = ornithData,
rankTaxon = "genus",
method = "parentChild")
# if it worked, plot it!
plotTaxaTreePBDB(ornithTree)
# pause 3 seconds so we don't spam the API
Sys.sleep(3)
#try Linnean!
#but first... need to drop repeated taxon first: Hylaeosaurus
# actually this taxon seems to have been repaired
# as of September 2019 !
# findHylaeo <- ornithData$taxon_name == "Hylaeosaurus"
# there's actually only one accepted ID number
# HylaeoIDnum <- unique(ornithData[findHylaeo,"taxon_no"])
# HylaeoIDnum
# so, take which one has occurrences listed
# dropThis <- which((ornithData$n_occs < 1) & findHylaeo)
# ornithDataCleaned <- ornithData[-dropThis,]
ornithTree <- makePBDBtaxonTree(
ornithData,
rankTaxon = "genus",
method = "Linnean",
failIfNoInternet = FALSE)
# if it worked, plot it!
plotTaxaTreePBDB(ornithTree)
}
# pause 3 seconds so we don't spam the API
Sys.sleep(3)
#########################
# Rhynchonellida
rhynchData <- getCladeTaxaPBDB("Rhynchonellida",
failIfNoInternet = FALSE)
if(!is.null(rhynchData)){
rhynchTree <- makePBDBtaxonTree(
taxaDataPBDB = rhynchData,
rankTaxon = "genus",
method = "parentChild")
# if it worked, plot it!
plotTaxaTreePBDB(rhynchTree)
}
#some of these look pretty messy!
}
}
\references{
Peters, S. E., and M. McClennen. 2015. The Paleobiology Database
application programming interface. \emph{Paleobiology} 42(1):1-7.
Soul, L. C., and M. Friedman. 2015. Taxonomy and Phylogeny Can Yield
Comparable Results in Comparative Palaeontological Analyses. \emph{Systematic Biology}
(\doi{10.1093/sysbio/syv015})
}
\seealso{
Two other functions in paleotree are used as sub-algorithms by \code{makePBDBtaxonTree}
to create the taxon-tree within this function,
and users should consult their manual pages for additional details:
\code{\link{parentChild2taxonTree}} and \code{\link{taxonTable2taxonTree}}
Closely related functions for
Other functions for manipulating PBDB data can be found at \code{\link{taxonSortPBDBocc}},
\code{\link{occData2timeList}}, and the example data at \code{\link{graptPBDB}}.
}
\author{
David W. Bapst
}
| /man/makePBDBtaxonTree.Rd | permissive | dwbapst/paleotree | R | false | true | 12,499 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makePBDBtaxonTree.R
\name{makePBDBtaxonTree}
\alias{makePBDBtaxonTree}
\alias{plotTaxaTreePBDB}
\title{Creating a Taxon-Tree from Taxonomic Data Downloaded from the Paleobiology Database}
\usage{
makePBDBtaxonTree(
taxaDataPBDB,
rankTaxon,
method = "parentChild",
tipSet = NULL,
cleanTree = TRUE,
annotatedDuplicateNames = TRUE,
APIversion = "1.2",
failIfNoInternet = TRUE
)
plotTaxaTreePBDB(taxaTree, edgeLength = 1)
}
\arguments{
\item{taxaDataPBDB}{A table of taxonomic data collected from
the Paleobiology Database, using the taxa list option
with \code{show = class}. Should work with versions 1.1-1.2 of
the API, with either the \code{pbdb} or \code{com} vocab. However,
as \code{accepted_name} is not available in API v1.1, the resulting
tree will have a taxon's *original* name and not
any formally updated name.}
\item{rankTaxon}{The selected taxon rank; must be one of \code{'species'},
\code{'genus'}, \code{'family'}, \code{'order'}, \code{'class'} or \code{'phylum'}.}
\item{method}{Controls which algorithm is used for calculating
the taxon-tree. The default option is \code{method = "parentChild"}
which converts the listed binary parent-child taxon relationships in
the Paleobiology Database- these parent-child relationships (if missing
from the input dataset) are autofilled using API calls to the
Paleobiology Database. Alternatively, users may use
\code{method = "Linnean"}, which converts the table of Linnean taxonomic
assignments (family, order, etc as provided by \code{show = class} in
PBDB API calls) into a taxon-tree. Two methods formerly both implemented
under \code{method = "parentChild"} are also available as
\code{method = "parentChildOldMergeRoot"} and \code{method = "parentChildOldQueryPBDB"}
respectively. Both of these use similar algorithms as the current
\code{method = "parentChild"} but differ in how they treat taxa with
parents missing from the input taxonomic dataset.
\code{method = "parentChildOldQueryPBDB"} behaves most similar
to \code{method = "parentChild"} in that it queries the Paleobiology
Database via the API , but repeatedly does so for information on parent
taxa of the 'floating' parents, and continues within a \code{while}
loop until only one such unassigned parent taxon remains. This latter
option may talk a long time or never finish, depending on the
linearity and taxonomic structures encountered in the PBDB taxonomic
data; i.e. if someone a taxon was ultimately its own indirect child
in some grand loop by mistake, then under this option
\code{makePBDBtaxonTree} might never finish. In cases where taxonomy
is bad due to weird and erroneous taxonomic assignments reported by
the PBDB, this routine may search all the way back to a very ancient
and deep taxon, such as the \emph{Eukaryota} taxon.
\code{method = "parentChildOldMergeRoot"} will combine these disparate
potential roots and link them to an artificially-constructed
pseudo-root, which at least allows for visualization of the taxonomic
structure in a limited dataset. This latter option will be fully
offline, as it does not do any additional API calls
of the Paleobiology Database, unlike other options.}
\item{tipSet}{This argument only impacts analyses where
\code{method = "parentChild"} is used. This \code{tipSet} argument controls
which taxa are selected as tip taxa for the output tree.
\code{tipSet = "nonParents"} selects all child taxa which
are not listed as parents in \code{parentChild}.
Alternatively, \code{tipSet = "all"} will add a tip to every
internal node with the parent-taxon name encapsulated in parentheses.
The default is \code{NULL} - if \code{tipSet = NULL} and \code{method = "parentChild"},
then \code{tipSet} will be set so \code{tipSet = "nonParents"}.}
\item{cleanTree}{When \code{TRUE} (the default), the tree is run through a series of
post-processing, including having singles collapsed,
nodes reordered and being written out as a Newick string and read
back in, to ensure functionality with ape functions
and ape-derived functions.
If \code{FALSE}, none of this post-processing is done and
users should beware, as such trees can lead to hard-crashes of R.}
\item{annotatedDuplicateNames}{A logical determining whether duplicate taxon names,
when found in the Paleobiology Database for taxa (presumably reflecting an issue with
taxa being obsolete but with incomplete seniority data), should be annotated to include
sequential numbers so to modify them, via function\code{base}'s
\code{\link[base]{make.unique}}. This only applies to
\code{method = "parentChild"}, with the default option being
\code{annotatedDuplicateNames = TRUE}. If more than 26 duplicates are found, an error
is issued. If this argument is \code{FALSE}, an error is issued if duplicate taxon
names are found.}
\item{APIversion}{Version of the Paleobiology Database API used by
\code{makePBDBtaxonTree} when \code{method = "parentChild"} or
\code{method = "parentChildOldQueryPBDB"} is used. The current default
is \code{APIversion = "1.2"}, the most recent API version as of 12/11/2018.}
\item{failIfNoInternet}{If the Paleobiology Database or another
needed internet resource cannot be accessed, perhaps because of
no internet connection, should the function fail (with an error)
or should the function return \code{NULL} and return an
informative message instead, thus meeting the CRAN policy
that such functionalities must 'fail gracefully'?
The default is \code{TRUE} but all examples that might be auto-run
use \code{FALSE} so they do not fail during R CHECK.}
\item{taxaTree}{A phylogeny of class \code{phylo}, presumably a taxon tree as output from
\code{makePBDBtaxonTree} with higher-taxon names as node labels.}
\item{edgeLength}{The edge length that the plotted tree should be plotted
with (\code{plotTaxaTreePBDB} plots phylogenies as non-ultrametric,
not as a cladogram with aligned tips).}
}
\value{
A phylogeny of class \code{phylo}, where each tip is a taxon of the given \code{rankTaxon}. See additional details
regarding branch lengths can be found in the sub-algorithms used to create the taxon-tree by this function:
\code{\link{parentChild2taxonTree}} and \code{\link{taxonTable2taxonTree}}.
Depending on the \code{method}
used, either the element \code{$parentChild} or \code{$taxonTable} is added to the list structure of
the output phylogeny object, which was used as input for one of the two algorithms mentioned above.
Please note that when applied to output from the taxa option of the API version 1.1, the taxon names
returned are the \emph{original} taxon names as 'accepted_name' is not available in API v1.1, while
under API v1.2, the returned taxon names should be the most up-to-date formal names for those taxa.
Similar issues also effect the identification of parent taxa, as the accepted name of the
parent ID number is only provided in version 1.2 of the API.
}
\description{
The function \code{makePBDBtaxonTree} creates phylogeny-like
object of class \code{phylo} from the taxonomic information
recorded in a taxonomy download from the PBDB for
a given group. Two different algorithms are provided,
the default being based on parent-child taxon relationships,
the other based on the nested Linnean hierarchy. The function
\code{plotTaxaTreePBDB} is also provided as a minor helper
function for optimally plotting the labeled topologies that are
output by \code{makePBDBtaxonTree}.
}
\details{
This function should not be taken too seriously.
Many groups in the Paleobiology Database have
out-of-date or very incomplete taxonomic information.
This function is meant to help visualize
what information is present, and by use of time-scaling
functions, allow us to visualize the intersection
of temporal and phylogenetic, mainly to look for incongruence
due to either incorrect taxonomic placements,
erroneous occurrence data or both.
Note however that, contrary to common opinion among some
paleontologists, taxon-trees may be just as useful for
macroevolutionary studies as reconstructed phylogenies
(Soul and Friedman, 2015).
}
\examples{
# Note that most examples here use argument
# failIfNoInternet = FALSE so that functions do
# not error out but simply return NULL if internet
# connection is not available, and thus
# fail gracefully rather than error out (required by CRAN).
# Remove this argument or set to TRUE so functions DO fail
# when internet resources (paleobiodb) is not available.
set.seed(1)
\donttest{
#get some example occurrence and taxonomic data
data(graptPBDB)
#get the taxon tree: Linnean method
graptTreeLinnean <- makePBDBtaxonTree(
taxaDataPBDB = graptTaxaPBDB,
rankTaxon = "genus",
method = "Linnean",
failIfNoInternet = FALSE)
#get the taxon tree: parentChild method
graptTreeParentChild <- makePBDBtaxonTree(
taxaDataPBDB = graptTaxaPBDB,
rankTaxon = "genus",
method = "parentChild",
failIfNoInternet = FALSE)
if(!is.null(graptTreeParentChild) &
!is.null(graptTreeLinnean)){
# if those functions worked...
# let's plot these and compare them!
plotTaxaTreePBDB(graptTreeParentChild)
plotTaxaTreePBDB(graptTreeLinnean)
}
# pause 3 seconds so we don't spam the API
Sys.sleep(3)
####################################################
# let's try some other groups
###################################
#conodonts
conoData <- getCladeTaxaPBDB("Conodonta",
failIfNoInternet = FALSE)
if(!is.null(conoData)){
conoTree <- makePBDBtaxonTree(
taxaDataPBDB = conoData,
rankTaxon = "genus",
method = "parentChild")
# if it worked, plot it!
plotTaxaTreePBDB(conoTree)
}
# pause 3 seconds so we don't spam the API
Sys.sleep(3)
#############################
#asaphid trilobites
asaData <- getCladeTaxaPBDB("Asaphida",
failIfNoInternet = FALSE)
if(!is.null(asaData)){
asaTree <- makePBDBtaxonTree(
taxaDataPBDB = asaData,
rankTaxon = "genus",
method = "parentChild")
# if it worked, plot it!
plotTaxaTreePBDB(asaTree)
}
# pause 3 seconds so we don't spam the API
Sys.sleep(3)
###############################
#Ornithischia
ornithData <- getCladeTaxaPBDB("Ornithischia",
failIfNoInternet = FALSE)
if(!is.null(ornithData)){
ornithTree <- makePBDBtaxonTree(
taxaDataPBDB = ornithData,
rankTaxon = "genus",
method = "parentChild")
# if it worked, plot it!
plotTaxaTreePBDB(ornithTree)
# pause 3 seconds so we don't spam the API
Sys.sleep(3)
#try Linnean!
#but first... need to drop repeated taxon first: Hylaeosaurus
# actually this taxon seems to have been repaired
# as of September 2019 !
# findHylaeo <- ornithData$taxon_name == "Hylaeosaurus"
# there's actually only one accepted ID number
# HylaeoIDnum <- unique(ornithData[findHylaeo,"taxon_no"])
# HylaeoIDnum
# so, take which one has occurrences listed
# dropThis <- which((ornithData$n_occs < 1) & findHylaeo)
# ornithDataCleaned <- ornithData[-dropThis,]
ornithTree <- makePBDBtaxonTree(
ornithData,
rankTaxon = "genus",
method = "Linnean",
failIfNoInternet = FALSE)
# if it worked, plot it!
plotTaxaTreePBDB(ornithTree)
}
# pause 3 seconds so we don't spam the API
Sys.sleep(3)
#########################
# Rhynchonellida
rhynchData <- getCladeTaxaPBDB("Rhynchonellida",
failIfNoInternet = FALSE)
if(!is.null(rhynchData)){
rhynchTree <- makePBDBtaxonTree(
taxaDataPBDB = rhynchData,
rankTaxon = "genus",
method = "parentChild")
# if it worked, plot it!
plotTaxaTreePBDB(rhynchTree)
}
#some of these look pretty messy!
}
}
\references{
Peters, S. E., and M. McClennen. 2015. The Paleobiology Database
application programming interface. \emph{Paleobiology} 42(1):1-7.
Soul, L. C., and M. Friedman. 2015. Taxonomy and Phylogeny Can Yield
Comparable Results in Comparative Palaeontological Analyses. \emph{Systematic Biology}
(\doi{10.1093/sysbio/syv015})
}
\seealso{
Two other functions in paleotree are used as sub-algorithms by \code{makePBDBtaxonTree}
to create the taxon-tree within this function,
and users should consult their manual pages for additional details:
\code{\link{parentChild2taxonTree}} and \code{\link{taxonTable2taxonTree}}
Other closely related functions for manipulating PBDB data can be found at \code{\link{taxonSortPBDBocc}},
\code{\link{occData2timeList}}, and the example data at \code{\link{graptPBDB}}.
}
\author{
David W. Bapst
}
|
#' Detect heatwaves and cold-spells.
#'
#' Applies the Hobday et al. (2016) marine heat wave definition to an input time
#' series of temperature along with a daily date vector.
#'
#' @importFrom dplyr %>%
#'
#' @param data A data frame with three columns. In the default setting (i.e. omitting
#' the arguments \code{doy}, \code{x} and \code{y}; see immediately below), the
#' data set is expected to have the headers \code{doy}, \code{t} and \code{temp}.
#' \code{doy} is the Julian day running from 1 to 366, but modified so that the
#' day-of-year (doy) vector for non-leap-years runs 1...59 and then 61...366.
#' For leap years the 60th day is February 29. The \code{t} column is a vector
#' of dates of class \code{Date}, while \code{temp} is the measured variable (by
#' default it is assumed to be temperature). Data of the appropriate format are
#' created by the function \code{\link{make_whole}}, but your own data can be supplied
#' if they meet the criteria specified by \code{\link{make_whole}}.
#' @param doy If a column headed \code{doy} is not available, another column with
#' Julian dates can be supplied. This argument accepts the name of that column. The
#' default name is, of course, \code{doy}.
#' @param x This column is expected to contain a vector of dates as per the
#' specification of \code{make_whole}. If a column headed \code{t} is present in
#' the dataframe, this argument may be omitted; otherwise, specify the name of
#' the column with dates here.
#' @param y This is a column containing the measurement variable. If the column
#' name differs from the default (i.e. \code{temp}), specify the name here.
#' @param climatology_start Required. The start date for the period across which
#' the (varying by day-of-year) seasonal cycle and extremes threshold are
#' calculated.
#' @param climatology_end Required. The end date for the period across which
#' the (varying by day-of-year) seasonal cycle and extremes threshold are
#' calculated.
#' @param pctile Threshold percentile (\%) for detection of extreme values.
#' Default is \code{90}th percentile. Please see \code{cold_spells} for more
#' information about the calculation of marine cold spells.
#' @param window_half_width Width of sliding window about day-of-year (to one
#' side of the center day-of-year) used for the pooling of values and
#' calculation of climatology and threshold percentile. Default is \code{5}
#' days, which gives a window width of 11 days centered on the 6th day of the
#' series of 11 days.
#' @param smooth_percentile Boolean switch selecting whether to smooth the
#' climatology and threshold percentile timeseries with a moving average of
#' width \code{smooth_percentile}. Default is \code{TRUE}.
#' @param smooth_percentile_width Full width of moving average window for smoothing
#' climatology and threshold. Default is \code{31} days.
#' @param clim_only Choose to calculate only the climatologies and not the
#' events. Default is \code{FALSE}.
#' @param min_duration Minimum duration for acceptance of detected MHWs.
#' Default is \code{5} days.
#' @param join_across_gaps Boolean switch indicating whether to join MHWs which
#' occur before/after a short gap as specified by \code{max_gap}. Default
#' is \code{TRUE}.
#' @param max_gap Maximum length of gap allowed for the joining of MHWs. Default
#' is \code{2} days.
#' @param max_pad_length Specifies the maximum length of days over which to
#' interpolate (pad) missing data (specified as \code{NA}) in the input
#' temperature time series; i.e., any consecutive blocks of NAs with length
#' greater than \code{max_pad_length} will be left as \code{NA}. Set as an
#' integer. Default is \code{3} days.
#' @param cold_spells Boolean specifying if the code should detect cold events
#' instead of heat events. Default is \code{FALSE}. Please note that the
#' climatological thresholds for cold-spells are calculated the same as for
#' heatwaves, meaning that \code{pctile} should be set the same regardless
#' if one is calculating heatwaves or cold-spells. For example, if one wants
#' to calculate heatwaves above the 90th percentile threshold
#' (the default) one sets \code{pctile = 90}. Likewise, if one would like
#' identify the most intense cold-spells one must also set \code{pctile = 90},
#' even though cold spells are in fact simply the coldest extreme events in a
#' time series, which statistically equate to values below the 10th percentile.
#'
#' @details
#' \enumerate{
#' \item This function assumes that the input time series consists of continuous
#' daily values with few missing values. Time ranges which start and end
#' part-way through the calendar year are supported. The accompanying function
#' \code{\link{make_whole}} aids in the preparation of a time series that is
#' suitable for use with \code{detect}, although this may also be accomplished
#' 'by hand' as long as the criteria are met as discussed in the documentation
#' to \code{\link{make_whole}}.
#' \item It is recommended that a climatology period of at least 30 years is
#' specified in order to capture decadal thermal periodicities. It is further
#' advised that the start and end dates for the climatology period result
#' in full years, e.g. "1982-01-01" to "2011-12-31" or "1982-07-01" to
#' "2012-06-30"; if not, this may result in an unequal weighting of data
#' belonging with certain months within a time series.
#' \item This function supports leap years. This is done by ignoring Feb 29s
#' for the initial calculation of the climatology and threshold. The values for
#' Feb 29 are then linearly interpolated from the values for Feb 28 and Mar 1.
#' \item The calculation of onset and decline rates assumes that the events
#' started a half-day before the start day and ended a half-day after the
#' end-day. This is consistent with the duration definition as implemented,
#' which assumes duration = end day - start day + 1. As of version 0.15.7, an
#' event that is already present at the beginning of a time series, or an event
#' that is still present at the end of a time series, will report the rate of
#' onset or the rate of decline as \code{NA}, as it is impossible to know what
#' the temperature half a day before or after the start or end of the event is.
#' This may be a departure from the python marineHeatWaves function.
#' \item For the purposes of event detection, any missing temperature values not
#' interpolated over (through optional \code{max_pad_length}) will be set equal
#' to the seasonal climatology. This means they will trigger the end/start of
#' any adjacent temperature values which satisfy the event definition criteria.
#' \item If the code is used to detect cold events (\code{cold_spells} = TRUE),
#' then it works just as for heat waves except that events are detected as
#' deviations below the (100 - pctile)th percentile (e.g., the 10th instead of
#' 90th) for at least 5 days. Intensities are reported as negative values and
#' represent the temperature anomaly below climatology.
#' \item If only the climatology for the time series is required, and not the
#' events themselves, this may be done by setting \code{clim_only} = TRUE.
#' }
#' The original Python algorithm was written by Eric Oliver, Institute for
#' Marine and Antarctic Studies, University of Tasmania, Feb 2015, and is
#' documented by Hobday et al. (2016). The marine cold spell option was
#' implemented in version 0.13 (21 Nov 2015) of the Python module as a result
#' of our preparation of Schlegel et al. (submitted), wherein the cold events
#' receive a brief overview.
#'
#' @return The function will return a list of two tibbles (see the \code{tidyverse}),
#' \code{clim} and \code{event}, which are the climatology and events,
#' respectively. The climatology contains the full time series of daily temperatures,
#' as well as the seasonal climatology, the threshold and various aspects of the
#' events that were detected. The software was designed for detecting extreme
#' thermal events, and the units specified below reflect that intended purpose.
#' However, the various other kinds of extreme events may be detected according
#' to the 'marine heat wave' specifications, and if that is the case, the appropriate
#' units need to be determined by the user.
#' \item{doy}{Julian day (day-of-year). For non-leap years it runs 1...59 and
#' 61...366, while leap years run 1...366. This column will be named differently if
#' another name was specified to the \code{doy} argument.}
#' \item{t}{The date of the temperature measurement. This column will be
#' named differently if another name was specified to the \code{x} argument.}
#' \item{temp}{If the software was used for the purpose for which it was designed,
#' seawater temperature [deg. C] on the specified date will be returned. This
#' column will of course be named differently if another kind of measurement was
#' specified to the \code{y} argument.}
#' \item{seas_clim_year}{Climatological seasonal cycle [deg. C].}
#' \item{thresh_clim_year}{Seasonally varying threshold (e.g., 90th
#' percentile) [deg. C].}
#' \item{var_clim_year}{Seasonally varying variance (standard deviation) [deg. C].}
#' \item{thresh_criterion}{Boolean indicating if \code{temp} exceeds
#' \code{thresh_clim_year}.}
#' \item{duration_criterion}{Boolean indicating whether periods of consecutive
#' \code{thresh_criterion} are >= \code{min_duration}.}
#' \item{event}{Boolean indicating if all criteria that define a MHW or MCS are
#' met.}
#' \item{event_no}{A sequential number indicating the ID and order of
#' occurrence of the MHWs or MCSs.}
#'
#' The events are summarised using a range of event metrics:
#' \item{index_start}{Start index of event.}
#' \item{index_stop}{Stop index of event.}
#' \item{event_no}{A sequential number indicating the ID and order of
#' the events.}
#' \item{duration}{Duration of event [days].}
#' \item{date_start}{Start date of event [date].}
#' \item{date_stop}{Stop date of event [date].}
#' \item{date_peak}{Date of event peak [date].}
#' \item{int_mean}{Mean intensity [deg. C].}
#' \item{int_max}{Maximum (peak) intensity [deg. C].}
#' \item{int_var}{Intensity variability (standard deviation) [deg. C].}
#' \item{int_cum}{Cumulative intensity [deg. C x days].}
#' \item{rate_onset}{Onset rate of event [deg. C / day].}
#' \item{rate_decline}{Decline rate of event [deg. C / day].}
#'
#' \code{int_max_rel_thresh}, \code{int_mean_rel_thresh},
#' \code{int_var_rel_thresh}, and \code{int_cum_rel_thresh}
#' are as above except relative to the threshold (e.g., 90th percentile) rather
#' than the seasonal climatology.
#'
#' \code{int_max_abs}, \code{int_mean_abs}, \code{int_var_abs}, and
#' \code{int_cum_abs} are as above except as absolute magnitudes
#' rather than relative to the seasonal climatology or threshold.
#'
#' \code{int_max_norm} and \code{int_mean_norm} are as above except
#' units are in multiples of threshold exceedances, i.e., a value of 1.5
#' indicates the event intensity (relative to the climatology) was 1.5 times the
#' value of the threshold (relative to climatology,
#' i.e., threshold - climatology.)
#'
#' Note that \code{rate_onset} and \code{rate_decline} will return \code{NA}
#' when the event begins/ends on the first/last day of the time series. This
#' may be particularly evident when the function is applied to large gridded
#' data sets. Although the other metrics do not contain any errors and
#' provide sensible values, please take this into account in its
#' interpretation.
#'
#' @author Albertus J. Smit, Robert W. Schlegel, Eric C. J. Oliver
#'
#' @references Hobday, A.J. et al. (2016). A hierarchical approach to defining
#' marine heatwaves, Progress in Oceanography, 141, pp. 227-238,
#' doi:10.1016/j.pocean.2015.12.014
#'
#' Schlegel, R. W., Oliver, C. J., Wernberg, T. W., Smit, A. J. (2017).
#' Coastal and offshore co-occurrences of marine heatwaves and cold-spells.
#' Progress in Oceanography, 151, pp. 189-205, doi:10.1016/j.pocean.2017.01.004
#'
#' @export
#'
#' @examples
#' ts_dat <- make_whole(sst_WA)
#' res <- detect(ts_dat, climatology_start = "1983-01-01",
#' climatology_end = "2012-12-31")
#' # show a portion of the climatology:
#' res$clim[1:10, ]
#' # show some of the heat waves:
#' res$event[1:5, 1:10]
detect <-
  function(data,
           doy = doy,
           x = t,
           y = temp,
           climatology_start,
           climatology_end,
           pctile = 90,
           window_half_width = 5,
           smooth_percentile = TRUE,
           smooth_percentile_width = 31,
           clim_only = FALSE,
           min_duration = 5,
           join_across_gaps = TRUE,
           max_gap = 2,
           max_pad_length = 3,
           cold_spells = FALSE
           # verbose = TRUE, # to be implemented
  ) {
    # Dummy binding to silence the R CMD check NOTE triggered by the
    # non-standard evaluation of the default 'y = temp' argument.
    temp <- NULL
    # Resolve the user-supplied columns (non-standard evaluation) and build
    # the working series: doy | date (ts.x) | measured variable (ts.y).
    doy <- eval(substitute(doy), data)
    ts.x <- eval(substitute(x), data)
    ts.y <- eval(substitute(y), data)
    t_series <- tibble::tibble(doy,
                               ts.x,
                               ts.y)
    rm(doy); rm(ts.x); rm(ts.y)
    # Linearly interpolate short runs of missing data; runs longer than
    # max_pad_length are left as NA.
    t_series$ts.y <- zoo::na.approx(t_series$ts.y, maxgap = max_pad_length)
    if (missing(climatology_start))
      stop("Oops! Please provide BOTH start and end dates for the climatology.")
    if (missing(climatology_end))
      stop("Bummer! Please provide BOTH start and end dates for the climatology.")
    clim_start <- climatology_start
    if (t_series$ts.x[1] > clim_start)
      stop(paste("The specified start date precedes the first day of series, which is",
                 t_series$ts.x[1]))
    clim_end <- climatology_end
    if (clim_end > t_series$ts.x[nrow(t_series)])
      stop(paste("The specified end date follows the last day of series, which is",
                 t_series$ts.x[nrow(t_series)]))
    # Cold-spells are detected by flipping the sign of the series, running
    # the heat wave machinery unchanged, and flipping the results back below.
    if (cold_spells)
      t_series$ts.y <- -t_series$ts.y
    # Reshape the climatology period to one row per day-of-year and one
    # column per year, so a sliding window can pool values across years.
    tDat <- t_series %>%
      dplyr::filter(ts.x >= clim_start & ts.x <= clim_end) %>%
      dplyr::mutate(ts.x = lubridate::year(ts.x)) %>%
      tidyr::spread(ts.x, ts.y)
    # Fill Feb 29 (row 60) by interpolating from Feb 28 and Mar 1, but only
    # in columns (years) that have at least one value in that window.
    all_NA <- apply(tDat[59:61, ], 2, function(x) !all(is.na(x)))
    no_NA <- names(all_NA[all_NA > 0])
    tDat[59:61, no_NA] <- zoo::na.approx(tDat[59:61, no_NA], maxgap = 1, na.rm = TRUE)
    # Pad both ends with wrapped rows so the moving window is circular
    # across the year boundary.
    tDat <- rbind(utils::tail(tDat, window_half_width),
                  tDat, utils::head(tDat, window_half_width))
    # Pooled mean (climatology), percentile threshold and standard deviation
    # for each day-of-year, from a window of 2 * window_half_width + 1 days.
    seas_clim_year <- thresh_clim_year <- var_clim_year <- rep(NA, nrow(tDat))
    for (i in (window_half_width + 1):((nrow(tDat) - window_half_width))) {
      seas_clim_year[i] <-
        mean(
          c(t(tDat[(i - (window_half_width)):(i + window_half_width), 2:ncol(tDat)])),
          na.rm = TRUE)
      thresh_clim_year[i] <-
        raster::quantile(
          c(t(tDat[(i - (window_half_width)):(i + window_half_width), 2:ncol(tDat)])),
          probs = pctile/100,
          type = 7,
          na.rm = TRUE,
          names = FALSE
        )
      var_clim_year[i] <-
        stats::sd(
          c(t(tDat[(i - (window_half_width)):(i + window_half_width), 2:ncol(tDat)])),
          na.rm = TRUE
        )
    }
    len_clim_year <- 366
    # Strip the padding rows, keeping one value per day-of-year.
    clim <-
      data.frame(
        doy = tDat[(window_half_width + 1):((window_half_width) + len_clim_year), 1],
        seas_clim_year = seas_clim_year[(window_half_width + 1):((window_half_width) + len_clim_year)],
        thresh_clim_year = thresh_clim_year[(window_half_width + 1):((window_half_width) + len_clim_year)],
        var_clim_year = var_clim_year[(window_half_width + 1):((window_half_width) + len_clim_year)]
      )
    # Optionally smooth the three climatological series with a circular
    # moving average of width smooth_percentile_width.
    if (smooth_percentile) {
      clim <- clim %>%
        dplyr::mutate(
          seas_clim_year = raster::movingFun(
            seas_clim_year,
            n = smooth_percentile_width,
            fun = mean,
            type = "around",
            circular = TRUE,
            na.rm = FALSE
          )
        ) %>%
        dplyr::mutate(
          thresh_clim_year = raster::movingFun(
            thresh_clim_year,
            n = smooth_percentile_width,
            fun = mean,
            type = "around",
            circular = TRUE,
            na.rm = FALSE
          )
        ) %>%
        dplyr::mutate(
          var_clim_year = raster::movingFun(
            var_clim_year,
            n = smooth_percentile_width,
            fun = mean,
            type = "around",
            circular = TRUE,
            na.rm = FALSE
          )
        )
    }
    if (clim_only) {
      # Return only the climatology, attached to the original input columns.
      t_series <- merge(data, clim, by = "doy")
      # Restore chronological order using the input's date column. The
      # original code ordered by 't_series$ts.x', a column that does not
      # exist after the merge, so order(NULL) silently produced a zero-row
      # result.
      t_series <- t_series[order(t_series[[paste(substitute(x))]]), ]
      return(t_series)
    } else {
      t_series <- t_series %>%
        dplyr::inner_join(clim, by = "doy")
      # Remaining NAs are set to the seasonal climatology so that they
      # terminate (rather than bridge) any adjacent exceedances.
      t_series$ts.y[is.na(t_series$ts.y)] <- t_series$seas_clim_year[is.na(t_series$ts.y)]
      t_series$thresh_criterion <- t_series$ts.y > t_series$thresh_clim_year
      # Runs of consecutive threshold exceedances become candidate events.
      ex1 <- rle(t_series$thresh_criterion)
      ind1 <- rep(seq_along(ex1$lengths), ex1$lengths)
      s1 <- split(zoo::index(t_series$thresh_criterion), ind1)
      proto_events <- s1[ex1$values == TRUE]
      index_stop <- index_start <- NULL
      proto_events_rng <-
        lapply(proto_events, function(x)
          data.frame(index_start = min(x), index_stop = max(x)))
      duration <- NULL
      # Keep only runs of at least min_duration days and attach their dates.
      protoFunc <- function(proto_data) {
        proto_data %>%
          dplyr::mutate(duration = index_stop - index_start + 1) %>%
          dplyr::filter(duration >= min_duration) %>%
          dplyr::mutate(date_start = t_series$ts.x[index_start]) %>%
          dplyr::mutate(date_stop = t_series$ts.x[index_stop])
      }
      proto_events <- do.call(rbind, proto_events_rng) %>%
        dplyr::mutate(event_no = cumsum(ex1$values[ex1$values == TRUE])) %>%
        protoFunc()
      t_series$duration_criterion <- rep(FALSE, nrow(t_series))
      # seq_len() (rather than 1:nrow()) is safe if no events were found.
      for (i in seq_len(nrow(proto_events))) {
        t_series$duration_criterion[proto_events$index_start[i]:proto_events$index_stop[i]] <-
          rep(TRUE, length = proto_events$duration[i])
      }
      # Find the gaps between candidate events; short gaps may be bridged.
      ex2 <- rle(t_series$duration_criterion)
      ind2 <- rep(seq_along(ex2$lengths), ex2$lengths)
      s2 <- split(zoo::index(t_series$thresh_criterion), ind2)
      proto_gaps <- s2[ex2$values == FALSE]
      proto_gaps_rng <-
        lapply(proto_gaps, function(x) data.frame(index_start = min(x), index_stop = max(x)))
      proto_gaps <- do.call(rbind, proto_gaps_rng) %>%
        dplyr::mutate(event_no = c(1:length(ex2$values[ex2$values == FALSE]))) %>%
        dplyr::mutate(duration = index_stop - index_start + 1)
      if (any(proto_gaps$duration >= 1 & proto_gaps$duration <= max_gap)) {
        proto_gaps <- proto_gaps %>%
          dplyr::mutate(date_start = t_series$ts.x[index_start]) %>%
          dplyr::mutate(date_stop = t_series$ts.x[index_stop]) %>%
          dplyr::filter(duration >= 1 & duration <= max_gap)
      } else {
        join_across_gaps <- FALSE
      }
      if (join_across_gaps) {
        # Bridge gaps of at most max_gap days between adjacent events.
        t_series$event <- t_series$duration_criterion
        for (i in seq_len(nrow(proto_gaps))) {
          t_series$event[proto_gaps$index_start[i]:proto_gaps$index_stop[i]] <-
            rep(TRUE, length = proto_gaps$duration[i])
        }
      } else {
        t_series$event <- t_series$duration_criterion
      }
      # Re-run the run-length pass on the (possibly joined) event flags to
      # obtain the final events.
      ex3 <- rle(t_series$event)
      ind3 <- rep(seq_along(ex3$lengths), ex3$lengths)
      s3 <- split(zoo::index(t_series$event), ind3)
      events <- s3[ex3$values == TRUE]
      event_no <- NULL
      events_rng <-
        lapply(events, function(x)
          data.frame(index_start = min(x), index_stop = max(x)))
      events <- do.call(rbind, events_rng) %>%
        dplyr::mutate(event_no = cumsum(ex3$values[ex3$values == TRUE])) %>%
        protoFunc()
      t_series$event_no <- rep(NA, nrow(t_series))
      for (i in seq_len(nrow(events))) {
        t_series$event_no[events$index_start[i]:events$index_stop[i]] <-
          rep(i, length = events$duration[i])
      }
      # Dummy bindings for the NSE column names used below (R CMD check).
      int_mean <- int_max <- int_cum <- int_mean_rel_thresh <-
        int_max_rel_thresh <- int_cum_rel_thresh <- int_mean_abs <-
        int_max_abs <- int_cum_abs <- int_mean_norm <- int_max_norm <-
        rate_onset <- rate_decline <- mhw_rel_thresh <-
        rel_thresh_norm <- mhw_rel_seas <- NULL
      # Per-event daily values and anomalies.
      events_list <- plyr::dlply(events, c("event_no"), function(df)
        with(
          t_series,
          data.frame(
            ts.x = c(ts.x[df$index_start:df$index_stop]),
            ts.y = c(ts.y[df$index_start:df$index_stop]),
            seas_clim_year = c(seas_clim_year[df$index_start:df$index_stop]),
            thresh_clim_year = c(thresh_clim_year[df$index_start:df$index_stop]),
            mhw_rel_seas = c(ts.y[df$index_start:df$index_stop]) - c(seas_clim_year[df$index_start:df$index_stop]),
            mhw_rel_thresh = c(ts.y[df$index_start:df$index_stop]) - c(thresh_clim_year[df$index_start:df$index_stop]),
            # Normalised intensity: (temp - thresh) / (thresh - seas), as in
            # the Python marineHeatWaves module. Parentheses added here: the
            # original expression 'a - b / b - c' divided the threshold by
            # itself due to operator precedence.
            rel_thresh_norm = (c(ts.y[df$index_start:df$index_stop]) -
                                 c(thresh_clim_year[df$index_start:df$index_stop])) /
              (c(thresh_clim_year[df$index_start:df$index_stop]) -
                 c(seas_clim_year[df$index_start:df$index_stop]))
          )
        )
      )
      # Summarise each event into the published set of metrics.
      events <- cbind(events,
                      events_list %>%
                        dplyr::bind_rows(.id = "event_no") %>%
                        dplyr::group_by(event_no) %>%
                        dplyr::summarise(date_peak = ts.x[mhw_rel_seas == max(mhw_rel_seas)][1],
                                         int_mean = mean(mhw_rel_seas),
                                         int_max = max(mhw_rel_seas),
                                         int_var = sqrt(stats::var(mhw_rel_seas)),
                                         int_cum = max(cumsum(mhw_rel_seas)),
                                         int_mean_rel_thresh = mean(mhw_rel_thresh),
                                         int_max_rel_thresh = max(mhw_rel_thresh),
                                         int_var_rel_thresh = sqrt(stats::var(mhw_rel_thresh)),
                                         int_cum_rel_thresh = max(cumsum(mhw_rel_thresh)),
                                         int_mean_abs = mean(ts.y),
                                         int_max_abs = max(ts.y),
                                         int_var_abs = sqrt(stats::var(ts.y)),
                                         int_cum_abs = max(cumsum(ts.y)),
                                         int_mean_norm = mean(rel_thresh_norm),
                                         int_max_norm = max(rel_thresh_norm)) %>%
                        dplyr::arrange(as.numeric(event_no)) %>%
                        dplyr::select(-event_no))
      # Onset rate: assumes the event began half a day before its first day.
      # An event already in progress on day 1 of the series gets NA.
      mhw_rel_seas <- t_series$ts.y - t_series$seas_clim_year
      A <- mhw_rel_seas[events$index_start]
      B <- t_series$ts.y[events$index_start - 1]
      C <- t_series$seas_clim_year[events$index_start - 1]
      # If the first event starts at index 1, indexing with 0 drops an
      # element; pad the front with NA so the vectors line up again.
      if (length(B) + 1 == length(A)) {
        B <- c(NA, B)
        C <- c(NA, C)
      }
      mhw_rel_seas_start <- 0.5 * (A + B - C)
      events$rate_onset <- ifelse(
        events$index_start > 1,
        (events$int_max - mhw_rel_seas_start) / (as.numeric(
          difftime(events$date_peak, events$date_start, units = "days")) + 0.5),
        NA
      )
      # Decline rate: assumes the event ended half a day after its last day.
      # Indexing one past the end of the series yields NA automatically.
      # (Renamed from D/E/F; 'F' shadowed the FALSE shorthand.)
      rel_seas_end <- mhw_rel_seas[events$index_stop]
      y_after <- t_series$ts.y[events$index_stop + 1]
      seas_after <- t_series$seas_clim_year[events$index_stop + 1]
      mhw_rel_seas_end <- 0.5 * (rel_seas_end + y_after - seas_after)
      events$rate_decline <- ifelse(
        events$index_stop < nrow(t_series),
        (events$int_max - mhw_rel_seas_end) / (as.numeric(
          difftime(events$date_stop, events$date_peak, units = "days")) + 0.5),
        NA
      )
      # Undo the sign flip applied for cold-spell detection.
      if (cold_spells) {
        events <- events %>% dplyr::mutate(
          int_mean = -int_mean,
          int_max = -int_max,
          int_cum = -int_cum,
          int_mean_rel_thresh = -int_mean_rel_thresh,
          int_max_rel_thresh = -int_max_rel_thresh,
          int_cum_rel_thresh = -int_cum_rel_thresh,
          int_mean_abs = -int_mean_abs,
          int_max_abs = -int_max_abs,
          int_cum_abs = -int_cum_abs,
          int_mean_norm = -int_mean_norm,
          int_max_norm = -int_max_norm,
          rate_onset = -rate_onset,
          rate_decline = -rate_decline
        )
        t_series <- t_series %>% dplyr::mutate(
          ts.y = -ts.y,
          seas_clim_year = -seas_clim_year,
          thresh_clim_year = -thresh_clim_year
        )
      }
      # Restore user-facing column names.
      # NOTE(review): 'doy' was removed with rm() above, so substitute(doy)
      # resolves to the bare symbol 'doy' and the first column is always
      # named "doy" regardless of the name supplied -- confirm intended.
      names(t_series)[1] <- paste(substitute(doy))
      names(t_series)[2] <- paste(substitute(x))
      names(t_series)[3] <- paste(substitute(y))
      list(clim = tibble::as_tibble(t_series),
           event = tibble::as_tibble(events))
    }
  }
| /R/RmarineHeatWaves.R | no_license | cran/RmarineHeatWaves | R | false | false | 24,889 | r | #' Detect heatwaves and cold-spells.
#'
#' Applies the Hobday et al. (2016) marine heat wave definition to an input time
#' series of temperature along with a daily date vector.
#'
#' @importFrom dplyr %>%
#'
#' @param data A data frame with three columns. In the default setting (i.e. ommitting
#' the arguments \code{doy}, \code{x} and \code{y}; see immediately below), the
#' data set is expected to have the headers \code{doy}, \code{t} and \code{temp}.
#' \code{doy} is the Julian day running from 1 to 366, but modified so that the
#' day-of-year (doy) vector for non-leap-years runs 1...59 and then 61...366.
#' For leap years the 60th day is February 29. The \code{t} column is a vector
#' of dates of class \code{Date}, while \code{temp} is the measured variable (by
#' default it is assumed to be temperature). Data of the appropriate format are
#' created by the function \code{\link{make_whole}}, but your own data can be supplied
#' if they meet the criteria specified by \code{\link{make_whole}}.
#' @param doy If a column headed \code{doy} is not available, another column with
#' Julian dates can be supplied. This argument accepts the name of that column. The
#' default name is, of course, \code{doy}.
#' @param x This column is expected to contain a vector of dates as per the
#' specification of \code{make_whole}. If a column headed \code{t} is present in
#' the dataframe, this argument may be omitted; otherwise, specify the name of
#' the column with dates here.
#' @param y This is a column containing the measurement variable. If the column
#' name differs from the default (i.e. \code{temp}), specify the name here.
#' @param climatology_start Required. The start date for the period across which
#' the (varying by day-of-year) seasonal cycle and extremes threshold are
#' calculated.
#' @param climatology_end Required. The end date for the period across which
#' the (varying by day-of-year) seasonal cycle and extremes threshold are
#' calculated.
#' @param pctile Threshold percentile (\%) for detection of extreme values.
#' Default is \code{90}th percentile. Please see \code{cold_spells} for more
#' information about the calculation of marine cold spells.
#' @param window_half_width Width of sliding window about day-of-year (to one
#' side of the center day-of-year) used for the pooling of values and
#' calculation of climatology and threshold percentile. Default is \code{5}
#' days, which gives a window width of 11 days centered on the 6th day of the
#' series of 11 days.
#' @param smooth_percentile Boolean switch selecting whether to smooth the
#' climatology and threshold percentile timeseries with a moving average of
#' width \code{smooth_percentile}. Default is \code{TRUE}.
#' @param smooth_percentile_width Full width of moving average window for smoothing
#' climatology and threshold. Default is \code{31} days.
#' @param clim_only Choose to calculate only the climatologies and not the
#' events. Default is \code{FALSE}.
#' @param min_duration Minimum duration for acceptance of detected MHWs.
#' Default is \code{5} days.
#' @param join_across_gaps Boolean switch indicating whether to join MHWs which
#' occur before/after a short gap as specified by \code{max_gap}. Default
#' is \code{TRUE}.
#' @param max_gap Maximum length of gap allowed for the joining of MHWs. Default
#' is \code{2} days.
#' @param max_pad_length Specifies the maximum length of days over which to
#' interpolate (pad) missing data (specified as \code{NA}) in the input
#' temperature time series; i.e., any consecutive blocks of NAs with length
#' greater than \code{max_pad_length} will be left as \code{NA}. Set as an
#' integer. Default is \code{3} days.
#' @param cold_spells Boolean specifying if the code should detect cold events
#' instead of heat events. Default is \code{FALSE}. Please note that the
#' climatological thresholds for cold-spells are calculated the same as for
#' heatwaves, meaning that \code{pctile} should be set the same regardless
#' if one is calculating heatwaves or cold-spells. For example, if one wants
#' to calculate heatwaves above the 90th percentile threshold
#' (the default) one sets \code{pctile = 90}. Likewise, if one would like
#' identify the most intense cold-spells one must also set \code{pctile = 90},
#' even though cold spells are in fact simply the coldest extreme events in a
#' time series, which statistically equate to values below the 10th percentile.
#'
#' @details
#' \enumerate{
#' \item This function assumes that the input time series consists of continuous
#' daily values with few missing values. Time ranges which start and end
#' part-way through the calendar year are supported. The accompanying function
#' \code{\link{make_whole}} aids in the preparation of a time series that is
#' suitable for use with \code{detect}, although this may also be accomplished
#' 'by hand' as long as the criteria are met as discussed in the documentation
#' to \code{\link{make_whole}}.
#' \item It is recommended that a climatology period of at least 30 years is
#' specified in order to capture decadal thermal periodicities. It is further
#' advised that full the start and end dates for the climatology period result
#' in full years, e.g. "1982-01-01" to "2011-12-31" or "1982-07-01" to
#' "2012-06-30"; if not, this may result in an unequal weighting of data
#' belonging with certain months within a time series.
#' \item This function supports leap years. This is done by ignoring Feb 29s
#' for the initial calculation of the climatology and threshold. The values for
#' Feb 29 are then linearly interpolated from the values for Feb 28 and Mar 1.
#' \item The calculation of onset and decline rates assumes that the events
#' started a half-day before the start day and ended a half-day after the
#' end-day. This is consistent with the duration definition as implemented,
#' which assumes duration = end day - start day + 1. As of version 0.15.7, an
#' event that is already present at the beginning of a time series, or an event
#' that is still present at the end of a time series, will report the rate of
#' onset or the rate of decline as \code{NA}, as it is impossible to know what
#' the temperature half a day before or after the start or end of the event is.
#' This may be a departure from the python marineHeatWaves function.
#' \item For the purposes of event detection, any missing temperature values not
#' interpolated over (through optional \code{max_pad_length}) will be set equal
#' to the seasonal climatology. This means they will trigger the end/start of
#' any adjacent temperature values which satisfy the event definition criteria.
#' \item If the code is used to detect cold events (\code{cold_spells} = TRUE),
#' then it works just as for heat waves except that events are detected as
#' deviations below the (100 - pctile)th percentile (e.g., the 10th instead of
#' 90th) for at least 5 days. Intensities are reported as negative values and
#' represent the temperature anomaly below climatology.
#' \item If only the climatology for the time series is required, and not the
#' events themselves, this may be done by setting \code{clim_only} = TRUE.
#' }
#' The original Python algorithm was written by Eric Oliver, Institute for
#' Marine and Antarctic Studies, University of Tasmania, Feb 2015, and is
#' documented by Hobday et al. (2016). The marine cold spell option was
#' implemented in version 0.13 (21 Nov 2015) of the Python module as a result
#' of our preparation of Schlegel et al. (submitted), wherein the cold events
#' receive a brief overview.
#'
#' @return The function will return a list of two tibbles (see the \code{tidyverse}),
#' \code{clim} and \code{event}, which are the climatology and events,
#' respectively. The climatology contains the full time series of daily temperatures,
#' as well as the seasonal climatology, the threshold and various aspects of the
#' events that were detected. The software was designed for detecting extreme
#' thermal events, and the units specified below reflect that intended purpose.
#' However, the various other kinds of extreme events may be detected according
#' to the 'marine heat wave' specifications, and if that is the case, the appropriate
#' units need to be determined by the user.
#' \item{doy}{Julian day (day-of-year). For non-leap years it runs 1...59 and
#' 61...366, while leap years run 1...366. This column will be named differently if
#' another name was specified to the \code{doy} argument.}
#' \item{t}{The date of the temperature measurement. This column will be
#' named differently if another name was specified to the \code{x} argument.}
#' \item{temp}{If the software was used for the purpose for which it was designed,
#' seawater temperature [deg. C] on the specified date will be returned. This
#' column will of course be named differently if another kind of measurement was
#' specified to the \code{y} argument.}
#' \item{seas_clim_year}{Climatological seasonal cycle [deg. C].}
#' \item{thresh_clim_year}{Seasonally varying threshold (e.g., 90th
#' percentile) [deg. C].}
#' \item{var_clim_year}{Seasonally varying variance (standard deviation) [deg. C].}
#' \item{thresh_criterion}{Boolean indicating if \code{temp} exceeds
#' \code{thresh_clim_year}.}
#' \item{duration_criterion}{Boolean indicating whether periods of consecutive
#' \code{thresh_criterion} are >= \code{min_duration}.}
#' \item{event}{Boolean indicating if all criteria that define a MHW or MCS are
#' met.}
#' \item{event_no}{A sequential number indicating the ID and order of
#' occurence of the MHWs or MCSs.}
#'
#' The events are summarised using a range of event metrics:
#' \item{index_start}{Start index of event.}
#' \item{index_stop}{Stop index of event.}
#' \item{event_no}{A sequential number indicating the ID and order of
#' the events.}
#' \item{duration}{Duration of event [days].}
#' \item{date_start}{Start date of event [date].}
#' \item{date_stop}{Stop date of event [date].}
#' \item{date_peak}{Date of event peak [date].}
#' \item{int_mean}{Mean intensity [deg. C].}
#' \item{int_max}{Maximum (peak) intensity [deg. C].}
#' \item{int_var}{Intensity variability (standard deviation) [deg. C].}
#' \item{int_cum}{Cumulative intensity [deg. C x days].}
#' \item{rate_onset}{Onset rate of event [deg. C / day].}
#' \item{rate_decline}{Decline rate of event [deg. C / day].}
#'
#' \code{int_max_rel_thresh}, \code{int_mean_rel_thresh},
#' \code{int_var_rel_thresh}, and \code{int_cum_rel_thresh}
#' are as above except relative to the threshold (e.g., 90th percentile) rather
#' than the seasonal climatology.
#'
#' \code{int_max_abs}, \code{int_mean_abs}, \code{int_var_abs}, and
#' \code{int_cum_abs} are as above except as absolute magnitudes
#' rather than relative to the seasonal climatology or threshold.
#'
#' \code{int_max_norm} and \code{int_mean_norm} are as above except
#' units are in multiples of threshold exceedances, i.e., a value of 1.5
#' indicates the event intensity (relative to the climatology) was 1.5 times the
#' value of the threshold (relative to climatology,
#' i.e., threshold - climatology.)
#'
#' Note that \code{rate_onset} and \code{rate_decline} will return \code{NA}
#' when the event begins/ends on the first/last day of the time series. This
#' may be particularly evident when the function is applied to large gridded
#' data sets. Although the other metrics do not contain any errors and
#' provide sensible values, please take this into account in its
#' interpretation.
#'
#' @author Albertus J. Smit, Robert W. Schlegel, Eric C. J. Oliver
#'
#' @references Hobday, A.J. et al. (2016). A hierarchical approach to defining
#' marine heatwaves, Progress in Oceanography, 141, pp. 227-238,
#' doi:10.1016/j.pocean.2015.12.014
#'
#' Schlegel, R. W., Oliver, C. J., Wernberg, T. W., Smit, A. J. (2017).
#' Coastal and offshore co-occurrences of marine heatwaves and cold-spells.
#' Progress in Oceanography, 151, pp. 189-205, doi:10.1016/j.pocean.2017.01.004
#'
#' @export
#'
#' @examples
#' ts_dat <- make_whole(sst_WA)
#' res <- detect(ts_dat, climatology_start = "1983-01-01",
#' climatology_end = "2012-12-31")
#' # show a portion of the climatology:
#' res$clim[1:10, ]
#' # show some of the heat waves:
#' res$event[1:5, 1:10]
detect <-
function(data,
doy = doy,
x = t,
y = temp,
climatology_start,
climatology_end,
pctile = 90,
window_half_width = 5,
smooth_percentile = TRUE,
smooth_percentile_width = 31,
clim_only = FALSE,
min_duration = 5,
join_across_gaps = TRUE,
max_gap = 2,
max_pad_length = 3,
cold_spells = FALSE
# verbose = TRUE, # to be implemented
) {
temp <- NULL
doy <- eval(substitute(doy), data)
ts.x <- eval(substitute(x), data)
ts.y <- eval(substitute(y), data)
t_series <- tibble::tibble(doy,
ts.x,
ts.y)
rm(doy); rm(ts.x); rm(ts.y)
t_series$ts.y <- zoo::na.approx(t_series$ts.y, maxgap = max_pad_length)
if (missing(climatology_start))
stop("Oops! Please provide BOTH start and end dates for the climatology.")
if (missing(climatology_end))
stop("Bummer! Please provide BOTH start and end dates for the climatology.")
# clim_start <- paste(climatology_start, "01", "01", sep = "-")
clim_start <- climatology_start
if (t_series$ts.x[1] > clim_start)
stop(paste("The specified start date precedes the first day of series, which is",
t_series$ts.x[1]))
# clim_end <- paste(climatology_end, "12", "31", sep = "-")
clim_end <- climatology_end
if (clim_end > t_series$ts.x[nrow(t_series)])
stop(paste("The specified end date follows the last day of series, which is",
t_series$ts.x[nrow(t_series)]))
if (cold_spells)
t_series$ts.y <- -t_series$ts.y
tDat <- t_series %>%
dplyr::filter(ts.x >= clim_start & ts.x <= clim_end) %>%
dplyr::mutate(ts.x = lubridate::year(ts.x)) %>%
tidyr::spread(ts.x, ts.y)
all_NA <- apply(tDat[59:61, ], 2, function(x) !all(is.na(x)))
no_NA <- names(all_NA[all_NA > 0])
tDat[59:61, no_NA] <- zoo::na.approx(tDat[59:61, no_NA], maxgap = 1, na.rm = TRUE)
tDat <- rbind(utils::tail(tDat, window_half_width),
tDat, utils::head(tDat, window_half_width))
seas_clim_year <- thresh_clim_year <- var_clim_year <- rep(NA, nrow(tDat))
for (i in (window_half_width + 1):((nrow(tDat) - window_half_width))) {
seas_clim_year[i] <-
mean(
c(t(tDat[(i - (window_half_width)):(i + window_half_width), 2:ncol(tDat)])),
na.rm = TRUE)
thresh_clim_year[i] <-
raster::quantile(
c(t(tDat[(i - (window_half_width)):(i + window_half_width), 2:ncol(tDat)])),
probs = pctile/100,
type = 7,
na.rm = TRUE,
names = FALSE
)
var_clim_year[i] <-
stats::sd(
c(t(tDat[(i - (window_half_width)):(i + window_half_width), 2:ncol(tDat)])),
na.rm = TRUE
)
}
len_clim_year <- 366
clim <-
data.frame(
doy = tDat[(window_half_width + 1):((window_half_width) + len_clim_year), 1],
seas_clim_year = seas_clim_year[(window_half_width + 1):((window_half_width) + len_clim_year)],
thresh_clim_year = thresh_clim_year[(window_half_width + 1):((window_half_width) + len_clim_year)],
var_clim_year = var_clim_year[(window_half_width + 1):((window_half_width) + len_clim_year)]
)
if (smooth_percentile) {
clim <- clim %>%
dplyr::mutate(
seas_clim_year = raster::movingFun(
seas_clim_year,
n = smooth_percentile_width,
fun = mean,
type = "around",
circular = TRUE,
na.rm = FALSE
)
) %>%
dplyr::mutate(
thresh_clim_year = raster::movingFun(
thresh_clim_year,
n = smooth_percentile_width,
fun = mean,
type = "around",
circular = TRUE,
na.rm = FALSE
)
) %>%
dplyr::mutate(
var_clim_year = raster::movingFun(
var_clim_year,
n = smooth_percentile_width,
fun = mean,
type = "around",
circular = TRUE,
na.rm = FALSE
)
)
}
if (clim_only) {
t_series <- merge(data, clim, by = "doy")
t_series <- t_series[order(t_series$ts.x),]
return(t_series)
} else {
t_series <- t_series %>%
dplyr::inner_join(clim, by = "doy")
t_series$ts.y[is.na(t_series$ts.y)] <- t_series$seas_clim_year[is.na(t_series$ts.y)]
t_series$thresh_criterion <- t_series$ts.y > t_series$thresh_clim_year
ex1 <- rle(t_series$thresh_criterion)
ind1 <- rep(seq_along(ex1$lengths), ex1$lengths)
s1 <- split(zoo::index(t_series$thresh_criterion), ind1)
proto_events <- s1[ex1$values == TRUE]
index_stop <- index_start <- NULL
proto_events_rng <-
lapply(proto_events, function(x)
data.frame(index_start = min(x), index_stop = max(x)))
duration <- NULL
protoFunc <- function(proto_data) {
out <- proto_data %>%
dplyr::mutate(duration = index_stop - index_start + 1) %>%
dplyr::filter(duration >= min_duration) %>%
dplyr::mutate(date_start = t_series$ts.x[index_start]) %>%
dplyr::mutate(date_stop = t_series$ts.x[index_stop])
}
proto_events <- do.call(rbind, proto_events_rng) %>%
dplyr::mutate(event_no = cumsum(ex1$values[ex1$values == TRUE])) %>%
protoFunc()
t_series$duration_criterion <- rep(FALSE, nrow(t_series))
for (i in 1:nrow(proto_events)) {
t_series$duration_criterion[proto_events$index_start[i]:proto_events$index_stop[i]] <-
rep(TRUE, length = proto_events$duration[i])
}
ex2 <- rle(t_series$duration_criterion)
ind2 <- rep(seq_along(ex2$lengths), ex2$lengths)
s2 <- split(zoo::index(t_series$thresh_criterion), ind2)
proto_gaps <- s2[ex2$values == FALSE]
proto_gaps_rng <-
lapply(proto_gaps, function(x) data.frame(index_start = min(x), index_stop = max(x)))
proto_gaps <- do.call(rbind, proto_gaps_rng) %>%
dplyr::mutate(event_no = c(1:length(ex2$values[ex2$values == FALSE]))) %>%
dplyr::mutate(duration = index_stop - index_start + 1)
if (any(proto_gaps$duration >= 1 & proto_gaps$duration <= max_gap)) {
proto_gaps <- proto_gaps %>%
dplyr::mutate(date_start = t_series$ts.x[index_start]) %>%
dplyr::mutate(date_stop = t_series$ts.x[index_stop]) %>%
dplyr::filter(duration >= 1 & duration <= max_gap)
} else {
join_across_gaps <- FALSE
}
if (join_across_gaps) {
t_series$event <- t_series$duration_criterion
for (i in 1:nrow(proto_gaps)) {
t_series$event[proto_gaps$index_start[i]:proto_gaps$index_stop[i]] <-
rep(TRUE, length = proto_gaps$duration[i])
}
} else {
t_series$event <- t_series$duration_criterion
}
ex3 <- rle(t_series$event)
ind3 <- rep(seq_along(ex3$lengths), ex3$lengths)
s3 <- split(zoo::index(t_series$event), ind3)
events <- s3[ex3$values == TRUE]
event_no <- NULL
events_rng <-
lapply(events, function(x)
data.frame(index_start = min(x), index_stop = max(x)))
events <- do.call(rbind, events_rng) %>%
dplyr::mutate(event_no = cumsum(ex3$values[ex3$values == TRUE])) %>%
protoFunc()
t_series$event_no <- rep(NA, nrow(t_series))
for (i in 1:nrow(events)) {
t_series$event_no[events$index_start[i]:events$index_stop[i]] <-
rep(i, length = events$duration[i])
}
int_mean <- int_max <- int_cum <- int_mean_rel_thresh <-
int_max_rel_thresh <- int_cum_rel_thresh <- int_mean_abs <-
int_max_abs <- int_cum_abs <- int_mean_norm <- int_max_norm <-
rate_onset <- rate_decline <- mhw_rel_thresh <-
rel_thresh_norm <- mhw_rel_seas <- NULL
events_list <- plyr::dlply(events, c("event_no"), function(df)
with(
t_series,
data.frame(
ts.x = c(ts.x[df$index_start:df$index_stop]),
ts.y = c(ts.y[df$index_start:df$index_stop]),
seas_clim_year = c(seas_clim_year[df$index_start:df$index_stop]),
thresh_clim_year = c(thresh_clim_year[df$index_start:df$index_stop]),
mhw_rel_seas = c(ts.y[df$index_start:df$index_stop]) - c(seas_clim_year[df$index_start:df$index_stop]),
mhw_rel_thresh = c(ts.y[df$index_start:df$index_stop]) - c(thresh_clim_year[df$index_start:df$index_stop]),
rel_thresh_norm = c(ts.y[df$index_start:df$index_stop]) - c(thresh_clim_year[df$index_start:df$index_stop]) /
c(thresh_clim_year[df$index_start:df$index_stop]) - c(seas_clim_year[df$index_start:df$index_stop])
)
)
)
events <- cbind(events,
events_list %>%
dplyr::bind_rows(.id = "event_no") %>%
dplyr::group_by(event_no) %>%
dplyr::summarise(date_peak = ts.x[mhw_rel_seas == max(mhw_rel_seas)][1],
int_mean = mean(mhw_rel_seas),
int_max = max(mhw_rel_seas),
int_var = sqrt(stats::var(mhw_rel_seas)),
int_cum = max(cumsum(mhw_rel_seas)),
int_mean_rel_thresh = mean(mhw_rel_thresh),
int_max_rel_thresh = max(mhw_rel_thresh),
int_var_rel_thresh = sqrt(stats::var(mhw_rel_thresh)),
int_cum_rel_thresh = max(cumsum(mhw_rel_thresh)),
int_mean_abs = mean(ts.y),
int_max_abs = max(ts.y),
int_var_abs = sqrt(stats::var(ts.y)),
int_cum_abs = max(cumsum(ts.y)),
int_mean_norm = mean(rel_thresh_norm),
int_max_norm = max(rel_thresh_norm)) %>%
dplyr::arrange(as.numeric(event_no)) %>%
dplyr::select(-event_no))
mhw_rel_seas <- t_series$ts.y - t_series$seas_clim_year
A <- mhw_rel_seas[events$index_start]
B <- t_series$ts.y[events$index_start - 1]
C <- t_series$seas_clim_year[events$index_start - 1]
if (length(B) + 1 == length(A)) {
B <- c(NA, B)
C <- c(NA, C)
}
mhw_rel_seas_start <- 0.5 * (A + B - C)
events$rate_onset <- ifelse(
events$index_start > 1,
(events$int_max - mhw_rel_seas_start) / (as.numeric(
difftime(events$date_peak, events$date_start, units = "days")) + 0.5),
NA
)
D <- mhw_rel_seas[events$index_stop]
E <- t_series$ts.y[events$index_stop + 1]
F <- t_series$seas_clim_year[events$index_stop + 1]
mhw_rel_seas_end <- 0.5 * (D + E - F)
events$rate_decline <- ifelse(
events$index_stop < nrow(t_series),
(events$int_max - mhw_rel_seas_end) / (as.numeric(
difftime(events$date_stop, events$date_peak, units = "days")) + 0.5),
NA
)
if (cold_spells) {
events <- events %>% dplyr::mutate(
int_mean = -int_mean,
int_max = -int_max,
int_cum = -int_cum,
int_mean_rel_thresh = -int_mean_rel_thresh,
int_max_rel_thresh = -int_max_rel_thresh,
int_cum_rel_thresh = -int_cum_rel_thresh,
int_mean_abs = -int_mean_abs,
int_max_abs = -int_max_abs,
int_cum_abs = -int_cum_abs,
int_mean_norm = -int_mean_norm,
int_max_norm = -int_max_norm,
rate_onset = -rate_onset,
rate_decline = -rate_decline
)
t_series <- t_series %>% dplyr::mutate(
ts.y = -ts.y,
seas_clim_year = -seas_clim_year,
thresh_clim_year = -thresh_clim_year
)
}
names(t_series)[1] <- paste(substitute(doy))
names(t_series)[2] <- paste(substitute(x))
names(t_series)[3] <- paste(substitute(y))
list(clim = tibble::as_tibble(t_series),
event = tibble::as_tibble(events))
}
}
|
library(Canopy)
### Name: AML43
### Title: SNA input for primary tumor and relapse genome of leukemia
### patient from Ding et al. Nature 2012.
### Aliases: AML43
### Keywords: datasets
### ** Examples
data(AML43)
| /data/genthat_extracted_code/Canopy/examples/AML43.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 227 | r | library(Canopy)
### Name: AML43
### Title: SNA input for primary tumor and relapse genome of leukemia
### patient from Ding et al. Nature 2012.
### Aliases: AML43
### Keywords: datasets
### ** Examples
data(AML43)
|
rep(c(1:5), 2)
c(c(1:5),c(1:5))
X = c(3,4,-5,7,8,12,10,4,-3)
X[X<0] #everything smaller than 0
X[X<mean(X)] #everything smaller than mean value (3.5)
matrix(data=x, nrow=3)
matrix(data=x, nrow=3, byrow=TRUE)
| /aufgaben/blatt01/1.r | permissive | glor/R | R | false | false | 210 | r | rep(c(1:5), 2)
c(c(1:5),c(1:5))
X = c(3,4,-5,7,8,12,10,4,-3)
X[X<0] #everything smaller than 0
X[X<mean(X)] #everything smaller than mean value (3.5)
matrix(data=x, nrow=3)
matrix(data=x, nrow=3, byrow=TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/families.R
\name{brmsfamily}
\alias{Beta}
\alias{acat}
\alias{bernoulli}
\alias{brmsfamily}
\alias{categorical}
\alias{cratio}
\alias{cumulative}
\alias{exponential}
\alias{geometric}
\alias{hurdle_gamma}
\alias{hurdle_negbinomial}
\alias{hurdle_poisson}
\alias{lognormal}
\alias{negbinomial}
\alias{sratio}
\alias{student}
\alias{von_mises}
\alias{weibull}
\alias{zero_inflated_beta}
\alias{zero_inflated_binomial}
\alias{zero_inflated_negbinomial}
\alias{zero_inflated_poisson}
\title{Special Family Functions for \pkg{brms} Models}
\usage{
brmsfamily(family, link = NULL)
student(link = "identity")
bernoulli(link = "logit")
negbinomial(link = "log")
geometric(link = "log")
lognormal(link = "identity")
exponential(link = "log")
weibull(link = "log")
Beta(link = "logit")
von_mises(link = "tan_half")
hurdle_poisson(link = "log")
hurdle_negbinomial(link = "log")
hurdle_gamma(link = "log")
zero_inflated_beta(link = "logit")
zero_inflated_poisson(link = "log")
zero_inflated_negbinomial(link = "log")
zero_inflated_binomial(link = "logit")
categorical(link = "logit")
cumulative(link = "logit")
sratio(link = "logit")
cratio(link = "logit")
acat(link = "logit")
}
\arguments{
\item{family}{A character string naming the distribution
of the response variable be used in the model.
Currently, the following families are supported:
\code{gaussian}, \code{student}, \code{binomial},
\code{bernoulli}, \code{poisson}, \code{negbinomial},
\code{geometric}, \code{Gamma}, \code{lognormal}, \code{inverse.gaussian},
\code{exponential}, \code{weibull}, \code{Beta}, \code{von_mises},
\code{categorical}, \code{cumulative}, \code{cratio}, \code{sratio},
\code{acat}, \code{hurdle_poisson}, \code{hurdle_negbinomial},
\code{hurdle_gamma}, \code{zero_inflated_binomial},
\code{zero_inflated_beta}, \code{zero_inflated_negbinomial},
and \code{zero_inflated_poisson}.}
\item{link}{A specification for the model link function.
This can be a name/expression or character string.
See the 'Details' section for more information on link
functions supported by each family.}
}
\description{
Family objects provide a convenient way to specify the details of the models
used by many model fitting functions. The familiy functions present here are
currently for use with \pkg{brms} only and will NOT work with other model
fitting functions such as \code{glm} or \code{glmer}.
However, the standard family functions as decribed in
\code{\link[stats:family]{family}} will work with \pkg{brms}.
}
\details{
Family \code{gaussian} with \code{identity} link leads to linear regression.
Family \code{student} with \code{identity} link leads to
robust linear regression that is less influenced by outliers.
Families \code{poisson}, \code{negbinomial}, and \code{geometric}
with \code{log} link lead to regression models for count data.
Families \code{binomial} and \code{bernoulli} with \code{logit} link leads to
logistic regression and family \code{categorical} to multi-logistic regression
when there are more than two possible outcomes.
Families \code{cumulative}, \code{cratio} ('contiuation ratio'),
\code{sratio} ('stopping ratio'), and \code{acat} ('adjacent category')
leads to ordinal regression. Families \code{Gamma}, \code{weibull},
\code{exponential}, \code{lognormal}, and \code{inverse.gaussian} can be used
(among others) for survival regression.
Families \code{hurdle_poisson}, \code{hurdle_negbinomial}, \code{hurdle_gamma},
\code{zero_inflated_poisson}, and \cr
\code{zero_inflated_negbinomial} combined with the
\code{log} link, and \code{zero_inflated_binomial} with the \code{logit} link,
allow to estimate zero-inflated and hurdle models. These models
can be very helpful when there are many zeros in the data that cannot be explained
by the primary distribution of the response. Family \code{hurdle_gamma} is
especially useful, as a traditional \code{Gamma} model cannot be reasonably
fitted for data containing zeros in the response.
In the following, we list all possible links for each family.
The families \code{gaussian}, and \code{student},
accept the links (as names) \code{identity}, \code{log}, and \code{inverse};
families \code{poisson}, \code{negbinomial}, and \code{geometric} the links
\code{log}, \code{identity}, and \code{sqrt};
families \code{binomial}, \code{bernoulli}, \code{Beta},
\code{cumulative}, \code{cratio}, \code{sratio}, and \code{acat}
the links \code{logit}, \code{probit}, \code{probit_approx},
\code{cloglog}, and \code{cauchit};
family \code{categorical} the link \code{logit};
families \code{Gamma}, \code{weibull}, and \code{exponential}
the links \code{log}, \code{identity}, and \code{inverse};
family \code{lognormal} the links \code{identity} and \code{inverse};
family \code{inverse.gaussian} the links \code{1/mu^2},
\code{inverse}, \code{identity} and \code{log};
families \code{hurdle_poisson}, \code{hurdle_negbinomial},
\code{hurdle_gamma}, \code{zero_inflated_poisson}, and
\code{zero_inflated_negbinomial} the link \code{log}.
The first link mentioned for each family is the default.
Please note that when calling the \code{\link[stats:family]{Gamma}}
family function, the default link will be \code{inverse} not \code{log}.
Also, the \code{probit_approx} link cannot be used when calling the
\code{\link[stats:family]{binomial}} family function.
The current implementation of \code{inverse.gaussian} models has some
convergence problems and requires carefully chosen prior distributions
to work efficiently. For this reason, we currently do not recommend
to use the \code{inverse.gaussian} family, unless you really feel
that your data requires exactly this type of model. \cr
}
\examples{
# create a family object
(fam1 <- student("log"))
# alternatively use the brmsfamily function
(fam2 <- brmsfamily("student", "log"))
# both leads to the same object
identical(fam1, fam2)
}
\seealso{
\code{\link[brms:brm]{brm}},
\code{\link[stats:family]{family}}
}
| /man/brmsfamily.Rd | no_license | hoardboard/brms | R | false | true | 6,176 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/families.R
\name{brmsfamily}
\alias{Beta}
\alias{acat}
\alias{bernoulli}
\alias{brmsfamily}
\alias{categorical}
\alias{cratio}
\alias{cumulative}
\alias{exponential}
\alias{geometric}
\alias{hurdle_gamma}
\alias{hurdle_negbinomial}
\alias{hurdle_poisson}
\alias{lognormal}
\alias{negbinomial}
\alias{sratio}
\alias{student}
\alias{von_mises}
\alias{weibull}
\alias{zero_inflated_beta}
\alias{zero_inflated_binomial}
\alias{zero_inflated_negbinomial}
\alias{zero_inflated_poisson}
\title{Special Family Functions for \pkg{brms} Models}
\usage{
brmsfamily(family, link = NULL)
student(link = "identity")
bernoulli(link = "logit")
negbinomial(link = "log")
geometric(link = "log")
lognormal(link = "identity")
exponential(link = "log")
weibull(link = "log")
Beta(link = "logit")
von_mises(link = "tan_half")
hurdle_poisson(link = "log")
hurdle_negbinomial(link = "log")
hurdle_gamma(link = "log")
zero_inflated_beta(link = "logit")
zero_inflated_poisson(link = "log")
zero_inflated_negbinomial(link = "log")
zero_inflated_binomial(link = "logit")
categorical(link = "logit")
cumulative(link = "logit")
sratio(link = "logit")
cratio(link = "logit")
acat(link = "logit")
}
\arguments{
\item{family}{A character string naming the distribution
of the response variable be used in the model.
Currently, the following families are supported:
\code{gaussian}, \code{student}, \code{binomial},
\code{bernoulli}, \code{poisson}, \code{negbinomial},
\code{geometric}, \code{Gamma}, \code{lognormal}, \code{inverse.gaussian},
\code{exponential}, \code{weibull}, \code{Beta}, \code{von_mises},
\code{categorical}, \code{cumulative}, \code{cratio}, \code{sratio},
\code{acat}, \code{hurdle_poisson}, \code{hurdle_negbinomial},
\code{hurdle_gamma}, \code{zero_inflated_binomial},
\code{zero_inflated_beta}, \code{zero_inflated_negbinomial},
and \code{zero_inflated_poisson}.}
\item{link}{A specification for the model link function.
This can be a name/expression or character string.
See the 'Details' section for more information on link
functions supported by each family.}
}
\description{
Family objects provide a convenient way to specify the details of the models
used by many model fitting functions. The familiy functions present here are
currently for use with \pkg{brms} only and will NOT work with other model
fitting functions such as \code{glm} or \code{glmer}.
However, the standard family functions as decribed in
\code{\link[stats:family]{family}} will work with \pkg{brms}.
}
\details{
Family \code{gaussian} with \code{identity} link leads to linear regression.
Family \code{student} with \code{identity} link leads to
robust linear regression that is less influenced by outliers.
Families \code{poisson}, \code{negbinomial}, and \code{geometric}
with \code{log} link lead to regression models for count data.
Families \code{binomial} and \code{bernoulli} with \code{logit} link leads to
logistic regression and family \code{categorical} to multi-logistic regression
when there are more than two possible outcomes.
Families \code{cumulative}, \code{cratio} ('contiuation ratio'),
\code{sratio} ('stopping ratio'), and \code{acat} ('adjacent category')
leads to ordinal regression. Families \code{Gamma}, \code{weibull},
\code{exponential}, \code{lognormal}, and \code{inverse.gaussian} can be used
(among others) for survival regression.
Families \code{hurdle_poisson}, \code{hurdle_negbinomial}, \code{hurdle_gamma},
\code{zero_inflated_poisson}, and \cr
\code{zero_inflated_negbinomial} combined with the
\code{log} link, and \code{zero_inflated_binomial} with the \code{logit} link,
allow to estimate zero-inflated and hurdle models. These models
can be very helpful when there are many zeros in the data that cannot be explained
by the primary distribution of the response. Family \code{hurdle_gamma} is
especially useful, as a traditional \code{Gamma} model cannot be reasonably
fitted for data containing zeros in the response.
In the following, we list all possible links for each family.
The families \code{gaussian}, and \code{student},
accept the links (as names) \code{identity}, \code{log}, and \code{inverse};
families \code{poisson}, \code{negbinomial}, and \code{geometric} the links
\code{log}, \code{identity}, and \code{sqrt};
families \code{binomial}, \code{bernoulli}, \code{Beta},
\code{cumulative}, \code{cratio}, \code{sratio}, and \code{acat}
the links \code{logit}, \code{probit}, \code{probit_approx},
\code{cloglog}, and \code{cauchit};
family \code{categorical} the link \code{logit};
families \code{Gamma}, \code{weibull}, and \code{exponential}
the links \code{log}, \code{identity}, and \code{inverse};
family \code{lognormal} the links \code{identity} and \code{inverse};
family \code{inverse.gaussian} the links \code{1/mu^2},
\code{inverse}, \code{identity} and \code{log};
families \code{hurdle_poisson}, \code{hurdle_negbinomial},
\code{hurdle_gamma}, \code{zero_inflated_poisson}, and
\code{zero_inflated_negbinomial} the link \code{log}.
The first link mentioned for each family is the default.
Please note that when calling the \code{\link[stats:family]{Gamma}}
family function, the default link will be \code{inverse} not \code{log}.
Also, the \code{probit_approx} link cannot be used when calling the
\code{\link[stats:family]{binomial}} family function.
The current implementation of \code{inverse.gaussian} models has some
convergence problems and requires carefully chosen prior distributions
to work efficiently. For this reason, we currently do not recommend
to use the \code{inverse.gaussian} family, unless you really feel
that your data requires exactly this type of model. \cr
}
\examples{
# create a family object
(fam1 <- student("log"))
# alternatively use the brmsfamily function
(fam2 <- brmsfamily("student", "log"))
# both leads to the same object
identical(fam1, fam2)
}
\seealso{
\code{\link[brms:brm]{brm}},
\code{\link[stats:family]{family}}
}
|
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(1.32515007107778e+304, 1.24295379909437e+193, -1.01766835177128e-176, -8.76565315805156e-244, 1.63920632895006e+68, 3.12377786073991e-115, 9.8820258034891e-217, -8.30912394988109e-104, -3.62627129016487e+265, -7.14754307476965e-22, 3.19469238205929e-38, 1.43952099721435e-232, -1.01348303244776e-293, -3.0515167874106e-295, 4.99937052414045e-120, 2.97176854710737e-99, 1.26626371639149e+187, -2.98344211111064e+248, 2.29357628182474e-101, 7.62955259991761e-307, -1.34248959975439, -3.77133744814312e+264, 526188584.776908, -1.68064395986298e+112, 1.61337657345915e-109, 6.019573643963e-310, 1.29364284330916e+241, 3.25034549397748e-233, -1.11814610338395e-218, 5.28736667283445e+202, -2.86439499564374e+79, 4.91599523387209e-131, 4.06912859027726e-34, 1753402522710575616, -2.35423749527038e-220, -4.72430389471873e-178, -6.8083242542928e+107, 1.78118795509852e+135, -3.001710958733e+63, -8.58221484813696e+249, -6.813199350629e-68, 5.23821059483045e+134, -1.07002243102713e-151, -1.22093386688349e-144, 439.565362839029, -4.48274132320775e-302, -1.37358087659649e-257, -3.52298627004724e-35, 5.31493800845617e-162, -2.83890369439335e+306, -1.26522665596753e-79, 1.04757395057911e-135, -4276236286908.59, 0.0690963851519292, 1.44038862406811e+42, 1.64142542941541e+145, 1.0507886262257e-116, -1.55576020696391e+235, -3.09667362230015e+48, -1.59537597923192e-89), temp = c(1.4174931883648e-311, -9.27191279380401e-227, -3.30454338512553e-220, 0.00326457501838524, -4.11828281046168e-243, -1.95893925610339e-77, -7.57690586869615e+160, 1.77288451463919e+81, 7.30351788343351e+245, 1.14935825540514e+262, 9.09252021533702e-172, 1.65646662424464e-91, 2.77067322468006e+114, 6.44719590123194e+27, -1.82639555575468e-07, 
-4.2372858822964e-119, -1.19043356885614e+85, 3.31651557487312e-262, 1.82363221083299e-238, 4.35812421290471e+289, 1.11765367033464e-296))
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) | /meteor/inst/testfiles/ET0_PriestleyTaylor/AFL_ET0_PriestleyTaylor/ET0_PriestleyTaylor_valgrind_files/1615843334-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 2,216 | r | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(1.32515007107778e+304, 1.24295379909437e+193, -1.01766835177128e-176, -8.76565315805156e-244, 1.63920632895006e+68, 3.12377786073991e-115, 9.8820258034891e-217, -8.30912394988109e-104, -3.62627129016487e+265, -7.14754307476965e-22, 3.19469238205929e-38, 1.43952099721435e-232, -1.01348303244776e-293, -3.0515167874106e-295, 4.99937052414045e-120, 2.97176854710737e-99, 1.26626371639149e+187, -2.98344211111064e+248, 2.29357628182474e-101, 7.62955259991761e-307, -1.34248959975439, -3.77133744814312e+264, 526188584.776908, -1.68064395986298e+112, 1.61337657345915e-109, 6.019573643963e-310, 1.29364284330916e+241, 3.25034549397748e-233, -1.11814610338395e-218, 5.28736667283445e+202, -2.86439499564374e+79, 4.91599523387209e-131, 4.06912859027726e-34, 1753402522710575616, -2.35423749527038e-220, -4.72430389471873e-178, -6.8083242542928e+107, 1.78118795509852e+135, -3.001710958733e+63, -8.58221484813696e+249, -6.813199350629e-68, 5.23821059483045e+134, -1.07002243102713e-151, -1.22093386688349e-144, 439.565362839029, -4.48274132320775e-302, -1.37358087659649e-257, -3.52298627004724e-35, 5.31493800845617e-162, -2.83890369439335e+306, -1.26522665596753e-79, 1.04757395057911e-135, -4276236286908.59, 0.0690963851519292, 1.44038862406811e+42, 1.64142542941541e+145, 1.0507886262257e-116, -1.55576020696391e+235, -3.09667362230015e+48, -1.59537597923192e-89), temp = c(1.4174931883648e-311, -9.27191279380401e-227, -3.30454338512553e-220, 0.00326457501838524, -4.11828281046168e-243, -1.95893925610339e-77, 
-7.57690586869615e+160, 1.77288451463919e+81, 7.30351788343351e+245, 1.14935825540514e+262, 9.09252021533702e-172, 1.65646662424464e-91, 2.77067322468006e+114, 6.44719590123194e+27, -1.82639555575468e-07, -4.2372858822964e-119, -1.19043356885614e+85, 3.31651557487312e-262, 1.82363221083299e-238, 4.35812421290471e+289, 1.11765367033464e-296))
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) |
context("Subsetting")
test_that("Test Subsetting on default inquiry handler", {
my_population <- declare_population(N = 50, noise = rnorm(N))
my_potential_outcomes <- declare_potential_outcomes(Y_Z_0 = noise, Y_Z_1 = noise + rnorm(N, mean = 2, sd = 2))
my_sampling <- declare_sampling(S = complete_rs(N, n = 25))
my_inquiry <- declare_inquiry(
ATE_pos = mean(Y_Z_1 - Y_Z_0),
subset = Y_Z_1 > 0
)
my_inquiry2 <- declare_inquiry(
ATE_neg = mean(Y_Z_1 - Y_Z_0),
subset = Y_Z_1 < 0
)
design <- my_population + my_potential_outcomes + my_sampling + my_inquiry + my_inquiry2
expect_true(design %>% draw_estimands() %>% with(estimand[1] > 2 && estimand[2] < 0))
# > z <- replicate(10000, design %>% draw_estimands() %>% with(inquiry[[1]] > 2 && inquiry[2] < 0)) %>% table
# > z
# .
# FALSE TRUE
# 8 9992
})
| /tests/testthat/test-subset.R | no_license | reuning/DeclareDesign | R | false | false | 857 | r | context("Subsetting")
test_that("Test Subsetting on default inquiry handler", {
my_population <- declare_population(N = 50, noise = rnorm(N))
my_potential_outcomes <- declare_potential_outcomes(Y_Z_0 = noise, Y_Z_1 = noise + rnorm(N, mean = 2, sd = 2))
my_sampling <- declare_sampling(S = complete_rs(N, n = 25))
my_inquiry <- declare_inquiry(
ATE_pos = mean(Y_Z_1 - Y_Z_0),
subset = Y_Z_1 > 0
)
my_inquiry2 <- declare_inquiry(
ATE_neg = mean(Y_Z_1 - Y_Z_0),
subset = Y_Z_1 < 0
)
design <- my_population + my_potential_outcomes + my_sampling + my_inquiry + my_inquiry2
expect_true(design %>% draw_estimands() %>% with(estimand[1] > 2 && estimand[2] < 0))
# > z <- replicate(10000, design %>% draw_estimands() %>% with(inquiry[[1]] > 2 && inquiry[2] < 0)) %>% table
# > z
# .
# FALSE TRUE
# 8 9992
})
|
#' Positions API
#'
#' A list of ordered, unique integers related to a users's lists or a list's tasks or a task's subtasks.
#'
#' @seealso \url{https://developer.wunderlist.com/documentation/endpoints/positions}
#' @name wndr_position
#'
#' @param id Position ID
#' @param values Positions
#' @param revision Revision
#'
#' @examples
#' \dontrun{
#' # get all list positions
#' p <- wndr_get_list_position()
#'
#' # get a list position
#' wndr_get_list_position(id = 1111)
#'
#' # update the list position
#' wndr_update_list_position(id = p$id[1], values = rev(p$values[[1]]), revision = p$revision[1])
#' }
#'
#' @export
wndr_get_list_position <- function(id = NULL) {
wndr_api(verb = "GET",
path = "/api/v1/list_positions",
id = id)
}
#' @rdname wndr_position
#' @export
wndr_update_list_position <- function(id, revision, values) {
wndr_api(verb = "PATCH",
path = "/api/v1/list_positions",
id = id,
body = list(
values = I(values),
revision = revision
))
}
#' @rdname wndr_position
#' @export
wndr_get_task_position <- function(id = NULL) {
wndr_api(verb = "GET",
path = "/api/v1/task_positions",
id = id)
}
#' @rdname wndr_position
#' @export
wndr_update_task_position <- function(id, revision, values) {
wndr_api(verb = "PATCH",
path = "/api/v1/task_positions",
id = id,
body = list(
values = I(values),
revision = revision
))
}
#' @rdname wndr_position
#' @export
wndr_get_subtask_position <- function(id = NULL) {
wndr_api(verb = "GET",
path = "/api/v1/subtask_positions",
id = id)
}
#' @rdname wndr_position
#' @export
wndr_update_subtask_position <- function(id, revision, values) {
wndr_api(verb = "PATCH",
path = "/api/v1/subtask_positions",
id = id,
body = list(
values = I(values),
revision = revision
))
}
| /R/position.R | no_license | yutannihilation/wunderlistr | R | false | false | 2,027 | r | #' Positions API
#'
#' A list of ordered, unique integers related to a users's lists or a list's tasks or a task's subtasks.
#'
#' @seealso \url{https://developer.wunderlist.com/documentation/endpoints/positions}
#' @name wndr_position
#'
#' @param id Position ID
#' @param values Positions
#' @param revision Revision
#'
#' @examples
#' \dontrun{
#' # get all list positions
#' p <- wndr_get_list_position()
#'
#' # get a list position
#' wndr_get_list_position(id = 1111)
#'
#' # update the list position
#' wndr_update_list_position(id = p$id[1], values = rev(p$values[[1]]), revision = p$revision[1])
#' }
#'
#' @export
wndr_get_list_position <- function(id = NULL) {
wndr_api(verb = "GET",
path = "/api/v1/list_positions",
id = id)
}
#' @rdname wndr_position
#' @export
wndr_update_list_position <- function(id, revision, values) {
wndr_api(verb = "PATCH",
path = "/api/v1/list_positions",
id = id,
body = list(
values = I(values),
revision = revision
))
}
#' @rdname wndr_position
#' @export
wndr_get_task_position <- function(id = NULL) {
wndr_api(verb = "GET",
path = "/api/v1/task_positions",
id = id)
}
#' @rdname wndr_position
#' @export
wndr_update_task_position <- function(id, revision, values) {
wndr_api(verb = "PATCH",
path = "/api/v1/task_positions",
id = id,
body = list(
values = I(values),
revision = revision
))
}
#' @rdname wndr_position
#' @export
wndr_get_subtask_position <- function(id = NULL) {
wndr_api(verb = "GET",
path = "/api/v1/subtask_positions",
id = id)
}
#' @rdname wndr_position
#' @export
wndr_update_subtask_position <- function(id, revision, values) {
wndr_api(verb = "PATCH",
path = "/api/v1/subtask_positions",
id = id,
body = list(
values = I(values),
revision = revision
))
}
|
library(ggplot2)
library(forecast)
library(zoo)
library(ggseas)
library(magrittr)
library(dplyr)
library(DescTools)
library(MASS)
data <- read.delim("E:/EBAC/6 Data Analytics/Assignment 3 ARIMA/GRPRating.csv")
str(data)
data$Date <- as.Date(data[,1],"%d-%b-%y")
head(data)
data$GRPRatingsDate <- NULL
#Initial plot
ggplot(data,aes(x=Date,y=GRP))+geom_line()+ylim(0,350)
##Decomposition method--------STL method
#Test and train split
train_data <- data$GRP[1:72]
test_data <- data$GRP[73:92]
data_GRP <- ts(train_data,frequency=26)
#Seasonal plots
seasonplot(data_GRP,col=rainbow(3), year.labels = TRUE, year.labels.left = TRUE)
ggseasonplot(data_GRP,polar=TRUE,year.labels = TRUE, col=rainbow(3))
#Taking log of data to convert from multiplicative to additive
l1 <- log(train_data)
l2 <- ts(l1,frequency=26)
#using stl function to decompose
stl_mul <- stl(l2,"per")
stl_mul%>% forecast(method="naive", h=20)%>%autoplot
fit_1 <- stl_mul%>% forecast(method="naive", h=20)
fit_1= as.data.frame(fit_1)
#plotting the decomposed time series
s2 <- as.matrix(stl_mul)
s2 <- stl_mul$time.series
s3 <- as.data.frame(s2)
decomposed_data <- exp(s3)
par(mfrow=c(3,1))
p1 <- plot(decomposed_data$seasonal,type="l",xlab = "Week",ylab= "seasonal")
p2 <- plot(decomposed_data$trend,type="l",xlab = "week",ylab="trend")
p3 <- plot(decomposed_data$remainder,type="l",xlab="week", ylab="remainder")
#Back transforming log data
fit.stldecomposition <- exp(fit_1$'Point Forecast')
#Accuracy measures
accuracy(fit.stldecomposition,test_data)
#Deriving predicted values from formula
df_3 <- as.data.frame(stl_mul$time.series)
df_3 <- exp(df_3)
df_3$new <- df_3$seasonal*df_3$trend
Predicted <-c((df_3$new),(fit.stldecomposition))
Actual <- data$GRP
#Plotting predicted Vs actual
autoplot.zoo(cbind.zoo(Predicted,Actual),facets = "FALSE")+theme_set(theme_minimal())+theme(legend.position = "bottom") +geom_line(size=1)+
ylim(0,350)+geom_vline(xintercept=73)+xlab("Week Number")+ylab("GRP") +ggtitle("Actual Vs Predicted by STL Decomposition Method")
##-------------------------------------------------------------------------------------------------------------------------##
#Time series regression
#Creating variables for regression
data_1 <- data
data_1$t <- seq(1:92)
data_1$t_sqrt <- (data_1$t)^(1/2)
data_1$t_cube <- (data_1$t)^(1/3)
data_1$t_log <- log(data_1$t)
data_1$logy <- log(data_1$GRP)
data_1$logx <- log(data_1$t)
#Square root model
train_sqrt <- data.frame(data_1$GRP[1:72],data_1$t_sqrt[1:72])
test_sqrt <- data.frame(data_1$GRP[73:92],data_1$t_sqrt[73:92])
total <- data.frame(data_1$GRP,data_1$t_sqrt)
colnames(train_sqrt)= c("GRP", "Input")
colnames(test_sqrt)= c("GRP","Input")
total
colnames(total)= c("GRP","Input")
fit.sqrt<- lm(GRP~ Input,train_sqrt)
summary(fit.sqrt)
pred.sqrt <- predict(fit.sqrt,test_sqrt)
pred.sqrt.total <- predict(fit.sqrt,total)
accuracy(pred.sqrt,data_1$GRP[73:92])
fit.sqrt.rlm<- rlm(GRP~ Input,train_sqrt,psi=psi.bisquare)
summary(fit.sqrt.rlm)
pred.sqrt.rlm <- predict(fit.sqrt.rlm,test_sqrt)
predicted.fit.rlm <- predict(fit.sqrt.rlm,total)
#pred.rlm.total
accuracy(pred.sqrt.rlm,data_1$GRP[73:92])
plot(predicted.fit.rlm,type="l",ylim=c(0,350),col="green",xlab = "Week",
ylab="GRP", main="Predicted Vs Actual of Time Series Regression", cex.main=0.9)
points(data_1$GRP,type="l",col="red")
abline(v=73)
title("", cex=0.5)
#log model
train_log <- data.frame(data_1$logy[1:72],data_1$t[1:72])
test_log <- data.frame(data_1$logy[73:92],data_1$t[73:92])
colnames(train_log)= c("GRP", "Input")
colnames(test_log)= c("GRP","Input")
fit.log<- lm(GRP~ Input,train_log)
summary(fit.log)
pred <- exp(predict(fit.log,test_log))
accuracy(pred,data_1$GRP[73:92])
fit.log.rlm<- rlm(GRP~ Input,train_log,psi=psi.bisquare)
summary(fit.log.rlm)
pred.log.rlm <- exp(predict(fit.log.rlm,test_log))
accuracy(pred.log.rlm,data_1$GRP[73:92])
#cube root model
train_cuberoot <- data.frame(data_1$GRP[1:72],data_1$t_cube[1:72])
test_cuberoot <- data.frame(data_1$GRP[73:92],data_1$t_cube[73:92])
colnames(train_cuberoot)= c("GRP", "Input")
colnames(test_cuberoot)= c("GRP","Input")
fit.cuberoot<- lm(GRP~ Input,train_cuberoot)
summary(fit.cuberoot)
pred.cuberoot <- predict(fit.cuberoot,test_cuberoot)
accuracy(pred.cuberoot,data_1$GRP[73:92])
fit.cube.rlm<- rlm(GRP~ Input,train_cuberoot,psi=psi.bisquare)
summary(fit.cube.rlm)
pred.cube.rlm <- predict(fit.cube.rlm,test_cuberoot)
accuracy(pred.cube.rlm,data_1$GRP[73:92])
#logxlogy model
train_loglog <- data.frame(data_1$logy[1:72],data_1$logx[1:72])
test_loglog <- data.frame(data_1$logy[73:92],data_1$logx[73:92])
total <- data.frame(data_1$logy,data_1$logx)
colnames(train_loglog)= c("logy", "logx")
colnames(test_loglog)= c("logy","logx")
colnames(total)= c("logy","logx")
fit.loglog<- lm(logy~logx,train_loglog)
summary(fit.loglog)
pred.loglog <- exp(predict(fit.loglog,test_loglog))
pred <- exp(predict(fit.loglog,total))
plot(pred,type="l")
plot(pred, type="l", col="green", ylim=c(0,350))
points(data_1$GRP, type="l", col="red" )
accuracy(pred.loglog,data_1$GRP[73:92])
fit.loglog.rlm<- rlm(logy~logx,train_loglog,psi=psi.bisquare)
summary(fit.loglog.rlm)
pred.loglog.rlm <- exp(predict(fit.loglog.rlm,test_loglog))
accuracy(pred.loglog.rlm,data_1$GRP[73:92])
#Multinomial model
train_lm <- data.frame(data_1$GRP[1:72],data_1$t[1:72])
test_lm <- data.frame(data_1$GRP[73:92],data_1$t[73:92])
total <- data.frame(data_1$GRP,data_1$t)
colnames(train_lm)= c("GRP", "Input")
colnames(test_lm)= c("GRP","Input")
colnames(total)= c("GRP","Input")
lm.fit2=lm(GRP~Input+I(Input^2)+I(Input^3),train_lm)
summary(lm.fit2)
pred.linear <- predict(lm.fit2,test_lm)
accuracy(pred.linear,data_1$GRP[73:92])
fit.rlm.linear<- rlm(GRP~Input+I(Input^2)+I(Input^3),train_lm,psi=psi.bisquare)
summary(fit.rlm.linear)
pred.linear.rlm <- predict(fit.rlm.linear,test_lm)
accuracy(pred.linear.rlm,data_1$GRP[73:92])
#Linear model
lm.fit <- lm(GRP~Input,train_lm)
summary(lm.fit)
pred_linear <- predict(lm.fit,test_lm)
accuracy(pred_linear,data_1$GRP[73:92])
fit.rlm<- rlm(GRP~Input,train_lm,psi=psi.bisquare)
summary(fit.rlm)
pred.rlm <- predict(fit.rlm,test_lm)
pred.total <- predict(fit.rlm,total)
accuracy(pred.rlm,data_1$GRP[73:92])
#Shapiro test on residuals
shapiro.test(fit.sqrt.rlm$residuals)
| /Submission_Code_Decomposition & Time Series Regresssion.R | no_license | Keerthbeth/BusinessAnalytics | R | false | false | 6,470 | r | library(ggplot2)
library(forecast)
library(zoo)
library(ggseas)
library(magrittr)
library(dplyr)
library(DescTools)
library(MASS)
data <- read.delim("E:/EBAC/6 Data Analytics/Assignment 3 ARIMA/GRPRating.csv")
str(data)
data$Date <- as.Date(data[,1],"%d-%b-%y")
head(data)
data$GRPRatingsDate <- NULL
#Initial plot
ggplot(data,aes(x=Date,y=GRP))+geom_line()+ylim(0,350)
##Decomposition method--------STL method
#Test and train split
train_data <- data$GRP[1:72]
test_data <- data$GRP[73:92]
data_GRP <- ts(train_data,frequency=26)
#Seasonal plots
seasonplot(data_GRP,col=rainbow(3), year.labels = TRUE, year.labels.left = TRUE)
ggseasonplot(data_GRP,polar=TRUE,year.labels = TRUE, col=rainbow(3))
#Taking log of data to convert from multiplicative to additive
l1 <- log(train_data)
l2 <- ts(l1,frequency=26)
#using stl function to decompose
stl_mul <- stl(l2,"per")
stl_mul%>% forecast(method="naive", h=20)%>%autoplot
fit_1 <- stl_mul%>% forecast(method="naive", h=20)
fit_1= as.data.frame(fit_1)
#plotting the decomposed time series
s2 <- as.matrix(stl_mul)
s2 <- stl_mul$time.series
s3 <- as.data.frame(s2)
decomposed_data <- exp(s3)
par(mfrow=c(3,1))
p1 <- plot(decomposed_data$seasonal,type="l",xlab = "Week",ylab= "seasonal")
p2 <- plot(decomposed_data$trend,type="l",xlab = "week",ylab="trend")
p3 <- plot(decomposed_data$remainder,type="l",xlab="week", ylab="remainder")
#Back transforming log data
fit.stldecomposition <- exp(fit_1$'Point Forecast')
#Accuracy measures
accuracy(fit.stldecomposition,test_data)
#Deriving predicted values from formula
df_3 <- as.data.frame(stl_mul$time.series)
df_3 <- exp(df_3)
df_3$new <- df_3$seasonal*df_3$trend
Predicted <-c((df_3$new),(fit.stldecomposition))
Actual <- data$GRP
#Plotting predicted Vs actual
autoplot.zoo(cbind.zoo(Predicted,Actual),facets = "FALSE")+theme_set(theme_minimal())+theme(legend.position = "bottom") +geom_line(size=1)+
ylim(0,350)+geom_vline(xintercept=73)+xlab("Week Number")+ylab("GRP") +ggtitle("Actual Vs Predicted by STL Decomposition Method")
##-------------------------------------------------------------------------------------------------------------------------##
#Time series regression
#Creating variables for regression
data_1 <- data
data_1$t <- seq(1:92)
data_1$t_sqrt <- (data_1$t)^(1/2)
data_1$t_cube <- (data_1$t)^(1/3)
data_1$t_log <- log(data_1$t)
data_1$logy <- log(data_1$GRP)
data_1$logx <- log(data_1$t)
#Square root model
train_sqrt <- data.frame(data_1$GRP[1:72],data_1$t_sqrt[1:72])
test_sqrt <- data.frame(data_1$GRP[73:92],data_1$t_sqrt[73:92])
total <- data.frame(data_1$GRP,data_1$t_sqrt)
colnames(train_sqrt)= c("GRP", "Input")
colnames(test_sqrt)= c("GRP","Input")
total
colnames(total)= c("GRP","Input")
fit.sqrt<- lm(GRP~ Input,train_sqrt)
summary(fit.sqrt)
pred.sqrt <- predict(fit.sqrt,test_sqrt)
pred.sqrt.total <- predict(fit.sqrt,total)
accuracy(pred.sqrt,data_1$GRP[73:92])
fit.sqrt.rlm<- rlm(GRP~ Input,train_sqrt,psi=psi.bisquare)
summary(fit.sqrt.rlm)
pred.sqrt.rlm <- predict(fit.sqrt.rlm,test_sqrt)
predicted.fit.rlm <- predict(fit.sqrt.rlm,total)
#pred.rlm.total
accuracy(pred.sqrt.rlm,data_1$GRP[73:92])
plot(predicted.fit.rlm,type="l",ylim=c(0,350),col="green",xlab = "Week",
ylab="GRP", main="Predicted Vs Actual of Time Series Regression", cex.main=0.9)
points(data_1$GRP,type="l",col="red")
abline(v=73)
title("", cex=0.5)
#log model
train_log <- data.frame(data_1$logy[1:72],data_1$t[1:72])
test_log <- data.frame(data_1$logy[73:92],data_1$t[73:92])
colnames(train_log)= c("GRP", "Input")
colnames(test_log)= c("GRP","Input")
fit.log<- lm(GRP~ Input,train_log)
summary(fit.log)
pred <- exp(predict(fit.log,test_log))
accuracy(pred,data_1$GRP[73:92])
fit.log.rlm<- rlm(GRP~ Input,train_log,psi=psi.bisquare)
summary(fit.log.rlm)
pred.log.rlm <- exp(predict(fit.log.rlm,test_log))
accuracy(pred.log.rlm,data_1$GRP[73:92])
#cube root model
train_cuberoot <- data.frame(data_1$GRP[1:72],data_1$t_cube[1:72])
test_cuberoot <- data.frame(data_1$GRP[73:92],data_1$t_cube[73:92])
colnames(train_cuberoot)= c("GRP", "Input")
colnames(test_cuberoot)= c("GRP","Input")
fit.cuberoot<- lm(GRP~ Input,train_cuberoot)
summary(fit.cuberoot)
pred.cuberoot <- predict(fit.cuberoot,test_cuberoot)
accuracy(pred.cuberoot,data_1$GRP[73:92])
fit.cube.rlm<- rlm(GRP~ Input,train_cuberoot,psi=psi.bisquare)
summary(fit.cube.rlm)
pred.cube.rlm <- predict(fit.cube.rlm,test_cuberoot)
accuracy(pred.cube.rlm,data_1$GRP[73:92])
#logxlogy model
train_loglog <- data.frame(data_1$logy[1:72],data_1$logx[1:72])
test_loglog <- data.frame(data_1$logy[73:92],data_1$logx[73:92])
total <- data.frame(data_1$logy,data_1$logx)
colnames(train_loglog)= c("logy", "logx")
colnames(test_loglog)= c("logy","logx")
colnames(total)= c("logy","logx")
fit.loglog<- lm(logy~logx,train_loglog)
summary(fit.loglog)
pred.loglog <- exp(predict(fit.loglog,test_loglog))
pred <- exp(predict(fit.loglog,total))
plot(pred,type="l")
plot(pred, type="l", col="green", ylim=c(0,350))
points(data_1$GRP, type="l", col="red" )
accuracy(pred.loglog,data_1$GRP[73:92])
fit.loglog.rlm<- rlm(logy~logx,train_loglog,psi=psi.bisquare)
summary(fit.loglog.rlm)
pred.loglog.rlm <- exp(predict(fit.loglog.rlm,test_loglog))
accuracy(pred.loglog.rlm,data_1$GRP[73:92])
#Multinomial model
train_lm <- data.frame(data_1$GRP[1:72],data_1$t[1:72])
test_lm <- data.frame(data_1$GRP[73:92],data_1$t[73:92])
total <- data.frame(data_1$GRP,data_1$t)
colnames(train_lm)= c("GRP", "Input")
colnames(test_lm)= c("GRP","Input")
colnames(total)= c("GRP","Input")
lm.fit2=lm(GRP~Input+I(Input^2)+I(Input^3),train_lm)
summary(lm.fit2)
pred.linear <- predict(lm.fit2,test_lm)
accuracy(pred.linear,data_1$GRP[73:92])
fit.rlm.linear<- rlm(GRP~Input+I(Input^2)+I(Input^3),train_lm,psi=psi.bisquare)
summary(fit.rlm.linear)
pred.linear.rlm <- predict(fit.rlm.linear,test_lm)
accuracy(pred.linear.rlm,data_1$GRP[73:92])
#Linear model
lm.fit <- lm(GRP~Input,train_lm)
summary(lm.fit)
pred_linear <- predict(lm.fit,test_lm)
accuracy(pred_linear,data_1$GRP[73:92])
fit.rlm<- rlm(GRP~Input,train_lm,psi=psi.bisquare)
summary(fit.rlm)
pred.rlm <- predict(fit.rlm,test_lm)
pred.total <- predict(fit.rlm,total)
accuracy(pred.rlm,data_1$GRP[73:92])
#Shapiro test on residuals
shapiro.test(fit.sqrt.rlm$residuals)
|
library(data.table)
# Load test data and subject into variables
testData <- read.table("./UCI HAR Dataset/test/X_test.txt",header=FALSE)
testLabels <- read.table("./UCI HAR Dataset/test/y_test.txt",header=FALSE)
testData_sub <- read.table("./UCI HAR Dataset/test/subject_test.txt",header=FALSE)
# Load training data and subject into variables
trainData <- read.table("./UCI HAR Dataset/train/X_train.txt",header=FALSE)
trainLabels <- read.table("./UCI HAR Dataset/train/y_train.txt",header=FALSE)
trainData_sub <- read.table("./UCI HAR Dataset/train/subject_train.txt",header=FALSE)
# Name activities using activity labels for the test and training data set
activities <- read.table("./UCI HAR Dataset/activity_labels.txt",header=FALSE,colClasses="character")
testLabels$V1 <- factor(testLabels$V1,levels=activities$V1,labels=activities$V2)
trainLabels$V1 <- factor(trainLabels$V1,levels=activities$V1,labels=activities$V2)
# Appropriately labels the data set with descriptive activity names
features <- read.table("./UCI HAR Dataset/features.txt",header=FALSE,colClasses="character")
colnames(testData)<-features$V2
colnames(trainData)<-features$V2
colnames(testLabels)<-c("Activity")
colnames(trainLabels)<-c("Activity")
colnames(testData_sub)<-c("Subject")
colnames(trainData_sub)<-c("Subject")
# Merge test and training sets into one data set, including the activities
testData<-cbind(testData,testLabels)
testData<-cbind(testData,testData_sub)
trainData<-cbind(trainData,trainLabels)
trainData<-cbind(trainData,trainData_sub)
bigData<-rbind(testData,trainData)
# Calculate mean and standard deviation
bigData_mean<-sapply(bigData,mean,na.rm=TRUE)
bigData_sd<-sapply(bigData,sd,na.rm=TRUE)
# Create tidy output as text file using write.table
DT <- data.table(bigData)
tidy<-DT[,lapply(.SD,mean),by="Activity,Subject"]
write.table(tidy,file="tidy_data_set.txt",sep=",",row.names = FALSE) | /run_analysis.R | no_license | chankf87/Getting-and-Cleaning-Data | R | false | false | 1,897 | r | library(data.table)
# Load test data and subject into variables
testData <- read.table("./UCI HAR Dataset/test/X_test.txt",header=FALSE)
testLabels <- read.table("./UCI HAR Dataset/test/y_test.txt",header=FALSE)
testData_sub <- read.table("./UCI HAR Dataset/test/subject_test.txt",header=FALSE)
# Load training data and subject into variables
trainData <- read.table("./UCI HAR Dataset/train/X_train.txt",header=FALSE)
trainLabels <- read.table("./UCI HAR Dataset/train/y_train.txt",header=FALSE)
trainData_sub <- read.table("./UCI HAR Dataset/train/subject_train.txt",header=FALSE)
# Name activities using activity labels for the test and training data set
activities <- read.table("./UCI HAR Dataset/activity_labels.txt",header=FALSE,colClasses="character")
testLabels$V1 <- factor(testLabels$V1,levels=activities$V1,labels=activities$V2)
trainLabels$V1 <- factor(trainLabels$V1,levels=activities$V1,labels=activities$V2)
# Appropriately labels the data set with descriptive activity names
features <- read.table("./UCI HAR Dataset/features.txt",header=FALSE,colClasses="character")
colnames(testData)<-features$V2
colnames(trainData)<-features$V2
colnames(testLabels)<-c("Activity")
colnames(trainLabels)<-c("Activity")
colnames(testData_sub)<-c("Subject")
colnames(trainData_sub)<-c("Subject")
# Merge test and training sets into one data set, including the activities
testData<-cbind(testData,testLabels)
testData<-cbind(testData,testData_sub)
trainData<-cbind(trainData,trainLabels)
trainData<-cbind(trainData,trainData_sub)
bigData<-rbind(testData,trainData)
# Calculate mean and standard deviation
bigData_mean<-sapply(bigData,mean,na.rm=TRUE)
bigData_sd<-sapply(bigData,sd,na.rm=TRUE)
# Create tidy output as text file using write.table
DT <- data.table(bigData)
tidy<-DT[,lapply(.SD,mean),by="Activity,Subject"]
write.table(tidy,file="tidy_data_set.txt",sep=",",row.names = FALSE) |
library(raster)
library(ggplot2)
library(extrafont)
library(reshape2)
library(Cairo)
#font_import()
loadfonts(device="win")
# barron land: 505,1771
# cultivated crops: 1441,1284
# deciduous forest: 6,884
# developed land: 1701,1280
# evergreen forest: 1211,880
# mixed forest: 284,1762
# open water: 2158,158
# For open water
list_poi = c(2158,158)
str_pic_points_name <- 'six_points_water.png'
str_pic_curves_name <- 'six_curves_water.png'
inter_y = 200
limit_y = 800
# # For deciduous forest
# list_poi = c(6,884)
# str_pic_points_name <- 'six_points_deciduous.png'
# str_pic_curves_name <- 'six_curves_deciduous.png'
# inter_y = 2000
# limit_y = 6000
# basic path
basic_path <- 'E:/Research/LandCoverMapping/Experiment/qianshan/Final/TimeSeriesRdata'
xnew <- c(julian(as.Date("2013-12-31")):julian(as.Date("2014-12-31")))
#####################################################################################
# This part is for points
path <- file.path(basic_path,'sr_blue_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
byaxis <- mmatrix[1,]
xaxis <- julian(as.Date(getZ(bandBrick)))
#Green
path <- file.path(basic_path,'sr_green_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
gyaxis <- mmatrix[1,]
#Red
path <- file.path(basic_path,'sr_red_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
ryaxis <- mmatrix[1,]
#NIR
path <- file.path(basic_path,'sr_nir_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
nyaxis <- mmatrix[1,]
#SWIR1
path <- file.path(basic_path,'sr_swir1_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
s1yaxis <- mmatrix[1,]
#SWIR2
path <- file.path(basic_path,'sr_swir2_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
s2yaxis <- mmatrix[1,]
fitFrame <- data.frame(xaxis, byaxis, gyaxis, ryaxis, nyaxis, s1yaxis, s2yaxis)
# Output pictures
Cairo(width = 15, height = 9, file=file.path(basic_path, str_pic_points_name),
type="png", pointsize=12, bg = "transparent", canvas = "white", units = "cm", dpi = 300)
xseq <- seq(from = 16100, to = 16400, by = 100)
yseq <- seq(from = 0, to = 10000, by = inter_y)
bandsData<-melt(fitFrame, id.vars = 'xaxis')
pp <- ggplot(data = bandsData, mapping = aes(x=xaxis, y=value))
pp+geom_point(mapping = aes(shape = variable), size = 1.5, na.rm = TRUE)+
scale_x_continuous(name = 'Julian date', limits = c(xnew[1],xnew[length(xnew)]), breaks=xseq, labels=xseq)+
scale_y_continuous(name = expression(paste('Reflectance(', 1%*%10^4, ')', sep = '')), limits = c(0,limit_y), breaks=yseq, labels=yseq)+
theme_classic(base_size = 18, base_family = 'Times New Roman')+
theme(axis.text.x = element_text(color = "black"), axis.text.y = element_text(color = "black"))+
scale_shape_discrete(labels=c('Blue',
'Green',
'Red',
'NIR',
'SWIR 1',
'SWIR 2'))+
labs(shape='Bands')
dev.off()
###########################################################################
# This part is for line
#Blue
path <- file.path(basic_path,'sr_blue_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
xaxis <- julian(as.Date(getZ(bandBrick)))
yaxis <- mmatrix[1,]
fdata <- data.frame(xaxis,yaxis)
fresult = lm(yaxis ~ xaxis + I(cospi((2/365.256363004)*xaxis)) + I(sinpi((2/365.256363004)*xaxis)), data = fdata)
b0 <- summary(fresult)$coefficients[1]
b1 <- summary(fresult)$coefficients[2]
b2 <- summary(fresult)$coefficients[3]
b3 <- summary(fresult)$coefficients[4]
blr2 <- summary(fresult)$r.squared
blynew <- b0 + b1*xnew + b2 * cospi((2/365.25) * xnew) + b3 * sinpi((2/365.25) * xnew)
#Green
path <- file.path(basic_path,'sr_green_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
yaxis <- mmatrix[1,]
fdata <- data.frame(xaxis,yaxis)
fresult = lm(yaxis ~ xaxis + I(cospi((2/365.256363004)*xaxis)) + I(sinpi((2/365.256363004)*xaxis)), data = fdata)
b0 <- summary(fresult)$coefficients[1]
b1 <- summary(fresult)$coefficients[2]
b2 <- summary(fresult)$coefficients[3]
b3 <- summary(fresult)$coefficients[4]
ccr2 <- summary(fresult)$r.squared
ccynew <- b0 + b1*xnew + b2 * cospi((2/365.25) * xnew) + b3 * sinpi((2/365.25) * xnew)
#Red
path <- file.path(basic_path,'sr_red_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
yaxis <- mmatrix[1,]
fdata <- data.frame(xaxis,yaxis)
fresult = lm(yaxis ~ xaxis + I(cospi((2/365.256363004)*xaxis)) + I(sinpi((2/365.256363004)*xaxis)), data = fdata)
b0 <- summary(fresult)$coefficients[1]
b1 <- summary(fresult)$coefficients[2]
b2 <- summary(fresult)$coefficients[3]
b3 <- summary(fresult)$coefficients[4]
dfr2 <- summary(fresult)$r.squared
dfynew <- b0 + b1*xnew + b2 * cospi((2/365.25) * xnew) + b3 * sinpi((2/365.25) * xnew)
#NIR
path <- file.path(basic_path,'sr_nir_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
yaxis <- mmatrix[1,]
fdata <- data.frame(xaxis,yaxis)
fresult = lm(yaxis ~ xaxis + I(cospi((2/365.256363004)*xaxis)) + I(sinpi((2/365.256363004)*xaxis)), data = fdata)
b0 <- summary(fresult)$coefficients[1]
b1 <- summary(fresult)$coefficients[2]
b2 <- summary(fresult)$coefficients[3]
b3 <- summary(fresult)$coefficients[4]
dr2 <- summary(fresult)$r.squared
dynew <- b0 + b1*xnew + b2 * cospi((2/365.25) * xnew) + b3 * sinpi((2/365.25) * xnew)
#SWIR1
path <- file.path(basic_path,'sr_swir1_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
yaxis <- mmatrix[1,]
fdata <- data.frame(xaxis,yaxis)
fresult = lm(yaxis ~ xaxis + I(cospi((2/365.256363004)*xaxis)) + I(sinpi((2/365.256363004)*xaxis)), data = fdata)
b0 <- summary(fresult)$coefficients[1]
b1 <- summary(fresult)$coefficients[2]
b2 <- summary(fresult)$coefficients[3]
b3 <- summary(fresult)$coefficients[4]
efr2 <- summary(fresult)$r.squared
efynew <- b0 + b1*xnew + b2 * cospi((2/365.25) * xnew) + b3 * sinpi((2/365.25) * xnew)
#SWIR2
path <- file.path(basic_path,'sr_swir2_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
yaxis <- mmatrix[1,]
fdata <- data.frame(xaxis,yaxis)
fresult = lm(yaxis ~ xaxis + I(cospi((2/365.256363004)*xaxis)) + I(sinpi((2/365.256363004)*xaxis)), data = fdata)
b0 <- summary(fresult)$coefficients[1]
b1 <- summary(fresult)$coefficients[2]
b2 <- summary(fresult)$coefficients[3]
b3 <- summary(fresult)$coefficients[4]
mfr2 <- summary(fresult)$r.squared
mfynew <- b0 + b1*xnew + b2 * cospi((2/365.25) * xnew) + b3 * sinpi((2/365.25) * xnew)
fitFrame <- data.frame(xnew, blynew, ccynew, dfynew, dynew, efynew, mfynew)
Cairo(width = 15, height = 9, file=file.path(basic_path, str_pic_curves_name),
type="png", pointsize=12, bg = "transparent", canvas = "white", units = "cm", dpi = 300)
xseq <- seq(from = 16100, to = 16400, by = 100)
yseq <- seq(from = 0, to = 10000, by = inter_y)
bandsData<-melt(fitFrame, id.vars = 'xnew')
pc <- ggplot(data = bandsData, mapping = aes(x=xnew, y=value))
pc+geom_line(mapping = aes(linetype = variable), size = 0.5)+
scale_x_continuous(name = 'Julian date', limits = c(xnew[1],xnew[length(xnew)]), breaks=xseq, labels=xseq)+
scale_y_continuous(name = expression(paste('Reflectance(', 1%*%10^4, ')', sep = '')), limits = c(0,limit_y), breaks=yseq, labels=yseq)+
theme_classic(base_size = 18, base_family = 'Times New Roman')+
theme(axis.text.x = element_text(color = "black"), axis.text.y = element_text(color = "black"))+
scale_linetype_discrete(labels=c('Blue',
'Green',
'Red',
'NIR',
'SWIR 1',
'SWIR 2'))+
labs(linetype='Bands')
dev.off()
| /data_draw2.R | no_license | GRSEB9S/LaTiP | R | false | false | 7,969 | r | library(raster)
library(ggplot2)
library(extrafont)
library(reshape2)
library(Cairo)
#font_import()
loadfonts(device="win")
# barron land: 505,1771
# cultivated crops: 1441,1284
# deciduous forest: 6,884
# developed land: 1701,1280
# evergreen forest: 1211,880
# mixed forest: 284,1762
# open water: 2158,158
# For open water
list_poi = c(2158,158)
str_pic_points_name <- 'six_points_water.png'
str_pic_curves_name <- 'six_curves_water.png'
inter_y = 200
limit_y = 800
# # For deciduous forest
# list_poi = c(6,884)
# str_pic_points_name <- 'six_points_deciduous.png'
# str_pic_curves_name <- 'six_curves_deciduous.png'
# inter_y = 2000
# limit_y = 6000
# basic path
basic_path <- 'E:/Research/LandCoverMapping/Experiment/qianshan/Final/TimeSeriesRdata'
xnew <- c(julian(as.Date("2013-12-31")):julian(as.Date("2014-12-31")))
#####################################################################################
# This part is for points
path <- file.path(basic_path,'sr_blue_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
byaxis <- mmatrix[1,]
xaxis <- julian(as.Date(getZ(bandBrick)))
#Green
path <- file.path(basic_path,'sr_green_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
gyaxis <- mmatrix[1,]
#Red
path <- file.path(basic_path,'sr_red_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
ryaxis <- mmatrix[1,]
#NIR
path <- file.path(basic_path,'sr_nir_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
nyaxis <- mmatrix[1,]
#SWIR1
path <- file.path(basic_path,'sr_swir1_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
s1yaxis <- mmatrix[1,]
#SWIR2
path <- file.path(basic_path,'sr_swir2_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1],list_poi[2],]
s2yaxis <- mmatrix[1,]
fitFrame <- data.frame(xaxis, byaxis, gyaxis, ryaxis, nyaxis, s1yaxis, s2yaxis)
# Output pictures
Cairo(width = 15, height = 9, file=file.path(basic_path, str_pic_points_name),
type="png", pointsize=12, bg = "transparent", canvas = "white", units = "cm", dpi = 300)
xseq <- seq(from = 16100, to = 16400, by = 100)
###########################################################################
# Scatter plot (points) of the per-band reflectance samples.
# `inter_y`, `limit_y`, `xseq`, `xnew`, `fitFrame` and `basic_path` are all
# created earlier in this script (NOTE(review): not visible here -- this
# section assumes `fitFrame` currently holds the raw samples keyed by an
# `xaxis` column).
yseq <- seq(from = 0, to = 10000, by = inter_y)
bandsData <- melt(fitFrame, id.vars = 'xaxis')
pp <- ggplot(data = bandsData, mapping = aes(x = xaxis, y = value))
pp + geom_point(mapping = aes(shape = variable), size = 1.5, na.rm = TRUE) +
  scale_x_continuous(name = 'Julian date', limits = c(xnew[1], xnew[length(xnew)]), breaks = xseq, labels = xseq) +
  scale_y_continuous(name = expression(paste('Reflectance(', 1%*%10^4, ')', sep = '')), limits = c(0, limit_y), breaks = yseq, labels = yseq) +
  theme_classic(base_size = 18, base_family = 'Times New Roman') +
  theme(axis.text.x = element_text(color = "black"), axis.text.y = element_text(color = "black")) +
  scale_shape_discrete(labels = c('Blue',
                                  'Green',
                                  'Red',
                                  'NIR',
                                  'SWIR 1',
                                  'SWIR 2')) +
  labs(shape = 'Bands')
dev.off()
###########################################################################
# Fitted curves (lines): for every Landsat band, fit a first-order harmonic
# regression  y ~ t + cos(2*pi*t/T) + sin(2*pi*t/T)  with T = 365.256363004
# days at the pixel of interest (`list_poi`), then evaluate the fitted
# curve on the dense date grid `xnew`.
#
# BUGFIX: the predictions previously used T = 365.25 although the models
# were fitted with T = 365.256363004, i.e. the fitted coefficients were
# applied to a slightly different basis than the one they were estimated
# on. The predictions below now reuse the fitting period.
# Blue (also provides the acquisition dates `xaxis` used by all bands)
path <- file.path(basic_path, 'sr_blue_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1], list_poi[2], ]  # band time series at the pixel
xaxis <- julian(as.Date(getZ(bandBrick)))         # acquisition dates (Julian days)
yaxis <- mmatrix[1, ]
fdata <- data.frame(xaxis, yaxis)
fresult <- lm(yaxis ~ xaxis + I(cospi((2/365.256363004)*xaxis)) + I(sinpi((2/365.256363004)*xaxis)), data = fdata)
b0 <- summary(fresult)$coefficients[1]
b1 <- summary(fresult)$coefficients[2]
b2 <- summary(fresult)$coefficients[3]
b3 <- summary(fresult)$coefficients[4]
blr2 <- summary(fresult)$r.squared
blynew <- b0 + b1*xnew + b2 * cospi((2/365.256363004) * xnew) + b3 * sinpi((2/365.256363004) * xnew)
# Green (same procedure; acquisition dates reused from the blue stack)
path <- file.path(basic_path, 'sr_green_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1], list_poi[2], ]
yaxis <- mmatrix[1, ]
fdata <- data.frame(xaxis, yaxis)
fresult <- lm(yaxis ~ xaxis + I(cospi((2/365.256363004)*xaxis)) + I(sinpi((2/365.256363004)*xaxis)), data = fdata)
b0 <- summary(fresult)$coefficients[1]
b1 <- summary(fresult)$coefficients[2]
b2 <- summary(fresult)$coefficients[3]
b3 <- summary(fresult)$coefficients[4]
ccr2 <- summary(fresult)$r.squared
ccynew <- b0 + b1*xnew + b2 * cospi((2/365.256363004) * xnew) + b3 * sinpi((2/365.256363004) * xnew)
# Red
path <- file.path(basic_path, 'sr_red_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1], list_poi[2], ]
yaxis <- mmatrix[1, ]
fdata <- data.frame(xaxis, yaxis)
fresult <- lm(yaxis ~ xaxis + I(cospi((2/365.256363004)*xaxis)) + I(sinpi((2/365.256363004)*xaxis)), data = fdata)
b0 <- summary(fresult)$coefficients[1]
b1 <- summary(fresult)$coefficients[2]
b2 <- summary(fresult)$coefficients[3]
b3 <- summary(fresult)$coefficients[4]
dfr2 <- summary(fresult)$r.squared
dfynew <- b0 + b1*xnew + b2 * cospi((2/365.256363004) * xnew) + b3 * sinpi((2/365.256363004) * xnew)
# NIR
path <- file.path(basic_path, 'sr_nir_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1], list_poi[2], ]
yaxis <- mmatrix[1, ]
fdata <- data.frame(xaxis, yaxis)
fresult <- lm(yaxis ~ xaxis + I(cospi((2/365.256363004)*xaxis)) + I(sinpi((2/365.256363004)*xaxis)), data = fdata)
b0 <- summary(fresult)$coefficients[1]
b1 <- summary(fresult)$coefficients[2]
b2 <- summary(fresult)$coefficients[3]
b3 <- summary(fresult)$coefficients[4]
dr2 <- summary(fresult)$r.squared
dynew <- b0 + b1*xnew + b2 * cospi((2/365.256363004) * xnew) + b3 * sinpi((2/365.256363004) * xnew)
# SWIR 1
path <- file.path(basic_path, 'sr_swir1_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1], list_poi[2], ]
yaxis <- mmatrix[1, ]
fdata <- data.frame(xaxis, yaxis)
fresult <- lm(yaxis ~ xaxis + I(cospi((2/365.256363004)*xaxis)) + I(sinpi((2/365.256363004)*xaxis)), data = fdata)
b0 <- summary(fresult)$coefficients[1]
b1 <- summary(fresult)$coefficients[2]
b2 <- summary(fresult)$coefficients[3]
b3 <- summary(fresult)$coefficients[4]
efr2 <- summary(fresult)$r.squared
efynew <- b0 + b1*xnew + b2 * cospi((2/365.256363004) * xnew) + b3 * sinpi((2/365.256363004) * xnew)
# SWIR 2
path <- file.path(basic_path, 'sr_swir2_stack.grd')
bandBrick <- brick(path)
mmatrix <- bandBrick[list_poi[1], list_poi[2], ]
yaxis <- mmatrix[1, ]
fdata <- data.frame(xaxis, yaxis)
fresult <- lm(yaxis ~ xaxis + I(cospi((2/365.256363004)*xaxis)) + I(sinpi((2/365.256363004)*xaxis)), data = fdata)
b0 <- summary(fresult)$coefficients[1]
b1 <- summary(fresult)$coefficients[2]
b2 <- summary(fresult)$coefficients[3]
b3 <- summary(fresult)$coefficients[4]
mfr2 <- summary(fresult)$r.squared
mfynew <- b0 + b1*xnew + b2 * cospi((2/365.256363004) * xnew) + b3 * sinpi((2/365.256363004) * xnew)
# Collect the six fitted curves and draw them as a line plot.
fitFrame <- data.frame(xnew, blynew, ccynew, dfynew, dynew, efynew, mfynew)
Cairo(width = 15, height = 9, file = file.path(basic_path, str_pic_curves_name),
      type = "png", pointsize = 12, bg = "transparent", canvas = "white", units = "cm", dpi = 300)
xseq <- seq(from = 16100, to = 16400, by = 100)  # NOTE(review): tick range hard-coded to the study period (Julian days)
yseq <- seq(from = 0, to = 10000, by = inter_y)
bandsData <- melt(fitFrame, id.vars = 'xnew')
pc <- ggplot(data = bandsData, mapping = aes(x = xnew, y = value))
pc + geom_line(mapping = aes(linetype = variable), size = 0.5) +
  scale_x_continuous(name = 'Julian date', limits = c(xnew[1], xnew[length(xnew)]), breaks = xseq, labels = xseq) +
  scale_y_continuous(name = expression(paste('Reflectance(', 1%*%10^4, ')', sep = '')), limits = c(0, limit_y), breaks = yseq, labels = yseq) +
  theme_classic(base_size = 18, base_family = 'Times New Roman') +
  theme(axis.text.x = element_text(color = "black"), axis.text.y = element_text(color = "black")) +
  scale_linetype_discrete(labels = c('Blue',
                                     'Green',
                                     'Red',
                                     'NIR',
                                     'SWIR 1',
                                     'SWIR 2')) +
  labs(linetype = 'Bands')
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature_selection.R
\name{select_features}
\alias{select_features}
\title{Multiple pre-processing methods for feature selection}
\usage{
select_features(trj, feature_selection = "pca",
n_princ_comp = floor(ncol(trj)/10), pca_method = "R", plotit = FALSE,
frameit = FALSE, return_plot = FALSE, cluster_vector = NULL,
plotly_it = FALSE, points_size = 1, specific_palette = NULL,
plot_legend = FALSE, legend_title = NULL, legend_labels = NULL,
silent = FALSE)
}
\arguments{
\item{trj}{Input trajectory (variables on the columns and equal-time spaced snapshots on the rows). It must be a \code{matrix} or a \code{data.frame} of numeric values.}
\item{feature_selection}{Available method is 'pca'}
\item{n_princ_comp}{number of principal components to use}
\item{pca_method}{If set 'R' (default) it will use \code{\link{princomp}}. The other (slower) option is 'robust' which is using \code{\link{PCAproj}}.}
\item{plotit}{Plot the PCA components if two are selected.}
\item{frameit}{Add a frame (shaded clustering) of the whole performed PCA.}
\item{return_plot}{This option is usually used to add layers to the ggplot (made using autoplot).}
\item{cluster_vector}{This option can be used to set the clusters you want and show them with different colors (and shades if \code{frameit = TRUE}).
Please set this option with the same dimensionality of the trj (n_snapshots) and use integer numbers (to define the clusters).}
\item{plotly_it}{Plot the PCA components using ggplotly (dynamic plots, to use only with reduced dimensionality).}
\item{points_size}{It must be a number and it defines the size of the points.}
\item{specific_palette}{use some specific color for the clusters}
\item{plot_legend}{plot the legend}
\item{legend_title}{define a title for the legend}
\item{legend_labels}{labels for the legend}
\item{silent}{A logical value indicating whether the function has to remain silent or not. Default value is \code{FALSE}.}
}
\value{
It will return a modified trajectory matrix and print the principal components vector.
}
\description{
\code{select_features} is able to select input variables on the basis of the trajectory input. For the moment only PCA-based feature selection
is supported. Moreover, this tool is meant to be used with the total trajectory input.
}
\details{
This function is based primarily on the base R function \code{princomp} and on \code{PCAproj} from the package pcaPP. Instead, for more details on the SAPPHIRE analysis, please refer to the main documentation
of the original campari software \url{http://campari.sourceforge.net/documentation.html}.
}
\seealso{
\code{\link{princomp}}, \code{\link{PCAproj}}, \code{\link{adjl_from_progindex}}, \code{\link{gen_progindex}}, \code{\link{gen_annotation}}.
}
| /man/select_features.Rd | no_license | clangi/CampaRi | R | false | true | 2,837 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature_selection.R
\name{select_features}
\alias{select_features}
\title{Multiple pre-processing methods for feature selection}
\usage{
select_features(trj, feature_selection = "pca",
n_princ_comp = floor(ncol(trj)/10), pca_method = "R", plotit = FALSE,
frameit = FALSE, return_plot = FALSE, cluster_vector = NULL,
plotly_it = FALSE, points_size = 1, specific_palette = NULL,
plot_legend = FALSE, legend_title = NULL, legend_labels = NULL,
silent = FALSE)
}
\arguments{
\item{trj}{Input trajectory (variables on the columns and equal-time spaced snapshots on the rows). It must be a \code{matrix} or a \code{data.frame} of numeric values.}
\item{feature_selection}{Available method is 'pca'}
\item{n_princ_comp}{number of principal components to use}
\item{pca_method}{If set 'R' (default) it will use \code{\link{princomp}}. The other (slower) option is 'robust' which is using \code{\link{PCAproj}}.}
\item{plotit}{Plot the PCA components if two are selected.}
\item{frameit}{Add a frame (shaded clustering) of the whole performed PCA.}
\item{return_plot}{This option is usually used to add layers to the ggplot (made using autoplot).}
\item{cluster_vector}{This option can be used to set the clusters you want and show them with different colors (and shades if \code{frameit = TRUE}).
Please set this option with the same dimensionality of the trj (n_snapshots) and use integer numbers (to define the clusters).}
\item{plotly_it}{Plot the PCA components using ggplotly (dynamic plots, to use only with reduced dimensionality).}
\item{points_size}{It must be a number and it defines the size of the points.}
\item{specific_palette}{use some specific color for the clusters}
\item{plot_legend}{plot the legend}
\item{legend_title}{define a title for the legend}
\item{legend_labels}{labels for the legend}
\item{silent}{A logical value indicating whether the function has to remain silent or not. Default value is \code{FALSE}.}
}
\value{
It will return a modified trajectory matrix and print the principal components vector.
}
\description{
\code{select_features} is able to select input variables on the basis of the trajectory input. For the moment only PCA-based feature selection
is supported. Moreover, this tool is meant to be used with the total trajectory input.
}
\details{
This function is based primarily on the base R function \code{princomp} and on \code{PCAproj} from the package pcaPP. Instead, for more details on the SAPPHIRE analysis, please refer to the main documentation
of the original campari software \url{http://campari.sourceforge.net/documentation.html}.
}
\seealso{
\code{\link{princomp}}, \code{\link{PCAproj}}, \code{\link{adjl_from_progindex}}, \code{\link{gen_progindex}}, \code{\link{gen_annotation}}.
}
|
# Regression tests for dplyr::transmute() (plus two closely related
# rename() regressions). Each test references the GitHub issue it guards.
context("transmute")
# Grouping metadata must survive transmute(), even for grouping variables
# whose names need backticks.
test_that("non-syntactic grouping variable is preserved (#1138)", {
  df <- tibble(`a b` = 1L) %>% group_by(`a b`) %>% transmute()
  expect_named(df, "a b")
})
# Empty transmutes -------------------------------------------------
test_that("transmute with no args returns nothing", {
  # Zero expressions -> zero columns, but the row count must be kept.
  empty <- transmute(mtcars)
  expect_equal(ncol(empty), 0)
  expect_equal(nrow(empty), 32)
})
# transmute variables -----------------------------------------------
test_that("transmute succeeds in presence of raw columns (#1803)", {
  df <- tibble(a = 1:3, b = as.raw(1:3))
  expect_identical(transmute(df, a), df["a"])
  expect_identical(transmute(df, b), df["b"])
})
# User column names that collide with internal helper argument names must
# not be swallowed by partial argument matching.
test_that("arguments to transmute() don't match vars_transmute() arguments", {
  df <- tibble(a = 1)
  expect_identical(transmute(df, var = a), tibble(var = 1))
  expect_identical(transmute(df, exclude = a), tibble(exclude = 1))
  expect_identical(transmute(df, include = a), tibble(include = 1))
})
test_that("arguments to rename() don't match vars_rename() arguments (#2861)", {
  df <- tibble(a = 1)
  expect_identical(rename(df, var = a), tibble(var = 1))
  expect_identical(rename(group_by(df, a), var = a), group_by(tibble(var = 1), var))
  expect_identical(rename(df, strict = a), tibble(strict = 1))
  expect_identical(rename(group_by(df, a), strict = a), group_by(tibble(strict = 1), strict))
})
# The .data pronoun must behave exactly like a bare column reference.
test_that("can transmute() with .data pronoun (#2715)", {
  expect_identical(transmute(mtcars, .data$cyl), transmute(mtcars, cyl))
})
# `name = NULL` removes a previously computed column; doing so must be
# silent even when later expressions already consumed that column.
test_that("transmute() does not warn when a variable is removed with = NULL (#4609)", {
  df <- data.frame(x=1)
  expect_warning(transmute(df, y =x+1, z=y*2, y = NULL), NA)
})
# A data-frame expression is auto-spliced into individual columns.
test_that("transmute() can handle auto splicing", {
  expect_equal(
    iris %>% transmute(tibble(Sepal.Length, Sepal.Width)),
    iris %>% select(Sepal.Length, Sepal.Width)
  )
})
| /tests/testthat/test-transmute.R | permissive | krlmlr/dplyr | R | false | false | 1,881 | r | context("transmute")
test_that("non-syntactic grouping variable is preserved (#1138)", {
df <- tibble(`a b` = 1L) %>% group_by(`a b`) %>% transmute()
expect_named(df, "a b")
})
# Empty transmutes -------------------------------------------------
test_that("transmute with no args returns nothing", {
empty <- transmute(mtcars)
expect_equal(ncol(empty), 0)
expect_equal(nrow(empty), 32)
})
# transmute variables -----------------------------------------------
test_that("transmute succeeds in presence of raw columns (#1803)", {
df <- tibble(a = 1:3, b = as.raw(1:3))
expect_identical(transmute(df, a), df["a"])
expect_identical(transmute(df, b), df["b"])
})
test_that("arguments to transmute() don't match vars_transmute() arguments", {
df <- tibble(a = 1)
expect_identical(transmute(df, var = a), tibble(var = 1))
expect_identical(transmute(df, exclude = a), tibble(exclude = 1))
expect_identical(transmute(df, include = a), tibble(include = 1))
})
test_that("arguments to rename() don't match vars_rename() arguments (#2861)", {
df <- tibble(a = 1)
expect_identical(rename(df, var = a), tibble(var = 1))
expect_identical(rename(group_by(df, a), var = a), group_by(tibble(var = 1), var))
expect_identical(rename(df, strict = a), tibble(strict = 1))
expect_identical(rename(group_by(df, a), strict = a), group_by(tibble(strict = 1), strict))
})
test_that("can transmute() with .data pronoun (#2715)", {
expect_identical(transmute(mtcars, .data$cyl), transmute(mtcars, cyl))
})
test_that("transmute() does not warn when a variable is removed with = NULL (#4609)", {
df <- data.frame(x=1)
expect_warning(transmute(df, y =x+1, z=y*2, y = NULL), NA)
})
test_that("transmute() can handle auto splicing", {
expect_equal(
iris %>% transmute(tibble(Sepal.Length, Sepal.Width)),
iris %>% select(Sepal.Length, Sepal.Width)
)
})
|
# Attach the packages used throughout the project at session start-up.
# NOTE(review): require() returns FALSE instead of signalling an error when
# a package is missing, so failures here are silent -- consider library()
# (or an explicit requireNamespace() check) if these are hard dependencies.
require("ggplot2")
require("gplots")
require("grid")
require("plyr")
require("RCurl")
require("reshape2") | /.Rprofile | no_license | cclanofirish/DV_RProject1 | R | false | false | 105 | rprofile | require("ggplot2")
require("gplots")
require("grid")
require("plyr")
require("RCurl")
require("reshape2") |
#CONCOR supplementary functions
#Tyme Suda
.blk_apply <- function(iobject, split, v = "cat") {
  # Attach CONCOR block membership to an igraph object as a vertex attribute.
  #
  # iobject: igraph object whose vertices carry a "name" attribute.
  # split:   data frame with columns `vertex` (node name) and `block` (id).
  # v:       name of the vertex attribute to write.
  # Returns the graph with the per-vertex block ids stored under `v`.
  idx <- match(igraph::vertex.attributes(iobject)$name, split$vertex)
  igraph::set.vertex.attribute(iobject, v, value = split$block[idx])
}
concor_make_igraph <- function(adj_list, nsplit = 1) {
  # Run CONCOR on a list of adjacency matrices and return the corresponding
  # igraph objects with block membership attached as a vertex attribute
  # named "csplit<nsplit>".
  #
  # adj_list: list of adjacency matrices (validated by .concor_validitycheck,
  #           defined elsewhere in this package).
  # nsplit:   number of successive CONCOR splits to perform.
  # NOTE(review): warnings from concor() are silenced here -- confirm they
  # are expected (e.g. degenerate correlations) rather than signs of bad
  # input.
  adj_list <- .concor_validitycheck(adj_list)
  concor_out <- suppressWarnings(concor(adj_list, nsplit))
  igraph_list <- lapply(adj_list,
                        function(x) igraph::graph_from_adjacency_matrix(x))
  v <- paste("csplit", nsplit, sep = "")
  igraph_out <- lapply(igraph_list, function(x) .blk_apply(x, concor_out, v))
  return(igraph_out)
}
.name_igraph <- function(iobject) {
  # Assign default zero-padded vertex names ("V01", "V02", ...) to a graph.
  # Padding width is derived from the vertex count so names sort
  # lexicographically in numeric order.
  #
  # Fixed: V() and the vertex.attributes<- replacement were called without
  # the igraph:: prefix; unlike the rest of this file, that only works when
  # igraph is attached with library(). Also guards the empty graph, where
  # floor(log10(0)) is -Inf.
  n <- igraph::vcount(iobject)
  if (n == 0) {
    return(iobject)  # nothing to name
  }
  n_zero <- floor(log10(n)) + 1
  num_list <- formatC(seq_len(n), width = n_zero, format = "d", flag = "0")
  iobject <- igraph::set.vertex.attribute(iobject, "name",
                                          value = paste0("V", num_list))
  return(iobject)
}
concor_igraph_apply <- function(igraph_list, nsplit = 1) {
  # Run CONCOR directly on a list of igraph objects and attach the block
  # membership to each graph as a vertex attribute named "csplit<nsplit>".
  # CONCOR results are matched back to vertices by name, so unnamed graphs
  # get default names first.
  #
  # Fixed: vertex.attributes() was called without the igraph:: prefix,
  # which fails when igraph is not attached; the rest of this file uses
  # fully qualified calls. sapply() replaced by vapply() so the result is
  # guaranteed to be a logical vector even for edge-case inputs.
  #
  # NOTE(review): default names are only added when *every* graph lacks
  # them; a list mixing named and unnamed graphs will fail downstream in
  # .blk_apply -- confirm this is intended.
  b <- vapply(igraph_list,
              function(x) is.null(igraph::vertex.attributes(x)$name),
              logical(1))
  if (all(b)) {
    warning("node names don't exist\nAdding default node names\n")
    igraph_list <- lapply(igraph_list, .name_igraph)
  }
  adj_list <- lapply(igraph_list,
                     function(x) igraph::get.adjacency(x, sparse = FALSE))
  concor_out <- suppressWarnings(concor(adj_list, nsplit))
  v <- paste("csplit", nsplit, sep = "")
  igraph_out <- lapply(igraph_list, function(x) .blk_apply(x, concor_out, v))
  return(igraph_out)
}
plot_socio <- function(iobject, nsplit = NULL, vertex.label = NA,
                       vertex.size = 5, edge.arrow.size = .3) {
  # Plot a sociogram with vertices colored by their CONCOR block, read from
  # the vertex attribute "csplit<nsplit>" written by the functions above.
  #
  # NOTE(review): with the default nsplit = NULL the attribute looked up is
  # literally "csplit"; if absent, vcolors is NULL and igraph falls back to
  # its default vertex color -- confirm this fallback is intended.
  split_name <- paste0("csplit", nsplit)
  vcolors <- igraph::vertex.attributes(iobject)[[split_name]]
  igraph::plot.igraph(iobject, vertex.color = vcolors,
                      vertex.label = vertex.label,
                      vertex.size = vertex.size,
                      edge.arrow.size = edge.arrow.size)
}
| /R/CONCOR_supplemental_fun.R | no_license | Zeldoxsis/concorR | R | false | false | 2,022 | r | #CONCOR supplementary functions
#Tyme Suda
.blk_apply <- function(iobject, split, v = "cat") {
o <- match(igraph::vertex.attributes(iobject)$name, split$vertex)
o_block <- split$block[o]
blk_return <- igraph::set.vertex.attribute(iobject, v, value = o_block)
return(blk_return)
}
concor_make_igraph <- function(adj_list, nsplit = 1) {
adj_list <- .concor_validitycheck(adj_list)
concor_out <- suppressWarnings(concor(adj_list, nsplit))
igraph_list <- lapply(adj_list,
function(x) igraph::graph_from_adjacency_matrix(x))
v <- paste("csplit", nsplit, sep = "")
igraph_out <- lapply(igraph_list, function(x) .blk_apply(x, concor_out, v))
return(igraph_out)
}
.name_igraph <- function(iobject) {
l <- length(V(iobject))
lvec <- 1:l
n_zero <- floor(log10(l))+1
num_list <- formatC(lvec, width = n_zero, format = "d", flag = "0")
v <- paste0("V", num_list)
vertex.attributes(iobject)$name <- v
return(iobject)
}
concor_igraph_apply <- function(igraph_list, nsplit = 1) {
b <- sapply(igraph_list,
function(x) is.null(vertex.attributes(x)$name))
if (all(b)) {
warning("node names don't exist\nAdding default node names\n")
igraph_list <- lapply(igraph_list, .name_igraph)
}
adj_list <- lapply(igraph_list,
function(x) igraph::get.adjacency(x, sparse = FALSE))
concor_out <- suppressWarnings(concor(adj_list, nsplit))
v <- paste("csplit", nsplit, sep = "")
igraph_out <- lapply(igraph_list, function(x) .blk_apply(x, concor_out, v))
return(igraph_out)
}
plot_socio <- function(iobject, nsplit = NULL, vertex.label = NA,
vertex.size = 5, edge.arrow.size = .3) {
split_name <- paste0("csplit", nsplit)
vcolors <- igraph::vertex.attributes(iobject)[[split_name]]
igraph::plot.igraph(iobject, vertex.color = vcolors,
vertex.label = vertex.label,
vertex.size = vertex.size,
edge.arrow.size = edge.arrow.size)
}
|
# globals ----
{
library(tidyverse)
library(rsample)
  genData <- function(nrow = 1000, ncol = 1000, .min = 0, .max = 1){
    # Generating test data: a target column `y` plus `ncol` feature columns
    # `x.1` ... `x.<ncol>`, all drawn i.i.d. from runif(.min, .max).
    # To prevent the need of normalizing the data use the defaults for min and max
    # NOTE(review): columns are accumulated with foreach/cbind, which copies
    # the growing frame on every iteration (O(ncol^2)); fine for test sizes.
    library(foreach)
    library(iterators)
    data <- foreach( c=1:ncol,
                     .init = tibble( y = runif(nrow, min = .min, max = .max) ),
                     .combine = cbind) %do% {
                       col.name <- paste0('x.', c)
                       tmp <- tibble( x = runif(nrow, min = .min, max = .max) )
                       names(tmp) <- col.name
                       return(tmp)
                     }
    return (data)
  }
testData <- genData()
data_split <- testData %>% rsample::initial_time_split(prop = 0.9)
train_tbl <- training(data_split)
test_tbl <- testing(data_split)
}
# h2o ----
{
  # Run h2o AutoML on `data` for `.max_runtime_secs` seconds, save the
  # leaderboard-best model under models/h2o/automl/ and return that path.
  # NOTE(review): grep(pattern = 'x.', ...) is a regex where '.' matches any
  # character; it works here because features are the only columns starting
  # with "x" -- consider fixed = TRUE. `test` is only consumed by the
  # commented-out lime code, and h2o.shutdown() is unconditional, killing
  # any h2o cluster shared with other code.
  h2o_automl_test <- function(data=testData, .max_runtime_secs = 60 * 3, test = test_tbl){
    #set-up ----
    {
      library(h2o)
      library(lime)
      h2o.init()
    }
    # modeling ----
    {
      aml <- h2o.automl( x = grep( pattern = 'x.', x = names(data)), #indices of features
                         y = grep( pattern = 'y' , x = names(data)), #indices of target (will be always 1)
                         training_frame = as.h2o(data),
                         nfolds = 5,
                         max_runtime_secs = .max_runtime_secs )
      # Pick the top model from the AutoML leaderboard.
      model <- aml@leaderboard %>%
        as_tibble() %>%
        slice(1) %>%
        pull(model_id) %>%
        h2o.getModel()
    }
    # evaluation ----
    {
      h2o.performance(model = model, xval = TRUE)
      #explainer <- lime (data, model)
      #explanation <- explain(test, explanation, n_features = 5, feature_select = "highest_weights")
      #p <- plot_explanations(explanation) # not working :(
    }
    # store ----
    {
      #ggplot2::ggsave(filename = paste(model_filepath, lime.plot.png, sep = '/'), plot = p)
      dir.create( path = model_filepath <- paste('models', 'h2o', 'automl', sep = '/'), showWarnings = F, recursive = T)
      h2o.saveModel(model, model_filepath, force = TRUE)
    }
    # clean-up ----
    {
      h2o.shutdown(prompt = F)
    }
    return(model_filepath)
  }
}
# autokeras ----
{
  # One-time environment setup for autokeras: installs the R package if
  # missing, activates the reticulate virtualenv, then installs the Python
  # backend (GPU TensorFlow 2.1.0).
  # NOTE(review): paths such as /opt/conda/bin/conda are machine-specific;
  # the function has side effects only and returns the install result.
  setUpAutokeras <- function(){
    if(! ("autokeras" %in% (installed.packages() %>% as_tibble())$Package) ){
      install.packages('autokeras')
    }
    # library(reticulate)
    # if( !('autokeras' %in% reticulate::conda_list(conda = '/opt/conda/bin/conda')$name) ){
    #   reticulate::conda_create(envname = 'autokeras', packages = 'python=3.6', conda = '/opt/conda/bin/conda')
    # }
    reticulate::use_virtualenv()
    library(autokeras)
    library(keras)
    autokeras::install_autokeras( method = 'virtualenv',
                                  conda = '/opt/conda/bin/conda',
                                  tensorflow = '2.1.0-gpu',
                                  version = 'default' )
  }
autokeras_test <- function(data=train_tbl, .max_trials = 10, .epochs = 10, test = test_tbl){
# set-up ----
{
setUpAutokeras()
library(autokeras)
library(keras)
library(reticulate)
library(ggplot2)
#reticulate::use_condaenv(condaenv = 'autokeras', conda = '/opt/conda/bin/conda')
reticulate::use_virtualenv()
}
model <- NULL
# modeling ----
{
reg <- model_structured_data_regressor(
column_names = grep(pattern = 'x.', x = names(data), value = T),
loss = "mean_squared_error",
max_trials = .max_trials,
objective = "val_loss",
overwrite = TRUE,
seed = runif(1, 0, 1e+06) )
tensorboard("models/logs/run_autokeras")
model <-
fit( object = reg,
x = as_tibble(data[ , grep( pattern = 'x.', x = names(data))]), #tibble of features
y = as_tibble(data[ , grep( pattern = 'y' , x = names(data))]), # target values
epochs = .epochs,
callbacks = list (
keras::callback_tensorboard("models/logs/run_autokeras"),
# keras::callback_reduce_lr_on_plateau(monitor = "val_loss", factor = 0.01),
keras::callback_early_stopping(min_delta = 0.0001, restore_best_weights = TRUE, verbose = T)
),
validation_split = 0.2
)
}
# evaluation ----
{
# Predict with the best model
predicted <- tibble(idx = seq(1:nrow(test)),
value = predict(model, test[ , grep( pattern = 'x.', x = names(data))]),
variable = 'predicted' )
result <- rbind( tibble(idx = seq(1:nrow(test)),
value = test$y,
variable = 'value' ),
predicted ) %>%
arrange(idx) %>%
select(idx, variable, value)
p <- result %>% ggplot(aes(idx, value, colour = variable)) + geom_line()
# Evaluate the best model with testing data
model %>% evaluate(
x = as_tibble(test_tbl[ , grep( pattern = 'x.', x = names(data))]), #tibble of features
y = as_tibble(test_tbl[ , grep( pattern = 'y' , x = names(data))]) # target values
)
}
# store ----
{
# save the model
dir.create( path = dirname( model_filepath <- paste('models', 'autokeras', 'autokeras.model', sep = '/') ), showWarnings = F, recursive = T)
autokeras::save_model(autokeras_model = model, filename = model_filepath)
#nvidia-smi pmon -c 1 --select m | grep rsession
}
return (model)
}
}
# keras & tensorflow ----
{
# generators ----
{
# data preparation
# comming from https://blogs.rstudio.com/tensorflow/posts/2017-12-20-time-series-forecasting-with-recurrent-neural-networks/
generator <- function(data, lookback, delay, min_index, max_index,
shuffle = FALSE, batch_size = 128, step = 1) {
if (is.null(max_index))
max_index <- nrow(data) - delay - 1
i <- min_index + lookback
function() {
if (shuffle) {
rows <- sample(c((min_index+lookback):max_index), size = batch_size)
} else {
if (i + batch_size >= max_index)
i <<- min_index + lookback
rows <- c(i:min(i+batch_size-1, max_index))
i <<- i + length(rows)
}
samples <- array(0, dim = c(length(rows),
lookback / step,
dim(data)[[-1]]))
targets <- array(0, dim = c(length(rows)))
for (j in 1:length(rows)) {
indices <- seq(rows[[j]] - lookback, rows[[j]]-1,
length.out = dim(samples)[[2]])
samples[j,,] <- data[indices,]
targets[[j]] <- data[rows[[j]] + delay, 1] # target variable must always be the first column !!!!
}
list(samples, targets)
}
}
lookback = 5 # Observations will go back 5 rows
step = 1 # Observations will be sampled at one data point per day.
delay = 0 # uninteresting for the tests
batch_size = 30 #
}
basicTFtest <- function(data = testData){
# set up ----
{
library(reticulate)
use_condaenv(condaenv = 'r-reticulate', conda = '/opt/conda/bin/conda')
train_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = 1,
max_index = floor(nrow(data)*(8/10)),
shuffle = FALSE,
step = step,
batch_size = batch_size
)
val_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = floor(nrow(data)*(8/10)) + 1,
max_index = floor(nrow(data)*(9/10)),
step = step,
batch_size = batch_size
)
test_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = floor(nrow(data)*(9/10)) + 1,
max_index = nrow(data),
step = step,
batch_size = batch_size
)
# # How many steps to draw from val_gen in order to see the entire validation set
val_steps <- (floor(nrow(data)*(9/10)) - floor(nrow(data)*(8/10)) + 1 - lookback) / batch_size
#
# # How many steps to draw from test_gen in order to see the entire test set
test_steps <- (nrow(data) - floor(nrow(data)*(9/10)) + 1 - lookback) / batch_size
}
library(keras)
model <- keras_model_sequential() %>%
layer_flatten(input_shape = c(lookback / step, dim(data)[-1])) %>%
layer_dense(units = 32, activation = "relu") %>%
layer_dense(units = 1)
# at this point rsession needs 10GB more of GPU memory
model %>% compile(
optimizer = optimizer_rmsprop(),
loss = "mae"
)
tensorboard("models/logs/run_basicTF")
history <- model %>% fit_generator(
train_gen,
steps_per_epoch = 500,
epochs = 20,
validation_data = val_gen,
validation_steps = val_steps,
callbacks = callback_tensorboard("models/logs/run_basicTF")
)
# this will result in an error when using TensorFlow 2.1.0 as described in bug 36919
# https://github.com/tensorflow/tensorflow/issues/36919
# but it works with TensorFlow 2.0.0
# here still the GPU memeory is used
# how to release it?
evaluate_generator(model, test_gen, test_steps)
dir.create('models/tensorflow/', recursive = T, showWarnings = F)
save_model_hdf5(model, filepath = 'models/tensorflow/basic.h5')
return (model)
}
basicRNN_test <- function(data = testData){
# set up ----
{
library(reticulate)
use_condaenv(condaenv = 'r-reticulate', conda = '/opt/conda/bin/conda')
train_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = 1,
max_index = floor(nrow(data)*(8/10)),
shuffle = FALSE,
step = step,
batch_size = batch_size
)
val_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = floor(nrow(data)*(8/10)) + 1,
max_index = floor(nrow(data)*(9/10)),
step = step,
batch_size = batch_size
)
test_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = floor(nrow(data)*(9/10)) + 1,
max_index = nrow(data),
step = step,
batch_size = batch_size
)
# # How many steps to draw from val_gen in order to see the entire validation set
val_steps <- (floor(nrow(data)*(9/10)) - floor(nrow(data)*(8/10)) + 1 - lookback) / batch_size
#
# # How many steps to draw from test_gen in order to see the entire test set
test_steps <- (nrow(data) - floor(nrow(data)*(9/10)) + 1 - lookback) / batch_size
}
library(keras)
model <- keras_model_sequential() %>%
layer_gru(units = 32, input_shape = list(NULL, dim(data)[[-1]])) %>%
layer_dense(units = 1)
# at this point rsession needs 10GB more of GPU memory
model %>% compile(
optimizer = optimizer_rmsprop(),
loss = "mae"
)
tensorboard("models/logs/run_basicRNN")
history <- model %>% fit_generator(
train_gen,
steps_per_epoch = 500,
epochs = 20,
validation_data = val_gen,
validation_steps = val_steps,
callbacks = callback_tensorboard("models/logs/run_basicRNN")
)
evaluate_generator(model, test_gen, test_steps)
dir.create('models/tensorflow/', recursive = T, showWarnings = F)
save_model_hdf5(model, filepath = 'models/tensorflow/RNN.h5')
return (model)
}
basicRNN_w_dropout_test <- function(data = testData){
# set up ----
{
library(reticulate)
use_condaenv(condaenv = 'r-reticulate', conda = '/opt/conda/bin/conda')
train_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = 1,
max_index = floor(nrow(data)*(8/10)),
shuffle = FALSE,
step = step,
batch_size = batch_size
)
val_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = floor(nrow(data)*(8/10)) + 1,
max_index = floor(nrow(data)*(9/10)),
step = step,
batch_size = batch_size
)
test_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = floor(nrow(data)*(9/10)) + 1,
max_index = nrow(data),
step = step,
batch_size = batch_size
)
# # How many steps to draw from val_gen in order to see the entire validation set
val_steps <- (floor(nrow(data)*(9/10)) - floor(nrow(data)*(8/10)) + 1 - lookback) / batch_size
#
# # How many steps to draw from test_gen in order to see the entire test set
test_steps <- (nrow(data) - floor(nrow(data)*(9/10)) + 1 - lookback) / batch_size
}
library(keras)
model <- keras_model_sequential() %>%
layer_gru(units = 32, dropout = 0.2, recurrent_dropout = 0.2,
input_shape = list(NULL, dim(data)[[-1]])) %>%
layer_dense(units = 1)
# at this point rsession needs 10GB more of GPU memory
model %>% compile(
optimizer = optimizer_rmsprop(),
loss = "mae"
)
tensorboard("models/logs/run_basicRNN_w_dropout")
history <- model %>% fit_generator(
train_gen,
steps_per_epoch = 500,
epochs = 20,
validation_data = val_gen,
validation_steps = val_steps,
callbacks = callback_tensorboard("models/logs/run_basicRNN_w_dropout")
)
evaluate_generator(model, test_gen, test_steps)
dir.create('models/tensorflow/', recursive = T, showWarnings = F)
save_model_hdf5(model, filepath = 'models/tensorflow/RNN_w_dropout.h5')
return (model)
}
basicStackedRNN_test <- function(data = testData){
# set up ----
{
library(reticulate)
use_condaenv(condaenv = 'r-reticulate', conda = '/opt/conda/bin/conda')
train_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = 1,
max_index = floor(nrow(data)*(8/10)),
shuffle = FALSE,
step = step,
batch_size = batch_size
)
val_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = floor(nrow(data)*(8/10)) + 1,
max_index = floor(nrow(data)*(9/10)),
step = step,
batch_size = batch_size
)
test_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = floor(nrow(data)*(9/10)) + 1,
max_index = nrow(data),
step = step,
batch_size = batch_size
)
# # How many steps to draw from val_gen in order to see the entire validation set
val_steps <- (floor(nrow(data)*(9/10)) - floor(nrow(data)*(8/10)) + 1 - lookback) / batch_size
#
# # How many steps to draw from test_gen in order to see the entire test set
test_steps <- (nrow(data) - floor(nrow(data)*(9/10)) + 1 - lookback) / batch_size
}
library(keras)
model <- keras_model_sequential() %>%
layer_gru(units = 32,
dropout = 0.1,
recurrent_dropout = 0.5,
return_sequences = TRUE,
input_shape = list(NULL, dim(data)[[-1]])) %>%
layer_gru(units = 64, activation = "relu",
dropout = 0.1,
recurrent_dropout = 0.5) %>%
layer_dense(units = 1)
model %>% compile(
optimizer = optimizer_rmsprop(),
loss = "mae"
)
tensorboard("models/logs/run_basicStackedRNN")
history <- model %>% fit_generator(
train_gen,
steps_per_epoch = 500,
epochs = 40,
validation_data = val_gen,
validation_steps = val_steps,
callbacks = callback_tensorboard("models/logs/run_basicStackedRNN")
)
evaluate_generator(model, test_gen, test_steps)
dir.create('models/tensorflow/', recursive = T, showWarnings = F)
save_model_hdf5(model, filepath = 'models/tensorflow/stackedRNN.h5')
return (model)
}
basicBidirectionalRNN_test <- function(data = testData){
# set up ----
{
library(reticulate)
use_condaenv(condaenv = 'r-reticulate', conda = '/opt/conda/bin/conda')
train_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = 1,
max_index = floor(nrow(data)*(8/10)),
shuffle = FALSE,
step = step,
batch_size = batch_size
)
val_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = floor(nrow(data)*(8/10)) + 1,
max_index = floor(nrow(data)*(9/10)),
step = step,
batch_size = batch_size
)
test_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = floor(nrow(data)*(9/10)) + 1,
max_index = nrow(data),
step = step,
batch_size = batch_size
)
# # How many steps to draw from val_gen in order to see the entire validation set
val_steps <- (floor(nrow(data)*(9/10)) - floor(nrow(data)*(8/10)) + 1 - lookback) / batch_size
#
# # How many steps to draw from test_gen in order to see the entire test set
test_steps <- (nrow(data) - floor(nrow(data)*(9/10)) + 1 - lookback) / batch_size
}
library(keras)
model <- keras_model_sequential() %>%
layer_embedding(input_dim = max_features, output_dim = 32) %>%
bidirectional(
layer_lstm(units = 32)
) %>%
layer_dense(units = 1, activation = "sigmoid")
model %>% compile(
optimizer = "rmsprop",
loss = "binary_crossentropy",
metrics = c("acc")
)
tensorboard("models/logs/run_basicBidirectionalRNN")
history <- model %>% fit(
x_train, y_train,
epochs = 40,
batch_size = 128,
validation_split = 0.2,
callbacks = callback_tensorboard("models/logs/run_basicBidirectionalRNN")
)
evaluate_generator(model, test_gen, test_steps)
dir.create('models/tensorflow/', recursive = T, showWarnings = F)
save_model_hdf5(model, filepath = 'models/tensorflow/biRNN.h5')
return (model)
}
}
{ # test the nbt model
# library(keras)
# library(ini)
# ini <- ini::read.ini('../nbt/10_Models/FOREX_EURRUB_close/20200327/model.ini')
# model <- load_model_hdf5(paste('../nbt', ini[['model']]$filename.1, sep = '/'))
# model_data <- read.csv(paste('../nbt', ini[['data']]$file, sep = '/'))
# sd <- ini[['normalizer']]$sd
# mean <- ini[['normalizer']]$mean
#
# unscale <- function(df, sd=std, mean=mean){
# unscaled <- foreach(c = iter(df, by='col'), .combine = cbind ) %do% {
# return ( tibble::enframe( c * sd + mean,
# name = NULL) )
# }
# names(unscaled) <- names(df)
# return (unscaled)
# }
#
# lookback = 10
# delay = 1
# min_index = 1
# max_index = nrow(model_data)
# batch_size = 30
# step = 1
# # data preparation
# # comming from https://blogs.rstudio.com/tensorflow/posts/2017-12-20-time-series-forecasting-with-recurrent-neural-networks/
#
# generator <- function(data, lookback, delay,
# min_index, max_index,
# shuffle = FALSE, batch_size, step) {
# if (is.null(max_index))
# max_index <- nrow(data) - delay - 1
# i <- min_index + lookback
# function() {
# if (shuffle) {
# rows <- sample(c((min_index+lookback):max_index), size = batch_size)
# } else {
# if (i + batch_size >= max_index)
# i <<- min_index + lookback
# rows <- c(i:min(i+batch_size, max_index))
# i <<- i + length(rows)
# }
#
# samples <- array(0, dim = c(length(rows),
# lookback / step,
# dim(data)[[-1]]))
# targets <- array(0, dim = c(length(rows)))
#
# for (j in 1:length(rows)) {
# indices <- seq(rows[[j]] - lookback, rows[[j]] - 1,
# length.out = dim(samples)[[2]])
# samples[j,,] <- data[indices,]
# targets[[j]] <- data[rows[[j]] + delay, 1]
# }
#
# list(samples, targets)
# }
# }
#
# data_gen <- generator(data = as.matrix(model_data), lookback, delay = delay,
# min_index=1, max_index=NULL,
# shuffle = FALSE, batch_size=batch_size, step=step )
# predictions <- model %>% predict_generator(generator = data_gen, steps = nrow(model_data))
}
# cloudml ----
{
  # Placeholder for a Google Cloud ML test: only attaches the cloudml
  # package and runs the interactive gcloud_init() setup; no training yet.
  GCloud_test <- function() {
    # set-up
    {
      library(cloudml)
      gcloud_init()
    }
  }
}
# run tests ----
# h2o_automl_test()
# setUpAutokeras() # run this once to install the right tensorflow version
# train_tbl %>% autokeras_test()
# model <- basicTFtest()
# model <- basicRNNtest()
# model <- basicRNN_w_dropout_test() | /workspace.R | no_license | laiki/R_ML_tests | R | false | false | 21,941 | r |
# globals ----
{
library(tidyverse)
library(rsample)
genData <- function(nrow = 1000, ncol = 1000, .min = 0, .max = 1){
# Generating test data.
# To prevent the need of normalizing the data use the defaults for min and max
library(foreach)
library(iterators)
data <- foreach( c=1:ncol,
.init = tibble( y = runif(nrow, min = .min, max = .max) ),
.combine = cbind) %do% {
col.name <- paste0('x.', c)
tmp <- tibble( x = runif(nrow, min = .min, max = .max) )
names(tmp) <- col.name
return(tmp)
}
return (data)
}
testData <- genData()
data_split <- testData %>% rsample::initial_time_split(prop = 0.9)
train_tbl <- training(data_split)
test_tbl <- testing(data_split)
}
# h2o ----
{
h2o_automl_test <- function(data=testData, .max_runtime_secs = 60 * 3, test = test_tbl){
#set-up ----
{
library(h2o)
library(lime)
h2o.init()
}
# modeling ----
{
aml <- h2o.automl( x = grep( pattern = 'x.', x = names(data)), #indices of features
y = grep( pattern = 'y' , x = names(data)), #indices of target (will be always 1)
training_frame = as.h2o(data),
nfolds = 5,
max_runtime_secs = .max_runtime_secs )
model <- aml@leaderboard %>%
as_tibble() %>%
slice(1) %>%
pull(model_id) %>%
h2o.getModel()
}
# evaluation ----
{
h2o.performance(model = model, xval = TRUE)
#explainer <- lime (data, model)
#explanation <- explain(test, explanation, n_features = 5, feature_select = "highest_weights")
#p <- plot_explanations(explanation) # not working :(
}
# store ----
{
#ggplot2::ggsave(filename = paste(model_filepath, lime.plot.png, sep = '/'), plot = p)
dir.create( path = model_filepath <- paste('models', 'h2o', 'automl', sep = '/'), showWarnings = F, recursive = T)
h2o.saveModel(model, model_filepath, force = TRUE)
}
# clean-up ----
{
h2o.shutdown(prompt = F)
}
return(model_filepath)
}
}
# autokeras ----
{
setUpAutokeras <- function(){
if(! ("autokeras" %in% (installed.packages() %>% as_tibble())$Package) ){
install.packages('autokeras')
}
# library(reticulate)
# if( !('autokeras' %in% reticulate::conda_list(conda = '/opt/conda/bin/conda')$name) ){
# reticulate::conda_create(envname = 'autokeras', packages = 'python=3.6', conda = '/opt/conda/bin/conda')
# }
reticulate::use_virtualenv()
library(autokeras)
library(keras)
autokeras::install_autokeras( method = 'virtualenv',
conda = '/opt/conda/bin/conda',
tensorflow = '2.1.0-gpu',
version = 'default' )
}
autokeras_test <- function(data=train_tbl, .max_trials = 10, .epochs = 10, test = test_tbl){
# set-up ----
{
setUpAutokeras()
library(autokeras)
library(keras)
library(reticulate)
library(ggplot2)
#reticulate::use_condaenv(condaenv = 'autokeras', conda = '/opt/conda/bin/conda')
reticulate::use_virtualenv()
}
model <- NULL
# modeling ----
{
reg <- model_structured_data_regressor(
column_names = grep(pattern = 'x.', x = names(data), value = T),
loss = "mean_squared_error",
max_trials = .max_trials,
objective = "val_loss",
overwrite = TRUE,
seed = runif(1, 0, 1e+06) )
tensorboard("models/logs/run_autokeras")
model <-
fit( object = reg,
x = as_tibble(data[ , grep( pattern = 'x.', x = names(data))]), #tibble of features
y = as_tibble(data[ , grep( pattern = 'y' , x = names(data))]), # target values
epochs = .epochs,
callbacks = list (
keras::callback_tensorboard("models/logs/run_autokeras"),
# keras::callback_reduce_lr_on_plateau(monitor = "val_loss", factor = 0.01),
keras::callback_early_stopping(min_delta = 0.0001, restore_best_weights = TRUE, verbose = T)
),
validation_split = 0.2
)
}
# evaluation ----
{
# Predict with the best model
predicted <- tibble(idx = seq(1:nrow(test)),
value = predict(model, test[ , grep( pattern = 'x.', x = names(data))]),
variable = 'predicted' )
result <- rbind( tibble(idx = seq(1:nrow(test)),
value = test$y,
variable = 'value' ),
predicted ) %>%
arrange(idx) %>%
select(idx, variable, value)
p <- result %>% ggplot(aes(idx, value, colour = variable)) + geom_line()
# Evaluate the best model with testing data
model %>% evaluate(
x = as_tibble(test_tbl[ , grep( pattern = 'x.', x = names(data))]), #tibble of features
y = as_tibble(test_tbl[ , grep( pattern = 'y' , x = names(data))]) # target values
)
}
# store ----
{
# save the model
dir.create( path = dirname( model_filepath <- paste('models', 'autokeras', 'autokeras.model', sep = '/') ), showWarnings = F, recursive = T)
autokeras::save_model(autokeras_model = model, filename = model_filepath)
#nvidia-smi pmon -c 1 --select m | grep rsession
}
return (model)
}
}
# keras & tensorflow ----
{
# generators ----
{
    # data preparation
    # coming from https://blogs.rstudio.com/tensorflow/posts/2017-12-20-time-series-forecasting-with-recurrent-neural-networks/
    #
    # Returns a closure suitable for fit_generator()/evaluate_generator().
    # Each call yields list(samples, targets):
    #   * samples: array [batch, lookback/step, n_features] holding, for each
    #     sampled row, the `lookback` preceding rows (every `step`-th row).
    #   * targets: column 1 of `data`, `delay` rows after the sampled row --
    #     so the target variable MUST be the first column.
    # With shuffle = FALSE the closure walks [min_index, max_index]
    # sequentially and wraps around, keeping its cursor in `i` via `<<-`.
    generator <- function(data, lookback, delay, min_index, max_index,
                          shuffle = FALSE, batch_size = 128, step = 1) {
      # with no explicit upper bound, stop early enough that the target row
      # (rows[j] + delay) stays inside the data
      if (is.null(max_index))
        max_index <- nrow(data) - delay - 1
      i <- min_index + lookback
      function() {
        if (shuffle) {
          rows <- sample(c((min_index+lookback):max_index), size = batch_size)
        } else {
          if (i + batch_size >= max_index)
            i <<- min_index + lookback  # wrap around to the start of the range
          rows <- c(i:min(i+batch_size-1, max_index))
          i <<- i + length(rows)
        }
        samples <- array(0, dim = c(length(rows),
                                    lookback / step,
                                    dim(data)[[-1]]))
        targets <- array(0, dim = c(length(rows)))
        for (j in 1:length(rows)) {
          indices <- seq(rows[[j]] - lookback, rows[[j]]-1,
                         length.out = dim(samples)[[2]])
          samples[j,,] <- data[indices,]
          targets[[j]] <- data[rows[[j]] + delay, 1] # target variable must always be the first column !!!!
        }
        list(samples, targets)
      }
    }
    # shared generator parameters used by all basic*_test() functions below
    lookback = 5 # number of past rows fed to the model for each sample
    step = 1 # sample every row (no sub-sampling within the lookback window)
    delay = 0 # predict the current row -- uninteresting for the tests
    batch_size = 30 # rows per generated batch
}
  # Baseline densely-connected regressor on the generated series.
  # Splits `data` 80/10/10 (train/val/test) via the shared generator(), fits
  # flatten -> dense(32, relu) -> dense(1) with MAE loss, logs to
  # TensorBoard, evaluates on the test split and saves the model to
  # models/tensorflow/basic.h5. Returns the fitted keras model.
  # Relies on the globals lookback/step/delay/batch_size defined above.
  basicTFtest <- function(data = testData){
    # set up ----
    {
      library(reticulate)
      use_condaenv(condaenv = 'r-reticulate', conda = '/opt/conda/bin/conda')
      # first 80% of rows -> training batches
      train_gen <- generator(
        as.matrix(data),
        lookback = lookback,
        delay = delay,
        min_index = 1,
        max_index = floor(nrow(data)*(8/10)),
        shuffle = FALSE,
        step = step,
        batch_size = batch_size
      )
      # rows 80-90% -> validation batches
      val_gen <- generator(
        as.matrix(data),
        lookback = lookback,
        delay = delay,
        min_index = floor(nrow(data)*(8/10)) + 1,
        max_index = floor(nrow(data)*(9/10)),
        step = step,
        batch_size = batch_size
      )
      # last 10% of rows -> test batches
      test_gen <- generator(
        as.matrix(data),
        lookback = lookback,
        delay = delay,
        min_index = floor(nrow(data)*(9/10)) + 1,
        max_index = nrow(data),
        step = step,
        batch_size = batch_size
      )
      # # How many steps to draw from val_gen in order to see the entire validation set
      val_steps <- (floor(nrow(data)*(9/10)) - floor(nrow(data)*(8/10)) + 1 - lookback) / batch_size
      #
      # # How many steps to draw from test_gen in order to see the entire test set
      test_steps <- (nrow(data) - floor(nrow(data)*(9/10)) + 1 - lookback) / batch_size
    }
    library(keras)
    model <- keras_model_sequential() %>%
      layer_flatten(input_shape = c(lookback / step, dim(data)[-1])) %>%
      layer_dense(units = 32, activation = "relu") %>%
      layer_dense(units = 1)
    # at this point rsession needs 10GB more of GPU memory
    model %>% compile(
      optimizer = optimizer_rmsprop(),
      loss = "mae"
    )
    tensorboard("models/logs/run_basicTF")
    history <- model %>% fit_generator(
      train_gen,
      steps_per_epoch = 500,
      epochs = 20,
      validation_data = val_gen,
      validation_steps = val_steps,
      callbacks = callback_tensorboard("models/logs/run_basicTF")
    )
    # this will result in an error when using TensorFlow 2.1.0 as described in bug 36919
    # https://github.com/tensorflow/tensorflow/issues/36919
    # but it works with TensorFlow 2.0.0
    # here still the GPU memory is used
    # how to release it?
    evaluate_generator(model, test_gen, test_steps)
    dir.create('models/tensorflow/', recursive = T, showWarnings = F)
    save_model_hdf5(model, filepath = 'models/tensorflow/basic.h5')
    return (model)
  }
  # Single-layer GRU regressor on the generated series.
  # Same 80/10/10 generator split, TensorBoard logging and HDF5 dump as
  # basicTFtest(), but with a recurrent model: gru(32) -> dense(1).
  # Saves to models/tensorflow/RNN.h5 and returns the fitted model.
  # Relies on the globals lookback/step/delay/batch_size defined above.
  basicRNN_test <- function(data = testData){
    # set up ----
    {
      library(reticulate)
      use_condaenv(condaenv = 'r-reticulate', conda = '/opt/conda/bin/conda')
      # first 80% of rows -> training batches
      train_gen <- generator(
        as.matrix(data),
        lookback = lookback,
        delay = delay,
        min_index = 1,
        max_index = floor(nrow(data)*(8/10)),
        shuffle = FALSE,
        step = step,
        batch_size = batch_size
      )
      # rows 80-90% -> validation batches
      val_gen <- generator(
        as.matrix(data),
        lookback = lookback,
        delay = delay,
        min_index = floor(nrow(data)*(8/10)) + 1,
        max_index = floor(nrow(data)*(9/10)),
        step = step,
        batch_size = batch_size
      )
      # last 10% of rows -> test batches
      test_gen <- generator(
        as.matrix(data),
        lookback = lookback,
        delay = delay,
        min_index = floor(nrow(data)*(9/10)) + 1,
        max_index = nrow(data),
        step = step,
        batch_size = batch_size
      )
      # # How many steps to draw from val_gen in order to see the entire validation set
      val_steps <- (floor(nrow(data)*(9/10)) - floor(nrow(data)*(8/10)) + 1 - lookback) / batch_size
      #
      # # How many steps to draw from test_gen in order to see the entire test set
      test_steps <- (nrow(data) - floor(nrow(data)*(9/10)) + 1 - lookback) / batch_size
    }
    library(keras)
    model <- keras_model_sequential() %>%
      layer_gru(units = 32, input_shape = list(NULL, dim(data)[[-1]])) %>%
      layer_dense(units = 1)
    # at this point rsession needs 10GB more of GPU memory
    model %>% compile(
      optimizer = optimizer_rmsprop(),
      loss = "mae"
    )
    tensorboard("models/logs/run_basicRNN")
    history <- model %>% fit_generator(
      train_gen,
      steps_per_epoch = 500,
      epochs = 20,
      validation_data = val_gen,
      validation_steps = val_steps,
      callbacks = callback_tensorboard("models/logs/run_basicRNN")
    )
    evaluate_generator(model, test_gen, test_steps)
    dir.create('models/tensorflow/', recursive = T, showWarnings = F)
    save_model_hdf5(model, filepath = 'models/tensorflow/RNN.h5')
    return (model)
  }
  # GRU regressor with input and recurrent dropout (0.2 each) on the
  # generated series. Same 80/10/10 generator split, TensorBoard logging and
  # HDF5 dump as its siblings; saves to models/tensorflow/RNN_w_dropout.h5
  # and returns the fitted model.
  # Relies on the globals lookback/step/delay/batch_size defined above.
  basicRNN_w_dropout_test <- function(data = testData){
    # set up ----
    {
      library(reticulate)
      use_condaenv(condaenv = 'r-reticulate', conda = '/opt/conda/bin/conda')
      # first 80% of rows -> training batches
      train_gen <- generator(
        as.matrix(data),
        lookback = lookback,
        delay = delay,
        min_index = 1,
        max_index = floor(nrow(data)*(8/10)),
        shuffle = FALSE,
        step = step,
        batch_size = batch_size
      )
      # rows 80-90% -> validation batches
      val_gen <- generator(
        as.matrix(data),
        lookback = lookback,
        delay = delay,
        min_index = floor(nrow(data)*(8/10)) + 1,
        max_index = floor(nrow(data)*(9/10)),
        step = step,
        batch_size = batch_size
      )
      # last 10% of rows -> test batches
      test_gen <- generator(
        as.matrix(data),
        lookback = lookback,
        delay = delay,
        min_index = floor(nrow(data)*(9/10)) + 1,
        max_index = nrow(data),
        step = step,
        batch_size = batch_size
      )
      # # How many steps to draw from val_gen in order to see the entire validation set
      val_steps <- (floor(nrow(data)*(9/10)) - floor(nrow(data)*(8/10)) + 1 - lookback) / batch_size
      #
      # # How many steps to draw from test_gen in order to see the entire test set
      test_steps <- (nrow(data) - floor(nrow(data)*(9/10)) + 1 - lookback) / batch_size
    }
    library(keras)
    model <- keras_model_sequential() %>%
      layer_gru(units = 32, dropout = 0.2, recurrent_dropout = 0.2,
                input_shape = list(NULL, dim(data)[[-1]])) %>%
      layer_dense(units = 1)
    # at this point rsession needs 10GB more of GPU memory
    model %>% compile(
      optimizer = optimizer_rmsprop(),
      loss = "mae"
    )
    tensorboard("models/logs/run_basicRNN_w_dropout")
    history <- model %>% fit_generator(
      train_gen,
      steps_per_epoch = 500,
      epochs = 20,
      validation_data = val_gen,
      validation_steps = val_steps,
      callbacks = callback_tensorboard("models/logs/run_basicRNN_w_dropout")
    )
    evaluate_generator(model, test_gen, test_steps)
    dir.create('models/tensorflow/', recursive = T, showWarnings = F)
    save_model_hdf5(model, filepath = 'models/tensorflow/RNN_w_dropout.h5')
    return (model)
  }
  # Two stacked GRU layers (32 then 64 units, both with dropout) on the
  # generated series; the first layer returns sequences so the second can
  # consume them. Same 80/10/10 generator split and TensorBoard logging as
  # its siblings, trained for 40 epochs; saves to
  # models/tensorflow/stackedRNN.h5 and returns the fitted model.
  # Relies on the globals lookback/step/delay/batch_size defined above.
  basicStackedRNN_test <- function(data = testData){
    # set up ----
    {
      library(reticulate)
      use_condaenv(condaenv = 'r-reticulate', conda = '/opt/conda/bin/conda')
      # first 80% of rows -> training batches
      train_gen <- generator(
        as.matrix(data),
        lookback = lookback,
        delay = delay,
        min_index = 1,
        max_index = floor(nrow(data)*(8/10)),
        shuffle = FALSE,
        step = step,
        batch_size = batch_size
      )
      # rows 80-90% -> validation batches
      val_gen <- generator(
        as.matrix(data),
        lookback = lookback,
        delay = delay,
        min_index = floor(nrow(data)*(8/10)) + 1,
        max_index = floor(nrow(data)*(9/10)),
        step = step,
        batch_size = batch_size
      )
      # last 10% of rows -> test batches
      test_gen <- generator(
        as.matrix(data),
        lookback = lookback,
        delay = delay,
        min_index = floor(nrow(data)*(9/10)) + 1,
        max_index = nrow(data),
        step = step,
        batch_size = batch_size
      )
      # # How many steps to draw from val_gen in order to see the entire validation set
      val_steps <- (floor(nrow(data)*(9/10)) - floor(nrow(data)*(8/10)) + 1 - lookback) / batch_size
      #
      # # How many steps to draw from test_gen in order to see the entire test set
      test_steps <- (nrow(data) - floor(nrow(data)*(9/10)) + 1 - lookback) / batch_size
    }
    library(keras)
    model <- keras_model_sequential() %>%
      layer_gru(units = 32,
                dropout = 0.1,
                recurrent_dropout = 0.5,
                return_sequences = TRUE,
                input_shape = list(NULL, dim(data)[[-1]])) %>%
      layer_gru(units = 64, activation = "relu",
                dropout = 0.1,
                recurrent_dropout = 0.5) %>%
      layer_dense(units = 1)
    model %>% compile(
      optimizer = optimizer_rmsprop(),
      loss = "mae"
    )
    tensorboard("models/logs/run_basicStackedRNN")
    history <- model %>% fit_generator(
      train_gen,
      steps_per_epoch = 500,
      epochs = 40,
      validation_data = val_gen,
      validation_steps = val_steps,
      callbacks = callback_tensorboard("models/logs/run_basicStackedRNN")
    )
    evaluate_generator(model, test_gen, test_steps)
    dir.create('models/tensorflow/', recursive = T, showWarnings = F)
    save_model_hdf5(model, filepath = 'models/tensorflow/stackedRNN.h5')
    return (model)
  }
basicBidirectionalRNN_test <- function(data = testData){
# set up ----
{
library(reticulate)
use_condaenv(condaenv = 'r-reticulate', conda = '/opt/conda/bin/conda')
train_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = 1,
max_index = floor(nrow(data)*(8/10)),
shuffle = FALSE,
step = step,
batch_size = batch_size
)
val_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = floor(nrow(data)*(8/10)) + 1,
max_index = floor(nrow(data)*(9/10)),
step = step,
batch_size = batch_size
)
test_gen <- generator(
as.matrix(data),
lookback = lookback,
delay = delay,
min_index = floor(nrow(data)*(9/10)) + 1,
max_index = nrow(data),
step = step,
batch_size = batch_size
)
# # How many steps to draw from val_gen in order to see the entire validation set
val_steps <- (floor(nrow(data)*(9/10)) - floor(nrow(data)*(8/10)) + 1 - lookback) / batch_size
#
# # How many steps to draw from test_gen in order to see the entire test set
test_steps <- (nrow(data) - floor(nrow(data)*(9/10)) + 1 - lookback) / batch_size
}
library(keras)
model <- keras_model_sequential() %>%
layer_embedding(input_dim = max_features, output_dim = 32) %>%
bidirectional(
layer_lstm(units = 32)
) %>%
layer_dense(units = 1, activation = "sigmoid")
model %>% compile(
optimizer = "rmsprop",
loss = "binary_crossentropy",
metrics = c("acc")
)
tensorboard("models/logs/run_basicBidirectionalRNN")
history <- model %>% fit(
x_train, y_train,
epochs = 40,
batch_size = 128,
validation_split = 0.2,
callbacks = callback_tensorboard("models/logs/run_basicBidirectionalRNN")
)
evaluate_generator(model, test_gen, test_steps)
dir.create('models/tensorflow/', recursive = T, showWarnings = F)
save_model_hdf5(model, filepath = 'models/tensorflow/biRNN.h5')
return (model)
}
}
{ # test the nbt model
# library(keras)
# library(ini)
# ini <- ini::read.ini('../nbt/10_Models/FOREX_EURRUB_close/20200327/model.ini')
# model <- load_model_hdf5(paste('../nbt', ini[['model']]$filename.1, sep = '/'))
# model_data <- read.csv(paste('../nbt', ini[['data']]$file, sep = '/'))
# sd <- ini[['normalizer']]$sd
# mean <- ini[['normalizer']]$mean
#
# unscale <- function(df, sd=std, mean=mean){
# unscaled <- foreach(c = iter(df, by='col'), .combine = cbind ) %do% {
# return ( tibble::enframe( c * sd + mean,
# name = NULL) )
# }
# names(unscaled) <- names(df)
# return (unscaled)
# }
#
# lookback = 10
# delay = 1
# min_index = 1
# max_index = nrow(model_data)
# batch_size = 30
# step = 1
# # data preparation
# # comming from https://blogs.rstudio.com/tensorflow/posts/2017-12-20-time-series-forecasting-with-recurrent-neural-networks/
#
# generator <- function(data, lookback, delay,
# min_index, max_index,
# shuffle = FALSE, batch_size, step) {
# if (is.null(max_index))
# max_index <- nrow(data) - delay - 1
# i <- min_index + lookback
# function() {
# if (shuffle) {
# rows <- sample(c((min_index+lookback):max_index), size = batch_size)
# } else {
# if (i + batch_size >= max_index)
# i <<- min_index + lookback
# rows <- c(i:min(i+batch_size, max_index))
# i <<- i + length(rows)
# }
#
# samples <- array(0, dim = c(length(rows),
# lookback / step,
# dim(data)[[-1]]))
# targets <- array(0, dim = c(length(rows)))
#
# for (j in 1:length(rows)) {
# indices <- seq(rows[[j]] - lookback, rows[[j]] - 1,
# length.out = dim(samples)[[2]])
# samples[j,,] <- data[indices,]
# targets[[j]] <- data[rows[[j]] + delay, 1]
# }
#
# list(samples, targets)
# }
# }
#
# data_gen <- generator(data = as.matrix(model_data), lookback, delay = delay,
# min_index=1, max_index=NULL,
# shuffle = FALSE, batch_size=batch_size, step=step )
# predictions <- model %>% predict_generator(generator = data_gen, steps = nrow(model_data))
}
# cloudml ----
{
  # Smoke test for the Google Cloud ML tooling: attaches the cloudml
  # package and launches the gcloud SDK initialisation.
  # NOTE(review): gcloud_init() is interactive and writes gcloud SDK
  # configuration as a side effect -- only call this deliberately.
  GCloud_test <- function() {
    # set-up
    {
      library(cloudml)
      gcloud_init()
    }
  }
}
# run tests ----
# h2o_automl_test()
# setUpAutokeras() # run this once to install the right tensorflow version
# train_tbl %>% autokeras_test()
# model <- basicTFtest()
# model <- basicRNNtest()
# model <- basicRNN_w_dropout_test() |
#' ggplot smoothed coverage as density plot
#'
#' Renders a formatted coverage object as a filled area plot with
#' segment-aware x-axis breaks. Break/label construction is delegated to
#' \code{xBreaksLabels()} instead of being duplicated here.
#'
#' @param coverage A list with elements \code{scaled} (numeric coverage
#'   vector) and \code{metadata} (segment/flank description); when
#'   \code{metadata$centre_mode == "proportion"} an \code{overlap} element
#'   is also read.
#' @param fill Fill colour of the coverage area (also used for the
#'   percentage label in "proportion" mode).
#' @param alpha Fill transparency in [0, 1].
#' @param bar_width Width of the central proportion bar; defaults to half
#'   the centre segment (the bar geom itself is currently disabled).
#' @param bar_fill Fill of the central proportion bar; defaults to \code{fill}.
#' @param bar_alpha Alpha of the central proportion bar; defaults to \code{alpha}.
#'
#' @import ggplot2
#' @return A ggplot object, or NULL when the flank/segment combination is
#'   not supported yet.
#' @export
#'
#' @examples
plotRNAmap <- function(coverage, fill, alpha,
                       bar_width = NA,
                       bar_fill = NA,
                       bar_alpha = NA){
  # extract objects from list
  scaled <- coverage$scaled
  metadata <- coverage$metadata
  print(metadata)
  # density plot
  p <- ggplot() + theme_classic() # make this customisable?
  # seq_along() keeps this well-defined even for zero-length coverage
  p <- p + ggplot2::geom_area( aes(seq_along(scaled), scaled ), colour = NA, fill = fill, alpha = alpha)
  # PROPORTION
  # in "proportion" mode the centre is annotated with the percentage of
  # features that overlap (the bar geom is kept but disabled, as before)
  if( metadata$centre_mode == "proportion"){
    centre_point <- length(scaled) / 2
    my_ymax <- max(scaled)
    if( is.na(bar_alpha) ){
      bar_alpha <- alpha
    }
    if( is.na(bar_width) ){
      bar_width <- 0.5 * metadata$centre_seg
    }
    if( is.na(bar_fill) ){
      bar_fill <- fill
    }
    p <- p +
      #geom_bar( aes( x = centre_point, y = my_ymax * 0.90 ), stat = "identity" , fill = bar_fill, alpha = bar_alpha, width = bar_width) +
      annotate( geom = "text", x = centre_point, y = my_ymax * 0.10, label = paste0( signif(coverage$overlap * 100, digits = 3), "%" ), colour = fill )
  }
  # AXES
  # X axis marks annotate the original flanking parameters AND the parameters
  # set by formatting. The break/label logic is shared with genomap() via
  # xBreaksLabels(); unsupported flank/segment combinations fail soft.
  x_data <- tryCatch(xBreaksLabels(metadata), error = function(e) NULL)
  if (is.null(x_data) || is.null(x_data$breaks)) {
    message("not supported yet")
    return(NULL)
  }
  x_breaks <- x_data$breaks
  x_labels <- x_data$labels
  # total plot length; the symmetric flank-smaller-than-segment case drops
  # the leading +1 (matching the break positions computed for that case)
  total_length <- 1 + metadata$left_seg + metadata$centre_seg + metadata$right_seg
  if (metadata$A_flank < metadata$left_seg && metadata$A_flank < metadata$right_seg) {
    total_length <- metadata$left_seg + metadata$centre_seg + metadata$right_seg
  }
  p <- p +
    scale_x_continuous(
      "",
      breaks = x_breaks,
      label = x_labels,
      limits = c(1, total_length),
      expand = c(0, 0)
    ) +
    scale_y_continuous(
      "Normalised coverage",
      labels = scales::percent,
      expand = c(0, 0) ) +
    theme( axis.line.x = element_line(linetype = 3))
  return(p)
}
#' Compute x-axis breaks and labels for a formatted coverage scheme.
#'
#' Places tick marks at the plot start/end and at segment boundaries,
#' labelling them with distances relative to the feature's 5' and 3' ends.
#' Three symmetric flank/segment combinations are handled.
#'
#' @param metadata List with numeric elements \code{A_flank},
#'   \code{left_seg}, \code{centre_seg} and \code{right_seg}.
#' @return A list with elements \code{breaks} and \code{labels}, or NULL
#'   (with a message) for unsupported combinations. The original crashed
#'   with "object 'x_breaks' not found" in that case.
xBreaksLabels <- function(metadata){
  flank  <- metadata$A_flank
  left   <- metadata$left_seg
  centre <- metadata$centre_seg
  right  <- metadata$right_seg
  # special case when the segment lengths equal the flank on both sides
  if (flank == left && flank == right) {
    x_breaks <- c(
      1,
      flank,
      flank + centre,
      flank + centre + flank
    )
    x_labels <- c(
      -flank,
      0,
      0,
      paste("+", flank)
    )
    return(list(breaks = x_breaks, labels = x_labels))
  }
  # flank is smaller than both segments (symmetric):
  # double tick marks mark the scale change just inside each segment
  if (flank < left && flank < right) {
    x_breaks <- c(
      1,
      1 + flank,
      1 + left,
      1 + left + centre,
      1 + left + centre + right - flank,
      left + centre + right
    )
    x_labels <- c(
      paste0("-", flank),
      "5\'",
      paste0("+", left - flank),
      paste0("-", left - flank),
      "3\'",
      paste0("+", flank)
    )
    return(list(breaks = x_breaks, labels = x_labels))
  }
  # flank is larger than both segments (symmetric)
  if (flank > left && flank > right) {
    x_breaks <- c(
      1,
      1 + left,
      1 + flank,
      1 + flank + centre,
      1 + flank + centre + flank - right,
      flank + centre + flank
    )
    x_labels <- c(
      paste0("-", flank),
      0,
      paste0("+", left - flank),
      paste0("-", left - flank),
      0,
      paste0("+", flank)
    )
    return(list(breaks = x_breaks, labels = x_labels))
  }
  # asymmetric / mixed combinations are not implemented; fail soft
  message("not supported yet")
  NULL
}
# two separate functions
# genomap - plot a single distribution
# genomulti <- plot multiple distributions with the same formatting

#' Plot a single formatted coverage distribution.
#'
#' @param object Coverage list with `scaled` and `metadata` elements.
#' @param fill Fill colour used when `geom = "area"`.
#' @param colour Line colour used when `geom = "line"`.
#' @param alpha Transparency of the coverage geom.
#' @param geom Either "area" (filled) or "line".
#' @return A ggplot object, or NULL when the flank/segment combination is
#'   not supported yet.
genomap <- function( object, fill = "purple3", colour = "purple3", alpha = 0.75, geom = "area" ){
  metadata <- object$metadata
  # axis scheme derived from the formatting metadata
  total_length <- 1 + metadata$left_seg + metadata$centre_seg + metadata$right_seg
  x_data <- tryCatch(xBreaksLabels(metadata), error = function(e) NULL)
  # BUGFIX(review): the original tested `!exists("x_breaks")` AFTER assigning
  # x_breaks, so the guard could never fire and unsupported metadata crashed
  # inside xBreaksLabels instead; test the returned value.
  if (is.null(x_data) || is.null(x_data$breaks)) {
    message("not supported yet")
    return(NULL)
  }
  x_breaks <- x_data$breaks
  x_labels <- x_data$labels
  # create plot
  p <- ggplot() + theme_classic() +
    scale_x_continuous(
      "",
      breaks = x_breaks,
      label = x_labels,
      limits = c(1,total_length),
      expand = c(0, 0)
    ) +
    scale_y_continuous(
      "Normalised coverage",
      labels = scales::percent,
      expand = c(0, 0) ) +
    theme( axis.line.x = element_line(linetype = 3))
  # layer the coverage itself on top
  scaled <- object$scaled
  if( geom == "area" ){
    p <- p + geom_area( aes(seq_along(scaled), scaled ), fill = fill, alpha = alpha)
  }
  if( geom == "line"){
    p <- p + geom_line(aes(seq_along(scaled), scaled ), colour = colour, alpha = alpha)
  }
  return(p)
}
# just return a plot without any geoms

#' Build an empty, correctly-scaled coverage plot for a formatting scheme.
#'
#' Coverage tracks (e.g. addCoverageTrack()) can then be layered on top so
#' several distributions share identical axes.
#'
#' @param scheme Formatting metadata list (`A_flank`, `left_seg`,
#'   `centre_seg`, `right_seg`).
#' @return A ggplot object without geoms, or NULL when the flank/segment
#'   combination is not supported yet.
genomulti <- function(scheme){
  metadata <- scheme
  total_length <- 1 + metadata$left_seg + metadata$centre_seg + metadata$right_seg
  x_data <- tryCatch(xBreaksLabels(metadata), error = function(e) NULL)
  # BUGFIX(review): `!exists("x_breaks")` after the assignment above could
  # never be TRUE; test the returned value instead.
  if (is.null(x_data) || is.null(x_data$breaks)) {
    message("not supported yet")
    return(NULL)
  }
  x_breaks <- x_data$breaks
  x_labels <- x_data$labels
  # create plot
  ggplot() + theme_classic() +
    scale_x_continuous(
      "",
      breaks = x_breaks,
      label = x_labels,
      limits = c(1,total_length),
      expand = c(0, 0)
    ) +
    scale_y_continuous(
      "Normalised coverage",
      labels = scales::percent,
      expand = c(0, 0) ) +
    theme( axis.line.x = element_line(linetype = 3))
}
#' Build a coverage area layer for stacking onto an existing plot
#' (typically one created by genomulti()).
#'
#' @param coverage2 Coverage list with a `scaled` numeric vector.
#' @param fill Fill colour of the area.
#' @param alpha Fill transparency.
#' @param bar_width,bar_fill,bar_alpha Accepted for signature compatibility
#'   with plotRNAmap() but currently unused.
#' @return A ggplot2 layer (geom_area) to add to a plot.
addCoverageTrack <- function(coverage2, fill, alpha=0.5,
                             bar_width = NA,
                             bar_fill = NA,
                             bar_alpha = NA){
  # seq_along() instead of 1:length() keeps the layer well-defined for
  # zero-length coverage (1:0 would yield c(1, 0))
  ggplot2::geom_area(
    aes(seq_along(coverage2$scaled), coverage2$scaled ),
    colour = NA, fill = fill, alpha = alpha
  )
}
# play with creating new ggplot2 stats and layers
# what if you set the formatting scheme once and then apply that
# as a function to each intersection?
coverage_scheme <- function(left, centre, right, centre_length, smoothing) {
  # Bundle the formatting parameters into a named list so a single scheme
  # can be applied to several coverage objects (see makeCov()).
  list(
    left          = left,
    centre        = centre,
    right         = right,
    centre_length = centre_length,
    smoothing     = smoothing
  )
}
# Example scheme: 20 units unscaled on each side, centre scaled to a fixed
# 20-point length, smoothed over a 10-point window.
myscheme <- coverage_scheme(
  left = 20,
  centre = "scaled",
  right = 20,
  centre_length = 20,
  smoothing = 10
)
makeCov <- function(coverage, scheme) {
  # Apply a shared formatting scheme (see coverage_scheme()) and return the
  # smoothed coverage as a plain x/y data.frame ready for ggplot geoms.
  formatted <- formatCoverage(coverage,
                              left = scheme$left,
                              centre = scheme$centre,
                              right = scheme$right,
                              centre_length = scheme$centre_length,
                              smoothing = scheme$smoothing)
  data.frame(x = seq_along(formatted$scaled), y = formatted$scaled)
}
# so then RNAmap could be created like so
# ggplot() +
# geom_area(data = makeCov(coverage_sig, scheme=myscheme),
# aes(x,y), fill = "blue") +
# geom_area( data = makeCov(coverage_null, scheme = myscheme),
# aes(x,-y), fill = "gray", alpha = 0.5)
# now create RNAmap() function that returns a clean looking ggplot()
# object with the correct axes, based on the scheme list
#' geom wrapper that draws coverage with ggplot2's area geom.
#'
#' @inheritParams ggplot2::geom_area
#' @return A ggplot2 layer.
geom_coverage <- function(mapping = NULL, data = NULL, stat = "identity", position = "stack",
                          na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ...)
{
  # BUGFIX(review): GeomArea is now namespaced -- the bare name only
  # resolved when ggplot2 happened to be attached
  ggplot2::layer(data = data, mapping = mapping, stat = stat, geom = ggplot2::GeomArea,
                 position = position, show.legend = show.legend, inherit.aes = inherit.aes,
                 params = list(na.rm = na.rm, ...))
}
#ggplot() + geom_area(data = makeCov(coverage_sig) )
StatCoverage <- ggproto("StatCoverage", Stat,
compute_group = function(data, scales) {
#data[(data$x, data$y), , drop = FALSE]
},
required_aes = c("y")
)
stat_coverage <- function(mapping = NULL, data = NULL, geom = "density",
                          position = "identity", na.rm = FALSE, show.legend = NA,
                          inherit.aes = TRUE, ...) {
  # Thin layer() wrapper exposing StatCoverage with a density geom default.
  layer_params <- list(na.rm = na.rm, ...)
  ggplot2::layer(
    stat = StatCoverage,
    geom = geom,
    data = data,
    mapping = mapping,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = layer_params
  )
}
# ggplot(mpg, aes(displ, hwy)) +
# geom_point() +
# stat_coverage(fill = NA, colour = "blue", geom="point")
| /R/mapRNA.R | no_license | jackhump/featuremaps | R | false | false | 11,449 | r |
#' ggplot smoothed coverage as density plot
#'
#' Renders a formatted coverage object as a filled area plot with
#' segment-aware x-axis breaks. Break/label construction is delegated to
#' \code{xBreaksLabels()} instead of being duplicated here.
#'
#' @param coverage A list with elements \code{scaled} (numeric coverage
#'   vector) and \code{metadata} (segment/flank description); when
#'   \code{metadata$centre_mode == "proportion"} an \code{overlap} element
#'   is also read.
#' @param fill Fill colour of the coverage area (also used for the
#'   percentage label in "proportion" mode).
#' @param alpha Fill transparency in [0, 1].
#' @param bar_width Width of the central proportion bar; defaults to half
#'   the centre segment (the bar geom itself is currently disabled).
#' @param bar_fill Fill of the central proportion bar; defaults to \code{fill}.
#' @param bar_alpha Alpha of the central proportion bar; defaults to \code{alpha}.
#'
#' @import ggplot2
#' @return A ggplot object, or NULL when the flank/segment combination is
#'   not supported yet.
#' @export
#'
#' @examples
plotRNAmap <- function(coverage, fill, alpha,
                       bar_width = NA,
                       bar_fill = NA,
                       bar_alpha = NA){
  # extract objects from list
  scaled <- coverage$scaled
  metadata <- coverage$metadata
  print(metadata)
  # density plot
  p <- ggplot() + theme_classic() # make this customisable?
  # seq_along() keeps this well-defined even for zero-length coverage
  p <- p + ggplot2::geom_area( aes(seq_along(scaled), scaled ), colour = NA, fill = fill, alpha = alpha)
  # PROPORTION
  # in "proportion" mode the centre is annotated with the percentage of
  # features that overlap (the bar geom is kept but disabled, as before)
  if( metadata$centre_mode == "proportion"){
    centre_point <- length(scaled) / 2
    my_ymax <- max(scaled)
    if( is.na(bar_alpha) ){
      bar_alpha <- alpha
    }
    if( is.na(bar_width) ){
      bar_width <- 0.5 * metadata$centre_seg
    }
    if( is.na(bar_fill) ){
      bar_fill <- fill
    }
    p <- p +
      #geom_bar( aes( x = centre_point, y = my_ymax * 0.90 ), stat = "identity" , fill = bar_fill, alpha = bar_alpha, width = bar_width) +
      annotate( geom = "text", x = centre_point, y = my_ymax * 0.10, label = paste0( signif(coverage$overlap * 100, digits = 3), "%" ), colour = fill )
  }
  # AXES
  # X axis marks annotate the original flanking parameters AND the parameters
  # set by formatting. The break/label logic is shared with genomap() via
  # xBreaksLabels(); unsupported flank/segment combinations fail soft.
  x_data <- tryCatch(xBreaksLabels(metadata), error = function(e) NULL)
  if (is.null(x_data) || is.null(x_data$breaks)) {
    message("not supported yet")
    return(NULL)
  }
  x_breaks <- x_data$breaks
  x_labels <- x_data$labels
  # total plot length; the symmetric flank-smaller-than-segment case drops
  # the leading +1 (matching the break positions computed for that case)
  total_length <- 1 + metadata$left_seg + metadata$centre_seg + metadata$right_seg
  if (metadata$A_flank < metadata$left_seg && metadata$A_flank < metadata$right_seg) {
    total_length <- metadata$left_seg + metadata$centre_seg + metadata$right_seg
  }
  p <- p +
    scale_x_continuous(
      "",
      breaks = x_breaks,
      label = x_labels,
      limits = c(1, total_length),
      expand = c(0, 0)
    ) +
    scale_y_continuous(
      "Normalised coverage",
      labels = scales::percent,
      expand = c(0, 0) ) +
    theme( axis.line.x = element_line(linetype = 3))
  return(p)
}
#' Compute x-axis breaks and labels for a formatted coverage scheme.
#'
#' Places tick marks at the plot start/end and at segment boundaries,
#' labelling them with distances relative to the feature's 5' and 3' ends.
#' Three symmetric flank/segment combinations are handled.
#'
#' @param metadata List with numeric elements \code{A_flank},
#'   \code{left_seg}, \code{centre_seg} and \code{right_seg}.
#' @return A list with elements \code{breaks} and \code{labels}, or NULL
#'   (with a message) for unsupported combinations. The original crashed
#'   with "object 'x_breaks' not found" in that case.
xBreaksLabels <- function(metadata){
  flank  <- metadata$A_flank
  left   <- metadata$left_seg
  centre <- metadata$centre_seg
  right  <- metadata$right_seg
  # special case when the segment lengths equal the flank on both sides
  if (flank == left && flank == right) {
    x_breaks <- c(
      1,
      flank,
      flank + centre,
      flank + centre + flank
    )
    x_labels <- c(
      -flank,
      0,
      0,
      paste("+", flank)
    )
    return(list(breaks = x_breaks, labels = x_labels))
  }
  # flank is smaller than both segments (symmetric):
  # double tick marks mark the scale change just inside each segment
  if (flank < left && flank < right) {
    x_breaks <- c(
      1,
      1 + flank,
      1 + left,
      1 + left + centre,
      1 + left + centre + right - flank,
      left + centre + right
    )
    x_labels <- c(
      paste0("-", flank),
      "5\'",
      paste0("+", left - flank),
      paste0("-", left - flank),
      "3\'",
      paste0("+", flank)
    )
    return(list(breaks = x_breaks, labels = x_labels))
  }
  # flank is larger than both segments (symmetric)
  if (flank > left && flank > right) {
    x_breaks <- c(
      1,
      1 + left,
      1 + flank,
      1 + flank + centre,
      1 + flank + centre + flank - right,
      flank + centre + flank
    )
    x_labels <- c(
      paste0("-", flank),
      0,
      paste0("+", left - flank),
      paste0("-", left - flank),
      0,
      paste0("+", flank)
    )
    return(list(breaks = x_breaks, labels = x_labels))
  }
  # asymmetric / mixed combinations are not implemented; fail soft
  message("not supported yet")
  NULL
}
# two separate functions
# genomap - plot a single distribution
# genomulti <- plot multiple distributions with the same formatting

#' Plot a single formatted coverage distribution.
#'
#' @param object Coverage list with `scaled` and `metadata` elements.
#' @param fill Fill colour used when `geom = "area"`.
#' @param colour Line colour used when `geom = "line"`.
#' @param alpha Transparency of the coverage geom.
#' @param geom Either "area" (filled) or "line".
#' @return A ggplot object, or NULL when the flank/segment combination is
#'   not supported yet.
genomap <- function( object, fill = "purple3", colour = "purple3", alpha = 0.75, geom = "area" ){
  metadata <- object$metadata
  # axis scheme derived from the formatting metadata
  total_length <- 1 + metadata$left_seg + metadata$centre_seg + metadata$right_seg
  x_data <- tryCatch(xBreaksLabels(metadata), error = function(e) NULL)
  # BUGFIX(review): the original tested `!exists("x_breaks")` AFTER assigning
  # x_breaks, so the guard could never fire and unsupported metadata crashed
  # inside xBreaksLabels instead; test the returned value.
  if (is.null(x_data) || is.null(x_data$breaks)) {
    message("not supported yet")
    return(NULL)
  }
  x_breaks <- x_data$breaks
  x_labels <- x_data$labels
  # create plot
  p <- ggplot() + theme_classic() +
    scale_x_continuous(
      "",
      breaks = x_breaks,
      label = x_labels,
      limits = c(1,total_length),
      expand = c(0, 0)
    ) +
    scale_y_continuous(
      "Normalised coverage",
      labels = scales::percent,
      expand = c(0, 0) ) +
    theme( axis.line.x = element_line(linetype = 3))
  # layer the coverage itself on top
  scaled <- object$scaled
  if( geom == "area" ){
    p <- p + geom_area( aes(seq_along(scaled), scaled ), fill = fill, alpha = alpha)
  }
  if( geom == "line"){
    p <- p + geom_line(aes(seq_along(scaled), scaled ), colour = colour, alpha = alpha)
  }
  return(p)
}
# just return a plot without any geoms
# Build an empty coverage plot (axes only) from a formatting scheme, so
# several distributions can be layered on top with identical formatting.
#
# scheme: metadata list with left_seg, centre_seg, right_seg and A_flank
#   components, as consumed by xBreaksLabels().
#
# Returns a ggplot object with the coverage axes but no data layers, or
# NULL when the layout is not supported by xBreaksLabels().
genomulti <- function(scheme){
  metadata <- scheme
  # total x extent of the plot (left flank + centre + right flank)
  total_length <- 1 + metadata$left_seg + metadata$centre_seg + metadata$right_seg
  x_data <- xBreaksLabels(metadata)
  x_breaks <- x_data$breaks
  x_labels <- x_data$labels
  # BUG FIX: exists("x_breaks") was always TRUE (the name is bound just
  # above even when its value is NULL), so unsupported layouts slipped
  # through.  Test the value itself instead.
  if( is.null(x_breaks) ){
    message("not supported yet")
    return(NULL)
  }
  p <- ggplot() + theme_classic() +
    scale_x_continuous(
      "",
      breaks = x_breaks,
      labels = x_labels,  # spelled out in full: avoid partial matching of 'label'
      limits = c(1, total_length),
      expand = c(0, 0)
    ) +
    scale_y_continuous(
      "Normalised coverage",
      labels = scales::percent,
      expand = c(0, 0) ) +
    theme( axis.line.x = element_line(linetype = 3))
  return(p)
}
# Build a ggplot2 layer drawing one coverage track as a filled area.
#
# coverage2: list with a $scaled numeric vector (normalised coverage).
# fill: fill colour of the area.
# alpha: transparency (default 0.5).
# bar_width / bar_fill / bar_alpha: accepted for interface compatibility
#   but currently unused -- TODO implement or drop.
#
# Returns a ggplot2 layer to be added to an existing plot.
addCoverageTrack <- function(coverage2, fill, alpha=0.5,
                             bar_width = NA,
                             bar_fill = NA,
                             bar_alpha = NA){
  density <- ggplot2::geom_area(
    # seq_along() is safe for zero-length input, unlike 1:length();
    # aes() qualified for consistency with the namespaced geom_area call.
    ggplot2::aes(seq_along(coverage2$scaled), coverage2$scaled ),
    colour = NA, fill = fill, alpha = alpha
  )
  return(density)
}
# play with creating new ggplot2 stats and layers
# what if you set the formatting scheme once and then apply that
# as a function to each intersection?
# Bundle the formatting parameters for a coverage plot into one list, so
# the same layout can be applied as a unit to every track/intersection.
#
# Returns a named list with components left, centre, right,
# centre_length and smoothing (same names and order as the arguments).
coverage_scheme <- function(left, centre, right, centre_length, smoothing) {
  list(
    left          = left,
    centre        = centre,
    right         = right,
    centre_length = centre_length,
    smoothing     = smoothing
  )
}
# Default scheme used in the examples below: 20-position flanks, a
# centre segment rescaled ("scaled") to 20 positions, smoothing window
# of 10.
myscheme <- coverage_scheme(
  left = 20,
  centre = "scaled",
  right = 20,
  centre_length = 20,
  smoothing = 10
)
# Apply a formatting scheme to a raw coverage object and return the
# result as a plain data frame ready for ggplot2.
#
# coverage: raw coverage object accepted by formatCoverage().
# scheme: list produced by coverage_scheme().
#
# Returns a data.frame with columns x (position index) and y (the
# scaled coverage values).
makeCov <- function(coverage, scheme) {
  formatted <- formatCoverage(
    coverage,
    left          = scheme$left,
    centre        = scheme$centre,
    right         = scheme$right,
    centre_length = scheme$centre_length,
    smoothing     = scheme$smoothing
  )
  y <- formatted$scaled
  data.frame(x = seq_along(y), y = y)
}
# so then RNAmap could be created like so
# ggplot() +
# geom_area(data = makeCov(coverage_sig, scheme=myscheme),
# aes(x,y), fill = "blue") +
# geom_area( data = makeCov(coverage_null, scheme = myscheme),
# aes(x,-y), fill = "gray", alpha = 0.5)
# now create RNAmap() function that returns a clean looking ggplot()
# object with the correct axes, based on the scheme list
# ggplot2 geom wrapper: draw coverage as a stacked area by delegating to
# ggplot2's GeomArea.
#
# Arguments mirror the standard ggplot2 layer interface; anything in
# ... is forwarded as layer parameters.
geom_coverage <- function(mapping = NULL, data = NULL, stat = "identity", position = "stack",
                          na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ...)
{
  # CONSISTENCY FIX: GeomArea was referenced unqualified while layer()
  # was namespaced; qualify it so the wrapper also works when ggplot2 is
  # loaded but not attached.
  ggplot2::layer(data = data, mapping = mapping, stat = stat, geom = ggplot2::GeomArea,
                 position = position, show.legend = show.legend, inherit.aes = inherit.aes,
                 params = list(na.rm = na.rm, ...))
}
#ggplot() + geom_area(data = makeCov(coverage_sig) )
# Experimental ggproto Stat for coverage plots.
# NOTE(review): compute_group is a stub -- its only statement is
# commented out, so it returns NULL and the stat draws nothing.  Needs a
# real implementation before use.
StatCoverage <- ggproto("StatCoverage", Stat,
                        compute_group = function(data, scales) {
                          #data[(data$x, data$y), , drop = FALSE]
                        },
                        required_aes = c("y")
)
# ggplot2 stat wrapper exposing the experimental StatCoverage through
# the usual stat_*() interface; extra arguments in ... are forwarded as
# layer parameters.
stat_coverage <- function(mapping = NULL, data = NULL, geom = "density",
                          position = "identity", na.rm = FALSE, show.legend = NA,
                          inherit.aes = TRUE, ...) {
  layer_params <- list(na.rm = na.rm, ...)
  ggplot2::layer(
    stat = StatCoverage,
    data = data,
    mapping = mapping,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = layer_params
  )
}
# ggplot(mpg, aes(displ, hwy)) +
# geom_point() +
# stat_coverage(fill = NA, colour = "blue", geom="point")
|
# 2020-11-25 --------------------------------------------------------------
# Follow-up ("lollipop") figures for two toy cohorts plus an
# odds-vs-probability curve; each plot is saved under figures/.
# p1/p2: one row per participant; t = observed event time (NA = none),
# f = end of follow-up, id = vertical position in the plot.
p1 <- data.frame(id = 10:1,
                 y = c(1, 1, 1, 1, 1, 0, 0, 0, 0, 0),  # NOTE(review): y is never used below
                 t = c(1, 7, 4, 5, 6, NA, NA, NA, NA, NA),
                 f = c(10, 10, 10, 10, 10, 10, 10, 6, 6, 6))
p2 <- data.frame(id = 8:1,
                 t = c(4, 5, 6, 7, NA, NA, NA, NA),
                 f = rep(10, 8))
library(ggplot2)
# Panel A: one horizontal segment per participant (follow-up) with a dot
# at the event time; dashed verticals mark years 3 and 8.
# NOTE(review): with ggplot2 >= 3.4, `size` on line geoms is deprecated
# in favour of `linewidth` -- confirm which ggplot2 version is targeted.
ggplot(data = p1) +
  geom_segment(aes(x = 0, xend = f, y = id, yend = id), size = 2) +
  geom_point(aes(x = t, y = id), size = 5) +
  scale_x_continuous(name = "Năm theo dõi", breaks = 1:10) +
  scale_y_continuous(name = "Người tham gia", breaks = 1:10) +
  geom_vline(xintercept = c(3, 8), linetype = 2) +
  theme_bw() +
  ggtitle("A")
ggsave(filename = file.path("figures", "SDBT_01.png"), width = 5, height = 3)
# Panel B: same layout for the second cohort.
ggplot(data = p2) +
  geom_segment(aes(x = 0, xend = f, y = id, yend = id), size = 2) +
  geom_point(aes(x = t, y = id), size = 5) +
  scale_x_continuous(name = "Năm theo dõi", breaks = 1:10) +
  scale_y_continuous(name = "Người tham gia", breaks = 1:10) +
  geom_vline(xintercept = c(3, 8), linetype = 2) +
  theme_bw() +
  ggtitle("B")
ggsave(filename = file.path("figures", "SDBT_02.png"), width = 5, height = 3)
# Odds as a function of probability: odds = p / (1 - p); dashed lines
# mark p = 0.5 and odds = 1.
prob <- seq(from = 0, to = 0.9, by = 0.01)
odds <- prob/(1-prob)
ggplot(data = data.frame(prob = prob, odds = odds), aes(x = prob, y = odds)) +
  geom_line(size = 2) +
  geom_vline(xintercept = 0.5, size = 1, linetype = 2) +
  geom_hline(yintercept = 1, size = 1, linetype = 2) +
  scale_x_continuous(name = "Tỉ lệ", breaks = seq(from = 0, to = 1, by = 0.1)) +
  scale_y_continuous(name = "Số chênh", breaks = seq(from = 0, to = 10)) +
  theme_bw()
ggsave(filename = file.path("figures", "odds_prop.png"), width = 5, height = 3)
| /figures.R | no_license | DECIDELab/slides | R | false | false | 1,792 | r |
# NOTE(review): this section is a verbatim duplicate of the figures.R
# block earlier in this file (the dataset's text column is repeated
# alongside the metadata row above) -- consider deduplicating.
# 2020-11-25 --------------------------------------------------------------
p1 <- data.frame(id = 10:1,
                 y = c(1, 1, 1, 1, 1, 0, 0, 0, 0, 0),
                 t = c(1, 7, 4, 5, 6, NA, NA, NA, NA, NA),
                 f = c(10, 10, 10, 10, 10, 10, 10, 6, 6, 6))
p2 <- data.frame(id = 8:1,
                 t = c(4, 5, 6, 7, NA, NA, NA, NA),
                 f = rep(10, 8))
library(ggplot2)
ggplot(data = p1) +
  geom_segment(aes(x = 0, xend = f, y = id, yend = id), size = 2) +
  geom_point(aes(x = t, y = id), size = 5) +
  scale_x_continuous(name = "Năm theo dõi", breaks = 1:10) +
  scale_y_continuous(name = "Người tham gia", breaks = 1:10) +
  geom_vline(xintercept = c(3, 8), linetype = 2) +
  theme_bw() +
  ggtitle("A")
ggsave(filename = file.path("figures", "SDBT_01.png"), width = 5, height = 3)
ggplot(data = p2) +
  geom_segment(aes(x = 0, xend = f, y = id, yend = id), size = 2) +
  geom_point(aes(x = t, y = id), size = 5) +
  scale_x_continuous(name = "Năm theo dõi", breaks = 1:10) +
  scale_y_continuous(name = "Người tham gia", breaks = 1:10) +
  geom_vline(xintercept = c(3, 8), linetype = 2) +
  theme_bw() +
  ggtitle("B")
ggsave(filename = file.path("figures", "SDBT_02.png"), width = 5, height = 3)
prob <- seq(from = 0, to = 0.9, by = 0.01)
odds <- prob/(1-prob)
ggplot(data = data.frame(prob = prob, odds = odds), aes(x = prob, y = odds)) +
  geom_line(size = 2) +
  geom_vline(xintercept = 0.5, size = 1, linetype = 2) +
  geom_hline(yintercept = 1, size = 1, linetype = 2) +
  scale_x_continuous(name = "Tỉ lệ", breaks = seq(from = 0, to = 1, by = 0.1)) +
  scale_y_continuous(name = "Số chênh", breaks = seq(from = 0, to = 10)) +
  theme_bw()
ggsave(filename = file.path("figures", "odds_prop.png"), width = 5, height = 3)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_samples.R
\name{compute_wholesample_meancotisation}
\alias{compute_wholesample_meancotisation}
\title{Compute wholesample meancotisation}
\usage{
compute_wholesample_meancotisation(db, name, start, end)
}
\arguments{
\item{db}{The database in which the output table is created.}
\item{name}{Name of the output table.}
\item{start}{Start date of the computation period.}
\item{end}{End date of the computation period.}
}
\value{
table in the database
}
\description{
Compute wholesample meancotisation
}
\examples{
\dontrun{
compute_wholesample_meancotisation(
db = database_signauxfaibles,
name = "wholesample_meancotisation",
start = "2013-01-01",
end = "2017-03-01"
)
}
}
| /man/compute_wholesample_meancotisation.Rd | no_license | SGMAP-AGD/opensignauxfaibles | R | false | true | 668 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_samples.R
\name{compute_wholesample_meancotisation}
\alias{compute_wholesample_meancotisation}
\title{Compute wholesample meancotisation}
\usage{
compute_wholesample_meancotisation(db, name, start, end)
}
\arguments{
\item{db}{The database in which the output table is created.}
\item{name}{Name of the output table.}
\item{start}{Start date of the computation period.}
\item{end}{End date of the computation period.}
}
\value{
table in the database
}
\description{
Compute wholesample meancotisation
}
\examples{
\dontrun{
compute_wholesample_meancotisation(
db = database_signauxfaibles,
name = "wholesample_meancotisation",
start = "2013-01-01",
end = "2017-03-01"
)
}
}
|
# This file is code used to solve problems in the textbook Foundations and Applications
# of Statistics - An Introduction Using R
# Ex: 3.1.1 Show that f is a pdf.
# define the pdf
f <- function(x) {3 * x^2 * (0 <= x & x <=1)}
integrate(f, 0, 1)
integrate(f, 0, 0.5)$value # give the approximate value
require(MASS) # fractions() function is in MASS
fractions(integrate(f, 0, 0.5)$value) # convert the solution to a fraction
# Ex: 3.1.3 Integrate a uniform pdf
x <- 5:15
# typically we would define a pdf this way:
tempf <- function(x) {0.1 * (0 <= x & x <= 10)}
tempf(x)
integrate(tempf,7,10)
runif(6,0,10) # generate 6 random values from a uniform dist [0,10]
dunif(5,0,10) # calculate density of unif dist at 5
punif(3,0,10) # calculate prob(x<3) on unif dist
qunif(0.25,0,10) # calculate x for the 25th quantile
# Ex: 3.1.11
# Simulate the timing of Poisson events using the exponential distribution to model
# the time between consecutive events.
runs <- 8; size <- 40
# randomly generate 8 runs of 40 exponentially distributed arrivals
time <- replicate(runs, cumsum(rexp(size)))
df <- data.frame(time = as.vector(time), run = rep(1:runs, each=size))
# use the shortest run as the maximum run time
stop <- min(apply(time, 2, max))
stop <- 5 * trunc(stop/5)   # round down to a multiple of 5
# NOTE(review): 'time' is a matrix; the logical index flattens in the
# same column-major order as as.vector(time) above, so rows line up.
df <- df[time <= stop,]
#graph the results
require(graphics)
# NOTE(review): 'panel' and panel.abline()/panel.stripchart() are
# lattice idioms; base graphics stripchart() has no panel argument --
# confirm whether lattice::stripplot() was intended here.
myplot <- stripchart(run~time, df, pch=1, cex=0.7, col='black',
                     panel=function(x,y,...){
                       panel.abline(h=seq(1.5,7.5,by=1),col='gray60')
                       panel.abline(v=seq(0,stop,by=5),col='gray60')
                       panel.stripchart(x,y,...)
                     })
# Ex: 3.6.1 Build a qq-plot for data we assume is normally distributed
x <- c(-0.16, 1.17, -0.43, -0.02, 1.06, -1.35, 0.65, -1.12, 0.03, -1.44)
# sort the data
x.sorted <- sort(x)
q <- seq(0.05, 0.95, by=0.1)
y <- qnorm(q)  # theoretical normal quantiles to plot against
require(lattice) # xyplot is in lattice
# NOTE(review): this assignment shadows base R's qqplot() function.
qqplot <- xyplot(x.sorted~y)
ppoints(10)
myplot <- qqmath(x) # graph the qq plot
require(fastR) # xqqmath is in fastR
myplot <- xqqmath(~x,fitline=TRUE)
| /Chap 3 R.R | no_license | gregxrenner/Data-Analysis | R | false | false | 2,156 | r | # This file is code used to solve problems in the textbook Foundations and Applications
# NOTE(review): this section is a verbatim duplicate of the "Chap 3 R.R"
# block earlier in this file (dataset text column repeated; the first
# comment line is fused into the metadata row above) -- consider
# deduplicating.
# of Statistics - An Introduction Using R
# Ex: 3.1.1 Show that f is a pdf.
# define the pdf
f <- function(x) {3 * x^2 * (0 <= x & x <=1)}
integrate(f, 0, 1)
integrate(f, 0, 0.5)$value # give the approximate value
require(MASS) # fractions() function is in MASS
fractions(integrate(f, 0, 0.5)$value) # convert the solution to a fraction
# Ex: 3.1.3 Integrate a uniform pdf
x <- 5:15
# typically we would define a pdf this way:
tempf <- function(x) {0.1 * (0 <= x & x <= 10)}
tempf(x)
integrate(tempf,7,10)
runif(6,0,10) # generate 6 random values from a uniform dist [0,10]
dunif(5,0,10) # calculate density of unif dist at 5
punif(3,0,10) # calcualte prob(x<3) on unif dist
qunif(0.25,0,10) # calcuate x for the 25th quantile
# Ex: 3.1.11
# Simulate the timing of Poisson events using the exponential distribution to model
# the time between consecutive events.
runs <- 8; size <- 40
# randomly generate 8 runs of 40 exponentially distributed arrivals
time <- replicate(runs, cumsum(rexp(size)))
df <- data.frame(time = as.vector(time), run = rep(1:runs, each=size))
# use the shortest run as the maximum run time
stop <- min(apply(time, 2, max))
stop <- 5 * trunc(stop/5)
df <- df[time <= stop,]
#graph the results
require(graphics)
myplot <- stripchart(run~time, df, pch=1, cex=0.7, col='black',
                     panel=function(x,y,...){
                       panel.abline(h=seq(1.5,7.5,by=1),col='gray60')
                       panel.abline(v=seq(0,stop,by=5),col='gray60')
                       panel.stripchart(x,y,...)
                     })
# Ex: 3.6.1 Build a qq-plot for data we assume is normally distributed
x <- c(-0.16, 1.17, -0.43, -0.02, 1.06, -1.35, 0.65, -1.12, 0.03, -1.44)
# sort the data
x.sorted <- sort(x)
q <- seq(0.05, 0.95, by=0.1)
y <- qnorm(q)
require(lattice) # xyplot is in lattice
qqplot <- xyplot(x.sorted~y)
ppoints(10)
myplot <- qqmath(x) # graph the qq plot
require(fastR) # xqqmath is in fastR
myplot <- xqqmath(~x,fitline=TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen-namespace-docs.R,
% R/gen-namespace-examples.R, R/gen-namespace.R
\name{torch_as_strided}
\alias{torch_as_strided}
\title{As_strided}
\usage{
torch_as_strided(self, size, stride, storage_offset = NULL)
}
\arguments{
\item{self}{(Tensor) the input tensor.}
\item{size}{(tuple or ints) the shape of the output tensor}
\item{stride}{(tuple or ints) the stride of the output tensor}
\item{storage_offset}{(int, optional) the offset in the underlying storage of the output tensor}
}
\description{
As_strided
}
\section{as_strided(input, size, stride, storage_offset=0) -> Tensor }{
Create a view of an existing \code{torch_Tensor} \code{input} with specified
\code{size}, \code{stride} and \code{storage_offset}.
}
\section{Warning}{
More than one element of a created tensor may refer to a single memory
location. As a result, in-place operations (especially ones that are
vectorized) may result in incorrect behavior. If you need to write to
the tensors, please clone them first.
\if{html}{\out{<div class="sourceCode">}}\preformatted{Many PyTorch functions, which return a view of a tensor, are internally
implemented with this function. Those functions, like
`torch_Tensor.expand`, are easier to read and are therefore more
advisable to use.
}\if{html}{\out{</div>}}
}
\examples{
if (torch_is_installed()) {
x = torch_randn(c(3, 3))
x
t = torch_as_strided(x, list(2, 2), list(1, 2))
t
t = torch_as_strided(x, list(2, 2), list(1, 2), 1)
t
}
}
| /man/torch_as_strided.Rd | permissive | mlverse/torch | R | false | true | 1,535 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen-namespace-docs.R,
% R/gen-namespace-examples.R, R/gen-namespace.R
\name{torch_as_strided}
\alias{torch_as_strided}
\title{As_strided}
\usage{
torch_as_strided(self, size, stride, storage_offset = NULL)
}
\arguments{
\item{self}{(Tensor) the input tensor.}
\item{size}{(tuple or ints) the shape of the output tensor}
\item{stride}{(tuple or ints) the stride of the output tensor}
\item{storage_offset}{(int, optional) the offset in the underlying storage of the output tensor}
}
\description{
As_strided
}
\section{as_strided(input, size, stride, storage_offset=0) -> Tensor }{
Create a view of an existing \code{torch_Tensor} \code{input} with specified
\code{size}, \code{stride} and \code{storage_offset}.
}
\section{Warning}{
More than one element of a created tensor may refer to a single memory
location. As a result, in-place operations (especially ones that are
vectorized) may result in incorrect behavior. If you need to write to
the tensors, please clone them first.
\if{html}{\out{<div class="sourceCode">}}\preformatted{Many PyTorch functions, which return a view of a tensor, are internally
implemented with this function. Those functions, like
`torch_Tensor.expand`, are easier to read and are therefore more
advisable to use.
}\if{html}{\out{</div>}}
}
\examples{
if (torch_is_installed()) {
x = torch_randn(c(3, 3))
x
t = torch_as_strided(x, list(2, 2), list(1, 2))
t
t = torch_as_strided(x, list(2, 2), list(1, 2), 1)
t
}
}
|
## We continue with the data previously loaded.
## (DESeq2 exercise answer script: differential expression analysis of a
## Smchd1-null vs WT comparison; assumes 'dds' was built earlier.)
## Now, do the complete differential expression analysis:
dds <- DESeq(dds) #this adds things to the 'dds' object
## 1. Amongst others, data has now been normalized. This is visible in
## the colData.
## What is the normalization factor for the 'odd one out' sample
## from the previous exercise?
colData(dds)
## 2. To get the read counts after normalization, specify
## normalized=TRUE as an extra argument to counts(). Compare the
## boxplots of the unnormalized data (done in the last exercise of the
## previous session) with those of normalized data. Did the
## normalization work?
boxplot(counts(dds, normalized=TRUE), ylim=c(0,2000))
## To get the statistical results out of the normalized data,
## use the results() function. It needs the DESeqDataSet and
## a 'contrast': this specifies what experimental factor to
## compare (here: 'group'), which samples are 'treatment', and
## which samples are 'control'. It returns a table-like data
## structure
res <- results(dds, contrast=c("group", "Smchd1-null", "WT"))
## 3. The summary() function again gives a useful overview of the results
## How many outliers are there, and how many 'low counts'?
summary(res)
## 4. To get an impression of the data as a whole, the change per
## gene versus its average is plotted. Use the plotMA() function for this,
## and pass it the res object as an argument.
## NOTE(review): the instruction says to pass 'res', but the code passes
## 'dds' -- confirm which object is intended.
plotMA(dds)
## 5. By default, plotMA() tries to show most of the data, and chooses
## its own y-axis limits. Genes outside the range are shown as
## triangles. Play with the ylim argument to show all the data. Better
## yet, use min() and max() on the 'log2FoldChange' column of your
## results data to find the limits automatically. To make the min() and
## max() functions ignore the NA's, you have to also pass an na.rm=TRUE
## argument.
lowest <- min(res[,'log2FoldChange'], na.rm=TRUE)
highest <- max(res[,'log2FoldChange'], na.rm=TRUE)
plotMA(dds, ylim=c(lowest,highest))
## 6. Have a look at e.g. the first 10 rows of the results table. What
## do the columns mean? Why is padj greater than pvalue? What are the
## statistics for the Smchd1 gene? (Remember how you selected data on a
## particular gene in the first exercise).
res[1:10, ]
res['Smchd1', ]
## 7. The genes Ndn, Mkrn3 and Peg12 are known to be repressed by
## Smchd1. Do the statistics confirm this?
res['Ndn',]
res['Mkrn3',]
res['Peg12',]
## 8. Use plot(x= ... , y= ... ) to make a plot of padj versus pvalue
## (remember how you selected columns in the first exercises). Where are
## the differences between the two largest? What multiple testing
## correction was used? Feel free to play and use different multiple
## testing correction methods when calling results() (see its
## documentation)
plot(x=res[,'pvalue'], y=res[,'padj'])
## 9. Function plotCounts() gives an overview, per experimental group,
## of the expression changes for a gene. Use the which.min function to
## find the most significantly changed gene, and plot its counts. Do the
## same for the gene that is 'most down' (any surprises there?), and the
## gene that is most up.
## NOTE(review): the instruction asks for which.min (most significant),
## but the code uses which.max on log2FoldChange (the most up-regulated
## gene) -- confirm which was intended.
plotCounts(dds, gene=which.max(res[,'log2FoldChange']), intgroup="group")
## To find the top 10 genes that, in the Smchd1 knock-out, go down or go
## up most, we have first have to sort the results table. In R, this is
## done as follows:
order.incr <- order(res[, 'log2FoldChange'])
res.incr <- res[order.incr, ]
order.decr <- order(res[, 'log2FoldChange'], decreasing=TRUE)
res.decr <- res[order.decr, ]
## order() simply calculates a vector of numbers that puts the rows of
## the table in the the right order. By default, the ordering is from
## low to high; to get a descending order, specify 'decreasing=TRUE' as
## an extra argument to order()
## 10. Find the 10 genes that go up most, and those that go down most
res.incr[1:10,] #down most
res.decr[1:10,] #up most
| /obsolete/2-statistics.R | no_license | plijnzaad/ibls-expression | R | false | false | 3,993 | r | ## We continue with the data previously loaded.
## NOTE(review): this section is a verbatim duplicate of the
## "2-statistics.R" block earlier in this file (dataset text column
## repeated; its first line is fused into the metadata row above) --
## consider deduplicating.
## Now, do the complete differential expression analysis:
dds <- DESeq(dds) #this adds things to the 'dds' object
## 1. Amongst others, data has now been normalized. This is visible in
## the colData.
## What is the normalization factor for the 'odd one out' sample
## from the previous exercise?
colData(dds)
## 2. To get the read counts after normalization, specify
## normalized=TRUE as an extra argument to counts(). Compare the
## boxplots of the unnormalized data (done in the last exercise of the
## previous session) with those of normalized data. Did the
## normalization work?
boxplot(counts(dds, normalized=TRUE), ylim=c(0,2000))
## To get the statistical results out of the normalized data,
## use the results() function. It needs the DESeqDataSet and
## a 'contrast': this specifies what experimental factor to
## compare (here: 'group'), which samples are 'treatment', and
## which samples are 'control'. It returns a table-like data
## structure
res <- results(dds, contrast=c("group", "Smchd1-null", "WT"))
## 3. The summary() function again gives a useful overview of the results
## How many outliers are there, and how many 'low counts'?
summary(res)
## 4. To get an impression of the data as a whole, the change per
## gene versus its average is plotted. Use the plotMA() function for this,
## and pass it the res object as an argument.
plotMA(dds)
## 5. By default, plotMA() tries to show most of the data, and chooses
## its own y-axis limits. Genes outside the range are shown as
## triangles. Play with the ylim argument to show all the data. Better
## yet, use min() and max() on the 'log2FoldChange' column of your
## results data to find the limits automatically. To make the min() and
## max() functions ignore the NA's, you have to also pass an na.rm=TRUE
## argument.
lowest <- min(res[,'log2FoldChange'], na.rm=TRUE)
highest <- max(res[,'log2FoldChange'], na.rm=TRUE)
plotMA(dds, ylim=c(lowest,highest))
## 6. Have a look at e.g. the first 10 rows of the results table. What
## do the columns mean? Why is padj greater than pvalue? What are the
## statistics for the Smchd1 gene? (Remember how you selected data on a
## particular gene in the first exercise).
res[1:10, ]
res['Smchd1', ]
## 7. The genes Ndn, Mkrn3 and Peg12 are known to be repressed by
## Smchd1. Do the statistics confirm this?
res['Ndn',]
res['Mkrn3',]
res['Peg12',]
## 8. Use plot(x= ... , y= ... ) to make a plot of padj versus pvalue
## (remember how you selected columns in the first exercises). Where are
## the differences between the two largest? What multiple testing
## correction was used? Feel free to play and use different multiple
## testing correction methods when calling results() (see its
## documentation)
plot(x=res[,'pvalue'], y=res[,'padj'])
## 9. Function plotCounts() gives an overview, per experimental group,
## of the expression changes for a gene. Use the which.min function to
## find the most significantly changed gene, and plot its counts. Do the
## same for the gene that is 'most down' (any surprises there?), and the
## gene that is most up.
plotCounts(dds, gene=which.max(res[,'log2FoldChange']), intgroup="group")
## To find the top 10 genes that, in the Smchd1 knock-out, go down or go
## up most, we have first have to sort the results table. In R, this is
## done as follows:
order.incr <- order(res[, 'log2FoldChange'])
res.incr <- res[order.incr, ]
order.decr <- order(res[, 'log2FoldChange'], decreasing=TRUE)
res.decr <- res[order.decr, ]
## order() simply calculates a vector of numbers that puts the rows of
## the table in the the right order. By default, the ordering is from
## low to high; to get a descending order, specify 'decreasing=TRUE' as
## an extra argument to order()
## 10. Find the 10 genes that go up most, and those that go down most
res.incr[1:10,] #down most
res.decr[1:10,] #up most
|
library(igraph)
library(netrw)
# Simulate random walks on an Erdos-Renyi random graph and study how far
# walkers end up from their start vertex as a function of walk length t.
#
# node: number of vertices (also used as the number of walkers).
# probability: edge probability p of the G(n, p) random graph.
#
# Side effects only: prints the graph diameter and draws
#   - <s(t)>, the mean start-to-end shortest-path distance, vs t;
#   - s^2(t), the mean squared deviation of that distance, vs t;
#   - for node == 1000, degree histograms of the whole graph and of the
#     walk end points.
randomWalker = function(node, probability){
  random_network = random.graph.game(n = node, p = probability, directed = FALSE)
  cat("Diameter of network with", node, "nodes = ", diameter(random_network))
  average_step_t = numeric()
  average_standard_deviation_t = numeric()
  # all-pairs shortest-path distances, computed once up front
  distance_matrix = shortest.paths(random_network, v = V(random_network), to = V(random_network))
  deg_random_walk = numeric()
  for (t in 1:35) {
    distance = numeric()
    vertex_sequence = netrw(random_network, walker.num = node, damping = 1, T = t, output.walk.path = TRUE)$walk.path # get vertex sequence of random walk
    for(n in (1:node))
    {
      start_vertex = vertex_sequence[1,n]
      tail_vertex = vertex_sequence[t,n]
      shortest_distance = distance_matrix[start_vertex, tail_vertex]
      # unreachable pairs (Inf) are counted as distance 0
      if (shortest_distance == Inf) {
        shortest_distance = 0
      }
      distance = c(distance, shortest_distance)
      deg_random_walk = c(deg_random_walk, degree(random_network, v = tail_vertex))
    }
    average_step_t = c(average_step_t, mean(distance))
    # NOTE(review): this is the mean squared deviation (variance), not
    # the standard deviation, despite the variable name; it does match
    # the s^2(t) axis label used below.
    average_standard_deviation_t = c(average_standard_deviation_t, mean((distance - mean(distance))**2))
  }
  # NOTE(review): 'n' below is the leaked inner-loop index; it equals
  # 'node' only because the loop always runs to node -- use 'node'.
  plot(average_step_t+1, type ='o', main = paste("<s(t)> v.s. t with ", n, "nodes"), xlab = "t(number of steps)", ylab = "<s(t)>Average distance")
  plot(average_standard_deviation_t, type ='o', main = paste("s^2(t) v.s. t with ", n, "nodes"), xlab = "t(number of steps)", ylab = "s^2(t)Standard Deviation")
  if (node == 1000) {
    deg_network = degree(random_network)
    hist(x = deg_network, breaks = seq(from = min(deg_network), to = max(deg_network), by=1), main = "Degree Distribution for Random Undirected Graph (with n=1000)", xlab = "Degrees")
    hist(x = deg_random_walk, breaks = seq(from = min(deg_random_walk), to = max(deg_random_walk), by=1), main = "Degree Distribution at end of Random Walk", xlab = "Degrees")
  }
}
# Run the simulation for three network sizes.
cat("Executing for Random Network with 1000 nodes")
randomWalker(node = 1000, 0.01)
cat("Executing for Random Network with 100 nodes")
randomWalker(node = 100, 0.01)
cat("Executing for Random Network with 10000 nodes")
randomWalker(node = 10000, 0.01)
| /HW2_004773895_404753334_704775693/HW2_004773895_404753334_704775693/code/Q1.R | no_license | jameszrx/EE232E-Network-and-Flows | R | false | false | 2,214 | r | library(igraph)
library(netrw)
# NOTE(review): this section is a verbatim duplicate of the Q1.R block
# earlier in this file (dataset text column repeated; its first line,
# library(igraph), is fused into the metadata row above) -- consider
# deduplicating.
randomWalker = function(node, probability){
  random_network = random.graph.game(n = node, p = probability, directed = FALSE)
  cat("Diameter of network with", node, "nodes = ", diameter(random_network))
  average_step_t = numeric()
  average_standard_deviation_t = numeric()
  distance_matrix = shortest.paths(random_network, v = V(random_network), to = V(random_network))
  deg_random_walk = numeric()
  for (t in 1:35) {
    distance = numeric()
    vertex_sequence = netrw(random_network, walker.num = node, damping = 1, T = t, output.walk.path = TRUE)$walk.path # get vertex sequence of random walk
    for(n in (1:node))
    {
      start_vertex = vertex_sequence[1,n]
      tail_vertex = vertex_sequence[t,n]
      shortest_distance = distance_matrix[start_vertex, tail_vertex]
      if (shortest_distance == Inf) {
        shortest_distance = 0
      }
      distance = c(distance, shortest_distance)
      deg_random_walk = c(deg_random_walk, degree(random_network, v = tail_vertex))
    }
    average_step_t = c(average_step_t, mean(distance))
    average_standard_deviation_t = c(average_standard_deviation_t, mean((distance - mean(distance))**2))
  }
  plot(average_step_t+1, type ='o', main = paste("<s(t)> v.s. t with ", n, "nodes"), xlab = "t(number of steps)", ylab = "<s(t)>Average distance")
  plot(average_standard_deviation_t, type ='o', main = paste("s^2(t) v.s. t with ", n, "nodes"), xlab = "t(number of steps)", ylab = "s^2(t)Standard Deviation")
  if (node == 1000) {
    deg_network = degree(random_network)
    hist(x = deg_network, breaks = seq(from = min(deg_network), to = max(deg_network), by=1), main = "Degree Distribution for Random Undirected Graph (with n=1000)", xlab = "Degrees")
    hist(x = deg_random_walk, breaks = seq(from = min(deg_random_walk), to = max(deg_random_walk), by=1), main = "Degree Distribution at end of Random Walk", xlab = "Degrees")
  }
}
cat("Executing for Random Network with 1000 nodes")
randomWalker(node = 1000, 0.01)
cat("Executing for Random Network with 100 nodes")
randomWalker(node = 100, 0.01)
cat("Executing for Random Network with 10000 nodes")
randomWalker(node = 10000, 0.01)
|
## ----load library, message=FALSE, warning=FALSE, include=FALSE----------------
# knitr::purl() output of the GrafsGGPLOT vignette: survey haul maps
# with ggplot2/plotly plus a kable results table (uses the CampR
# package).
library(ggplot2)
library(CampR)
library(plotly)
# library(RColorBrewer)
# yor_col<- brewer.pal(7, "Greens")
## ----Data---------------------------------------------------------------------
# Coastline polygons for the Porcupine area (Porc.map -- presumably
# provided by CampR; confirm).
Porc<-ggplot2::map_data(Porc.map)
head(Porc)
# Haul data for survey "P16" (stored as 'hake'); out.dat=T returns the
# data instead of plotting.  NOTE(review): prefer TRUE/FALSE over T/F.
hake<-CampR::maphist(1,50,"P16","Porc",out.dat=T,plot=F)
## ----Graf Porcupine-----------------------------------------------------------
# Bubble map: point size ~ sqrt(numero); rendered interactively below.
p<-ggplot2::ggplot(hake)+
  geom_polygon(aes(long,lat,group=group),data=Porc,fill="white",color="darkgrey")+
  geom_point(aes(x=long,y=lat,size=sqrt(numero),text=lan),color="blue")+
  scale_size_continuous(name="No. ind.")+coord_fixed(1.3)
# NOTE(review): tooltip requests "lance", but the aes above maps
# text=lan -- confirm the intended tooltip variable.
ggplotly(p,tooltip=c("text","lance"),width=800,height=500)
## ----resultados tabla---------------------------------------------------------
library(knitr)
library(kableExtra)
options(knitr.table.format = "markdown")
kable(databICES(1,50,"N16","Cant"),digits=2,caption="Merluza en 2016 Cantábrico y Galicia") %>%
  kable_styling(bootstrap_options="condensed",full_width=F,position="center")
## ----Demersales datos---------------------------------------------------------
# Coastline polygons for the "Nort" area map.
Nort<-ggplot2::map_data(Nort.map)
head(Nort)
## ----mapas Demersales---------------------------------------------------------
ggplot2::ggplot(data=Nort)+geom_polygon(aes(long,lat,fill=region,group=group),col="white")+
  coord_fixed(1.3)
| /vignettes/GrafsGGPLOT.R | no_license | Franvgls/CampR | R | false | false | 1,410 | r | ## ----load library, message=FALSE, warning=FALSE, include=FALSE----------------
# NOTE(review): this section is a verbatim duplicate of the
# GrafsGGPLOT.R block earlier in this file (dataset text column
# repeated; its first chunk-header line is fused into the metadata row
# above) -- consider deduplicating.
library(ggplot2)
library(CampR)
library(plotly)
# library(RColorBrewer)
# yor_col<- brewer.pal(7, "Greens")
## ----Data---------------------------------------------------------------------
Porc<-ggplot2::map_data(Porc.map)
head(Porc)
hake<-CampR::maphist(1,50,"P16","Porc",out.dat=T,plot=F)
## ----Graf Porcupine-----------------------------------------------------------
p<-ggplot2::ggplot(hake)+
  geom_polygon(aes(long,lat,group=group),data=Porc,fill="white",color="darkgrey")+
  geom_point(aes(x=long,y=lat,size=sqrt(numero),text=lan),color="blue")+
  scale_size_continuous(name="No. ind.")+coord_fixed(1.3)
ggplotly(p,tooltip=c("text","lance"),width=800,height=500)
## ----resultados tabla---------------------------------------------------------
library(knitr)
library(kableExtra)
options(knitr.table.format = "markdown")
kable(databICES(1,50,"N16","Cant"),digits=2,caption="Merluza en 2016 Cantábrico y Galicia") %>%
  kable_styling(bootstrap_options="condensed",full_width=F,position="center")
## ----Demersales datos---------------------------------------------------------
Nort<-ggplot2::map_data(Nort.map)
head(Nort)
## ----mapas Demersales---------------------------------------------------------
ggplot2::ggplot(data=Nort)+geom_polygon(aes(long,lat,fill=region,group=group),col="white")+
  coord_fixed(1.3)
|
# check libraries
# Prep script: join 2016 census data onto an Illinois county shapefile
# and fortify it for ggplot2 choropleth mapping.
# NOTE(review): rgdal (readOGR) was retired from CRAN in 2023; consider
# migrating to sf::st_read().
library(readr)
library(rgdal)
library(dplyr)
library(ggplot2)
library(ggmap)
library(ggthemes)
# Loading shapefiles
ill <- readOGR(dsn = "ILcounties/simplified.shp")
# divides things into slots geodata in one, data in another etc
# Access it with @, not $
head(ill@data, n = 10)
summary(ill@data)
# check the projection
ill@proj4string
plot(ill)
census16 <- read.csv("census2016_all.csv", stringsAsFactors = FALSE)
head(census16)
summary(census16)
# let's see if we can join by county name
# (prints a logical vector -- all TRUE means every county matches)
ill$NAMELSAD10 %in% census16$Place
# Now join
ill@data <- left_join(ill@data, census16, by = c('NAMELSAD10' = 'Place'))
head(ill@data)
summary(ill@data)
names(ill)
# let's write the data to a csv and read it back again
write_csv(ill@data,"illdata.csv")
census16 <- read.csv("illdata.csv", stringsAsFactors = FALSE)
head(census16)
# let's get rid of a few columns we don't need
census16$STATEFP10=NULL
census16$COUNTYFP10=NULL
census16$COUNTYNS10=NULL
head(census16)
# now we have the census data with the geoid attached to each county
# working with ggplot means the data has to be saved in a different way.
ill_f <- fortify(ill, region="GEOID10")
# let's look at the dataframe
head(ill_f, n = 5)
# Fortify looks like it takes each polygon in a shapefile
# and changes it to a groupable set of points
# groupable based on ID
# Once we have this dataframe, we have to rejoin the
# data associated with it.
# convert id to numeric so it matches GEOID10 for the join below
ill_f$id <- as.numeric(as.character(ill_f$id))
class(ill_f$id)
class(census16$GEOID10)
ill_f <- left_join(ill_f, census16, by = c('id' = 'GEOID10'))
head(ill_f, n=5)
names(ill_f)
summary(ill_f$rate_16under19)
quantile(ill_f$rate_16under19, probs = seq(0, 1, .25))
# let's write the data to a csv
write_csv(ill_f,"ill_f.csv")
| /M01_dataprep.R | no_license | timbroderick/R_graphics | R | false | false | 1,757 | r | # check libraries
# Exploratory prep script: join 2016 census data onto the Illinois county
# shapefile and "fortify" the polygons for mapping with ggplot2.
library(readr)
library(rgdal)
library(dplyr)
library(ggplot2)
library(ggmap)
library(ggthemes)
# Loading shapefiles
ill <- readOGR(dsn = "ILcounties/simplified.shp")
# readOGR returns an S4 Spatial*DataFrame: geometry and attributes live in
# separate slots (geodata in one, data in another etc.)
# Access it with @, not $
head(ill@data, n = 10)
summary(ill@data)
# check the projection
ill@proj4string
plot(ill)
census16 <- read.csv("census2016_all.csv", stringsAsFactors = FALSE)
head(census16)
summary(census16)
# let's see if we can join by county name (prints TRUE/FALSE per county)
ill$NAMELSAD10 %in% census16$Place
# Now join the census columns onto the shapefile's attribute table
ill@data <- left_join(ill@data, census16, by = c('NAMELSAD10' = 'Place'))
head(ill@data)
summary(ill@data)
names(ill)
# let's write the data to a csv and read it back again
write_csv(ill@data,"illdata.csv")
census16 <- read.csv("illdata.csv", stringsAsFactors = FALSE)
head(census16)
# let's get rid of a few columns we don't need
census16$STATEFP10=NULL
census16$COUNTYFP10=NULL
census16$COUNTYNS10=NULL
head(census16)
# now we have the census data with the geoid attached to each county
# working with ggplot means the data has to be saved in a different way.
ill_f <- fortify(ill, region="GEOID10")
# let's look at the dataframe
head(ill_f, n = 5)
# Fortify takes each polygon in a shapefile and turns it into a groupable
# set of points (long/lat rows), groupable based on ID.
# Once we have this dataframe, we have to rejoin the
# data associated with it, because fortify keeps only the geometry + id.
ill_f$id <- as.numeric(as.character(ill_f$id))
class(ill_f$id)
class(census16$GEOID10)
ill_f <- left_join(ill_f, census16, by = c('id' = 'GEOID10'))
head(ill_f, n=5)
names(ill_f)
summary(ill_f$rate_16under19)
quantile(ill_f$rate_16under19, probs = seq(0, 1, .25))
# let's write the data to a csv
write_csv(ill_f,"ill_f.csv")
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(datasets)
library(dplyr)
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
# Show the cars that correspond to the filters
output$table <- renderDataTable({
disp_seq <- seq(from = input$disp[1], to = input$disp[2], by = 0.1)
hp_seq <- seq(from = input$hp[1], to = input$hp[2], by = 1)
data <- transmute(mtcars, Car = rownames(mtcars), MilesPerGallon = mpg,
GasolineExpenditure = input$dis/mpg*input$cost,
Cylinders = cyl, Displacement = disp, Horsepower = hp,
Transmission = am)
data <- filter(data, GasolineExpenditure <= input$gas, Cylinders %in% input$cyl,
Displacement %in% disp_seq, Horsepower %in% hp_seq, Transmission %in% input$am)
data <- mutate(data, Transmission = ifelse(Transmission==0, "Automatic", "Manual"))
data <- arrange(data, GasolineExpenditure)
data
}, options = list(lengthMenu = c(5, 15, 30), pageLength = 30))
}) | /shiny-application/server.R | no_license | sujitha-puthana/Shiny-Application-and-Reproducible-Pitch | R | false | false | 1,293 | r | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(datasets)
library(dplyr)
# Define server logic: filter mtcars by the user's criteria and render the
# result as a sortable data table, cheapest fuel cost first.
shinyServer(function(input, output) {
  # Show the cars that correspond to the filters
  output$table <- renderDataTable({
    data <- transmute(mtcars, Car = rownames(mtcars), MilesPerGallon = mpg,
                      GasolineExpenditure = input$dis/mpg*input$cost,
                      Cylinders = cyl, Displacement = disp, Horsepower = hp,
                      Transmission = am)
    # Range filters compare directly against the slider endpoints. The
    # previous `Displacement %in% seq(from, to, by = 0.1)` test relied on
    # exact floating-point equality and could silently drop cars whose
    # displacement (e.g. 167.6) is not exactly representable on the grid.
    data <- filter(data, GasolineExpenditure <= input$gas, Cylinders %in% input$cyl,
                   Displacement >= input$disp[1], Displacement <= input$disp[2],
                   Horsepower >= input$hp[1], Horsepower <= input$hp[2],
                   Transmission %in% input$am)
    # Recode transmission flag (0/1) to a human-readable label.
    data <- mutate(data, Transmission = ifelse(Transmission==0, "Automatic", "Manual"))
    data <- arrange(data, GasolineExpenditure)
    data
  }, options = list(lengthMenu = c(5, 15, 30), pageLength = 30))
})
context("ApiData.R")
# Live API test: fetch a ready-made SSB dataset by table number and verify
# the returned list structure. Skipped on CRAN (requires network access).
test_that("ApiData - Readymade SSB-data with urlType", {
skip_on_cran()
ssb1066 <- ApiData(1066, getDataByGET = TRUE, urlType = "SSB")
expect_true(is.data.frame(ssb1066[[1]]))
expect_equal(names(ssb1066)[2], "dataset")
expect_true(grepl("Detaljomsetningsindeksen, etter næring, måned og statistikkvariabel", names(ssb1066)[1]))
})
# The same SCB query expressed three ways -- value texts (a1/a2 mix codes and
# labels) and positional indices (a3) -- must return identical results.
test_that("ApiData - SCB-data using TRUE and FALSE", {
skip_on_cran()
urlSCB <- "http://api.scb.se/OV0104/v1/doris/sv/ssd/BE/BE0101/BE0101A/BefolkningNy"
a1 <- ApiData(urlSCB, Region = FALSE, Civilstand = "G", Alder = "19", Kon = "2", ContentsCode = c("Folkmängd", "Folkökning"), Tid = "1969")
a2 <- ApiData(urlSCB, Region = FALSE, Civilstand = "gifta", Alder = "19 år", Kon = "kvinnor", ContentsCode = c("BE0101N1", "BE0101N2"), Tid = "1969")
a3 <- ApiData(urlSCB, Region = FALSE, Civilstand = 2, Alder = 20, Kon = 2, ContentsCode = TRUE, Tid = 2)
expect_equal(is.data.frame(a1[[1]]), TRUE)
expect_equal(is.integer(a1[[1]][, "value"]), TRUE)
expect_equal(is.character(a1[[2]][, "ContentsCode"]), TRUE)
expect_equal(a1[[1]][, "value"], a1[[2]][, "value"])
expect_equal(a1, a2)
expect_equal(a1, a3)
})
# Disabled block: the StatFin endpoint below stopped responding, so the
# whole test_that() call is guarded by if(FALSE).
if(FALSE) # url not working
test_that("ApiData - StatFin-data with special characters", {
skip_on_cran()
urlStatFin <- "http://pxnet2.stat.fi/PXWeb/api/v1/fi/StatFin/tym/tyonv/statfin_tyonv_pxt_001.px"
a1 <- ApiData(urlStatFin, Kuukausi = c("2006M02"), Alue2018 = c("005"), Muuttujat = c("TYOTTOMAT"))
a2 <- ApiData(urlStatFin, Kuukausi = "2006M02", Alue2018 = "Alajärvi Kunta", Muuttujat = "Työttömät")
a3 <- ApiData(urlStatFin, Kuukausi = 2, Alue2018 = 2, Muuttujat = 2)
expect_equal(a1[[1]]$Alue2018, "Alajärvi Kunta")
expect_equal(a1, a2)
expect_equal(a1, a3)
})
# Advanced selection syntax: wildcard lists, the imaginary-number shorthand
# (2i), negative indices (-(1:2)) and list("top", "2") are all expected to
# select the same data, so the three calls must be equal.
test_that("ApiData - SSB-data advanced use", {
skip_on_cran()
urlSSB <- "http://data.ssb.no/api/v0/en/table/04861"
a1 <- ApiData(urlSSB, Region = list("039*"), ContentsCode = TRUE, Tid = 2i)
a2 <- ApiData(urlSSB, Region = "0399", ContentsCode = list("all", "*"), Tid = -(1:2))
a3 <- ApiData(urlSSB, Region = "Uoppgitt komm. Oslo", ContentsCode = c("Area of urban settlements (km²)", "Bosatte"), Tid = list("top", "2"))
expect_equal(a1, a2)
expect_equal(a1, a3)
})
test_that("ApiData - SSB-data with returnMetaFrames", {
skip_on_cran()
urlSSB <- "http://data.ssb.no/api/v0/en/table/04861"
mf <- ApiData(urlSSB, returnMetaFrames = TRUE)
expect_equal(names(mf), c("Region", "ContentsCode", "Tid"))
expect_equivalent(attr(mf, "text")[c("Region", "ContentsCode", "Tid")], c("region", "contents", "year"))
expect_equivalent(c(attr(mf, "elimination"), attr(mf, "time")), c(TRUE, FALSE, FALSE, FALSE, FALSE, TRUE))
expect_equal(mf[[1]]$valueTexts[mf[[1]]$values == "0121"], "Rømskog")
expect_equal(mf[[2]]$valueTexts, c("Area of urban settlements (km²)", "Number of residents"))
expect_equivalent(sapply(mf, class), rep("data.frame", 3))
expect_equivalent(sapply(mf[[3]], class), c("character", "character"))
}) | /tests/testthat/test-ApiData.R | permissive | oledysken/PxWebApiData | R | false | false | 3,086 | r | context("ApiData.R")
# Live API test: fetch a ready-made SSB dataset by table number and verify
# the returned list structure. Skipped on CRAN (requires network access).
test_that("ApiData - Readymade SSB-data with urlType", {
skip_on_cran()
ssb1066 <- ApiData(1066, getDataByGET = TRUE, urlType = "SSB")
expect_true(is.data.frame(ssb1066[[1]]))
expect_equal(names(ssb1066)[2], "dataset")
expect_true(grepl("Detaljomsetningsindeksen, etter næring, måned og statistikkvariabel", names(ssb1066)[1]))
})
# The same SCB query expressed three ways -- value texts (a1/a2 mix codes and
# labels) and positional indices (a3) -- must return identical results.
test_that("ApiData - SCB-data using TRUE and FALSE", {
skip_on_cran()
urlSCB <- "http://api.scb.se/OV0104/v1/doris/sv/ssd/BE/BE0101/BE0101A/BefolkningNy"
a1 <- ApiData(urlSCB, Region = FALSE, Civilstand = "G", Alder = "19", Kon = "2", ContentsCode = c("Folkmängd", "Folkökning"), Tid = "1969")
a2 <- ApiData(urlSCB, Region = FALSE, Civilstand = "gifta", Alder = "19 år", Kon = "kvinnor", ContentsCode = c("BE0101N1", "BE0101N2"), Tid = "1969")
a3 <- ApiData(urlSCB, Region = FALSE, Civilstand = 2, Alder = 20, Kon = 2, ContentsCode = TRUE, Tid = 2)
expect_equal(is.data.frame(a1[[1]]), TRUE)
expect_equal(is.integer(a1[[1]][, "value"]), TRUE)
expect_equal(is.character(a1[[2]][, "ContentsCode"]), TRUE)
expect_equal(a1[[1]][, "value"], a1[[2]][, "value"])
expect_equal(a1, a2)
expect_equal(a1, a3)
})
# Disabled block: the StatFin endpoint below stopped responding, so the
# whole test_that() call is guarded by if(FALSE).
if(FALSE) # url not working
test_that("ApiData - StatFin-data with special characters", {
skip_on_cran()
urlStatFin <- "http://pxnet2.stat.fi/PXWeb/api/v1/fi/StatFin/tym/tyonv/statfin_tyonv_pxt_001.px"
a1 <- ApiData(urlStatFin, Kuukausi = c("2006M02"), Alue2018 = c("005"), Muuttujat = c("TYOTTOMAT"))
a2 <- ApiData(urlStatFin, Kuukausi = "2006M02", Alue2018 = "Alajärvi Kunta", Muuttujat = "Työttömät")
a3 <- ApiData(urlStatFin, Kuukausi = 2, Alue2018 = 2, Muuttujat = 2)
expect_equal(a1[[1]]$Alue2018, "Alajärvi Kunta")
expect_equal(a1, a2)
expect_equal(a1, a3)
})
# Advanced selection syntax: wildcard lists, the imaginary-number shorthand
# (2i), negative indices (-(1:2)) and list("top", "2") are all expected to
# select the same data, so the three calls must be equal.
test_that("ApiData - SSB-data advanced use", {
skip_on_cran()
urlSSB <- "http://data.ssb.no/api/v0/en/table/04861"
a1 <- ApiData(urlSSB, Region = list("039*"), ContentsCode = TRUE, Tid = 2i)
a2 <- ApiData(urlSSB, Region = "0399", ContentsCode = list("all", "*"), Tid = -(1:2))
a3 <- ApiData(urlSSB, Region = "Uoppgitt komm. Oslo", ContentsCode = c("Area of urban settlements (km²)", "Bosatte"), Tid = list("top", "2"))
expect_equal(a1, a2)
expect_equal(a1, a3)
})
# Check that the metadata frames describe the three dimensions of the table.
test_that("ApiData - SSB-data with returnMetaFrames", {
skip_on_cran()
urlSSB <- "http://data.ssb.no/api/v0/en/table/04861"
mf <- ApiData(urlSSB, returnMetaFrames = TRUE)
expect_equal(names(mf), c("Region", "ContentsCode", "Tid"))
expect_equivalent(attr(mf, "text")[c("Region", "ContentsCode", "Tid")], c("region", "contents", "year"))
expect_equivalent(c(attr(mf, "elimination"), attr(mf, "time")), c(TRUE, FALSE, FALSE, FALSE, FALSE, TRUE))
expect_equal(mf[[1]]$valueTexts[mf[[1]]$values == "0121"], "Rømskog")
expect_equal(mf[[2]]$valueTexts, c("Area of urban settlements (km²)", "Number of residents"))
expect_equivalent(sapply(mf, class), rep("data.frame", 3))
expect_equivalent(sapply(mf[[3]], class), c("character", "character"))
})
library(ape)
testtree <- read.tree("797_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="797_0_unrooted.txt") | /codeml_files/newick_trees_processed/797_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 133 | r | library(ape)
# Read the Newick tree, remove its root, and write the unrooted tree back
# out. read.tree/unroot/write.tree come from the ape package loaded above.
testtree <- read.tree("797_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="797_0_unrooted.txt")
# Compute the mean of a pollutant across a set of monitor files.
#
# Args:
#   directory: path to the folder containing the monitor csv files.
#   pollutant: name of the pollutant column to average (e.g. "sulfate").
#   id: integer vector selecting which monitor files (in sorted order)
#       to include.
#
# Returns:
#   The mean of the pollutant over all selected files, ignoring NAs.
pollutantmean <- function(directory, pollutant, id = 1:332) {
  # List the csv files in the requested directory. The previous version
  # ignored the `directory` argument entirely and setwd() into a
  # hard-coded personal path; using full.names avoids touching the
  # working directory at all.
  fl_names <- list.files(path = directory, pattern = "\\.csv$", full.names = TRUE)
  # Read every requested file, then combine once -- avoids growing a data
  # frame with rbind() inside a loop.
  dt_frm <- do.call(rbind, lapply(fl_names[id], read.csv))
  # Return the mean (the previous version print()ed it and then returned
  # the value of setwd(), so callers could never use the result).
  mean(dt_frm[, pollutant], na.rm = TRUE)
}
| /Assignment 1_Air Pollution_Part 1.R | no_license | maniatul/R-Program | R | false | false | 712 | r | pollutantmean <- function(directory, pollutant, id = 1:332) {
#store the current working directorr in a vriable
old.dir <- getwd()
#go to the directory having data files
setwd("~/Documents/DS/specdata")
#store file names in a variable
fl_names <- list.files(path = ".", pattern = ".csv")
#creat a data frame for required files
dt_frm <- data.frame()
#store the required data in data frame as per the id
for(i in 1:length(fl_names[id]))
{
dt_frm <- rbind(dt_frm, read.csv(fl_names[id[i]]))
}
# Print the output mean
print(mean(dt_frm[, pollutant], na.rm = 1))
#return to the original working directory
setwd(old.dir)
}
|
#' Loads a bayou object
#'
#' \code{load.bayou} loads a bayouFit object that was created using \code{bayou.mcmc()}
#'
#' @param bayouFit An object of class \code{bayouFit} produced by the function \code{bayou.mcmc()}
#' @param save.Rdata A logical indicating whether the resulting chains should be saved as an *.rds file
#' @param file An optional filename (possibly including path) for the saved *.rds file
#' @param cleanup A logical indicating whether the files produced by \code{bayou.mcmc()} should be removed.
#'
#' @return An object of class \code{bayouMCMC}: a list of posterior samples
#' with the tree, data and model stored in its attributes.
#'
#' @details If both \code{save.Rdata} is \code{FALSE} and \code{cleanup} is \code{TRUE}, then \code{load.bayou} will trigger a
#' warning and ask for confirmation. In this case, if the results of \code{load.bayou()} are not stored in an object,
#' the results of the MCMC run will be permanently deleted.
#'
#' @examples
#' \dontrun{
#' data(chelonia)
#' tree <- chelonia$phy
#' dat <- chelonia$dat
#' prior <- make.prior(tree)
#' fit <- bayou.mcmc(tree, dat, model="OU", prior=prior,
#'                    new.dir=TRUE, ngen=5000)
#' chain <- load.bayou(fit, save.Rdata=FALSE, cleanup=TRUE)
#' plot(chain)
#' }
#' @export
load.bayou <- function(bayouFit, save.Rdata=TRUE, file=NULL,
                       cleanup=FALSE){#dir=NULL,outname="bayou",model="OU"){
  # Unpack the run metadata recorded by bayou.mcmc().
  tree <- bayouFit$tree
  dat <- bayouFit$dat
  outname <- bayouFit$outname
  model <- bayouFit$model
  dir <- bayouFit$dir
  #mapsr2 <- read.table(file="mapsr2.dta",header=FALSE)
  #mapsb <- read.table(file="mapsb.dta",header=FALSE)
  #mapst2 <- read.table(file="mapst2.dta",header=FALSE)
  # Read the raw output files one line per sample; blank lines are kept so
  # that the four files stay aligned sample-for-sample.
  mapsr2 <- scan(file=paste(dir,outname,".loc",sep=""),what="",sep="\n",quiet=TRUE,blank.lines.skip=FALSE)
  mapsb <- scan(file=paste(dir,outname,".sb",sep=""),what="",sep="\n",quiet=TRUE,blank.lines.skip=FALSE)
  mapst2 <- scan(file=paste(dir,outname,".t2",sep=""),what="",sep="\n",quiet=TRUE,blank.lines.skip=FALSE)
  pars.out <- scan(file=paste(dir,outname,".pars",sep=""),what="",sep="\n",quiet=TRUE,blank.lines.skip=FALSE)
  # Split each whitespace-delimited line into a numeric vector (one per sample).
  pars.out <- lapply(strsplit(pars.out,"[[:space:]]+"),as.numeric)
  mapsr2 <- lapply(strsplit(mapsr2,"[[:space:]]+"),as.numeric)
  mapsb <- lapply(strsplit(mapsb,"[[:space:]]+"),as.numeric)
  mapst2 <- lapply(strsplit(mapst2,"[[:space:]]+"),as.numeric)
  chain <- list()
  # The column layout of the .pars file depends on the parameterization:
  # the leading columns are always generation, lnL and prior, followed by
  # the model's scalar parameters, then k, ntheta, and the theta vector.
  if(model=="OU"){
    chain$gen <- sapply(pars.out,function(x) x[1])
    chain$lnL <- sapply(pars.out,function(x) x[2])
    chain$prior <- sapply(pars.out,function(x) x[3])
    chain$alpha <- sapply(pars.out,function(x) x[4])
    chain$sig2 <- sapply(pars.out,function(x) x[5])
    chain$k <- sapply(pars.out,function(x) x[6])
    chain$ntheta <- sapply(pars.out,function(x) x[7])
    chain$theta <- lapply(pars.out,function(x) x[-(1:7)])
    chain$sb <- mapsb
    chain$loc <- mapsr2
    chain$t2 <- mapst2
  }
  if(model=="QG"){
    chain$gen <- sapply(pars.out,function(x) x[1])
    chain$lnL <- sapply(pars.out,function(x) x[2])
    chain$prior <- sapply(pars.out,function(x) x[3])
    chain$h2 <- sapply(pars.out,function(x) x[4])
    chain$P <- sapply(pars.out,function(x) x[5])
    chain$w2 <- sapply(pars.out,function(x) x[6])
    chain$Ne <- sapply(pars.out,function(x) x[7])
    chain$k <- sapply(pars.out,function(x) x[8])
    chain$ntheta <- sapply(pars.out,function(x) x[9])
    chain$theta <- lapply(pars.out,function(x) x[-(1:9)])
    chain$sb <- mapsb
    chain$loc <- mapsr2
    chain$t2 <- mapst2
  }
  if(model=="OUrepar"){
    chain$gen <- sapply(pars.out,function(x) x[1])
    chain$lnL <- sapply(pars.out,function(x) x[2])
    chain$prior <- sapply(pars.out,function(x) x[3])
    chain$halflife <- sapply(pars.out,function(x) x[4])
    chain$Vy <- sapply(pars.out,function(x) x[5])
    chain$k <- sapply(pars.out,function(x) x[6])
    chain$ntheta <- sapply(pars.out,function(x) x[7])
    chain$theta <- lapply(pars.out,function(x) x[-(1:7)])
    chain$sb <- mapsb
    chain$loc <- mapsr2
    chain$t2 <- mapst2
  }
  # Attach run metadata as attributes so downstream methods (plot, summary)
  # can recover the tree, data and model without extra arguments.
  attributes(chain)$model <- bayouFit$model
  attributes(chain)$tree <- tree
  attributes(chain)$dat <- dat
  class(chain) <- c("bayouMCMC", "list")
  # Deleting the files without saving would lose the run entirely, so ask
  # the user interactively before proceeding.
  if(save.Rdata==FALSE & cleanup==TRUE){
    ans <- toupper(readline("Warning: You have selected to delete all created MCMC files and not to save them as an .rds file.
Your mcmc results will not be saved on your hard drive. If you do not output to a object, your results will be lost.
Continue? (Y or N):"))
    cleanup <- ifelse(ans=="Y", TRUE, FALSE)
  }
  if(save.Rdata){
    if(is.null(file)){
      # NOTE(review): the chain is written with save() (RData format) even
      # though the filename ends in .rds; it must be re-loaded with load(),
      # not readRDS() -- confirm this is intended.
      save(chain, file=paste(bayouFit$dir,"../", outname, ".chain.rds",sep=""))
      cat(paste("file saved to", paste(bayouFit$dir,"/",outname,".chain.rds\n",sep="")))
    } else {
      save(chain, file=file)
      cat(paste("file saved to", file))
    }
  }
  if(cleanup){
    # A temporary run directory can be removed wholesale; otherwise remove
    # only the four files this run created.
    if(bayouFit$tmpdir){
      unlink(dir,T,T)
      cat(paste("deleting temporary directory", dir))
    } else {
      file.remove(paste(dir, outname, ".loc", sep=""))
      file.remove(paste(dir, outname, ".t2", sep=""))
      file.remove(paste(dir, outname, ".sb", sep=""))
      file.remove(paste(dir, outname, ".pars", sep=""))
    }
  }
  return(chain)
}
#' Calculate Gelman's R statistic
#'
#' @param parameter The name or number of the parameter to calculate the statistic on
#' @param chain1 The first bayouMCMC chain
#' @param chain2 The second bayouMCMC chain
#' @param freq The interval between which the diagnostic is calculated
#' @param start The first sample to calculate the diagnostic at
#' @param plot A logical indicating whether the results should be plotted
#' @param ... Optional arguments passed to \code{gelman.diag(...)} from the \code{coda} package
#'
#' @return A data.frame with columns \code{R} (the potential scale reduction
#' point estimate) and \code{UCI.95} (its upper confidence limit), one row per
#' evaluation point.
#'
#' @export
gelman.R <- function(parameter, chain1, chain2, freq = 20, start = 1,
                     plot = TRUE, ...){
  # Evaluation points: every `freq`-th sample starting at `start`.
  int <- seq(start, length(chain1[[parameter]]), freq)
  # Preallocate the result vectors (previously grown element-by-element
  # from NULL inside the loop).
  R <- numeric(length(int))
  R.UCI <- numeric(length(int))
  for (i in seq_along(int)) {
    # Diagnostic computed on the two chains truncated at the i-th point,
    # so the trace shows how R evolves as the run lengthens.
    chain.list <- mcmc.list(mcmc(chain1[[parameter]][1:int[i]]),
                            mcmc(chain2[[parameter]][1:int[i]]))
    GD <- gelman.diag(chain.list)
    R[i] <- GD$psrf[1]
    R.UCI[i] <- GD$psrf[2]
  }
  if (plot) {
    plot(chain1$gen[int], R, main = paste("Gelman's R:", parameter),
         xlab = "Generation", ylab = "R", ...)
    lines(chain1$gen[int], R, lwd = 2)
    lines(chain1$gen[int], R.UCI, lty = 2)  # dashed upper confidence limit
  }
  return(data.frame("R" = R, "UCI.95" = R.UCI))
}
# Function for calculation of the posterior quantiles. Only needed for simulation study, not generally called by the user.
.posterior.Q <- function(parameter,chain1,chain2,pars,burnin=0.3){
  # Discard the burnin fraction of both chains before summarizing.
  postburn <- round(burnin*length(chain1$gen),0):length(chain1$gen)
  chain <- mcmc.list(mcmc(chain1[[parameter]][postburn]),mcmc(chain2[[parameter]][postburn]))
  # Empirical posterior quantiles on a 0.5% grid (via coda's summary).
  posterior.q <- summary(chain,quantiles=seq(0,1,0.005))$quantiles
  # Insert the true (simulated) value into the sorted quantile vector; the
  # inserted value is the only unnamed element, so its position after
  # sorting gives its rank on the grid.
  q <- which(names(sort(c(pars[[parameter]],posterior.q)))=="")
  # Convert the rank on the 0.5% grid to a posterior quantile in [0, 1].
  Q <- ((q-1)/2-0.25)/100#((q-1)+(simpar$pars$alpha-posterior.q[q-1])/(posterior.q[q+1]-posterior.q[q-1]))/100
  Q
}
#' Return a posterior of shift locations
#'
#' @param chain A bayouMCMC chain
#' @param tree A tree of class 'phylo'
#' @param burnin A value giving the burnin proportion of the chain to be discarded
#' @param simpar An optional bayou formatted parameter list giving the true values
#' (if data were simulated; currently unused by this function)
#' @param mag A logical indicating whether the average magnitude of the shifts
#' should be returned (currently unused; magnitudes are always returned)
#'
#' @return A data frame with rows corresponding to postordered branches. \code{pp} indicates the
#' posterior probability of the branch containing a shift. \code{magnitude of theta2} gives the average
#' value of the new optima after a shift. \code{naive SE of theta2} gives the standard error of the new optima
#' not accounting for autocorrelation in the MCMC and \code{rel location} gives the average relative location
#' of the shift on the branch (between 0 and 1 for each branch).
#'
#' @export
Lposterior <- function(chain, tree, burnin = 0, simpar = NULL, mag = TRUE){
  # Drop the burnin portion of the chain (burnin = 0 keeps everything).
  pb.start <- ifelse(burnin > 0, round(length(chain$gen) * burnin, 0), 1)
  postburn <- pb.start:length(chain$gen)
  chain <- lapply(chain, function(x) x[postburn])
  # Indicator matrix: one row per retained sample, one column per branch;
  # 1 when that sample places a shift on the branch.
  shifts <- t(sapply(chain$sb, function(x) as.numeric(1:nrow(tree$edge) %in% x)))
  # For every sample, the optimum associated with each shift (t2 indexes theta).
  theta <- sapply(seq_along(chain$theta), function(x) chain$theta[[x]][chain$t2[[x]]])
  branch.shifts <- chain$sb
  # Per-branch summaries over all samples that place a shift on the branch.
  theta.shifts <- tapply(unlist(theta), unlist(branch.shifts), mean)
  theta.locs <- tapply(unlist(chain$loc), unlist(branch.shifts), mean)
  thetaSE <- tapply(unlist(theta), unlist(branch.shifts), function(x) sd(x)/sqrt(length(x)))
  # Scatter per-branch summaries into full-length vectors; branches never
  # receiving a shift stay NA.
  OS <- rep(NA, length(tree$edge[, 1]))
  OS[as.numeric(names(theta.shifts))] <- theta.shifts
  SE <- rep(NA, length(tree$edge[, 1]))
  SE[as.numeric(names(thetaSE))] <- thetaSE
  locs <- rep(NA, length(tree$edge[, 1]))
  locs[as.numeric(names(theta.locs))] <- theta.locs
  # Posterior probability of a shift on each branch.
  shifts.prop <- colSums(shifts)/length(chain$gen)
  Lpost <- data.frame("pp" = shifts.prop, "magnitude of theta2" = OS,
                      "naive SE of theta2" = SE, "rel location" = locs/tree$edge.length)
  return(Lpost)
}
#' Discards burnin
#'
#' Drops the first \code{burnin.prop} fraction of every element of a chain list.
#'
#' @export
.discard.burnin <- function(chain, burnin.prop = 0.3){
  lapply(chain, function(samples) {
    first <- burnin.prop * length(samples)
    samples[first:length(samples)]
  })
}
#' Tuning function, not currently used.
.tune.D <- function(D,accept,accept.type){
  # Use only the second half of the acceptance history for tuning.
  tuning.samp <- (length(accept)/2):length(accept)
  # Mean acceptance rate and proposal counts, broken down by proposal type.
  acc <- tapply(accept[tuning.samp],accept.type[tuning.samp],mean)
  acc.length <- tapply(accept[tuning.samp],accept.type[tuning.samp],length)
  # Scale factors target a 0.25 acceptance rate, clamped to at most a
  # factor-of-2 change in either direction per tuning step.
  acc.tune <- acc/0.25
  acc.tune[acc.tune<0.5] <- 0.5
  acc.tune[acc.tune>2] <- 2
  # Rescale the proposal widths for alpha (ak), sig2 (sk) and theta (tk);
  # the bk width is kept at twice the theta width.
  D$ak <- acc.tune['alpha']*D$ak
  D$sk <- acc.tune['sig2']*D$sk
  D$tk <- acc.tune['theta']*D$tk
  D$bk <- D$tk*2
  # Strip the names picked up from the acc.tune lookups.
  D <- lapply(D,function(x){ names(x) <- NULL; x})
  return(list("D"=D,"acc.tune"=acc.tune))
}
#' Utility function for retrieving parameters from an MCMC chain
#'
#' @param i An integer giving the sample to retrieve
#' @param chain A bayouMCMC chain
#' @param model The parameterization used, either "OU", "QG" or "OUrepar"
#'
#' @return A bayou formatted parameter list
#'
#' @examples
#' \dontrun{
#' tree <- sim.bdtree(n=30)
#' tree$edge.length <- tree$edge.length/max(branching.times(tree))
#' prior <- make.prior(tree, dists=list(dk="cdpois", dsig2="dnorm",
#'                      dtheta="dnorm"),
#'                       param=list(dk=list(lambda=15, kmax=32),
#'                                  dsig2=list(mean=1, sd=0.01),
#'                                   dtheta=list(mean=0, sd=3)),
#'                                    plot.prior=FALSE)
#' pars <- priorSim(prior, tree, plot=FALSE, nsim=1)$pars[[1]]
#' dat <- dataSim(pars, model="OU", phenogram=FALSE, tree)$dat
#' fit <- bayou.mcmc(tree, dat, model="OU", prior=prior,
#'                    new.dir=TRUE, ngen=5000, plot.freq=NULL)
#' chain <- load.bayou(fit, save.Rdata=TRUE, cleanup=TRUE)
#' plotBayoupars(pull.pars(300, chain), tree)
#' }
#' @export
pull.pars <- function(i, chain, model = "OU"){
  # Order in which each parameterization stores its parameters.
  parorder <- switch(model,
                     "QG" = c("h2", "P", "w2", "Ne", "k", "ntheta", "theta", "sb", "loc", "t2"),
                     "OU" = c("alpha", "sig2", "k", "ntheta", "theta", "sb", "loc", "t2"),
                     "OUrepar" = c("halflife", "Vy", "k", "ntheta", "theta", "sb", "loc", "t2"))
  # Take the i-th sample of every parameter and label the result.
  setNames(lapply(parorder, function(p) chain[[p]][[i]]), parorder)
}
#' Combine mcmc chains
#'
#' @param chain1 The first chain to be combined
#' @param chain2 The second chain to be combined
#' @param burnin.prop The proportion of burnin from each chain to be discarded
#'
#' @return A combined bayouMCMC chain
#'
#' @export
combine.chains <- function(chain1, chain2, burnin.prop = 0){
  keys <- names(chain1)
  n.samp <- length(chain1$gen)
  # Samples retained from each chain after dropping the burnin fraction.
  postburn <- (burnin.prop * n.samp + 1):n.samp
  # Tag the generation numbers so samples remain traceable to their chain
  # of origin (.1 = first chain, .2 = second chain).
  chain1$gen <- chain1$gen + 0.1
  chain2$gen <- chain2$gen + 0.2
  combined <- setNames(
    lapply(keys, function(k) c(chain1[[k]][postburn], chain2[[k]][postburn])),
    keys)
  class(combined) <- c("bayouMCMC", "list")
  combined
}
# Builds the proposal-weight control list for the MCMC from a prior object.
# `move.weights` gives the relative frequency of each proposal type; when
# NULL, model-specific defaults are used. Returns a list of normalized move
# weights plus (for reversible-jump runs) the birth/death schedules (bk/dk)
# and the branch-selection settings (sb).
.buildControl <- function(pars, prior, move.weights=NULL){
  model <- attributes(prior)$model
  if(is.null(move.weights)){
    # Default relative move frequencies for each parameterization.
    move.weights <- switch(model, "OU"=list("alpha"=4,"sig2"=2,"theta"=4, "slide"=2,"k"=10),
                           "OUrepar" = list("halflife"=4, "Vy"=2, "theta"=4, "slide"=2, "k"=10),
                           "QG" = list("h2"=2, "P"=2, "w2"=3, "Ne"=3, "theta"=4, "slide"=2, "k"=10))
  }
  ct <- unlist(move.weights)
  # (total.weight is computed for reference but not used below.)
  total.weight <- sum(ct)
  # Normalize so the move weights sum to 1, then return to list form.
  ct <- ct/sum(ct)
  ct <- as.list(ct)
  if(move.weights$k > 0){
    # Reversible-jump moves are in use: derive per-k birth probabilities
    # from the prior on shift placement (dsb) and number of shifts (dk).
    bmax <- attributes(prior)$parameters$dsb$bmax
    nbranch <- 2*attributes(prior)$parameters$dsb$ntips-2
    prob <- attributes(prior)$parameters$dsb$prob
    # Recycle scalar prob/bmax across all branches, zeroing out any branch
    # that the other vector disallows.
    if(length(prob)==1){
      prob <- rep(prob, nbranch)
      prob[bmax==0] <- 0
    }
    if(length(bmax)==1){
      bmax <- rep(bmax, nbranch)
      bmax[prob==0] <- 0
    }
    type <- max(bmax)
    if(type == Inf){
      # Unlimited shifts per branch: build the birth schedule from the
      # cumulative prior mass on k, capped at kmax (or 2*ntips when kmax
      # is unset or non-finite).
      maxK <- attributes(prior)$parameters$dk$kmax
      maxK <- ifelse(is.null(maxK), attributes(prior)$parameters$dsb$ntips*2, maxK)
      maxK <- ifelse(!is.finite(maxK), attributes(prior)$parameters$dsb$ntips*2, maxK)
      bdFx <- attributes(prior)$functions$dk
      bdk <- sqrt(cumsum(c(0,bdFx(0:maxK,log=FALSE))))*0.9
    }
    if(type==1){
      # At most one shift per branch: birth probability declines linearly
      # as the allowed branches fill up.
      maxK <- nbranch-sum(bmax==0)
      bdk <- (maxK - 0:maxK)/maxK
    }
    ct$bk <- bdk
    ct$dk <- (1-bdk)
    ct$sb <- list(bmax=bmax, prob=prob)
  }
  if(move.weights$slide > 0 & move.weights$k ==0){
    # Fixed-k analyses still need the branch settings for slide proposals.
    bmax <- attributes(prior)$parameters$dsb$bmax
    prob <- attributes(prior)$parameters$dsb$prob
    ct$sb <- list(bmax=bmax, prob=prob)
  }
  return(ct)
}
#bdFx <- function(ct,max,pars,...){
# dk <- cumsum(c(0,dpois(0:max,pars$lambda*T)))
# bk <- 0.9-dk+0.1
# return(list(bk=bk,dk=dk))
#}
# Adjusts the proposal-weight control list when the current state has no
# shifts (k == 0): the 'slide' move is then impossible, so its weight is
# redistributed evenly across the remaining tunable moves.
.updateControl <- function(ct, pars, fixed){
  if(pars$k==0){
    ctM <- ct
    # Weight currently assigned to the moves that require a shift.
    R <- sum(unlist(ctM[names(ctM) %in% c("slide","pos")],F,F))
    ctM[names(ctM) == "slide"] <- 0
    # Moves eligible to absorb the freed weight: everything except the
    # fixed parameters and the shift-specific entries.
    nR <- !(names(ctM) %in% c(fixed, "bk","dk","slide", "sb"))
    ctM[nR] <-lapply(ct[names(ctM)[nR]],function(x) x+R/sum(nR))
    ct <- ctM
  }
  return(ct)
}
# Buffers MCMC samples and flushes them to the output files once per `chunk`
# stored samples. Called with the current generation i; only generations
# where i %% samp == 0 are stored. Returns the (possibly emptied) store.
.store.bayou <- function(i, pars, ll, pr, store, samp, chunk, parorder, files){
  if(i%%samp==0){
    # Position of this sample within the current chunk; 0 means the chunk
    # is full and the buffer must be written out.
    j <- (i/samp)%%chunk
    if(j!=0 & i>0){
      # Buffer the sample in slot j of each component.
      store$sb[[j]] <- pars$sb
      store$t2[[j]] <- pars$t2
      store$loc[[j]] <- pars$loc
      parline <- unlist(pars[parorder])
      store$out[[j]] <- c(i,ll,pr,parline)
    } else {
      #chunk.mapst1[chunk,] <<- maps$t1
      #chunk.mapst2[chunk,] <<- maps$t2
      #chunk.mapsr2[chunk,] <<- maps$r2
      # Chunk is full: place this sample in the final slot, append the
      # whole buffer to the output files, then reset the buffer.
      store$sb[[chunk]] <- pars$sb
      store$t2[[chunk]] <- pars$t2
      store$loc[[chunk]] <- pars$loc
      parline <- unlist(pars[parorder])
      store$out[[chunk]] <- c(i,ll,pr,parline)
      #write.table(chunk.mapst1,file=mapst1,append=TRUE,col.names=FALSE,row.names=FALSE)
      #write.table(chunk.mapst2,file=mapst2,append=TRUE,col.names=FALSE,row.names=FALSE)
      #write.table(chunk.mapsr2,file=mapsr2,append=TRUE,col.names=FALSE,row.names=FALSE)
      # One line per stored sample, appended to each of the four files.
      lapply(store$out,function(x) cat(c(x,"\n"),file=files$pars.output,append=TRUE))
      lapply(store$sb,function(x) cat(c(x,"\n"),file=files$mapsb,append=TRUE))
      lapply(store$t2,function(x) cat(c(x,"\n"),file=files$mapst2,append=TRUE))
      lapply(store$loc,function(x) cat(c(x,"\n"),file=files$mapsloc,append=TRUE))
      #chunk.mapst1 <<- matrix(0,ncol=dim(oldmap)[1],nrow=chunk)
      #chunk.mapst2 <<- matrix(0,ncol=dim(oldmap)[1],nrow=chunk)
      #chunk.mapsr2 <<- matrix(0,ncol=dim(oldmap)[1],nrow=chunk)
      #out <<- list()
      store$sb <- list()
      store$t2 <- list()
      store$loc <- list()
      store$out <- list()
    }
  }
  return(store)
}
#' S3 method for printing bayouFit objects
#'
#' @param x A 'bayouFit' object produced by \code{bayou.mcmc}
#' @param ... Additional parameters passed to \code{print}
#'
#' @export
#' @method print bayouFit
print.bayouFit <- function(x, ...) {
  # Header: parameterization and where the MCMC output files live.
  cat("bayou modelfit\n")
  cat(paste(x$model, " parameterization\n\n", sep = ""))
  cat("Results are stored in directory\n")
  file.pattern <- paste(x$dir, x$outname, ".*", sep = "")
  cat(file.pattern, "\n")
  cat(paste("To load results, use 'load.bayou(bayouFit)'\n\n", sep = ""))
  # Acceptance summary, broken down by proposal type.
  cat(paste(length(x$accept), " generations were run with the following acceptance probabilities:\n"))
  accept.prob <- round(tapply(x$accept, x$accept.type, mean), 2)
  prop.N <- tapply(x$accept.type, x$accept.type, length)
  print(accept.prob, ...)
  cat(" Total number of proposals of each type:\n")
  print(prop.N, ...)
}
#' Set the burnin proportion for bayouMCMC objects
#'
#' @param chain A bayouMCMC chain or an ssMCMC chain
#' @param burnin The burnin proportion of samples to be discarded from downstream analyses.
#'
#' @return A bayouMCMC chain or ssMCMC chain with burnin proportion stored in the attributes.
#'
#' @export
set.burnin <- function(chain, burnin = 0.3) {
  # Remember the primary class before touching attributes, then re-apply
  # the full class vector so downstream S3 dispatch keeps working.
  original.class <- class(chain)[1]
  attr(chain, "burnin") <- burnin
  if (original.class == "bayouMCMC") {
    class(chain) <- c("bayouMCMC", "list")
  }
  if (original.class == "ssMCMC") {
    class(chain) <- c("ssMCMC", "list")
  }
  return(chain)
}
#' S3 method for summarizing bayouMCMC objects
#'
#' @param object A bayouMCMC object
#' @param ... Additional arguments passed to \code{print}
#'
#' @return An invisible list with two elements: \code{statistics} which provides
#' summary statistics for a bayouMCMC chain, and \code{branch.posteriors} which summarizes
#' branch specific data from a bayouMCMC chain.
#'
#' @export
#' @method summary bayouMCMC
summary.bayouMCMC <- function(object, ...){
  # Tree and model are carried in the chain's attributes by load.bayou().
  tree <- attributes(object)$tree
  model <- attributes(object)$model
  # Use the burnin stored by set.burnin(), defaulting to no burnin.
  if(is.null(attributes(object)$burnin)){
    start <- 1
  } else {
    start <- round(attributes(object)$burnin*length(object$gen),0)
  }
  cat("bayou MCMC chain:", max(object$gen), "generations\n")
  # NOTE(review): eval(start) is redundant here -- start is already a value.
  cat(length(object$gen), "samples, first", eval(start), "samples discarded as burnin\n")
  postburn <- start:length(object$gen)
  object <- lapply(object,function(x) x[postburn])
  # Scalar parameters to summarize, in the order they are reported; the
  # set depends on the parameterization.
  parorder <- switch(model,"QG"=c("lnL","prior", "h2","P","w2","Ne","k","ntheta"), "OU"=c("lnL","prior","alpha","sig2","k","ntheta"),"OUrepar"=c("lnL","prior","halflife","Vy","k","ntheta"))
  summat <- matrix(unlist(object[parorder]),ncol=length(parorder))
  colnames(summat) <- parorder
  # Add the root optimum (first element of theta in each sample).
  summat <- cbind(summat, "root"=sapply(object$theta,function(x) x[1]))
  # coda summaries: per-parameter stats plus a pooled summary of all theta.
  sum.1vars <- summary(mcmc(summat))
  sum.theta <- summary(mcmc(unlist(object$theta)))
  statistics <- rbind(cbind(sum.1vars$statistics, "Effective Size" = effectiveSize(summat)),"all theta"=c(sum.theta$statistics[1:2],rep(NA,3)))
  cat("\n\nSummary statistics for parameters:\n")
  print(statistics, ...)
  # Branch-level posterior probabilities of shifts, sorted and filtered
  # for display; the full table is returned invisibly.
  Lpost <- Lposterior(object, tree)
  Lpost.sorted <- Lpost[order(Lpost[,1],decreasing=TRUE),]
  cat("\n\nBranches with posterior probabilities higher than 0.1:\n")
  print(Lpost.sorted[Lpost.sorted[,1]>0.1,], ...)
  out <- list(statistics=statistics, branch.posteriors=Lpost)
  invisible(out)
}
#' Generate an overparameterized starting point for the MCMC
#'
#' This function takes a prior function and generates a starting point that can be entered for \code{startpar}
#' in the function \code{bayou.mcmc}
#'
#' @param prior A prior function
#' @param tree A phylogenetic tree of class 'phylo'
#' @param dat A named data vector
#'
#' @return A bayou formatted parameter list with \code{ntips - 1} shifts and one
#' optimum per tip, each initialized near that tip's observed trait value. As a
#' side effect, the resulting regime painting is plotted with \code{plotBayoupars}.
#'
#' @details This function creates an "overparameterized" starting point for running the mcmc. It gives n-1 tips a unique
#' optimum close to the actual data value. This is useful if you expect steep likelihood peaks that may be hard to find,
#' as these often will be easier to access from this overparameterized model. Generally, the overparameterization will have
#' a very high likelihood and a very low prior.
overparameterize.startingPoint <- function(prior, tree, dat){
  # Postorder the tree and align the data with its tip labels.
  tree <- reorder(tree, "postorder")
  dat <- dat[tree$tip.label]
  # (model is looked up here but not used below.)
  model <- attributes(prior)$model
  ntips <- length(tree$tip.label)
  # Start from a random draw from the prior, then overwrite the shift
  # configuration below.
  startpar <- priorSim(prior, tree, plot=FALSE, nsim=1)[[1]][[1]]
  # One optimum per tip, jittered slightly around the observed trait value.
  theta <- rnorm(ntips, dat, 1e-5)
  startpar$theta <- theta
  startpar$k <- ntips-1
  # Shifts placed on the terminal branches with edge[,2] < ntips, i.e. all
  # tip branches except the one leading to tip number `ntips`.
  startpar$sb <- which(tree$edge[,2] < ntips)
  startpar$loc <- rep(0, startpar$k)
  startpar$t2 <- 2:ntips
  startpar$ntheta <- startpar$k+1
  # Visual check of the regime painting implied by the starting point.
  plotBayoupars(startpar, tree, col=setNames(rainbow(startpar$ntheta), 1:startpar$ntheta))
  return(startpar)
}
#'
#' \code{load.bayou} loads a bayouFit object that was created using \code{bayou.mcmc()}
#'
#' @param bayouFit An object of class \code{bayouFit} produced by the function \code{bayou.mcmc()}
#' @param save.Rdata A logical indicating whether the resulting chains should be saved as an *.rds file
#' @param file An optional filename (possibly including path) for the saved *.rds file
#' @param cleanup A logical indicating whether the files produced by \code{bayou.mcmc()} should be removed.
#'
#' @details If both \code{save.Rdata} is \code{FALSE} and \code{cleanup} is \code{TRUE}, then \code{load.bayou} will trigger a
#' warning and ask for confirmation. In this case, if the results of \code{load.bayou()} are not stored in an object,
#' the results of the MCMC run will be permanently deleted.
#'
#' @examples
#' \dontrun{
#' data(chelonia)
#' tree <- chelonia$phy
#' dat <- chelonia$dat
#' prior <- make.prior(tree)
#' fit <- bayou.mcmc(tree, dat, model="OU", prior=prior,
#' new.dir=TRUE, ngen=5000)
#' chain <- load.bayou(fit, save.Rdata=FALSE, cleanup=TRUE)
#' plot(chain)
#' }
#' @export
load.bayou <- function(bayouFit, save.Rdata=TRUE, file=NULL,
                       cleanup=FALSE){
  tree <- bayouFit$tree
  dat <- bayouFit$dat
  outname <- bayouFit$outname
  model <- bayouFit$model
  dir <- bayouFit$dir
  ## Read one whitespace-delimited output file as a list with one numeric
  ## vector per MCMC sample (rows are ragged, so parse line by line).
  read.num.lines <- function(ext){
    lines <- scan(file=paste(dir, outname, ext, sep=""), what="", sep="\n",
                  quiet=TRUE, blank.lines.skip=FALSE)
    lapply(strsplit(lines, "[[:space:]]+"), as.numeric)
  }
  mapsr2 <- read.num.lines(".loc")
  mapsb <- read.num.lines(".sb")
  mapst2 <- read.num.lines(".t2")
  pars.out <- read.num.lines(".pars")
  ## Scalar columns of the .pars file, in the order they were written; the
  ## remaining columns of each row are the vector of optima ('theta').
  ## (Replaces three near-identical per-model parsing blocks.)
  par.names <- switch(model,
                      "OU" = c("alpha", "sig2"),
                      "QG" = c("h2", "P", "w2", "Ne"),
                      "OUrepar" = c("halflife", "Vy"))
  if(is.null(par.names)) stop("Unknown model type: ", model)
  fixed <- c("gen", "lnL", "prior", par.names, "k", "ntheta")
  nfixed <- length(fixed)
  chain <- lapply(seq_len(nfixed), function(j) sapply(pars.out, function(x) x[j]))
  names(chain) <- fixed
  chain$theta <- lapply(pars.out, function(x) x[-(1:nfixed)])
  chain$sb <- mapsb
  chain$loc <- mapsr2
  chain$t2 <- mapst2
  attributes(chain)$model <- bayouFit$model
  attributes(chain)$tree <- tree
  attributes(chain)$dat <- dat
  class(chain) <- c("bayouMCMC", "list")
  ## Confirm before irreversibly deleting results that are not being saved.
  if(!save.Rdata && cleanup){
    ans <- toupper(readline("Warning: You have selected to delete all created MCMC files and not to save them as an .rds file. 
                            Your mcmc results will not be saved on your hard drive. If you do not output to a object, your results will be lost. 
                            Continue? (Y or N):"))
    cleanup <- ans=="Y"
  }
  if(save.Rdata){
    if(is.null(file)){
      ## NOTE(review): save() writes RData format despite the .rds name;
      ## kept as-is for backward compatibility with existing workflows.
      save.path <- paste(bayouFit$dir, "../", outname, ".chain.rds", sep="")
      save(chain, file=save.path)
      ## FIX: report the path actually written (the old message pointed
      ## inside 'dir' while the file was saved one level up).
      cat(paste("file saved to", save.path, "\n"))
    } else {
      save(chain, file=file)
      cat(paste("file saved to", file))
    }
  }
  if(cleanup){
    if(bayouFit$tmpdir){
      unlink(dir, recursive=TRUE, force=TRUE)
      cat(paste("deleting temporary directory", dir))
    } else {
      ## Remove the four per-run output files created by bayou.mcmc().
      for(ext in c(".loc", ".t2", ".sb", ".pars")){
        file.remove(paste(dir, outname, ext, sep=""))
      }
    }
  }
  return(chain)
}
#' Calculate Gelman's R statistic
#'
#' @param parameter The name or number of the parameter to calculate the statistic on
#' @param chain1 The first bayouMCMC chain
#' @param chain2 The second bayouMCMC chain
#' @param freq The interval between which the diagnostic is calculated
#' @param start The first sample to calculate the diagnostic at
#' @param plot A logical indicating whether the results should be plotted
#' @param ... Optional arguments passed to \code{gelman.diag(...)} from the \code{coda} package
#'
#' @export
gelman.R <- function(parameter, chain1, chain2, freq=20, start=1,
                     plot=TRUE, ...){
  # Generations at which the diagnostic is evaluated.
  int <- seq(start, length(chain1[[parameter]]), freq)
  # Preallocate instead of growing vectors inside the loop.
  R <- numeric(length(int))
  R.UCI <- numeric(length(int))
  for(i in seq_along(int)){
    # Potential scale reduction factor over the first int[i] samples of each chain.
    chain.list <- mcmc.list(mcmc(chain1[[parameter]][1:int[i]]),
                            mcmc(chain2[[parameter]][1:int[i]]))
    GD <- gelman.diag(chain.list)
    R[i] <- GD$psrf[1]
    R.UCI[i] <- GD$psrf[2]
  }
  if(plot){
    plot(chain1$gen[int], R, main=paste("Gelman's R:", parameter),
         xlab="Generation", ylab="R", ...)
    lines(chain1$gen[int], R, lwd=2)
    lines(chain1$gen[int], R.UCI, lty=2)   # dashed line = upper 95% CI
  }
  return(data.frame("R"=R, "UCI.95"=R.UCI))
}
# Function for calculation of the posterior quantile of a true (simulated)
# parameter value within the pooled posterior of two chains. Only needed for
# simulation studies; not generally called by the user.
.posterior.Q <- function(parameter,chain1,chain2,pars,burnin=0.3){
  ## Pool the post-burnin samples of the two independent chains.
  postburn <- round(burnin*length(chain1$gen),0):length(chain1$gen)
  chain <- mcmc.list(mcmc(chain1[[parameter]][postburn]),mcmc(chain2[[parameter]][postburn]))
  ## Empirical quantiles on a 0.5% grid of the pooled posterior.
  posterior.q <- summary(chain,quantiles=seq(0,1,0.005))$quantiles
  ## Rank of the true value within the grid: after sorting, the true value is
  ## the one element with an empty name (assumes pars[[parameter]] is unnamed
  ## while the quantiles are named -- TODO confirm for all parameter types).
  q <- which(names(sort(c(pars[[parameter]],posterior.q)))=="")
  ## Convert the rank to an approximate posterior quantile in [0, 1].
  Q <- ((q-1)/2-0.25)/100#((q-1)+(simpar$pars$alpha-posterior.q[q-1])/(posterior.q[q+1]-posterior.q[q-1]))/100
  Q
}
#' Return a posterior of shift locations
#'
#' @param chain A bayouMCMC chain
#' @param tree A tree of class 'phylo'
#' @param burnin A value giving the burnin proportion of the chain to be discarded
#' @param simpar An optional bayou formatted parameter list giving the true values (if data were simulated)
#' @param mag A logical indicating whether the average magnitude of the shifts should be returned
#'
#' @return A data frame with rows corresponding to postordered branches. \code{pp} indicates the
#' posterior probability of the branch containing a shift. \code{magnitude of theta2} gives the average
#' value of the new optima after a shift. \code{naive SE of theta2} gives the standard error of the new optima
#' not accounting for autocorrelation in the MCMC and \code{rel location} gives the average relative location
#' of the shift on the branch (between 0 and 1 for each branch).
#'
#' @export
Lposterior <- function(chain, tree, burnin=0, simpar=NULL, mag=TRUE){
  # NOTE: 'simpar' and 'mag' are retained for interface compatibility but are
  # not used by the computation (they were unused in the original as well).
  # Scalar condition: plain if/else rather than ifelse().
  pb.start <- if (burnin > 0) round(length(chain$gen)*burnin, 0) else 1
  postburn <- pb.start:length(chain$gen)
  chain <- lapply(chain, function(x) x[postburn])
  nbranch <- nrow(tree$edge)
  # Indicator matrix (samples x branches): 1 if the branch carries a shift.
  shifts <- t(sapply(chain$sb, function(x) as.numeric(seq_len(nbranch) %in% x)))
  # Optimum assigned to each shift in each sample.
  theta <- sapply(seq_along(chain$theta), function(x) chain$theta[[x]][chain$t2[[x]]])
  branch.shifts <- chain$sb
  # Per-branch averages over all samples in which that branch held a shift.
  theta.shifts <- tapply(unlist(theta), unlist(branch.shifts), mean)
  theta.locs <- tapply(unlist(chain$loc), unlist(branch.shifts), mean)
  thetaSE <- tapply(unlist(theta), unlist(branch.shifts), function(x) sd(x)/sqrt(length(x)))
  # Spread the named per-branch summaries onto full-length branch vectors
  # (branches never holding a shift stay NA).
  OS <- rep(NA, nbranch)
  OS[as.numeric(names(theta.shifts))] <- theta.shifts
  SE <- rep(NA, nbranch)
  SE[as.numeric(names(thetaSE))] <- thetaSE
  locs <- rep(NA, nbranch)
  locs[as.numeric(names(theta.locs))] <- theta.locs
  # Posterior probability a branch holds a shift. (Unused locals 'ntips',
  # 'root.theta' and 'all.branches' from the original were removed.)
  shifts.prop <- colSums(shifts)/length(chain$gen)
  Lpost <- data.frame("pp"=shifts.prop, "magnitude of theta2"=OS,
                      "naive SE of theta2"=SE, "rel location"=locs/tree$edge.length)
  return(Lpost)
}
#' Discards burnin
#'
#' @export
.discard.burnin <- function(chain, burnin.prop = 0.3) {
  # Drop the first burnin.prop fraction from every component of the chain.
  drop_head <- function(samples) {
    first <- burnin.prop * length(samples)
    samples[first:length(samples)]
  }
  lapply(chain, drop_head)
}
# Proposal-width tuning function; not currently used by the MCMC.
# Rescales the tuning widths in D toward a 0.25 target acceptance rate.
.tune.D <- function(D,accept,accept.type){
  # Use only the second half of the acceptance history for tuning.
  tuning.samp <- (length(accept)/2):length(accept)
  # Mean acceptance rate per move type (acc.length is computed but unused).
  acc <- tapply(accept[tuning.samp],accept.type[tuning.samp],mean)
  acc.length <- tapply(accept[tuning.samp],accept.type[tuning.samp],length)
  # Scale factor toward the 0.25 target, clamped to [0.5, 2] so a single
  # tuning step cannot change a width by more than a factor of two.
  acc.tune <- acc/0.25
  acc.tune[acc.tune<0.5] <- 0.5
  acc.tune[acc.tune>2] <- 2
  # Rescale the widths for alpha, sig2 and theta proposals; the branch
  # (shift) width is tied to twice the theta width.
  D$ak <- acc.tune['alpha']*D$ak
  D$sk <- acc.tune['sig2']*D$sk
  D$tk <- acc.tune['theta']*D$tk
  D$bk <- D$tk*2
  # Strip names picked up from indexing the named acc.tune vector.
  D <- lapply(D,function(x){ names(x) <- NULL; x})
  return(list("D"=D,"acc.tune"=acc.tune))
}
#' Utility function for retrieving parameters from an MCMC chain
#'
#' @param i An integer giving the sample to retrieve
#' @param chain A bayouMCMC chain
#' @param model The parameterization used, either "OU", "QG" or "OUrepar"
#'
#' @return A bayou formatted parameter list
#'
#' @examples
#' \dontrun{
#' tree <- sim.bdtree(n=30)
#' tree$edge.length <- tree$edge.length/max(branching.times(tree))
#' prior <- make.prior(tree, dists=list(dk="cdpois", dsig2="dnorm",
#' dtheta="dnorm"),
#' param=list(dk=list(lambda=15, kmax=32),
#' dsig2=list(mean=1, sd=0.01),
#' dtheta=list(mean=0, sd=3)),
#' plot.prior=FALSE)
#' pars <- priorSim(prior, tree, plot=FALSE, nsim=1)$pars[[1]]
#' dat <- dataSim(pars, model="OU", phenogram=FALSE, tree)$dat
#' fit <- bayou.mcmc(tree, dat, model="OU", prior=prior,
#' new.dir=TRUE, ngen=5000, plot.freq=NULL)
#' chain <- load.bayou(fit, save.Rdata=TRUE, cleanup=TRUE)
#' plotBayoupars(pull.pars(300, chain), tree)
#' }
#' @export
pull.pars <- function(i, chain, model="OU"){
  # Parameter names, in storage order, for the requested parameterization.
  parorder <- switch(model,
                     "QG" = c("h2", "P", "w2", "Ne", "k", "ntheta", "theta", "sb", "loc", "t2"),
                     "OU" = c("alpha", "sig2", "k", "ntheta", "theta", "sb", "loc", "t2"),
                     "OUrepar" = c("halflife", "Vy", "k", "ntheta", "theta", "sb", "loc", "t2"))
  # Extract the i-th stored sample of every parameter as a named list.
  setNames(lapply(parorder, function(p) chain[[p]][[i]]), parorder)
}
#' Combine mcmc chains
#'
#' @param chain1 The first chain to be combined
#' @param chain2 The second chain to be combined
#' @param burnin.prop The proportion of burnin from each chain to be discarded
#'
#' @return A combined bayouMCMC chain
#'
#' @export
combine.chains <- function(chain1, chain2, burnin.prop=0){
  # Post-burnin sample indices (both chains are assumed to be the same length).
  n <- length(chain1$gen)
  postburn <- (burnin.prop * n + 1):n
  # Offset generation numbers so samples from each chain stay distinguishable.
  chain1$gen <- chain1$gen + 0.1
  chain2$gen <- chain2$gen + 0.2
  # Concatenate every component of the two chains, component by component.
  combined <- lapply(names(chain1), function(nm) {
    c(chain1[[nm]][postburn], chain2[[nm]][postburn])
  })
  names(combined) <- names(chain1)
  class(combined) <- c("bayouMCMC", "list")
  return(combined)
}
# Build the proposal-control list for the MCMC: normalized move weights,
# reversible-jump birth/death probability schedules, and per-branch shift
# constraints, all derived from the prior.
.buildControl <- function(pars, prior, move.weights=NULL){
  model <- attributes(prior)$model
  # Default relative weights for each proposal type, per parameterization.
  if(is.null(move.weights)){
    move.weights <- switch(model, "OU"=list("alpha"=4,"sig2"=2,"theta"=4, "slide"=2,"k"=10),
                           "OUrepar" = list("halflife"=4, "Vy"=2, "theta"=4, "slide"=2, "k"=10),
                           "QG" = list("h2"=2, "P"=2, "w2"=3, "Ne"=3, "theta"=4, "slide"=2, "k"=10))
  }
  ct <- unlist(move.weights)
  total.weight <- sum(ct)   # computed but not used below
  # Normalize the weights to proposal probabilities.
  ct <- ct/sum(ct)
  ct <- as.list(ct)
  if(move.weights$k > 0){
    # Reversible-jump moves enabled: derive per-branch shift limits (bmax)
    # and selection probabilities (prob) from the prior on shift locations.
    bmax <- attributes(prior)$parameters$dsb$bmax
    nbranch <- 2*attributes(prior)$parameters$dsb$ntips-2
    prob <- attributes(prior)$parameters$dsb$prob
    # Recycle scalar settings across branches; a zero in either vector
    # disables shifts on that branch in both.
    if(length(prob)==1){
      prob <- rep(prob, nbranch)
      prob[bmax==0] <- 0
    }
    if(length(bmax)==1){
      bmax <- rep(bmax, nbranch)
      bmax[prob==0] <- 0
    }
    type <- max(bmax)
    if(type == Inf){
      # Unlimited shifts per branch: birth probability schedule from the prior
      # on the number of shifts (dk), capped at kmax (or 2*ntips as fallback).
      # NB(review): scalar ifelse() here works only because maxK is length 1.
      maxK <- attributes(prior)$parameters$dk$kmax
      maxK <- ifelse(is.null(maxK), attributes(prior)$parameters$dsb$ntips*2, maxK)
      maxK <- ifelse(!is.finite(maxK), attributes(prior)$parameters$dsb$ntips*2, maxK)
      bdFx <- attributes(prior)$functions$dk
      bdk <- sqrt(cumsum(c(0,bdFx(0:maxK,log=FALSE))))*0.9
    }
    if(type==1){
      # At most one shift per branch: birth probability declines linearly
      # as the allowed branches fill up.
      maxK <- nbranch-sum(bmax==0)
      bdk <- (maxK - 0:maxK)/maxK
    }
    ct$bk <- bdk          # birth probability, indexed by current k + 1
    ct$dk <- (1-bdk)      # death probability, the complement
    ct$sb <- list(bmax=bmax, prob=prob)
  }
  if(move.weights$slide > 0 & move.weights$k ==0){
    # Fixed number of shifts, but 'slide' moves still need the location prior.
    bmax <- attributes(prior)$parameters$dsb$bmax
    prob <- attributes(prior)$parameters$dsb$prob
    ct$sb <- list(bmax=bmax, prob=prob)
  }
  return(ct)
}
#bdFx <- function(ct,max,pars,...){
# dk <- cumsum(c(0,dpois(0:max,pars$lambda*T)))
# bk <- 0.9-dk+0.1
# return(list(bk=bk,dk=dk))
#}
# Rebalance the proposal-control list for the current state: when the model
# currently has no shifts (k == 0), 'slide' moves (relocating a shift along
# its branch) are meaningless, so their weight is redistributed evenly over
# the remaining tunable proposal types.
.updateControl <- function(ct, pars, fixed){
  if(pars$k == 0){
    ctM <- ct
    # Weight currently allocated to shift-location moves.
    # (FIX: named logical arguments instead of positional T/F shorthand.)
    R <- sum(unlist(ctM[names(ctM) %in% c("slide", "pos")],
                    recursive = FALSE, use.names = FALSE))
    ctM[names(ctM) == "slide"] <- 0
    # Proposal types eligible to absorb the freed weight (everything except
    # the fixed parameters, the birth/death schedules, and 'slide'/'sb').
    nR <- !(names(ctM) %in% c(fixed, "bk", "dk", "slide", "sb"))
    ctM[nR] <- lapply(ct[names(ctM)[nR]], function(x) x + R/sum(nR))
    ct <- ctM
  }
  return(ct)
}
# Buffer the current MCMC state every 'samp' generations and flush the
# buffer to the output files on disk once every 'chunk' stored samples.
# Returns the (possibly emptied) store so the caller can carry it forward.
.store.bayou <- function(i, pars, ll, pr, store, samp, chunk, parorder, files){
  if(i%%samp==0){
    # Position of this sample within the current chunk; 0 means chunk full.
    j <- (i/samp)%%chunk
    if(j!=0 & i>0){
      # Chunk not yet full: append this sample to the in-memory buffer.
      store$sb[[j]] <- pars$sb
      store$t2[[j]] <- pars$t2
      store$loc[[j]] <- pars$loc
      parline <- unlist(pars[parorder])
      store$out[[j]] <- c(i,ll,pr,parline)
    } else {
      #chunk.mapst1[chunk,] <<- maps$t1
      #chunk.mapst2[chunk,] <<- maps$t2
      #chunk.mapsr2[chunk,] <<- maps$r2
      # Chunk boundary: store this sample in the final slot ...
      store$sb[[chunk]] <- pars$sb
      store$t2[[chunk]] <- pars$t2
      store$loc[[chunk]] <- pars$loc
      parline <- unlist(pars[parorder])
      store$out[[chunk]] <- c(i,ll,pr,parline)
      #write.table(chunk.mapst1,file=mapst1,append=TRUE,col.names=FALSE,row.names=FALSE)
      #write.table(chunk.mapst2,file=mapst2,append=TRUE,col.names=FALSE,row.names=FALSE)
      #write.table(chunk.mapsr2,file=mapsr2,append=TRUE,col.names=FALSE,row.names=FALSE)
      # ... then append the whole buffered chunk to the four output files.
      lapply(store$out,function(x) cat(c(x,"\n"),file=files$pars.output,append=TRUE))
      lapply(store$sb,function(x) cat(c(x,"\n"),file=files$mapsb,append=TRUE))
      lapply(store$t2,function(x) cat(c(x,"\n"),file=files$mapst2,append=TRUE))
      lapply(store$loc,function(x) cat(c(x,"\n"),file=files$mapsloc,append=TRUE))
      #chunk.mapst1 <<- matrix(0,ncol=dim(oldmap)[1],nrow=chunk)
      #chunk.mapst2 <<- matrix(0,ncol=dim(oldmap)[1],nrow=chunk)
      #chunk.mapsr2 <<- matrix(0,ncol=dim(oldmap)[1],nrow=chunk)
      #out <<- list()
      # Reset the in-memory buffer for the next chunk.
      store$sb <- list()
      store$t2 <- list()
      store$loc <- list()
      store$out <- list()
    }
  }
  return(store)
}
#' S3 method for printing bayouFit objects
#'
#' @param x A 'bayouFit' object produced by \code{bayou.mcmc}
#' @param ... Additional parameters passed to \code{print}
#'
#' @export
#' @method print bayouFit
print.bayouFit <- function(x, ...){
  # Console summary of a bayou MCMC run: parameterization, output file
  # location, and per-proposal acceptance statistics.
  cat("bayou modelfit\n")
  cat(paste0(x$model, " parameterization\n\n"))
  cat("Results are stored in directory\n")
  file.pattern <- paste0(x$dir, x$outname, ".*")
  cat(file.pattern, "\n")
  cat("To load results, use 'load.bayou(bayouFit)'\n\n")
  cat(paste(length(x$accept), " generations were run with the following acceptance probabilities:\n"))
  acceptance <- round(tapply(x$accept, x$accept.type, mean), 2)
  n.proposals <- tapply(x$accept.type, x$accept.type, length)
  print(acceptance, ...)
  cat(" Total number of proposals of each type:\n")
  print(n.proposals, ...)
}
#' Set the burnin proportion for bayouMCMC objects
#'
#' @param chain A bayouMCMC chain or an ssMCMC chain
#' @param burnin The burnin proportion of samples to be discarded from downstream analyses.
#'
#' @return A bayouMCMC chain or ssMCMC chain with burnin proportion stored in the attributes.
#'
#' @export
set.burnin <- function(chain, burnin=0.3){
  # Record the burnin proportion as an attribute; downstream summary and
  # plotting methods read it rather than taking a burnin argument.
  kind <- class(chain)[1]
  attr(chain, "burnin") <- burnin
  # Re-assert the expected class vector for the two supported chain types.
  if (kind %in% c("bayouMCMC", "ssMCMC")) {
    class(chain) <- c(kind, "list")
  }
  return(chain)
}
#' S3 method for summarizing bayouMCMC objects
#'
#' @param object A bayouMCMC object
#' @param ... Additional arguments passed to \code{print}
#'
#' @return An invisible list with two elements: \code{statistics} which provides
#' summary statistics for a bayouMCMC chain, and \code{branch.posteriors} which summarizes
#' branch specific data from a bayouMCMC chain.
#'
#' @export
#' @method summary bayouMCMC
summary.bayouMCMC <- function(object, ...){
  # Print, and invisibly return, summary statistics and per-branch shift
  # posteriors for a bayouMCMC chain.
  tree <- attributes(object)$tree
  model <- attributes(object)$model
  # Burnin defaults to 0 unless set.burnin() stored a proportion.
  if(is.null(attributes(object)$burnin)){
    start <- 1
  } else {
    start <- round(attributes(object)$burnin*length(object$gen),0)
  }
  cat("bayou MCMC chain:", max(object$gen), "generations\n")
  # NB(review): eval(start) is a no-op here; 'start' is already a number.
  cat(length(object$gen), "samples, first", eval(start), "samples discarded as burnin\n")
  postburn <- start:length(object$gen)
  object <- lapply(object,function(x) x[postburn])
  # Scalar parameters to tabulate, in output order, per parameterization.
  parorder <- switch(model,"QG"=c("lnL","prior", "h2","P","w2","Ne","k","ntheta"), "OU"=c("lnL","prior","alpha","sig2","k","ntheta"),"OUrepar"=c("lnL","prior","halflife","Vy","k","ntheta"))
  summat <- matrix(unlist(object[parorder]),ncol=length(parorder))
  colnames(summat) <- parorder
  # Root optimum is the first element of each sample's theta vector.
  summat <- cbind(summat, "root"=sapply(object$theta,function(x) x[1]))
  sum.1vars <- summary(mcmc(summat))
  # Pooled summary over every optimum in every sample.
  sum.theta <- summary(mcmc(unlist(object$theta)))
  statistics <- rbind(cbind(sum.1vars$statistics, "Effective Size" = effectiveSize(summat)),"all theta"=c(sum.theta$statistics[1:2],rep(NA,3)))
  cat("\n\nSummary statistics for parameters:\n")
  print(statistics, ...)
  # Per-branch posterior probabilities of holding a shift (post-burnin only,
  # because 'object' was truncated above, hence Lposterior burnin = 0).
  Lpost <- Lposterior(object, tree)
  Lpost.sorted <- Lpost[order(Lpost[,1],decreasing=TRUE),]
  cat("\n\nBranches with posterior probabilities higher than 0.1:\n")
  print(Lpost.sorted[Lpost.sorted[,1]>0.1,], ...)
  out <- list(statistics=statistics, branch.posteriors=Lpost)
  invisible(out)
}
#' Generate an overparameterized starting point for the MCMC
#'
#' This function takes a prior function and generates a starting point that can be entered for \code{startpar}
#' in the function \code{bayou.mcmc}
#'
#' @param prior A prior function
#' @param tree A phylogenetic tree of class 'phylo'
#' @param dat A named data vector
#'
#' @details This function creates an "overparameterized" starting point for running the mcmc. It gives n-1 tips a unique
#' optimum close to the actual data value. This is useful if you expect steep likelihood peaks that may be hard to find,
#' as these often will be easier to access from this overparameterized model. Generally, the overparameterization will have
#' a very high likelihood and a very low prior.
overparameterize.startingPoint <- function(prior, tree, dat){
  # Postorder the tree and align the data vector with the tip labels.
  tree <- reorder(tree, "postorder")
  dat <- dat[tree$tip.label]
  model <- attributes(prior)$model  # parameterization implied by the prior (not used further here)
  ntips <- length(tree$tip.label)
  # Draw a valid state from the prior, then overwrite its shift configuration below.
  startpar <- priorSim(prior, tree, plot=FALSE, nsim=1)[[1]][[1]]
  # One optimum per tip, jittered slightly around the observed values so the
  # start is essentially at the data (high likelihood, typically low prior).
  theta <- rnorm(ntips, dat, 1e-5)
  startpar$theta <- theta
  # Place a shift at the base (loc = 0) of every terminal branch whose tip is
  # not the last tip, giving ntips-1 shifts and ntips regimes in total.
  startpar$k <- ntips-1
  startpar$sb <- which(tree$edge[,2] < ntips)
  startpar$loc <- rep(0, startpar$k)
  startpar$t2 <- 2:ntips
  startpar$ntheta <- startpar$k+1
  # Side effect: draws the starting regime painting on the current device.
  plotBayoupars(startpar, tree, col=setNames(rainbow(startpar$ntheta), 1:startpar$ntheta))
  return(startpar)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drive_ls.R
\name{drive_ls}
\alias{drive_ls}
\title{List contents of a folder.}
\usage{
drive_ls(path = "~/", pattern = NULL, type = NULL, ...)
}
\arguments{
\item{path}{Specifies a single folder on Google Drive whose contents you want
to list. Can be an actual path (character), a file id marked with
\code{\link[=as_id]{as_id()}}, or a \link{dribble}.}
\item{pattern}{Character. If provided, only the files whose names match this
regular expression are returned. This is implemented locally on the results
returned by the API.}
\item{type}{Character. If provided, only files of this type will be returned.
Can be anything that \code{\link[=drive_mime_type]{drive_mime_type()}} knows how to handle. This is
processed by googledrive and sent as a query parameter.}
\item{...}{Query parameters to pass along to the API query.}
}
\value{
An object of class \code{\link{dribble}}, a tibble with one row per
file.
}
\description{
List the contents of a folder on Google Drive, nonrecursively. Optionally,
filter for a regex in the file names and/or on MIME type. This is a thin
wrapper around \code{\link[=drive_find]{drive_find()}}.
}
\examples{
\dontrun{
## get contents of the folder 'abc' (non-recursive)
drive_ls("abc")
## get contents of folder 'abc' that contain the
## letters 'def'
drive_ls(path = "abc", pattern = "def")
## get all Google spreadsheets in folder 'abc'
## that contain the letters 'def'
drive_ls(path = "abc", pattern = "def", type = "spreadsheet")
}
}
| /man/drive_ls.Rd | no_license | dy-kim/googledrive | R | false | true | 1,558 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drive_ls.R
\name{drive_ls}
\alias{drive_ls}
\title{List contents of a folder.}
\usage{
drive_ls(path = "~/", pattern = NULL, type = NULL, ...)
}
\arguments{
\item{path}{Specifies a single folder on Google Drive whose contents you want
to list. Can be an actual path (character), a file id marked with
\code{\link[=as_id]{as_id()}}, or a \link{dribble}.}
\item{pattern}{Character. If provided, only the files whose names match this
regular expression are returned. This is implemented locally on the results
returned by the API.}
\item{type}{Character. If provided, only files of this type will be returned.
Can be anything that \code{\link[=drive_mime_type]{drive_mime_type()}} knows how to handle. This is
processed by googledrive and sent as a query parameter.}
\item{...}{Query parameters to pass along to the API query.}
}
\value{
An object of class \code{\link{dribble}}, a tibble with one row per
file.
}
\description{
List the contents of a folder on Google Drive, nonrecursively. Optionally,
filter for a regex in the file names and/or on MIME type. This is a thin
wrapper around \code{\link[=drive_find]{drive_find()}}.
}
\examples{
\dontrun{
## get contents of the folder 'abc' (non-recursive)
drive_ls("abc")
## get contents of folder 'abc' that contain the
## letters 'def'
drive_ls(path = "abc", pattern = "def")
## get all Google spreadsheets in folder 'abc'
## that contain the letters 'def'
drive_ls(path = "abc", pattern = "def", type = "spreadsheet")
}
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/score_most.R
\name{score_most}
\alias{score_most}
\title{This returns the team who has been involved in the most games of each scoreline}
\usage{
score_most(df, score)
}
\arguments{
\item{df}{df}
\item{score}{score}
}
\description{
This returns the team who has been involved in the most games of each scoreline
}
\examples{
df <- engsoccerdata2
score_most(df, "6-6") # Arsenal 1 Charlton Athletic 1 Leicester City 1 Middlesbrough 1
score_most(df, "5-5") # Blackburn Rovers 3 West Ham United 3
score_most(df, "4-4") # Tottenham Hotspur 14
score_most(df, "3-3") # Manchester City 68 Wolverhampton Wanderers 68
score_most(df, "2-2") # Leicester City 274
score_most(df, "1-1") # Sheffield United 560
score_most(df, "0-0") # Notts County 363
score_most(df, "1-0") # Birmingham City 795 - most involved in 1-0 or 0-1 games
score_most(df, "8-0") # Arsenal 7 - most involved in 8-0 or 0-8 games
score_most(df, "9-1") # Notts County 4 - most involved in 9-1 or 1-9 games
}
| /man/score_most.Rd | no_license | amunnelly/engsoccerdata | R | false | false | 1,061 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/score_most.R
\name{score_most}
\alias{score_most}
\title{This returns the team who has been involved in the most games of each scoreline}
\usage{
score_most(df, score)
}
\arguments{
\item{df}{df}
\item{score}{score}
}
\description{
This returns the team who has been involved in the most games of each scoreline
}
\examples{
df <- engsoccerdata2
score_most(df, "6-6") # Arsenal 1 Charlton Athletic 1 Leicester City 1 Middlesbrough 1
score_most(df, "5-5") # Blackburn Rovers 3 West Ham United 3
score_most(df, "4-4") # Tottenham Hotspur 14
score_most(df, "3-3") # Manchester City 68 Wolverhampton Wanderers 68
score_most(df, "2-2") # Leicester City 274
score_most(df, "1-1") # Sheffield United 560
score_most(df, "0-0") # Notts County 363
score_most(df, "1-0") # Birmingham City 795 - most involved in 1-0 or 0-1 games
score_most(df, "8-0") # Arsenal 7 - most involved in 8-0 or 0-8 games
score_most(df, "9-1") # Notts County 4 - most involved in 9-1 or 1-9 games
}
|
## ---- message=TRUE, warning=FALSE, include=FALSE -----------------------------
# Setup chunk: clear the workspace and attach the plotting/data packages.
# NOTE(review): rm(list = ls()) only clears the global environment, not
# packages or options; kept for parity with the original Kaggle kernel.
rm(list = ls());
library(ggplot2);
library(gridExtra);
library(dplyr);
# Data directory: local layout first, falling back to Kaggle's /input layout.
path <- "../data/";
if(!dir.exists(path)) {
  path <- "../input/"; # changing path to Kaggle's environment
}
## ---- echo=FALSE, message=TRUE, warning=FALSE --------------------------------
# read raw data
# Read one CSV from the data directory (depends on the global 'path').
load_data <- function(file) {
  return (read.csv(paste0(path, file)));
};
train <- load_data("train.csv");
test <- load_data("test.csv");
## ---- message=TRUE, warning=FALSE, include=FALSE -----------------------------
# combine test and train for pre-processing
# 'Survived' is unknown in the test set, so NA marks test rows in 'comb'.
test$Survived <- rep(NA, nrow(test));
comb <- rbind(train, test);
## ---- echo=TRUE, message=TRUE, warning=FALSE ---------------------------------
# get index list for future re-split
train_index <- comb$PassengerId[!is.na(comb$Survived)]
test_index <- comb$PassengerId[ is.na(comb$Survived)]
comb$Set <- ifelse(comb$PassengerId %in% train_index, "Train", "Test");
## ---- echo=TRUE, message=TRUE, warning=FALSE ---------------------------------
# plot survival per gender in train set
# Stack "Total" and "Survived" subsets so both bars appear per gender.
dftotal <- comb %>% filter(Survived==0| Survived==1) %>% select(Survived, Sex, Set)
dfsurvived <- comb %>% filter(Survived==1) %>% select(Survived, Sex, Set)
dftotal$count <- "Total"
dfsurvived$count <- "Survived"
df <- rbind(dftotal, dfsurvived);
ggplot(df, aes(Sex, fill = count)) + geom_bar(position="dodge") + labs(title="Survival per Gender in Train Set") + xlab("Gender")
## -----------------------------------------------------------------------------
# Historical totals for the full disaster (all 2224 aboard).
total <- 2224;
killed <- 1502;
## -----------------------------------------------------------------------------
true_survival_rate <- 100*(total-killed)/total;
## -----------------------------------------------------------------------------
# Per-gender survival likelihood estimated from the train set.
pop_count <- comb %>% filter(Set=="Train") %>% count(Sex);
pop_surv_count <- comb %>% filter(Set=="Train" & Survived==1) %>% count(Sex);
surv_likelihood <- pop_surv_count$n/pop_count$n;
# NOTE(review): surv_likelihood is unnamed and indexed by the Sex factor's
# integer codes; this relies on count() returning levels in sorted order
# (female=1, male=2) -- confirm if the factor levels ever change.
comb$Prediction <- ifelse(surv_likelihood[comb$Sex] > 0.5, 1, 0);
## -----------------------------------------------------------------------------
# Train set survival rate
train_survival_rate <- 100*sum(pop_surv_count$n)/sum(pop_count$n);
print(sprintf("Train set survival rate %2.1f percent", train_survival_rate));
print(sprintf("Survival rate overestimation in train set = %2.1f percent", train_survival_rate - true_survival_rate));
## ---- echo=FALSE, message=TRUE, warning=FALSE --------------------------------
# Gender composition of both sets and per-gender survival rates in the train set.
map.gen <- c("female"=1, "male"=2);
# Per-row weight so each bar shows a percentage of its own set.
weight <- ifelse( comb$Set=="Train", 100/length(train_index), 100/length(test_index));
ggplot(comb, aes( x = Sex, fill=Set)) + geom_bar(position="dodge", aes(weight=weight)) +
  labs(title="Gender Distribution in Train and Test Sets", x="Gender", y="Percent");
print(sprintf("Female to population ratio in train set = %2.1f percent", 100*pop_count$n[map.gen["female"]]/sum(pop_count$n)));
print(sprintf("Male to population ratio in train set = %2.1f percent", 100*pop_count$n[map.gen["male"]]/sum(pop_count$n)));
train_gender_survived <- table(train$Sex[train$Survived==1]);
print(sprintf("Female survival rate in train set = %2.1f percent", 100*pop_surv_count$n[map.gen["female"]]/pop_count$n[map.gen["female"]]));
# BUG FIX: the male survival rate was previously divided by the *female*
# population count, overstating the reported male survival rate.
print(sprintf("Male survival rate in train set = %2.1f percent", 100*pop_surv_count$n[map.gen["male"]]/pop_count$n[map.gen["male"]]));
## ---- echo=FALSE, message=TRUE, warning=FALSE --------------------------------
# Test-set gender composition and the expected score of the gender-only model.
# NOTE(review): comb[test_index,] indexes rows by PassengerId, which works only
# because PassengerId equals the row number in 'comb' -- confirm if IDs change.
test_pop_count <- comb[test_index,] %>% count(Sex)
# calculate likelihood of survival per sex
print(sprintf("Female to population ratio in test set = %2.1f percent", 100*test_pop_count$n[map.gen["female"]]/sum(test_pop_count$n)));
print(sprintf("Male to population ratio in test set = %2.1f percent", 100*test_pop_count$n[map.gen["male"]]/sum(test_pop_count$n)));
# Total number of expected survivors
test_expected_surv <- test_pop_count$n*surv_likelihood;
## -----------------------------------------------------------------------------
# Expected accuracy of always predicting the majority outcome per gender.
accuracy <- ifelse(surv_likelihood>0.5, surv_likelihood, 1-surv_likelihood)
print(sprintf("Optimal gender only predicted score %2.4f ", sum(test_pop_count$n*accuracy)/sum(test_pop_count$n)))
print(sprintf("Actual leader board (LB) score on the test set %2.4f ", 0.76555));
# FIX: corrected "overstimation" typo in the reported message.
print(sprintf("Train set relative overestimation on LB %2.4f ", (sum(test_pop_count$n*accuracy)/sum(test_pop_count$n)/0.76555)));
## -----------------------------------------------------------------------------
# Write the gender-only submission file.
submit <- data.frame(PassengerId = test_index, Survived = comb$Prediction[test_index]);
write.csv(submit, file = "gender_only.csv", row.names = FALSE, quote = FALSE)
| /r/kernels/pliptor-optimal-titanic-for-gender-only-0-7655/script/optimal-titanic-for-gender-only-0-7655.R | no_license | helenaK/trustworthy-titanic | R | false | false | 6,667 | r | ## ---- message=TRUE, warning=FALSE, include=FALSE---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
rm(list = ls());
library(ggplot2);
library(gridExtra);
library(dplyr);
path <- "../data/";
if(!dir.exists(path)) {
path <- "../input/"; # changing path to Kaggle's environment
}
## ---- echo=FALSE, message=TRUE, warning=FALSE------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# read raw data
load_data <- function(file) {
return (read.csv(paste0(path, file)));
};
train <- load_data("train.csv");
test <- load_data("test.csv");
## ---- message=TRUE, warning=FALSE, include=FALSE---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# combine test and train for pre-processing
test$Survived <- rep(NA, nrow(test));
comb <- rbind(train, test);
## ---- echo=TRUE, message=TRUE, warning=FALSE-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# get index list for future re-split
train_index <- comb$PassengerId[!is.na(comb$Survived)]
test_index <- comb$PassengerId[ is.na(comb$Survived)]
comb$Set <- ifelse(comb$PassengerId %in% train_index, "Train", "Test");
## ---- echo=TRUE, message=TRUE, warning=FALSE-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# plot survival per gender in train set
dftotal <- comb %>% filter(Survived==0| Survived==1) %>% select(Survived, Sex, Set)
dfsurvived <- comb %>% filter(Survived==1) %>% select(Survived, Sex, Set)
dftotal$count <- "Total"
dfsurvived$count <- "Survived"
df <- rbind(dftotal, dfsurvived);
ggplot(df, aes(Sex, fill = count)) + geom_bar(position="dodge") + labs(title="Survival per Gender in Train Set") + xlab("Gender")
# Historical Titanic figures: people aboard and people killed.
total <- 2224
killed <- 1502
# True survival rate of the whole voyage, in percent.
true_survival_rate <- 100 * (total - killed) / total
# Per-gender population and survivor counts within the train set.
pop_count <- comb %>% filter(Set == "Train") %>% count(Sex)
pop_surv_count <- comb %>% filter(Set == "Train" & Survived == 1) %>% count(Sex)
# Empirical survival likelihood per gender, ordered as count() returns (female, male).
surv_likelihood <- pop_surv_count$n / pop_count$n
# Majority-vote prediction per passenger.
# NOTE(review): indexing surv_likelihood by comb$Sex assumes Sex is a factor
# (integer codes aligned with the count() ordering above) — confirm under R >= 4.0
# where read.csv no longer creates factors by default.
comb$Prediction <- ifelse(surv_likelihood[comb$Sex] > 0.5, 1, 0)
# Train set survival rate
train_survival_rate <- 100 * sum(pop_surv_count$n) / sum(pop_count$n)
print(sprintf("Train set survival rate %2.1f percent", train_survival_rate))
print(sprintf("Survival rate overestimation in train set = %2.1f percent", train_survival_rate - true_survival_rate))
# Map gender labels to their row positions in the per-gender count tables.
map.gen <- c("female"=1, "male"=2)
# Per-passenger weight so each bar sums to 100 percent within its own set.
weight <- ifelse(comb$Set == "Train", 100/length(train_index), 100/length(test_index))
ggplot(comb, aes(x = Sex, fill = Set)) + geom_bar(position="dodge", aes(weight=weight)) +
  labs(title="Gender Distribution in Train and Test Sets", x="Gender", y="Percent")
print(sprintf("Female to population ratio in train set = %2.1f percent", 100*pop_count$n[map.gen["female"]]/sum(pop_count$n)))
print(sprintf("Male to population ratio in train set = %2.1f percent", 100*pop_count$n[map.gen["male"]]/sum(pop_count$n)))
train_gender_survived <- table(train$Sex[train$Survived==1]) # NOTE(review): appears unused below
print(sprintf("Female survival rate in train set = %2.1f percent", 100*pop_surv_count$n[map.gen["female"]]/pop_count$n[map.gen["female"]]))
# BUG FIX: the male survival rate was divided by the *female* population count;
# the denominator must be the male population count.
print(sprintf("Male survival rate in train set = %2.1f percent", 100*pop_surv_count$n[map.gen["male"]]/pop_count$n[map.gen["male"]]))
# Gender composition of the test set (test_index values coincide with comb row numbers).
test_pop_count <- comb[test_index,] %>% count(Sex)
print(sprintf("Female to population ratio in test set = %2.1f percent", 100*test_pop_count$n[map.gen["female"]]/sum(test_pop_count$n)))
print(sprintf("Male to population ratio in test set = %2.1f percent", 100*test_pop_count$n[map.gen["male"]]/sum(test_pop_count$n)))
# Total number of expected survivors per gender in the test set.
test_expected_surv <- test_pop_count$n * test_pop_count$n * 0 + test_pop_count$n * surv_likelihood # FIXME placeholder
# Expected per-gender accuracy of the majority-class rule.
accuracy <- ifelse(surv_likelihood > 0.5, surv_likelihood, 1 - surv_likelihood)
print(sprintf("Optimal gender only predicted score %2.4f ", sum(test_pop_count$n*accuracy)/sum(test_pop_count$n)))
print(sprintf("Actual leader board (LB) score on the test set %2.4f ", 0.76555))
# FIX: corrected the spelling of "overestimation" in the printed message.
print(sprintf("Train set relative overestimation on LB %2.4f ", (sum(test_pop_count$n*accuracy)/sum(test_pop_count$n)/0.76555)))
# Write the submission; FIX: spell out FALSE instead of the reassignable F,
# and use <- consistently with the rest of the script.
submit <- data.frame(PassengerId = test_index, Survived = comb$Prediction[test_index])
write.csv(submit, file = paste0("gender_only.csv"), row.names = FALSE, quote = FALSE)
|
# Simulation tools to estimate the distribution of basic log-linear estimates
#' Simulate basic log-linear CRC experiments
#'
#' Replicate and summarize the generation and log-linear analysis of data sets that are consistent with
#' arbitrary log-linear models
#'
#' @param n.grid A vector of positive integers, by default \code{c(100,300,900,2700)}. Each integer is the number of
#' population units that are observed in a corresponding collection of simulations.
#' @param n.reps The number of replicates for each integer in \code{n.grid}, i.e., for each population size of interest.
#' @param u.vec A vector of log-linear parameters, excluding the intercept term. The length of the vector and the order
#' of its terms must correspond to the column names of the design matrix produced by \code{make.design.matrix(k)},
#' where \code{k} is the number of lists.
#' @param p0 Optional: a number in \code{(0,1)}, the fraction of the population that is to be undetected. See details.
#' @param models See \code{\link{lllcrc}}
#' @param ic See \code{\link{lllcrc}}
#' @param cell.adj See \code{\link{lllcrc}}
#' @param averaging \code{\link{lllcrc}}
#' @param fixed.sample.size Logical: If \code{TRUE}, the simulations fix the number of units that are detected, defining the true
#' population size such that the number of units detected is equal to its expectation. If \code{FALSE},
#' the observed population size is variable, such that the integers in \code{n.grid}
#' indicate only the expectations of the corresponding simulation sizes.
#' @details \code{u.vec}, together with the constraint that the multinomial probabilities sum to 1,
#' uniquely determines the unspecified intercept term. Specifying \code{p0} overdetermines
#' the intercept term. We rectify this overspecification by adjusting all main effects by the same
#' additive adjustment \code{a}, where the unique value of \code{a} is approximated with numerical methods.
#'
#' Once the log-linear terms are fully specified, we perform multinomial draws to simulate a CRC experiment.
#' We include the zero cell in the multinomial draw only if \code{fixed.sample.size = TRUE}.
#'
#' On each replicate, the data log-linear model search according to the parameters \code{models},
#' \code{ic}, \code{cell.adj}, and \code{averaging} produces an estimate of the missing cell. The
#' main matrix \code{res} of simulation results stores the ratios of the estimated missing cell over
#' the 'true' missing cell.
#' @return A list of class \code{llsim}, for "log-linear simulations". The list contains the set of multinomial
#' capture pattern probabilities \code{p}, the matrix \code{res} of simulation results, and many of the
#' arguments to the \code{llm.sim}.
#' @author Zach Kurtz
#' @examples
#' \dontrun{
#' ## A basic simulation with four lists.
#' # Begin by specifying the vector of log-linear parameters.
#' # The parameters must match the design matrix:
#' names(make.design.matrix(k=4))
#' u.vec = initialize.u.vec(k=4)
#' u.vec[5:10] = 2
#' ## Run the simulation with an adjustment to the main effects in
#' # u.vec such that the probability of nondetection is 0.5.
#' sim = llm.sim(n.grid = c(100,300,900,2700), n.reps = 10, u.vec,
#' p0 = 0.5, ic = "BIC", cell.adj = FALSE)
#' # View the results
#' plot(sim)
#' }
#' @export llm.sim
llm.sim = function(n.grid = c(100,300,900,2700), n.reps = 100, u.vec,
	p0 = NULL, models = NULL, ic = "BICpi", cell.adj = TRUE, averaging = FALSE, fixed.sample.size = FALSE)
{
	# Infer the number of lists k from the length of u.vec (6, 14, or 30 terms).
	if(length(u.vec) == 6){k = 3
	}else if(length(u.vec) == 14){k = 4
	}else if(length(u.vec) == 30){k = 5
	}else{ stop("The given u.vec is not compatible with k = 3, 4, or 5")
	}
	# Default model space: all hierarchical term sets for k lists.
	if(is.null(models)) models = make.hierarchical.term.sets(k)
	des = data.matrix(make.design.matrix(k))
	if(!identical(names(u.vec), colnames(des))){
		stop(paste("u.vec must be named with the same names and name order given in the\n",
			"biggest model returned by make.hierarchical.term.sets(k)"))}
	# If p0 is given, shift all main effects by a common amount so the implied
	# nondetection probability equals p0 (the intercept is otherwise overdetermined).
	if(!is.null(p0)){
		u.vec = zero.inflate(u.vec, p0, k, des)
	}
	p = get.p.from.u(u.vec, des, k)
	# Set up the design frame and the results matrix (one column per sample size).
	des = data.frame(des)
	des$c = rep(NA, nrow(des))
	res = matrix(NA, nrow = n.reps, ncol = length(n.grid))
	colnames(res) = paste("n=", as.character(n.grid), sep = "")
	s.grid = n.grid
	# If we're not using a fixed observed sample size, we set the true population size
	# to satisfy E(observed) = n.grid, approximately
	if(!fixed.sample.size) s.grid = round(n.grid/(1-p$p0))
	# FIX: seq_along() instead of 1:length() (robust to an empty n.grid);
	# also dropped an unused loop-local variable.
	for(i in seq_along(n.grid)) {
		res[,i] = replicate(n.reps, one.llm.sim(size = s.grid[i], k, p, des,
			models, ic, cell.adj, averaging, fixed.sample.size))
	}
	out = list(p = p, res = res, n.grid = n.grid, u.vec = u.vec, ic = ic, cell.adj = cell.adj,
		averaging = averaging, fixed.sample.size = fixed.sample.size)
	class(out) = "llsim" # log-linear simulation
	return(out)
}
#' Initialize log-linear parameters
#'
#' A tool for setting up the simulations of \code{\link{llm.sim}}.
#'
#' @param k The number of lists to be modeled
#' @return A vector of log-linear parameters, all initialized to zero, corresponding to the columns of
#' the most general design matrix (but no Rasch terms).
#' @author Zach Kurtz
#' @export initialize.u.vec
initialize.u.vec = function(k)
{
	# Pull the term names from the most general design matrix for k lists,
	# then return a zero vector carrying those names.
	term.names = colnames(data.matrix(make.design.matrix(k)))
	setNames(rep(0, length(term.names)), term.names)
}
# Run one simulated CRC experiment and return the ratio of the estimated
# missing cell to the actual (or expected) missing cell.
one.llm.sim = function(size, k, p, des, models, ic, cell.adj, averaging, fixed.sample.size = FALSE)
{
	# Multinomial sampling. With a fixed observed sample size, the zero cell is
	# excluded from the draw and c0 is its expected count; otherwise the zero
	# cell is drawn along with the observable cells.
	if(fixed.sample.size){
		des$c = rmultinom(1, size, p$p.obs)
		c0 = size*p$p0/(1-p$p0)
	}else{
		p.vec = c(as.numeric(p$p.obs), p$p0)
		draws = rmultinom(1, size, p.vec)
		des$c = draws[-length(draws),]
		c0 = draws[length(draws)]
	}
	# Optionally, the small-cell adjustment.
	if(cell.adj) des$c = des$c + 1/2^(k-1)
	# Log-linear modelling: score every candidate model, then predict the
	# missing cell either by IC-weighted averaging or by the single best model.
	icd = ic.all(models, ddat = des, ic, normalized = FALSE)
	if(averaging){
		pred = sum(icd[, "est"] * icd[, "wghts"])
	}else{
		# FIX: removed an unused assignment (best.terms) from this branch.
		winner = which.min(icd[, "score"])
		pred = icd[winner, "est"]
	}
	# Compute the ratio of the estimated missing cell to the actual or expected missing cell
	return(pred/c0)
}
get.p.from.u = function(u.vec, des, k)
{
	# Unnormalized weights for the observable capture patterns, labeled by
	# their 0/1 capture-pattern strings.
	cell.wt = t(exp(des %*% u.vec))
	colnames(cell.wt) = apply(des[, 1:k], 1, paste, collapse = "")
	# Implied weight of the unobservable (all-zero) cell.
	zero.wt = saturated.local(cell.wt)
	# Normalize everything into probabilities.
	total.wt = zero.wt + sum(cell.wt)
	list(p0 = zero.wt/total.wt, p.obs = cell.wt/total.wt)
}
zero.inflate = function(u.vec, p0, k, des)
{
	# Squared gap between the target p0 and the p0 implied by shifting all k
	# main effects by a common amount a.
	squared.gap = function(a){
		shifted = u.vec
		shifted[seq_len(k)] = shifted[seq_len(k)] + a
		(get.p.from.u(shifted, des, k)$p0 - p0)^2
	}
	# Numerically find the shift that matches p0, then apply it.
	best.shift = optimize(f = squared.gap, lower = -10, upper = 10)$minimum
	u.vec[seq_len(k)] = u.vec[seq_len(k)] + best.shift
	u.vec
}
#' Plot the output of \code{\link{llm.sim}}
#'
#' @param x An object of class \code{llsim}
#' @param y.top The upper bound of the plotting window
#' @param probs The interval width, in terms of quantiles
#' @param main Plot title
#' @param ... Additional parameters to be passed into \code{plot}
#' @author Zach Kurtz
#' @method plot llsim
#' @export
plot.llsim = function(x, y.top = 2, probs = c(0.25, 0.75), main = NULL, ...)
{
	# One vertical quantile interval plus a mean point per simulated sample
	# size; the dashed line at 1 marks an unbiased missing-cell estimate.
	if(is.null(main)) main = paste(nrow(x$res), "replications")
	plot(c(0,0), c(0,0), type = "n", bty = "n", ylim = c(0, y.top), xlim = c(0.5,length(x$n.grid)+0.5),
		ylab = "c0 estimated divided by \"truth\"", xaxt = "n", xlab = "Number of observed units", main = main)
	abline(h = 1, lty = 2)
	# FIX: seq_along() instead of 1:length() (safe if n.grid were empty).
	for(i in seq_along(x$n.grid)){
		qt = quantile(x$res[,i], probs, na.rm = TRUE)
		mn = mean(x$res[,i], na.rm = TRUE)
		segments(x0 = i, x1 = i, y0 = qt[1], y1 = qt[2])
		points(x = i, y = mn, pch = 16, cex = 0.8)
		text(x = i, y = 0, labels = colnames(x$res)[i])
	}
}
| /R/llsimulate.R | permissive | zkurtz/lllcrc | R | false | false | 7,994 | r | # Simulation tools to estimate the distribution of basic log-linear estimates
#' Simulate basic log-linear CRC experiments
#'
#' Replicate and summarize the generation and log-linear analysis of data sets that are consistent with
#' arbitrary log-linear models
#'
#' @param n.grid A vector of positive integers, by default \code{c(100,300,900,2700)}. Each integer is the number of
#' population units that are observed in a corresponding collection of simulations.
#' @param n.reps The number of replicates for each integer in \code{n.grid}, i.e., for each population size of interest.
#' @param u.vec A vector of log-linear parameters, excluding the intercept term. The length of the vector and the order
#' of its terms must correspond to the column names of the design matrix produced by \code{make.design.matrix(k)},
#' where \code{k} is the number of lists.
#' @param p0 Optional: a number in \code{(0,1)}, the fraction of the population that is to be undetected. See details.
#' @param models See \code{\link{lllcrc}}
#' @param ic See \code{\link{lllcrc}}
#' @param cell.adj See \code{\link{lllcrc}}
#' @param averaging \code{\link{lllcrc}}
#' @param fixed.sample.size Logical: If \code{TRUE}, the simulations fix the number of units that are detected, defining the true
#' population size such that the number of units detected is equal to its expectation. If \code{FALSE},
#' the observed population size is variable, such that the integers in \code{n.grid}
#' indicate only the expectations of the corresponding simulation sizes.
#' @details \code{u.vec}, together with the constraint that the multinomial probabilities sum to 1,
#' uniquely determines the unspecified intercept term. Specifying \code{p0} overdetermines
#' the intercept term. We rectify this overspecification by adjusting all main effects by the same
#' additive adjustment \code{a}, where the unique value of \code{a} is approximated with numerical methods.
#'
#' Once the log-linear terms are fully specified, we perform multinomial draws to simulate a CRC experiment.
#' We include the zero cell in the multinomial draw only if \code{fixed.sample.size = TRUE}.
#'
#' On each replicate, the data log-linear model search according to the parameters \code{models},
#' \code{ic}, \code{cell.adj}, and \code{averaging} produces an estimate of the missing cell. The
#' main matrix \code{res} of simulation results stores the ratios of the estimated missing cell over
#' the 'true' missing cell.
#' @return A list of class \code{llsim}, for "log-linear simulations". The list contains the set of multinomial
#' capture pattern probabilities \code{p}, the matrix \code{res} of simulation results, and many of the
#' arguments to the \code{llm.sim}.
#' @author Zach Kurtz
#' @examples
#' \dontrun{
#' ## A basic simulation with four lists.
#' # Begin by specifying the vector of log-linear parameters.
#' # The parameters must match the design matrix:
#' names(make.design.matrix(k=4))
#' u.vec = initialize.u.vec(k=4)
#' u.vec[5:10] = 2
#' ## Run the simulation with an adjustment to the main effects in
#' # u.vec such that the probability of nondetection is 0.5.
#' sim = llm.sim(n.grid = c(100,300,900,2700), n.reps = 10, u.vec,
#' p0 = 0.5, ic = "BIC", cell.adj = FALSE)
#' # View the results
#' plot(sim)
#' }
#' @export llm.sim
llm.sim = function(n.grid = c(100,300,900,2700), n.reps = 100, u.vec,
	p0 = NULL, models = NULL, ic = "BICpi", cell.adj = TRUE, averaging = FALSE, fixed.sample.size = FALSE)
{
	# Infer the number of lists k from the length of u.vec (6, 14, or 30 terms).
	if(length(u.vec) == 6){k = 3
	}else if(length(u.vec) == 14){k = 4
	}else if(length(u.vec) == 30){k = 5
	}else{ stop("The given u.vec is not compatible with k = 3, 4, or 5")
	}
	# Default model space: all hierarchical term sets for k lists.
	if(is.null(models)) models = make.hierarchical.term.sets(k)
	des = data.matrix(make.design.matrix(k))
	if(!identical(names(u.vec), colnames(des))){
		stop(paste("u.vec must be named with the same names and name order given in the\n",
			"biggest model returned by make.hierarchical.term.sets(k)"))}
	# If p0 is given, shift all main effects by a common amount so the implied
	# nondetection probability equals p0 (the intercept is otherwise overdetermined).
	if(!is.null(p0)){
		u.vec = zero.inflate(u.vec, p0, k, des)
	}
	p = get.p.from.u(u.vec, des, k)
	# Set up the design frame and the results matrix (one column per sample size).
	des = data.frame(des)
	des$c = rep(NA, nrow(des))
	res = matrix(NA, nrow = n.reps, ncol = length(n.grid))
	colnames(res) = paste("n=", as.character(n.grid), sep = "")
	s.grid = n.grid
	# If we're not using a fixed observed sample size, we set the true population size
	# to satisfy E(observed) = n.grid, approximately
	if(!fixed.sample.size) s.grid = round(n.grid/(1-p$p0))
	# FIX: seq_along() instead of 1:length() (robust to an empty n.grid);
	# also dropped an unused loop-local variable.
	for(i in seq_along(n.grid)) {
		res[,i] = replicate(n.reps, one.llm.sim(size = s.grid[i], k, p, des,
			models, ic, cell.adj, averaging, fixed.sample.size))
	}
	out = list(p = p, res = res, n.grid = n.grid, u.vec = u.vec, ic = ic, cell.adj = cell.adj,
		averaging = averaging, fixed.sample.size = fixed.sample.size)
	class(out) = "llsim" # log-linear simulation
	return(out)
}
#' Initialize log-linear parameters
#'
#' A tool for setting up the simulations of \code{\link{llm.sim}}.
#'
#' @param k The number of lists to be modeled
#' @return A vector of log-linear parameters, all initialized to zero, corresponding to the columns of
#' the most general design matrix (but no Rasch terms).
#' @author Zach Kurtz
#' @export initialize.u.vec
initialize.u.vec = function(k)
{
	# Pull the term names from the most general design matrix for k lists,
	# then return a zero vector carrying those names.
	term.names = colnames(data.matrix(make.design.matrix(k)))
	setNames(rep(0, length(term.names)), term.names)
}
# Run one simulated CRC experiment and return the ratio of the estimated
# missing cell to the actual (or expected) missing cell.
one.llm.sim = function(size, k, p, des, models, ic, cell.adj, averaging, fixed.sample.size = FALSE)
{
	# Multinomial sampling. With a fixed observed sample size, the zero cell is
	# excluded from the draw and c0 is its expected count; otherwise the zero
	# cell is drawn along with the observable cells.
	if(fixed.sample.size){
		des$c = rmultinom(1, size, p$p.obs)
		c0 = size*p$p0/(1-p$p0)
	}else{
		p.vec = c(as.numeric(p$p.obs), p$p0)
		draws = rmultinom(1, size, p.vec)
		des$c = draws[-length(draws),]
		c0 = draws[length(draws)]
	}
	# Optionally, the small-cell adjustment.
	if(cell.adj) des$c = des$c + 1/2^(k-1)
	# Log-linear modelling: score every candidate model, then predict the
	# missing cell either by IC-weighted averaging or by the single best model.
	icd = ic.all(models, ddat = des, ic, normalized = FALSE)
	if(averaging){
		pred = sum(icd[, "est"] * icd[, "wghts"])
	}else{
		# FIX: removed an unused assignment (best.terms) from this branch.
		winner = which.min(icd[, "score"])
		pred = icd[winner, "est"]
	}
	# Compute the ratio of the estimated missing cell to the actual or expected missing cell
	return(pred/c0)
}
get.p.from.u = function(u.vec, des, k)
{
	# Unnormalized weights for the observable capture patterns, labeled by
	# their 0/1 capture-pattern strings.
	cell.wt = t(exp(des %*% u.vec))
	colnames(cell.wt) = apply(des[, 1:k], 1, paste, collapse = "")
	# Implied weight of the unobservable (all-zero) cell.
	zero.wt = saturated.local(cell.wt)
	# Normalize everything into probabilities.
	total.wt = zero.wt + sum(cell.wt)
	list(p0 = zero.wt/total.wt, p.obs = cell.wt/total.wt)
}
zero.inflate = function(u.vec, p0, k, des)
{
	# Squared gap between the target p0 and the p0 implied by shifting all k
	# main effects by a common amount a.
	squared.gap = function(a){
		shifted = u.vec
		shifted[seq_len(k)] = shifted[seq_len(k)] + a
		(get.p.from.u(shifted, des, k)$p0 - p0)^2
	}
	# Numerically find the shift that matches p0, then apply it.
	best.shift = optimize(f = squared.gap, lower = -10, upper = 10)$minimum
	u.vec[seq_len(k)] = u.vec[seq_len(k)] + best.shift
	u.vec
}
#' Plot the output of \code{\link{llm.sim}}
#'
#' @param x An object of class \code{llsim}
#' @param y.top The upper bound of the plotting window
#' @param probs The interval width, in terms of quantiles
#' @param main Plot title
#' @param ... Additional parameters to be passed into \code{plot}
#' @author Zach Kurtz
#' @method plot llsim
#' @export
plot.llsim = function(x, y.top = 2, probs = c(0.25, 0.75), main = NULL, ...)
{
	# One vertical quantile interval plus a mean point per simulated sample
	# size; the dashed line at 1 marks an unbiased missing-cell estimate.
	if(is.null(main)) main = paste(nrow(x$res), "replications")
	plot(c(0,0), c(0,0), type = "n", bty = "n", ylim = c(0, y.top), xlim = c(0.5,length(x$n.grid)+0.5),
		ylab = "c0 estimated divided by \"truth\"", xaxt = "n", xlab = "Number of observed units", main = main)
	abline(h = 1, lty = 2)
	# FIX: seq_along() instead of 1:length() (safe if n.grid were empty).
	for(i in seq_along(x$n.grid)){
		qt = quantile(x$res[,i], probs, na.rm = TRUE)
		mn = mean(x$res[,i], na.rm = TRUE)
		segments(x0 = i, x1 = i, y0 = qt[1], y1 = qt[2])
		points(x = i, y = mn, pch = 16, cex = 0.8)
		text(x = i, y = 0, labels = colnames(x$res)[i])
	}
}
|
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/gwindow.R
\docType{class}
\name{gwindow}
\alias{GWindow}
\alias{GWindow-class}
\alias{gwindow}
\title{Main window constructor}
\usage{
gwindow(title = "", parent = NULL, handler = NULL, action = NULL, ...,
renderTo = NULL, width = NULL, height = NULL, ext.args = NULL)
}
\arguments{
\item{title}{Window title}
\item{parent}{One and only one gwindow per script should have no
parent specified. Otherwise, this should be a \code{gwindow}
instance.}
\item{handler}{Handler called when window is closed. (For subwindows only)}
\item{action}{action passed to handler}
\item{...}{ignored}
\item{renderTo}{Where to render window. For subwindows, this should be NULL. For main windows, this can be a DOM id or left as NULL, in which case the entire web page is used.}
\item{width}{width of a subwindow in pixels.}
\item{height}{height of a subwindow in pixels}
\item{ext.args}{extra args passed to the constructor}
}
\value{
An ExtContainer object
}
\description{
There can be more than one gwindow instance per script, but one is
special. This one is called without a \code{parent} object, which
otherwise is typically another \code{gwindow} instance. The
special window sets up the environment to store the callbacks
etc. Subwindows are possible. Simply pass a value of \code{NULL}
to the argument \code{renderTo}. This argument is used to specify
the DOM id of a \code{DIV} tag. If given, the GUI created by the
\code{gwindow} call will replace this part of the web page. If not
given, then a subwindow will be rendered.
%
The \code{visible<-} method can be used to recompute the layout. This is often useful as the last line of a script.
The \code{GWindow} class is used for windows and
subwindows. Windows in \pkg{gWidgetsWWW2} are rendered to parts of
the web page. In the simplest case, they are rendered to the
document body and are the only thing the user sees. However, one
can render to parts of a window as well. This is why we have a
\code{renderTo} argument in the constructor.
}
\details{
One of the instances on a page contains the "toplevel" object,
which routes handler requests and gives web page responses.
Subwindows are floating windows that appear on top of the web
page, like a dialog box.
The method \code{start_comet} will launch a long-poll process,
whereby the browser repeatedly queries the server for any
changes. This can be useful if one expects to launch a
long-running process and the handler that initiates this will time
out before the process is done. One needs only to add the
javascript commands to the queue.
}
\section{Methods}{
\describe{
\item{\code{do_layout()}}{Call layout method of container to recompute}
\item{\code{dump()}}{Display js_queue for debugging}
\item{\code{get_value(...)}}{Get main property, Can't query widget, so we store here}
\item{\code{set_value(value, ...)}}{Set main property, invoke change handler on change}
\item{\code{set_visible(value)}}{Show container and its siblings}
\item{\code{start_comet()}}{Turn on long-poll process for passing in commands from server}
}}
\examples{
w <- gwindow("Top level", renderTo="replaceme") ## no parent, so main one
g <- ggroup(cont=w)
b <- gbutton("click me for a subwindow", cont=g, handler=function(h,...) {
w1 <- gwindow("subwindow -- no renderTo", renderTo=NULL, parent=w)
g <- ggroup(cont=w1)
gbutton("dispose", cont=g, handler=function(h,...) dispose(w1))
})
w2 <- gwindow("render elsewhere", parent=w, renderTo="replacemetoo") ## renders to part of the page
}
| /man/gwindow.Rd | no_license | tokareff/gWidgetsWWW2 | R | false | false | 3,583 | rd | % Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/gwindow.R
\docType{class}
\name{gwindow}
\alias{GWindow}
\alias{GWindow-class}
\alias{gwindow}
\title{Main window constructor}
\usage{
gwindow(title = "", parent = NULL, handler = NULL, action = NULL, ...,
renderTo = NULL, width = NULL, height = NULL, ext.args = NULL)
}
\arguments{
\item{title}{Window title}
\item{parent}{One and only one gwindow per script should have no
parent specified. Otherwise, this should be a \code{gwindow}
instance.}
\item{handler}{Handler called when window is closed. (For subwindows only)}
\item{action}{action passed to handler}
\item{...}{ignored}
\item{renderTo}{Where to render window. For subwindows, this should be NULL. For main windows, this can be a DOM id or left as NULL, in which case the entire web page is used.}
\item{width}{width of a subwindow in pixels.}
\item{height}{height of a subwindow in pixels}
\item{ext.args}{extra args passed to the constructor}
}
\value{
An ExtContainer object
}
\description{
There can be more than one gwindow instance per script, but one is
special. This one is called without a \code{parent} object, which
otherwise is typically another \code{gwindow} instance. The
special window sets up the environment to store the callbacks
etc. Subwindows are possible. Simply pass a value of \code{NULL}
to the argument \code{renderTo}. This argument is used to specify
the DOM id of a \code{DIV} tag. If given, the GUI created by the
\code{gwindow} call will replace this part of the web page. If not
given, then a subwindow will be rendered.
%
The \code{visible<-} method can be used to recompute the layout. This is often useful as the last line of a script.
The \code{GWindow} class is used for windows and
subwindows. Windows in \pkg{gWidgetsWWW2} are rendered to parts of
the web page. In the simplest case, they are rendered to the
document body and are the only thing the user sees. However, one
can render to parts of a window as well. This is why we have a
\code{renderTo} argument in the constructor.
}
\details{
One of the instances on a page contains the "toplevel" object,
which routes handler requests and gives web page responses.
Subwindows are floating windows that appear on top of the web
page, like a dialog box.
The method \code{start_comet} will launch a long-poll process,
whereby the browser repeatedly queries the server for any
changes. This can be useful if one expects to launch a
long-running process and the handler that initiates this will time
out before the process is done. One needs only to add the
javascript commands to the queue.
}
\section{Methods}{
\describe{
\item{\code{do_layout()}}{Call layout method of container to recompute}
\item{\code{dump()}}{Display js_queue for debugging}
\item{\code{get_value(...)}}{Get main property, Can't query widget, so we store here}
\item{\code{set_value(value, ...)}}{Set main property, invoke change handler on change}
\item{\code{set_visible(value)}}{Show container and its siblings}
\item{\code{start_comet()}}{Turn on long-poll process for passing in commands from server}
}}
\examples{
w <- gwindow("Top level", renderTo="replaceme") ## no parent, so main one
g <- ggroup(cont=w)
b <- gbutton("click me for a subwindow", cont=g, handler=function(h,...) {
w1 <- gwindow("subwindow -- no renderTo", renderTo=NULL, parent=w)
g <- ggroup(cont=w1)
gbutton("dispose", cont=g, handler=function(h,...) dispose(w1))
})
w2 <- gwindow("render elsewhere", parent=w, renderTo="replacemetoo") ## renders to part of the page
}
|
###
### Nov 2006: Use list construct to make 'objects'
### sc
### sc$metamsingles,sc$metapvals.singles,sc$metams.singles, and TF singles, to be named
### pc
### pc$metampairs, pc$metapvals, pc$metams and TF pairs, to be named
##source("./utilitiesInteractions.R")
### For a metapair list, find which promoters have the matrix mat
findSingleMatInPairHits <- function(mat,metampairs){
	# Return the names of the promoters (elements of metampairs) whose pair
	# matrix contains the matrix mat.
	# FIX: iterate with seq_along() so an empty list yields character(0)
	# instead of an out-of-bounds error from 1:0.
	promovec <- character()
	for ( i in seq_along(metampairs) ){
		if ( mat %in% metampairs[[i]] ) { promovec <- c(promovec,names(metampairs[i])) }
	}
	return(promovec)
}
### For a metamsingles list, find which promoters have the matrix mat
findSingleMatHits <- function(mat,metamsingles){
	# Return the names of the promoters (elements of metamsingles) whose
	# single-hit vector contains the matrix mat.
	# FIX: iterate with seq_along() so an empty list yields character(0)
	# instead of an out-of-bounds error from 1:0.
	promovec <- character()
	for ( i in seq_along(metamsingles) ){
		if ( mat %in% metamsingles[[i]] ) { promovec <- c(promovec,names(metamsingles[i])) }
	}
	return(promovec)
}
### For a metapair list, find which promoters have the matrix mat, at below a certain pvalue threshold
findSingleMatHitsPval <- function(mat,pval.thresh,metampairs,metapvals, verbose=TRUE){
	# Return the promoters whose pair list still contains mat after filtering
	# the pairs at pval.thresh. Set verbose=FALSE to silence the per-promoter
	# progress output (printing was unconditional before; the default keeps it).
	# FIX: seq_along() handles an empty metampairs list cleanly.
	promovec <- character()
	for ( i in seq_along(metampairs) ){
		if (verbose) cat( i,"\n")
		mpairs <- metampairs[[i]]
		pvalvec <- metapvals[[i]]
		fmp <- filterMpairs( pval.thresh, mpairs, pvalvec )
		if ( mat %in% fmp ){ promovec <- c(promovec,names(metampairs[i])) }
	}
	return(promovec)
}
## filter pairmatrix collection by pvalue
filterMpairCollection <- function ( pval.thresh, mpaircollection, pvalcollection ){
	# Apply filterMpairs() to every promoter in the collection, keeping only
	# the promoters with a non-NULL (i.e. non-empty) filtered result.
	kept <- list()
	for ( pss in names(mpaircollection) ) {
		filtered <- filterMpairs( pval.thresh, mpaircollection[[pss]], pvalcollection[[pss]] )
		if ( !is.null(filtered) ) {
			kept[[pss]] <- filtered
		}
	}
	return(kept)
}
## Filter pairmatrix by pvalue
filterMpairs <- function( pval.thresh, mpairmat, pvalvec ){
	# Keep the pairs (rows of mpairmat, or the single pair when mpairmat is a
	# vector) whose p-value is at or below pval.thresh; NULL when none survive.
	# BUG FIX: the survivor-count test used ">1", which silently dropped the
	# case of exactly one surviving pair and made the single-pair (vector)
	# branch unreachable; compare filterPCbyPval, which uses ">= 1".
	if ( length(which(pvalvec<=pval.thresh)) >= 1 ){
		if ( !is.vector(mpairmat) ) {
			indices.keep <- which(pvalvec <= pval.thresh )
			return( mpairmat[indices.keep,])
		} else {
			if ( pvalvec <= pval.thresh ){ return(mpairmat)}
		}
	}
	return(NULL) ## if none of the above are met
}
## Filter single hit matrix by pvalue
filterMsingles <- function( pval.thresh, msinglemat, pvalvec ){
	## Only pvalvec is actually needed: it is named by matrix, so the names of
	## the surviving entries identify the matrices. msinglemat is kept in the
	## signature for interface compatibility.
	keep <- which( pvalvec <= pval.thresh )
	return( names(pvalvec)[keep] )
}
## Find partners to a single pwm in a pairmatrix
findMatPartners <- function( mat, mpairmat ){
	# For every pair (row of mpairmat) containing mat, collect the other
	# member of the pair; mat may sit in either column.
	hits.left <- which( mpairmat[,1] == mat )
	hits.right <- which( mpairmat[,2] == mat )
	c( mpairmat[hits.left,2], mpairmat[hits.right,1] )
}
## Give p-values for the pairs involving matrix mat
findMatPartnerPvals <- function( mat, mpairmat, pvalvec ){
	# For every pair (row of mpairmat) containing mat, report that pair's
	# p-value, named by the partner matrix; mat may sit in either column.
	hits.left <- which( mpairmat[,1] == mat )
	hits.right <- which( mpairmat[,2] == mat )
	out <- c( pvalvec[hits.left], pvalvec[hits.right] )
	names(out) <- c( mpairmat[hits.left,2], mpairmat[hits.right,1] )
	return(out)
}
## filter PC collection by pvalue
filterPCbyPval <- function ( pc, pval.thresh ){
	# Keep, per promoter, the pairs whose p-value is at or below pval.thresh;
	# promoters with no surviving pair are dropped entirely. Returns a list
	# with the same three components as pc (metampairs, metapvals, metams),
	# or NULL when nothing survives anywhere.
	pairs.in <- pc$metampairs
	pvals.in <- pc$metapvals
	ems.in <- pc$metams
	kept <- list( metampairs = list(), metapvals = list(), metams = list() )
	for ( promoter in names(pairs.in) ) {
		pair.block <- pairs.in[[promoter]]
		pval.block <- pvals.in[[promoter]]
		em.block <- ems.in[[promoter]]
		hits <- which( pval.block <= pval.thresh )
		if ( length(hits) >= 1 ){
			if ( !is.vector(pair.block) ) {
				# Matrix of pairs: keep the surviving rows (a single survivor
				# drops to a vector, as before).
				kept$metampairs[[promoter]] <- pair.block[hits,]
				kept$metapvals[[promoter]] <- pval.block[hits]
				kept$metams[[promoter]] <- em.block[hits]
			} else if ( pval.block <= pval.thresh ) {
				# Single pair stored as a plain vector.
				kept$metampairs[[promoter]] <- pair.block
				kept$metapvals[[promoter]] <- pval.block
				kept$metams[[promoter]] <- em.block
			}
		}
	}
	if ( length(kept$metapvals) == 0 ){
		return ( NULL )
	}
	return( kept )
}
## filter SC collection by pvalue
filterSCbyPval <- function ( sc, pval.thresh ){
	# Keep, per promoter, the single-matrix hits whose p-value is at or below
	# pval.thresh; promoters with no survivors are dropped. Returns a list
	# with the same three components as sc, or NULL when nothing survives.
	kept <- list( metamsingles = list(), metapvals.singles = list(), metams.singles = list() )
	for ( promoter in names(sc$metamsingles) ) {
		pvals <- sc$metapvals.singles[[promoter]]
		survivors <- which( pvals <= pval.thresh )
		if ( length(survivors) > 0 ){
			kept$metamsingles[[promoter]] <- sc$metamsingles[[promoter]][survivors]
			kept$metapvals.singles[[promoter]] <- pvals[survivors]
			kept$metams.singles[[promoter]] <- sc$metams.singles[[promoter]][survivors]
		}
	}
	if ( length(kept$metapvals.singles) == 0 ){
		return ( NULL )
	}
	return( kept )
}
## filter PC collection by required matrices
## reqm is a length-2 vector c(mat1, mat2); comparison is order-insensitive
## (both sides are sorted before the identical() test).
## NOTE(review): if several rows of a promoter's pair matrix match reqm,
## only the last matching row is retained (original behavior preserved).
filterPCbyMatrices <- function ( pc , reqm ){
  target <- sort(reqm)
  filtered <- list( metampairs = list(), metapvals = list(), metams = list() )
  for ( promoter in names(pc$metampairs) ) {
    pairdata <- pc$metampairs[[promoter]]
    pvals <- pc$metapvals[[promoter]]
    stats <- pc$metams[[promoter]]
    if ( is.vector(pairdata) ) {
      ## single pair stored as a bare vector
      if ( identical(target, sort(pairdata)) ) {
        filtered$metampairs[[promoter]] <- pairdata
        filtered$metapvals[[promoter]] <- pvals
        filtered$metams[[promoter]] <- stats
      }
    } else {
      for ( i in seq_len(nrow(pairdata)) ) {
        if ( identical(target, sort(pairdata[i,])) ) {
          filtered$metampairs[[promoter]] <- pairdata[i,]
          filtered$metapvals[[promoter]] <- pvals[i]
          filtered$metams[[promoter]] <- stats[i]
        }
      }
    }
  }
  if ( length(filtered$metapvals) == 0 ) {
    return(NULL)
  }
  filtered
}
## all pairs in two sets, maintaining order
## Returns a 2-column matrix of all (s1, s2) pairs with s1 from set1 and s2
## from set2, set1 varying slowest (same row order as the original nested
## loops). Returns NULL when either set is empty, matching the original.
## Vectorized with rep()/cbind() instead of growing a matrix with rbind()
## inside nested loops, which copied the whole matrix on every append.
expandPairs <- function(set1,set2){
  if ( length(set1) == 0 || length(set2) == 0 ) {
    return(NULL)
  }
  ## unname() keeps cbind() from promoting element names to rownames
  cbind( rep(unname(set1), each = length(set2)),
         rep(unname(set2), times = length(set1)) )
}
## input PC, return TFs
## PC is indexed by ensids; the returned list of TF-pair hypotheses is
## indexed by psois (probe sets of interest).
## Criteria:
##  1. set of TFs: tfsubset restricts the candidate TFs (mapped through the
##     global cname.compare); noConnections removes TFs with no interactions.
##     NOTE(review): tfsubset.both is accepted but never referenced in the
##     body (original behavior); only "both TFs required" is implemented.
##  2. interactome: when TRUE, pairs are restricted to interacting TFs via
##     grabPairs(); otherwise the full cross product (expandPairs) is used.
##
## Helper: map one motif-family pair to its allowed TF pairs.
## Returns a 2-column matrix of TF pairs (a single surviving row collapses
## to a vector, which rbind() downstream still accepts), or NULL.
tfPairsForFamilyPair <- function( fam1, fam2, allowed.tfs, interactome, tf.dist.cn ){
  soi.tfs <- intersect( fam2tf.tte.gname[[fam1]], allowed.tfs )
  near.tfs <- intersect( fam2tf.tte.gname[[fam2]], allowed.tfs )
  if ( length(soi.tfs) == 0 || length(near.tfs) == 0 ){
    return(NULL)
  }
  if ( interactome ) {
    pairs <- grabPairs(soi.tfs,near.tfs,tf.dist.cn)
  } else {
    pairs <- expandPairs(soi.tfs,near.tfs)
  }
  if ( length(pairs) == 0 ) {
    return(NULL)
  }
  ## drop self-pairs (same TF on both sides)
  pairs[ pairs[,1] != pairs[,2], ]
}
createTFsetFromPairs <- function ( pc, interactome=TRUE, tfsubset=transfac.tfs.expressed, tfsubset.both=TRUE, noConnections=NULL,tf.dist.cn ){
  allowed.tfs <- setdiff(as.character(cname.compare[tfsubset]),noConnections)
  metampairs <- pc$metampairs
  returnList <- list()
  for ( ensid in names(metampairs) ) {
    mpairs <- metampairs[[ensid]]
    ## unify the single-pair (bare vector) and multi-pair (matrix) cases;
    ## this removes the near-duplicate branch logic of the original version
    if ( is.vector(mpairs) ) {
      mpairs <- matrix(mpairs, nrow=1)
    }
    psoi <- as.character(repProbes.ncbiID[entrezIDofEnsemblID[ensid]])
    if ( length(psoi) > 1 ) {
      cat ("Trouble ahead. Multiple psois for this ensid,",ensid,"\n" )
    }
    if ( is.na(psoi) ) {
      cat ("Trouble:Could not map to psoi and/or eid for this ensid:",ensid,"\n" )
    }
    ##
    ## Transcription factor pairs for this promoter
    ##
    tfpaircollection <- character()
    for ( mpair.index in seq_len(nrow(mpairs)) ){
      pairs <- tfPairsForFamilyPair( mpairs[mpair.index,1], mpairs[mpair.index,2],
                                     allowed.tfs, interactome, tf.dist.cn )
      if ( !is.null(pairs) ) {
        tfpaircollection <- rbind(tfpaircollection,pairs)
      }
    }
    rownames(tfpaircollection) <- NULL
    if ( length(tfpaircollection) > 1 ){
      ## canonicalize each pair (sort within row), then deduplicate rows
      tfpaircollection <- unique(t(apply(tfpaircollection, 1, sort)))
    }
    if ( length(tfpaircollection) > 0 ){
      ## Some psois correspond to multiple ensids; all hypotheses for a
      ## given psoi are treated on an equal footing and stacked
      if ( is.null(returnList[[psoi]]) ){
        returnList[[psoi]] <- tfpaircollection
      } else {
        returnList[[psoi]] <- rbind(returnList[[psoi]],tfpaircollection)
        returnList[[psoi]] <- unique(t(apply(returnList[[psoi]],1,sort)))
      }
    }
  }
  returnList
}
## input PC, return TFs
## sc is indexed by ensids; the result is indexed by psois, each entry a
## sorted, deduplicated character vector of TF names restricted to tfsubset
## (mapped through the global cname.compare).
createTFsetFromSingles <- function ( sc, tfsubset=transfac.tfs.expressed ){
  allowed.tfs <- as.character(cname.compare[tfsubset])
  returnList <- list()
  for ( ensid in names(sc$metamsingles) ) {
    msingles <- sc$metamsingles[[ensid]]
    psoi <- as.character(repProbes.ncbiID[entrezIDofEnsemblID[ensid]])
    if ( length(psoi) > 1 ) {
      cat ("Trouble ahead. Multiple psois for this ensid:",ensid,"\n" )
    }
    if ( is.na(psoi) ) {
      cat ("Trouble ahead. This ensid has no repProbe:",ensid,"\n" )
    }
    ## expand motif families to TFs, then restrict to the allowed set
    candidate.tfs <- unique(sort(as.character(unlist(fam2tf.tte.gname[msingles]))))
    candidate.tfs <- intersect( candidate.tfs, allowed.tfs )
    if ( length(candidate.tfs) == 0 ) next
    ## Some psois correspond to multiple ensids; all hypotheses for a given
    ## psoi are treated on an equal footing and concatenated
    if ( is.null(returnList[[psoi]]) ){
      returnList[[psoi]] <- candidate.tfs
    } else {
      returnList[[psoi]] <- unique(sort(c(returnList[[psoi]], candidate.tfs)))
    }
  }
  returnList
}
| /utils/utilitiesMeta.R | no_license | vthorsson/tfinf | R | false | false | 12,048 | r |
###
### Nov 2006: Use list construct to make 'objects'
### sc
### sc$metamsingles,sc$metapvals.singles,sc$metams.singles, and TF singles, to be named
### pc
### pc$metampairs, pc$metapvals, pc$metams and TF pairs, to be named
##source("./utilitiesInteractions.R")
### For a metapair list, find which promoters have the matrix mat
## Returns the names of the elements of metampairs that contain mat.
## Vectorized with vapply() instead of growing a character vector with c()
## inside a loop (which copies the vector on every append).
findSingleMatInPairHits <- function(mat,metampairs){
  hits <- vapply(metampairs, function(mpairs) mat %in% mpairs, logical(1))
  promovec <- names(metampairs)[hits]
  if ( is.null(promovec) ) {
    promovec <- character()  # unnamed input: match the original's empty result
  }
  return(promovec)
}
### For a metamsingles list, find which promoters have the matrix mat
## Returns the names of the elements of metamsingles that contain mat.
## Vectorized with vapply() instead of growing a character vector with c()
## inside a loop (which copies the vector on every append).
findSingleMatHits <- function(mat,metamsingles){
  hits <- vapply(metamsingles, function(msingles) mat %in% msingles, logical(1))
  promovec <- names(metamsingles)[hits]
  if ( is.null(promovec) ) {
    promovec <- character()  # unnamed input: match the original's empty result
  }
  return(promovec)
}
### For a metapair list, find which promoters have the matrix mat, at below a certain pvalue threshold
## Pairs are filtered per promoter with filterMpairs() before the membership test.
## BUG FIX: the original iterated over 1:nmeta, which becomes c(1, 0) and
## crashes on an empty collection; seq_along() handles that case correctly.
findSingleMatHitsPval <- function(mat,pval.thresh,metampairs,metapvals){
  promovec <- character()
  for ( i in seq_along(metampairs) ){
    cat( i,"\n")  # progress output, one line per promoter (original behavior)
    passing <- filterMpairs( pval.thresh, metampairs[[i]], metapvals[[i]] )
    if ( mat %in% passing ){
      promovec <- c(promovec,names(metampairs[i]))
    }
  }
  return(promovec)
}
## filter pairmatrix collection by pvalue
## Applies filterMpairs() to every promoter in the collection, keeping only
## promoters with at least one surviving pair.
filterMpairCollection <- function ( pval.thresh, mpaircollection, pvalcollection ){
  filtered <- list()
  for ( pss in names(mpaircollection) ) {
    surviving <- filterMpairs( pval.thresh, mpaircollection[[pss]], pvalcollection[[pss]] )
    if ( !is.null(surviving) ) {
      filtered[[pss]] <- surviving
    }
  }
  return(filtered)
}
## Filter pairmatrix by pvalue
## mpairmat is either a matrix (one pair per row) or a length-2 vector
## (single pair); pvalvec holds the matching p-values. Returns the passing
## pairs (matrix, or vector when only one row remains), or NULL when none pass.
## BUG FIX: the original gate was length(which(pvalvec <= pval.thresh)) > 1,
## which (a) discarded promoters with exactly one passing pair and
## (b) made the single-pair vector branch unreachable, since a scalar pvalvec
## can never yield a length > 1. The analogous filterPCbyPval uses >= 1;
## changed to match.
filterMpairs <- function( pval.thresh, mpairmat, pvalvec ){
  indices.keep <- which(pvalvec <= pval.thresh)
  if ( length(indices.keep) == 0 ) {
    return(NULL)
  }
  if ( is.vector(mpairmat) ) {
    return(mpairmat)  # single pair: keep wholesale
  }
  mpairmat[indices.keep,]
}
## Filter single hit matrix by pvalue
## Only pvalvec is consulted: it is named by matrix, so the msinglemat
## argument is unused (kept for interface compatibility). Returns the names
## of the entries whose p-value passes the threshold.
filterMsingles <- function( pval.thresh, msinglemat, pvalvec ){
  passing <- pvalvec <= pval.thresh
  names(pvalvec)[which(passing)]
}
## Find partners to a single pwm in a pairmatrix
## Returns the second column where mat appears first, followed by the first
## column where mat appears second.
findMatPartners <- function( mat, mpairmat ){
  partners.right <- mpairmat[ which(mpairmat[,1]==mat), 2 ]
  partners.left <- mpairmat[ which(mpairmat[,2]==mat), 1 ]
  c(partners.right, partners.left)
}
## Give p-values for the pairs involving matrix mat
## Like findMatPartners, but returns the p-value of each pair containing mat,
## as a vector named by the partner matrix.
findMatPartnerPvals <- function( mat, mpairmat, pvalvec ){
  hits.left <- which(mpairmat[,1]==mat)
  hits.right <- which(mpairmat[,2]==mat)
  pvals <- c( pvalvec[hits.left], pvalvec[hits.right] )
  names(pvals) <- c( mpairmat[hits.left,2], mpairmat[hits.right,1] )
  return (pvals)
}
## filter PC collection by pvalue
## pc carries promoter-indexed parallel sub-lists:
##   $metampairs - matrix of pairs (one row per pair) or a length-2 vector (single pair)
##   $metapvals  - meta p-values, one per pair
##   $metams     - meta statistics, one per pair
## Returns the same structure restricted to entries with p-value <= pval.thresh,
## or NULL when no promoter retains any pair.
filterPCbyPval <- function ( pc, pval.thresh ){
  filtered <- list( metampairs = list(), metapvals = list(), metams = list() )
  for ( promoter in names(pc$metampairs) ) {
    pairdata <- pc$metampairs[[promoter]]
    pvals <- pc$metapvals[[promoter]]
    stats <- pc$metams[[promoter]]
    keep <- which( pvals <= pval.thresh )
    if ( length(keep) == 0 ) next
    if ( is.vector(pairdata) ) {
      ## single pair stored as a bare vector: keep the entry wholesale
      filtered$metampairs[[promoter]] <- pairdata
      filtered$metapvals[[promoter]] <- pvals
      filtered$metams[[promoter]] <- stats
    } else {
      ## matrix of pairs: subset rows (note: a single surviving row drops
      ## to a vector, which downstream is.vector() checks handle)
      filtered$metampairs[[promoter]] <- pairdata[keep,]
      filtered$metapvals[[promoter]] <- pvals[keep]
      filtered$metams[[promoter]] <- stats[keep]
    }
  }
  if ( length(filtered$metapvals) == 0 ) {
    return(NULL)
  }
  filtered
}
## filter SC collection by pvalue
## sc carries promoter-indexed parallel sub-lists:
##   $metamsingles      - matrix names (single-matrix hypotheses)
##   $metapvals.singles - meta p-values
##   $metams.singles    - meta statistics
## Returns the same structure restricted to entries with p-value <= pval.thresh,
## or NULL when no promoter retains any entry.
filterSCbyPval <- function ( sc, pval.thresh ){
  filtered <- list( metamsingles = list(), metapvals.singles = list(), metams.singles = list() )
  for ( promoter in names(sc$metamsingles) ) {
    pvals <- sc$metapvals.singles[[promoter]]
    keep <- which( pvals <= pval.thresh )
    if ( length(keep) == 0 ) next
    filtered$metamsingles[[promoter]] <- sc$metamsingles[[promoter]][keep]
    filtered$metapvals.singles[[promoter]] <- pvals[keep]
    filtered$metams.singles[[promoter]] <- sc$metams.singles[[promoter]][keep]
  }
  if ( length(filtered$metapvals.singles) == 0 ) {
    return(NULL)
  }
  filtered
}
## filter PC collection by required matrices
## reqm is a length-2 vector c(mat1, mat2); comparison is order-insensitive
## (both sides are sorted before the identical() test).
## NOTE(review): if several rows of a promoter's pair matrix match reqm,
## only the last matching row is retained (original behavior preserved).
filterPCbyMatrices <- function ( pc , reqm ){
  target <- sort(reqm)
  filtered <- list( metampairs = list(), metapvals = list(), metams = list() )
  for ( promoter in names(pc$metampairs) ) {
    pairdata <- pc$metampairs[[promoter]]
    pvals <- pc$metapvals[[promoter]]
    stats <- pc$metams[[promoter]]
    if ( is.vector(pairdata) ) {
      ## single pair stored as a bare vector
      if ( identical(target, sort(pairdata)) ) {
        filtered$metampairs[[promoter]] <- pairdata
        filtered$metapvals[[promoter]] <- pvals
        filtered$metams[[promoter]] <- stats
      }
    } else {
      for ( i in seq_len(nrow(pairdata)) ) {
        if ( identical(target, sort(pairdata[i,])) ) {
          filtered$metampairs[[promoter]] <- pairdata[i,]
          filtered$metapvals[[promoter]] <- pvals[i]
          filtered$metams[[promoter]] <- stats[i]
        }
      }
    }
  }
  if ( length(filtered$metapvals) == 0 ) {
    return(NULL)
  }
  filtered
}
## all pairs in two sets, maintaining order
## Returns a 2-column matrix of all (s1, s2) pairs with s1 from set1 and s2
## from set2, set1 varying slowest (same row order as the original nested
## loops). Returns NULL when either set is empty, matching the original.
## Vectorized with rep()/cbind() instead of growing a matrix with rbind()
## inside nested loops, which copied the whole matrix on every append.
expandPairs <- function(set1,set2){
  if ( length(set1) == 0 || length(set2) == 0 ) {
    return(NULL)
  }
  ## unname() keeps cbind() from promoting element names to rownames
  cbind( rep(unname(set1), each = length(set2)),
         rep(unname(set2), times = length(set1)) )
}
## input PC, return TFs
## PC is indexed by ensids; the returned list of TF-pair hypotheses is
## indexed by psois (probe sets of interest).
## Criteria:
##  1. set of TFs: tfsubset restricts the candidate TFs (mapped through the
##     global cname.compare); noConnections removes TFs with no interactions.
##     NOTE(review): tfsubset.both is accepted but never referenced in the
##     body (original behavior); only "both TFs required" is implemented.
##  2. interactome: when TRUE, pairs are restricted to interacting TFs via
##     grabPairs(); otherwise the full cross product (expandPairs) is used.
##
## Helper: map one motif-family pair to its allowed TF pairs.
## Returns a 2-column matrix of TF pairs (a single surviving row collapses
## to a vector, which rbind() downstream still accepts), or NULL.
tfPairsForFamilyPair <- function( fam1, fam2, allowed.tfs, interactome, tf.dist.cn ){
  soi.tfs <- intersect( fam2tf.tte.gname[[fam1]], allowed.tfs )
  near.tfs <- intersect( fam2tf.tte.gname[[fam2]], allowed.tfs )
  if ( length(soi.tfs) == 0 || length(near.tfs) == 0 ){
    return(NULL)
  }
  if ( interactome ) {
    pairs <- grabPairs(soi.tfs,near.tfs,tf.dist.cn)
  } else {
    pairs <- expandPairs(soi.tfs,near.tfs)
  }
  if ( length(pairs) == 0 ) {
    return(NULL)
  }
  ## drop self-pairs (same TF on both sides)
  pairs[ pairs[,1] != pairs[,2], ]
}
createTFsetFromPairs <- function ( pc, interactome=TRUE, tfsubset=transfac.tfs.expressed, tfsubset.both=TRUE, noConnections=NULL,tf.dist.cn ){
  allowed.tfs <- setdiff(as.character(cname.compare[tfsubset]),noConnections)
  metampairs <- pc$metampairs
  returnList <- list()
  for ( ensid in names(metampairs) ) {
    mpairs <- metampairs[[ensid]]
    ## unify the single-pair (bare vector) and multi-pair (matrix) cases;
    ## this removes the near-duplicate branch logic of the original version
    if ( is.vector(mpairs) ) {
      mpairs <- matrix(mpairs, nrow=1)
    }
    psoi <- as.character(repProbes.ncbiID[entrezIDofEnsemblID[ensid]])
    if ( length(psoi) > 1 ) {
      cat ("Trouble ahead. Multiple psois for this ensid,",ensid,"\n" )
    }
    if ( is.na(psoi) ) {
      cat ("Trouble:Could not map to psoi and/or eid for this ensid:",ensid,"\n" )
    }
    ##
    ## Transcription factor pairs for this promoter
    ##
    tfpaircollection <- character()
    for ( mpair.index in seq_len(nrow(mpairs)) ){
      pairs <- tfPairsForFamilyPair( mpairs[mpair.index,1], mpairs[mpair.index,2],
                                     allowed.tfs, interactome, tf.dist.cn )
      if ( !is.null(pairs) ) {
        tfpaircollection <- rbind(tfpaircollection,pairs)
      }
    }
    rownames(tfpaircollection) <- NULL
    if ( length(tfpaircollection) > 1 ){
      ## canonicalize each pair (sort within row), then deduplicate rows
      tfpaircollection <- unique(t(apply(tfpaircollection, 1, sort)))
    }
    if ( length(tfpaircollection) > 0 ){
      ## Some psois correspond to multiple ensids; all hypotheses for a
      ## given psoi are treated on an equal footing and stacked
      if ( is.null(returnList[[psoi]]) ){
        returnList[[psoi]] <- tfpaircollection
      } else {
        returnList[[psoi]] <- rbind(returnList[[psoi]],tfpaircollection)
        returnList[[psoi]] <- unique(t(apply(returnList[[psoi]],1,sort)))
      }
    }
  }
  returnList
}
## input PC, return TFs
## sc is indexed by ensids; the result is indexed by psois, each entry a
## sorted, deduplicated character vector of TF names restricted to tfsubset
## (mapped through the global cname.compare).
createTFsetFromSingles <- function ( sc, tfsubset=transfac.tfs.expressed ){
  allowed.tfs <- as.character(cname.compare[tfsubset])
  returnList <- list()
  for ( ensid in names(sc$metamsingles) ) {
    msingles <- sc$metamsingles[[ensid]]
    psoi <- as.character(repProbes.ncbiID[entrezIDofEnsemblID[ensid]])
    if ( length(psoi) > 1 ) {
      cat ("Trouble ahead. Multiple psois for this ensid:",ensid,"\n" )
    }
    if ( is.na(psoi) ) {
      cat ("Trouble ahead. This ensid has no repProbe:",ensid,"\n" )
    }
    ## expand motif families to TFs, then restrict to the allowed set
    candidate.tfs <- unique(sort(as.character(unlist(fam2tf.tte.gname[msingles]))))
    candidate.tfs <- intersect( candidate.tfs, allowed.tfs )
    if ( length(candidate.tfs) == 0 ) next
    ## Some psois correspond to multiple ensids; all hypotheses for a given
    ## psoi are treated on an equal footing and concatenated
    if ( is.null(returnList[[psoi]]) ){
      returnList[[psoi]] <- candidate.tfs
    } else {
      returnList[[psoi]] <- unique(sort(c(returnList[[psoi]], candidate.tfs)))
    }
  }
  returnList
}
|
## NOTE(review): rm(list=ls()) in a script wipes the user's entire workspace
## and is discouraged; prefer running the script in a fresh R session.
rm(list=ls(all=TRUE))
#load packages
library(MuMIn)
#STEP #1: Import DATA (Run Trigger Size by Stock & District)
## Data are pasted from the clipboard (Windows-style). Downstream code uses
## the columns Prop, logitProp, StatWeek, lnStatWeek and Size.
Data1<- read.table("clipboard", header=T, sep="\t") #108 Tah
Data2<- read.table("clipboard", header=T, sep="\t") #106-41 Tah
#STEP #2: Determine if data is normally distributed (p-value should be >0.05)
## Exploratory normality check for a numeric vector: draws a 2x2 panel
## (histogram with density overlay, boxplot, normal Q-Q plot, empirical vs
## fitted-normal CDF) and returns the Shapiro-Wilk test result as the last
## expression. NAs are dropped with a warning; extra arguments go to boxplot().
eda.norm <- function(x, ...)
{
par(mfrow=c(2,2))
if(sum(is.na(x)) > 0)
warning("NA's were removed before plotting")
x <- x[!is.na(x)]
hist(x, main = "Histogram and non-\nparametric density estimate", prob = T)
iqd <- summary(x)[5] - summary(x)[2]  # interquartile distance (3rd Qu. - 1st Qu.)
lines(density(x, width = 2 * iqd))  # kernel density overlay, bandwidth tied to IQR
boxplot(x, main = "Boxplot", ...)
qqnorm(x)
qqline(x)
plot.ecdf(x, main="Empirical and normal cdf")
LIM <- par("usr")  # current plot limits, used to span the fitted normal CDF
y <- seq(LIM[1],LIM[2],length=100)
lines(y, pnorm(y, mean(x), sqrt(var(x))))
shapiro.test(x)  # returned invisibly-printable htest (p > 0.05 suggests normality)
}
## BUG FIX: the script never defines `Data` (only Data1 and Data2 are read
## above, after the workspace is wiped), so attach(Data) aborts with
## "object 'Data' not found". Check normality of the response for each
## dataset directly, without attach() (attach() is discouraged anyway: it
## masks objects and leaks search-path state).
eda.norm(Data1$Prop)
eda.norm(Data2$Prop)
#STEP #3: RUN MODELS
## Linear models of logit-transformed proportion vs log statistical week,
## fit per fishery (A* uses Data1 = D108; B* uses Data2 = D106-41/42) and per
## run-size stratum. Numbering within each fishery: 1-3 = "Low" subset,
## 4-6 = "High" subset, 7-9 = all data; within each triple the polynomial
## degree in lnStatWeek is 1, 2, 3.
A1 <- lm(formula = logitProp~ (lnStatWeek),data=Data1, subset=Size=="Low")
A2 <- lm(formula = logitProp~ poly(lnStatWeek,2),data=Data1, subset=Size=="Low")
A3 <- lm(formula = logitProp ~ poly(lnStatWeek,3),data=Data1, subset=Size=="Low")
A4 <- lm(formula = logitProp~ (lnStatWeek),data=Data1, subset=Size=="High")
A5 <- lm(formula = logitProp~ poly(lnStatWeek,2),data=Data1, subset=Size=="High")
A6 <- lm(formula = logitProp~ poly(lnStatWeek,3),data=Data1, subset=Size=="High")
A7 <- lm(formula = logitProp~ (lnStatWeek),data=Data1)
A8 <- lm(formula = logitProp~ poly(lnStatWeek,2),data=Data1)
A9 <- lm(formula = logitProp~ poly(lnStatWeek,3),data=Data1)
B1 <- lm(formula = logitProp~ (lnStatWeek),data=Data2, subset=Size=="Low")
B2 <- lm(formula = logitProp~ poly(lnStatWeek,2),data=Data2, subset=Size=="Low")
B3 <- lm(formula = logitProp ~ poly(lnStatWeek,3),data=Data2, subset=Size=="Low")
B4 <- lm(formula = logitProp~ (lnStatWeek),data=Data2, subset=Size=="High")
B5 <- lm(formula = logitProp~ poly(lnStatWeek,2),data=Data2, subset=Size=="High")
B6 <- lm(formula = logitProp~ poly(lnStatWeek,3),data=Data2, subset=Size=="High")
B7 <- lm(formula = logitProp~ (lnStatWeek),data=Data2)
B8 <- lm(formula = logitProp~ poly(lnStatWeek,2),data=Data2)
B9 <- lm(formula = logitProp~ poly(lnStatWeek,3),data=Data2)
#STEP #4: OUTPUT OF MODELS
data.frame (AICc(A1,A2,A3,A4,A5,A6,A7,A8,A9))
data.frame (AICc(B1,B2,B3,B4,B5,B6,B7,B8,B9))
summary(A1)#Go through each model summary to get R squared and coefficient values for tables
nd<-data.frame(lnStatWeek=c(3.17805,
3.21888,
3.25810,
3.29584,
3.33220,
3.36730,
3.40120,
3.43399,
3.46574,
3.49651,
3.52636,
3.55535,
3.58352,
3.61092))
prediction<-predict(B9, newdata=nd, interval="prediction")
prediction
#STEP #5: OUTPUT PREDICTION FRAME FOR ALL MODELS
pred.frame<-data.frame(lnStatWeek=seq(3.178054,3.8,0.01))
pc_A1<-predict(A1,newdata=pred.frame,interval="confidence", level=0.95)
pc_A2<-predict(A2,newdata=pred.frame,interval="confidence", level=0.95)
pc_A3<-predict(A3,newdata=pred.frame,interval="confidence", level=0.95)
pc_A4<-predict(A4,newdata=pred.frame,interval="confidence", level=0.95)
pc_A5<-predict(A5,newdata=pred.frame,interval="confidence", level=0.95)
pc_A6<-predict(A6,newdata=pred.frame,interval="confidence", level=0.95)
pc_A7<-predict(A7,newdata=pred.frame,interval="confidence", level=0.95)
pc_A8<-predict(A8,newdata=pred.frame,interval="confidence", level=0.95)
pc_A9<-predict(A9,newdata=pred.frame,interval="confidence", level=0.95)
pc_B1<-predict(B1,newdata=pred.frame,interval="confidence", level=0.95)
pc_B2<-predict(B2,newdata=pred.frame,interval="confidence", level=0.95)
pc_B3<-predict(B3,newdata=pred.frame,interval="confidence", level=0.95)
pc_B4<-predict(B4,newdata=pred.frame,interval="confidence", level=0.95)
pc_B5<-predict(B5,newdata=pred.frame,interval="confidence", level=0.95)
pc_B6<-predict(B6,newdata=pred.frame,interval="confidence", level=0.95)
pc_B7<-predict(B7,newdata=pred.frame,interval="confidence", level=0.95)
pc_B8<-predict(B8,newdata=pred.frame,interval="confidence", level=0.95)
pc_B9<-predict(B9,newdata=pred.frame,interval="confidence", level=0.95)
pc_A1
pc_A6
pc_A9
pc_B3
pc_B6
pc_B9
#STEP #6: GRAPH BEST MODELS (Change model number in plots for low, medium, high, & combined models)
#D108 Tahltan
Data3<- read.table("clipboard", header=T, sep="\t") #Fitted Data
par(mfrow=c(2,3))
plot(Prop~StatWeek,data=Data1, subset=Size=="Low", las=1, type="p", pch=16, cex=0.8,
col=1,xlab="Statistical Week", ylab="Proportion", main="<40,000 run size D108 fishery",
font.lab=1,xlim=c(24,34), ylim=c(0,0.8),
cex.lab=1, cex.main=1, font.axis=1)
lines (A1~StatWeek,data=Data3,col=1, pch=8, cex=0.8)
plot(Prop~StatWeek,data=Data1, subset=Size=="High", las=1, type="p", pch=16, cex=0.8,
col=1,xlab="Statistical Week", ylab="Proportion", main=">80,000 run size D108 fishery",
font.lab=1,
cex.lab=1, cex.main=1, font.axis=1)
lines (A6~StatWeek,data=Data3,col=1, pch=8, cex=1.8)
plot(Prop~StatWeek,data=Data1, las=1, type="p", pch=16, cex=0.8,
col=1,xlab="Statistical Week", ylab="Proportion", main="D108 fishery",
font.lab=1,
cex.lab=1, cex.main=1, font.axis=1)
lines (A9~StatWeek,data=Data3,col=1, pch=8, cex=1.8)
plot(Prop~StatWeek,data=Data2, subset=Size=="Low", las=1, type="p", pch=16, cex=0.8,
col=1,xlab="Statistical Week", ylab="Proportion", main="<40,000 run size D106-41/42 fishery",
font.lab=1,
cex.lab=1, cex.main=1, font.axis=1)
lines (B3~StatWeek,data=Data3,col=1, pch=8, cex=0.8)
plot(Prop~StatWeek,data=Data2, subset=Size=="High", las=1, type="p", pch=16, cex=0.8,
col=1,xlab="Statistical Week", ylab="Proportion", main=">80,000 run size D106-41/42 fishery",
font.lab=1,
cex.lab=1, cex.main=1, font.axis=1)
lines (B6~StatWeek,data=Data3,col=1, pch=8, cex=1.8)
plot(Prop~StatWeek,data=Data2, las=1, type="p", pch=16, cex=0.8,
col=1,xlab="Statistical Week", ylab="Proportion", main="D106-41/42 fishery",
font.lab=1,
cex.lab=1, cex.main=1, font.axis=1)
lines (B9~StatWeek,data=Data3,col=1, pch=8, cex=1.8)
#STEP #7: DIAGNOSTICS (BEST MODEL); Change model number based on model that figures pertain to
par(mfrow=c(2,2))
plot(A9, which=2, main="Figure A")
plot(A9, which=2, main="Figure B")
plot(A9, which=3, main="Figure C")
plot(A9, which=4, main="Figure D")
plot(A9, which=5, main="Figure E")
plot(A9, which=6, main="Figure F")
#STEP #8: SHOW FITTED VALUES IN A FIGURE FOR LOW, HIGH
par(mfrow=c(1,2))
plot(A1~StatWeek,data=Data3,las=1, type="l", pch=16, cex=0.8,
col=1,xlab="Statistical Week", ylab="Proportion",
font.lab=1, ylim=c(0,0.7), xlim=c(24,41),
cex.lab=1, cex.main=1, font.axis=1)
lines(A6~StatWeek,data=Data3,col=2, pch=16, cex=0.8)
lines(A9~StatWeek,data=Data3,col=3, pch=16, cex=0.8)
legend (34,0.7, legend=c("<40,000", ">80,000", "All Data"),
cex=0.75,pch=16,col=c(1,2,3), bty="n", lwd=1)
plot(B3~StatWeek,data=Data3,las=1, type="l", pch=16, cex=0.8,
col=1,xlab="Statistical Week", ylab="Proportion",
font.lab=1, ylim=c(0,0.7), xlim=c(24,41),
cex.lab=1, cex.main=1, font.axis=1)
lines(B6~StatWeek,data=Data3,col=2, pch=16, cex=0.8)
lines(B9~StatWeek,data=Data3,col=3, pch=16, cex=0.8)
legend (34,0.7, legend=c("<40,000", ">80,000", "All Data"),
cex=0.75,pch=16,col=c(1,2,3), bty="n", lwd=1)
| /prior prop code & results (model years 2014-2016)/40,000 & 80,000 ORIG/Run Trigger Size Tah (40000 & 80000).R | no_license | fssem1/Stikine-management-model | R | false | false | 7,648 | r | rm(list=ls(all=TRUE))
#load packages
library(MuMIn)
#STEP #1: Import DATA (Run Trigger Size by Stock & District)
## Data are pasted from the clipboard (Windows-style). Downstream code uses
## the columns Prop, logitProp, StatWeek, lnStatWeek and Size.
Data1<- read.table("clipboard", header=T, sep="\t") #108 Tah
Data2<- read.table("clipboard", header=T, sep="\t") #106-41 Tah
#STEP #2: Determine if data is normally distributed (p-value should be >0.05)
## Exploratory normality check for a numeric vector: draws a 2x2 panel
## (histogram with density overlay, boxplot, normal Q-Q plot, empirical vs
## fitted-normal CDF) and returns the Shapiro-Wilk test result as the last
## expression. NAs are dropped with a warning; extra arguments go to boxplot().
eda.norm <- function(x, ...)
{
par(mfrow=c(2,2))
if(sum(is.na(x)) > 0)
warning("NA's were removed before plotting")
x <- x[!is.na(x)]
hist(x, main = "Histogram and non-\nparametric density estimate", prob = T)
iqd <- summary(x)[5] - summary(x)[2]  # interquartile distance (3rd Qu. - 1st Qu.)
lines(density(x, width = 2 * iqd))  # kernel density overlay, bandwidth tied to IQR
boxplot(x, main = "Boxplot", ...)
qqnorm(x)
qqline(x)
plot.ecdf(x, main="Empirical and normal cdf")
LIM <- par("usr")  # current plot limits, used to span the fitted normal CDF
y <- seq(LIM[1],LIM[2],length=100)
lines(y, pnorm(y, mean(x), sqrt(var(x))))
shapiro.test(x)  # returned invisibly-printable htest (p > 0.05 suggests normality)
}
## BUG FIX: the script never defines `Data` (only Data1 and Data2 are read
## above, after the workspace is wiped), so attach(Data) aborts with
## "object 'Data' not found". Check normality of the response for each
## dataset directly, without attach() (attach() is discouraged anyway: it
## masks objects and leaks search-path state).
eda.norm(Data1$Prop)
eda.norm(Data2$Prop)
#STEP #3: RUN MODELS
## Linear models of logit-transformed proportion vs log statistical week,
## fit per fishery (A* uses Data1 = D108; B* uses Data2 = D106-41/42) and per
## run-size stratum. Numbering within each fishery: 1-3 = "Low" subset,
## 4-6 = "High" subset, 7-9 = all data; within each triple the polynomial
## degree in lnStatWeek is 1, 2, 3.
A1 <- lm(formula = logitProp~ (lnStatWeek),data=Data1, subset=Size=="Low")
A2 <- lm(formula = logitProp~ poly(lnStatWeek,2),data=Data1, subset=Size=="Low")
A3 <- lm(formula = logitProp ~ poly(lnStatWeek,3),data=Data1, subset=Size=="Low")
A4 <- lm(formula = logitProp~ (lnStatWeek),data=Data1, subset=Size=="High")
A5 <- lm(formula = logitProp~ poly(lnStatWeek,2),data=Data1, subset=Size=="High")
A6 <- lm(formula = logitProp~ poly(lnStatWeek,3),data=Data1, subset=Size=="High")
A7 <- lm(formula = logitProp~ (lnStatWeek),data=Data1)
A8 <- lm(formula = logitProp~ poly(lnStatWeek,2),data=Data1)
A9 <- lm(formula = logitProp~ poly(lnStatWeek,3),data=Data1)
B1 <- lm(formula = logitProp~ (lnStatWeek),data=Data2, subset=Size=="Low")
B2 <- lm(formula = logitProp~ poly(lnStatWeek,2),data=Data2, subset=Size=="Low")
B3 <- lm(formula = logitProp ~ poly(lnStatWeek,3),data=Data2, subset=Size=="Low")
B4 <- lm(formula = logitProp~ (lnStatWeek),data=Data2, subset=Size=="High")
B5 <- lm(formula = logitProp~ poly(lnStatWeek,2),data=Data2, subset=Size=="High")
B6 <- lm(formula = logitProp~ poly(lnStatWeek,3),data=Data2, subset=Size=="High")
B7 <- lm(formula = logitProp~ (lnStatWeek),data=Data2)
B8 <- lm(formula = logitProp~ poly(lnStatWeek,2),data=Data2)
B9 <- lm(formula = logitProp~ poly(lnStatWeek,3),data=Data2)
#STEP #4: OUTPUT OF MODELS
data.frame (AICc(A1,A2,A3,A4,A5,A6,A7,A8,A9))
data.frame (AICc(B1,B2,B3,B4,B5,B6,B7,B8,B9))
summary(A1)#Go through each model summary to get R squared and coefficient values for tables
nd<-data.frame(lnStatWeek=c(3.17805,
3.21888,
3.25810,
3.29584,
3.33220,
3.36730,
3.40120,
3.43399,
3.46574,
3.49651,
3.52636,
3.55535,
3.58352,
3.61092))
prediction<-predict(B9, newdata=nd, interval="prediction")
prediction
#STEP #5: OUTPUT PREDICTION FRAME FOR ALL MODELS
pred.frame<-data.frame(lnStatWeek=seq(3.178054,3.8,0.01))
pc_A1<-predict(A1,newdata=pred.frame,interval="confidence", level=0.95)
pc_A2<-predict(A2,newdata=pred.frame,interval="confidence", level=0.95)
pc_A3<-predict(A3,newdata=pred.frame,interval="confidence", level=0.95)
pc_A4<-predict(A4,newdata=pred.frame,interval="confidence", level=0.95)
pc_A5<-predict(A5,newdata=pred.frame,interval="confidence", level=0.95)
pc_A6<-predict(A6,newdata=pred.frame,interval="confidence", level=0.95)
pc_A7<-predict(A7,newdata=pred.frame,interval="confidence", level=0.95)
pc_A8<-predict(A8,newdata=pred.frame,interval="confidence", level=0.95)
pc_A9<-predict(A9,newdata=pred.frame,interval="confidence", level=0.95)
pc_B1<-predict(B1,newdata=pred.frame,interval="confidence", level=0.95)
pc_B2<-predict(B2,newdata=pred.frame,interval="confidence", level=0.95)
pc_B3<-predict(B3,newdata=pred.frame,interval="confidence", level=0.95)
pc_B4<-predict(B4,newdata=pred.frame,interval="confidence", level=0.95)
pc_B5<-predict(B5,newdata=pred.frame,interval="confidence", level=0.95)
pc_B6<-predict(B6,newdata=pred.frame,interval="confidence", level=0.95)
pc_B7<-predict(B7,newdata=pred.frame,interval="confidence", level=0.95)
pc_B8<-predict(B8,newdata=pred.frame,interval="confidence", level=0.95)
pc_B9<-predict(B9,newdata=pred.frame,interval="confidence", level=0.95)
pc_A1
pc_A6
pc_A9
pc_B3
pc_B6
pc_B9
#STEP #6: GRAPH BEST MODELS (Change model number in plots for low, medium, high, & combined models)
#D108 Tahltan
Data3<- read.table("clipboard", header=T, sep="\t") #Fitted Data
par(mfrow=c(2,3))
plot(Prop~StatWeek,data=Data1, subset=Size=="Low", las=1, type="p", pch=16, cex=0.8,
col=1,xlab="Statistical Week", ylab="Proportion", main="<40,000 run size D108 fishery",
font.lab=1,xlim=c(24,34), ylim=c(0,0.8),
cex.lab=1, cex.main=1, font.axis=1)
lines (A1~StatWeek,data=Data3,col=1, pch=8, cex=0.8)
plot(Prop~StatWeek,data=Data1, subset=Size=="High", las=1, type="p", pch=16, cex=0.8,
col=1,xlab="Statistical Week", ylab="Proportion", main=">80,000 run size D108 fishery",
font.lab=1,
cex.lab=1, cex.main=1, font.axis=1)
lines (A6~StatWeek,data=Data3,col=1, pch=8, cex=1.8)
plot(Prop~StatWeek,data=Data1, las=1, type="p", pch=16, cex=0.8,
col=1,xlab="Statistical Week", ylab="Proportion", main="D108 fishery",
font.lab=1,
cex.lab=1, cex.main=1, font.axis=1)
lines (A9~StatWeek,data=Data3,col=1, pch=8, cex=1.8)
plot(Prop~StatWeek,data=Data2, subset=Size=="Low", las=1, type="p", pch=16, cex=0.8,
col=1,xlab="Statistical Week", ylab="Proportion", main="<40,000 run size D106-41/42 fishery",
font.lab=1,
cex.lab=1, cex.main=1, font.axis=1)
lines (B3~StatWeek,data=Data3,col=1, pch=8, cex=0.8)
plot(Prop~StatWeek,data=Data2, subset=Size=="High", las=1, type="p", pch=16, cex=0.8,
col=1,xlab="Statistical Week", ylab="Proportion", main=">80,000 run size D106-41/42 fishery",
font.lab=1,
cex.lab=1, cex.main=1, font.axis=1)
lines (B6~StatWeek,data=Data3,col=1, pch=8, cex=1.8)
plot(Prop~StatWeek,data=Data2, las=1, type="p", pch=16, cex=0.8,
col=1,xlab="Statistical Week", ylab="Proportion", main="D106-41/42 fishery",
font.lab=1,
cex.lab=1, cex.main=1, font.axis=1)
lines (B9~StatWeek,data=Data3,col=1, pch=8, cex=1.8)
#STEP #7: DIAGNOSTICS (BEST MODEL); Change model number based on model that figures pertain to
par(mfrow=c(2,2))
plot(A9, which=2, main="Figure A")
plot(A9, which=2, main="Figure B")
plot(A9, which=3, main="Figure C")
plot(A9, which=4, main="Figure D")
plot(A9, which=5, main="Figure E")
plot(A9, which=6, main="Figure F")
#STEP #8: SHOW FITTED VALUES IN A FIGURE FOR LOW, HIGH
par(mfrow=c(1,2))
plot(A1~StatWeek,data=Data3,las=1, type="l", pch=16, cex=0.8,
col=1,xlab="Statistical Week", ylab="Proportion",
font.lab=1, ylim=c(0,0.7), xlim=c(24,41),
cex.lab=1, cex.main=1, font.axis=1)
lines(A6~StatWeek,data=Data3,col=2, pch=16, cex=0.8)
lines(A9~StatWeek,data=Data3,col=3, pch=16, cex=0.8)
legend (34,0.7, legend=c("<40,000", ">80,000", "All Data"),
cex=0.75,pch=16,col=c(1,2,3), bty="n", lwd=1)
plot(B3~StatWeek,data=Data3,las=1, type="l", pch=16, cex=0.8,
col=1,xlab="Statistical Week", ylab="Proportion",
font.lab=1, ylim=c(0,0.7), xlim=c(24,41),
cex.lab=1, cex.main=1, font.axis=1)
lines(B6~StatWeek,data=Data3,col=2, pch=16, cex=0.8)
lines(B9~StatWeek,data=Data3,col=3, pch=16, cex=0.8)
legend (34,0.7, legend=c("<40,000", ">80,000", "All Data"),
cex=0.75,pch=16,col=c(1,2,3), bty="n", lwd=1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/toSpatialPoints.R
\name{toSpatialPoints}
\alias{toSpatialPoints}
\title{toSpatialPoints}
\usage{
toSpatialPoints(x, lonlat, verbose = TRUE)
}
\arguments{
\item{x}{an object of class "data.frame"}
\item{lonlat}{a vector of length 2 giving the lon/lat column names, e.g. c("Lon","Lat")}
\item{verbose}{TRUE (by default) to display logs}
}
\value{
an object of class "SpatialPoints"
}
\description{
Convert a data.frame with lonlat columns to a SpatialPoints object
}
\author{
Emmanuel Blondel \email{emmanuel.blondel1@gmail.com}
}
| /man/toSpatialPoints.Rd | no_license | openfigis/RFigisGeo | R | false | true | 607 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/toSpatialPoints.R
\name{toSpatialPoints}
\alias{toSpatialPoints}
\title{toSpatialPoints}
\usage{
toSpatialPoints(x, lonlat, verbose = TRUE)
}
\arguments{
\item{x}{an object of class "data.frame"}
\item{lonlat}{a vector of length 2 giving the lon/lat column names, e.g. c("Lon","Lat")}
\item{verbose}{TRUE (by default) to display logs}
}
\value{
an object of class "SpatialPoints"
}
\description{
Convert a data.frame with lonlat columns to a SpatialPoints object
}
\author{
Emmanuel Blondel \email{emmanuel.blondel1@gmail.com}
}
|
#raw data
#####
# NOTE(review): this first path is immediately overwritten by the next line
# and is therefore dead; kept as-is pending confirmation it can be removed.
pat <- "nmer/R_RawData.xlsx"
pat <- "R_RawData.xlsx"
##
# Load each worksheet of the raw workbook into its own data frame.
# Sheet meanings are inferred from the variable names only (db, rs, nhr,
# ld, dd, ap, ar, ldp) - confirm against the workbook tabs.
db <- read_excel("R_RawData.xlsx",2,col_names = TRUE,skip=1)
rs <- read_excel(pat,3,col_names = TRUE,skip=1)
nhr <- read_excel(pat,4,col_names = TRUE,skip=5)
ld <- read_excel(pat,7,col_names = TRUE,skip=5)
dd <- read_excel(pat,5,col_names = TRUE,skip=1)
ap <- read_excel(pat,6,col_names = TRUE,skip=1)
ar <- read_excel(pat,9,col_names = TRUE,skip=1)
# Combine the two sheets read above into a single ap table.
ap <- merge(ap,ar)
# Keep only rows 1-3 / columns 3-8 of sheet 11.
ldp <- read_excel(pat,11,col_names = TRUE,skip=6)
ldp <- ldp[1:3,3:8]
#####
# Add two working columns to db, both seeded from `total`:
# n and Manual. A single mutate() call replaces the original
# pair of one-column mutate() calls; the result is identical.
db <- mutate(db,
             n = total,
             Manual = total)
db[["Modify"]]<-
paste0('
<div class="btn-group" role="group" aria-label="Basic example">
<button type="button" class="btn btn-secondary delete" id=modify_',1:nrow(db),'>Change</button>
</div>
')
db[["Edit"]]<-
paste0('
<div class="btn-group" role="group" aria-label="Basic example">
<button type="button" class="btn btn-secondary delete" id=modify_',1:nrow(db),'><span class="glyphicon glyphicon-edit" aria-hidden="true"></span></button>
</div>
')
##################nhr
# year<-c(2018:2023)
# MEA <- rep(1.1,6)
# APAC <- rep(1.3,6)
# AMR <- rep(1.4,6)
# EUR <- rep(1.2,6)
# nhr <- data_frame(year, MEA, APAC,AMR, EUR)
##################################################################### cd
db <- mutate(db,
NHR= case_when(Region == "MEA" ~ nhr$MEA[1], Region == "APAC" ~ nhr$APAC[1],
Region == "AMR" ~ nhr$AMR[1], Region == "EUR" ~ nhr$EUR[1])
)
#####
#rs
#####
# Join the dd and ap tables onto rs, then derive the scenario columns in
# one mutate() call (mutate evaluates left to right, so each new column
# may reference the ones created before it - same result as the original
# chain of single-column mutate() calls).
rs <- merge(rs, dd)
rs <- merge(rs, ap)
rs <- mutate(rs,
             as_is_s = total,
             # fall back to the as-is value where no right-sized figure exists
             opt = case_when(is.na(`Right-Sized Numbers`) ~ as_is_s,
                             TRUE ~ `Right-Sized Numbers`),
             crvt = (as_is_s + opt) / 2,   # midpoint of as-is and right-sized
             sels = crvt,                  # selected value (defaults to midpoint)
             subt = as_is_s - sels,        # amount below as-is
             addt = sels - as_is_s)        # amount above as-is
#####
#end rs
#nhr
#####
# Attach the per-region NHR value for each year 2018-2023 as columns
# NHR18..NHR23. `nhr` holds one row per year (row 1 = 2018 ... row 6 = 2023);
# the value chosen for each rs row depends on its Region.
# This loop replaces six nearly identical mutate()/case_when() blocks
# (pure DRY refactor - columns are created in the same order with the
# same values as before).
for (i in seq_len(6)) {
  rs[[paste0("NHR", 17 + i)]] <- case_when(
    rs$Region == "MEA"  ~ nhr$MEA[i],
    rs$Region == "APAC" ~ nhr$APAC[i],
    rs$Region == "AMR"  ~ nhr$AMR[i],
    rs$Region == "EUR"  ~ nhr$EUR[i]
  )
}
# For each year 2018-2023 compute o<yy> = as_is_s minus that year's column,
# and (for every year except the last) s<yy> = o<yy> grown by the FOLLOWING
# year's NHR factor. Columns are created in the same interleaved order
# (o18, s18, o19, s19, ..., o23) as the original eleven mutate() calls.
for (yy in 18:23) {
  o_col <- paste0("o", yy)
  rs[[o_col]] <- rs$as_is_s - rs[[paste0("20", yy)]]
  if (yy < 23) {
    rs[[paste0("s", yy)]] <- rs[[o_col]] * (1 + rs[[paste0("NHR", yy + 1)]])
  }
}
#####
#end nhr
# demand Driver
#####
rowSums(rs[,c(rs$`DD1 Weightage`,rs$`DD2 Weightage`)], na.rm=TRUE)
rs <- mutate(rs, dd_test = rowSums(rs[,c("DD1 Weightage","DD2 Weightage","DD3 Weightage","DD4 Weightage","DD5 Weightage")], na.rm=TRUE) )
rs <- mutate(rs, `DD1 Weightage` = case_when(is.na( `DD1 Weightage`) ~ 1, TRUE ~ `DD1 Weightage`))
rs <- mutate(rs, `DD1 2018` = case_when(is.na( `DD1 2018`) ~ 1, TRUE ~ `DD1 2018`))
rs <- mutate(rs, `DD1 2019` = case_when(is.na( `DD1 2019`) ~ 1, TRUE ~ `DD1 2019`))
rs <- mutate(rs, `DD1 2020` = case_when(is.na( `DD1 2020`) ~ 1, TRUE ~ `DD1 2020`))
rs <- mutate(rs, `DD1 2021` = case_when(is.na( `DD1 2021`) ~ 1, TRUE ~ `DD1 2021`))
rs <- mutate(rs, `DD1 2022` = case_when(is.na( `DD1 2022`) ~ 1, TRUE ~ `DD1 2022`))
rs <- mutate(rs, `DD1 2023` = case_when(is.na( `DD1 2023`) ~ 1, TRUE ~ `DD1 2023`))
rs <- mutate(rs, dd_c = `DD1 Weightage` + 1 - dd_test)
rs <- mutate(rs, dds19 = sels * dd_c * `DD1 2019` / `DD1 2018`+ case_when(is.na( `DD2 2018`) ~ 0, TRUE ~ sels *dd_c *`DD2 Weightage`*`DD2 2019` / `DD2 2018`))
rs <- mutate(rs, dds20 = sels * dd_c * `DD1 2020` / `DD1 2018`+ case_when(is.na( `DD2 2019`) ~ 0, TRUE ~ sels *dd_c *`DD2 Weightage`*`DD2 2020` / `DD2 2018`))
rs <- mutate(rs, dds21 = sels * dd_c * `DD1 2021` / `DD1 2018`+ case_when(is.na( `DD2 2020`) ~ 0, TRUE ~ sels *dd_c *`DD2 Weightage`*`DD2 2021` / `DD2 2018`))
rs <- mutate(rs, dds22 = sels * dd_c * `DD1 2022` / `DD1 2018`+ case_when(is.na( `DD2 2021`) ~ 0, TRUE ~ sels *dd_c *`DD2 Weightage`*`DD2 2022` / `DD2 2018`))
rs <- mutate(rs, dds23 = sels * dd_c * `DD1 2023` / `DD1 2018`+ case_when(is.na( `DD2 2022`) ~ 0, TRUE ~ sels *dd_c *`DD2 Weightage`*`DD2 2023` / `DD2 2018`))
#####
#end DD
#ld
#####
# Copy the learning-duration columns T=1..T=5 from ld onto rs as ld1..ld5.
rs <- mutate(rs, ld1 = ld$`T=1`)
rs <- mutate(rs, ld2 = ld$`T=2`)
rs <- mutate(rs, ld3 = ld$`T=3`)
rs <- mutate(rs, ld4 = ld$`T=4`)
rs <- mutate(rs, ld5 = ld$`T=5`)
#ld procent cal
# ldt<k> accumulates the row-3 ldp percentages for periods 1..k, scaled by
# ld<k>/2000. The 2000 divisor is presumably annual working hours -
# TODO confirm against the workbook.
rs <- mutate(rs, ldt1 = ldp$`t=1`[3]*ld1/2000)
rs <- mutate(rs, ldt2 = (ldp$`t=1`[3]+ldp$`t=2`[3])*ld2/2000)
rs <- mutate(rs, ldt3 = (ldp$`t=1`[3]+ldp$`t=2`[3]+ldp$`t=3`[3])*ld3/2000)
rs <- mutate(rs, ldt4 = (ldp$`t=1`[3]+ldp$`t=2`[3]+ldp$`t=3`[3]+ldp$`t=4`[3])*ld4/2000)
rs <- mutate(rs, ldt5 = (ldp$`t=1`[3]+ldp$`t=2`[3]+ldp$`t=3`[3]+ldp$`t=4`[3]+ldp$`t=5`[3])*ld5/2000)
#ld FTE Impact
# fte<k>: demand-driver scenario dds<yy> reduced by the ldt<k> fraction.
rs <- mutate(rs, fte1 = (1-ldt1)*dds19)
rs <- mutate(rs, fte2 = (1-ldt2)*dds20)
rs <- mutate(rs, fte3 = (1-ldt3)*dds21)
rs <- mutate(rs, fte4 = (1-ldt4)*dds22)
rs <- mutate(rs, fte5 = (1-ldt5)*dds23)
#####
#end
| /nmar/prep_dat.R | no_license | lukuiR/Rpublic | R | false | false | 6,314 | r | #raw data
#####
pat <- "nmer/R_RawData.xlsx"
pat <- "R_RawData.xlsx"
##
db <- read_excel("R_RawData.xlsx",2,col_names = TRUE,skip=1)
rs <- read_excel(pat,3,col_names = TRUE,skip=1)
nhr <- read_excel(pat,4,col_names = TRUE,skip=5)
ld <- read_excel(pat,7,col_names = TRUE,skip=5)
dd <- read_excel(pat,5,col_names = TRUE,skip=1)
ap <- read_excel(pat,6,col_names = TRUE,skip=1)
ar <- read_excel(pat,9,col_names = TRUE,skip=1)
ap <- merge(ap,ar)
ldp <- read_excel(pat,11,col_names = TRUE,skip=6)
ldp <- ldp[1:3,3:8]
#####
db <- mutate(db,
n= total
)
db <- mutate(db,
Manual= total
)
db[["Modify"]]<-
paste0('
<div class="btn-group" role="group" aria-label="Basic example">
<button type="button" class="btn btn-secondary delete" id=modify_',1:nrow(db),'>Change</button>
</div>
')
db[["Edit"]]<-
paste0('
<div class="btn-group" role="group" aria-label="Basic example">
<button type="button" class="btn btn-secondary delete" id=modify_',1:nrow(db),'><span class="glyphicon glyphicon-edit" aria-hidden="true"></span></button>
</div>
')
##################nhr
# year<-c(2018:2023)
# MEA <- rep(1.1,6)
# APAC <- rep(1.3,6)
# AMR <- rep(1.4,6)
# EUR <- rep(1.2,6)
# nhr <- data_frame(year, MEA, APAC,AMR, EUR)
##################################################################### cd
db <- mutate(db,
NHR= case_when(Region == "MEA" ~ nhr$MEA[1], Region == "APAC" ~ nhr$APAC[1],
Region == "AMR" ~ nhr$AMR[1], Region == "EUR" ~ nhr$EUR[1])
)
#####
#rs
#####
rs <- merge(rs,dd)
rs <- merge(rs,ap)
rs <- mutate(rs, as_is_s = total)
rs <- mutate(rs, opt =case_when(is.na(`Right-Sized Numbers`) ~ as_is_s, TRUE ~ `Right-Sized Numbers`))
rs <- mutate(rs, crvt = (as_is_s+opt)/2)
rs <- mutate(rs, sels = crvt)
rs <- mutate(rs, subt = as_is_s - sels)
rs <- mutate(rs, addt = sels - as_is_s)
#####
#end rs
#nhr
#####
rs <- mutate(rs,
NHR18= case_when(Region == "MEA" ~ nhr$MEA[1], Region == "APAC" ~ nhr$APAC[1],
Region == "AMR" ~ nhr$AMR[1], Region == "EUR" ~ nhr$EUR[1])
)
rs <- mutate(rs,
NHR19= case_when(Region == "MEA" ~ nhr$MEA[2], Region == "APAC" ~ nhr$APAC[2],
Region == "AMR" ~ nhr$AMR[2], Region == "EUR" ~ nhr$EUR[2])
)
rs <- mutate(rs,
NHR20= case_when(Region == "MEA" ~ nhr$MEA[3], Region == "APAC" ~ nhr$APAC[3],
Region == "AMR" ~ nhr$AMR[3], Region == "EUR" ~ nhr$EUR[3])
)
rs <- mutate(rs,
NHR21= case_when(Region == "MEA" ~ nhr$MEA[4], Region == "APAC" ~ nhr$APAC[4],
Region == "AMR" ~ nhr$AMR[4], Region == "EUR" ~ nhr$EUR[4])
)
rs <- mutate(rs,
NHR22= case_when(Region == "MEA" ~ nhr$MEA[5], Region == "APAC" ~ nhr$APAC[5],
Region == "AMR" ~ nhr$AMR[5], Region == "EUR" ~ nhr$EUR[5])
)
rs <- mutate(rs,
NHR23= case_when(Region == "MEA" ~ nhr$MEA[6], Region == "APAC" ~ nhr$APAC[6],
Region == "AMR" ~ nhr$AMR[6], Region == "EUR" ~ nhr$EUR[6])
)
rs <- mutate(rs, o18 = as_is_s - `2018`)
rs <- mutate(rs, s18 = o18*(1+NHR19))
rs <- mutate(rs, o19 = as_is_s - `2019`)
rs <- mutate(rs, s19 = o19*(1+NHR20))
rs <- mutate(rs, o20 = as_is_s - `2020`)
rs <- mutate(rs, s20 = o20*(1+NHR21))
rs <- mutate(rs, o21 = as_is_s - `2021`)
rs <- mutate(rs, s21 = o21*(1+NHR22))
rs <- mutate(rs, o22 = as_is_s - `2022`)
rs <- mutate(rs, s22 = o22*(1+NHR23))
rs <- mutate(rs, o23 = as_is_s - `2023`)
#####
#end nhr
# demand Driver
#####
rowSums(rs[,c(rs$`DD1 Weightage`,rs$`DD2 Weightage`)], na.rm=TRUE)
rs <- mutate(rs, dd_test = rowSums(rs[,c("DD1 Weightage","DD2 Weightage","DD3 Weightage","DD4 Weightage","DD5 Weightage")], na.rm=TRUE) )
rs <- mutate(rs, `DD1 Weightage` = case_when(is.na( `DD1 Weightage`) ~ 1, TRUE ~ `DD1 Weightage`))
rs <- mutate(rs, `DD1 2018` = case_when(is.na( `DD1 2018`) ~ 1, TRUE ~ `DD1 2018`))
rs <- mutate(rs, `DD1 2019` = case_when(is.na( `DD1 2019`) ~ 1, TRUE ~ `DD1 2019`))
rs <- mutate(rs, `DD1 2020` = case_when(is.na( `DD1 2020`) ~ 1, TRUE ~ `DD1 2020`))
rs <- mutate(rs, `DD1 2021` = case_when(is.na( `DD1 2021`) ~ 1, TRUE ~ `DD1 2021`))
rs <- mutate(rs, `DD1 2022` = case_when(is.na( `DD1 2022`) ~ 1, TRUE ~ `DD1 2022`))
rs <- mutate(rs, `DD1 2023` = case_when(is.na( `DD1 2023`) ~ 1, TRUE ~ `DD1 2023`))
rs <- mutate(rs, dd_c = `DD1 Weightage` + 1 - dd_test)
rs <- mutate(rs, dds19 = sels * dd_c * `DD1 2019` / `DD1 2018`+ case_when(is.na( `DD2 2018`) ~ 0, TRUE ~ sels *dd_c *`DD2 Weightage`*`DD2 2019` / `DD2 2018`))
rs <- mutate(rs, dds20 = sels * dd_c * `DD1 2020` / `DD1 2018`+ case_when(is.na( `DD2 2019`) ~ 0, TRUE ~ sels *dd_c *`DD2 Weightage`*`DD2 2020` / `DD2 2018`))
rs <- mutate(rs, dds21 = sels * dd_c * `DD1 2021` / `DD1 2018`+ case_when(is.na( `DD2 2020`) ~ 0, TRUE ~ sels *dd_c *`DD2 Weightage`*`DD2 2021` / `DD2 2018`))
rs <- mutate(rs, dds22 = sels * dd_c * `DD1 2022` / `DD1 2018`+ case_when(is.na( `DD2 2021`) ~ 0, TRUE ~ sels *dd_c *`DD2 Weightage`*`DD2 2022` / `DD2 2018`))
rs <- mutate(rs, dds23 = sels * dd_c * `DD1 2023` / `DD1 2018`+ case_when(is.na( `DD2 2022`) ~ 0, TRUE ~ sels *dd_c *`DD2 Weightage`*`DD2 2023` / `DD2 2018`))
#####
#end DD
#ld
#####
rs <- mutate(rs, ld1 = ld$`T=1`)
rs <- mutate(rs, ld2 = ld$`T=2`)
rs <- mutate(rs, ld3 = ld$`T=3`)
rs <- mutate(rs, ld4 = ld$`T=4`)
rs <- mutate(rs, ld5 = ld$`T=5`)
#ld procent cal
rs <- mutate(rs, ldt1 = ldp$`t=1`[3]*ld1/2000)
rs <- mutate(rs, ldt2 = (ldp$`t=1`[3]+ldp$`t=2`[3])*ld2/2000)
rs <- mutate(rs, ldt3 = (ldp$`t=1`[3]+ldp$`t=2`[3]+ldp$`t=3`[3])*ld3/2000)
rs <- mutate(rs, ldt4 = (ldp$`t=1`[3]+ldp$`t=2`[3]+ldp$`t=3`[3]+ldp$`t=4`[3])*ld4/2000)
rs <- mutate(rs, ldt5 = (ldp$`t=1`[3]+ldp$`t=2`[3]+ldp$`t=3`[3]+ldp$`t=4`[3]+ldp$`t=5`[3])*ld5/2000)
#ld FTE Impact
rs <- mutate(rs, fte1 = (1-ldt1)*dds19)
rs <- mutate(rs, fte2 = (1-ldt2)*dds20)
rs <- mutate(rs, fte3 = (1-ldt3)*dds21)
rs <- mutate(rs, fte4 = (1-ldt4)*dds22)
rs <- mutate(rs, fte5 = (1-ldt5)*dds23)
#####
#end
|
/DS-R/Sesion 2/Ejemplo 1.R | no_license | miguelmontcerv/Bedu_Fase_2 | R | false | false | 1,275 | r | ||
### Instalar los packages necesarios ###
# install.packages("rvest")
# install.packages("data.table")
# install.packages("ggplot2")
### Llamar los packages a utilizar ###
library('rvest')
library(data.table)
library(ggplot2)
#==================== usando Xvideos ====================#
# Se busca en la pagina Xvideo: ANAL con filtro de valoracion
# Inicializando la var de archivo con el nombre de la página a utilizar
paginaXVideos <- 'https://www.xvideos.com/?k=anal'
# Leyendo el html del archivo
webpageXVideos <- read_html(paginaXVideos)
# Extraccion del texto contenido en la clase thumb-under
contenidoWebXVideos <- html_nodes(webpageXVideos,'.thumb-under > p > a')
print (contenidoWebXVideos)
# Viendo el contenido de la posición 1 de la variable contenidoWebXVideos
print(contenidoWebXVideos[1])
# Extrayendo los links de los videos
linksVIDEOS <- html_attr(contenidoWebXVideos,"href")
# Arreglando los links de todos los videos
for(i in 1:27){
LinksXvideo <- print(paste("http://www.xvideos.com",linksVIDEOS,sep = ""))
}
# Viendo que tiene la posicion 1 de la variable todosLosLinksXvideo
print(LinksXvideo[1])
# Viendo cuantas variables tiene LinksXvideo
length(LinksXvideo)
# Extrayendo el texto de contenidoWebXVideos
textoXVideos <- html_text(contenidoWebXVideos)
# Viendo que tiene la posicion 1 la variable textoXVideos
print(textoXVideos[1])
# Extraccion de duracion de cada video
DurationXVideos <- html_nodes(webpageXVideos,'.duration')
#Limpieza de los datos de duracion
DuracionXVideos <- html_text(DurationXVideos)
# Viendo que tiene la posición 1 de la variable DuracionXVideos
print(DuracionXVideos[1])
# Primer paso para extraer el numero de visitas de cada video
VistasXVideos <- html_nodes(webpageXVideos,'.thumb-under > p > span')
# Limpiando los datos para tener solo el texto
texto_VistasXVideos <- html_text(VistasXVideos)
# Separando el texto obtenido con un guion para despues eliminar la duracion
split_VistasXVideos <- strsplit(texto_VistasXVideos,"-")
# Obteniendo el primer dato de views
viewsXVideos <- list()
for(i in 1:length(split_VistasXVideos)){
print(split_VistasXVideos[[i]][[2]])
viewsXVideos[i] <- split_VistasXVideos[[i]][[2]]
}
# Limpiando los datos obtenidos de views
viewsXVideos <- gsub("Views","",viewsXVideos)
viewsXVideos <- gsub(" ","",viewsXVideos)
viewsXVideos <- gsub("k","-k",viewsXVideos)
viewsXVideos <- gsub("M","-M",viewsXVideos)
# Separando los datos para luego reemplazar k y M numericamente
Visitas <- strsplit(viewsXVideos,"-")
# Crear funcion para reemplazar k y M numericamente #
# VisitasXVideo: character vector -> character (the numeric value rendered as text)
# VisitasXVideo: entrega la cantidad de visitas de cada video
# si aparece una k se multiplica el numero por mil
# si aparece una M se multimplica por un millon
# Ejemplo: VisitasXVideo(4k)-> 4000
# VisitasXVideo: turn a split count such as c("4", "k") into its numeric
# value written back as text ("4000"). Input comes from strsplit() on
# strings like "4-k" / "2-M"; element 2 is the optional magnitude suffix
# (absent -> NA). An unrecognised suffix leaves the value untouched.
# Note the result stays a character string: assigning a number into the
# character vector `entrada` coerces it back to text, exactly as the
# original implementation did.
VisitasXVideo <- function (entrada){
  sufijo <- entrada[2]
  if (is.na(sufijo)) {
    factor <- 1           # plain number, no suffix
  } else if (sufijo == "k") {
    factor <- 1000        # thousands
  } else if (sufijo == "M") {
    factor <- 1000000     # millions
  } else {
    return(entrada[1])    # unknown suffix: leave the raw value as-is
  }
  entrada[1] <- as.numeric(entrada[1]) * factor
  entrada[1]
}
# Convert every split view count to its numeric text form.
# seq_along() replaces 1:length(): with an empty list the original form
# would iterate over c(1, 0) and error.
for (i in seq_along(Visitas)) {
  Visitas[i] <- VisitasXVideo(Visitas[[i]])
}
# Ver la posicion 1 de visitas
Visitas[1]
# Extrae los elementos de la lista y los pasa a una lista
unlistVisitas <- unlist(Visitas)
# Crear lista para agregar likes extraidos
Me_Gusta <- list()
# Crear lista para agregar dislikes extraidos
No_Me_Gusta <- list()
### Extrayendo likes y dislikes por cada uno de los links sin for ###
# Visit each video page and collect the like/dislike counts shown in the
# '.rating-inbtn' nodes (first node = likes, second = dislikes).
# Replaces 27 copy-pasted blocks. This also fixes a copy-paste bug in the
# original code: for video 4 the ratings of video 1 were reused
# (Texto_rating04 was taken from Rating_link01 instead of Rating_link04).
for (i in seq_along(LinksXvideo)) {
  pagina_video <- read_html(LinksXvideo[i])
  nodos_rating <- html_nodes(pagina_video, '.rating-inbtn')
  texto_rating <- html_text(nodos_rating)
  Me_Gusta[[i]] <- texto_rating[1]      # likes
  No_Me_Gusta[[i]] <- texto_rating[2]   # dislikes
}
# Verificando que la lista posea 27 variables
length(Me_Gusta)
length(No_Me_Gusta)
# Arreglando los datos extraidos
Me_Gusta <- gsub("k","-k",Me_Gusta)
Me_Gusta <- strsplit(Me_Gusta, "-")
No_Me_Gusta <- gsub("k","-k",No_Me_Gusta)
No_Me_Gusta <- strsplit(No_Me_Gusta, "-")
# Apply VisitasXVideo to every like/dislike entry.
# seq_along() replaces 1:length(), which misbehaves on an empty list.
for (i in seq_along(Me_Gusta)) {
  Me_Gusta[i] <- VisitasXVideo(Me_Gusta[[i]])
}
for (i in seq_along(No_Me_Gusta)) {
  No_Me_Gusta[i] <- VisitasXVideo(No_Me_Gusta[[i]])
}
# Extrae los elementos de una lista y los pasa a una lista
unlistMe_Gusta <- unlist(Me_Gusta)
unlistNo_Me_Gusta <- unlist(No_Me_Gusta)
# Category flag for every row of this search: "7" = ANAL (this script
# builds Tabla07 for the "anal" query; the original comment saying
# "5 es GAY" was a copy-paste leftover from another script).
# rep() replaces the hand-written list of 27 identical strings.
Tipo <- as.list(rep("7", 27))
unlistTipo <- unlist(Tipo)
# Se genera una tabla con los datos obtenidos para ANAL (tipo 7)
dfANAL <- data.frame(LINKS = LinksXvideo, TITULO= textoXVideos, TIPO= unlistTipo, VISITAS= unlistVisitas, ME_GUSTA= unlistMe_Gusta, NO_ME_GUSTA= unlistNo_Me_Gusta)
# Almacenando la informacion en CSV
write.csv(dfANAL, file="Tabla07.csv") | /Data_Anal.R | no_license | BarbaraBlue/Xvideo-BigData | R | false | false | 11,132 | r | ### Instalar los packages necesarios ###
# install.packages("rvest")
# install.packages("data.table")
# install.packages("ggplot2")
### Llamar los packages a utilizar ###
library('rvest')
library(data.table)
library(ggplot2)
#==================== usando Xvideos ====================#
# Se busca en la pagina Xvideo: ANAL con filtro de valoracion
# Inicializando la var de archivo con el nombre de la página a utilizar
paginaXVideos <- 'https://www.xvideos.com/?k=anal'
# Leyendo el html del archivo
webpageXVideos <- read_html(paginaXVideos)
# Extraccion del texto contenido en la clase thumb-under
contenidoWebXVideos <- html_nodes(webpageXVideos,'.thumb-under > p > a')
print (contenidoWebXVideos)
# Viendo el contenido de la posición 1 de la variable contenidoWebXVideos
print(contenidoWebXVideos[1])
# Extrayendo los links de los videos
linksVIDEOS <- html_attr(contenidoWebXVideos,"href")
# Arreglando los links de todos los videos
for(i in 1:27){
LinksXvideo <- print(paste("http://www.xvideos.com",linksVIDEOS,sep = ""))
}
# Viendo que tiene la posicion 1 de la variable todosLosLinksXvideo
print(LinksXvideo[1])
# Viendo cuantas variables tiene LinksXvideo
length(LinksXvideo)
# Extrayendo el texto de contenidoWebXVideos
textoXVideos <- html_text(contenidoWebXVideos)
# Viendo que tiene la posicion 1 la variable textoXVideos
print(textoXVideos[1])
# Extraccion de duracion de cada video
DurationXVideos <- html_nodes(webpageXVideos,'.duration')
#Limpieza de los datos de duracion
DuracionXVideos <- html_text(DurationXVideos)
# Viendo que tiene la posición 1 de la variable DuracionXVideos
print(DuracionXVideos[1])
# Primer paso para extraer el numero de visitas de cada video
VistasXVideos <- html_nodes(webpageXVideos,'.thumb-under > p > span')
# Limpiando los datos para tener solo el texto
texto_VistasXVideos <- html_text(VistasXVideos)
# Separando el texto obtenido con un guion para despues eliminar la duracion
split_VistasXVideos <- strsplit(texto_VistasXVideos,"-")
# Obteniendo el primer dato de views
viewsXVideos <- list()
for(i in 1:length(split_VistasXVideos)){
print(split_VistasXVideos[[i]][[2]])
viewsXVideos[i] <- split_VistasXVideos[[i]][[2]]
}
# Limpiando los datos obtenidos de views
viewsXVideos <- gsub("Views","",viewsXVideos)
viewsXVideos <- gsub(" ","",viewsXVideos)
viewsXVideos <- gsub("k","-k",viewsXVideos)
viewsXVideos <- gsub("M","-M",viewsXVideos)
# Separando los datos para luego reemplazar k y M numericamente
Visitas <- strsplit(viewsXVideos,"-")
# Crear funcion para reemplazar k y M numericamente #
# VisitasXVideo: string -> double
# VisitasXVideo: entrega la cantidad de visitas de cada video
# si aparece una k se multiplica el numero por mil
# si aparece una M se multimplica por un millon
# Ejemplo: VisitasXVideo(4k)-> 4000
VisitasXVideo <- function (entrada){
  # entrada: character vector from strsplit(), e.g. c("4", "k") or "7".
  # Returns the scaled value as a *character* string: assigning a numeric
  # into the character vector `entrada` coerces it back to text.
  # Elements without a "k"/"M" suffix have no second piece, hence is.na.
  if(is.na(entrada[2])){
    entrada[1] <- as.numeric(entrada[1])
  }else if(entrada[2]=="k"){
    # "k" suffix: thousands
    entrada[1] <- as.numeric(entrada[1])*1000
  }else if(entrada[2]=="M"){
    # "M" suffix: millions
    entrada[1] <- as.numeric(entrada[1])*1000000
  }
  # Unrecognised suffixes fall through and return the raw value unchanged.
  return(entrada[1])
}
# Recorriendo cada elemento aplicando la funcion VisitasXVideo
for(i in 1:length(Visitas)){
Visitas[i] <- VisitasXVideo(Visitas[[i]])
}
# Ver la posicion 1 de visitas
Visitas[1]
# Extrae los elementos de la lista y los pasa a una lista
unlistVisitas <- unlist(Visitas)
# Crear lista para agregar likes extraidos
Me_Gusta <- list()
# Crear lista para agregar dislikes extraidos
No_Me_Gusta <- list()
### Extrayendo likes y dislikes por cada uno de los links sin for ###
Leer_link01 <-read_html(LinksXvideo[1])
Rating_link01 <- html_nodes(Leer_link01, '.rating-inbtn')
Texto_rating01 <- html_text(Rating_link01)
Me_Gusta <- c(Me_Gusta, Texto_rating01[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating01[2])
Leer_link02 <-read_html(LinksXvideo[2])
Rating_link02 <- html_nodes(Leer_link02, '.rating-inbtn')
Texto_rating02 <- html_text(Rating_link02)
Me_Gusta <- c(Me_Gusta, Texto_rating02[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating02[2])
Leer_link03 <-read_html(LinksXvideo[3])
Rating_link03 <- html_nodes(Leer_link03, '.rating-inbtn')
Texto_rating03 <- html_text(Rating_link03)
Me_Gusta <- c(Me_Gusta, Texto_rating03[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating03[2])
Leer_link04 <-read_html(LinksXvideo[4])
Rating_link04 <- html_nodes(Leer_link04, '.rating-inbtn')
Texto_rating04 <- html_text(Rating_link01)
Me_Gusta <- c(Me_Gusta, Texto_rating04[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating04[2])
Leer_link05 <-read_html(LinksXvideo[5])
Rating_link05 <- html_nodes(Leer_link05, '.rating-inbtn')
Texto_rating05 <- html_text(Rating_link05)
Me_Gusta <- c(Me_Gusta, Texto_rating05[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating05[2])
Leer_link06 <-read_html(LinksXvideo[6])
Rating_link06 <- html_nodes(Leer_link06, '.rating-inbtn')
Texto_rating06 <- html_text(Rating_link06)
Me_Gusta <- c(Me_Gusta, Texto_rating06[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating06[2])
Leer_link07 <-read_html(LinksXvideo[7])
Rating_link07 <- html_nodes(Leer_link07, '.rating-inbtn')
Texto_rating07 <- html_text(Rating_link07)
Me_Gusta <- c(Me_Gusta, Texto_rating07[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating07[2])
Leer_link08 <-read_html(LinksXvideo[8])
Rating_link08 <- html_nodes(Leer_link08, '.rating-inbtn')
Texto_rating08 <- html_text(Rating_link08)
Me_Gusta <- c(Me_Gusta, Texto_rating08[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating08[2])
Leer_link09 <-read_html(LinksXvideo[9])
Rating_link09 <- html_nodes(Leer_link09, '.rating-inbtn')
Texto_rating09 <- html_text(Rating_link09)
Me_Gusta <- c(Me_Gusta, Texto_rating09[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating09[2])
Leer_link10 <-read_html(LinksXvideo[10])
Rating_link10 <- html_nodes(Leer_link10, '.rating-inbtn')
Texto_rating10 <- html_text(Rating_link10)
Me_Gusta <- c(Me_Gusta, Texto_rating10[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating10[2])
Leer_link11 <-read_html(LinksXvideo[11])
Rating_link11 <- html_nodes(Leer_link11, '.rating-inbtn')
Texto_rating11 <- html_text(Rating_link11)
Me_Gusta <- c(Me_Gusta, Texto_rating11[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating11[2])
Leer_link12 <-read_html(LinksXvideo[12])
Rating_link12 <- html_nodes(Leer_link12, '.rating-inbtn')
Texto_rating12 <- html_text(Rating_link12)
Me_Gusta <- c(Me_Gusta, Texto_rating12[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating12[2])
Leer_link13 <-read_html(LinksXvideo[13])
Rating_link13 <- html_nodes(Leer_link13, '.rating-inbtn')
Texto_rating13 <- html_text(Rating_link13)
Me_Gusta <- c(Me_Gusta, Texto_rating13[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating13[2])
Leer_link14 <-read_html(LinksXvideo[14])
Rating_link14 <- html_nodes(Leer_link14, '.rating-inbtn')
Texto_rating14 <- html_text(Rating_link14)
Me_Gusta <- c(Me_Gusta, Texto_rating14[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating14[2])
Leer_link15 <-read_html(LinksXvideo[15])
Rating_link15 <- html_nodes(Leer_link15, '.rating-inbtn')
Texto_rating15 <- html_text(Rating_link15)
Me_Gusta <- c(Me_Gusta, Texto_rating15[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating15[2])
Leer_link16 <-read_html(LinksXvideo[16])
Rating_link16 <- html_nodes(Leer_link16, '.rating-inbtn')
Texto_rating16 <- html_text(Rating_link16)
Me_Gusta <- c(Me_Gusta, Texto_rating16[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating16[2])
Leer_link17 <-read_html(LinksXvideo[17])
Rating_link17 <- html_nodes(Leer_link17, '.rating-inbtn')
Texto_rating17 <- html_text(Rating_link17)
Me_Gusta <- c(Me_Gusta, Texto_rating17[1])
No_Me_Gusta <- c(No_Me_Gusta, Texto_rating17[2])
# Scrape the like/dislike counts ('.rating-inbtn' nodes) for links 18-27.
# Replaces ten copy-pasted stanzas with one loop; behavior is unchanged:
# Me_Gusta and No_Me_Gusta each gain one element per link, in link order.
# (The per-link intermediates Leer_linkNN / Texto_ratingNN that the original
# created are never referenced again, so they are not kept.)
for (i in 18:27) {
  pagina <- read_html(LinksXvideo[i])
  texto_rating <- html_text(html_nodes(pagina, '.rating-inbtn'))
  # texto_rating[1] is the "like" count, texto_rating[2] the "dislike" count
  # (presumably — this mirrors how the earlier per-link blocks used them).
  Me_Gusta <- c(Me_Gusta, texto_rating[1])
  No_Me_Gusta <- c(No_Me_Gusta, texto_rating[2])
}
# Sanity check: each rating vector should have one entry per scraped link (27).
length(Me_Gusta)
length(No_Me_Gusta)

# Normalize the scraped counts: insert a separator before a trailing "k"
# (e.g. "1.2k" -> "1.2-k") and split on it, so the thousands suffix can be
# expanded by VisitasXVideo below.
Me_Gusta <- strsplit(gsub("k", "-k", Me_Gusta), "-")
No_Me_Gusta <- strsplit(gsub("k", "-k", No_Me_Gusta), "-")

# VisitasXVideo (defined earlier in this script) converts each split value
# into a plain count; lapply replaces the original 1:length() index loops.
Me_Gusta <- lapply(Me_Gusta, VisitasXVideo)
No_Me_Gusta <- lapply(No_Me_Gusta, VisitasXVideo)

# Flatten the per-link lists into vectors for the data frame.
unlistMe_Gusta <- unlist(Me_Gusta)
unlistNo_Me_Gusta <- unlist(No_Me_Gusta)

# Category code for every row of this table. The original built a 27-element
# literal list; rep() produces the same vector and generalizes to however
# many links were scraped.
# NOTE(review): an analogous block elsewhere commented "5 es GAY" while the
# code stores "7" — confirm which category code this table represents.
unlistTipo <- rep("7", length(LinksXvideo))

# Assemble the final table and save it to CSV.
dfANAL <- data.frame(LINKS = LinksXvideo, TITULO= textoXVideos, TIPO= unlistTipo, VISITAS= unlistVisitas, ME_GUSTA= unlistMe_Gusta, NO_ME_GUSTA= unlistNo_Me_Gusta)
write.csv(dfANAL, file="Tabla07.csv")
#' Qiita Comments API
#'
#' Get, write, update or delete comments via Qiita API.
#'
#' @name qiita_comment
#' @param comment_id Comment ID.
#' @param item_id Item (article) ID.
#' @param per_page Number of items per one page.
#' @param page_offset Number of offset pages.
#' @param page_limit Max number of pages to retrieve.
#' @param body body of the item
#' @examples
#' \dontrun{
#' # get a comment by id
#' qiita_get_comments(comment_id = "1fdbb164e19d79e10203")
#'
#' # get comments by item id
#' qiita_get_comments(item_id = "b4130186e1e095719dcb")
#'
#' # post a comment to some item
#' qiita_post_comment(item_id = "123456789", body = "Thank you!!!")
#' }
#' @export
qiita_get_comments <- function(comment_id = NULL, item_id = NULL,
                               per_page = 100L, page_offset = 0L, page_limit = 1L) {
  # Exactly one of comment_id / item_id must be supplied.
  if (!is.null(comment_id) && !is.null(item_id)) {
    stop("You cannot specify comment_id and item_id both")
  }
  if (is.null(comment_id) && is.null(item_id)) {
    # Fixed typo in the original message ("commend_id").
    stop("Please specify comment_id or item_id")
  }

  if (!is.null(comment_id)) {
    # Get each comment by its own ID (no pagination needed).
    return(purrr::map(comment_id, qiita_get_single_comment_by_id))
  }

  # Otherwise fetch the comments attached to each item, then flatten the
  # per-item lists into one list of comments.
  result <- purrr::map(item_id, qiita_get_comments_by_item,
                       per_page = per_page, page_offset = page_offset,
                       page_limit = page_limit)
  purrr::flatten(result)
}
# Fetch a single comment resource by its ID.
qiita_get_single_comment_by_id <- function(comment_id) {
  endpoint <- sprintf("/api/v2/comments/%s", comment_id)
  qiita_api("GET", path = endpoint)
}
# Fetch all comments attached to one item (article), honoring pagination.
qiita_get_comments_by_item <- function(item_id, per_page, page_offset, page_limit) {
  path <- sprintf("/api/v2/items/%s/comments", item_id)
  # Return the API result directly; the original assigned it to an unused
  # local variable, which also made the return value invisible.
  qiita_api("GET", path = path,
            per_page = per_page, page_offset = page_offset, page_limit = page_limit)
}
#' @rdname qiita_comment
#' @export
# Delete a comment by its ID.
qiita_delete_comment <- function(comment_id) {
  # Validate the input before touching the API.
  if (!purrr::is_scalar_character(comment_id)) {
    stop("comment_id must be a scalar character!")
  }
  qiita_api("DELETE", path = sprintf("/api/v2/comments/%s", comment_id))
}
#' @rdname qiita_comment
#' @export
# Replace the body of an existing comment.
qiita_update_comment <- function(comment_id, body) {
  # Validate inputs before touching the API.
  if (!purrr::is_scalar_character(comment_id)) {
    stop("comment_id must be a scalar character!")
  }
  if (!purrr::is_scalar_character(body)) {
    stop("body must be a scalar character!")
  }
  qiita_api("PATCH",
            path = sprintf("/api/v2/comments/%s", comment_id),
            payload = qiita_util_payload(body = body))
}
#' @rdname qiita_comment
#' @export
# Post a new comment on the given item (article).
qiita_post_comment <- function(item_id, body) {
  # Validate inputs before touching the API.
  if (!purrr::is_scalar_character(item_id)) {
    stop("item_id must be a scalar character!")
  }
  if (!purrr::is_scalar_character(body)) {
    stop("body must be a scalar character!")
  }
  qiita_api("POST",
            path = sprintf("/api/v2/items/%s/comments", item_id),
            payload = qiita_util_payload(body = body))
}
| /R/comment.r | no_license | yutannihilation/qiitr | R | false | false | 2,924 | r | #' Qiita Comments API
#'
#' Get, write, update or delete comments via Qiita API.
#'
#' @name qiita_comment
#' @param comment_id Comment ID.
#' @param item_id Item (article) ID.
#' @param per_page Number of items per one page.
#' @param page_offset Number of offset pages.
#' @param page_limit Max number of pages to retrieve.
#' @param body body of the item
#' @examples
#' \dontrun{
#' # get a comment by id
#' qiita_get_comments(comment_id = "1fdbb164e19d79e10203")
#'
#' # get comments by item id
#' qiita_get_comments(item_id = "b4130186e1e095719dcb")
#'
#' # post a comment to some item
#' qiita_post_comment(item_id = "123456789", body = "Thank you!!!")
#' }
#' @export
qiita_get_comments <- function(comment_id = NULL, item_id = NULL,
                               per_page = 100L, page_offset = 0L, page_limit = 1L) {
  # Exactly one of comment_id / item_id must be supplied.
  if (!is.null(comment_id) && !is.null(item_id)) {
    stop("You cannot specify comment_id and item_id both")
  }
  if (is.null(comment_id) && is.null(item_id)) {
    # Fixed typo in the original message ("commend_id").
    stop("Please specify comment_id or item_id")
  }

  if (!is.null(comment_id)) {
    # Get each comment by its own ID (no pagination needed).
    return(purrr::map(comment_id, qiita_get_single_comment_by_id))
  }

  # Otherwise fetch the comments attached to each item, then flatten the
  # per-item lists into one list of comments.
  result <- purrr::map(item_id, qiita_get_comments_by_item,
                       per_page = per_page, page_offset = page_offset,
                       page_limit = page_limit)
  purrr::flatten(result)
}
# Fetch a single comment resource by its ID.
qiita_get_single_comment_by_id <- function(comment_id) {
  endpoint <- sprintf("/api/v2/comments/%s", comment_id)
  qiita_api("GET", path = endpoint)
}
# Fetch all comments attached to one item (article), honoring pagination.
qiita_get_comments_by_item <- function(item_id, per_page, page_offset, page_limit) {
  path <- sprintf("/api/v2/items/%s/comments", item_id)
  # Return the API result directly; the original assigned it to an unused
  # local variable, which also made the return value invisible.
  qiita_api("GET", path = path,
            per_page = per_page, page_offset = page_offset, page_limit = page_limit)
}
#' @rdname qiita_comment
#' @export
# Delete a comment by its ID.
qiita_delete_comment <- function(comment_id) {
  # Validate the input before touching the API.
  if (!purrr::is_scalar_character(comment_id)) {
    stop("comment_id must be a scalar character!")
  }
  qiita_api("DELETE", path = sprintf("/api/v2/comments/%s", comment_id))
}
#' @rdname qiita_comment
#' @export
# Replace the body of an existing comment.
qiita_update_comment <- function(comment_id, body) {
  # Validate inputs before touching the API.
  if (!purrr::is_scalar_character(comment_id)) {
    stop("comment_id must be a scalar character!")
  }
  if (!purrr::is_scalar_character(body)) {
    stop("body must be a scalar character!")
  }
  qiita_api("PATCH",
            path = sprintf("/api/v2/comments/%s", comment_id),
            payload = qiita_util_payload(body = body))
}
#' @rdname qiita_comment
#' @export
# Post a new comment on the given item (article).
qiita_post_comment <- function(item_id, body) {
  # Validate inputs before touching the API.
  if (!purrr::is_scalar_character(item_id)) {
    stop("item_id must be a scalar character!")
  }
  if (!purrr::is_scalar_character(body)) {
    stop("body must be a scalar character!")
  }
  qiita_api("POST",
            path = sprintf("/api/v2/items/%s/comments", item_id),
            payload = qiita_util_payload(body = body))
}
|
# Label each gene UP/DOWN by comparing its global mean expression against
# the average of the male/female HFD group means.
selected <- to.plot
selected$mean.global <- rowMeans(selected[, samples])  # was right-assign (->)
selected$direction <- ifelse(
  selected$mean.global > (selected$male.HFD.mean + selected$female.HFD.mean) / 2,
  "DOWN", "UP"
)
# Print the UP and DOWN gene lists to stdout — presumably these were pasted
# into the GO/KEGG enrichment tools whose output files are read back below.
selected %>% filter(direction == "UP") %>% select(results.gene_name) %>%
  write.table(quote = FALSE, row.names = FALSE)
selected %>% filter(direction == "DOWN") %>% select(results.gene_name) %>%
  write.table(quote = FALSE, row.names = FALSE)
# Enrichment results exported from GO / KEGG analyses of the UP / DOWN lists.
go.up <- read.delim('GO_up.txt')
go.down <- read.delim('GO_down.txt')
kegg.up <- read.delim('KEGG_up.txt')
kegg.down <- read.delim('KEGG_down.txt')

# Number of genes per enrichment term; Genes is a ";"-separated string.
# lengths(strsplit(...)) replaces the repeated sapply(strsplit, length)
# pipelines and returns the same integer vector.
count_genes <- function(genes) lengths(strsplit(as.character(genes), ";"))
go.up$gene_no <- count_genes(go.up$Genes)
go.down$gene_no <- count_genes(go.down$Genes)
kegg.up$gene_no <- count_genes(kegg.up$Genes)
kegg.down$gene_no <- count_genes(kegg.down$Genes)
# Interactive exploration of the upregulated enrichment terms. The gene_no
# cutoffs and the [4]/[9] element indices below were presumably chosen by
# inspecting the printed output — TODO confirm before reusing on new data.
go.up$Term[which((go.up$gene_no > 2) & (go.up$gene_no < 4))]
go.up$Genes[which((go.up$gene_no > 2) & (go.up$gene_no < 4))][4]
kegg.up$Term[which(kegg.up$gene_no > 2)]
kegg.up$Genes[which(kegg.up$gene_no > 2)][9]
# top GO and KEGG for upregulated genes:
# vesicle transport: APLP2;KIF5A;AGAP2;KIF1A;RAB11B
# vesicle transport in synapse: CANX
# chemical synaptyic transmission: KIF5A;SLC1A3;SYN1;PAFAH1B1
# MAPK cascade: YWHAB;CALM1;SPTAN1;SPTBN1
# wnt signalling: GNAO1;GNB1;CALM1;CPE
# axonogenesis: KIF5C;KIF5A;SPTAN1;SPTBN1
# cytoskeleton-dependent intracellular transport: DYNLL2
# lipid transport: PSAP
# cellular response to glucagon stimulus: PRKAR1A
# ephrin receptor signaling pathway: ACTB (also: Gastric acid secretion), ACTR2
# nuclear-transcribed mRNA catabolic process : EIF4A2,DDX5
# inositol phosphate catabolic process: NUDT3
# regulation of insulin secretion: SLC25A4, ATP1B2
# post-translational protein modification: SPARCL1
# purine nucleotide metabolic process: GKU1
# cholesterol biosynthetic process: CNBP
# KEGG: dopaminergic synapse endocytosis GNAO1;KIF5C;KIF5A;GNB1;CALM1;AGAP2;RAB11B
# KEGG: glutamatergic synapse: GNAO1;KIF5C;KIF5A;GNB1;CALM1;AGAP2;RAB11B and GNAO1;GNB1;SLC1A3
# Hand-curated list of the genes already assigned to the terms above; the
# remaining (unassigned) upregulated genes are listed at the end.
assigned.genes <- "GNAO1;KIF5C;KIF5A;GNB1;CALM1;KIF5C;KIF5A;AGAP2;RAB11B;APLP2;KIF5A;AGAP2;KIF1A;RAB11B;KIF5A;SLC1A3;SYN1;PAFAH1B1;YWHAB;CALM1;SPTAN1;SPTBN1;GNAO1;GNB1;CALM1;KIF5C;KIF5A;SPTAN1;SPTBN1"
assigned.genes %>% strsplit(";") %>% unlist() -> assigned.genes
go.up$Genes %>% as.character() %>% strsplit(";") %>% unlist() %>% unique() -> go.up.genes
go.up.genes[!(go.up.genes %in% assigned.genes)]
# Same exploration for the downregulated enrichment terms; cutoffs and the
# [15]/[9] indices were presumably chosen interactively — TODO confirm.
go.down$Term[(go.down$gene_no > 8)]
go.down$Genes[(go.down$gene_no > 8)][15]
kegg.down$Term[which(kegg.down$gene_no > 4)]
kegg.down$Genes[which(kegg.down$gene_no > 4)][9]
# top GO and KEGG for downregulated genes:
# respiratory electron transport chain: NDUFA13;NDUFA7;NDUFA6;NDUFB11;NDUFB5;NDUFB4;NDUFA2;SDHC;COX6B1
# rRNA processing / ribosome biogenesis: RPL31;RPL34;RPS3;RPL13;RPS3A;RPS11;RPL18;RPS10;RPL17
# proteasome-mediated ubiquitin-dependent protein catabolic process : PSMB4;PSMC5;PSMC3;PSMB3;PSMB1
# KEGG: thermogenesis: NDUFA13;NDUFA7;ATP5PD;NDUFA6;NDUFB11;NDUFB5;NDUFB4;NDUFA2;SDHC;COX6B1
# KEGG: Non-alcoholic fatty liver disease (NAFLD): NDUFA13;NDUFA7;NDUFA6;NDUFB11;NDUFB5;NDUFB4;NDUFA2;SDHC;COX6B1
# KEGG: proteasome: PSMB4;PSMC5;PSMC3;PSMB3;PSMB1
# NOTE(review): this assigned.genes string is identical to the one used in
# the UPregulated section above (GNAO1;KIF5C;...), not the genes listed in
# the downregulated terms just above — this looks like a copy-paste error,
# so the "unassigned downregulated genes" result below is suspect. Verify.
assigned.genes <- "GNAO1;KIF5C;KIF5A;GNB1;CALM1;KIF5C;KIF5A;AGAP2;RAB11B;APLP2;KIF5A;AGAP2;KIF1A;RAB11B;KIF5A;SLC1A3;SYN1;PAFAH1B1;YWHAB;CALM1;SPTAN1;SPTBN1;GNAO1;GNB1;CALM1;KIF5C;KIF5A;SPTAN1;SPTBN1"
assigned.genes %>% strsplit(";") %>% unlist() -> assigned.genes
go.down$Genes %>% as.character() %>% strsplit(";") %>% unlist() %>% unique() -> go.down.genes
go.down.genes[!(go.down.genes %in% assigned.genes)]
| /functional-gene-analysis.R | no_license | ippas/ifpan-kinga-dieta | R | false | false | 3,727 | r |
# Label each gene UP/DOWN by comparing its global mean expression against
# the average of the male/female HFD group means.
selected <- to.plot
selected$mean.global <- rowMeans(selected[, samples])  # was right-assign (->)
selected$direction <- ifelse(
  selected$mean.global > (selected$male.HFD.mean + selected$female.HFD.mean) / 2,
  "DOWN", "UP"
)
# Print the UP and DOWN gene lists to stdout — presumably these were pasted
# into the GO/KEGG enrichment tools whose output files are read back below.
selected %>% filter(direction == "UP") %>% select(results.gene_name) %>%
  write.table(quote = FALSE, row.names = FALSE)
selected %>% filter(direction == "DOWN") %>% select(results.gene_name) %>%
  write.table(quote = FALSE, row.names = FALSE)
# Enrichment results exported from GO / KEGG analyses of the UP / DOWN lists.
go.up <- read.delim('GO_up.txt')
go.down <- read.delim('GO_down.txt')
kegg.up <- read.delim('KEGG_up.txt')
kegg.down <- read.delim('KEGG_down.txt')

# Number of genes per enrichment term; Genes is a ";"-separated string.
# lengths(strsplit(...)) replaces the repeated sapply(strsplit, length)
# pipelines and returns the same integer vector.
count_genes <- function(genes) lengths(strsplit(as.character(genes), ";"))
go.up$gene_no <- count_genes(go.up$Genes)
go.down$gene_no <- count_genes(go.down$Genes)
kegg.up$gene_no <- count_genes(kegg.up$Genes)
kegg.down$gene_no <- count_genes(kegg.down$Genes)
# Interactive exploration of the upregulated enrichment terms. The gene_no
# cutoffs and the [4]/[9] element indices below were presumably chosen by
# inspecting the printed output — TODO confirm before reusing on new data.
go.up$Term[which((go.up$gene_no > 2) & (go.up$gene_no < 4))]
go.up$Genes[which((go.up$gene_no > 2) & (go.up$gene_no < 4))][4]
kegg.up$Term[which(kegg.up$gene_no > 2)]
kegg.up$Genes[which(kegg.up$gene_no > 2)][9]
# top GO and KEGG for upregulated genes:
# vesicle transport: APLP2;KIF5A;AGAP2;KIF1A;RAB11B
# vesicle transport in synapse: CANX
# chemical synaptyic transmission: KIF5A;SLC1A3;SYN1;PAFAH1B1
# MAPK cascade: YWHAB;CALM1;SPTAN1;SPTBN1
# wnt signalling: GNAO1;GNB1;CALM1;CPE
# axonogenesis: KIF5C;KIF5A;SPTAN1;SPTBN1
# cytoskeleton-dependent intracellular transport: DYNLL2
# lipid transport: PSAP
# cellular response to glucagon stimulus: PRKAR1A
# ephrin receptor signaling pathway: ACTB (also: Gastric acid secretion), ACTR2
# nuclear-transcribed mRNA catabolic process : EIF4A2,DDX5
# inositol phosphate catabolic process: NUDT3
# regulation of insulin secretion: SLC25A4, ATP1B2
# post-translational protein modification: SPARCL1
# purine nucleotide metabolic process: GKU1
# cholesterol biosynthetic process: CNBP
# KEGG: dopaminergic synapse endocytosis GNAO1;KIF5C;KIF5A;GNB1;CALM1;AGAP2;RAB11B
# KEGG: glutamatergic synapse: GNAO1;GNB1;SLC1A3
# Hand-curated list of the genes already assigned to the terms above; the
# remaining (unassigned) upregulated genes are listed at the end.
assigned.genes <- "GNAO1;KIF5C;KIF5A;GNB1;CALM1;KIF5C;KIF5A;AGAP2;RAB11B;APLP2;KIF5A;AGAP2;KIF1A;RAB11B;KIF5A;SLC1A3;SYN1;PAFAH1B1;YWHAB;CALM1;SPTAN1;SPTBN1;GNAO1;GNB1;CALM1;KIF5C;KIF5A;SPTAN1;SPTBN1"
assigned.genes %>% strsplit(";") %>% unlist() -> assigned.genes
go.up$Genes %>% as.character() %>% strsplit(";") %>% unlist() %>% unique() -> go.up.genes
go.up.genes[!(go.up.genes %in% assigned.genes)]
# Same exploration for the downregulated enrichment terms; cutoffs and the
# [15]/[9] indices were presumably chosen interactively — TODO confirm.
go.down$Term[(go.down$gene_no > 8)]
go.down$Genes[(go.down$gene_no > 8)][15]
kegg.down$Term[which(kegg.down$gene_no > 4)]
kegg.down$Genes[which(kegg.down$gene_no > 4)][9]
# top GO and KEGG for downregulated genes:
# respiratory electron transport chain: NDUFA13;NDUFA7;NDUFA6;NDUFB11;NDUFB5;NDUFB4;NDUFA2;SDHC;COX6B1
# rRNA processing / ribosome biogenesis: RPL31;RPL34;RPS3;RPL13;RPS3A;RPS11;RPL18;RPS10;RPL17
# proteasome-mediated ubiquitin-dependent protein catabolic process : PSMB4;PSMC5;PSMC3;PSMB3;PSMB1
# KEGG: thermogenesis: NDUFA13;NDUFA7;ATP5PD;NDUFA6;NDUFB11;NDUFB5;NDUFB4;NDUFA2;SDHC;COX6B1
# KEGG: Non-alcoholic fatty liver disease (NAFLD): NDUFA13;NDUFA7;NDUFA6;NDUFB11;NDUFB5;NDUFB4;NDUFA2;SDHC;COX6B1
# KEGG: proteasome: PSMB4;PSMC5;PSMC3;PSMB3;PSMB1
# NOTE(review): this assigned.genes string is identical to the one used in
# the UPregulated section above (GNAO1;KIF5C;...), not the genes listed in
# the downregulated terms just above — this looks like a copy-paste error,
# so the "unassigned downregulated genes" result below is suspect. Verify.
assigned.genes <- "GNAO1;KIF5C;KIF5A;GNB1;CALM1;KIF5C;KIF5A;AGAP2;RAB11B;APLP2;KIF5A;AGAP2;KIF1A;RAB11B;KIF5A;SLC1A3;SYN1;PAFAH1B1;YWHAB;CALM1;SPTAN1;SPTBN1;GNAO1;GNB1;CALM1;KIF5C;KIF5A;SPTAN1;SPTBN1"
assigned.genes %>% strsplit(";") %>% unlist() -> assigned.genes
go.down$Genes %>% as.character() %>% strsplit(";") %>% unlist() %>% unique() -> go.down.genes
go.down.genes[!(go.down.genes %in% assigned.genes)]
|
install.packages("jsonlite")
library(jsonlite)
install.packages("rapportools")
library(rapportools)
install.packages("tidyverse")
library(tidyverse)
install.packages("dplyr")
library(dplyr)
install.packages("tm")
library(tm)
install.packages("fastDummies")
library(fastDummies)
install.packages("caret")
library(caret)
install.packages("kernlab")
library(kernlab)
install.packages("e1071")
library(e1071)
install.packages("arules")
library(arules)
install.packages("arulesViz")
library(arulesViz)
install.packages("MLmetrics")
library(MLmetrics)
setwd("C:/Data/study/IST-687/Project/ist687") # set this to the location where the json file is stored
source("munging.R") # set this to the location of the munging.R file
####### association rules mining on all data #######
df = jsonlite::fromJSON("fall2019-survey-M02.json")
dfBinnedData = getBinnedData(df)
dfTnx = as(dfBinnedData, "transactions")
##### association rules #####
rulesetPromoters <- apriori(dfTnx,
parameter=list(support=0.005,confidence=0.5),
appearance = list(default="lhs", rhs=("Likelihood.to.recommend=Promoter")))
rulesetDetractors <- apriori(dfTnx,
parameter=list(support=0.05,confidence=0.8),
appearance = list(default="lhs", rhs=("Likelihood.to.recommend=Detractor")))
summary(quality(rulesetPromoters)$lift)
summary(quality(rulesetDetractors)$lift)
length(rulesetPromoters[quality(rulesetPromoters)$lift > 2.35])
length(rulesetDetractors[quality(rulesetDetractors)$lift > 1.99])
arules::inspect(rulesetPromoters[quality(rulesetPromoters)$lift > 2.35])
arules::inspect(rulesetDetractors[quality(rulesetDetractors)$lift > 1.99])
inspectDT(rulesetPromoters[quality(rulesetPromoters)$lift > 2.35])
inspectDT(rulesetDetractors[quality(rulesetDetractors)$lift > 1.99])
# The columns with strong association were considered for classification modeling
##### classification models for all data #####
# Columns suggested by the association-rule mining above.
analysisColumns <- c("Airline.Status", "Type.of.Travel", "Eating.and.Drinking.at.Airport", "Departure.Delay.in.Minutes", "Flights.Per.Year", "Price.Sensitivity", "olong", "dlat", "Total.Freq.Flyer.Accts")
analysisData <- dfBinnedData[, names(dfBinnedData) %in% c(analysisColumns, "Likelihood.to.recommend") ]
# Rows with a missing NPS label are treated as "Passive".
analysisData$Likelihood.to.recommend[is.na(analysisData$Likelihood.to.recommend)] <- "Passive"
analysisData <- prepareForAnalysis(analysisData, analysisColumns)  # from munging.R
# 10-fold CV repeated 5 times; fixed seed for a reproducible 75/25 split.
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 5)
set.seed(1)
inTraining <- createDataPartition(analysisData$Likelihood.to.recommend, p = 0.75, list = FALSE)
trainData <- analysisData[inTraining,]
testData <- analysisData[-inTraining,]
logitBoost <- train(factor(Likelihood.to.recommend) ~., data = trainData, method = "LogitBoost", trControl=fitControl, preProcess = c("center", "scale"), tuneLength = 10)
logitBoost
# train accuracy - 79.55%
plot(logitBoost)
result <- predict(logitBoost, testData)
sum(result == testData$Likelihood.to.recommend)/length(testData$Likelihood.to.recommend)
# test accuracy - 80.48%
F1_Score(testData$Likelihood.to.recommend, result)
# F1 score - 87.67%
varImp(logitBoost)
# Check how the model classifies the Personal Travel subset of the test set.
personalTravelResult <- predict(logitBoost, testData[testData$`Type.of.Travel_Personal Travel` == 1,])
sum(personalTravelResult == "Detractor")/nrow(testData[testData$`Type.of.Travel_Personal Travel` == 1,])
# Inference - Personal travel customers have a strong relationship with their ratings.
###### association rules mining on Personal Travel data #######
# Subset to Personal Travel rows (dropping the now-constant column) and
# re-mine association rules on that subset. Assignments use <- throughout.
personalTravel <- df[str_trim(df$Type.of.Travel) == "Personal Travel", names(df) != "Type.of.Travel"]
personalTravelBinned <- getBinnedData(personalTravel)
personalTravelTnx <- as(personalTravelBinned, "transactions")
rulesetPromoters <- apriori(personalTravelTnx,
                            parameter = list(support = 0.005, confidence = 0.5),
                            appearance = list(default = "lhs", rhs = ("Likelihood.to.recommend=Promoter")))
rulesetDetractors <- apriori(personalTravelTnx,
                             parameter = list(support = 0.05, confidence = 0.8),
                             appearance = list(default = "lhs", rhs = ("Likelihood.to.recommend=Detractor")))
summary(quality(rulesetPromoters)$lift)
summary(quality(rulesetDetractors)$lift)
length(rulesetDetractors[quality(rulesetDetractors)$lift > 1.178])
arules::inspect(rulesetPromoters)
arules::inspect(rulesetDetractors[quality(rulesetDetractors)$lift > 1.178])
inspectDT(rulesetPromoters)
inspectDT(rulesetDetractors[quality(rulesetDetractors)$lift > 1.178])
# The information was already available.
# Trying to get a better model with Airline.Status == "Blue" since this has the bulk of the data (2504/3212).
personalBlue <- df[str_trim(df$Type.of.Travel) == "Personal Travel" & str_trim(df$Airline.Status) == "Blue", !names(df) %in% c("Type.of.Travel", "Airline.Status")]
personalBlueBinned <- getBinnedData(personalBlue)
personalBlueTnx <- as(personalBlueBinned, "transactions")
rulesetPromoters <- apriori(personalBlueTnx,
                            parameter = list(support = 0.002, confidence = 0.5),
                            appearance = list(default = "lhs", rhs = ("Likelihood.to.recommend=Promoter")))
rulesetDetractors <- apriori(personalBlueTnx,
                             parameter = list(support = 0.05, confidence = 0.8),
                             appearance = list(default = "lhs", rhs = ("Likelihood.to.recommend=Detractor")))
summary(quality(rulesetPromoters)$lift)
summary(quality(rulesetDetractors)$lift)
length(rulesetPromoters[quality(rulesetPromoters)$lift > 28])
length(rulesetDetractors[quality(rulesetDetractors)$lift > 1.11])
arules::inspect(rulesetPromoters[quality(rulesetPromoters)$lift > 28])
arules::inspect(rulesetDetractors[quality(rulesetDetractors)$lift > 1.11])
inspectDT(rulesetPromoters[quality(rulesetPromoters)$lift > 28])
inspectDT(rulesetDetractors[quality(rulesetDetractors)$lift > 1.11])
# The columns with strong association were considered for classification modeling.
##### classification models for Personal Travel, Blue data #####
# Columns suggested by the Personal Travel / Blue association rules above.
analysisColumns1 <- c("Age", "Gender", "Flight.Distance", "Eating.and.Drinking.at.Airport",
                      "olong", "Arrival.Delay.in.Minutes", "Loyalty", "Total.Freq.Flyer.Accts")
analysisData1 <- personalBlueBinned[, names(personalBlueBinned) %in% c(analysisColumns1, "Likelihood.to.recommend")]
analysisData1 <- prepareForAnalysis(analysisData1, analysisColumns1)
# Fixed seed for a reproducible 75/25 split.
set.seed(1)
inTraining1 <- createDataPartition(analysisData1$Likelihood.to.recommend, p = .75, list = FALSE)
trainData1 <- analysisData1[inTraining1,]
testData1 <- analysisData1[-inTraining1,]
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 5)
logitBoost1 <- train(factor(Likelihood.to.recommend) ~., data = trainData1, method = "LogitBoost", trControl=fitControl, preProcess = c("center", "scale"), tuneLength = 10)
logitBoost1
# train accuracy - 89.84%
plot(logitBoost1)
result5 <- predict(logitBoost1, newdata = testData1)
sum(result5 == testData1$Likelihood.to.recommend)/length(testData1$Likelihood.to.recommend)
# test accuracy - 89.76%
F1_Score(testData1$Likelihood.to.recommend, result5)
# F1 score - 94.68%
varImp(logitBoost1)
# Second model: RBF-kernel SVM on the same split, for comparison.
svmRadial1 <- train(factor(Likelihood.to.recommend) ~., data = trainData1, method = "svmRadial", trControl=fitControl, preProcess = c("center", "scale"), tuneLength = 10)
svmRadial1
# train accuracy - 89.92%
plot(svmRadial1)
result6 <- predict(svmRadial1, newdata = testData1)
sum(result6 == testData1$Likelihood.to.recommend)/length(testData1$Likelihood.to.recommend)
# test accuracy - 89.44%
F1_Score(testData1$Likelihood.to.recommend, result6)
# F1 score - 94.4%
varImp(svmRadial1)
femalePredict <- predict(svmRadial1, testData1[testData1$Gender_Male == 0,])
sum(femalePredict == "Detractor")/nrow(testData1[testData1$Gender_Male == 0,])
varImp(svmRadial1)
sum(personalBlueBinned$Likelihood.to.recommend == "Detractor")/length(personalBlueBinned$Likelihood.to.recommend)
# The high accuracy of the model proves the strong association between the customers and their ratings.
# The female customers with type of travel as Personal, airline status as Blue and with no frequent flyer accounts tend to be detractors.
# Customers with origin longitude between -120 - -95 tend to be detractors.
nrow(df[df$Gender == "Female",])/nrow(df)
nrow(personalBlue[personalBlue$Gender == "Female",])/nrow(personalBlue)
mean(df$Likelihood.to.recommend[df$Gender == "Female"])
mean(personalBlue$Likelihood.to.recommend[personalBlue$Gender == "Female"])
# The airline should focus more on the female customers with type of travel as Personal and airline status
# as Blue as their average ratings are significantly lower than that of all female customers and they have
# a tendency to be detractors as proven by the model. They also contribute more to the overall ratings as their
# ratio is higher in this category than the overall data.
nrow(df[df$Gender == "Female" & df$Total.Freq.Flyer.Accts == 0,])/nrow(df)
nrow(personalBlue[personalBlue$Gender == "Female" & personalBlue$Total.Freq.Flyer.Accts == 0,])/nrow(personalBlue)
mean(df$Likelihood.to.recommend[df$Gender == "Female" & df$Total.Freq.Flyer.Accts == 0])
mean(personalBlue$Likelihood.to.recommend[personalBlue$Gender == "Female" & personalBlue$Total.Freq.Flyer.Accts == 0])
# The above conclusion is also true for female customers with no frequent flyer accounts.
# NOTE(review): the next line indexes dfBinnedData with conditions on BOTH
# dfBinnedData and df — this is only valid if the two share identical row
# order; verify, or build the whole condition from one data frame.
nrow(dfBinnedData[dfBinnedData$Type.of.Travel == "Personal Travel" & df$Airline.Status == "Blue" & df$Gender == "Female",])/nrow(dfBinnedData)
mean(df$Likelihood.to.recommend[df$Type.of.Travel == "Personal Travel"])
mean(df$Likelihood.to.recommend)
mean(df$Likelihood.to.recommend[df$Type.of.Travel == "Personal Travel" & df$Airline.Status == "Blue" & df$Gender == "Female"])
mean(df$Likelihood.to.recommend[df$Gender == "Female"])
| /IST687-IntroductionToDataScience/Script/Advanced_Model2.R | no_license | nishithamv/MSADS-Portfolio | R | false | false | 10,094 | r | install.packages("jsonlite")
# Packages required by this analysis. Install only the ones that are
# missing instead of re-installing on every run, then attach each one.
required_packages <- c(
  "jsonlite", "rapportools", "tidyverse", "dplyr", "tm", "fastDummies",
  "caret", "kernlab", "e1071", "arules", "arulesViz", "MLmetrics"
)
for (pkg in required_packages) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
  library(pkg, character.only = TRUE)
}
setwd("C:/Data/study/IST-687/Project/ist687") # set this to the location where the json file is stored
source("munging.R") # set this to the location of the munging.R file
####### association rules mining on all data #######
df = jsonlite::fromJSON("fall2019-survey-M02.json")
dfBinnedData = getBinnedData(df)
dfTnx = as(dfBinnedData, "transactions")
##### association rules #####
rulesetPromoters <- apriori(dfTnx,
parameter=list(support=0.005,confidence=0.5),
appearance = list(default="lhs", rhs=("Likelihood.to.recommend=Promoter")))
rulesetDetractors <- apriori(dfTnx,
parameter=list(support=0.05,confidence=0.8),
appearance = list(default="lhs", rhs=("Likelihood.to.recommend=Detractor")))
summary(quality(rulesetPromoters)$lift)
summary(quality(rulesetDetractors)$lift)
length(rulesetPromoters[quality(rulesetPromoters)$lift > 2.35])
length(rulesetDetractors[quality(rulesetDetractors)$lift > 1.99])
arules::inspect(rulesetPromoters[quality(rulesetPromoters)$lift > 2.35])
arules::inspect(rulesetDetractors[quality(rulesetDetractors)$lift > 1.99])
inspectDT(rulesetPromoters[quality(rulesetPromoters)$lift > 2.35])
inspectDT(rulesetDetractors[quality(rulesetDetractors)$lift > 1.99])
# The columns with strong association were considered for classification modeling
##### classification models for all data #####
# Columns suggested by the association-rule mining above.
analysisColumns <- c("Airline.Status", "Type.of.Travel", "Eating.and.Drinking.at.Airport", "Departure.Delay.in.Minutes", "Flights.Per.Year", "Price.Sensitivity", "olong", "dlat", "Total.Freq.Flyer.Accts")
analysisData <- dfBinnedData[, names(dfBinnedData) %in% c(analysisColumns, "Likelihood.to.recommend") ]
# Rows with a missing NPS label are treated as "Passive".
analysisData$Likelihood.to.recommend[is.na(analysisData$Likelihood.to.recommend)] <- "Passive"
analysisData <- prepareForAnalysis(analysisData, analysisColumns)  # from munging.R
# 10-fold CV repeated 5 times; fixed seed for a reproducible 75/25 split.
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 5)
set.seed(1)
inTraining <- createDataPartition(analysisData$Likelihood.to.recommend, p = 0.75, list = FALSE)
trainData <- analysisData[inTraining,]
testData <- analysisData[-inTraining,]
logitBoost <- train(factor(Likelihood.to.recommend) ~., data = trainData, method = "LogitBoost", trControl=fitControl, preProcess = c("center", "scale"), tuneLength = 10)
logitBoost
# train accuracy - 79.55%
plot(logitBoost)
result <- predict(logitBoost, testData)
sum(result == testData$Likelihood.to.recommend)/length(testData$Likelihood.to.recommend)
# test accuracy - 80.48%
F1_Score(testData$Likelihood.to.recommend, result)
# F1 score - 87.67%
varImp(logitBoost)
# Check how the model classifies the Personal Travel subset of the test set.
personalTravelResult <- predict(logitBoost, testData[testData$`Type.of.Travel_Personal Travel` == 1,])
sum(personalTravelResult == "Detractor")/nrow(testData[testData$`Type.of.Travel_Personal Travel` == 1,])
# Inference - Personal travel customers have a strong relationship with their ratings.
###### association rules mining on Personal Travel data #######
# Subset to Personal Travel rows (dropping the now-constant column) and
# re-mine association rules on that subset. Assignments use <- throughout.
personalTravel <- df[str_trim(df$Type.of.Travel) == "Personal Travel", names(df) != "Type.of.Travel"]
personalTravelBinned <- getBinnedData(personalTravel)
personalTravelTnx <- as(personalTravelBinned, "transactions")
rulesetPromoters <- apriori(personalTravelTnx,
                            parameter = list(support = 0.005, confidence = 0.5),
                            appearance = list(default = "lhs", rhs = ("Likelihood.to.recommend=Promoter")))
rulesetDetractors <- apriori(personalTravelTnx,
                             parameter = list(support = 0.05, confidence = 0.8),
                             appearance = list(default = "lhs", rhs = ("Likelihood.to.recommend=Detractor")))
summary(quality(rulesetPromoters)$lift)
summary(quality(rulesetDetractors)$lift)
length(rulesetDetractors[quality(rulesetDetractors)$lift > 1.178])
arules::inspect(rulesetPromoters)
arules::inspect(rulesetDetractors[quality(rulesetDetractors)$lift > 1.178])
inspectDT(rulesetPromoters)
inspectDT(rulesetDetractors[quality(rulesetDetractors)$lift > 1.178])
# The information was already available.
# Trying to get a better model with Airline.Status == "Blue" since this has the bulk of the data (2504/3212).
personalBlue <- df[str_trim(df$Type.of.Travel) == "Personal Travel" & str_trim(df$Airline.Status) == "Blue", !names(df) %in% c("Type.of.Travel", "Airline.Status")]
personalBlueBinned <- getBinnedData(personalBlue)
personalBlueTnx <- as(personalBlueBinned, "transactions")
rulesetPromoters <- apriori(personalBlueTnx,
                            parameter = list(support = 0.002, confidence = 0.5),
                            appearance = list(default = "lhs", rhs = ("Likelihood.to.recommend=Promoter")))
rulesetDetractors <- apriori(personalBlueTnx,
                             parameter = list(support = 0.05, confidence = 0.8),
                             appearance = list(default = "lhs", rhs = ("Likelihood.to.recommend=Detractor")))
summary(quality(rulesetPromoters)$lift)
summary(quality(rulesetDetractors)$lift)
length(rulesetPromoters[quality(rulesetPromoters)$lift > 28])
length(rulesetDetractors[quality(rulesetDetractors)$lift > 1.11])
arules::inspect(rulesetPromoters[quality(rulesetPromoters)$lift > 28])
arules::inspect(rulesetDetractors[quality(rulesetDetractors)$lift > 1.11])
inspectDT(rulesetPromoters[quality(rulesetPromoters)$lift > 28])
inspectDT(rulesetDetractors[quality(rulesetDetractors)$lift > 1.11])
# The columns with strong association were considered for classification modeling.
##### classification models for Personal Travel, Blue data #####
# Columns suggested by the Personal Travel / Blue association rules above.
analysisColumns1 <- c("Age", "Gender", "Flight.Distance", "Eating.and.Drinking.at.Airport",
                      "olong", "Arrival.Delay.in.Minutes", "Loyalty", "Total.Freq.Flyer.Accts")
analysisData1 <- personalBlueBinned[, names(personalBlueBinned) %in% c(analysisColumns1, "Likelihood.to.recommend")]
analysisData1 <- prepareForAnalysis(analysisData1, analysisColumns1)
# Fixed seed for a reproducible 75/25 split.
set.seed(1)
inTraining1 <- createDataPartition(analysisData1$Likelihood.to.recommend, p = .75, list = FALSE)
trainData1 <- analysisData1[inTraining1,]
testData1 <- analysisData1[-inTraining1,]
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 5)
logitBoost1 <- train(factor(Likelihood.to.recommend) ~., data = trainData1, method = "LogitBoost", trControl=fitControl, preProcess = c("center", "scale"), tuneLength = 10)
logitBoost1
# train accuracy - 89.84%
plot(logitBoost1)
result5 <- predict(logitBoost1, newdata = testData1)
sum(result5 == testData1$Likelihood.to.recommend)/length(testData1$Likelihood.to.recommend)
# test accuracy - 89.76%
F1_Score(testData1$Likelihood.to.recommend, result5)
# F1 score - 94.68%
varImp(logitBoost1)
# Second model: RBF-kernel SVM on the same split, for comparison.
svmRadial1 <- train(factor(Likelihood.to.recommend) ~., data = trainData1, method = "svmRadial", trControl=fitControl, preProcess = c("center", "scale"), tuneLength = 10)
svmRadial1
# train accuracy - 89.92%
plot(svmRadial1)
result6 <- predict(svmRadial1, newdata = testData1)
sum(result6 == testData1$Likelihood.to.recommend)/length(testData1$Likelihood.to.recommend)
# test accuracy - 89.44%
F1_Score(testData1$Likelihood.to.recommend, result6)
# F1 score - 94.4%
varImp(svmRadial1)
femalePredict <- predict(svmRadial1, testData1[testData1$Gender_Male == 0,])
sum(femalePredict == "Detractor")/nrow(testData1[testData1$Gender_Male == 0,])
varImp(svmRadial1)
sum(personalBlueBinned$Likelihood.to.recommend == "Detractor")/length(personalBlueBinned$Likelihood.to.recommend)
# The high accuracy of the model proves the strong association between the customers and their ratings.
# The female customers with type of travel as Personal, airline status as Blue and with no frequent flyer accounts tend to be detractors.
# Customers with origin longitude between -120 - -95 tend to be detractors.
nrow(df[df$Gender == "Female",])/nrow(df)
nrow(personalBlue[personalBlue$Gender == "Female",])/nrow(personalBlue)
mean(df$Likelihood.to.recommend[df$Gender == "Female"])
mean(personalBlue$Likelihood.to.recommend[personalBlue$Gender == "Female"])
# The airline should focus more on the female customers with type of travel as Personal and airline status
# as Blue as their average ratings are significantly lower than that of all female customers and they have
# a tendency to be detractors as proven by the model. They also contribute more to the overall ratings as their
# ratio is higher in this category than the overall data.
nrow(df[df$Gender == "Female" & df$Total.Freq.Flyer.Accts == 0,])/nrow(df)
nrow(personalBlue[personalBlue$Gender == "Female" & personalBlue$Total.Freq.Flyer.Accts == 0,])/nrow(personalBlue)
mean(df$Likelihood.to.recommend[df$Gender == "Female" & df$Total.Freq.Flyer.Accts == 0])
mean(personalBlue$Likelihood.to.recommend[personalBlue$Gender == "Female" & personalBlue$Total.Freq.Flyer.Accts == 0])
# The above conclusion is also true for female customers with no frequent flyer accounts.
# NOTE(review): the next line indexes dfBinnedData with conditions on BOTH
# dfBinnedData and df — this is only valid if the two share identical row
# order; verify, or build the whole condition from one data frame.
nrow(dfBinnedData[dfBinnedData$Type.of.Travel == "Personal Travel" & df$Airline.Status == "Blue" & df$Gender == "Female",])/nrow(dfBinnedData)
mean(df$Likelihood.to.recommend[df$Type.of.Travel == "Personal Travel"])
mean(df$Likelihood.to.recommend)
mean(df$Likelihood.to.recommend[df$Type.of.Travel == "Personal Travel" & df$Airline.Status == "Blue" & df$Gender == "Female"])
mean(df$Likelihood.to.recommend[df$Gender == "Female"])
|
library(reliaR)
### Name: BurrXsurvival
### Title: Survival related functions for the BurrX distribution
### Aliases: BurrXsurvival crf.burrX hburrX hra.burrX sburrX
### Keywords: survival
### ** Examples
## load data set
data(bearings)
## Maximum Likelihood(ML) Estimates of alpha & lambda for the data(bearings)
## Estimates of alpha & lambda using 'maxLik' package
## alpha.est = 1.1989515, lambda.est = 0.0130847
## Reliability indicators for data(bearings):
## Reliability function
sburrX(bearings, 1.1989515, 0.0130847)
## Hazard function
hburrX(bearings, 1.1989515, 0.0130847)
## hazard rate average(hra)
hra.burrX(bearings, 1.1989515, 0.0130847)
## Conditional reliability function (age component=0)
crf.burrX(bearings, 0.00, 1.1989515, 0.0130847)
## Conditional reliability function (age component=3.0)
crf.burrX(bearings, 3.0, 1.1989515, 0.0130847)
| /data/genthat_extracted_code/reliaR/examples/BurrXsurvival.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 872 | r | library(reliaR)
### Name: BurrXsurvival
### Title: Survival related functions for the BurrX distribution
### Aliases: BurrXsurvival crf.burrX hburrX hra.burrX sburrX
### Keywords: survival
### ** Examples
## load data set
data(bearings)
## Maximum Likelihood(ML) Estimates of alpha & lambda for the data(bearings)
## Estimates of alpha & lambda using 'maxLik' package
## alpha.est = 1.1989515, lambda.est = 0.0130847
## Reliability indicators for data(bearings):
## Reliability function
sburrX(bearings, 1.1989515, 0.0130847)
## Hazard function
hburrX(bearings, 1.1989515, 0.0130847)
## hazard rate average(hra)
hra.burrX(bearings, 1.1989515, 0.0130847)
## Conditional reliability function (age component=0)
crf.burrX(bearings, 0.00, 1.1989515, 0.0130847)
## Conditional reliability function (age component=3.0)
crf.burrX(bearings, 3.0, 1.1989515, 0.0130847)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{mixtureexample_gradlogprior}
\alias{mixtureexample_gradlogprior}
\title{Evaluate gradient of mixture example prior density}
\usage{
mixtureexample_gradlogprior(x)
}
\arguments{
\item{x}{evaluation points}
}
\value{
gradient values
}
\description{
Evaluate gradient of mixture example prior density
}
| /man/mixtureexample_gradlogprior.Rd | no_license | jeremyhengjm/GibbsFlow | R | false | true | 398 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{mixtureexample_gradlogprior}
\alias{mixtureexample_gradlogprior}
\title{Evaluate gradient of mixture example prior density}
\usage{
mixtureexample_gradlogprior(x)
}
\arguments{
\item{x}{evaluation points}
}
\value{
gradient values
}
\description{
Evaluate gradient of mixture example prior density
}
|
##################################################
# This code takes MLE for SNPs (betas and se.betas)
# and performs a hiearchical model for bias reduction
# the code also calcualtes the proportion of FRR explained by a set of SNPS
# incorporating:
# the uncertainty of the SNP estimates
# bias reduction
# uncertainty in the pre-specified FRR
# David Conti
#################################################
library(R2jags)
set.seed(123)
setwd("/Users/davidconti/Google Drive/Collaborations/CORECT/Manuscripts/CORECT OncoArray Main Effects/BiasCorrection/CORECT.BiasCorrection")
d <- read.table("GRS_dataset.txt", header=T, sep="\t")
Y <- d$status
W <- d[,c("SEX",
"PC1","PC2","PC3","PC4",
"EstherVerdi_Fire","PuertoRico","Galeon_Spain_Tribe","SWEDEN_Wolk","MECC_Sephardi","ColoCare","MECC_Jew_unknown",
"MECC_Ashkenazi_MSKCC","MECC_nonJew_nonArab","SWEDEN_Lindblom","MCCS","ATBC",
"MEC","USC_HRT_CRC","UK_SEARCH","NHS2")]
X <- d[, c("all.GRS.1","all.GRS.2","all.GRS.3","all.GRS.5","all.GRS.6","all.GRS.7")] # GRS #4 is baseline
reg <- glm(Y~as.matrix(X)+as.matrix(W), family=binomial)
coef <- summary(reg)$coef[2:7,]
##### from data
beta.hat <- coef[,1]
M <- length(beta.hat)
se.beta <- coef[,2]
prec.beta <- 1/(se.beta^2)
M <- length(beta.hat)
zeros <- rep(0, M)
###### priors on effects
sigma <- c(0.35, 0.25, 0.15, 0.15, 0.25, 0.35)
sigma2 <- sigma^2
for(i in 1:M) { print(c(exp(0-1.96*sigma[i]),exp(0+1.96*sigma[i]))) }
###### Run JAGs hierarchical model
# define JAGS model
model.string <-
"model {
C <- 10000 #this just has to be large enough to ensure all phi[m]'s > 0
for(m in 1:M) {
beta[m] ~ dnorm(beta.hat[m], prec.beta[m]) # generate the MLE effect estimates
OR[m] <- exp(beta[m])
# normal prior on beta using the zeroes trick
phi[m] <- C-l[m]
l[m] <- -0.5*log(2*3.14) - 0.5*log(sigma2[m]) - 0.5 * pow((beta[m]-0),2)/sigma2[m]
zeros[m] ~ dpois(phi[m])
}
}"
jdata <- list(beta.hat=beta.hat, prec.beta=prec.beta,
M=M, sigma2=sigma2,
zeros=zeros)
var.s <- c("OR", "beta")
model.fit <- jags.model(file=textConnection(model.string), data=jdata, n.chains=1, n.adapt=5000)
update(model.fit, n.iter=10000, progress.bar="text")
model.fit <- coda.samples(model=model.fit, variable.names=var.s, n.iter=20000, thin=2, quiet=F)
model.coda <- as.data.frame(model.fit[[1]])
est <- apply(model.coda, 2, mean)
beta.est <- est[grep("beta", names(est))]
OR.est <- est[grep("OR", names(est))]
# output results
# plot of SNP bias reduction
pdf("SumStat.BiasReduced.RiskScores.Estimates.pdf")
plot(beta.hat, beta.est, pch=16, xlab="MLE estimate", ylab="Biased Reduced Estimate", ylim=c(-1.5,1.5), xlim=c(-1.5,1.5))
abline(a=0, b=1)
dev.off()
r <- summary(model.fit)
write.table(r$statistics, file="Summary.SumStat.BiasReduced.RiskScores.Estimates.txt", sep="\t")
write.table(r$quantiles, file="Summary.SumStat.BiasReduced.RiskScores.Estimates.txt", sep="\t", append=T)
| /SumStat.BiasReduced.RiskScore.R | no_license | dvconti/CORECT.BiasCorrection | R | false | false | 2,981 | r | ##################################################
# This code takes MLE for SNPs (betas and se.betas)
# and performs a hiearchical model for bias reduction
# the code also calcualtes the proportion of FRR explained by a set of SNPS
# incorporating:
# the uncertainty of the SNP estimates
# bias reduction
# uncertainty in the pre-specified FRR
# David Conti
#################################################
library(R2jags)
set.seed(123)
setwd("/Users/davidconti/Google Drive/Collaborations/CORECT/Manuscripts/CORECT OncoArray Main Effects/BiasCorrection/CORECT.BiasCorrection")
d <- read.table("GRS_dataset.txt", header=T, sep="\t")
Y <- d$status
W <- d[,c("SEX",
"PC1","PC2","PC3","PC4",
"EstherVerdi_Fire","PuertoRico","Galeon_Spain_Tribe","SWEDEN_Wolk","MECC_Sephardi","ColoCare","MECC_Jew_unknown",
"MECC_Ashkenazi_MSKCC","MECC_nonJew_nonArab","SWEDEN_Lindblom","MCCS","ATBC",
"MEC","USC_HRT_CRC","UK_SEARCH","NHS2")]
X <- d[, c("all.GRS.1","all.GRS.2","all.GRS.3","all.GRS.5","all.GRS.6","all.GRS.7")] # GRS #4 is baseline
reg <- glm(Y~as.matrix(X)+as.matrix(W), family=binomial)
coef <- summary(reg)$coef[2:7,]
##### from data
beta.hat <- coef[,1]
M <- length(beta.hat)
se.beta <- coef[,2]
prec.beta <- 1/(se.beta^2)
M <- length(beta.hat)
zeros <- rep(0, M)
###### priors on effects
sigma <- c(0.35, 0.25, 0.15, 0.15, 0.25, 0.35)
sigma2 <- sigma^2
for(i in 1:M) { print(c(exp(0-1.96*sigma[i]),exp(0+1.96*sigma[i]))) }
###### Run JAGs hierarchical model
# define JAGS model
model.string <-
"model {
C <- 10000 #this just has to be large enough to ensure all phi[m]'s > 0
for(m in 1:M) {
beta[m] ~ dnorm(beta.hat[m], prec.beta[m]) # generate the MLE effect estimates
OR[m] <- exp(beta[m])
# normal prior on beta using the zeroes trick
phi[m] <- C-l[m]
l[m] <- -0.5*log(2*3.14) - 0.5*log(sigma2[m]) - 0.5 * pow((beta[m]-0),2)/sigma2[m]
zeros[m] ~ dpois(phi[m])
}
}"
jdata <- list(beta.hat=beta.hat, prec.beta=prec.beta,
M=M, sigma2=sigma2,
zeros=zeros)
var.s <- c("OR", "beta")
model.fit <- jags.model(file=textConnection(model.string), data=jdata, n.chains=1, n.adapt=5000)
update(model.fit, n.iter=10000, progress.bar="text")
model.fit <- coda.samples(model=model.fit, variable.names=var.s, n.iter=20000, thin=2, quiet=F)
model.coda <- as.data.frame(model.fit[[1]])
est <- apply(model.coda, 2, mean)
beta.est <- est[grep("beta", names(est))]
OR.est <- est[grep("OR", names(est))]
# output results
# plot of SNP bias reduction
pdf("SumStat.BiasReduced.RiskScores.Estimates.pdf")
plot(beta.hat, beta.est, pch=16, xlab="MLE estimate", ylab="Biased Reduced Estimate", ylim=c(-1.5,1.5), xlim=c(-1.5,1.5))
abline(a=0, b=1)
dev.off()
r <- summary(model.fit)
write.table(r$statistics, file="Summary.SumStat.BiasReduced.RiskScores.Estimates.txt", sep="\t")
write.table(r$quantiles, file="Summary.SumStat.BiasReduced.RiskScores.Estimates.txt", sep="\t", append=T)
|
#' Calculating agreement for dichotomous variables.
#'
#' Function to calculate specific agreement and overall proportion of agreement for dichotomous variables.
#'
#' @param ratings A dataframe or matrix of N x P with N the number of observations and P the number of raters.
#' @param CI Logical, indicates if confidence intervals have to be calculated
#' @param ConfLevel The confidence level to be used in calculating the confidence intervals. Possible values
#' are \code{"continuity"}, \code{"Fleiss"} and \code{"bootstrap"}.
#' @param correction Method of calculating the confidence intervals (de Vet et al., 2017). The confidence intervals (CI) can be calculated using
#' continuity correction, Fleiss correction or by use of bootstrap samples.
#' @param NrBoot In case of bootstrap methodology to calculate the CI, the number of bootstrap samples.
#' @param Parallel Logical, indicates if parallel computing has to be used to compute the confidence intervals.
#' Implemented only when using bootstrapping to calculate the confidence intervals.
#' @param no_cores Number of cores if parallel computing is used. Default is 1 core less than the number of
#' cores present.
#'
#' @return A list with the following components:
#' @return \item{SumTable}{The summed table as defined in the article of de Vet et al. (2017)}
#' @return \item{ObservedAgreem}{The overall proportion of agreement (with CI).}
#' @return \item{SpecificAgreem}{The specific agreement for each of the categories (with CIs).}
#' @references De Vet HCW, Mokkink LB, Terwee CB, Hoekstra OS, Knol DL. Clinicians are
#' right not to like Cohen’s k. \emph{BMJ} 2013;346:f2125.
#' @references de Vet, H.C.W., Terwee C.B., Knol, D.L., Bouter, L.M. (2006). When to use agreement
#' versus reliability measures. \emph{Journal of Clinical Epidemiology}, Vol.59(10), pp.1033-1039
#' @references de Vet, H.C.W., Dikmans, R.E., Eekhout, I. (2017). Specific agreement on dichotomous outcomes can be
#' calculated for more than two raters. \emph{Journal of Clinical Epidemiology}, Vol.83, pp.85-89
#'
#' @details This function is based on the functions as given in the appendix of the article of de Vet et al. (2017).
#'
#' @examples
#' # Load data
#' data(Agreement_deVetArticle)
#' Df = Agreement_deVetArticle
#'
#' DiagnosticAgreement.deVet(Df)
DiagnosticAgreement.deVet <- function(ratings, CI = T, ConfLevel = 0.95, correction=c("continuity","Fleiss", "bootstrap"), NrBoot = 1e3,
Parallel = F, no_cores = detectCores() - 1){
if(length(unique(unlist(ratings)))>2) stop("Multinomial variable (number of unique values > 2). Consider using PA.matrix")
if(is.matrix(ratings)) ratings = as.data.frame(ratings)
stopifnot(is.numeric(NrBoot))
if(NrBoot < 200) stop("200 is the minimum number of bootstrap samples.")
correction = match.arg(correction)
SumTab = sumtable(ratings)
AgreemTab = Agreement(SumTab)
AgreemTab = SpecAgreem(AgreemTab)
if(CI) AgreemTab = CIAgreem(AgreemTab, level=ConfLevel, AgreemStat = "all", correction=correction, NrBoot = NrBoot,
Parallel=Parallel, no_cores = co_cores)
return(AgreemTab)
}
| /R/DiagnosticAgreement.deVet.R | no_license | BavoDC/AGREL | R | false | false | 3,173 | r | #' Calculating agreement for dichotomous variables.
#'
#' Function to calculate specific agreement and overall proportion of agreement for dichotomous variables.
#'
#' @param ratings A dataframe or matrix of N x P with N the number of observations and P the number of raters.
#' @param CI Logical, indicates if confidence intervals have to be calculated
#' @param ConfLevel The confidence level to be used in calculating the confidence intervals. Possible values
#' are \code{"continuity"}, \code{"Fleiss"} and \code{"bootstrap"}.
#' @param correction Method of calculating the confidence intervals (de Vet et al., 2017). The confidence intervals (CI) can be calculated using
#' continuity correction, Fleiss correction or by use of bootstrap samples.
#' @param NrBoot In case of bootstrap methodology to calculate the CI, the number of bootstrap samples.
#' @param Parallel Logical, indicates if parallel computing has to be used to compute the confidence intervals.
#' Implemented only when using bootstrapping to calculate the confidence intervals.
#' @param no_cores Number of cores if parallel computing is used. Default is 1 core less than the number of
#' cores present.
#'
#' @return A list with the following components:
#' @return \item{SumTable}{The summed table as defined in the article of de Vet et al. (2017)}
#' @return \item{ObservedAgreem}{The overall proportion of agreement (with CI).}
#' @return \item{SpecificAgreem}{The specific agreement for each of the categories (with CIs).}
#' @references De Vet HCW, Mokkink LB, Terwee CB, Hoekstra OS, Knol DL. Clinicians are
#' right not to like Cohen’s k. \emph{BMJ} 2013;346:f2125.
#' @references de Vet, H.C.W., Terwee C.B., Knol, D.L., Bouter, L.M. (2006). When to use agreement
#' versus reliability measures. \emph{Journal of Clinical Epidemiology}, Vol.59(10), pp.1033-1039
#' @references de Vet, H.C.W., Dikmans, R.E., Eekhout, I. (2017). Specific agreement on dichotomous outcomes can be
#' calculated for more than two raters. \emph{Journal of Clinical Epidemiology}, Vol.83, pp.85-89
#'
#' @details This function is based on the functions as given in the appendix of the article of de Vet et al. (2017).
#'
#' @examples
#' # Load data
#' data(Agreement_deVetArticle)
#' Df = Agreement_deVetArticle
#'
#' DiagnosticAgreement.deVet(Df)
DiagnosticAgreement.deVet <- function(ratings, CI = T, ConfLevel = 0.95, correction=c("continuity","Fleiss", "bootstrap"), NrBoot = 1e3,
Parallel = F, no_cores = detectCores() - 1){
if(length(unique(unlist(ratings)))>2) stop("Multinomial variable (number of unique values > 2). Consider using PA.matrix")
if(is.matrix(ratings)) ratings = as.data.frame(ratings)
stopifnot(is.numeric(NrBoot))
if(NrBoot < 200) stop("200 is the minimum number of bootstrap samples.")
correction = match.arg(correction)
SumTab = sumtable(ratings)
AgreemTab = Agreement(SumTab)
AgreemTab = SpecAgreem(AgreemTab)
if(CI) AgreemTab = CIAgreem(AgreemTab, level=ConfLevel, AgreemStat = "all", correction=correction, NrBoot = NrBoot,
Parallel=Parallel, no_cores = co_cores)
return(AgreemTab)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.