content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SimulateTransitService.R
\docType{data}
\name{D4cModels_ls}
\alias{D4cModels_ls}
\title{D4c simulation models}
\format{a list containing 3 components
\describe{
\item{NormD4_PtQt}{matrix of normalized D4c values by place type and quantile}
\item{D4SupplyRatio_Ua}{numeric vector of urbanized area average D4c values by urbanized area}
\item{AveD4cModel_ls}{list containing components for linear model to predict urbanized area average D4c model}
}}
\source{
SimulateTransitService.R script
}
\usage{
D4cModels_ls
}
\description{
Models to predict the average D4c value for an urbanized area and to predict
the D4c values for SimBzones from the urbanized area average and from the
SimBzones place types.
}
\keyword{datasets}
| /sources/modules/VESimTransportSupply/man/D4cModels_ls.Rd | permissive | rickdonnelly/VisionEval-Dev | R | false | true | 806 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SimulateTransitService.R
\docType{data}
\name{D4cModels_ls}
\alias{D4cModels_ls}
\title{D4c simulation models}
\format{a list containing 3 components
\describe{
\item{NormD4_PtQt}{matrix of normalized D4c values by place type and quantile}
\item{D4SupplyRatio_Ua}{numeric vector of urbanized area average D4c values by urbanized area}
\item{AveD4cModel_ls}{list containing components for linear model to predict urbanized area average D4c model}
}}
\source{
SimulateTransitService.R script
}
\usage{
D4cModels_ls
}
\description{
Models to predict the average D4c value for an urbanized area and to predict
the D4c values for SimBzones from the urbanized area average and from the
SimBzones place types.
}
\keyword{datasets}
|
/Zajecia_2018_02_26/wprowadzenie_do_R.R | no_license | AdamWrobel/AGH-2018 | R | false | false | 5,063 | r | ||
# Fortin-Lemieux style figure: kernel densities of real hourly wages for
# women in 1979 vs 1988 (CPS MORG extracts), with each year's real minimum
# wage marked. All output goes to fortinlemieux.pdf.
pdf(file="fortinlemieux.pdf",family="Times",paper="USr",width=0,height=0)
library(dplyr)
library(ggplot2)
library(reshape2)
library(car)
library(haven)
options(scipen=10000)
options(digits=4)
rm(list=ls())
# CPI levels used to express 1988 dollars in 1979 dollars.
cpi79 <- 72.6
cpi88 <- 118.3
# Nominal minimum wages ($2.90 in 1979, $3.35 in 1988) deflated to 1979 dollars.
mw79 <- 2.90 * cpi79/cpi79
mw88 <- 3.35 * cpi79/cpi88
# 1988 sample: women (sex==2) aged 16-65, paid hourly, I25c==0
# (I25c is presumably an allocation/imputation flag -- TODO confirm).
morg88 <- read_dta(file="morg88.dta")
morg88 <- filter(morg88,
                 sex==2,
                 age>=16 & age<=65,
                 I25c==0,
                 paidhre==1)
# Same selection applied to the 1979 extract.
morg79 <- read_dta(file="morg79.dta")
morg79 <- filter(morg79,
                 sex==2,
                 age>=16 & age<=65,
                 I25c==0,
                 paidhre==1)
morg <- bind_rows(morg79,morg88)
# Real hourly earnings in 1979 dollars (earnhre appears to be stored in
# cents, hence the division by 100 -- confirm against the MORG codebook).
morg <- mutate(morg,
               rearnhre = ifelse(year==1979, earnhre/100 * cpi79/cpi79, earnhre/100 * cpi79/cpi88),
               Year = factor(year))
# Vertical segments marking each year's real minimum wage on the plot.
df79 <- data.frame(x1=mw79, x2=mw79, y1=0.01, y2=2.7, Year=factor(1979))
df88 <- data.frame(x1=mw88, x2=mw88, y1=0.01, y2=2.7, Year=factor(1988))
# Overlaid wage densities on a log wage axis with annotated minimum wages.
ggplot(morg) + geom_density(aes(x=rearnhre, group=Year, linetype=Year),trim=TRUE, bw=0.05) +
  scale_x_continuous(trans="log",breaks=c(2,5,10,25)) + theme(legend.position = "none") +
  coord_cartesian(xlim=c(1,30)) +
  geom_segment(aes(x = x1, y = y1, xend = x2, yend = y2), data = df79, alpha=0.5) +
  geom_segment(aes(x = x1, y = y1, xend = x2, yend = y2), data = df88, alpha=0.5) +
  annotate("text", x = 10, y = 0.2, label = "1988") +
  annotate("text", x = 4, y = 1.1, label = "1979") +
  annotate("text", x = 2.5, y = 2.6, label = "Minimum wage") +
  annotate("text", x = 1.8, y = 2.1, label = "MW 1988") +
  annotate("text", x = 2.6, y = 2.1, label = "MW 1979")
| /lab-04-visualization/fortinlemieux.R | no_license | michaelaoash/git-econ753 | R | false | false | 1,710 | r | pdf(file="fortinlemieux.pdf",family="Times",paper="USr",width=0,height=0)
library(dplyr)
library(ggplot2)
library(reshape2)
library(car)
library(haven)
options(scipen=10000)
options(digits=4)
rm(list=ls())
cpi79 <- 72.6
cpi88 <- 118.3
mw79 <- 2.90 * cpi79/cpi79
mw88 <- 3.35 * cpi79/cpi88
morg88 <- read_dta(file="morg88.dta")
morg88 <- filter(morg88,
sex==2,
age>=16 & age<=65,
I25c==0,
paidhre==1)
morg79 <- read_dta(file="morg79.dta")
morg79 <- filter(morg79,
sex==2,
age>=16 & age<=65,
I25c==0,
paidhre==1)
morg <- bind_rows(morg79,morg88)
morg <- mutate(morg,
rearnhre = ifelse(year==1979, earnhre/100 * cpi79/cpi79, earnhre/100 * cpi79/cpi88),
Year = factor(year))
df79 <- data.frame(x1=mw79, x2=mw79, y1=0.01, y2=2.7, Year=factor(1979))
df88 <- data.frame(x1=mw88, x2=mw88, y1=0.01, y2=2.7, Year=factor(1988))
ggplot(morg) + geom_density(aes(x=rearnhre, group=Year, linetype=Year),trim=TRUE, bw=0.05) +
scale_x_continuous(trans="log",breaks=c(2,5,10,25)) + theme(legend.position = "none") +
coord_cartesian(xlim=c(1,30)) +
geom_segment(aes(x = x1, y = y1, xend = x2, yend = y2), data = df79, alpha=0.5) +
geom_segment(aes(x = x1, y = y1, xend = x2, yend = y2), data = df88, alpha=0.5) +
annotate("text", x = 10, y = 0.2, label = "1988") +
annotate("text", x = 4, y = 1.1, label = "1979") +
annotate("text", x = 2.5, y = 2.6, label = "Minimum wage") +
annotate("text", x = 1.8, y = 2.1, label = "MW 1988") +
annotate("text", x = 2.6, y = 2.1, label = "MW 1979")
|
#' Global variables and functions
#'
#' @importFrom utils globalVariables
#'
#' @description Creates global variables for package
# Register names used via non-standard evaluation (dplyr/ggplot2 column
# references, pipe operators, etc.) so R CMD check does not flag them as
# undefined global variables.
utils::globalVariables(c('%>%', ':=', 'AGEGRP', 'COUNTY', 'CTYNAME', 'STATE', 'STNAME', 'SUMLEV', 'TOT_MALE', 'TOT_POP',
                         'YEAR', 'acs_data', 'city', 'city_to_county', 'company', 'county', 'data', 'date_type',
                         'effective_date', 'everything', 'lat', 'layoff_reason', 'layoffs', 'lm', 'long',
                         'n_employees', 'n_layoffs', 'notice_date', 'pop', 'predict', 'received_date', 'subregion',
                         'type', 'warnSample', 'year'))
| /R/globals.R | no_license | jacob-light/jobsec | R | false | false | 644 | r | #' Global variables and functions
#'
#' @importFrom utils globalVariables
#'
#' @description Creates global variables for package
utils::globalVariables(c('%>%', ':=', 'AGEGRP', 'COUNTY', 'CTYNAME', 'STATE', 'STNAME', 'SUMLEV', 'TOT_MALE', 'TOT_POP',
'YEAR', 'acs_data', 'city', 'city_to_county', 'company', 'county', 'data', 'date_type',
'effective_date', 'everything', 'lat', 'layoff_reason', 'layoffs', 'lm', 'long',
'n_employees', 'n_layoffs', 'notice_date', 'pop', 'predict', 'received_date', 'subregion',
'type', 'warnSample', 'year'))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{adv}
\alias{adv}
\title{GIS advisory dataset for Hurricane Sandy Adv 18}
\format{An object of class \code{list} of length 4.}
\source{
\url{http://www.nhc.noaa.gov/gis/archive_forecast_results.php?id=al18&year=2012&name=Hurricane\%20SANDY}
}
\usage{
adv
}
\description{
GIS advisory dataset for Hurricane Sandy Adv 18
}
\keyword{datasets}
| /man/adv.Rd | permissive | ElioRoca/rrricanes | R | false | true | 445 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{adv}
\alias{adv}
\title{GIS advisory dataset for Hurricane Sandy Adv 18}
\format{An object of class \code{list} of length 4.}
\source{
\url{http://www.nhc.noaa.gov/gis/archive_forecast_results.php?id=al18&year=2012&name=Hurricane\%20SANDY}
}
\usage{
adv
}
\description{
GIS advisory dataset for Hurricane Sandy Adv 18
}
\keyword{datasets}
|
source(here::here("lib", "common.R"))
# Load every CSV under `raw_data_dir` (reading at most `n_max` rows from
# each) and bundle the combined data together with any loading problems.
load_raw <- function(raw_data_dir, n_max) {
  loaded <- load_all_csvs(raw_data_dir, n_max = n_max)
  stop_data <- loaded$data
  problems <- loaded$loading_problems
  bundle_raw(stop_data, problems)
}
# Clean/standardize the Rhode Island statewide stop data.
#
# Translates coded demographic, stop-reason, and search-reason columns into
# readable values, derives outcome/search/contraband flags from the raw
# result codes, and finishes with the shared standardize() step.
#
# d: bundle returned by load_raw() (d$data plus d$metadata).
# helpers: shared helper functions; unused here but kept so every state's
#   clean() has the same clean(d, helpers) interface.
clean <- function(d, helpers) {
  # Race codes -> standardized race labels.
  tr_race <- c(
    B = "black",
    H = "hispanic",
    I = "asian/pacific islander",
    # NOTE: L corresponds to "Black Hispanic" which is mapped to "hispanic".
    # This is consistent with coding policies in other states.
    L = "hispanic",
    N = "other",
    W = "white",
    O = "other"
  )
  # Stop-basis codes -> human-readable stop reasons.
  tr_reason_for_stop <- c(
    AP = "APB",
    CS = "Call for Service",
    EQ = "Equipment/Inspection Violation",
    MO = "Motorist Assist/Courtesy",
    OT = "Other Traffic Violation",
    RV = "Registration Violation",
    SB = "Seatbelt Violation",
    SD = "Special Detail/Directed Patrol",
    SP = "Speeding",
    SU = "Suspicious Person",
    VO = "Violation of City/Town Ordinance",
    WA = "Warrant"
  )
  # Search-reason codes -> human-readable search reasons.
  tr_reason_for_search <- c(
    "A" = "Incident to Arrest",
    "C" = "Plain View",
    "I" = "Inventory/Tow",
    "O" = "Odor of Drugs/Alcohol",
    "P" = "Probable Cause",
    "R" = "Reasonable Suspicion",
    "T" = "Terry Frisk"
  )
  d$data %>%
    # Preserve the original coded values under raw_* names for provenance.
    add_raw_colname_prefix(
      OperatorRace,
      OperatorSex,
      ResultOfStop,
      SearchResultOne,
      SearchResultTwo,
      SearchResultThree,
      BasisForStop
    ) %>%
    rename(
      # NOTE: Best lead on mapping trooper zone to location:
      # http://www.scannewengland.net/wiki/index.php?title=Rhode_Island_State_Police
      zone = Zone,
      department_id = AgencyORI,
      vehicle_make = Make,
      vehicle_model = Model
    ) %>%
    mutate(
      date = parse_date(StopDate, "%Y%m%d"),
      time = parse_time(StopTime, "%H%M"),
      subject_yob = YearOfBirth,
      subject_race = fast_tr(raw_OperatorRace, tr_race),
      # tr_sex comes from shared helper code, not this file.
      subject_sex = fast_tr(raw_OperatorSex, tr_sex),
      # NOTE: Data received in Apr 2016 were specifically from a request for
      # vehicular stops.
      type = "vehicular",
      # Outcome flags from the result-of-stop code
      # (D/P -> arrest, M -> citation, W -> warning).
      arrest_made = raw_ResultOfStop == "D" | raw_ResultOfStop == "P",
      citation_issued = raw_ResultOfStop == "M",
      warning_issued = raw_ResultOfStop == "W",
      # first_of() keeps the first (most severe) outcome that applies.
      outcome = first_of(
        "arrest" = arrest_made,
        "citation" = citation_issued,
        "warning" = warning_issued
      ),
      # Contraband flags: a matching code in any of the three result slots.
      contraband_drugs = (raw_SearchResultOne == 'D') | (raw_SearchResultTwo == 'D') | (raw_SearchResultThree == 'D'),
      contraband_alcohol = (raw_SearchResultOne == 'A') | (raw_SearchResultTwo == 'A') | (raw_SearchResultThree == 'A'),
      contraband_weapons = (raw_SearchResultOne == 'W') | (raw_SearchResultTwo == 'W') | (raw_SearchResultThree == 'W'),
      contraband_other = (raw_SearchResultOne %in% c('M','O')) |
        (raw_SearchResultTwo %in% c('M','O')) |
        (raw_SearchResultThree %in% c('M','O')),
      contraband_true = contraband_drugs | contraband_weapons | contraband_alcohol | contraband_other,
      # NOTE(review): each contraband_* flag is NA whenever a result slot is
      # NA and no slot matches, so contraband_false looks like it can only be
      # FALSE or NA (never TRUE); confirm this NA-resolution step actually
      # flips any rows before relying on it.
      contraband_false = is.na(contraband_true) &
        (!contraband_drugs | !contraband_weapons | !contraband_alcohol | !contraband_other),
      contraband_found = if_else(contraband_false, FALSE, contraband_true),
      frisk_performed = Frisked == "Y",
      # NOTE: only 10 Searched are NA -- we assume these to be F
      search_conducted = Searched == "Y" & !is.na(Searched),
      # Concatenate up to three search-reason codes for pattern matching.
      multi_search_reasons = str_c_na(
        SearchReasonOne,
        SearchReasonTwo,
        SearchReasonThree,
        sep = "|"
      ),
      # Map reason codes to a standardized search basis; a conducted search
      # with no recognized code falls through to "probable cause".
      search_basis = first_of(
        "plain view" = str_detect(multi_search_reasons, "C"),
        "probable cause" = str_detect(multi_search_reasons, "O|P|R"),
        "other" = str_detect(multi_search_reasons, "A|I"),
        "probable cause" = search_conducted
      ),
      reason_for_search = str_c_na(
        fast_tr(SearchReasonOne, tr_reason_for_search),
        fast_tr(SearchReasonTwo, tr_reason_for_search),
        fast_tr(SearchReasonThree, tr_reason_for_search),
        sep = "|"
      ),
      # An empty concatenation means no reason was recorded.
      reason_for_search = if_else(
        reason_for_search == "",
        NA_character_,
        reason_for_search
      ),
      reason_for_stop = fast_tr(raw_BasisForStop, tr_reason_for_stop)
    ) %>%
    standardize(d$metadata)
}
| /lib/states/ri/statewide.R | no_license | stanford-policylab/opp | R | false | false | 4,260 | r | source(here::here("lib", "common.R"))
load_raw <- function(raw_data_dir, n_max) {
d <- load_all_csvs(raw_data_dir, n_max = n_max)
bundle_raw(d$data, d$loading_problems)
}
clean <- function(d, helpers) {
tr_race <- c(
B = "black",
H = "hispanic",
I = "asian/pacific islander",
# NOTE: L corresponds to "Black Hispanic" which is mapped to "hispanic".
# This is consistent with coding policies in other states.
L = "hispanic",
N = "other",
W = "white",
O = "other"
)
tr_reason_for_stop <- c(
AP = "APB",
CS = "Call for Service",
EQ = "Equipment/Inspection Violation",
MO = "Motorist Assist/Courtesy",
OT = "Other Traffic Violation",
RV = "Registration Violation",
SB = "Seatbelt Violation",
SD = "Special Detail/Directed Patrol",
SP = "Speeding",
SU = "Suspicious Person",
VO = "Violation of City/Town Ordinance",
WA = "Warrant"
)
tr_reason_for_search <- c(
"A" = "Incident to Arrest",
"C" = "Plain View",
"I" = "Inventory/Tow",
"O" = "Odor of Drugs/Alcohol",
"P" = "Probable Cause",
"R" = "Reasonable Suspicion",
"T" = "Terry Frisk"
)
d$data %>%
add_raw_colname_prefix(
OperatorRace,
OperatorSex,
ResultOfStop,
SearchResultOne,
SearchResultTwo,
SearchResultThree,
BasisForStop
) %>%
rename(
# NOTE: Best lead on mapping trooper zone to location:
# http://www.scannewengland.net/wiki/index.php?title=Rhode_Island_State_Police
zone = Zone,
department_id = AgencyORI,
vehicle_make = Make,
vehicle_model = Model
) %>%
mutate(
date = parse_date(StopDate, "%Y%m%d"),
time = parse_time(StopTime, "%H%M"),
subject_yob = YearOfBirth,
subject_race = fast_tr(raw_OperatorRace, tr_race),
subject_sex = fast_tr(raw_OperatorSex, tr_sex),
# NOTE: Data received in Apr 2016 were specifically from a request for
# vehicular stops.
type = "vehicular",
arrest_made = raw_ResultOfStop == "D" | raw_ResultOfStop == "P",
citation_issued = raw_ResultOfStop == "M",
warning_issued = raw_ResultOfStop == "W",
outcome = first_of(
"arrest" = arrest_made,
"citation" = citation_issued,
"warning" = warning_issued
),
contraband_drugs = (raw_SearchResultOne == 'D') | (raw_SearchResultTwo == 'D') | (raw_SearchResultThree == 'D'),
contraband_alcohol = (raw_SearchResultOne == 'A') | (raw_SearchResultTwo == 'A') | (raw_SearchResultThree == 'A'),
contraband_weapons = (raw_SearchResultOne == 'W') | (raw_SearchResultTwo == 'W') | (raw_SearchResultThree == 'W'),
contraband_other = (raw_SearchResultOne %in% c('M','O')) |
(raw_SearchResultTwo %in% c('M','O')) |
(raw_SearchResultThree %in% c('M','O')),
contraband_true = contraband_drugs | contraband_weapons | contraband_alcohol | contraband_other,
contraband_false = is.na(contraband_true) &
(!contraband_drugs | !contraband_weapons | !contraband_alcohol | !contraband_other),
contraband_found = if_else(contraband_false, FALSE, contraband_true),
frisk_performed = Frisked == "Y",
# NOTE: only 10 Searched are NA -- we assume these to be F
search_conducted = Searched == "Y" & !is.na(Searched),
multi_search_reasons = str_c_na(
SearchReasonOne,
SearchReasonTwo,
SearchReasonThree,
sep = "|"
),
search_basis = first_of(
"plain view" = str_detect(multi_search_reasons, "C"),
"probable cause" = str_detect(multi_search_reasons, "O|P|R"),
"other" = str_detect(multi_search_reasons, "A|I"),
"probable cause" = search_conducted
),
reason_for_search = str_c_na(
fast_tr(SearchReasonOne, tr_reason_for_search),
fast_tr(SearchReasonTwo, tr_reason_for_search),
fast_tr(SearchReasonThree, tr_reason_for_search),
sep = "|"
),
reason_for_search = if_else(
reason_for_search == "",
NA_character_,
reason_for_search
),
reason_for_stop = fast_tr(raw_BasisForStop, tr_reason_for_stop)
) %>%
standardize(d$metadata)
}
|
#' Draws scatter plot with all effect estimates against AIC
#'
#' \code{all_plot_aic()} generates a scatter plot with all effect estimates against AIC.
#'
#' @export
#' @param data \emph{Object} from \code{all_cox}, \code{all_glm}, \code{all_speedglm}, or \code{all_lm}, including all effect estimate values.
#' @param xlab \emph{Character} string for x-axis name. Default is \code{"AIC"}
#' @param ylab \emph{Character} string for y-axis name. Default depends on original model types.
#' @param title \emph{Character} for plot title. Default is \code{"NULL"}.
#' @return A \pkg{ggplot2} object: scatter plot
#' @examples
#' vlist <- c("Age", "Sex", "Married", "BMI", "Education", "Income")
#' results <- all_cox(crude = "Surv(t0, t1, Endpoint) ~ Diabetes", xlist = vlist, data = diab_df)
#' all_plot_aic(results)
#' @name all_plot_aic
all_plot_aic <- function(data,
                         xlab = "AIC",
                         ylab = NULL,
                         title = NULL) {
  result_df <- data$estimate
  # Pick a y-axis label matching the effect measure of the fitted models.
  if (is.null(ylab)) {
    if (data$fun == "all_cox") {
      ylab <- "Hazard ratio"
    } else if (data$fun == "all_lm") {
      ylab <- "Coefficient"
    } else if (data$family == "poisson") {
      ylab <- "Rate ratio"
    } else if (data$family == "binomial") {
      ylab <- "Odds ratio"
    } else {
      ylab <- "Effect estimates"
    }
  }
  # Null-effect reference line: 0 for linear-model coefficients, 1 for ratios.
  hline <- if (data$fun == "all_lm") 0 else 1
  # NOTE: an earlier version derived an unused p_value column here (dead
  # code); the plot only needs aic and estimate.
  ggplot(data = result_df, aes(x = aic, y = estimate)) +
    geom_point(shape = 1) +
    labs(x = xlab, y = ylab, title = title) +
    theme_bw() +
    geom_hline(yintercept = hline, linetype = "dashed")
}
| /R/all_plot_aic.R | no_license | cran/allestimates | R | false | false | 1,748 | r | #' Draws scatter plot with all effect estimates against AIC
#'
#' \code{all_plot_aic()} generates a scatter plot with all effect estimates against AIC.
#'
#' @export
#' @param data \emph{Object} from \code{all_cox}, \code{all_glm}, \code{all_speedglm}, or \code{all_glm}, including all effect estimate values.
#' @param xlab \emph{Character} string for x-axis name. Default is \code{"AIC"}
#' @param ylab \emph{Character} string for y-axis name. Default depends on original model types.
#' @param title \emph{Character} for plot title. Default is \code{"NULL"}.
#' @return A \pkg{ggplot2} object: scatter plot
#' @examples
#' vlist <- c("Age", "Sex", "Married", "BMI", "Education", "Income")
#' results <- all_cox(crude = "Surv(t0, t1, Endpoint) ~ Diabetes", xlist = vlist, data = diab_df)
#' all_plot_aic(results)
#' @name all_plot_aic
all_plot_aic <- function(data,
xlab = "AIC",
ylab = NULL,
title = NULL) {
result_df <- data$estimate
if (is.null(ylab)) {
if (data$fun == "all_cox") {
ylab <- "Hazard ratio"
} else if (data$fun == "all_lm") {
ylab <- "Coefficient"
} else if (data$family == "poisson") {
ylab <- "Rate ratio"
} else if (data$family == "binomial") {
ylab <- "Odds ratio"
}
else {
ylab <- "Effect estimates"
}
}
hline <- ifelse(data$fun == "all_lm", 0, 1)
df_scatter <- result_df %>%
dplyr::mutate(p_value = p^(log(0.5) / log(0.05)))
ggplot(data = df_scatter, aes(x = aic, y = estimate)) +
geom_point(shape = 1) +
labs(x = xlab, y = ylab, title = title) +
theme_bw() +
geom_hline(yintercept = hline, linetype = "dashed")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_parameters.R
\name{load_parameters_table}
\alias{load_parameters_table}
\title{Load parameters table}
\usage{
load_parameters_table()
}
\description{
Load the parameters.csv file as a table.
}
| /1 - R Package/man/load_parameters_table.Rd | permissive | abilinski/COVID19 | R | false | true | 276 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_parameters.R
\name{load_parameters_table}
\alias{load_parameters_table}
\title{Load parameters table}
\usage{
load_parameters_table()
}
\description{
Load the parameters.csv file as a table.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{tc_bcu}
\alias{tc_bcu}
\title{Cotización Dólar USA Billete}
\format{Un data-frame con cinco variables:
\describe{
\item{\code{Moneda}}{Moneda a cotizar}
\item{\code{Fecha}}{Fecha de cotización}
\item{\code{Venta}}{Precio venta}
\item{\code{Compra}}{Precio compra}
\item{\code{Arbitraje}}{Arbitraje}
}
Se tiene 4770 cotizaciones diarias del dólar usa billete publicadas
entre el 03/01/2000 y el 28/12/2018.}
\usage{
tc_bcu
}
\description{
Cotización histórica Dólar USA Billete, desde el año 2000 a la fecha.
}
\examples{
tc_bcu
}
\keyword{datasets}
| /man/tc_bcu.Rd | no_license | transformauy/tc | R | false | true | 665 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{tc_bcu}
\alias{tc_bcu}
\title{Cotización Dólar USA Billete}
\format{Un data-frame con cinco variables:
\describe{
\item{\code{Moneda}}{Moneda a cotizar}
\item{\code{Fecha}}{Fecha de cotización}
\item{\code{Venta}}{Precio venta}
\item{\code{Compra}}{Precio compra}
\item{\code{Arbitraje}}{Arbitraje}
}
Se tiene 4770 cotizaciones diarias del dólar usa billete publicadas
entre el 03/01/2000 y el 28/12/2018.}
\usage{
tc_bcu
}
\description{
Cotización histórica Dólar USA Billete, desde el año 2000 a la fecha.
}
\examples{
tc_bcu
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_stats.R
\name{add_stats}
\alias{add_stats}
\title{Add statistical output to a tidy stats list}
\usage{
add_stats(results, output, identifier = NULL, type = NULL,
confirmatory = NULL, notes = NULL, class = NULL, args = NULL)
}
\arguments{
\item{results}{A tidystats list.}
\item{output}{Output of a statistical test or a data frame. If a data frame
is provided, it must already be in a tidy format.}
\item{identifier}{A character string identifying the model. Automatically
created if not provided.}
\item{type}{A character string indicating the type of test. One of
"hypothesis", "manipulation check", "contrast", "descriptives", or "other".
Can be abbreviated.}
\item{confirmatory}{A boolean to indicate whether the statistical test was
confirmatory (TRUE) or exploratory (FALSE). Can be NA.}
\item{notes}{A character string to add additional information. Some
statistical tests produce notes information, which will be overwritten if
notes are provided.}
\item{class}{A character string to indicate which function was used to
produce the output. See 'Details' for a list of supported functions.}
\item{args}{An optional list of additional arguments. Can be used to specify
how model results should be summarized.}
}
\description{
\code{add_stats} adds output to a tidystats list. It can take either the
output of a statistical test as input or a data frame. See Details for more
information on adding data frames.
}
\details{
Some statistical functions produce unidentifiable output, which
means \code{tidystats} cannot figure out how to tidy the data. To add these
results, you can provide a class via the class argument or you can manually
tidy the results yourself and add the resulting data frame via add_stats().
A list of supported classes are:
- \code{confint}
}
\examples{
# Create an empty list to store the results in
results <- list()
# Example: t-test
model_t_test <- t.test(extra ~ group, data = sleep)
results <- add_stats(results, model_t_test, identifier = "t_test")
# Example: correlation
x <- c(44.4, 45.9, 41.9, 53.3, 44.7, 44.1, 50.7, 45.2, 60.1)
y <- c( 2.6, 3.1, 2.5, 5.0, 3.6, 4.0, 5.2, 2.8, 3.8)
model_correlation <- cor.test(x, y)
# Add output to the results list, only storing the correlation and p-value
results <- add_stats(results, model_correlation, identifier = "correlation")
# Example: Regression
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
weight <- c(ctl, trt)
model_lm <- lm(weight ~ group)
# Add output to the results list, with notes
results <- add_stats(results, model_lm, identifier = "regression", notes =
"regression example")
# Example: ANOVA
model_aov <- aov(yield ~ block + N * P * K, npk)
results <- add_stats(results, model_aov, identifier = "ANOVA")
# Example: Within-subjects ANOVA
model_aov_within <- aov(extra ~ group + Error(ID/group), data = sleep)
results <- add_stats(results, model_aov_within, identifier = "ANOVA_within")
# Example: Manual chi-squared test of independence
library(tibble)
x_squared_data <- tibble(
statistic = c("X-squared", "df", "p"),
value = c(5.4885, 6, 0.4828),
method = "Chi-squared test of independence"
)
results <- add_stats(results, x_squared_data, identifier = "x_squared")
}
| /man/add_stats.Rd | permissive | ikbentimkramer/tidystats-v0.3 | R | false | true | 3,408 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_stats.R
\name{add_stats}
\alias{add_stats}
\title{Add statistical output to a tidy stats list}
\usage{
add_stats(results, output, identifier = NULL, type = NULL,
confirmatory = NULL, notes = NULL, class = NULL, args = NULL)
}
\arguments{
\item{results}{A tidystats list.}
\item{output}{Output of a statistical test or a data frame. If a data frame
is provided, it must already be in a tidy format.}
\item{identifier}{A character string identifying the model. Automatically
created if not provided.}
\item{type}{A character string indicating the type of test. One of
"hypothesis", "manipulation check", "contrast", "descriptives", or "other".
Can be abbreviated.}
\item{confirmatory}{A boolean to indicate whether the statistical test was
confirmatory (TRUE) or exploratory (FALSE). Can be NA.}
\item{notes}{A character string to add additional information. Some
statistical tests produce notes information, which will be overwritten if
notes are provided.}
\item{class}{A character string to indicate which function was used to
produce the output. See 'Details' for a list of supported functions.}
\item{args}{An optional list of additional arguments. Can be used to specify
how model results should be summarized.}
}
\description{
\code{add_stats} adds output to a tidystats list. It can take either the
output of a statistical test as input or a data frame. See Details for more
information on adding data frames.
}
\details{
Some statistical functions produce unidentifiable output, which
means \code{tidystats} cannot figure out how to tidy the data. To add these
results, you can provide a class via the class argument or you can manually
tidy the results yourself and add the resulting data frame via add_stats().
A list of supported classes are:
- \code{confint}
}
\examples{
# Create an empty list to store the results in
results <- list()
# Example: t-test
model_t_test <- t.test(extra ~ group, data = sleep)
results <- add_stats(results, model_t_test, identifier = "t_test")
# Example: correlation
x <- c(44.4, 45.9, 41.9, 53.3, 44.7, 44.1, 50.7, 45.2, 60.1)
y <- c( 2.6, 3.1, 2.5, 5.0, 3.6, 4.0, 5.2, 2.8, 3.8)
model_correlation <- cor.test(x, y)
# Add output to the results list, only storing the correlation and p-value
results <- add_stats(results, model_correlation, identifier = "correlation")
# Example: Regression
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
weight <- c(ctl, trt)
model_lm <- lm(weight ~ group)
# Add output to the results list, with notes
results <- add_stats(results, model_lm, identifier = "regression", notes =
"regression example")
# Example: ANOVA
model_aov <- aov(yield ~ block + N * P * K, npk)
results <- add_stats(results, model_aov, identifier = "ANOVA")
# Example: Within-subjects ANOVA
model_aov_within <- aov(extra ~ group + Error(ID/group), data = sleep)
results <- add_stats(results, model_aov_within, identifier = "ANOVA_within")
# Example: Manual chi-squared test of independence
library(tibble)
x_squared_data <- tibble(
statistic = c("X-squared", "df", "p"),
value = c(5.4885, 6, 0.4828),
method = "Chi-squared test of independence"
)
results <- add_stats(results, x_squared_data, identifier = "x_squared")
}
|
# Add two values; vectorized element-wise over numeric vectors.
add2<-function(x,y){
  x+y
} | /My first function in R.R | no_license | PravallikaaMohan/datasciencecoursera | R | false | false | 30 | r | add2<-function(x,y){
x+y
} |
# Box-Muller transform: map two independent Uniform(0, 1) draws u1, u2 into
# two independent standard-normal draws, returned as a length-2 vector.
getGaussianVariablesByBoxMuller <- function(u1, u2){
  r <- sqrt(-2 * log(u1))   # radius
  theta <- 2 * pi * u2      # angle
  c(r * cos(theta), r * sin(theta))
} | /src/fun-getGaussianVariablesByBoxMuller.R | no_license | kkosc/Derivatives-Pricing-Methods | R | false | false | 146 | r | getGaussianVariablesByBoxMuller <- function(u1, u2){
r <- sqrt(-2 * log(u1))
theta <- 2 * pi * u2
c(r * cos(theta), r * sin(theta))
} |
# Housing-price prediction: load train/test data, clean shared columns, and
# (below) fit a regression tree on Price.
library(randomForest)
library(ggplot2)
library(dplyr)
library(tree)
library(cvTools)
ld_train=read.csv("C:/Users/user/Downloads/housing_train.csv",stringsAsFactors = F)
ld_test= read.csv("C:/Users/user/Downloads/housing_test.csv",stringsAsFactors = F)
# Placeholder target so train and test can be stacked and cleaned together.
ld_test$Price=NA
str(ld_train)
ld_train$data='train'
ld_test$data='test'
ld_all=rbind(ld_train,ld_test)
# Count missing values per column.
lapply(ld_all,function(x) sum(is.na(x)))
# Drop columns not used downstream.
ld_all$CouncilArea=NULL
ld_all$YearBuilt=NULL
ld_all$Method=NULL
# Drop rows where Distance itself is missing.
ld_all=ld_all[!(is.na(ld_all$Distance)),]
# Mean-impute remaining NAs in every column except the split marker and the
# target. NOTE(review): mean() on a character column returns NA with a
# warning, so in practice only numeric columns are imputed -- confirm.
for(col in names(ld_all)){
  if(sum(is.na(ld_all[,col]))>0 & !(col %in% c("data","Price"))){
    ld_all[is.na(ld_all[,col]),col]=mean(ld_all[,col],na.rm=T)
  }
}
View(ld_all)
# Replace a categorical column with 0/1 dummy columns, one per retained
# level. Levels appearing freq_cutoff times or fewer are dropped entirely,
# and the rarest surviving level becomes the reference (gets no dummy).
# Level names are sanitized (spaces removed; "-" -> "_", "?" -> "Q",
# "<" -> "LT_", "+" removed) before being appended to the column name.
# The original column is removed from the returned data frame.
CreateDummies <- function(data, var, freq_cutoff = 0) {
  level_counts <- table(data[, var])
  level_counts <- sort(level_counts[level_counts > freq_cutoff])
  # Skip the least frequent retained level: it is the reference category.
  for (level in names(level_counts)[-1]) {
    dummy_name <- paste(var, level, sep = "_")
    dummy_name <- gsub(" ", "", dummy_name)
    dummy_name <- gsub("-", "_", dummy_name)
    dummy_name <- gsub("\\?", "Q", dummy_name)
    dummy_name <- gsub("<", "LT_", dummy_name)
    dummy_name <- gsub("\\+", "", dummy_name)
    data[, dummy_name] <- as.numeric(data[, var] == level)
  }
  data[, var] <- NULL
  data
}
glimpse(ld_all)
# Dummy-encode high-cardinality categoricals, keeping only levels with more
# than 500 occurrences.
ld_all=CreateDummies(ld_all ,"Suburb",500)
ld_all=CreateDummies(ld_all,"Address",500)
ld_all=CreateDummies(ld_all ,"SellerG",500)
ld_all=CreateDummies(ld_all ,"Type",500)
ld_all$Bedroom2=as.integer(ld_all$Bedroom2)
ld_all$Bathroom=as.integer(ld_all$Bathroom)
ld_all$Car=as.integer(ld_all$Car)
View(ld_all)
# Split back into train/test now that preprocessing is done.
ld_train=ld_all %>% filter(data=='train') %>% select(-data)
ld_test=ld_all %>% filter(data=='test') %>% select(-data,-Price)
##
# 70/30 train/validation split for model assessment.
set.seed(2)
s=sample(1:nrow(ld_train),0.7*nrow(ld_train))
ld_train1=ld_train[s,]
ld_train2=ld_train[-s,]
### Building A Decision Tree
ld.tree=tree(Price~.,data=ld_train1)
## Tree in text format
ld.tree
## Visual format
plot(ld.tree)
text(ld.tree)
## Performance (RMSE) on the validation set
val.IR=predict(ld.tree,newdata = ld_train2)
rmse_val=((val.IR)-(ld_train2$Price))^2 %>% mean() %>% sqrt()
rmse_val
# Refit on the full training data and write test-set predictions.
ld.tree.final=tree(Price~.,data=ld_train)
test.pred=predict(ld.tree.final,newdata=ld_test)
write.csv(test.pred,"mysubmission.csv",row.names = F)
| /RealestateDataSet/Real_DecisionTree.R | no_license | Srujana-Gunde/RealEstate_R | R | false | false | 2,144 | r | library(randomForest)
library(ggplot2)
library(dplyr)
library(tree)
library(cvTools)
ld_train=read.csv("C:/Users/user/Downloads/housing_train.csv",stringsAsFactors = F)
ld_test= read.csv("C:/Users/user/Downloads/housing_test.csv",stringsAsFactors = F)
ld_test$Price=NA
str(ld_train)
ld_train$data='train'
ld_test$data='test'
ld_all=rbind(ld_train,ld_test)
lapply(ld_all,function(x) sum(is.na(x)))
ld_all$CouncilArea=NULL
ld_all$YearBuilt=NULL
ld_all$Method=NULL
ld_all=ld_all[!(is.na(ld_all$Distance)),]
for(col in names(ld_all)){
if(sum(is.na(ld_all[,col]))>0 & !(col %in% c("data","Price"))){
ld_all[is.na(ld_all[,col]),col]=mean(ld_all[,col],na.rm=T)
}
}
View(ld_all)
CreateDummies=function(data,var,freq_cutoff=0){
t=table(data[,var])
t=t[t>freq_cutoff]
t=sort(t)
categories=names(t)[-1]
for( cat in categories){
name=paste(var,cat,sep="_")
name=gsub(" ","",name)
name=gsub("-","_",name)
name=gsub("\\?","Q",name)
name=gsub("<","LT_",name)
name=gsub("\\+","",name)
data[,name]=as.numeric(data[,var]==cat)
}
data[,var]=NULL
return(data)
}
glimpse(ld_all)
ld_all=CreateDummies(ld_all ,"Suburb",500)
ld_all=CreateDummies(ld_all,"Address",500)
ld_all=CreateDummies(ld_all ,"SellerG",500)
ld_all=CreateDummies(ld_all ,"Type",500)
ld_all$Bedroom2=as.integer(ld_all$Bedroom2)
ld_all$Bathroom=as.integer(ld_all$Bathroom)
ld_all$Car=as.integer(ld_all$Car)
View(ld_all)
ld_train=ld_all %>% filter(data=='train') %>% select(-data)
ld_test=ld_all %>% filter(data=='test') %>% select(-data,-Price)
##
set.seed(2)
s=sample(1:nrow(ld_train),0.7*nrow(ld_train))
ld_train1=ld_train[s,]
ld_train2=ld_train[-s,]
### Building A Deecison Tree
ld.tree=tree(Price~.,data=ld_train1)
## Tree in text format
ld.tree
## Visual Format
plot(ld.tree)
text(ld.tree)
## Performance on validation set
val.IR=predict(ld.tree,newdata = ld_train2)
rmse_val=((val.IR)-(ld_train2$Price))^2 %>% mean() %>% sqrt()
rmse_val
ld.tree.final=tree(Price~.,data=ld_train)
test.pred=predict(ld.tree.final,newdata=ld_test)
write.csv(test.pred,"mysubmission.csv",row.names = F)
|
# Run the I2C2 package test suite with testthat.
library(testthat)
library(I2C2)
# Pin the pre-3.6.0 RNG sampling algorithm so seeded tests keep producing
# the same draws on newer R versions (the call warns, hence the suppress).
suppressWarnings(RNGversion("3.5.0"))
test_check("I2C2")
| /tests/testthat.R | no_license | muschellij2/I2C2 | R | false | false | 90 | r | library(testthat)
library(I2C2)
suppressWarnings(RNGversion("3.5.0"))
test_check("I2C2")
|
library(shiny)
library(ggplot2)
library(reshape2)
# Define UI for the savings-scenario app: sliders for the saving parameters
# plus a timeline plot and a yearly balance table.
ui <- fluidPage(
  # Application title
  titlePanel("Workout 2"),
  fluidRow(
    # Column 1: starting balance and yearly contribution.
    column(3,
           wellPanel(
             sliderInput("initial",
                         "Initial Amount",
                         min = 0,
                         max = 100000,
                         value = 1000,
                         step = 500),
             sliderInput("annual",
                         "Annual Contribution:",
                         min = 0,
                         max = 50000,
                         value = 2000,
                         step = 500)
           )
    ),
    # Column 2: annual return and contribution growth rates (percent).
    column(3,
           wellPanel(
             sliderInput("return",
                         "Return Rate (in %):",
                         min = 0,
                         max = 20,
                         value = 5,
                         step = 0.1),
             sliderInput("growth",
                         "Growth Rate (in %):",
                         min = 0,
                         max = 20,
                         value = 2,
                         step = 0.1)
           )
    ),
    # Column 3: horizon in years and a facet toggle for the plot.
    column(3,
           wellPanel(
             sliderInput("years",
                         "Years:",
                         min = 0,
                         max = 50,
                         value = 20,
                         step = 1),
             selectInput("select", label = h3("Facet?"),
                         choices = list("Yes" = TRUE, "No" = FALSE),
                         selected = FALSE)
           )
    ),
    # Outputs: balance timeline plot and the yearly balances table.
    mainPanel(
      plotOutput("timeline"),
      tableOutput("balances")
    )
  )
)
# Server: computes year-by-year balances for three investing modalities and
# renders them as a table and a (optionally faceted) timeline plot.
server <- function(input, output) {
  # Compound growth of a lump sum over `years` at `rate`.
  future_value = function(amount = 0, rate = 0, years = 0){
    return(amount*(1+rate)^years)
  }
  # Future value of a fixed annual contribution.
  annuity = function(contrib = 0, rate = 0, years = 0){
    return(contrib*(((1+rate)^years)-1)/rate)
  }
  # Future value of an annually growing contribution.
  # NOTE(review): divides by (rate - growth), so the result is NaN/Inf when
  # the two rates are equal -- unchanged from the original behaviour.
  growing_annuity = function(contrib = 0, rate = 0, growth = 0, years = 0){
    return(contrib*(((1+rate)^years)-(1+growth)^years)/(rate-growth))
  }
  # Build the balances data frame for the three modalities. This logic was
  # previously duplicated verbatim in both renderTable and renderPlot.
  compute_modalities <- function() {
    no_contrib <- rep(0, input$years)
    fixed_contrib <- rep(0, input$years)
    growing_contrib <- rep(0, input$years)
    year <- seq(0, input$years, 1)
    # Note precedence: 0:input$years+1 is (0:years)+1, i.e. i = 1..years+1.
    for (i in 0:input$years + 1) {
      no_contrib[i] = future_value(amount = input$initial, rate = input$return/100, years = i-1)
      fixed_contrib[i] = no_contrib[i] + annuity(contrib = input$annual, rate = input$return/100, years = i-1)
      growing_contrib[i] = no_contrib[i] + growing_annuity(contrib = input$annual, rate = input$return/100, growth = input$growth/100, years = i-1)
    }
    data.frame(year, no_contrib, fixed_contrib, growing_contrib)
  }
  # Table of balances per year for each modality.
  output$balances <- renderTable({
    compute_modalities()
  })
  # Timeline plot; faceted by modality when the user selects "Yes".
  # (Only the plot path drops NA rows, matching the original behaviour.)
  output$timeline <- renderPlot({
    modalities <- na.omit(compute_modalities())
    modalities2 <- melt(modalities, id.vars = "year")
    base_plot <- ggplot(data = modalities2, aes(x = year, y = value, group = variable, colour = variable)) +
      ggtitle("Three modes of investing") +
      theme_bw() +
      ylab("balance") +
      labs(color = "modality")
    # input$select arrives as the string "TRUE"/"FALSE"; == FALSE coerces.
    if (input$select == FALSE) {
      base_plot + geom_point() + geom_line()
    } else {
      base_plot +
        geom_area(aes(fill = variable), alpha = 0.5) +
        geom_point() +
        geom_line() +
        facet_wrap(~ variable)
    }
  })
}
# Run the application
shinyApp(ui = ui, server = server)
| /app.R | no_license | hamza-rana/workout02 | R | false | false | 4,363 | r | library(shiny)
library(ggplot2)
library(reshape2)
# Define UI for application that draws a histogram
ui <- fluidPage(
# Application title
titlePanel("Workout 2"),
fluidRow(
column(3,
wellPanel(
sliderInput("initial",
"Initial Amount",
min = 0,
max = 100000,
value = 1000,
step = 500),
sliderInput("annual",
"Annual Contribution:",
min = 0,
max = 50000,
value = 2000,
step = 500)
)
),
column(3,
wellPanel(
sliderInput("return",
"Return Rate (in %):",
min = 0,
max = 20,
value = 5,
step = 0.1),
sliderInput("growth",
"Growth Rate (in %):",
min = 0,
max = 20,
value = 2,
step = 0.1)
)
),
column(3,
wellPanel(
sliderInput("years",
"Years:",
min = 0,
max = 50,
value = 20,
step = 1),
selectInput("select", label = h3("Facet?"),
choices = list("Yes" = TRUE, "No" = FALSE),
selected = FALSE)
)
),
mainPanel(
plotOutput("timeline"),
tableOutput("balances")
)
)
)
server <- function(input, output) {
future_value = function(amount = 0, rate = 0, years = 0){
return(amount*(1+rate)^years)
}
annuity = function(contrib = 0, rate = 0, years = 0){
return(contrib*(((1+rate)^years)-1)/rate)
}
growing_annuity = function(contrib = 0, rate = 0, growth = 0, years = 0){
return(contrib*(((1+rate)^years)-(1+growth)^years)/(rate-growth))
}
output$balances <- renderTable({
no_contrib <- rep(0, input$years)
fixed_contrib <- rep(0, input$years)
growing_contrib <- rep(0, input$years)
year <- seq(0,input$years,1)
for (i in 0:input$years+1) {
no_contrib[i] = future_value(amount = input$initial, rate = input$return/100, years = i-1)
fixed_contrib[i] = no_contrib[i] + annuity(contrib = input$annual, rate = input$return/100, years = i-1)
growing_contrib[i] = no_contrib[i] + growing_annuity(contrib = input$annual, rate = input$return/100, growth = input$growth/100, years = i-1)
}
modalities <- data.frame(year, no_contrib, fixed_contrib, growing_contrib)
modalities
})
output$timeline <- renderPlot({
no_contrib <- rep(0, input$years)
fixed_contrib <- rep(0, input$years)
growing_contrib <- rep(0, input$years)
year <- seq(0,input$years,1)
for (i in 0:input$years+1) {
no_contrib[i] = future_value(amount = input$initial, rate = input$return/100, years = i-1)
fixed_contrib[i] = no_contrib[i] + annuity(contrib = input$annual, rate = input$return/100, years = i-1)
growing_contrib[i] = no_contrib[i] + growing_annuity(contrib = input$annual, rate = input$return/100, growth = input$growth/100, years = i-1)
}
modalities <- data.frame(year, no_contrib, fixed_contrib, growing_contrib)
modalities <- na.omit(modalities)
modalities2 <- melt(modalities, id.vars = "year")
if(input$select == FALSE){
ggplot(data = modalities2, aes(x = year, y = value, group = variable, colour = variable)) +
geom_point() +
geom_line() +
ggtitle("Three modes of investing") +
theme_bw() +
ylab("balance") +
labs(color = "modality")
}
else{
ggplot(data = modalities2, aes(x = year, y = value, group = variable, colour = variable)) +
geom_area(aes(fill = variable), alpha = 0.5) +
geom_point() +
geom_line() +
ggtitle("Three modes of investing") +
theme_bw() +
ylab("balance") +
labs(color = "modality") +
facet_wrap(~ variable)
}
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
# Shared fixtures for the test suite.
m <- 3.2
psi <- 1
# chi values down to the double-precision underflow limit (stress cases)
smallchi <- c(1e-20, 1e-50, 1e-200, .Machine$double.xmin)
# chi values in the regular range
regchi <- c(1, 10, 100, 1000)
# m-th raw moment of the Generalised Inverse Gaussian distribution:
# E[X^m] = eta^m * K_{lambda+m}(omega) / K_lambda(omega).
# Uses exponentially scaled Bessel K so the scaling factors cancel in the
# ratio, which keeps the computation stable for extreme omega.
moments_GIG_raw <- function(m, lambda, chi, psi,
                            omega = sqrt(chi * psi), eta = sqrt(chi / psi)) {
  bessel_ratio <- besselK(omega, lambda + m, expon.scaled = TRUE) /
    besselK(omega, lambda, expon.scaled = TRUE)
  eta^m * bessel_ratio
}
# Numerical derivative of the GIG moment with respect to the order m,
# via a central finite difference with step h.
# NOTE(review): calls moments_GIG (not moments_GIG_raw above) -- presumably
# exported by the package under test; verify it is in scope when this
# helper runs.
# DmomentsGIG_Dm <- function(m, lam, chi, psi, h=1e-5)
#   (moments_GIG(m+h, lam, chi, psi) - moments_GIG(m-h, lam, chi, psi)) / 2 / h
DmomentsGIG_Dm <- function(m, lam, chi, psi,
                           omega=sqrt(chi*psi), eta=sqrt(chi/psi), h=1e-5)
  (moments_GIG(m+h, lam, omega=omega, eta=eta) - moments_GIG(m-h, lam, omega=omega, eta=eta)) / 2 / h
| /tests/testthat/helper.R | permissive | tnitithum/GIGdist | R | false | false | 657 | r | m <- 3.2
psi <- 1
smallchi <- c(1e-20, 1e-50, 1e-200, .Machine$double.xmin)
regchi <- c(1, 10, 100, 1000)
moments_GIG_raw <- function(m, lambda, chi, psi,
omega=sqrt(chi*psi), eta=sqrt(chi/psi)){
return(besselK(omega,lambda+m,TRUE)/besselK(omega,lambda,TRUE)*eta^m)
}
# DmomentsGIG_Dm <- function(m, lam, chi, psi, h=1e-5)
# (moments_GIG(m+h, lam, chi, psi) - moments_GIG(m-h, lam, chi, psi)) / 2 / h
DmomentsGIG_Dm <- function(m, lam, chi, psi,
omega=sqrt(chi*psi), eta=sqrt(chi/psi), h=1e-5)
(moments_GIG(m+h, lam, omega=omega, eta=eta) - moments_GIG(m-h, lam, omega=omega, eta=eta)) / 2 / h
|
library(timereg)
### Name: timecox
### Title: Fit Cox model with partly timevarying effects.
### Aliases: timecox
### Keywords: survival
### ** Examples
data(sTRACE)
# Fits time-varying Cox model (all covariate effects vary with time);
# follow-up restricted to 7 years, 100 resamples for the tests
out<-timecox(Surv(time/365,status==9)~age+sex+diabetes+chf+vf,
data=sTRACE,max.time=7,n.sim=100)
summary(out)
par(mfrow=c(2,3))
plot(out)
par(mfrow=c(2,3))
# Plot the resampled score processes used for the constancy tests
plot(out,score=TRUE)
# Fits semi-parametric time-varying Cox model: const() terms have
# time-constant effects, the remaining covariates time-varying effects
out<-timecox(Surv(time/365,status==9)~const(age)+const(sex)+
const(diabetes)+chf+vf,data=sTRACE,max.time=7,n.sim=100)
summary(out)
par(mfrow=c(2,3))
plot(out)
| /data/genthat_extracted_code/timereg/examples/timecox.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 591 | r | library(timereg)
### Name: timecox
### Title: Fit Cox model with partly timevarying effects.
### Aliases: timecox
### Keywords: survival
### ** Examples
data(sTRACE)
# Fits time-varying Cox model
out<-timecox(Surv(time/365,status==9)~age+sex+diabetes+chf+vf,
data=sTRACE,max.time=7,n.sim=100)
summary(out)
par(mfrow=c(2,3))
plot(out)
par(mfrow=c(2,3))
plot(out,score=TRUE)
# Fits semi-parametric time-varying Cox model
out<-timecox(Surv(time/365,status==9)~const(age)+const(sex)+
const(diabetes)+chf+vf,data=sTRACE,max.time=7,n.sim=100)
summary(out)
par(mfrow=c(2,3))
plot(out)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{date_log}
\alias{date_log}
\title{Print current timestamp for logging}
\usage{
date_log()
}
\value{
Current timestamp as character.
}
\description{
Print current timestamp for logging
}
| /man/date_log.Rd | permissive | pdiakumis/dracarys | R | false | true | 278 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{date_log}
\alias{date_log}
\title{Print current timestamp for logging}
\usage{
date_log()
}
\value{
Current timestamp as character.
}
\description{
Print current timestamp for logging
}
|
##
## CREATE TRAINING DATA
##
## Reads a corpus of R source code as plain text and turns it into
## one-hot-encoded (x, y) arrays for character-level language modelling:
## x holds sliding windows of TIMESTEPS characters, y the next character.
rm(list = ls())
# Libs
library(stringr)
library(purrr)
library(readr)
library(glue)
library(magrittr)
# CONST
TIMESTEPS <- 30
MAX_TEXT_LEN <- 200000
FILE <- "data/r_scripts_text.txt"
# Save info for training
saveRDS(MAX_TEXT_LEN, "data/max_text_len.rds")
saveRDS(TIMESTEPS, "data/timesteps.rds")
# Load data
text_raw <- read_file(FILE,
                      locale = locale(encoding = "ascii"))
# Split the corpus into a vector of single characters
text_total <- text_raw %>%
  strsplit(., "") %>%
  unlist()
vocab <- text_total %>%
  unique() %>%
  sort()
vocab_size <- vocab %>% length()
total_text_size <- text_total %>% length()
# Store vocab size
saveRDS(vocab_size, "data/vocab_size.rds")
saveRDS(vocab, "data/vocab.rds")
# Number of MAX_TEXT_LEN-sized chunks; the final partial chunk is folded
# into the last full one (see stop_idx below).
n_steps <- total_text_size %/% MAX_TEXT_LEN
# Save training data in chunks on drive
for (j in seq_len(n_steps)) {
  cat("Prepare chunk", j, "-----------\n")
  start_idx <- MAX_TEXT_LEN * (j - 1) + 1
  stop_idx <- if (j == n_steps) total_text_size else start_idx + MAX_TEXT_LEN - 1
  text <- text_total[start_idx:stop_idx]
  text_size <- text %>% length()
  # sentence: one row per sliding window of TIMESTEPS characters
  sentence <- vapply(
    seq_len(text_size - TIMESTEPS + 1),
    function(.i) {
      text[.i:(.i + TIMESTEPS - 1)]
    },
    vector("character", TIMESTEPS)
  ) %>% t()
  # next_char: the character following each window; NA for the last window,
  # hence the arrays below hold length(next_char) - 1 samples
  next_char <- vapply(
    seq_len(text_size - TIMESTEPS + 1),
    function(.i) {
      text[.i + TIMESTEPS]
    },
    vector("character", 1)
  ) %>% as.vector()
  # Vectorization
  cat("... create arrays\n")
  x <- array(0, dim = c(length(next_char) - 1, TIMESTEPS, vocab_size))
  y <- array(0, dim = c(length(next_char) - 1, vocab_size))
  for (i in seq_len(length(next_char) - 1)) {
    # NOTE(review): sentence[[i]] extracts a SINGLE element of the matrix
    # (column-major order), not row i, so the comparison result (length
    # vocab_size) is recycled across the TIMESTEPS x vocab_size slab.
    # sentence[i, ] looks like the intended expression -- verify.
    x[i,,] <- sapply(vocab, function(x){
      as.integer(x == sentence[[i]])
    })
    # One-hot encode the target character
    y[i,] <- as.integer(vocab == next_char[i])
  }
  cat("... write to drive\n")
  saveRDS(text, glue("data/data_for_callback/text{j}.rds"))
  saveRDS(x, glue("data/training_data/x{j}.rds"))
  saveRDS(y, glue("data/training_data/y{j}.rds"))
}
| /R/create_training_data.R | no_license | tkrabel/rcoder | R | false | false | 2,011 | r | ##
## CREATE TRAINING DATA
##
rm(list = ls())
# Libs
library(stringr)
library(purrr)
library(readr)
library(glue)
library(magrittr)
# CONST
TIMESTEPS <- 30
MAX_TEXT_LEN <- 200000
FILE <- "data/r_scripts_text.txt"
# Save info for training
saveRDS(MAX_TEXT_LEN, "data/max_text_len.rds")
saveRDS(TIMESTEPS, "data/timesteps.rds")
# Load data
text_raw <- read_file(FILE,
locale = locale(encoding = "ascii"))
text_total <- text_raw %>%
strsplit(., "") %>%
unlist()
vocab <- text_total %>%
unique() %>%
sort()
vocab_size <- vocab %>% length()
total_text_size <- text_total %>% length()
# Store vocab size
saveRDS(vocab_size, "data/vocab_size.rds")
saveRDS(vocab, "data/vocab.rds")
n_steps <- total_text_size %/% MAX_TEXT_LEN
# Save training data in chunks on drive
for (j in seq_len(n_steps)) {
cat("Prepare chunk", j, "-----------\n")
start_idx <- MAX_TEXT_LEN * (j - 1) + 1
stop_idx <- if (j == n_steps) total_text_size else start_idx + MAX_TEXT_LEN - 1
text <- text_total[start_idx:stop_idx]
text_size <- text %>% length()
sentence <- vapply(
seq_len(text_size - TIMESTEPS + 1),
function(.i) {
text[.i:(.i + TIMESTEPS - 1)]
},
vector("character", TIMESTEPS)
) %>% t()
next_char <- vapply(
seq_len(text_size - TIMESTEPS + 1),
function(.i) {
text[.i + TIMESTEPS]
},
vector("character", 1)
) %>% as.vector()
# Vectorization
cat("... create arrays\n")
x <- array(0, dim = c(length(next_char) - 1, TIMESTEPS, vocab_size))
y <- array(0, dim = c(length(next_char) - 1, vocab_size))
for (i in seq_len(length(next_char) - 1)) {
x[i,,] <- sapply(vocab, function(x){
as.integer(x == sentence[[i]])
})
y[i,] <- as.integer(vocab == next_char[i])
}
cat("... write to drive\n")
saveRDS(text, glue("data/data_for_callback/text{j}.rds"))
saveRDS(x, glue("data/training_data/x{j}.rds"))
saveRDS(y, glue("data/training_data/y{j}.rds"))
}
|
authors.mainpanel <- mainPanel(
fluidPage(
tags$h3("Listado de Autores:"),
tags$h2(""),
tags$h4(
tags$a(href="http://jmcartiles.netlify.com/",
"José Manuel Cazorla Artiles")
),
tags$p("Doctorando en Universidad de Las Palmas de Gran Canaria"),
tags$h2(""),
tags$h4(
tags$a(href="https://github.com/chrglez",
"Christian González Martel")
),
tags$p("Profesor en Universidad de Las Palmas de Gran Canaria")
), align = "center") | /authors_elements.R | no_license | jmcartiles/canary_islands_tourism_dashboard | R | false | false | 538 | r |
authors.mainpanel <- mainPanel(
fluidPage(
tags$h3("Listado de Autores:"),
tags$h2(""),
tags$h4(
tags$a(href="http://jmcartiles.netlify.com/",
"José Manuel Cazorla Artiles")
),
tags$p("Doctorando en Universidad de Las Palmas de Gran Canaria"),
tags$h2(""),
tags$h4(
tags$a(href="https://github.com/chrglez",
"Christian González Martel")
),
tags$p("Profesor en Universidad de Las Palmas de Gran Canaria")
), align = "center") |
#Production function estimation (by Olley and Pakes, 1996) with panel bootstrapping
#Xi Chen, STATEC Luxembourg
#Version 1.1, 15/09/2013-04/12/2013
#Functions for estimating the production function; the packages "plyr", "mgcv", "plm" are required
Estimat = function(panel, GAM=F){
  #Estimation with the Olley and Pakes method without bootstrapping.
  #"panel" should be an object of class "data.frame" with "id" and "year"
  #columns; variables are in LOG and named as "ln__" (lnY, lnL, lnM, lnK, lnI).
  #"GAM": use mgcv for generalized additive model estimation (slower).
  #Returns the coefficient vector c(lnL, lnM, lnK).
  require(plyr)
  require(mgcv)
  require(plm)
  d<- panel
  #The first stage: regress lnY on labour, materials and a cubic polynomial
  #(or, with GAM, a bivariate spline) in capital and investment.
  if (GAM==F){
    lnK2=d$lnK*d$lnK
    lnK3=d$lnK*d$lnK*d$lnK
    lnI2=d$lnI*d$lnI
    lnI3=d$lnI*d$lnI*d$lnI
    reg1= lm (lnY ~ lnL + lnM +lnK +lnK2+ lnK3 +lnI +lnI2+ lnI3, data=d )}
  else{reg1= gam(lnY ~ lnL + lnM + s(lnK,lnI),data=d)}
  #summary(reg1)
  bl=reg1$coefficients[2]
  bm=reg1$coefficients[3]
  #phi: fitted output net of the labour and materials contributions
  phi=reg1$fitted.values-bl*d$lnL-bm*d$lnM
  panel2=pdata.frame(cbind(d,phi), c("id","year"))
  #The second stage: choose betak to minimise the sum of squared residuals
  #of a cubic autoregression of productivity w on its own lag.
  sres2 = function (betak,data){
    #the current value
    w = data$phi-betak*data$lnK
    #the lag value (within individual, via plm's pdata.frame lag method)
    wl = lag(w)
    wl2 = wl*wl
    wl3 = wl*wl*wl
    #drop observations with year == 1 (no lag available)
    balanced=subset(data.frame(cbind(data,w,wl,wl2,wl3)),year!=1)
    reg = lm(w~wl+wl2+wl3,data=balanced)
    summary(reg)$coefficients
    res2=reg$residuals*reg$residuals
    Sres2=sum(res2)
    return(Sres2) }
  #plot for visualizing the optimization
  # test=matrix(NA, ncol=2,nrow=150)
  # for (i in -50:100){
  #   test[i+50,1]=i/100+0.025
  #   test[i+50,2]=sres2(bk=(i/100+0.025),data=panel2)
  # }
  # plot(test[,1],test[,2])
  b0=0
  bnlm = optim(b0,sres2,data=panel2, method ="L-BFGS-B")
  bk=c("lnK"=bnlm$par)
  return(c(bl,bm,bk))
}
EstBoot = function(NBoot,RBoot,panelBoot){
  #Panel bootstrap of the Olley-Pakes estimator: resample individuals
  #with replacement and re-estimate on each replicate.
  #"NBoot" number of individuals
  #"RBoot" number of replications
  #"panelBoot" the panel data.frame, with an "id" column identifying individuals
  #Returns a matrix with columns: point estimate, bootstrap sd, t-ratio.
  N=NBoot
  R=RBoot
  d=panelBoot
  #First column: point estimates on the original sample.
  ResBoot=Estimat(panel=d, GAM=F)
  for (j in 1:R){
    set.seed(1234+j)
    #Draw a bootstrap sample of individual ids with replacement.
    #Replaces the original as.integer(runif(N, min=1, max=N)), which
    #truncated to 1..N-1 and therefore (almost) never sampled id N and
    #under-sampled the edges -- a biased, incomplete resampling scheme.
    random <- sample(N, N, replace = TRUE)
    #Stack the resampled individuals' panels in one shot instead of
    #growing the data frame with rbind inside a loop.
    resampled <- do.call(rbind, lapply(random, function(i) subset(d, id == i)))
    ResBoot=cbind(ResBoot,Estimat(panel=resampled, GAM=F))
  }
  #write.csv(ResBoot, file = "ResBoot.csv")
  #Bootstrap standard error of each coefficient (one row per coefficient;
  #as in the original, the point-estimate column is included in the sd).
  SdBoot <- apply(ResBoot, 1, sd)
  BootRes=cbind(ResBoot[,1],SdBoot,ResBoot[,1]/SdBoot)
  return(BootRes)
}
# Estimate (log) productivity: lnY minus the contributions of labour,
# materials and capital at the Olley-Pakes coefficient estimates.
# The residual includes the exogenous shocks.
# "panel": data.frame with variables in LOG, named "ln__".
# "GAM": forwarded to Estimat (use mgcv's generalized additive model).
EstProd = function (panel,GAM=F){
  coefs <- Estimat(panel = panel, GAM = GAM)
  panel$lnY - coefs[1] * panel$lnL - coefs[2] * panel$lnM - coefs[3] * panel$lnK
}
| /OlleyPakesEstimation_v1.1.r | no_license | chenxi20/MissingCapital | R | false | false | 2,918 | r | #Production function Estimation (by Olley and Pakes, 1996) with panel bootstraping
#Xi Chen, STATEC Luxembourg
#Version 1.1, 15/09/2013-04/12/2013
#Functions for estimating the production function, the package "plyr", "mgcv", "plm" is required
Estimat = function(panel, GAM=F){
#Estimation with Olley and Pakes method without bootstraping
#"panel" should be an objet of class "data.frame" and variables are in LOG and named as "ln__"
#"GAM" use of mgcv for generalized addditive model estimation (slower)
require(plyr)
require(mgcv)
require(plm)
d<- panel
#The first-stage estimation
if (GAM==F){
lnK2=d$lnK*d$lnK
lnK3=d$lnK*d$lnK*d$lnK
lnI2=d$lnI*d$lnI
lnI3=d$lnI*d$lnI*d$lnI
reg1= lm (lnY ~ lnL + lnM +lnK +lnK2+ lnK3 +lnI +lnI2+ lnI3, data=d )}
else{reg1= gam(lnY ~ lnL + lnM + s(lnK,lnI),data=d)}
#summary(reg1)
bl=reg1$coefficients[2]
bm=reg1$coefficients[3]
phi=reg1$fitted.values-bl*d$lnL-bm*d$lnM
panel2=pdata.frame(cbind(d,phi), c("id","year"))
#The second-stage estimation
sres2 = function (betak,data){
#the current value
w = data$phi-betak*data$lnK
#the lag value
wl = lag(w)
wl2 = wl*wl
wl3 = wl*wl*wl
balanced=subset(data.frame(cbind(data,w,wl,wl2,wl3)),year!=1)
reg = lm(w~wl+wl2+wl3,data=balanced)
summary(reg)$coefficients
res2=reg$residuals*reg$residuals
Sres2=sum(res2)
return(Sres2) }
#plot for visualizing the optimization
# test=matrix(NA, ncol=2,nrow=150)
# for (i in -50:100){
# test[i+50,1]=i/100+0.025
# test[i+50,2]=sres2(bk=(i/100+0.025),data=panel2)
# }
# plot(test[,1],test[,2])
b0=0
bnlm = optim(b0,sres2,data=panel2, method ="L-BFGS-B")
bk=c("lnK"=bnlm$par)
return(c(bl,bm,bk))
}
EstBoot = function(NBoot,RBoot,panelBoot){
#Panel Bootraping for estimating the standard errors of estimator
#"NBoot" number of individuals
#"RBoot" number of replications
#"panelBoot" the data set in question
N=NBoot
R=RBoot
d=panelBoot
ResBoot=Estimat(panel=d, GAM=F)
for (j in 1:R){
set.seed(1234+j)
random=as.integer(runif(N, min=1, max=N))
resampled=subset(d,id==random[1])
for (i in 2:N){
Resampled=subset(d,id==random[i])
resampled=rbind(resampled,Resampled)
}
ResBoot=cbind(ResBoot,Estimat(panel=resampled, GAM=F))
}
#write.csv(ResBoot, file = "ResBoot.csv")
SdBoot=matrix(NA,ncol=1,nrow=dim(ResBoot)[1])
for (i in 1:dim(ResBoot)[1]){
SdBoot[i]=sd(ResBoot[i,])
}
BootRes=cbind(ResBoot[,1],SdBoot,ResBoot[,1]/SdBoot)
return(BootRes)
}
EstProd = function (panel,GAM=F){
#productivity estimation (include the exogeneous shocks)
#"panel" should be an objet of class "data.frame" and variables are in LOG and named as "ln__"
#"GAM" use of mgcv for generalized addditive model estimation (slower)
d=panel
gam=GAM
est=Estimat(panel=d, GAM=gam)
productiv=d$lnY-est[1]*d$lnL-est[2]*d$lnM-est[3]*d$lnK
return(productiv)
}
|
#'@rdname particlefilter
#'@title Particle Filter
#'@description Particle filter with storage of trajectories
#'@param nparticles a number of particles
#'@param model a list representing a model, for instance as given by \code{\link{get_ar}}.
#'@param theta a parameter to give to the model functions
#'@param observations a matrix of observations of size datalength x dimension(observation)
#'@return A list with all the trajectories, stored in an array of dimension dimx x (T+1) x N,
#'and the associated normalized weights
#'@export
particlefilter <- function(nparticles, model, theta, observations){
  datalength <- nrow(observations)
  # create tree representation of the trajectories
  # (TreeClass is an Rcpp module class, presumably exposed by this package)
  Tree <- new(TreeClass, nparticles, 10*nparticles*model$dimension, model$dimension)
  # initialization: draw the initial particle cloud from the model prior
  model_precomputed <- model$precompute(theta)
  xparticles <- model$rinit(nparticles, theta, model$rinit_rand(nparticles, theta), model_precomputed)
  Tree$init(xparticles)
  #
  normweights <- rep(1/nparticles, nparticles)
  # step t > 1
  for (time in 1:datalength){
    # multinomial resampling of ancestor indices proportional to the weights
    ancestors <- multinomial_resampling_n(normweights, nparticles)
    # if no observation or first time, no resampling
    if (time == 1 || (time > 1 && is.na(observations[time-1,1]))){
      ancestors <- 1:nparticles
    }
    xparticles <- xparticles[,ancestors]
    # keep matrix shape when subsetting dropped a 1-row matrix to a vector
    if (is.null(dim(xparticles))) xparticles <- matrix(xparticles, nrow = model$dimension)
    xparticles <- model$rtransition(xparticles, theta, time, model$rtransition_rand(nparticles, theta), model_precomputed)
    #
    # weight by the measurement density; normalize on the log scale
    # (subtract the max) to avoid underflow
    logw <- model$dmeasurement(xparticles, theta, observations[time,], model_precomputed)
    maxlw <- max(logw)
    w <- exp(logw - maxlw)
    normweights <- w / sum(w)
    #
    Tree$update(xparticles, ancestors - 1)  # 0-based indices for the C++ tree
  }
  # reconstruct the full trajectory of each surviving particle from the tree
  trajectories <- array(dim = c(model$dimension, datalength + 1, nparticles))
  for (k in 0:(nparticles-1)){
    trajectories[,,k+1] <- Tree$get_path(k)
  }
  return(list(trajectories = trajectories, weights = normweights))
}
#'@title Particle Filter
#'@description Particle filter with storage of trajectories
#'@param nparticles a number of particles
#'@param model a list representing a model, for instance as given by \code{\link{get_ar}}.
#'@param theta a parameter to give to the model functions
#'@param observations a matrix of observations of size datalength x dimension(observation)
#'@return A list with all the trajectories, stored in an array of dimension dimx x (T+1) x N,
#'and the associated normalized weights
#'@export
particlefilter <- function(nparticles, model, theta, observations){
datalength <- nrow(observations)
# create tree representation of the trajectories
Tree <- new(TreeClass, nparticles, 10*nparticles*model$dimension, model$dimension)
# initialization
model_precomputed <- model$precompute(theta)
xparticles <- model$rinit(nparticles, theta, model$rinit_rand(nparticles, theta), model_precomputed)
Tree$init(xparticles)
#
normweights <- rep(1/nparticles, nparticles)
# step t > 1
for (time in 1:datalength){
ancestors <- multinomial_resampling_n(normweights, nparticles)
# if no observation or first time, no resampling
if (time == 1 || (time > 1 && is.na(observations[time-1,1]))){
ancestors <- 1:nparticles
}
xparticles <- xparticles[,ancestors]
if (is.null(dim(xparticles))) xparticles <- matrix(xparticles, nrow = model$dimension)
xparticles <- model$rtransition(xparticles, theta, time, model$rtransition_rand(nparticles, theta), model_precomputed)
#
logw <- model$dmeasurement(xparticles, theta, observations[time,], model_precomputed)
maxlw <- max(logw)
w <- exp(logw - maxlw)
normweights <- w / sum(w)
#
Tree$update(xparticles, ancestors - 1)
}
trajectories <- array(dim = c(model$dimension, datalength + 1, nparticles))
for (k in 0:(nparticles-1)){
trajectories[,,k+1] <- Tree$get_path(k)
}
return(list(trajectories = trajectories, weights = normweights))
} |
evaluate <- function(TrueLabelsPath, PredLabelsPath){
  "
  Script to evaluate the performance of the classifier.
  It returns multiple evaluation measures: the confusion matrix, median F1-score, F1-score for each class, accuracy, percentage of unlabeled, population size.
  The percentage of unlabeled cells is find by checking for cells that are labeled 'Unassigned', 'unassigned', 'Unknown', 'Nodexx', 'rand', or 'ambiguous'.
  Parameters
  ----------
  TrueLabelsPath: csv file with the true labels (format: one column, no index)
  PredLabelsPath: csv file with the predicted labels (format: one column, no index)
  Returns
  -------
  Conf: confusion matrix
  MedF1 : median F1-score
  F1 : F1-score per class
  Acc : accuracy
  PercUnl : percentage of unlabeled cells
  PopSize : number of cells per cell type
  "
  # import python package: sklearn.metrics
  library(reticulate)
  # use_python('/home/drizzle_zhang/tools/anaconda3/bin/python3', required = T)
  # NOTE(review): hard-coded, machine-specific Python path -- this will fail
  # on any other machine; consider making it configurable.
  use_python('/home/zy/tools/anaconda3/bin/python3', required = T)
  # py_config()
  py_module_available('sklearn')
  metrics <- import('sklearn.metrics')
  true_lab <- unlist(read.csv(TrueLabelsPath, stringsAsFactors = F))
  pred_lab <- unlist(read.csv(PredLabelsPath, stringsAsFactors = F))
  unique_true <- unlist(unique(true_lab))
  # unique_pred <- unlist(unique(pred_lab))
  # unique_all <- unique(c(unique_true,unique_pred))
  # Confusion matrix on the raw predictions (before collapsing Node labels)
  conf <- table(true_lab,pred_lab)
  pop_size <- rowSums(conf)
  # Collapse intermediate-node labels "Node<xx>" into a single "Node" class
  pred_lab = gsub('Node..','Node',pred_lab)
  conf_F1 <- table(true_lab,pred_lab)#,exclude = c('unassigned','Unassigned','Unknown','rand','Node','ambiguous'))
  F1 <- vector()
  sum_acc <- 0
  # Per-class F1 from the (possibly non-square) confusion matrix; classes
  # that were never predicted get F1 = 0.
  # NOTE(review): loops over 1:length(unique_true) while indexing rows of
  # conf_F1 -- assumes both have the same length and order; verify.
  for (i in c(1:length(unique_true))) {
    findLabel = colnames(conf_F1) == (row.names(conf_F1))[i]
    if (sum(findLabel) > 0) {
      prec <- conf_F1[i,findLabel] / colSums(conf_F1)[findLabel]
      rec <- conf_F1[i,findLabel] / rowSums(conf_F1)[i]
      if (prec == 0 || rec == 0){
        F1[i] = 0
      } else{
        F1[i] <- (2*prec*rec) / (prec + rec)
      }
      sum_acc <- sum_acc + conf_F1[i,findLabel]
    } else {
      F1[i] = 0
    }
  }
  names(F1) <- names(pop_size)
  med_F1 <- median(F1)
  mean_F1 <- mean(F1)
  acc <- sum_acc/sum(conf_F1)
  total <- length(pred_lab)
  # num_unlab <- sum(pred_lab == 'unassigned') + sum(pred_lab == 'Unassigned') + sum(pred_lab == 'rand') + sum(pred_lab == 'Unknown') + sum(pred_lab == 'Node') + sum(pred_lab == 'ambiguous')
  # Cells predicted with a label that never occurs among the true labels
  # count as unlabeled/rejected.
  num_unlab <- sum(!(pred_lab %in% unique_true))
  per_unlab <- num_unlab / total
  # Weighted macro F1 from sklearn, computed on the labeled cells only
  true_lab <- true_lab[(pred_lab %in% unique_true)]
  pred_lab <- pred_lab[(pred_lab %in% unique_true)]
  weighted.macro.F1 <- metrics$f1_score(true_lab, pred_lab, average = 'weighted')
  # metrics$f1_score(true_lab, pred_lab, average = 'macro')
  # metrics$accuracy_score(true_lab, pred_lab)
  result <- list(Conf = conf, MedF1 = med_F1, F1 = F1, Mean_F1 = mean_F1, Acc = acc,
                 WMean_F1 = weighted.macro.F1,
                 PercUnl = per_unlab, PopSize = pop_size)
  return(result)
}
| /scRef/codes/Benchmark/cross_validation/train4_test1/pbmcsca_10Xv3/evaluate.R | permissive | Drizzle-Zhang/bioinformatics | R | false | false | 3,073 | r | evaluate <- function(TrueLabelsPath, PredLabelsPath){
"
Script to evaluate the performance of the classifier.
It returns multiple evaluation measures: the confusion matrix, median F1-score, F1-score for each class, accuracy, percentage of unlabeled, population size.
The percentage of unlabeled cells is find by checking for cells that are labeled 'Unassigned', 'unassigned', 'Unknown', 'Nodexx', 'rand', or 'ambiguous'.
Parameters
----------
TrueLabelsPath: csv file with the true labels (format: one column, no index)
PredLabelsPath: csv file with the predicted labels (format: one column, no index)
Returns
-------
Conf: confusion matrix
MedF1 : median F1-score
F1 : F1-score per class
Acc : accuracy
PercUnl : percentage of unlabeled cells
PopSize : number of cells per cell type
"
# import python package: sklearn.metrics
library(reticulate)
# use_python('/home/drizzle_zhang/tools/anaconda3/bin/python3', required = T)
use_python('/home/zy/tools/anaconda3/bin/python3', required = T)
# py_config()
py_module_available('sklearn')
metrics <- import('sklearn.metrics')
true_lab <- unlist(read.csv(TrueLabelsPath, stringsAsFactors = F))
pred_lab <- unlist(read.csv(PredLabelsPath, stringsAsFactors = F))
unique_true <- unlist(unique(true_lab))
# unique_pred <- unlist(unique(pred_lab))
# unique_all <- unique(c(unique_true,unique_pred))
conf <- table(true_lab,pred_lab)
pop_size <- rowSums(conf)
pred_lab = gsub('Node..','Node',pred_lab)
conf_F1 <- table(true_lab,pred_lab)#,exclude = c('unassigned','Unassigned','Unknown','rand','Node','ambiguous'))
F1 <- vector()
sum_acc <- 0
for (i in c(1:length(unique_true))) {
findLabel = colnames(conf_F1) == (row.names(conf_F1))[i]
if (sum(findLabel) > 0) {
prec <- conf_F1[i,findLabel] / colSums(conf_F1)[findLabel]
rec <- conf_F1[i,findLabel] / rowSums(conf_F1)[i]
if (prec == 0 || rec == 0){
F1[i] = 0
} else{
F1[i] <- (2*prec*rec) / (prec + rec)
}
sum_acc <- sum_acc + conf_F1[i,findLabel]
} else {
F1[i] = 0
}
}
names(F1) <- names(pop_size)
med_F1 <- median(F1)
mean_F1 <- mean(F1)
acc <- sum_acc/sum(conf_F1)
total <- length(pred_lab)
# num_unlab <- sum(pred_lab == 'unassigned') + sum(pred_lab == 'Unassigned') + sum(pred_lab == 'rand') + sum(pred_lab == 'Unknown') + sum(pred_lab == 'Node') + sum(pred_lab == 'ambiguous')
num_unlab <- sum(!(pred_lab %in% unique_true))
per_unlab <- num_unlab / total
true_lab <- true_lab[(pred_lab %in% unique_true)]
pred_lab <- pred_lab[(pred_lab %in% unique_true)]
weighted.macro.F1 <- metrics$f1_score(true_lab, pred_lab, average = 'weighted')
# metrics$f1_score(true_lab, pred_lab, average = 'macro')
# metrics$accuracy_score(true_lab, pred_lab)
result <- list(Conf = conf, MedF1 = med_F1, F1 = F1, Mean_F1 = mean_F1, Acc = acc,
WMean_F1 = weighted.macro.F1,
PercUnl = per_unlab, PopSize = pop_size)
return(result)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mechanical.R
\name{mechanical}
\alias{mechanical}
\title{mechanical model: use an exponential function to describe the coefficients away from roads}
\usage{
mechanical(variabledf, y_var = c("day_value", "night_value",
"value_mean"), pop_var = "pop3k", distance_centre, roadtypes = 3,
buffers_in, buffers_out, training, test, nls2start = NA,
Road_varname = "ROAD", normalize = F)
}
\arguments{
\item{variabledf}{the dataframe containing predictors and dependent variable}
\item{y_var}{name of the dependent variable.}
\item{pop_var}{the name of an additional variable as a linear term, usually population within a buffer, a string.}
\item{Road_varname}{the name of variables containing road buffers, e.g. "ROAD"}
\item{distance_centre}{the distance to centre from each buffer. (b2-b1)/2 + b1}
\item{training}{the index for the rows used for training.}
\item{test}{the index for the rows used for testing.}
\item{nls2start}{the start value for nls2. if providing an nls2start, the nls2 from nls2 package is used. Details see nls2.}
\item{normalize}{if True, the road ring is normalised by the area (default is false)}
}
\value{
An object of nls
}
\description{
mechanical model: use an exponential function to describe the coefficients away from roads
}
\details{
This method used nls for modelling. This function also prints errormatrix, the exponential model; plot coefficient. The modelling and evaluation should be separated at a later stage, now putting together for exploration only.
}
| /globalLUR/globalLUR/man/mechanical.Rd | no_license | PJNation/globalLUR | R | false | true | 1,579 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mechanical.R
\name{mechanical}
\alias{mechanical}
\title{mechanical model: use an exponential function to describe the coefficients away from roads}
\usage{
mechanical(variabledf, y_var = c("day_value", "night_value",
"value_mean"), pop_var = "pop3k", distance_centre, roadtypes = 3,
buffers_in, buffers_out, training, test, nls2start = NA,
Road_varname = "ROAD", normalize = F)
}
\arguments{
\item{variabledf}{the dataframe containing predictors and dependent variable}
\item{y_var}{name of the dependent variable.}
\item{pop_var}{the name of an additional variable as a linear term, usually population within a buffer, a string.}
\item{Road_varname}{the name of variables containing road buffers, e.g. "ROAD"}
\item{distance_centre}{the distance to centre from each buffer. (b2-b1)/2 + b1}
\item{training}{the index for the rows used for training.}
\item{test}{the index for the rows used for testing.}
\item{nls2start}{the start value for nls2. if providing an nls2start, the nls2 from nls2 package is used. Details see nls2.}
\item{normalize}{if True, the road ring is normalised by the area (default is false)}
}
\value{
An object of nls
}
\description{
mechanical model: use an exponential function to describe the coefficients away from roads
}
\details{
This method uses nls for modelling. The function also prints the error matrix and the exponential model, and plots the coefficients. Modelling and evaluation should be separated at a later stage; they are currently combined for exploration only.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_RandomForest.R
\name{fit.rf}
\alias{fit.rf}
\title{Build Random Forest model}
\usage{
\method{fit}{rf}(x)
}
\arguments{
\item{x}{A training dataset with calculated Chemical Descriptors}
}
\value{
Returns a trained model ready to predict
}
\description{
Build Random Forest model
}
\examples{
\donttest{
rf <- fit.rf(training)}
}
| /man/fit.rf.Rd | no_license | RECETOX/Retip | R | false | true | 413 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_RandomForest.R
\name{fit.rf}
\alias{fit.rf}
\title{Build Random Forest model}
\usage{
\method{fit}{rf}(x)
}
\arguments{
\item{x}{A training dataset with calculated Chemical Descriptors}
}
\value{
Returns a trained model ready to predict
}
\description{
Build Random Forest model
}
\examples{
\donttest{
rf <- fit.rf(training)}
}
|
# phylostruct.rev: phylogenetic community structure vs. a null distribution.
# Modified version of picante::phylostruct() that adds an "other" null model
# in which null communities are drawn with com.simulator() (defined elsewhere
# in the project) instead of by shuffling the observed matrix with
# picante::randomizeMatrix().
#
# Args:
#   samp       community matrix (sites x species).
#   tree       phylo object; tip labels must match the species in `samp`.
#   comminmax  length-2 vector (min, max species richness) forwarded to
#              com.simulator() for the "other" null model; when not supplied,
#              the standard randomizeMatrix() null models are used.
#   env        environmental data; used only when metric == "sppregs".
#   metric     picante metric to compute: psv, psr, pse, psc or sppregs.
#   null.model randomization scheme ("other" = com.simulator simulation).
#   runs       number of null replicates.
#   it         swap iterations (only meaningful for independentswap /
#              trialswap; reported as NA for the other schemes).
#   alpha      two-sided level for the null quantile envelope.
#   fam        glm family forwarded to sppregs().
#
# Returns: a list with the observed mean metric, the null mean and quantiles,
# and a verbal classification ("underdispersed" / "overdispersed" / "random").
#
# NOTE(review): if `comminmax` IS supplied but null.model is not "other",
# neither inner branch runs and the function returns NULL invisibly --
# confirm this fall-through is intended.
phylostruct.rev <- function (samp, tree, comminmax=NULL, env = NULL, metric = c("psv", "psr", "pse", "psc", "sppregs"), null.model = c("frequency", "richness", "independentswap", "trialswap", "other"), runs = 100, it = 1000, alpha = 0.05, fam = "binomial")
{
metric <- match.arg(metric)
null.model <- match.arg(null.model)
# Branch 1: species regressions -- nulls are per-coefficient correlations,
# summarized column-wise rather than as a single mean metric.
if (metric == "sppregs") {
nulls <- t(replicate(runs, sppregs(randomizeMatrix(samp, null.model = null.model, iterations = it), env, tree, fam = fam)$correlations))
obs <- sppregs(samp, env, tree, fam = fam)$correlations
mean.null <- apply(nulls, 2, mean)
quantiles.null <- t(apply(nulls, 2, quantile, probs = c(alpha/2, 1 - (alpha/2))))
# `it` only applies to the two swap algorithms; report NA otherwise.
if ((null.model != "independentswap") && (null.model != "trialswap")) {
it = NA
}
return(list(metric = metric, null.model = null.model, runs = runs, it = it, obs = obs, mean.null = mean.null, quantiles.null = quantiles.null, phylo.structure = NULL, nulls = nulls))
}
else {
# Branch 2: scalar metrics (psv/psr/pse/psc); classify the observed mean
# against the two-sided null envelope.
if (missing(comminmax)) {
# Standard picante nulls: shuffle the observed matrix `runs` times.
nulls <- switch(metric, psv = replicate(runs, mean(psv(as.matrix(randomizeMatrix(samp, null.model = null.model, iterations = it)), tree, compute.var = FALSE)[, 1], na.rm = TRUE)), psr = replicate(runs, mean(psr(as.matrix(randomizeMatrix(samp, null.model = null.model, iterations = it)), tree, compute.var = FALSE)[, 1], na.rm = TRUE)), pse = replicate(runs, mean(pse(as.matrix(randomizeMatrix(samp, null.model = null.model, iterations = it)), tree)[, 1], na.rm = TRUE)), psc = replicate(runs, mean(psc(as.matrix(randomizeMatrix(samp, null.model = null.model, iterations = it)), tree)[, 1], na.rm = TRUE)))
quantiles.null <- quantile(nulls, probs = c(alpha/2, 1 - (alpha/2)))
mean.null <- mean(nulls)
mean.obs <- switch(metric, psv = mean(psv(samp, tree, compute.var = FALSE)[, 1], na.rm = TRUE), psr = mean(psr(samp, tree, compute.var = FALSE)[, 1], na.rm = TRUE), pse = mean(pse(samp, tree)[, 1], na.rm = TRUE), psc = mean(psc(samp, tree)[, 1], na.rm = TRUE))
# Observed mean below the lower / above the upper null quantile ->
# under-/over-dispersion; otherwise indistinguishable from random.
if (mean.obs <= quantiles.null[1]) {
phylo.structure = "underdispersed"
}
else {
if (mean.obs >= quantiles.null[2]) {
phylo.structure = "overdispersed"
}
else {
phylo.structure = "random"
}
}
if ((null.model != "independentswap") && (null.model != "trialswap")) {
it = NA
}
return(list(metric = metric, null.model = null.model, runs = runs, it = it, mean.obs = mean.obs, mean.null = mean.null, quantiles.null = quantiles.null, phylo.structure = phylo.structure, null.means = nulls))
}
else if(null.model == "other") {
# Custom null: draw whole communities with com.simulator() using the
# tree's species pool and the requested richness range.
Species <- unique(tree$tip.label)
#null.coms <- com.simulator(comminmax[1], comminmax[2], nrow(samp), Species)
nulls <- switch(metric, psv = replicate(runs, mean(psv(as.matrix(com.simulator(comminmax[1], comminmax[2], nrow(samp), Species)), tree, compute.var = FALSE)[, 1], na.rm = TRUE)), psr = replicate(runs, mean(psr(as.matrix(com.simulator(comminmax[1], comminmax[2], nrow(samp), Species)), tree, compute.var = FALSE)[, 1], na.rm = TRUE)), pse = replicate(runs, mean(pse(as.matrix(com.simulator(comminmax[1], comminmax[2], nrow(samp), Species)), tree)[, 1], na.rm = TRUE)), psc = replicate(runs, mean(psc(as.matrix(com.simulator(comminmax[1], comminmax[2], nrow(samp), Species)), tree)[, 1], na.rm = TRUE)))
quantiles.null <- quantile(nulls, probs = c(alpha/2, 1 - (alpha/2)))
mean.null <- mean(nulls)
mean.obs <- switch(metric, psv = mean(psv(samp, tree, compute.var = FALSE)[, 1], na.rm = TRUE), psr = mean(psr(samp, tree, compute.var = FALSE)[, 1], na.rm = TRUE), pse = mean(pse(samp, tree)[, 1], na.rm = TRUE), psc = mean(psc(samp, tree)[, 1], na.rm = TRUE))
if (mean.obs <= quantiles.null[1]) {
phylo.structure = "underdispersed"
}
else {
if (mean.obs >= quantiles.null[2]) {
phylo.structure = "overdispersed"
}
else {
phylo.structure = "random"
}
}
# Always NA here: "other" is neither swap algorithm.
if ((null.model != "independentswap") && (null.model != "trialswap")) {
it = NA
}
return(list(metric = metric, null.model = null.model, runs = runs, it = it, mean.obs = mean.obs, mean.null = mean.null, quantiles.null = quantiles.null, phylo.structure = phylo.structure, null.means = nulls))
}
}
}
# Example invocation (comm = community matrix, brach = Brachymeles tree, both
# expected in the workspace): "other" null model with simulated communities
# of 2-5 species per site.
phylostruct.rev(comm, brach, c(2,5), env=NULL, metric="psv", null.model="other", runs=1000, alpha= 0.05)
phylostruct.rev(comm, brach, env=NULL, metric="psv", null.model="richness", runs=100, alpha= 0.05) | /Google Drive/Brachymeles_CA/r_code/phylostruct_revised.R | no_license | nahuron/thesis | R | false | false | 5,060 | r | phylostruct.rev <- function (samp, tree, comminmax=NULL, env = NULL, metric = c("psv", "psr", "pse", "psc", "sppregs"), null.model = c("frequency", "richness", "independentswap", "trialswap", "other"), runs = 100, it = 1000, alpha = 0.05, fam = "binomial")
{
metric <- match.arg(metric)
null.model <- match.arg(null.model)
if (metric == "sppregs") {
nulls <- t(replicate(runs, sppregs(randomizeMatrix(samp, null.model = null.model, iterations = it), env, tree, fam = fam)$correlations))
obs <- sppregs(samp, env, tree, fam = fam)$correlations
mean.null <- apply(nulls, 2, mean)
quantiles.null <- t(apply(nulls, 2, quantile, probs = c(alpha/2, 1 - (alpha/2))))
if ((null.model != "independentswap") && (null.model != "trialswap")) {
it = NA
}
return(list(metric = metric, null.model = null.model, runs = runs, it = it, obs = obs, mean.null = mean.null, quantiles.null = quantiles.null, phylo.structure = NULL, nulls = nulls))
}
else {
if (missing(comminmax)) {
nulls <- switch(metric, psv = replicate(runs, mean(psv(as.matrix(randomizeMatrix(samp, null.model = null.model, iterations = it)), tree, compute.var = FALSE)[, 1], na.rm = TRUE)), psr = replicate(runs, mean(psr(as.matrix(randomizeMatrix(samp, null.model = null.model, iterations = it)), tree, compute.var = FALSE)[, 1], na.rm = TRUE)), pse = replicate(runs, mean(pse(as.matrix(randomizeMatrix(samp, null.model = null.model, iterations = it)), tree)[, 1], na.rm = TRUE)), psc = replicate(runs, mean(psc(as.matrix(randomizeMatrix(samp, null.model = null.model, iterations = it)), tree)[, 1], na.rm = TRUE)))
quantiles.null <- quantile(nulls, probs = c(alpha/2, 1 - (alpha/2)))
mean.null <- mean(nulls)
mean.obs <- switch(metric, psv = mean(psv(samp, tree, compute.var = FALSE)[, 1], na.rm = TRUE), psr = mean(psr(samp, tree, compute.var = FALSE)[, 1], na.rm = TRUE), pse = mean(pse(samp, tree)[, 1], na.rm = TRUE), psc = mean(psc(samp, tree)[, 1], na.rm = TRUE))
if (mean.obs <= quantiles.null[1]) {
phylo.structure = "underdispersed"
}
else {
if (mean.obs >= quantiles.null[2]) {
phylo.structure = "overdispersed"
}
else {
phylo.structure = "random"
}
}
if ((null.model != "independentswap") && (null.model != "trialswap")) {
it = NA
}
return(list(metric = metric, null.model = null.model, runs = runs, it = it, mean.obs = mean.obs, mean.null = mean.null, quantiles.null = quantiles.null, phylo.structure = phylo.structure, null.means = nulls))
}
else if(null.model == "other") {
Species <- unique(tree$tip.label)
#null.coms <- com.simulator(comminmax[1], comminmax[2], nrow(samp), Species)
nulls <- switch(metric, psv = replicate(runs, mean(psv(as.matrix(com.simulator(comminmax[1], comminmax[2], nrow(samp), Species)), tree, compute.var = FALSE)[, 1], na.rm = TRUE)), psr = replicate(runs, mean(psr(as.matrix(com.simulator(comminmax[1], comminmax[2], nrow(samp), Species)), tree, compute.var = FALSE)[, 1], na.rm = TRUE)), pse = replicate(runs, mean(pse(as.matrix(com.simulator(comminmax[1], comminmax[2], nrow(samp), Species)), tree)[, 1], na.rm = TRUE)), psc = replicate(runs, mean(psc(as.matrix(com.simulator(comminmax[1], comminmax[2], nrow(samp), Species)), tree)[, 1], na.rm = TRUE)))
quantiles.null <- quantile(nulls, probs = c(alpha/2, 1 - (alpha/2)))
mean.null <- mean(nulls)
mean.obs <- switch(metric, psv = mean(psv(samp, tree, compute.var = FALSE)[, 1], na.rm = TRUE), psr = mean(psr(samp, tree, compute.var = FALSE)[, 1], na.rm = TRUE), pse = mean(pse(samp, tree)[, 1], na.rm = TRUE), psc = mean(psc(samp, tree)[, 1], na.rm = TRUE))
if (mean.obs <= quantiles.null[1]) {
phylo.structure = "underdispersed"
}
else {
if (mean.obs >= quantiles.null[2]) {
phylo.structure = "overdispersed"
}
else {
phylo.structure = "random"
}
}
if ((null.model != "independentswap") && (null.model != "trialswap")) {
it = NA
}
return(list(metric = metric, null.model = null.model, runs = runs, it = it, mean.obs = mean.obs, mean.null = mean.null, quantiles.null = quantiles.null, phylo.structure = phylo.structure, null.means = nulls))
}
}
}
phylostruct.rev(comm, brach, c(2,5), env=NULL, metric="psv", null.model="other", runs=1000, alpha= 0.05)
phylostruct.rev(comm, brach, env=NULL, metric="psv", null.model="richness", runs=100, alpha= 0.05) |
# Project : Getting and Cleaning Data
# Name    : Linyan Le
# Date    : 2021-08-19
# Purpose : download the UCI HAR dataset, merge train/test, keep mean/std
#           measurements, and write a tidy per-subject/per-activity summary.
# 0. Preparation
# 0.1 Load packages
library(plyr)
# 0.2 Download the dataset
if(!file.exists("./data")){dir.create("./data")}
fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileurl, destfile = "./data/projectdataset.zip")
# 0.3 Unzip the dataset
unzip(zipfile = "./data/projectdataset.zip", exdir = "./data")
# 1. Merges the training and the test sets to create one data set.
# 1.1 Reading files
# 1.1.1 Reading training datasets
x_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
# 1.1.2 Reading test datasets
x_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
# 1.1.3 Reading feature vector (column names for the X_* measurement files)
features <- read.table("./data/UCI HAR Dataset/features.txt")
# 1.1.4 Reading activity labels (activityID -> descriptive name lookup)
activityLabels = read.table("./data/UCI HAR Dataset/activity_labels.txt")
# 1.2 Assigning columns variable names
colnames(x_train) <- features[,2]
colnames(y_train) <- "activityID"
colnames(subject_train) <- "subjectID"
colnames(x_test) <- features[,2]
colnames(y_test) <- "activityID"
colnames(subject_test) <- "subjectID"
colnames(activityLabels) <- c("activityID", "activityType")
# 1.3 Merging all datasets into one set (column-bind per split, then stack)
alltrain <- cbind(y_train, subject_train, x_train)
alltest <- cbind(y_test, subject_test, x_test)
allinone <- rbind(alltrain, alltest)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 2.1 Reading column names
colNames <- colnames(allinone)
# 2.2 Create vector for defining ID, mean, and sd
# NOTE(review): grepl() treats the patterns as regular expressions, so the
# dots in "mean.." and "std..." match ANY character (e.g. "meanFreq" columns
# also match). If only mean()/std() columns are intended, the pattern should
# escape the parentheses, e.g. "mean\\(\\)" -- confirm intent.
mean_and_std <- (grepl("activityID", colNames) |
                   grepl("subjectID", colNames) |
                   grepl("mean..", colNames) |
                   grepl("std...", colNames)
)
# 2.3 Making necessary subset
setforMeanandStd <- allinone[ , mean_and_std == TRUE]
# 3. Uses descriptive activity names to name the activities in the data set
# NOTE(review): setWithActivityNames is built here but step 5 aggregates
# setforMeanandStd instead, so the written tidy set keeps the numeric
# activityID rather than the descriptive activityType -- verify this is
# intended.
setWithActivityNames <- merge(setforMeanandStd, activityLabels,
                              by = "activityID",
                              all.x = TRUE)
# 4. Appropriately labels the data set with descriptive variable names
# see 1.3, 2.2, 2.3
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject
# 5.1 Making a second tidy data set (mean of every column per subject/activity)
tidyDataSet <- aggregate(. ~ subjectID + activityID, data = setforMeanandStd, mean)
tidyDataSet <- tidyDataSet[order(tidyDataSet$subjectID, tidyDataSet$activityID), ]
# 5.2 Writing second tidy data set into a txt file
write.table(tidyDataSet, "tidyDataSet.txt", row.names = FALSE)
| /Getting and Cleaning Data/Week4/Assignment/run_analysis.R | no_license | LiamChim/datasciencecoursera | R | false | false | 3,007 | r | # Project : Getting and Cleaning Data
# Name : Linyan Le
# Date : 2021-08-19
# 0. Preparation
# 0.1 Load packages
library(plyr)
# 0.2 Download the dataset
if(!file.exists("./data")){dir.create("./data")}
fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileurl, destfile = "./data/projectdataset.zip")
# 0.3 Unzip the dataset
unzip(zipfile = "./data/projectdataset.zip", exdir = "./data")
# 1. Merges the training and the test sets to create one data set.
# 1.1 Reading files
# 1.1.1 Reading training datasets
x_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
# 1.1.2 Reading test datasets
x_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
# 1.1.3 Reading feature vector
features <- read.table("./data/UCI HAR Dataset/features.txt")
# 1.1.4 Reading activity labels
activityLabels = read.table("./data/UCI HAR Dataset/activity_labels.txt")
# 1.2 Assigning columns variable names
colnames(x_train) <- features[,2]
colnames(y_train) <- "activityID"
colnames(subject_train) <- "subjectID"
colnames(x_test) <- features[,2]
colnames(y_test) <- "activityID"
colnames(subject_test) <- "subjectID"
colnames(activityLabels) <- c("activityID", "activityType")
# 1.3 Merging all datasets into one set
alltrain <- cbind(y_train, subject_train, x_train)
alltest <- cbind(y_test, subject_test, x_test)
allinone <- rbind(alltrain, alltest)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 2.1 Reading column names
colNames <- colnames(allinone)
# 2.2 Create vector for defining ID, mean, and sd
mean_and_std <- (grepl("activityID", colNames) |
grepl("subjectID", colNames) |
grepl("mean..", colNames) |
grepl("std...", colNames)
)
# 2.3 Making necessary subset
setforMeanandStd <- allinone[ , mean_and_std == TRUE]
# 3. Uses descriptive activity names to name the activities in the data set
setWithActivityNames <- merge(setforMeanandStd, activityLabels,
by = "activityID",
all.x = TRUE)
# 4. Appropriately labels the data set with descriptive variable names
# see 1.3, 2.2, 2.3
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject
# 5.1 Making a second tidy data set
tidyDataSet <- aggregate(. ~ subjectID + activityID, data = setforMeanandStd, mean)
tidyDataSet <- tidyDataSet[order(tidyDataSet$subjectID, tidyDataSet$activityID), ]
# 5.2 Writing second tidy data set into a txt file
write.table(tidyDataSet, "tidyDataSet.txt", row.names = FALSE)
|
library(dplyr)
# Simulation script: 10,000 Monte Carlo trials under H0 for scenario 30 with
# a 5% dropout rate and single-imputation ("sing") analysis, using the "fm"
# confidence-interval method (presumably Farrington-Manning -- confirm
# against the nibinom package docs). Each trial simulates full data, computes
# the full-data CI, then imposes missingness under several mechanisms and
# re-analyses. Results are summarized at the bottom via h0.sing.sum().
ss.bounds <- readRDS("ss.bounds.rds")
alpha <- 0.025
method <- 'fm'
scenario <- 30
param <- 1          # NOTE(review): assigned but never used in this script.
anal_type <- "sing"
# Sample-size / design row for this method & scenario. Inside filter(), the
# bare `method` refers to the ss.bounds COLUMN (data masking), which is why
# the literal "fm" is repeated here instead of the global variable.
ss <- ss.bounds%>%
  dplyr::filter(method == "fm", scenario.id == scenario)
do_val <- 0.05      # dropout rate
x1 <- parallel::mclapply(X = 1:10000,
                         mc.cores = parallel::detectCores() - 1,
                         FUN= function(x) {
                           # Libraries are loaded inside the worker because
                           # mclapply forks do not share the parent's search
                           # path guarantees across platforms.
                           library(tidyr, warn.conflicts = F, quietly = T)
                           library(dplyr, warn.conflicts = F, quietly = T)
                           library(purrr, warn.conflicts = F, quietly = T)
                           library(reshape2, warn.conflicts = F, quietly = T)
                           library(mice, warn.conflicts = F, quietly = T)
                           library(MASS, warn.conflicts = F, quietly = T)
                           library(nibinom)
                           # Seed unique per (scenario, replicate) pair.
                           set.seed(10000*scenario + x)
                           #generate full data with desired correlation structure
                           dt0 <- sim_cont(p_C = ss$p_C, p_T = ss$p_C - ss$M2, n_arm = ss$n.arm,
                                           mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3, b1 = 0.1, b2 = -0.01)
                           # Full-data (no missingness) confidence interval.
                           ci.full <- dt0%>%fm_ci(ss$M2,'y')
                           #define missingness parameters and do rates
                           m.param <- mpars(do = do_val, atype = anal_type)
                           #impose missing values and perform analysis, one row
                           #of results per missingness mechanism in m.param
                           ci.miss <- m.param%>%
                             dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
                                                                 miss_gen_an, dt = dt0, do = do_val,
                                                                 ci_method = fm_ci,
                                                                 sing_anal = T,
                                                                 mice_anal = F,
                                                                 m2 = ss$M2, seed = 10000*scenario + x,
                                                                 method = method,
                                                                 alpha = alpha
                             ))%>%
                             dplyr::select(missing, results)%>%
                             dplyr::mutate(scenario.id = ss$scenario.id,
                                           p_C = ss$p_C,
                                           M2 = ss$M2,
                                           type = 't.H0',
                                           do = do_val,
                                           sim.id = x)
                           # Return both the full-data and missing-data CIs.
                           ci.all <- list(ci.full, ci.miss)%>%purrr::set_names(c("ci.full","ci.miss"))
                           return(ci.all)
                         })
#to summarize type-I error and mean relative bias from the simulated data
source('funs/h0.sing.sum.R')
h0.sing.sum(x1)
| /sim_pgms/fm/do5/2xcontH0_sc30_do5_sing.R | no_license | yuliasidi/nibinom_apply | R | false | false | 2,230 | r | library(dplyr)
ss.bounds <- readRDS("ss.bounds.rds")
alpha <- 0.025
method <- 'fm'
scenario <- 30
param <- 1
anal_type <- "sing"
ss <- ss.bounds%>%
dplyr::filter(method == "fm", scenario.id == scenario)
do_val <- 0.05
x1 <- parallel::mclapply(X = 1:10000,
mc.cores = parallel::detectCores() - 1,
FUN= function(x) {
library(tidyr, warn.conflicts = F, quietly = T)
library(dplyr, warn.conflicts = F, quietly = T)
library(purrr, warn.conflicts = F, quietly = T)
library(reshape2, warn.conflicts = F, quietly = T)
library(mice, warn.conflicts = F, quietly = T)
library(MASS, warn.conflicts = F, quietly = T)
library(nibinom)
set.seed(10000*scenario + x)
#generate full data with desired correlation structure
dt0 <- sim_cont(p_C = ss$p_C, p_T = ss$p_C - ss$M2, n_arm = ss$n.arm,
mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3, b1 = 0.1, b2 = -0.01)
ci.full <- dt0%>%fm_ci(ss$M2,'y')
#define missingness parameters and do rates
m.param <- mpars(do = do_val, atype = anal_type)
#impose missing values and perform analysis
ci.miss <- m.param%>%
dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
miss_gen_an, dt = dt0, do = do_val,
ci_method = fm_ci,
sing_anal = T,
mice_anal = F,
m2 = ss$M2, seed = 10000*scenario + x,
method = method,
alpha = alpha
))%>%
dplyr::select(missing, results)%>%
dplyr::mutate(scenario.id = ss$scenario.id,
p_C = ss$p_C,
M2 = ss$M2,
type = 't.H0',
do = do_val,
sim.id = x)
ci.all <- list(ci.full, ci.miss)%>%purrr::set_names(c("ci.full","ci.miss"))
return(ci.all)
})
#to summarize type-I error and mean relative bias from the simulated data
source('funs/h0.sing.sum.R')
h0.sing.sum(x1)
|
# Build a JSON description of the columns of the VW_RetailDataset view:
# character columns are listed as "Dimension"s and all other (numeric)
# columns as "Measure"s, padded together into a single JSON array.
#
# Returns: a JSON string where each element has a "Dimension" and/or a
# "Measure" field (rbind.fill pads the shorter list with NA).
# NOTE(review): connection credentials are hard-coded; consider moving them
# to configuration.
rmysql_doublelist2<-function(){
  # Connect to SQL Server (the commented-out line is the old MySQL target).
  #mydb = dbConnect(MySQL(), user='appuser', password='appuser123', dbname='mstore', host='192.168.2.45');
  mydb = dbConnect(SQLServer(),"192.168.2.113/MicrosoftBIRetailDemo;user=appuser;password=appuser123");
  # Always release the result set and the connection, even on error.
  rs <- NULL
  on.exit({
    if (!is.null(rs)) dbClearResult(rs)
    dbDisconnect(mydb)
  }, add = TRUE)
  rs = dbSendQuery(mydb, "select * from VW_RetailDataset");
  # Fetch a single row: only the column types are needed, not the data.
  data = fetch(rs, n=1);
  preData<-sapply(data,typeof);
  preData.df=data.frame(preData);
  # Character columns -> dimensions.
  Dimension <- subset(preData.df, preData== "character");
  preDim <- data.frame(row.names(Dimension));
  names(preDim)[1] <- "Dimension";
  Dim <- toJSON(preDim)
  # Non-character columns -> measures.
  Measure <- subset(preData.df, preData!= "character");
  preMeasure <- data.frame(row.names(Measure));
  names(preMeasure)[1] <- "Measure";
  Measure <- toJSON(preMeasure)
  # Round-trip through JSON to normalise both frames, then pad-bind them.
  Dim2<-fromJSON(Dim);
  Measure2<-fromJSON(Measure);
  merged_df <- rbind.fill(Dim2,Measure2);  # was `merge`, which shadows base::merge
  mergedJSON <- toJSON(merged_df);         # fixed typo `meregeJSON`
  return(mergedJSON)
} | /R/rmysql_doublelist2.R | no_license | stevensuting/demo | R | false | false | 876 | r | rmysql_doublelist2<-function(){
#mydb = dbConnect(MySQL(), user='appuser', password='appuser123', dbname='mstore', host='192.168.2.45');
mydb = dbConnect(SQLServer(),"192.168.2.113/MicrosoftBIRetailDemo;user=appuser;password=appuser123");
rs = dbSendQuery(mydb, "select * from VW_RetailDataset");
#Pulls only 1st row
data = fetch(rs, n=1);
preData<-sapply(data,typeof);
preData.df=data.frame(preData);
Dimension <- subset(preData.df, preData== "character");
preDim <- data.frame(row.names(Dimension));
names(preDim)[1] <-paste("Dimension");
Dim <- toJSON(preDim)
Measure <- subset(preData.df, preData!= "character");
preMeasure <- data.frame(row.names(Measure));
names(preMeasure)[1] <-paste("Measure");
Measure <- toJSON(preMeasure)
Dim2<-fromJSON(Dim);
Measure2<-fromJSON(Measure);
merge <- rbind.fill(Dim2,Measure2);
meregeJSON<- toJSON(merge);
return(meregeJSON)
} |
#**INITIAL STEPS**
#####In Excel, remove the symbol ## from the names of the table and rename the samples according to the code used in the gradient, e.g. GRA_S10_D_F_A10
library(stats)
library(base)
library(dplyr)
library(dplyr)   # NOTE(review): dplyr is loaded twice; the second call is redundant.
library(tidyr)
library(knitr)
library(PMCMR)
library(vegan)
library(betapart)
library(stringr)
library(permute)
library(lattice)
library(ecodist)
library(ade4)
library(ggplot2)
library(Imap)
#**TABLES AND COMMUNITY MATRIXES**
#####open the table whose column names include Region and habitat parameters
s2_raw_all <- read.table("../genetic/Data_in/Collembola/s2_raw_all_Collembola_threshold.txt", sep = ",", header=TRUE)
dim(s2_raw_all)
#**Haplotype table**
#####keep only the name (haplotype), sample and taxa columns, plus the threshold column (dropped again below)
s2_raw_all[,c(1:52,68)]->s2_raw
dim(s2_raw) ##51 samples = 51 plus 1 neg (the second neg from DOM_REPS is not there because all 0)
colnames(s2_raw)
#####apply the conservative threshold (a binary column: keep rows flagged "1")
s2_raw[which(s2_raw$conservative_threshold == "1"),]->s2_raw_threshold
s2_raw_threshold [,1:52]->s2_raw_threshold_h ##remove threshold col
dim(s2_raw_threshold_h)
colnames(s2_raw_threshold_h)
#**Clustering-level tables**
# NOTE(review): the seven subsets below (0.005 ... GMYC_0.029) are produced by
# the SAME filter and the SAME column range c(1:66); only the object name
# differs. The clustering level itself is selected later through the
# limite0.005 / limite0.015 / ... columns carried inside each table.
#####keep only names (haplotypes), samples, taxa and clustering-level columns (plus the threshold column)
s2_raw_all[,c(1:66,68)]->s2_raw
dim(s2_raw) ##49 samples = 48 plus 1 neg (the second neg from DOM_REPS is not there because all 0)
colnames(s2_raw)
##**Applying the conservative threshold (this is a binary column)_0.005**
s2_raw[which(s2_raw$conservative_threshold == "1"),]->s2_raw_threshold
s2_raw_threshold [,1:66]->s2_raw_threshold0.005 ##remove threshold col
dim(s2_raw_threshold0.005)
colnames(s2_raw_threshold0.005)
##**Applying the conservative threshold (this is a binary column)_0.015**
s2_raw[which(s2_raw$conservative_threshold == "1"),]->s2_raw_threshold
s2_raw_threshold [,1:66]->s2_raw_threshold0.015 ##remove threshold col
dim(s2_raw_threshold0.015)
colnames(s2_raw_threshold0.015)
##**Applying the conservative threshold (this is a binary column)_0.020**
s2_raw[which(s2_raw$conservative_threshold == "1"),]->s2_raw_threshold
s2_raw_threshold [,1:66]->s2_raw_threshold0.02 ##remove threshold col
dim(s2_raw_threshold0.02)
colnames(s2_raw_threshold0.02)
##**Applying the conservative threshold (this is a binary column)_0.03**
s2_raw[which(s2_raw$conservative_threshold == "1"),]->s2_raw_threshold
s2_raw_threshold [,1:66]->s2_raw_threshold0.03 ##remove threshold col
dim(s2_raw_threshold0.03)
colnames(s2_raw_threshold0.03)
##**Applying the conservative threshold (this is a binary column)_0.05**
s2_raw[which(s2_raw$conservative_threshold == "1"),]->s2_raw_threshold
s2_raw_threshold [,1:66]->s2_raw_threshold0.05 ##remove threshold col
dim(s2_raw_threshold0.05)
colnames(s2_raw_threshold0.05)
##**Applying the conservative threshold (this is a binary column)_0.075**
s2_raw[which(s2_raw$conservative_threshold == "1"),]->s2_raw_threshold
s2_raw_threshold [,1:66]->s2_raw_threshold0.075 ##remove threshold col
dim(s2_raw_threshold0.075)
colnames(s2_raw_threshold0.075)
##**Applying the conservative threshold (this is a binary column)GMYC_0.029**
s2_raw[which(s2_raw$conservative_threshold == "1"),]->s2_raw_threshold
s2_raw_threshold [,1:66]->s2_raw_threshold0.029 ##remove threshold col
dim(s2_raw_threshold0.029)
colnames(s2_raw_threshold0.029)
#
#**Haplotype level**
#####(disabled) drop occurrences with fewer than 4 reads per library (same criterion as denoising)
#####s2_raw_threshold->s2_f4_with_abundance_h
#####s2_f4_with_abundance_h[s2_f4_with_abundance_h<4]<-0 ##2 warnings corresponding to the name and taxa columns
##**transform into a presence/absence table**
s2_raw_threshold_h->s2_f4_h # NOTE (Nancy): this is the Collembola subset
s2_f4_h[s2_f4_h>1]<-1 ##2 warnings corresponding to the name and taxa columns
##**check whether any row has no presences at all**
s2_f4_h[,2:52]->data_h # NOTE (Nancy): changed the range from 2:50 to 1:52
rowSums(data_h)
length(which(rowSums(data_h)!=0))
length(which(rowSums(data_h)==0))
##**Collembola: community matrix (samples in rows, haplotypes in columns)**
t(s2_f4_h)->t_s2_f4_h ##transpose
t_s2_f4_h[2:52,]->community_Collembola_h # NOTE (Nancy): this range matters -- it must match the one used in s2_f4[,2:52]->data above
colnames(community_Collembola_h)<-t_s2_f4_h[1,]
as.data.frame(community_Collembola_h)->community_Collembola_h ##transposed table including col and row names
####community_Acari[-49,]->community_Collembola ##removing neg
dim(community_Collembola_h)
community_Collembola_h[order(row.names(community_Collembola_h)),]->community_Collembola_h ##order samples
# Round-trip through disk to normalise the data-frame format (workaround).
write.table (community_Collembola_h, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola_h.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola_h.txt")->community_Collembola_h
####haplotype submatrix for the West side of Nevado de Toluca. NOTE (Nancy): build tables restricted to West-side localities within Nevado de Toluca (samples carry "_W_" in their name)
dim(community_Collembola_h)
community_Collembola_h[which(str_extract (row.names(community_Collembola_h), "_W_") %in% "_W_"),]->community_Collembola_h_West
dim(community_Collembola_h_West)
community_Collembola_h_West[,which(colSums(community_Collembola_h_West)!=0)]->community_Collembola_h_West ##remove empty (all-zero) columns
dim(community_Collembola_h_West)
write.table (community_Collembola_h_West, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola_h_West.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola_h_West.txt")->community_Collembola_h_West
#
####loop to build the new matrix collapsing haplotypes by OTU membership, i.e. one submatrix per clustering limit
#**Collembola MATRIX_LIMITE_0.005**
unique(s2_raw_threshold0.005$limite0.005)->levels_limite0.005
data.frame()->s2_raw_Collembola_limite0.005
# For each OTU (limite0.005 level): sum the reads of its member haplotypes
# across samples and keep the first haplotype's name as the OTU row name.
# NOTE(review): `sum` shadows base::sum inside the loop, and growing the
# result with rbind() is O(n^2) -- works, but worth refactoring.
for (i in 1:length(unique (s2_raw_threshold0.005$limite0.005)))
{
levels_limite0.005[i]->level
s2_raw_threshold0.005[which(s2_raw_threshold0.005$limite0.005==level),]->subcom_level_names
subcom_level_names[,c(2:52)]->subcom_level #delete names,
colSums(subcom_level)->sum
as.data.frame(sum)->sum
t(sum)->sum
row.names(sum)<-subcom_level_names[1,1] #keep the name of the first haplotype
rbind(s2_raw_Collembola_limite0.005,sum)->s2_raw_Collembola_limite0.005
}
#####(disabled) drop occurrences with fewer than 4 reads per library (same criterion as denoising)
#####s2_raw_Collembola_limite0.005->s2_f4_with_abundance_Collembola_limite0.005
#####s2_f4_with_abundance_Collembola_limite0.005[s2_f4_with_abundance_Collembola_limite0.005<4]<-0 ##2 warnings corresponding to the name and taxa columns
##**transform into a presence/absence table**
s2_raw_Collembola_limite0.005->s2_raw_Collembola_limite0.005
s2_raw_Collembola_limite0.005[s2_raw_Collembola_limite0.005>1]<-1 ##transform in present/absence table
##**transform in present/absence table**
#####s2_f4_with_abundance->s2_f4 # NOTE (Nancy): this is the Collembola subset
#####s2_f4[s2_f4>1]<-1 ##2 warnings corresponding to the name and taxa columns
##**check whether any row has no presences at all**
s2_raw_Collembola_limite0.005[,1:51]->data # NOTE (Nancy): changed 2:50 to 1:51 but it did not work; reverted to the 2:50 version
rowSums(data)
length(which(rowSums(data)!=0))
length(which(rowSums(data)==0))
##**Community matrixes (samples in rows and haplotypes/OTUs in cols)**
##**Collembola**
t(s2_raw_Collembola_limite0.005)->t_s2_f4_Collembola_limite0.005 ##transpose
t_s2_f4_Collembola_limite0.005[1:51,]->community_Collembola_limite0.005 # NOTE (Nancy): this range matters -- it must match the one used in s2_f4[,2:52]->data
# NOTE(review): this assigns colnames to the FULL transposed matrix, not to
# community_Collembola_limite0.005 (already extracted above), so the final
# object never receives these names -- compare with the haplotype block.
colnames(t_s2_f4_Collembola_limite0.005)<-community_Collembola_limite0.005[1,]
as.data.frame(community_Collembola_limite0.005)->community_Collembola0.005 ##transposed table including col and row names
####community_Acari[-49,]->community_Collembola ##removing neg
dim(community_Collembola0.005)
community_Collembola0.005[order(row.names(community_Collembola0.005)),]->community_Collembola0.005 ##order samples
write.table (community_Collembola0.005, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.005.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.005.txt")->community_Collembola0.005
####0.5% submatrix for the West side of Nevado de Toluca. NOTE (Nancy): build tables restricted to West-side localities within Nevado de Toluca
dim(community_Collembola0.005)
community_Collembola0.005[which(str_extract (row.names(community_Collembola0.005), "_W_") %in% "_W_"),]->community_Collembola0.005_West
dim(community_Collembola0.005_West)
community_Collembola0.005_West[,which(colSums(community_Collembola0.005_West)!=0)]->community_Collembola0.005_West ##remove empty (all-zero) columns
dim(community_Collembola0.005_West)
write.table (community_Collembola0.005_West, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.005_West.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.005_West.txt")->community_Collembola0.005_West
#
####loop to create the new matrix combining haplotype by otu pertenency, i.e. submatrix by limit
#**Collembola MATRIX_LIMITE_0.015**
## Collapse haplotype rows into OTU rows at the 1.5% clustering threshold: all
## haplotypes sharing the same 'limite0.015' label are summed into one row,
## named after the first haplotype of that OTU.
unique (s2_raw_threshold0.015$limite0.015)->levels_limite0.015
data.frame()->s2_raw_Collembola_limite0.015
for (i in 1:length(unique (s2_raw_threshold0.015$limite0.015)))
{
levels_limite0.015[i]->level
s2_raw_threshold0.015[which(s2_raw_threshold0.015$limite0.015==level),]->subcom_level_names
subcom_level_names[,c(2:52)]->subcom_level #keep only the 51 sample columns (drops names, level and the negative column)
colSums(subcom_level)->sum # NOTE(review): 'sum' shadows base::sum(); harmless here since sum() is never called afterwards
as.data.frame(sum)->sum
t(sum)->sum
row.names(sum)<-subcom_level_names[1,1] #keep the name of the first haplotype
rbind(s2_raw_Collembola_limite0.015,sum)->s2_raw_Collembola_limite0.015 # grows the data.frame row by row (O(n^2), acceptable at this size)
}
#####delete the ocurrences with less than 4 reads by library (same criteria than denoising)
#####s2_raw_Collembola_limite0.015->s2_f4_with_abundance_Collembola_limite0.015
#####s2_f4_with_abundance_Collembola_limite0.015[s2_f4_with_abundance_Collembola_limite0.015<4]<-0 ##2 warning corresponding wiht the columms of the names and taxa
##**transform in present/absence table**
s2_raw_Collembola_limite0.015->s2_raw_Collembola_limite0.015 # no-op self-assignment kept from an earlier pipeline version
s2_raw_Collembola_limite0.015[s2_raw_Collembola_limite0.015>1]<-1 ##convert read counts to presence/absence (0/1)
##**transform in present/absence table**
#####s2_f4_with_abundance->s2_f4 # translated note: there is a separate Coleoptera subset elsewhere
#####s2_f4[s2_f4>1]<-1 ##2 warning corresponding wiht the columms of the names and taxa
##**checking if there is any row with no presence**
s2_raw_Collembola_limite0.015[,1:51]->data # translated note: the author switched between ranges 2:50 and 1:51 -- confirm 1:51 matches the sample columns
rowSums(data)
length(which(rowSums(data)!=0)) # OTUs present in at least one sample
length(which(rowSums(data)==0)) # empty OTU rows (expected 0)
##**Community matrixes (samples in rows and h in cols).**
##**Collembola**
t(s2_raw_Collembola_limite0.015)->t_s2_f4_Collembola_limite0.015 ##transpose: samples in rows, OTUs in columns
t_s2_f4_Collembola_limite0.015[1:51,]->community_Collembola_limite0.015 # translated note: this number matters -- must equal the number of sample columns used above
colnames(t_s2_f4_Collembola_limite0.015)<-community_Collembola_limite0.015[1,] # NOTE(review): renames a matrix never used again; looks vestigial -- confirm it was not meant for 'community_Collembola_limite0.015'
as.data.frame(community_Collembola_limite0.015)->community_Collembola0.015 ##transposed matrix including col and row names
####community_Acari[-49,]->community_Collembola ##removing neg
dim(community_Collembola0.015)
community_Collembola0.015[order(row.names(community_Collembola0.015)),]->community_Collembola0.015 ##order samples alphabetically
write.table (community_Collembola0.015, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.015.txt") ##disk round-trip normalises the table format (author's workaround)
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.015.txt")->community_Collembola0.015
####West-flank submatrix at the 1.5% level (translated note): keep only West-side localities within Nevado de Toluca
dim(community_Collembola0.015)
community_Collembola0.015[which(str_extract (row.names(community_Collembola0.015), "_W_") %in% "_W_"),]->community_Collembola0.015_West # rows whose name contains "_W_"
dim(community_Collembola0.015_West)
community_Collembola0.015_West[,which(colSums(community_Collembola0.015_West)!=0)]->community_Collembola0.015_West ##drop OTU columns absent from the West subset
dim(community_Collembola0.015_West)
write.table (community_Collembola0.015_West, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.015_West.txt") ##disk round-trip normalises the table format (author's workaround)
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.015_West.txt")->community_Collembola0.015_West
#
####loop to create the new matrix combining haplotype by otu pertenency, i.e. submatrix by limit
#**Collembola MATRIX_LIMITE_0.02**
## Collapse haplotype rows into OTU rows at the 2.0% clustering threshold: all
## haplotypes sharing the same 'limite0.02' label are summed into one row,
## named after the first haplotype of that OTU.
unique(s2_raw_threshold0.02$limite0.02)->levels_limite0.02
data.frame()->s2_raw_Collembola_limite0.02
for (i in 1:length(unique (s2_raw_threshold0.02$limite0.02)))
{
levels_limite0.02[i]->level
s2_raw_threshold0.02[which(s2_raw_threshold0.02$limite0.02==level),]->subcom_level_names
subcom_level_names[,c(2:52)]->subcom_level #keep only the 51 sample columns (drops names, level and the negative column)
colSums(subcom_level)->sum # NOTE(review): 'sum' shadows base::sum(); harmless here since sum() is never called afterwards
as.data.frame(sum)->sum
t(sum)->sum
row.names(sum)<-subcom_level_names[1,1] #keep the name of the first haplotype
rbind(s2_raw_Collembola_limite0.02,sum)->s2_raw_Collembola_limite0.02 # grows the data.frame row by row (O(n^2), acceptable at this size)
}
#####delete the ocurrences with less than 4 reads by library (same criteria than denoising)
#####s2_raw_Collembola_limite0.02->s2_f4_with_abundance_Collembola_limite0.02
#####s2_f4_with_abundance_Collembola_limite0.02[s2_f4_with_abundance_Collembola_limite0.02<4]<-0 ##2 warning corresponding wiht the columms of the names and taxa
##**transform in present/absence table**
s2_raw_Collembola_limite0.02->s2_raw_Collembola_limite0.02 # no-op self-assignment kept from an earlier pipeline version
s2_raw_Collembola_limite0.02[s2_raw_Collembola_limite0.02>1]<-1 ##convert read counts to presence/absence (0/1)
##**transform in present/absence table**
#####s2_f4_with_abundance->s2_f4 # translated note: there is a separate Coleoptera subset elsewhere
#####s2_f4[s2_f4>1]<-1 ##2 warning corresponding wiht the columms of the names and taxa
##**checking if there is any row with no presence**
s2_raw_Collembola_limite0.02[,1:51]->data # translated note: the author switched between ranges 2:50 and 1:51 -- confirm 1:51 matches the sample columns
rowSums(data)
length(which(rowSums(data)!=0)) # OTUs present in at least one sample
length(which(rowSums(data)==0)) # empty OTU rows (expected 0)
##**Community matrixes (samples in rows and h in cols).**
##**Collembola**
t(s2_raw_Collembola_limite0.02)->t_s2_f4_Collembola_limite0.02 ##transpose: samples in rows, OTUs in columns
t_s2_f4_Collembola_limite0.02[1:51,]->community_Collembola_limite0.02 # translated note: this number matters -- must equal the number of sample columns used above
colnames(t_s2_f4_Collembola_limite0.02)<-community_Collembola_limite0.02[1,] # NOTE(review): renames a matrix never used again; looks vestigial -- confirm it was not meant for 'community_Collembola_limite0.02'
as.data.frame(community_Collembola_limite0.02)->community_Collembola0.02 ##transposed matrix including col and row names
####community_Acari[-49,]->community_Collembola ##removing neg
dim(community_Collembola0.02)
community_Collembola0.02[order(row.names(community_Collembola0.02)),]->community_Collembola0.02 ##order samples alphabetically
write.table (community_Collembola0.02, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.02.txt") ##disk round-trip normalises the table format (author's workaround)
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.02.txt")->community_Collembola0.02
####West-flank submatrix at the 2.0% level (translated note): keep only West-side localities within Nevado de Toluca
dim(community_Collembola0.02)
community_Collembola0.02[which(str_extract (row.names(community_Collembola0.02), "_W_") %in% "_W_"),]->community_Collembola0.02_West # rows whose name contains "_W_"
dim(community_Collembola0.02_West)
community_Collembola0.02_West[,which(colSums(community_Collembola0.02_West)!=0)]->community_Collembola0.02_West ##drop OTU columns absent from the West subset
dim(community_Collembola0.02_West)
write.table (community_Collembola0.02_West, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.02_West.txt") ##disk round-trip normalises the table format (author's workaround)
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.02_West.txt")->community_Collembola0.02_West
#
####loop to create the new matrix combining haplotype by otu pertenency, i.e. submatrix by limit
#**Collembola MATRIX_LIMITE_0.03**
## Collapse haplotype rows into OTU rows at the 3.0% clustering threshold: all
## haplotypes sharing the same 'limite0.03' label are summed into one row,
## named after the first haplotype of that OTU.
unique (s2_raw_threshold0.03$limite0.03)->levels_limite0.03
data.frame()->s2_raw_Collembola_limite0.03
for (i in 1:length(unique (s2_raw_threshold0.03$limite0.03)))
{
levels_limite0.03[i]->level
s2_raw_threshold0.03[which(s2_raw_threshold0.03$limite0.03==level),]->subcom_level_names
subcom_level_names[,c(2:52)]->subcom_level #keep only the 51 sample columns (drops names, level and the negative column)
colSums(subcom_level)->sum # NOTE(review): 'sum' shadows base::sum(); harmless here since sum() is never called afterwards
as.data.frame(sum)->sum
t(sum)->sum
row.names(sum)<-subcom_level_names[1,1] #keep the name of the first haplotype
rbind(s2_raw_Collembola_limite0.03,sum)->s2_raw_Collembola_limite0.03 # grows the data.frame row by row (O(n^2), acceptable at this size)
}
##**transform in present/absence table**
s2_raw_Collembola_limite0.03->s2_raw_Collembola_limite0.03 # no-op self-assignment kept from an earlier pipeline version
s2_raw_Collembola_limite0.03[s2_raw_Collembola_limite0.03>1]<-1 ##convert read counts to presence/absence (0/1)
##**transform in present/absence table**
#####s2_f4_with_abundance->s2_f4 # translated note: there is a separate Coleoptera subset elsewhere
#####s2_f4[s2_f4>1]<-1 ##2 warning corresponding wiht the columms of the names and taxa
##**checking if there is any row with no presence**
s2_raw_Collembola_limite0.03[,1:51]->data # translated note: the author switched between ranges 2:50 and 1:51 -- confirm 1:51 matches the sample columns
rowSums(data)
length(which(rowSums(data)!=0)) # OTUs present in at least one sample
length(which(rowSums(data)==0)) # empty OTU rows (expected 0)
##**Community matrixes (samples in rows and h in cols).**
##**Collembola**
t(s2_raw_Collembola_limite0.03)->t_s2_f4_Collembola_limite0.03 ##transpose: samples in rows, OTUs in columns
t_s2_f4_Collembola_limite0.03[1:51,]->community_Collembola_limite0.03 # translated note: this number matters -- must equal the number of sample columns used above
colnames(t_s2_f4_Collembola_limite0.03)<-community_Collembola_limite0.03[1,] # NOTE(review): renames a matrix never used again; looks vestigial -- confirm it was not meant for 'community_Collembola_limite0.03'
as.data.frame(community_Collembola_limite0.03)->community_Collembola0.03 ##transposed matrix including col and row names
####community_Acari[-49,]->community_Collembola ##removing neg
dim(community_Collembola0.03)
community_Collembola0.03[order(row.names(community_Collembola0.03)),]->community_Collembola0.03 ##order samples alphabetically
write.table (community_Collembola0.03, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.03.txt") ##disk round-trip normalises the table format (author's workaround)
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.03.txt")->community_Collembola0.03
####West-flank submatrix at the 3.0% level (translated note): keep only West-side localities within Nevado de Toluca
dim(community_Collembola0.03)
community_Collembola0.03[which(str_extract (row.names(community_Collembola0.03), "_W_") %in% "_W_"),]->community_Collembola0.03_West # rows whose name contains "_W_"
dim(community_Collembola0.03_West)
community_Collembola0.03_West[,which(colSums(community_Collembola0.03_West)!=0)]->community_Collembola0.03_West ##drop OTU columns absent from the West subset
dim(community_Collembola0.03_West)
write.table (community_Collembola0.03_West, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.03_West.txt") ##disk round-trip normalises the table format (author's workaround)
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.03_West.txt")->community_Collembola0.03_West
#
####loop to create the new matrix combining haplotype by otu pertenency, i.e. submatrix by limit
#**Collembola MATRIX_LIMITE_0.05**
## Collapse haplotype rows into OTU rows at the 5.0% clustering threshold: all
## haplotypes sharing the same 'limite0.05' label are summed into one row,
## named after the first haplotype of that OTU.
unique (s2_raw_threshold0.05$limite0.05)->levels_limite0.05
data.frame()->s2_raw_Collembola_limite0.05
for (i in 1:length(unique (s2_raw_threshold0.05$limite0.05)))
{
levels_limite0.05[i]->level
s2_raw_threshold0.05[which(s2_raw_threshold0.05$limite0.05==level),]->subcom_level_names
subcom_level_names[,c(2:52)]->subcom_level #keep only the 51 sample columns (drops names, level and the negative column)
colSums(subcom_level)->sum # NOTE(review): 'sum' shadows base::sum(); harmless here since sum() is never called afterwards
as.data.frame(sum)->sum
t(sum)->sum
row.names(sum)<-subcom_level_names[1,1] #keep the name of the first haplotype
rbind(s2_raw_Collembola_limite0.05,sum)->s2_raw_Collembola_limite0.05 # grows the data.frame row by row (O(n^2), acceptable at this size)
}
##**transform in present/absence table**
s2_raw_Collembola_limite0.05->s2_raw_Collembola_limite0.05 # no-op self-assignment kept from an earlier pipeline version
s2_raw_Collembola_limite0.05[s2_raw_Collembola_limite0.05>1]<-1 ##convert read counts to presence/absence (0/1)
##**transform in present/absence table**
#####s2_f4_with_abundance->s2_f4 # translated note: there is a separate Coleoptera subset elsewhere
#####s2_f4[s2_f4>1]<-1 ##2 warning corresponding wiht the columms of the names and taxa
##**checking if there is any row with no presence**
s2_raw_Collembola_limite0.05[,1:51]->data # translated note: the author switched between ranges 2:50 and 1:51 -- confirm 1:51 matches the sample columns
rowSums(data)
length(which(rowSums(data)!=0)) # OTUs present in at least one sample
length(which(rowSums(data)==0)) # empty OTU rows (expected 0)
##**Community matrixes (samples in rows and h in cols).**
##**Collembola**
t(s2_raw_Collembola_limite0.05)->t_s2_f4_Collembola_limite0.05 ##transpose: samples in rows, OTUs in columns
t_s2_f4_Collembola_limite0.05[1:51,]->community_Collembola_limite0.05 # translated note: this number matters -- must equal the number of sample columns used above
colnames(t_s2_f4_Collembola_limite0.05)<-community_Collembola_limite0.05[1,] # NOTE(review): renames a matrix never used again; looks vestigial -- confirm it was not meant for 'community_Collembola_limite0.05'
as.data.frame(community_Collembola_limite0.05)->community_Collembola0.05 ##transposed matrix including col and row names
####community_Acari[-49,]->community_Collembola ##removing neg
dim(community_Collembola0.05)
community_Collembola0.05[order(row.names(community_Collembola0.05)),]->community_Collembola0.05 ##order samples alphabetically
write.table (community_Collembola0.05, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.05.txt") ##disk round-trip normalises the table format (author's workaround)
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.05.txt")->community_Collembola0.05
####West-flank submatrix at the 5.0% level (translated note): keep only West-side localities within Nevado de Toluca
dim(community_Collembola0.05)
community_Collembola0.05[which(str_extract (row.names(community_Collembola0.05), "_W_") %in% "_W_"),]->community_Collembola0.05_West # rows whose name contains "_W_"
dim(community_Collembola0.05_West)
community_Collembola0.05_West[,which(colSums(community_Collembola0.05_West)!=0)]->community_Collembola0.05_West ##drop OTU columns absent from the West subset
dim(community_Collembola0.05_West)
write.table (community_Collembola0.05_West, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.05_West.txt") ##disk round-trip normalises the table format (author's workaround)
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.05_West.txt")->community_Collembola0.05_West
#
####loop to create the new matrix combining haplotype by otu pertenency, i.e. submatrix by limit
#**Collembola MATRIX_LIMITE_0.075**
## Collapse haplotype rows into OTU rows at the 7.5% clustering threshold: all
## haplotypes sharing the same 'limite0.075' label are summed into one row,
## named after the first haplotype of that OTU.
unique (s2_raw_threshold0.075$limite0.075)->levels_limite0.075
data.frame()->s2_raw_Collembola_limite0.075
for (i in 1:length(unique (s2_raw_threshold0.075$limite0.075)))
{
levels_limite0.075[i]->level
s2_raw_threshold0.075[which(s2_raw_threshold0.075$limite0.075==level),]->subcom_level_names
subcom_level_names[,c(2:52)]->subcom_level #keep only the 51 sample columns (drops names, level and the negative column)
colSums(subcom_level)->sum # NOTE(review): 'sum' shadows base::sum(); harmless here since sum() is never called afterwards
as.data.frame(sum)->sum
t(sum)->sum
row.names(sum)<-subcom_level_names[1,1] #keep the name of the first haplotype
rbind(s2_raw_Collembola_limite0.075,sum)->s2_raw_Collembola_limite0.075 # grows the data.frame row by row (O(n^2), acceptable at this size)
}
##**transform in present/absence table**
s2_raw_Collembola_limite0.075->s2_raw_Collembola_limite0.075 # no-op self-assignment kept from an earlier pipeline version
s2_raw_Collembola_limite0.075[s2_raw_Collembola_limite0.075>1]<-1 ##convert read counts to presence/absence (0/1)
##**transform in present/absence table**
#####s2_f4_with_abundance->s2_f4 # translated note: there is a separate Coleoptera subset elsewhere
#####s2_f4[s2_f4>1]<-1 ##2 warning corresponding wiht the columms of the names and taxa
##**checking if there is any row with no presence**
s2_raw_Collembola_limite0.075[,1:51]->data # translated note: the author switched between ranges 2:50 and 1:51 -- confirm 1:51 matches the sample columns
rowSums(data)
length(which(rowSums(data)!=0)) # OTUs present in at least one sample
length(which(rowSums(data)==0)) # empty OTU rows (expected 0)
##**Community matrixes (samples in rows and h in cols).**
##**Collembola**
t(s2_raw_Collembola_limite0.075)->t_s2_f4_Collembola_limite0.075 ##transpose: samples in rows, OTUs in columns
t_s2_f4_Collembola_limite0.075[1:51,]->community_Collembola_limite0.075 # translated note: this number matters -- must equal the number of sample columns used above
colnames(t_s2_f4_Collembola_limite0.075)<-community_Collembola_limite0.075[1,] # NOTE(review): renames a matrix never used again; looks vestigial -- confirm it was not meant for 'community_Collembola_limite0.075'
as.data.frame(community_Collembola_limite0.075)->community_Collembola0.075 ##transposed matrix including col and row names
####community_Acari[-49,]->community_Collembola ##removing neg
dim(community_Collembola0.075)
community_Collembola0.075[order(row.names(community_Collembola0.075)),]->community_Collembola0.075 ##order samples alphabetically
write.table (community_Collembola0.075, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.075.txt") ##disk round-trip normalises the table format (author's workaround)
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.075.txt")->community_Collembola0.075
####West-flank submatrix at the 7.5% level (translated note): keep only West-side localities within Nevado de Toluca
dim(community_Collembola0.075)
community_Collembola0.075[which(str_extract (row.names(community_Collembola0.075), "_W_") %in% "_W_"),]->community_Collembola0.075_West # rows whose name contains "_W_"
dim(community_Collembola0.075_West)
community_Collembola0.075_West[,which(colSums(community_Collembola0.075_West)!=0)]->community_Collembola0.075_West ##drop OTU columns absent from the West subset
dim(community_Collembola0.075_West)
write.table (community_Collembola0.075_West, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.075_West.txt") ##disk round-trip normalises the table format (author's workaround)
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.075_West.txt")->community_Collembola0.075_West
#
####loop to create the new matrix combining haplotype by otu pertenency, i.e. submatrix by limit
#**Collembola MATRIX_LIMITE GMYC_0.029**
## Collapse haplotype rows into OTU rows at the GMYC-derived 2.9% threshold:
## all haplotypes sharing the same 'limite0.029' label are summed into one row,
## named after the first haplotype of that OTU.
unique(s2_raw_threshold0.029$limite0.029)->levels_limite0.029
data.frame()->s2_raw_Collembola_limite0.029
for (i in 1:length(unique (s2_raw_threshold0.029$limite0.029)))
{
levels_limite0.029[i]->level
s2_raw_threshold0.029[which(s2_raw_threshold0.029$limite0.029==level),]->subcom_level_names
subcom_level_names[,c(2:52)]->subcom_level #keep only the 51 sample columns (drops names, level and the negative column)
colSums(subcom_level)->sum # NOTE(review): 'sum' shadows base::sum(); harmless here since sum() is never called afterwards
as.data.frame(sum)->sum
t(sum)->sum
row.names(sum)<-subcom_level_names[1,1] #keep the name of the first haplotype
rbind(s2_raw_Collembola_limite0.029,sum)->s2_raw_Collembola_limite0.029 # grows the data.frame row by row (O(n^2), acceptable at this size)
}
#####delete the ocurrences with less than 4 reads by library (same criteria than denoising)
#####s2_raw_Collembola_limite0.029->s2_f4_with_abundance_Collembola_limite0.029
#####s2_f4_with_abundance_Collembola_limite0.029[s2_f4_with_abundance_Collembola_limite0.029<4]<-0 ##2 warning corresponding wiht the columms of the names and taxa
##**transform in present/absence table**
s2_raw_Collembola_limite0.029->s2_raw_Collembola_limite0.029 # no-op self-assignment kept from an earlier pipeline version
s2_raw_Collembola_limite0.029[s2_raw_Collembola_limite0.029>1]<-1 ##convert read counts to presence/absence (0/1)
##**transform in present/absence table**
#####s2_f4_with_abundance->s2_f4 # translated note: there is a separate Coleoptera subset elsewhere
#####s2_f4[s2_f4>1]<-1 ##2 warning corresponding wiht the columms of the names and taxa
##**checking if there is any row with no presence**
s2_raw_Collembola_limite0.029[,1:51]->data # translated note: the author switched between ranges 2:50 and 1:51 -- confirm 1:51 matches the sample columns
rowSums(data)
length(which(rowSums(data)!=0)) # OTUs present in at least one sample
length(which(rowSums(data)==0)) # empty OTU rows (expected 0)
##**Community matrixes (samples in rows and h in cols).**
##**Collembola**
t(s2_raw_Collembola_limite0.029)->t_s2_f4_Collembola_limite0.029 ##transpose: samples in rows, OTUs in columns
t_s2_f4_Collembola_limite0.029[1:51,]->community_Collembola_limite0.029 # translated note: this number matters -- must equal the number of sample columns used above
colnames(t_s2_f4_Collembola_limite0.029)<-community_Collembola_limite0.029[1,] # NOTE(review): renames a matrix never used again; looks vestigial -- confirm it was not meant for 'community_Collembola_limite0.029'
as.data.frame(community_Collembola_limite0.029)->community_Collembola0.029 ##transposed matrix including col and row names
####community_Acari[-49,]->community_Collembola ##removing neg
dim(community_Collembola0.029)
community_Collembola0.029[order(row.names(community_Collembola0.029)),]->community_Collembola0.029 ##order samples alphabetically
write.table (community_Collembola0.029, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.029.txt") ##disk round-trip normalises the table format (author's workaround)
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.029.txt")->community_Collembola0.029
####West-flank submatrix at the GMYC (2.9%) level. NOTE(review): the original comment mentioned a by-SITE / rainy-season subset and looks copy-pasted -- confirm intent
dim(community_Collembola0.029)
community_Collembola0.029[which(str_extract (row.names(community_Collembola0.029), "_W_") %in% "_W_"),]->community_Collembola0.029_West # rows whose name contains "_W_"
dim(community_Collembola0.029_West)
community_Collembola0.029_West[,which(colSums(community_Collembola0.029_West)!=0)]->community_Collembola0.029_West ##drop OTU columns absent from the West subset
dim(community_Collembola0.029_West)
write.table (community_Collembola0.029_West, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.029_West.txt") ##disk round-trip normalises the table format (author's workaround)
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.029_West.txt")->community_Collembola0.029_West
#**BETADIVERSITY ORDINATIONS by SITE**
#**Collembola**
## Pairwise beta diversity (Sorensen family) between West-flank samples, one
## call per delimitation level (haplotypes, distance thresholds, GMYC 2.9%).
## betapart::beta.pair() takes a presence/absence community matrix and returns
## the turnover (beta.sim), nestedness (beta.sne) and total dissimilarity
## (beta.sor) distance matrices.
beta.pair_CollembolaWest_h     <- beta.pair(community_Collembola_h_West,    index.family = "sorensen")
beta.pair_CollembolaWest_0.005 <- beta.pair(community_Collembola0.005_West, index.family = "sorensen")
beta.pair_CollembolaWest_0.015 <- beta.pair(community_Collembola0.015_West, index.family = "sorensen")
beta.pair_CollembolaWest_0.02  <- beta.pair(community_Collembola0.02_West,  index.family = "sorensen")
beta.pair_CollembolaWest_0.03  <- beta.pair(community_Collembola0.03_West,  index.family = "sorensen")
beta.pair_CollembolaWest_0.05  <- beta.pair(community_Collembola0.05_West,  index.family = "sorensen")
beta.pair_CollembolaWest_0.075 <- beta.pair(community_Collembola0.075_West, index.family = "sorensen")
beta.pair_CollembolaWest_0.029 <- beta.pair(community_Collembola0.029_West, index.family = "sorensen")
#
##** Effective-resistance (geographic) distance matrix for the West flank **
## Read the site-by-site resistance matrix, align its row/column order with the
## beta-diversity matrices, keep the lower triangle only, then coerce to 'dist'.
## The bare dim()/class()/object lines are deliberate console diagnostics.
Resistance_matrix_West <- read.table("../spatial/IBDistanceMatrix/geomatrix_Site_West.txt",
                                     sep = ",", header = TRUE, row.names = 1)
dim(Resistance_matrix_West)
class(Resistance_matrix_West)
Resistance_matrix_West <- as.matrix(Resistance_matrix_West)
class(Resistance_matrix_West)
## alphabetical row/column order so sites line up with the community matrices
row_order <- order(row.names(Resistance_matrix_West))
col_order <- order(colnames(Resistance_matrix_West))
Resistance_matrix_West <- Resistance_matrix_West[row_order, col_order]
Resistance_matrix_West[upper.tri(Resistance_matrix_West)] <- NA
Resistance_matrix_West
class(Resistance_matrix_West)
Resistance_matrix_West <- as.dist(Resistance_matrix_West)
Resistance_matrix_West
#
##**Generating similarity values and adding 0.001 to avoid LOG(0)**
## Turn each turnover distance matrix into a similarity matrix (1 - beta.sim),
## shifted by +0.001 so the later log() calls never see an exact zero.
all_h_betasim     <- 1 - beta.pair_CollembolaWest_h$beta.sim     + 0.001
all_0.005_betasim <- 1 - beta.pair_CollembolaWest_0.005$beta.sim + 0.001
all_0.015_betasim <- 1 - beta.pair_CollembolaWest_0.015$beta.sim + 0.001
all_0.02_betasim  <- 1 - beta.pair_CollembolaWest_0.02$beta.sim  + 0.001
all_0.03_betasim  <- 1 - beta.pair_CollembolaWest_0.03$beta.sim  + 0.001
all_0.05_betasim  <- 1 - beta.pair_CollembolaWest_0.05$beta.sim  + 0.001
all_0.075_betasim <- 1 - beta.pair_CollembolaWest_0.075$beta.sim + 0.001
all_0.029_betasim <- 1 - beta.pair_CollembolaWest_0.029$beta.sim + 0.001
#
##**log log linear regression multiple levels (fractality 1)**
## Multiple regression on distance matrices (ecodist::MRM): regress the log
## similarity at each coarser delimitation level on the log similarity at the
## haplotype level. Each call auto-prints its coefficient table at top level,
## so the results must be read from the console (nothing is stored).
MRM(log(all_0.005_betasim)~log(all_h_betasim))
MRM(log(all_0.015_betasim)~log(all_h_betasim))
MRM(log(all_0.02_betasim)~log(all_h_betasim))
MRM(log(all_0.03_betasim)~log(all_h_betasim))
MRM(log(all_0.05_betasim)~log(all_h_betasim))
MRM(log(all_0.075_betasim)~log(all_h_betasim))
MRM(log(all_0.029_betasim)~log(all_h_betasim))
#
##**only to plot linear regressions**
## Log-transform each similarity matrix once, so the regressions and the
## scatterplot below can reuse the same objects.
log_all_h_betasim     <- log(all_h_betasim)
log_all_0.005_betasim <- log(all_0.005_betasim)
log_all_0.015_betasim <- log(all_0.015_betasim)
log_all_0.02_betasim  <- log(all_0.02_betasim)
log_all_0.03_betasim  <- log(all_0.03_betasim)
log_all_0.05_betasim  <- log(all_0.05_betasim)
log_all_0.075_betasim <- log(all_0.075_betasim)
log_all_0.029_betasim <- log(all_0.029_betasim)
#
###estimated <- seq(-4, 0, 0.01) nuevo
## For each level, fit lm(log similarity_level ~ log similarity_h), auto-print
## its summary at top level, and predict over a fixed grid so the fitted line
## can be drawn on the scatterplot below. 'estimated' is re-created identically
## before every predict() call (redundant but harmless).
linear.model_0.005 <- lm(log_all_0.005_betasim~ log_all_h_betasim)
summary(linear.model_0.005)
estimated <- seq(-4, 0, 0.01)
counts.lineal_0.005 <- predict(linear.model_0.005,list(log_all_h_betasim=estimated))
linear.model_0.015 <- lm(log_all_0.015_betasim~ log_all_h_betasim)
summary(linear.model_0.015)
estimated <- seq(-4, 0, 0.01)
counts.lineal_0.015 <- predict(linear.model_0.015,list(log_all_h_betasim=estimated))
linear.model_0.02 <- lm(log_all_0.02_betasim~ log_all_h_betasim)
summary(linear.model_0.02)
estimated <- seq(-4, 0, 0.01)
counts.lineal_0.02 <- predict(linear.model_0.02,list(log_all_h_betasim=estimated))
linear.model_0.03 <- lm(log_all_0.03_betasim~ log_all_h_betasim)
summary(linear.model_0.03)
estimated <- seq(-4, 0, 0.01)
counts.lineal_0.03 <- predict(linear.model_0.03,list(log_all_h_betasim=estimated))
linear.model_0.05 <- lm(log_all_0.05_betasim~ log_all_h_betasim)
summary(linear.model_0.05)
estimated <- seq(-4, 0, 0.01)
counts.lineal_0.05 <- predict(linear.model_0.05,list(log_all_h_betasim=estimated))
linear.model_0.075 <- lm(log_all_0.075_betasim~ log_all_h_betasim)
summary(linear.model_0.075)
estimated <- seq(-4, 0, 0.01)
counts.lineal_0.075 <- predict(linear.model_0.075,list(log_all_h_betasim=estimated))
linear.model_0.029 <- lm(log_all_0.029_betasim~ log_all_h_betasim)
summary(linear.model_0.029)
estimated <- seq(-4, 0, 0.01)
counts.lineal_0.029 <- predict(linear.model_0.029,list(log_all_h_betasim=estimated))
#
##palette(gray(0:20 / 20))
##colors: "#003695", "#c997a9", "#9d9cc6", "#d49e57", "#9cb15b", "#fbd048", "#93dfff", "#F4A582"
## Scatterplot of log similarity at each delimitation level against the
## haplotype level, with the fitted regression lines overlaid (one colour per
## level; GMYC 0.029 is plotted between 0.02 and 0.03).
## NOTE(review): lines() has no xlab/ylab arguments; those are silently ignored.
plot(log_all_h_betasim, log_all_0.005_betasim, pch=20, main="log_all_forestCollembola_similarity_multilevel", col = "#003695", ylim=c(-3.4,-0.05),xlim=c(-3.8,-0.1),ylab="log_similarity",xlab="log_similarity_h_level", cex.lab= 1.5, cex.axis= 1.5)
lines(estimated, counts.lineal_0.005,lwd=2, col = "#003695", xlab = "Time (s)", ylab = "Counts")
points(log_all_h_betasim, log_all_0.015_betasim, pch=20, col="#c997a9")
lines(estimated, counts.lineal_0.015,lwd=2, col = "#c997a9", xlab = "Time (s)", ylab = "Counts")
points(log_all_h_betasim, log_all_0.02_betasim, pch=20, col="#9d9cc6")
lines(estimated, counts.lineal_0.02,lwd=2, col = "#9d9cc6", xlab = "Time (s)", ylab = "Counts")
points(log_all_h_betasim, log_all_0.029_betasim, pch=20, col="#F4A582")
lines(estimated, counts.lineal_0.029,lwd=2, col="#F4A582", xlab = "Time (s)", ylab = "Counts")
points(log_all_h_betasim, log_all_0.03_betasim, pch=20, col="#d49e57")
lines(estimated, counts.lineal_0.03,lwd=2, col = "#d49e57", xlab = "Time (s)", ylab = "Counts")
points(log_all_h_betasim, log_all_0.05_betasim, pch=20, col="#fbd048")
lines(estimated, counts.lineal_0.05,lwd=2, col = "#fbd048", xlab = "Time (s)", ylab = "Counts")
points(log_all_h_betasim, log_all_0.075_betasim, pch=20, col= "#93dfff")
lines(estimated, counts.lineal_0.075,lwd=2, col = "#93dfff", xlab = "Time (s)", ylab = "Counts")
#
##**Decay using geomatrix**
## Distance-decay of assemblage similarity against the effective-resistance
## matrix: one negative-exponential model per delimitation level
## (betapart::decay.model, model.type = "exp", y.type = "sim").
## The original fitted every model TWICE -- once so the result auto-printed at
## top level and once to store it. Each model is now fitted a single time and
## printed explicitly, which halves the fitting work with identical output.
decay_h <- decay.model(all_h_betasim,Resistance_matrix_West,model.type = "exp",y.type="sim")
print(decay_h)
decay_0.005 <- decay.model(all_0.005_betasim,Resistance_matrix_West,model.type = "exp",y.type="sim")
print(decay_0.005)
decay_0.015 <- decay.model(all_0.015_betasim,Resistance_matrix_West,model.type = "exp",y.type="sim")
print(decay_0.015)
decay_0.02 <- decay.model(all_0.02_betasim,Resistance_matrix_West,model.type = "exp",y.type="sim")
print(decay_0.02)
decay_0.03 <- decay.model(all_0.03_betasim,Resistance_matrix_West,model.type = "exp",y.type="sim")
print(decay_0.03)
decay_0.05 <- decay.model(all_0.05_betasim,Resistance_matrix_West,model.type = "exp",y.type="sim")
print(decay_0.05)
decay_0.075 <- decay.model(all_0.075_betasim,Resistance_matrix_West,model.type = "exp",y.type="sim")
print(decay_0.075)
decay_0.029 <- decay.model(all_0.029_betasim,Resistance_matrix_West,model.type = "exp",y.type="sim")
print(decay_0.029)
#
##palette(gray(0:20 / 20))
##colors: "#003695", "#c997a9", "#9d9cc6", "#d49e57", "#9cb15b", "#fbd048", "#93dfff", "#F4A582"
## Three variants of the distance-decay figure, differing only in which level
## subsets are drawn (the unwanted levels are left commented out).
## plot.decay is betapart's S3 plot method called directly by name.
##**Plot with 6 levels: h, 1.5, 3, 5, 7.5, GMYC**
##**Plot all levels**
plot.decay(decay_h, ylim=c(0,1.0), xlim=c(1,3200), pch=20, lwd=4, cex.lab= 1.5, cex.axis= 1.5, col="#003695")
##plot.decay(decay_0.005,add=T,pch=20,lwd=4,col="#c997a9")
plot.decay(decay_0.015,add=T,pch=20,lwd=4,col="#9d9cc6")
##plot.decay(decay_0.02,add=T,pch=20,lwd=4,col="#d49e57")
plot.decay(decay_0.03,add=T,pch=20,lwd=4,col="#9cb15b")
plot.decay(decay_0.05,add=T,pch=20,lwd=4,col="#fbd048")
plot.decay(decay_0.075,add=T,pch=20,lwd=4,col="#93dfff")
plot.decay(decay_0.029,add=T, pch=20, lty=3, lwd=4, col="#F4A582")
##**Plot levels: h,3, 5, y GMYC.**
##**Plot all levels**
plot.decay(decay_h, ylim=c(0,1.0), xlim=c(1,3200), pch=20, lwd=4, cex.lab= 1.5, cex.axis= 1.5, col="#003695")
##plot.decay(decay_0.005,add=T,pch=20,lwd=4,col="#c997a9")
##plot.decay(decay_0.015,add=T,pch=20,lwd=4,col="#9d9cc6")
##plot.decay(decay_0.02,add=T,pch=20,lwd=4,col="#d49e57")
plot.decay(decay_0.03,add=T,pch=20,lwd=4,col="#9cb15b")
plot.decay(decay_0.05,add=T,pch=20,lwd=4,col="#fbd048")
plot.decay(decay_0.075,add=T,pch=20,lwd=4,col="#93dfff")
plot.decay(decay_0.029,add=T, pch=20, lty=3, lwd=4, col="#F4A582")
##**Plot with levels: h, 0.5, 1.5, 3, 5, 7.5, GMYC**
##**Plot all levels**
plot.decay(decay_h, ylim=c(0,1.0), xlim=c(1,3200), pch=20, lwd=4, cex.lab= 1.5, cex.axis= 1.5, col="#003695")
plot.decay(decay_0.005,add=T,pch=20,lwd=4,col="#c997a9")
plot.decay(decay_0.015,add=T,pch=20,lwd=4,col="#9d9cc6")
##plot.decay(decay_0.02,add=T,pch=20,lwd=4,col="#d49e57")
plot.decay(decay_0.029,add=T,pch=20,lwd=4,col="#92000A")
plot.decay(decay_0.03,add=T,pch=20,lty=3,lwd=4,col="#9cb15b")
plot.decay(decay_0.05,add=T,pch=20,lwd=4,col="#fbd048")
plot.decay(decay_0.075,add=T,pch=20,lwd=4,col="#93dfff")
#
##**rsquared and pval of ddecays and slopes (Plot all levels)**
## Pseudo R^2, p-value and slope of every distance-decay model, summarised as
## a barplot of explained variance with the p-value printed above each bar.
## NOTE(review): 'po=3' relies on partial matching of text()'s 'pos' argument.
cbind (decay_h$pseudo.r.squared,decay_0.005$pseudo.r.squared,decay_0.015$pseudo.r.squared,decay_0.02$pseudo.r.squared,decay_0.03$pseudo.r.squared,decay_0.05$pseudo.r.squared, decay_0.075$pseudo.r.squared, decay_0.029$pseudo.r.squared)->rsquared
colnames(rsquared)<-c("h","0.005","0.015","0.02","0.03","0.05", "0.075","GMYC")
rsquared
cbind (decay_h$p.value, decay_0.005$p.value, decay_0.015$p.value, decay_0.02$p.value, decay_0.03$p.value,decay_0.05$p.value, decay_0.075$p.value, decay_0.029$p.value)->p.value
rbind(rsquared,p.value)->rsquared # row 1 = pseudo R^2, row 2 = p-value
text (x=barplot(rsquared[1,],ylim=c(0,1.0),cex.lab= 1.4, cex.axis= 1.4, cex=1.4, xlab="levels", ylab="exp_var_geomatrix", main="all_forestCollembola_geomatrix_exp_var"), y=rsquared[1,],label=rsquared[2,],po=3,cex=0.9)
cbind (decay_h$b.slope,decay_0.005$b.slope,decay_0.015$b.slope, decay_0.02$b.slope, decay_0.03$b.slope,decay_0.05$b.slope, decay_0.075$b.slope, decay_0.029$b.slope)->b.slope
barplot(b.slope,main="all_forestCollembola_geomatrix_slopes_ddcay")
##**rsquared and pval of ddecays and slopes (levels: h, GMYC, 1.5, 3, 5, 7.5)**
## Same summary as above, restricted to levels {h, 0.015, 0.03, 0.05, 0.075,
## GMYC(0.029)}: barplot of pseudo R^2 with p-values above each bar, then a
## barplot of the decay slopes.
cbind (decay_h$pseudo.r.squared,decay_0.015$pseudo.r.squared,decay_0.03$pseudo.r.squared,decay_0.05$pseudo.r.squared,decay_0.075$pseudo.r.squared, decay_0.029$pseudo.r.squared)->rsquared
colnames(rsquared)<-c("h", "0.015","0.03","0.05", "0.075", "GMYC")
rsquared
cbind (decay_h$p.value, decay_0.015$p.value, decay_0.03$p.value,decay_0.05$p.value, decay_0.075$p.value, decay_0.029$p.value)->p.value
rbind(rsquared,p.value)->rsquared # row 1 = pseudo R^2, row 2 = p-value
# 'pos' spelled out (the original abbreviated it as 'po', relying on partial matching)
text (x=barplot(rsquared[1,],ylim=c(0,1.0),cex.lab= 1.4, cex.axis= 1.4, cex=1.4, xlab="levels", ylab="exp_var_geomatrix", main="all_forestCollembola_geomatrix_exp_var"), y=rsquared[1,],label=rsquared[2,],pos=3,cex=0.9)
## BUG FIX: the original line was missing the comma between decay_0.075$b.slope
## and decay_0.029$b.slope, so it evaluated decay_0.075$b.slopedecay_0.029 (NULL
## via failed partial matching) and silently dropped both the 0.075 and GMYC
## slopes from the barplot.
cbind (decay_h$b.slope,decay_0.015$b.slope, decay_0.03$b.slope,decay_0.05$b.slope, decay_0.075$b.slope, decay_0.029$b.slope)->b.slope
barplot(b.slope,main="all_forestCollembola_geomatrix_slopes_ddcay")
## **Pseudo-R^2, p-values and slopes of the distance decays
## (levels: h, GMYC, 3 and 5)**
rsquared <- cbind(decay_h$pseudo.r.squared,
                  decay_0.03$pseudo.r.squared,
                  decay_0.05$pseudo.r.squared,
                  decay_0.029$pseudo.r.squared)
colnames(rsquared) <- c("h", "0.03", "0.05", "GMYC")
rsquared
p.value <- cbind(decay_h$p.value,
                 decay_0.03$p.value,
                 decay_0.05$p.value,
                 decay_0.029$p.value)
rsquared <- rbind(rsquared, p.value)
# Barplot of the explained variance, annotating each bar with its p-value.
text(x = barplot(rsquared[1, ], ylim = c(0, 1.0),
                 cex.lab = 1.4, cex.axis = 1.4, cex = 1.4,
                 xlab = "levels", ylab = "exp_var_geomatrix",
                 main = "all_forestCollembola_geomatrix_exp_var"),
     y = rsquared[1, ], labels = rsquared[2, ], pos = 3, cex = 0.9)
b.slope <- cbind(decay_h$b.slope,
                 decay_0.03$b.slope,
                 decay_0.05$b.slope,
                 decay_0.029$b.slope)
barplot(b.slope, main = "all_forestCollembola_geomatrix_slopes_ddcay")
## **Pseudo-R^2, p-values and slopes of the distance decays
## (levels: h, 0.5, 1.5, 3, 5, 7.5)**
rsquared <- cbind(decay_h$pseudo.r.squared,
                  decay_0.005$pseudo.r.squared,
                  decay_0.015$pseudo.r.squared,
                  decay_0.03$pseudo.r.squared,
                  decay_0.05$pseudo.r.squared,
                  decay_0.075$pseudo.r.squared)
colnames(rsquared) <- c("h", "0.005", "0.015", "0.03", "0.05", "0.075")
rsquared
p.value <- cbind(decay_h$p.value,
                 decay_0.005$p.value,
                 decay_0.015$p.value,
                 decay_0.03$p.value,
                 decay_0.05$p.value,
                 decay_0.075$p.value)
rsquared <- rbind(rsquared, p.value)
# Barplot of the explained variance, annotating each bar with its p-value.
text(x = barplot(rsquared[1, ], ylim = c(0, 1.0),
                 cex.lab = 1.4, cex.axis = 1.4, cex = 1.4,
                 xlab = "levels", ylab = "exp_var_geomatrix",
                 main = "all_forestCollembola_geomatrix_exp_var"),
     y = rsquared[1, ], labels = rsquared[2, ], pos = 3, cex = 0.9)
b.slope <- cbind(decay_h$b.slope,
                 decay_0.005$b.slope,
                 decay_0.015$b.slope,
                 decay_0.03$b.slope,
                 decay_0.05$b.slope,
                 decay_0.075$b.slope)
barplot(b.slope, main = "all_forestCollembola_geomatrix_slopes_ddcay")
## **log-log Pearson correlations (fractality_2)**
# Ordinal proxy for each clustering level, in the order:
# h, 0.005, 0.015, 0.02, 0.03, 0.05, 0.075, GMYC (0.029).
# NOTE(review): the last value is 9, skipping 8 — possibly deliberate
# (GMYC placed deeper on the ordinal scale) but worth confirming; a typo
# for 8 cannot be ruled out from this script alone.
# NOTE: this assignment shadows base::levels() for the rest of the script.
levels <- c(1, 2, 3, 4, 5, 6, 7, 9)
# Intercepts (a) of the decay fits, one row per clustering level.
intercepts <- rbind(decay_h$a.intercept,
                    decay_0.005$a.intercept,
                    decay_0.015$a.intercept,
                    decay_0.02$a.intercept,
                    decay_0.03$a.intercept,
                    decay_0.05$a.intercept,
                    decay_0.075$a.intercept,
                    decay_0.029$a.intercept)
intercepts
## **h, 0.005, 0.015, 0.02, 0.03, 0.05, 0.075, 0.029**
## **beta.pair_CollembolaWest_h**
# Multiple-site Sorensen turnover component (beta.SIM, betapart::beta.multi)
# of the West-side community at each clustering level.  The two-step
# "compute then extract" of the original is collapsed into one expression.
diss_mean_h <- beta.multi(community_Collembola_h_West, index.family = "sorensen")$beta.SIM
diss_mean_0.005 <- beta.multi(community_Collembola0.005_West, index.family = "sorensen")$beta.SIM
diss_mean_0.015 <- beta.multi(community_Collembola0.015_West, index.family = "sorensen")$beta.SIM
diss_mean_0.02 <- beta.multi(community_Collembola0.02_West, index.family = "sorensen")$beta.SIM
diss_mean_0.03 <- beta.multi(community_Collembola0.03_West, index.family = "sorensen")$beta.SIM
diss_mean_0.05 <- beta.multi(community_Collembola0.05_West, index.family = "sorensen")$beta.SIM
diss_mean_0.075 <- beta.multi(community_Collembola0.075_West, index.family = "sorensen")$beta.SIM
diss_mean_0.029 <- beta.multi(community_Collembola0.029_West, index.family = "sorensen")$beta.SIM
# Stack the per-level dissimilarities (one row per level).
mean_diss <- rbind(diss_mean_h, diss_mean_0.005, diss_mean_0.015,
                   diss_mean_0.02, diss_mean_0.03, diss_mean_0.05,
                   diss_mean_0.075, diss_mean_0.029)
# Convert dissimilarity to similarity; the +0.001 offset keeps log() finite
# downstream when similarity would be exactly 0 — presumably; confirm intent.
mean_sim <- 1 - mean_diss + 0.001
mean_sim
# Number of lineages (matrix columns) retained at each clustering level.
# ncol() is equivalent to dim()[2] used in the original.
n_h <- ncol(community_Collembola_h_West)
n_0.005 <- ncol(community_Collembola0.005_West)
n_0.015 <- ncol(community_Collembola0.015_West)
n_0.02 <- ncol(community_Collembola0.02_West)
n_0.03 <- ncol(community_Collembola0.03_West)
n_0.05 <- ncol(community_Collembola0.05_West)
n_0.075 <- ncol(community_Collembola0.075_West)
n_0.029 <- ncol(community_Collembola0.029_West)
n_lineages <- rbind(n_h, n_0.005, n_0.015, n_0.02,
                    n_0.03, n_0.05, n_0.075, n_0.029)
n_lineages
# One colour per clustering level, same order as `levels`
# (hoisted out of the three plot calls; values unchanged).
level_cols <- c("#003695", "#c997a9", "#9d9cc6", "#d49e57",
                "#9cb15b", "#fbd048", "#93dfff", "#F4A582")
# log-log Pearson correlation: clustering level vs decay intercept.
cor.test(log(levels), log(intercepts))
plot(log(intercepts) ~ log(levels), lwd = 2, pch = 16, cex = 1.5,
     cex.lab = 1.5, cex.axis = 1.5, col = level_cols)
abline(lm(log(intercepts) ~ log(levels)))
# log-log Pearson correlation: clustering level vs number of lineages.
cor.test(log(levels), log(n_lineages))
plot(log(n_lineages) ~ log(levels), lwd = 2, pch = 16, cex = 1.5,
     cex.lab = 1.5, cex.axis = 1.5, col = level_cols)
abline(lm(log(n_lineages) ~ log(levels)))
# log-log Pearson correlation: clustering level vs mean similarity.
cor.test(log(levels), log(mean_sim))
plot(log(mean_sim) ~ log(levels), lwd = 2, pch = 16, cex = 1.5,
     cex.lab = 1.5, cex.axis = 1.5, col = level_cols)
abline(lm(log(mean_sim) ~ log(levels)))
#**END**
| /bin/Collembola/multilevel_distance_decay_Collembola_IBDist_West.R | no_license | AliciaMstt/Multihierarchical_NevadoToluca | R | false | false | 47,760 | r | #**INITIAL STEPS**
#####In excel remove the simbol ## from the names of the table and rename the samples acccording to the code used in the gradient e.g. GRA_S10_D_F_A10
library(stats)
library(base)
library(dplyr)
library(dplyr)
library(tidyr)
library(knitr)
library(PMCMR)
library(vegan)
library(betapart)
library(stringr)
library(permute)
library(lattice)
library(ecodist)
library(ade4)
library(ggplot2)
library(Imap)
#**TABLES AND COMMUNITY MATRIXES**
#####open table with names including Region and habitat parameters
s2_raw_all <- read.table("../genetic/Data_in/Collembola/s2_raw_all_Collembola_threshold.txt", sep = ",", header=TRUE)
dim(s2_raw_all)
#**Table Haplotipos**
#####remove additional columns and leave only names (of haplotipes), samples and taxa (and threshold in this case)
s2_raw_all[,c(1:52,68)]->s2_raw
dim(s2_raw) ##51 samples = 51 plus 1 neg (the second neg from DOM_REPS is not there because all 0)
colnames(s2_raw)
#####Applying the conservative threshold (this is a binary column)
s2_raw[which(s2_raw$conservative_threshold == "1"),]->s2_raw_threshold
s2_raw_threshold [,1:52]->s2_raw_threshold_h ##remove threshold col
dim(s2_raw_threshold_h)
colnames(s2_raw_threshold_h)
#**Table levels clustering**
#####remove additional columns and leave only names (of haplotipes), samples and taxa (and threshold in this case)
s2_raw_all[,c(1:66,68)]->s2_raw
dim(s2_raw) ##49 samples = 48 plus 1 neg (the second neg from DOM_REPS is not there because all 0)
colnames(s2_raw)
##**Applying the conservative threshold (this is a binary column)_0.005**
s2_raw[which(s2_raw$conservative_threshold == "1"),]->s2_raw_threshold
s2_raw_threshold [,1:66]->s2_raw_threshold0.005 ##remove threshold col
dim(s2_raw_threshold0.005)
colnames(s2_raw_threshold0.005)
##**Applying the conservative threshold (this is a binary column)_0.015**
s2_raw[which(s2_raw$conservative_threshold == "1"),]->s2_raw_threshold
s2_raw_threshold [,1:66]->s2_raw_threshold0.015 ##remove threshold col
dim(s2_raw_threshold0.015)
colnames(s2_raw_threshold0.015)
##**Applying the conservative threshold (this is a binary column)_0.020**
s2_raw[which(s2_raw$conservative_threshold == "1"),]->s2_raw_threshold
s2_raw_threshold [,1:66]->s2_raw_threshold0.02 ##remove threshold col
dim(s2_raw_threshold0.02)
colnames(s2_raw_threshold0.02)
##**Applying the conservative threshold (this is a binary column)_0.03**
s2_raw[which(s2_raw$conservative_threshold == "1"),]->s2_raw_threshold
s2_raw_threshold [,1:66]->s2_raw_threshold0.03 ##remove threshold col
dim(s2_raw_threshold0.03)
colnames(s2_raw_threshold0.03)
##**Applying the conservative threshold (this is a binary column)_0.05**
s2_raw[which(s2_raw$conservative_threshold == "1"),]->s2_raw_threshold
s2_raw_threshold [,1:66]->s2_raw_threshold0.05 ##remove threshold col
dim(s2_raw_threshold0.05)
colnames(s2_raw_threshold0.05)
##**Applying the conservative threshold (this is a binary column)_0.075**
s2_raw[which(s2_raw$conservative_threshold == "1"),]->s2_raw_threshold
s2_raw_threshold [,1:66]->s2_raw_threshold0.075 ##remove threshold col
dim(s2_raw_threshold0.075)
colnames(s2_raw_threshold0.075)
##**Applying the conservative threshold (this is a binary column)GMYC_0.029**
s2_raw[which(s2_raw$conservative_threshold == "1"),]->s2_raw_threshold
s2_raw_threshold [,1:66]->s2_raw_threshold0.029 ##remove threshold col
dim(s2_raw_threshold0.029)
colnames(s2_raw_threshold0.029)
#
#**level Haplotipos**
#####delete the ocurrences with less than 4 reads by library (same criteria than denoising)
#####s2_raw_threshold->s2_f4_with_abundance_h
#####s2_f4_with_abundance_h[s2_f4_with_abundance_h<4]<-0 ##2 warning corresponding wiht the columms of the names and taxa
##**transform in present/absence table**
s2_raw_threshold_h->s2_f4_h #NOTA_Nancy: Tengo un subset de Colembolos
s2_f4_h[s2_f4_h>1]<-1 ##2 warning corresponding wiht the columms of the names and taxa
##**checking if there is any row with no presence**
s2_f4_h[,2:52]->data_h #Nota_Nancy: Modifique el numero: 2:50 por 1:52.
rowSums(data_h)
length(which(rowSums(data_h)!=0))
length(which(rowSums(data_h)==0))
##**Collembola**
t(s2_f4_h)->t_s2_f4_h ##trasp
t_s2_f4_h[2:52,]->community_Collembola_h #NOTA_Nancy: Este numero es importante. Colocar exactamente el numero de "s2_f4[,2:52]->data".
colnames(community_Collembola_h)<-t_s2_f4_h[1,]
as.data.frame(community_Collembola_h)->community_Collembola_h ##trasp including col and row names
####community_Acari[-49,]->community_Collembola ##removing neg
dim(community_Collembola_h)
community_Collembola_h[order(row.names(community_Collembola_h)),]->community_Collembola_h ##order samples
write.table (community_Collembola_h, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola_h.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola_h.txt")->community_Collembola_h
####submatrixes of the Nevado Toluca del lado West de los haplotypes. NOTA_Nancy: Quiero hacer tablas que incluyan datos con localidades del lado West dentro del Nevado de Toluca.
dim(community_Collembola_h)
community_Collembola_h[which(str_extract (row.names(community_Collembola_h), "_W_") %in% "_W_"),]->community_Collembola_h_West
dim(community_Collembola_h_West)
community_Collembola_h_West[,which(colSums(community_Collembola_h_West)!=0)]->community_Collembola_h_West ##to remove no data colums
dim(community_Collembola_h_West)
write.table (community_Collembola_h_West, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola_h_West.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola_h_West.txt")->community_Collembola_h_West
#
####loop to create the new matrix combining haplotype by otu pertenency, i.e. submatrix by limit
#**Collembola MATRIX_LIMITE_0.005**
unique(s2_raw_threshold0.005$limite0.005)->levels_limite0.005
data.frame()->s2_raw_Collembola_limite0.005
for (i in 1:length(unique (s2_raw_threshold0.005$limite0.005)))
{
levels_limite0.005[i]->level
s2_raw_threshold0.005[which(s2_raw_threshold0.005$limite0.005==level),]->subcom_level_names
subcom_level_names[,c(2:52)]->subcom_level #delete names,
colSums(subcom_level)->sum
as.data.frame(sum)->sum
t(sum)->sum
row.names(sum)<-subcom_level_names[1,1] #keep the name of the first haplotype
rbind(s2_raw_Collembola_limite0.005,sum)->s2_raw_Collembola_limite0.005
}
#####delete the ocurrences with less than 4 reads by library (same criteria than denoising)
#####s2_raw_Collembola_limite0.005->s2_f4_with_abundance_Collembola_limite0.005
#####s2_f4_with_abundance_Collembola_limite0.005[s2_f4_with_abundance_Collembola_limite0.005<4]<-0 ##2 warning corresponding wiht the columms of the names and taxa
##**transform in present/absence table**
s2_raw_Collembola_limite0.005->s2_raw_Collembola_limite0.005
s2_raw_Collembola_limite0.005[s2_raw_Collembola_limite0.005>1]<-1 ##transform in present/absence table
##**transform in present/absence table**
#####s2_f4_with_abundance->s2_f4 #NOTA_Nancy: Tengo un subset de Coleoptear
#####s2_f4[s2_f4>1]<-1 ##2 warning corresponding wiht the columms of the names and taxa
##**checking if there is any row with no presence**
s2_raw_Collembola_limite0.005[,1:51]->data #Nota_Nancy: Modifique el numero: 2:50 por 1:51, aunque no funcionó. Volví a la version de 2:50
rowSums(data)
length(which(rowSums(data)!=0))
length(which(rowSums(data)==0))
##**Community matrixes (samples in rows and h in cols**
##**Collembola**
t(s2_raw_Collembola_limite0.005)->t_s2_f4_Collembola_limite0.005 ##trasp
t_s2_f4_Collembola_limite0.005[1:51,]->community_Collembola_limite0.005 #NOTA_Nancy: Este numero es importante. Colocar exactamente el numero de "s2_f4[,2:52]->data".
colnames(t_s2_f4_Collembola_limite0.005)<-community_Collembola_limite0.005[1,]
as.data.frame(community_Collembola_limite0.005)->community_Collembola0.005 ##trasp including col and row names
####community_Acari[-49,]->community_Collembola ##removing neg
dim(community_Collembola0.005)
community_Collembola0.005[order(row.names(community_Collembola0.005)),]->community_Collembola0.005 ##order samples
write.table (community_Collembola0.005, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.005.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.005.txt")->community_Collembola0.005
####submatrixes of the Nevado Toluca del lado West de nivel 0.5%. NOTA_Nancy: Quiero hacer tablas que incluyan datos con localidades del lado West dentro del Nevado de Toluca
dim(community_Collembola0.005)
community_Collembola0.005[which(str_extract (row.names(community_Collembola0.005), "_W_") %in% "_W_"),]->community_Collembola0.005_West
dim(community_Collembola0.005_West)
community_Collembola0.005_West[,which(colSums(community_Collembola0.005_West)!=0)]->community_Collembola0.005_West ##to remove no data colums
dim(community_Collembola0.005_West)
write.table (community_Collembola0.005_West, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.005_West.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.005_West.txt")->community_Collembola0.005_West
#
####loop to create the new matrix combining haplotype by otu pertenency, i.e. submatrix by limit
#**Collembola MATRIX_LIMITE_0.015**
unique (s2_raw_threshold0.015$limite0.015)->levels_limite0.015
data.frame()->s2_raw_Collembola_limite0.015
for (i in 1:length(unique (s2_raw_threshold0.015$limite0.015)))
{
levels_limite0.015[i]->level
s2_raw_threshold0.015[which(s2_raw_threshold0.015$limite0.015==level),]->subcom_level_names
subcom_level_names[,c(2:52)]->subcom_level #delete names, level and also the negative column
colSums(subcom_level)->sum
as.data.frame(sum)->sum
t(sum)->sum
row.names(sum)<-subcom_level_names[1,1] #keep the name of the first haplotype
rbind(s2_raw_Collembola_limite0.015,sum)->s2_raw_Collembola_limite0.015
}
#####delete the ocurrences with less than 4 reads by library (same criteria than denoising)
#####s2_raw_Collembola_limite0.015->s2_f4_with_abundance_Collembola_limite0.015
#####s2_f4_with_abundance_Collembola_limite0.015[s2_f4_with_abundance_Collembola_limite0.015<4]<-0 ##2 warning corresponding wiht the columms of the names and taxa
##**transform in present/absence table**
s2_raw_Collembola_limite0.015->s2_raw_Collembola_limite0.015
s2_raw_Collembola_limite0.015[s2_raw_Collembola_limite0.015>1]<-1 ##transform in present/absence table
##**transform in present/absence table**
#####s2_f4_with_abundance->s2_f4 #NOTA_Nancy: Tengo un subset de Coleoptear
#####s2_f4[s2_f4>1]<-1 ##2 warning corresponding wiht the columms of the names and taxa
##**checking if there is any row with no presence**
s2_raw_Collembola_limite0.015[,1:51]->data #Nota_Nancy: Modifique el numero: 2:50 por 1:51, aunque no funcionó. Volví a la version de 2:50
rowSums(data)
length(which(rowSums(data)!=0))
length(which(rowSums(data)==0))
##**Community matrixes (samples in rows and h in cols).**
##**Collembola**
t(s2_raw_Collembola_limite0.015)->t_s2_f4_Collembola_limite0.015 ##trasp
t_s2_f4_Collembola_limite0.015[1:51,]->community_Collembola_limite0.015 #NOTA_Nancy: Este numero es importante. Colocar exactamente el numero de "s2_f4[,2:52]->data".
colnames(t_s2_f4_Collembola_limite0.015)<-community_Collembola_limite0.015[1,]
as.data.frame(community_Collembola_limite0.015)->community_Collembola0.015 ##trasp including col and row names
####community_Acari[-49,]->community_Collembola ##removing neg
dim(community_Collembola0.015)
community_Collembola0.015[order(row.names(community_Collembola0.015)),]->community_Collembola0.015 ##order samples
write.table (community_Collembola0.015, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.015.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.015.txt")->community_Collembola0.015
####submatrixes of the Nevado Toluca del lado West de nivel 1.5%. NOTA_Nancy: Quiero hacer tablas que incluyan datos con localidades del lado West dentro del Nevado de Toluca.
dim(community_Collembola0.015)
community_Collembola0.015[which(str_extract (row.names(community_Collembola0.015), "_W_") %in% "_W_"),]->community_Collembola0.015_West
dim(community_Collembola0.015_West)
community_Collembola0.015_West[,which(colSums(community_Collembola0.015_West)!=0)]->community_Collembola0.015_West ##to remove no data colums
dim(community_Collembola0.015_West)
write.table (community_Collembola0.015_West, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.015_West.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.015_West.txt")->community_Collembola0.015_West
#
####loop to create the new matrix combining haplotype by otu pertenency, i.e. submatrix by limit
#**Collembola MATRIX_LIMITE_0.02**
unique(s2_raw_threshold0.02$limite0.02)->levels_limite0.02
data.frame()->s2_raw_Collembola_limite0.02
for (i in 1:length(unique (s2_raw_threshold0.02$limite0.02)))
{
levels_limite0.02[i]->level
s2_raw_threshold0.02[which(s2_raw_threshold0.02$limite0.02==level),]->subcom_level_names
subcom_level_names[,c(2:52)]->subcom_level #delete names,
colSums(subcom_level)->sum
as.data.frame(sum)->sum
t(sum)->sum
row.names(sum)<-subcom_level_names[1,1] #keep the name of the first haplotype
rbind(s2_raw_Collembola_limite0.02,sum)->s2_raw_Collembola_limite0.02
}
#####delete the ocurrences with less than 4 reads by library (same criteria than denoising)
#####s2_raw_Collembola_limite0.02->s2_f4_with_abundance_Collembola_limite0.02
#####s2_f4_with_abundance_Collembola_limite0.02[s2_f4_with_abundance_Collembola_limite0.02<4]<-0 ##2 warning corresponding wiht the columms of the names and taxa
##**transform in present/absence table**
s2_raw_Collembola_limite0.02->s2_raw_Collembola_limite0.02
s2_raw_Collembola_limite0.02[s2_raw_Collembola_limite0.02>1]<-1 ##transform in present/absence table
##**transform in present/absence table**
#####s2_f4_with_abundance->s2_f4 #NOTA_Nancy: Tengo un subset de Coleoptear
#####s2_f4[s2_f4>1]<-1 ##2 warning corresponding wiht the columms of the names and taxa
##**checking if there is any row with no presence**
s2_raw_Collembola_limite0.02[,1:51]->data #Nota_Nancy: Modifique el numero: 2:50 por 1:51, aunque no funcionó. Volví a la version de 2:50
rowSums(data)
length(which(rowSums(data)!=0))
length(which(rowSums(data)==0))
##**Community matrixes (samples in rows and h in cols).**
##**Collembola**
t(s2_raw_Collembola_limite0.02)->t_s2_f4_Collembola_limite0.02 ##trasp
t_s2_f4_Collembola_limite0.02[1:51,]->community_Collembola_limite0.02 #NOTA_Nancy: Este numero es importante. Colocar exactamente el numero de "s2_f4[,2:52]->data".
colnames(t_s2_f4_Collembola_limite0.02)<-community_Collembola_limite0.02[1,]
as.data.frame(community_Collembola_limite0.02)->community_Collembola0.02 ##trasp including col and row names
####community_Acari[-49,]->community_Collembola ##removing neg
dim(community_Collembola0.02)
community_Collembola0.02[order(row.names(community_Collembola0.02)),]->community_Collembola0.02 ##order samples
write.table (community_Collembola0.02, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.02.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.02.txt")->community_Collembola0.02
####submatrixes of the Nevado Toluca del lado West de nivel 2.0%. NOTA_Nancy: Quiero hacer tablas que incluyan datos con localidades del lado West dentro del Nevado de Toluca.
dim(community_Collembola0.02)
community_Collembola0.02[which(str_extract (row.names(community_Collembola0.02), "_W_") %in% "_W_"),]->community_Collembola0.02_West
dim(community_Collembola0.02_West)
community_Collembola0.02_West[,which(colSums(community_Collembola0.02_West)!=0)]->community_Collembola0.02_West ##to remove no data colums
dim(community_Collembola0.02_West)
write.table (community_Collembola0.02_West, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.02_West.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.02_West.txt")->community_Collembola0.02_West
#
####loop to create the new matrix combining haplotype by otu pertenency, i.e. submatrix by limit
#**Collembola MATRIX_LIMITE_0.03**
unique (s2_raw_threshold0.03$limite0.03)->levels_limite0.03
data.frame()->s2_raw_Collembola_limite0.03
for (i in 1:length(unique (s2_raw_threshold0.03$limite0.03)))
{
levels_limite0.03[i]->level
s2_raw_threshold0.03[which(s2_raw_threshold0.03$limite0.03==level),]->subcom_level_names
subcom_level_names[,c(2:52)]->subcom_level #delete names, level and also the negative column
colSums(subcom_level)->sum
as.data.frame(sum)->sum
t(sum)->sum
row.names(sum)<-subcom_level_names[1,1] #keep the name of the first haplotype
rbind(s2_raw_Collembola_limite0.03,sum)->s2_raw_Collembola_limite0.03
}
##**transform in present/absence table**
s2_raw_Collembola_limite0.03->s2_raw_Collembola_limite0.03
s2_raw_Collembola_limite0.03[s2_raw_Collembola_limite0.03>1]<-1 ##transform in present/absence table
##**transform in present/absence table**
#####s2_f4_with_abundance->s2_f4 #NOTA_Nancy: Tengo un subset de Coleoptear
#####s2_f4[s2_f4>1]<-1 ##2 warning corresponding wiht the columms of the names and taxa
##**checking if there is any row with no presence**
s2_raw_Collembola_limite0.03[,1:51]->data #Nota_Nancy: Modifique el numero: 2:50 por 1:51, aunque no funcionó. Volví a la version de 2:50
rowSums(data)
length(which(rowSums(data)!=0))
length(which(rowSums(data)==0))
##**Community matrixes (samples in rows and h in cols).**
##**Collembola**
t(s2_raw_Collembola_limite0.03)->t_s2_f4_Collembola_limite0.03 ##trasp
t_s2_f4_Collembola_limite0.03[1:51,]->community_Collembola_limite0.03 #NOTA_Nancy: Este numero es importante. Colocar exactamente el numero de "s2_f4[,2:52]->data".
colnames(t_s2_f4_Collembola_limite0.03)<-community_Collembola_limite0.03[1,]
as.data.frame(community_Collembola_limite0.03)->community_Collembola0.03 ##trasp including col and row names
####community_Acari[-49,]->community_Collembola ##removing neg
dim(community_Collembola0.03)
community_Collembola0.03[order(row.names(community_Collembola0.03)),]->community_Collembola0.03 ##order samples
write.table (community_Collembola0.03, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.03.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.03.txt")->community_Collembola0.03
####submatrixes of the Nevado Toluca del lado West de nivel 3.0%. NOTA_Nancy: Quiero hacer tablas que incluyan datos con localidades del lado West dentro del Nevado de Toluca.
dim(community_Collembola0.03)
community_Collembola0.03[which(str_extract (row.names(community_Collembola0.03), "_W_") %in% "_W_"),]->community_Collembola0.03_West
dim(community_Collembola0.03_West)
community_Collembola0.03_West[,which(colSums(community_Collembola0.03_West)!=0)]->community_Collembola0.03_West ##to remove no data colums
dim(community_Collembola0.03_West)
write.table (community_Collembola0.03_West, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.03_West.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.03_West.txt")->community_Collembola0.03_West
#
####loop to create the new matrix combining haplotype by otu pertenency, i.e. submatrix by limit
#**Collembola MATRIX_LIMITE_0.05**
unique (s2_raw_threshold0.05$limite0.05)->levels_limite0.05
data.frame()->s2_raw_Collembola_limite0.05
for (i in 1:length(unique (s2_raw_threshold0.05$limite0.05)))
{
levels_limite0.05[i]->level
s2_raw_threshold0.05[which(s2_raw_threshold0.05$limite0.05==level),]->subcom_level_names
subcom_level_names[,c(2:52)]->subcom_level #delete names, level and also the negative column
colSums(subcom_level)->sum
as.data.frame(sum)->sum
t(sum)->sum
row.names(sum)<-subcom_level_names[1,1] #keep the name of the first haplotype
rbind(s2_raw_Collembola_limite0.05,sum)->s2_raw_Collembola_limite0.05
}
##**transform in present/absence table**
s2_raw_Collembola_limite0.05->s2_raw_Collembola_limite0.05
s2_raw_Collembola_limite0.05[s2_raw_Collembola_limite0.05>1]<-1 ##transform in present/absence table
##**transform in present/absence table**
#####s2_f4_with_abundance->s2_f4 #NOTA_Nancy: Tengo un subset de Coleoptear
#####s2_f4[s2_f4>1]<-1 ##2 warning corresponding wiht the columms of the names and taxa
##**checking if there is any row with no presence**
s2_raw_Collembola_limite0.05[,1:51]->data #Nota_Nancy: Modifique el numero: 2:50 por 1:51, aunque no funcionó. Volví a la version de 2:50
rowSums(data)
length(which(rowSums(data)!=0))
length(which(rowSums(data)==0))
##**Community matrixes (samples in rows and h in cols).**
##**Collembola**
t(s2_raw_Collembola_limite0.05)->t_s2_f4_Collembola_limite0.05 ##trasp
t_s2_f4_Collembola_limite0.05[1:51,]->community_Collembola_limite0.05 #NOTA_Nancy: Este numero es importante. Colocar exactamente el numero de "s2_f4[,2:52]->data".
colnames(t_s2_f4_Collembola_limite0.05)<-community_Collembola_limite0.05[1,]
as.data.frame(community_Collembola_limite0.05)->community_Collembola0.05 ##trasp including col and row names
####community_Acari[-49,]->community_Collembola ##removing neg
dim(community_Collembola0.05)
community_Collembola0.05[order(row.names(community_Collembola0.05)),]->community_Collembola0.05 ##order samples
write.table (community_Collembola0.05, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.05.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.05.txt")->community_Collembola0.05
####submatrixes of the Nevado Toluca del lado West de nivel 5.0%. NOTA_Nancy: Quiero hacer tablas que incluyan datos con localidades del lado West dentro del Nevado de Toluca.
dim(community_Collembola0.05)
community_Collembola0.05[which(str_extract (row.names(community_Collembola0.05), "_W_") %in% "_W_"),]->community_Collembola0.05_West
dim(community_Collembola0.05_West)
community_Collembola0.05_West[,which(colSums(community_Collembola0.05_West)!=0)]->community_Collembola0.05_West ##to remove no data colums
dim(community_Collembola0.05_West)
write.table (community_Collembola0.05_West, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.05_West.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.05_West.txt")->community_Collembola0.05_West
#
####loop to create the new matrix combining haplotype by otu pertenency, i.e. submatrix by limit
#**Collembola MATRIX_LIMITE_0.075**
unique (s2_raw_threshold0.075$limite0.075)->levels_limite0.075
data.frame()->s2_raw_Collembola_limite0.075
for (i in 1:length(unique (s2_raw_threshold0.075$limite0.075)))
{
levels_limite0.075[i]->level
s2_raw_threshold0.075[which(s2_raw_threshold0.075$limite0.075==level),]->subcom_level_names
subcom_level_names[,c(2:52)]->subcom_level #delete names, level and also the negative column
colSums(subcom_level)->sum
as.data.frame(sum)->sum
t(sum)->sum
row.names(sum)<-subcom_level_names[1,1] #keep the name of the first haplotype
rbind(s2_raw_Collembola_limite0.075,sum)->s2_raw_Collembola_limite0.075
}
##**transform in present/absence table**
s2_raw_Collembola_limite0.075->s2_raw_Collembola_limite0.075
s2_raw_Collembola_limite0.075[s2_raw_Collembola_limite0.075>1]<-1 ##transform in present/absence table
##**transform in present/absence table**
#####s2_f4_with_abundance->s2_f4 #NOTA_Nancy: Tengo un subset de Coleoptear
#####s2_f4[s2_f4>1]<-1 ##2 warning corresponding wiht the columms of the names and taxa
##**checking if there is any row with no presence**
s2_raw_Collembola_limite0.075[,1:51]->data #Nota_Nancy: Modifique el numero: 2:50 por 1:51, aunque no funcionó. Volví a la version de 2:50
rowSums(data)
length(which(rowSums(data)!=0))
length(which(rowSums(data)==0))
##**Community matrixes (samples in rows and h in cols).**
##**Collembola**
t(s2_raw_Collembola_limite0.075)->t_s2_f4_Collembola_limite0.075 ##trasp
t_s2_f4_Collembola_limite0.075[1:51,]->community_Collembola_limite0.075 #NOTA_Nancy: Este numero es importante. Colocar exactamente el numero de "s2_f4[,2:52]->data".
colnames(t_s2_f4_Collembola_limite0.075)<-community_Collembola_limite0.075[1,]
as.data.frame(community_Collembola_limite0.075)->community_Collembola0.075 ##trasp including col and row names
####community_Acari[-49,]->community_Collembola ##removing neg
dim(community_Collembola0.075)
community_Collembola0.075[order(row.names(community_Collembola0.075)),]->community_Collembola0.075 ##order samples
write.table (community_Collembola0.075, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.075.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.075.txt")->community_Collembola0.075
####submatrixes of the Nevado Toluca del lado West de nivel 7.5%. NOTA_Nancy: Quiero hacer tablas que incluyan datos con localidades del lado West dentro del Nevado de Toluca.
dim(community_Collembola0.075)
community_Collembola0.075[which(str_extract (row.names(community_Collembola0.075), "_W_") %in% "_W_"),]->community_Collembola0.075_West
dim(community_Collembola0.075_West)
community_Collembola0.075_West[,which(colSums(community_Collembola0.075_West)!=0)]->community_Collembola0.075_West ##to remove no data colums
dim(community_Collembola0.075_West)
write.table (community_Collembola0.075_West, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.075_West.txt") ##this is necessary for the format, not able to solve in other way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.075_West.txt")->community_Collembola0.075_West
#
####Loop building a pooled matrix by OTU membership, i.e. the submatrix for one clustering limit
#**Collembola MATRIX_LIMITE GMYC_0.029**
## For each GMYC (0.029) cluster, sum the read counts of its member
## haplotypes across the 51 sample columns and keep one pooled row,
## named after the cluster's first haplotype.
## NOTE(review): growing a data.frame with rbind() inside the loop and
## naming a variable `sum` (masking base::sum) both work but are fragile.
unique(s2_raw_threshold0.029$limite0.029)->levels_limite0.029
data.frame()->s2_raw_Collembola_limite0.029
for (i in 1:length(unique (s2_raw_threshold0.029$limite0.029)))
{
levels_limite0.029[i]->level
s2_raw_threshold0.029[which(s2_raw_threshold0.029$limite0.029==level),]->subcom_level_names
subcom_level_names[,c(2:52)]->subcom_level #drop the name column, keep the 51 sample columns
colSums(subcom_level)->sum
as.data.frame(sum)->sum
t(sum)->sum
row.names(sum)<-subcom_level_names[1,1] #keep the name of the first haplotype
rbind(s2_raw_Collembola_limite0.029,sum)->s2_raw_Collembola_limite0.029
}
#####delete the occurrences with less than 4 reads by library (same criterion as denoising) -- kept disabled
#####s2_raw_Collembola_limite0.029->s2_f4_with_abundance_Collembola_limite0.029
#####s2_f4_with_abundance_Collembola_limite0.029[s2_f4_with_abundance_Collembola_limite0.029<4]<-0 ##2 warnings corresponding with the columns of the names and taxa
##**transform into presence/absence table**
s2_raw_Collembola_limite0.029->s2_raw_Collembola_limite0.029 ##no-op self-assignment, kept from the abundance-filtering variant above
s2_raw_Collembola_limite0.029[s2_raw_Collembola_limite0.029>1]<-1 ##transform into presence/absence table
##**transform into presence/absence table**
#####s2_f4_with_abundance->s2_f4 #NOTE_Nancy: I have a subset of Coleoptera
#####s2_f4[s2_f4>1]<-1 ##2 warnings corresponding with the columns of the names and taxa
##**check whether any row has no presences at all**
s2_raw_Collembola_limite0.029[,1:51]->data #NOTE_Nancy: I changed the number 2:50 to 1:51, although it did not work; I went back to the 2:50 version. NOTE(review): the code as kept actually uses 1:51.
rowSums(data)
length(which(rowSums(data)!=0))
length(which(rowSums(data)==0))
##**Community matrices (samples in rows and haplotypes in cols).**
##**Collembola*
t(s2_raw_Collembola_limite0.029)->t_s2_f4_Collembola_limite0.029 ##transpose
t_s2_f4_Collembola_limite0.029[1:51,]->community_Collembola_limite0.029 #NOTE_Nancy: This number matters: it must match exactly the count used in "s2_f4[,2:52]->data".
colnames(t_s2_f4_Collembola_limite0.029)<-community_Collembola_limite0.029[1,]
as.data.frame(community_Collembola_limite0.029)->community_Collembola0.029 ##transpose result including col and row names
####community_Acari[-49,]->community_Collembola ##removing neg (kept from an earlier Acari version of this script)
dim(community_Collembola0.029)
community_Collembola0.029[order(row.names(community_Collembola0.029)),]->community_Collembola0.029 ##order samples alphabetically by row name
## Disk round-trip to normalise the object's internal format.
write.table (community_Collembola0.029, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.029.txt") ##this is necessary for the format, not able to solve in another way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.029.txt")->community_Collembola0.029
####Submatrices by SITE in Nevado de Toluca. NOTE_Nancy: I want to build tables that include rainy-season data from one mountain, with localities within the Nevado de Toluca.
dim(community_Collembola0.029)
## Keep West-side samples only ("_W_" in the sample name).
community_Collembola0.029[which(str_extract (row.names(community_Collembola0.029), "_W_") %in% "_W_"),]->community_Collembola0.029_West
dim(community_Collembola0.029_West)
community_Collembola0.029_West[,which(colSums(community_Collembola0.029_West)!=0)]->community_Collembola0.029_West ##remove all-zero columns (haplotypes absent from the West subset)
dim(community_Collembola0.029_West)
write.table (community_Collembola0.029_West, file="../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.029_West.txt") ##this is necessary for the format, not able to solve in another way
read.table ("../genetic/Data_out/Collembola/Collembola_IBD_West/community_Collembola0.029_West.txt")->community_Collembola0.029_West
#**BETADIVERSITY ORDINATIONS by SITE**
#**Collembola**
## Pairwise beta diversity (betapart::beta.pair) for every clustering level,
## Sorensen family. Per the betapart docs this yields turnover (beta.sim),
## nestedness (beta.sne) and total (beta.sor) dissimilarity matrices;
## only beta.sim is used downstream.
##**beta general_Level_Haplotypes**
beta.pair(community_Collembola_h_West, index.family="sorensen")->beta.pair_CollembolaWest_h
##**beta general_Level_0.005**
#####betadiversity by pair of communities using Sorensen on the presence/absence data, with simultaneous estimation of the turnover and nestedness data matrices
beta.pair(community_Collembola0.005_West, index.family="sorensen")->beta.pair_CollembolaWest_0.005
##**beta general_Level_0.015**
#####betadiversity by pair of communities using Sorensen on the presence/absence data, with simultaneous estimation of the turnover and nestedness data matrices
beta.pair(community_Collembola0.015_West, index.family="sorensen")->beta.pair_CollembolaWest_0.015
##**beta general_Level_0.02**
#####betadiversity by pair of communities using Sorensen on the presence/absence data, with simultaneous estimation of the turnover and nestedness data matrices
beta.pair(community_Collembola0.02_West, index.family="sorensen")->beta.pair_CollembolaWest_0.02
##**beta general_Level_0.03**
#####betadiversity by pair of communities using Sorensen on the presence/absence data, with simultaneous estimation of the turnover and nestedness data matrices
beta.pair(community_Collembola0.03_West, index.family="sorensen")->beta.pair_CollembolaWest_0.03
##**beta general_Level_0.05**
#####betadiversity by pair of communities using Sorensen on the presence/absence data, with simultaneous estimation of the turnover and nestedness data matrices
beta.pair(community_Collembola0.05_West, index.family="sorensen")->beta.pair_CollembolaWest_0.05
##**beta general_Level_0.075**
#####betadiversity by pair of communities using Sorensen on the presence/absence data, with simultaneous estimation of the turnover and nestedness data matrices
beta.pair(community_Collembola0.075_West, index.family="sorensen")->beta.pair_CollembolaWest_0.075
##**beta general_Level_0.029**
#####betadiversity by pair of communities using Sorensen on the presence/absence data, with simultaneous estimation of the turnover and nestedness data matrices
beta.pair(community_Collembola0.029_West, index.family="sorensen")->beta.pair_CollembolaWest_0.029
#
##**WAYS TO OBTAIN THE effective resistance MATRIX**
## Read the site-by-site effective-resistance matrix, align its row/column
## order with the (alphabetically ordered) community matrices, blank the
## upper triangle, and convert to a "dist" object for the decay models.
Resistance_matrix_West <- read.table("../spatial/IBDistanceMatrix/geomatrix_Site_West.txt", sep = ",", header=T, row.names = 1)
dim(Resistance_matrix_West)
class(Resistance_matrix_West)
Resistance_matrix_West <- as.matrix(Resistance_matrix_West)
class(Resistance_matrix_West)
Resistance_matrix_West[order(row.names(Resistance_matrix_West)),order(colnames(Resistance_matrix_West))]->Resistance_matrix_West #sort the resistance matrix like the beta matrices ##important: both matrices must share the same sample order
Resistance_matrix_West[upper.tri(Resistance_matrix_West)] <- NA ##keep lower triangle only (as.dist() uses the lower triangle)
Resistance_matrix_West
class(Resistance_matrix_West)
Resistance_matrix_West <- as.dist(Resistance_matrix_West)
Resistance_matrix_West
#
##**Similarity values: 1 - turnover dissimilarity, plus 0.001 to avoid log(0)**
1-beta.pair_CollembolaWest_h$beta.sim +0.001->all_h_betasim
1-beta.pair_CollembolaWest_0.005$beta.sim+0.001->all_0.005_betasim
1-beta.pair_CollembolaWest_0.015$beta.sim+0.001->all_0.015_betasim
1-beta.pair_CollembolaWest_0.02$beta.sim+0.001->all_0.02_betasim
1-beta.pair_CollembolaWest_0.03$beta.sim+0.001->all_0.03_betasim
1-beta.pair_CollembolaWest_0.05$beta.sim+0.001->all_0.05_betasim
1-beta.pair_CollembolaWest_0.075$beta.sim+0.001->all_0.075_betasim
1-beta.pair_CollembolaWest_0.029$beta.sim+0.001->all_0.029_betasim
#
##**log-log linear regression across multiple levels (fractality 1)**
## Matrix regression (ecodist::MRM) of each coarser level's log-similarity
## on the haplotype-level log-similarity; each call prints its own summary.
MRM(log(all_0.005_betasim)~log(all_h_betasim))
MRM(log(all_0.015_betasim)~log(all_h_betasim))
MRM(log(all_0.02_betasim)~log(all_h_betasim))
MRM(log(all_0.03_betasim)~log(all_h_betasim))
MRM(log(all_0.05_betasim)~log(all_h_betasim))
MRM(log(all_0.075_betasim)~log(all_h_betasim))
MRM(log(all_0.029_betasim)~log(all_h_betasim))
#
##**log-transformed copies, used only to draw the regression plots below**
log(all_h_betasim)->log_all_h_betasim
log(all_0.005_betasim)->log_all_0.005_betasim
log(all_0.015_betasim)->log_all_0.015_betasim
log(all_0.02_betasim)->log_all_0.02_betasim
log(all_0.03_betasim)->log_all_0.03_betasim
log(all_0.05_betasim)->log_all_0.05_betasim
log(all_0.075_betasim)->log_all_0.075_betasim
log(all_0.029_betasim)->log_all_0.029_betasim
#
###estimated <- seq(-4, 0, 0.01) new
## For each level, fit lm(log similarity_level ~ log similarity_h), print
## its summary, and predict over a fixed grid of log-similarity values;
## the predictions (counts.lineal_*) draw the fitted lines in the plot below.
## NOTE(review): `estimated` is re-assigned identically before every
## predict() call; assigning it once would suffice.
linear.model_0.005 <- lm(log_all_0.005_betasim~ log_all_h_betasim)
summary(linear.model_0.005)
estimated <- seq(-4, 0, 0.01)
counts.lineal_0.005 <- predict(linear.model_0.005,list(log_all_h_betasim=estimated))
linear.model_0.015 <- lm(log_all_0.015_betasim~ log_all_h_betasim)
summary(linear.model_0.015)
estimated <- seq(-4, 0, 0.01)
counts.lineal_0.015 <- predict(linear.model_0.015,list(log_all_h_betasim=estimated))
linear.model_0.02 <- lm(log_all_0.02_betasim~ log_all_h_betasim)
summary(linear.model_0.02)
estimated <- seq(-4, 0, 0.01)
counts.lineal_0.02 <- predict(linear.model_0.02,list(log_all_h_betasim=estimated))
linear.model_0.03 <- lm(log_all_0.03_betasim~ log_all_h_betasim)
summary(linear.model_0.03)
estimated <- seq(-4, 0, 0.01)
counts.lineal_0.03 <- predict(linear.model_0.03,list(log_all_h_betasim=estimated))
linear.model_0.05 <- lm(log_all_0.05_betasim~ log_all_h_betasim)
summary(linear.model_0.05)
estimated <- seq(-4, 0, 0.01)
counts.lineal_0.05 <- predict(linear.model_0.05,list(log_all_h_betasim=estimated))
linear.model_0.075 <- lm(log_all_0.075_betasim~ log_all_h_betasim)
summary(linear.model_0.075)
estimated <- seq(-4, 0, 0.01)
counts.lineal_0.075 <- predict(linear.model_0.075,list(log_all_h_betasim=estimated))
linear.model_0.029 <- lm(log_all_0.029_betasim~ log_all_h_betasim)
summary(linear.model_0.029)
estimated <- seq(-4, 0, 0.01)
counts.lineal_0.029 <- predict(linear.model_0.029,list(log_all_h_betasim=estimated))
#
##palette(gray(0:20 / 20))
##colors: "#003695", "#c997a9", "#9d9cc6", "#d49e57", "#9cb15b", "#fbd048", "#93dfff", "#F4A582"
## Scatter of each level's log-similarity against the haplotype-level
## log-similarity, one colour per level, overlaid with the fitted lm lines.
## NOTE(review): the xlab/ylab arguments passed to lines() are ignored by
## that function; they look like leftovers from a plot() template.
plot(log_all_h_betasim, log_all_0.005_betasim, pch=20, main="log_all_forestCollembola_similarity_multilevel", col = "#003695", ylim=c(-3.4,-0.05),xlim=c(-3.8,-0.1),ylab="log_similarity",xlab="log_similarity_h_level", cex.lab= 1.5, cex.axis= 1.5)
lines(estimated, counts.lineal_0.005,lwd=2, col = "#003695", xlab = "Time (s)", ylab = "Counts")
points(log_all_h_betasim, log_all_0.015_betasim, pch=20, col="#c997a9")
lines(estimated, counts.lineal_0.015,lwd=2, col = "#c997a9", xlab = "Time (s)", ylab = "Counts")
points(log_all_h_betasim, log_all_0.02_betasim, pch=20, col="#9d9cc6")
lines(estimated, counts.lineal_0.02,lwd=2, col = "#9d9cc6", xlab = "Time (s)", ylab = "Counts")
points(log_all_h_betasim, log_all_0.029_betasim, pch=20, col="#F4A582")
lines(estimated, counts.lineal_0.029,lwd=2, col="#F4A582", xlab = "Time (s)", ylab = "Counts")
points(log_all_h_betasim, log_all_0.03_betasim, pch=20, col="#d49e57")
lines(estimated, counts.lineal_0.03,lwd=2, col = "#d49e57", xlab = "Time (s)", ylab = "Counts")
points(log_all_h_betasim, log_all_0.05_betasim, pch=20, col="#fbd048")
lines(estimated, counts.lineal_0.05,lwd=2, col = "#fbd048", xlab = "Time (s)", ylab = "Counts")
points(log_all_h_betasim, log_all_0.075_betasim, pch=20, col= "#93dfff")
lines(estimated, counts.lineal_0.075,lwd=2, col = "#93dfff", xlab = "Time (s)", ylab = "Counts")
#
##**Decay using geomatrix**
## Exponential distance-decay models of assemblage similarity against the
## effective-resistance "dist" matrix (betapart::decay.model), one per
## clustering level. Each model is fitted ONCE, stored, and then printed by
## naming the object at top level; the original fitted every model twice
## (once to print the call's result, once to assign it), doubling the
## computation for identical printed output.
decay.model(all_h_betasim,Resistance_matrix_West,model.type = "exp",y.type="sim")->decay_h
decay_h
decay.model(all_0.005_betasim,Resistance_matrix_West,model.type = "exp",y.type="sim")->decay_0.005
decay_0.005
decay.model(all_0.015_betasim,Resistance_matrix_West,model.type = "exp",y.type="sim")->decay_0.015
decay_0.015
decay.model(all_0.02_betasim,Resistance_matrix_West,model.type = "exp",y.type="sim")->decay_0.02
decay_0.02
decay.model(all_0.03_betasim,Resistance_matrix_West,model.type = "exp",y.type="sim")->decay_0.03
decay_0.03
decay.model(all_0.05_betasim,Resistance_matrix_West,model.type = "exp",y.type="sim")->decay_0.05
decay_0.05
decay.model(all_0.075_betasim,Resistance_matrix_West,model.type = "exp",y.type="sim")->decay_0.075
decay_0.075
decay.model(all_0.029_betasim,Resistance_matrix_West,model.type = "exp",y.type="sim")->decay_0.029
decay_0.029
#
##palette(gray(0:20 / 20))
##colors: "#003695", "#c997a9", "#9d9cc6", "#d49e57", "#9cb15b", "#fbd048", "#93dfff", "#F4A582"
## Three figure variants of the distance-decay curves (betapart's
## plot.decay); they differ only in which levels are drawn -- the
## commented-out plot.decay() lines are the omitted levels.
##**Plot with 6 levels: h, 1.5, 3, 5, 7.5, GMYC**
##**Plot all levels**
plot.decay(decay_h, ylim=c(0,1.0), xlim=c(1,3200), pch=20, lwd=4, cex.lab= 1.5, cex.axis= 1.5, col="#003695")
##plot.decay(decay_0.005,add=T,pch=20,lwd=4,col="#c997a9")
plot.decay(decay_0.015,add=T,pch=20,lwd=4,col="#9d9cc6")
##plot.decay(decay_0.02,add=T,pch=20,lwd=4,col="#d49e57")
plot.decay(decay_0.03,add=T,pch=20,lwd=4,col="#9cb15b")
plot.decay(decay_0.05,add=T,pch=20,lwd=4,col="#fbd048")
plot.decay(decay_0.075,add=T,pch=20,lwd=4,col="#93dfff")
plot.decay(decay_0.029,add=T, pch=20, lty=3, lwd=4, col="#F4A582")
##**Plot levels: h, 3, 5, and GMYC.**
##**Plot all levels**
plot.decay(decay_h, ylim=c(0,1.0), xlim=c(1,3200), pch=20, lwd=4, cex.lab= 1.5, cex.axis= 1.5, col="#003695")
##plot.decay(decay_0.005,add=T,pch=20,lwd=4,col="#c997a9")
##plot.decay(decay_0.015,add=T,pch=20,lwd=4,col="#9d9cc6")
##plot.decay(decay_0.02,add=T,pch=20,lwd=4,col="#d49e57")
plot.decay(decay_0.03,add=T,pch=20,lwd=4,col="#9cb15b")
plot.decay(decay_0.05,add=T,pch=20,lwd=4,col="#fbd048")
plot.decay(decay_0.075,add=T,pch=20,lwd=4,col="#93dfff")
plot.decay(decay_0.029,add=T, pch=20, lty=3, lwd=4, col="#F4A582")
##**Plot with levels: h, 0.5, 1.5, 3, 5, 7.5, GMYC**
##**Plot all levels**
plot.decay(decay_h, ylim=c(0,1.0), xlim=c(1,3200), pch=20, lwd=4, cex.lab= 1.5, cex.axis= 1.5, col="#003695")
plot.decay(decay_0.005,add=T,pch=20,lwd=4,col="#c997a9")
plot.decay(decay_0.015,add=T,pch=20,lwd=4,col="#9d9cc6")
##plot.decay(decay_0.02,add=T,pch=20,lwd=4,col="#d49e57")
plot.decay(decay_0.029,add=T,pch=20,lwd=4,col="#92000A")
plot.decay(decay_0.03,add=T,pch=20,lty=3,lwd=4,col="#9cb15b")
plot.decay(decay_0.05,add=T,pch=20,lwd=4,col="#fbd048")
plot.decay(decay_0.075,add=T,pch=20,lwd=4,col="#93dfff")
#
##**r-squared and p-values of the distance decays, plus slopes (all levels)**
## Barplot of each decay model's pseudo-R^2, annotated with its p-value
## (text above each bar); a second barplot shows the decay slopes.
## NOTE(review): `po=3` relies on partial matching of text()'s `pos`
## argument; `b.slope` is built without column names, so the slope barplot
## has unlabelled bars.
cbind (decay_h$pseudo.r.squared,decay_0.005$pseudo.r.squared,decay_0.015$pseudo.r.squared,decay_0.02$pseudo.r.squared,decay_0.03$pseudo.r.squared,decay_0.05$pseudo.r.squared, decay_0.075$pseudo.r.squared, decay_0.029$pseudo.r.squared)->rsquared
colnames(rsquared)<-c("h","0.005","0.015","0.02","0.03","0.05", "0.075","GMYC")
rsquared
cbind (decay_h$p.value, decay_0.005$p.value, decay_0.015$p.value, decay_0.02$p.value, decay_0.03$p.value,decay_0.05$p.value, decay_0.075$p.value, decay_0.029$p.value)->p.value
rbind(rsquared,p.value)->rsquared
text (x=barplot(rsquared[1,],ylim=c(0,1.0),cex.lab= 1.4, cex.axis= 1.4, cex=1.4, xlab="levels", ylab="exp_var_geomatrix", main="all_forestCollembola_geomatrix_exp_var"), y=rsquared[1,],label=rsquared[2,],po=3,cex=0.9)
cbind (decay_h$b.slope,decay_0.005$b.slope,decay_0.015$b.slope, decay_0.02$b.slope, decay_0.03$b.slope,decay_0.05$b.slope, decay_0.075$b.slope, decay_0.029$b.slope)->b.slope
barplot(b.slope,main="all_forestCollembola_geomatrix_slopes_ddcay")
##**r-squared and p-values of the distance decays, plus slopes (levels: h, 1.5, 3, 5, 7.5, GMYC)**
## Same summary as the all-levels block, omitting the 0.005 and 0.02 levels.
cbind (decay_h$pseudo.r.squared,decay_0.015$pseudo.r.squared,decay_0.03$pseudo.r.squared,decay_0.05$pseudo.r.squared,decay_0.075$pseudo.r.squared, decay_0.029$pseudo.r.squared)->rsquared
colnames(rsquared)<-c("h", "0.015","0.03","0.05", "0.075", "GMYC")
rsquared
cbind (decay_h$p.value, decay_0.015$p.value, decay_0.03$p.value,decay_0.05$p.value, decay_0.075$p.value, decay_0.029$p.value)->p.value
rbind(rsquared,p.value)->rsquared
text (x=barplot(rsquared[1,],ylim=c(0,1.0),cex.lab= 1.4, cex.axis= 1.4, cex=1.4, xlab="levels", ylab="exp_var_geomatrix", main="all_forestCollembola_geomatrix_exp_var"), y=rsquared[1,],label=rsquared[2,],po=3,cex=0.9)
## FIX: a missing comma previously fused the last two terms into
## `decay_0.075$b.slopedecay_0.029$b.slope`, which evaluates to NULL
## (no such element, then NULL$b.slope); cbind() silently drops NULL
## arguments, so BOTH the 0.075 and GMYC slopes were missing from the plot.
cbind (decay_h$b.slope,decay_0.015$b.slope, decay_0.03$b.slope,decay_0.05$b.slope, decay_0.075$b.slope, decay_0.029$b.slope)->b.slope
barplot(b.slope,main="all_forestCollembola_geomatrix_slopes_ddcay")
##**r-squared and p-values of the distance decays, plus slopes (levels: h, 3, 5 and GMYC)**
## Four-level subset of the decay summaries.
cbind (decay_h$pseudo.r.squared,decay_0.03$pseudo.r.squared,decay_0.05$pseudo.r.squared, decay_0.029$pseudo.r.squared)->rsquared
colnames(rsquared)<-c("h", "0.03","0.05", "GMYC")
rsquared
cbind (decay_h$p.value, decay_0.03$p.value,decay_0.05$p.value, decay_0.029$p.value)->p.value
rbind(rsquared,p.value)->rsquared
text (x=barplot(rsquared[1,],ylim=c(0,1.0),cex.lab= 1.4, cex.axis= 1.4, cex=1.4, xlab="levels", ylab="exp_var_geomatrix", main="all_forestCollembola_geomatrix_exp_var"), y=rsquared[1,],label=rsquared[2,],po=3,cex=0.9)
cbind (decay_h$b.slope,decay_0.03$b.slope,decay_0.05$b.slope, decay_0.029$b.slope)->b.slope
barplot(b.slope,main="all_forestCollembola_geomatrix_slopes_ddcay")
##**r-squared and p-values of the distance decays, plus slopes (levels: h, 0.5, 1.5, 3, 5, 7.5)**
## Six-level subset without the GMYC (0.029) and 0.02 levels.
cbind (decay_h$pseudo.r.squared,decay_0.005$pseudo.r.squared,decay_0.015$pseudo.r.squared,decay_0.03$pseudo.r.squared, decay_0.05$pseudo.r.squared, decay_0.075$pseudo.r.squared)->rsquared
colnames(rsquared)<-c("h","0.005","0.015","0.03","0.05", "0.075")
rsquared
cbind (decay_h$p.value, decay_0.005$p.value, decay_0.015$p.value, decay_0.03$p.value, decay_0.05$p.value, decay_0.075$p.value)->p.value
rbind(rsquared,p.value)->rsquared
text (x=barplot(rsquared[1,],ylim=c(0,1.0),cex.lab= 1.4, cex.axis= 1.4, cex=1.4, xlab="levels", ylab="exp_var_geomatrix", main="all_forestCollembola_geomatrix_exp_var"), y=rsquared[1,],label=rsquared[2,],po=3,cex=0.9)
cbind (decay_h$b.slope,decay_0.005$b.slope,decay_0.015$b.slope, decay_0.03$b.slope,decay_0.05$b.slope, decay_0.075$b.slope)->b.slope
barplot(b.slope,main="all_forestCollembola_geomatrix_slopes_ddcay")
##**log-log Pearson correlations (fractality 2)**
## Correlate, across clustering levels, the logs of: the decay intercepts,
## the number of lineages, and the mean multi-site similarity, against the
## log of an ordinal "level" index.
## NOTE(review): `levels` skips 8 (c(1..7, 9)) -- presumably a deliberate
## placement of the GMYC level on the axis, but worth confirming; the name
## also masks base::levels() for the remainder of this script.
levels<-c(1,2,3,4,5,6,7,9)
rbind (decay_h$a.intercept,decay_0.005$a.intercept,decay_0.015$a.intercept,decay_0.02$a.intercept,decay_0.03$a.intercept,decay_0.05$a.intercept, decay_0.075$a.intercept, decay_0.029$a.intercept)->intercepts
intercepts
##**levels: h, 0.005, 0.015, 0.02, 0.03, 0.05, 0.075, 0.029**
##**beta.pair_CollembolaWest_h**
## Multi-site Sorensen beta diversity (betapart::beta.multi) per level;
## only the turnover component (beta.SIM) is kept, overwriting the list.
beta.multi(community_Collembola_h_West, index.family="sorensen")->diss_mean_h
diss_mean_h$beta.SIM->diss_mean_h
beta.multi(community_Collembola0.005_West, index.family="sorensen")->diss_mean_0.005
diss_mean_0.005$beta.SIM->diss_mean_0.005
beta.multi(community_Collembola0.015_West, index.family="sorensen")->diss_mean_0.015
diss_mean_0.015$beta.SIM->diss_mean_0.015
beta.multi(community_Collembola0.02_West, index.family="sorensen")->diss_mean_0.02
diss_mean_0.02$beta.SIM->diss_mean_0.02
beta.multi(community_Collembola0.03_West, index.family="sorensen")->diss_mean_0.03
diss_mean_0.03$beta.SIM->diss_mean_0.03
beta.multi(community_Collembola0.05_West, index.family="sorensen")->diss_mean_0.05
diss_mean_0.05$beta.SIM->diss_mean_0.05
beta.multi(community_Collembola0.075_West, index.family="sorensen")->diss_mean_0.075
diss_mean_0.075$beta.SIM->diss_mean_0.075
beta.multi(community_Collembola0.029_West, index.family="sorensen")->diss_mean_0.029
diss_mean_0.029$beta.SIM->diss_mean_0.029
rbind(diss_mean_h,diss_mean_0.005,diss_mean_0.015,diss_mean_0.02,diss_mean_0.03,diss_mean_0.05,diss_mean_0.075, diss_mean_0.029)->mean_diss
1-mean_diss+0.001->mean_sim ##multi-site similarity; +0.001 avoids log(0) below
mean_sim
## Number of lineages per level = number of columns of each community matrix.
dim(community_Collembola_h_West)[2]->n_h
dim(community_Collembola0.005_West)[2]->n_0.005
dim(community_Collembola0.015_West)[2]->n_0.015
dim(community_Collembola0.02_West)[2]->n_0.02
dim(community_Collembola0.03_West)[2]->n_0.03
dim(community_Collembola0.05_West)[2]->n_0.05
dim(community_Collembola0.075_West)[2]->n_0.075
dim(community_Collembola0.029_West)[2]->n_0.029
rbind (n_h,n_0.005,n_0.015,n_0.02,n_0.03,n_0.05,n_0.075, n_0.029)->n_lineages
n_lineages
## Pearson correlations and log-log plots with fitted lines; point colours
## follow the per-level palette used throughout this script.
cor.test(log(levels),log(intercepts))
plot(log(intercepts)~log(levels), lwd=2, pch=16, cex=1.5, cex.lab= 1.5, cex.axis=1.5, col= c("#003695", "#c997a9", "#9d9cc6", "#d49e57", "#9cb15b", "#fbd048", "#93dfff", "#F4A582"))
abline(lm(log(intercepts)~log(levels)))
cor.test(log(levels),log(n_lineages))
plot(log(n_lineages)~log(levels), lwd=2, pch=16,cex=1.5, cex.lab= 1.5, cex.axis= 1.5, col= c("#003695", "#c997a9", "#9d9cc6", "#d49e57", "#9cb15b", "#fbd048", "#93dfff", "#F4A582"))
abline(lm(log(n_lineages)~log(levels)))
cor.test(log(levels),log(mean_sim))
plot(log(mean_sim)~log(levels), lwd=2, pch=16,cex=1.5, cex.lab= 1.5, cex.axis= 1.5, col= c("#003695", "#c997a9", "#9d9cc6", "#d49e57", "#9cb15b", "#fbd048", "#93dfff", "#F4A582"))
abline(lm(log(mean_sim)~log(levels)))
#**END**
|
\name{cuModuleGetTexRef}
\alias{cuModuleGetTexRef}
\title{Returns a handle to a texture reference}
\description{ Returns the handle of the texture reference of name \code{name}
in the module \code{hmod}. If no texture reference of that name exists,
\code{cuModuleGetTexRef}() returns \code{CUDA_ERROR_NOT_FOUND}. This texture reference
handle should not be destroyed, since it will be destroyed when the module
is unloaded.}
\usage{cuModuleGetTexRef(hmod, name)}
\arguments{
\item{hmod}{Module to retrieve texture reference from}
\item{name}{Name of texture reference to retrieve}
}
\value{pTexRef}
\seealso{\code{\link{cuModuleGetFunction}}
\code{\link{cuModuleGetGlobal}}
\code{\link{cuModuleGetSurfRef}}
\code{\link{cuModuleLoad}}
\code{\link{cuModuleLoadData}}
\code{\link{cuModuleLoadDataEx}}
\code{\link{cuModuleLoadFatBinary}}
\code{\link{cuModuleUnload}}}
\references{\url{http://docs.nvidia.com/cuda/cuda-driver-api/index.htm}}
\keyword{programming}
\concept{GPU}
| /man/cuModuleGetTexRef.Rd | no_license | xfbingshan/RCUDA | R | false | false | 982 | rd | \name{cuModuleGetTexRef}
\alias{cuModuleGetTexRef}
\title{Returns a handle to a texture reference}
\description{ Returns the handle of the texture reference of name \code{name}
in the module \code{hmod}. If no texture reference of that name exists,
\code{cuModuleGetTexRef}() returns \code{CUDA_ERROR_NOT_FOUND}. This texture reference
handle should not be destroyed, since it will be destroyed when the module
is unloaded.}
\usage{cuModuleGetTexRef(hmod, name)}
\arguments{
\item{hmod}{Module to retrieve texture reference from}
\item{name}{Name of texture reference to retrieve}
}
\value{pTexRef}
\seealso{\code{\link{cuModuleGetFunction}}
\code{\link{cuModuleGetGlobal}}
\code{\link{cuModuleGetSurfRef}}
\code{\link{cuModuleLoad}}
\code{\link{cuModuleLoadData}}
\code{\link{cuModuleLoadDataEx}}
\code{\link{cuModuleLoadFatBinary}}
\code{\link{cuModuleUnload}}}
\references{\url{http://docs.nvidia.com/cuda/cuda-driver-api/index.htm}}
\keyword{programming}
\concept{GPU}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R, R/cpda.R
\name{rplba}
\alias{rplba}
\alias{rplba1}
\alias{rplba2}
\alias{rplba3}
\alias{rplba4}
\alias{rplbaR1}
\alias{rplbaR2}
\alias{rplbaR3}
\title{Generate Random Choice Response Times using pLBA Model}
\usage{
rplba(n, pVec)
rplba1(n, pVec)
rplba2(n, pVec)
rplba3(n, pVec)
rplba4(n, pVec)
rplbaR1(n = 10, pVec = c(A = 1.51, b = 2.7, v1 = 3.32, v2 = 2.24, w1 = 1.51,
w2 = 3.69, sv = 1, rD = 0.31, swt = 0.5, t0 = 0.08))
rplbaR2(n = 10, pVec = c(A1 = 1.51, A2 = 1.51, b1 = 2.7, b2 = 2.7, v1 =
3.32, v2 = 2.24, w1 = 1.51, w2 = 3.69, sv1 = 1, sv2 = 1, sw1 = 1, sw2 = 1, rD
= 0.31, swt = 0.5, t0 = 0.08))
rplbaR3(n, pVec = c(A1 = 1.5, A2 = 1.5, B1 = 1.2, B2 = 1.2, C1 = 0.3, C2 =
0.3, v1 = 3.32, v2 = 2.24, w1 = 1.51, w2 = 3.69, sv1 = 1, sv2 = 1, sw1 = 1,
sw2 = 1, rD = 0.3, tD = 0.3, swt = 0.5, t0 = 0.08))
}
\arguments{
\item{n}{number of observations. Must be an integer}
\item{pVec}{a numeric vector storing pLBA model parameters. The sequence is
critical. See details for the sequence.}
}
\value{
A \code{n x 2} matrix with a first column storing choices and second
column storing response times.
}
\description{
This function uses two-accumulator piecewise LBA model to generate random
choice RTs. There are 3 variants: \code{rplba}, \code{rplba1}, and
\code{rplba2}. Each of them has a corresponding R version,
\code{rplbaR}, \code{rplbaR1}, and \code{rplbaR2}, for the purpose of
speed testing. Because of the difference between the random number
generators in C and R, they do not generate exactly identical RTs. When generating large
enough observations, the distributions generated by R and C will match.
}
\details{
The main function \code{rplba} implements a more flexible
version of pLBA random number generator than the other two. It uses the
following parameterisation (order matters):
\itemize{
\item \bold{\emph{A1}} accumulator 1 start-point upper bound. \code{A} is
the upper bound of the interval \code{[0, A]}, which is used by an uniform
distribution to generate a start-point. Average amount of
prior evidence (i.e., before accumulation process even begins) across trials
is \code{A/2}.
\item \bold{\emph{A2}} accumulator 2 start-point upper bound.
\item \bold{\emph{B1}} accumulator 1 traveling distance. Note this is not
a decision threshold!. LBA convention denotes decision threshold/caution as
b (lowercase) and traveling distance as B (uppercase). \code{B=b-A} is
the traveling distance, and \code{b-A/2} is a measure of average
\emph{decision caution}.
\item \bold{\emph{B2}} accumulator 2 traveling distance.
\item \bold{\emph{C1}} the amount of traveling distance change for
accumulator 1 at the stage 2.
\item \bold{\emph{C2}} the amount of traveling distance change for
accumulator 2 at the stage 2.
\item \bold{\emph{v1}} accumulator 1 drift rate, stage 1
\item \bold{\emph{v2}} accumulator 2 drift rate, stage 1
\item \bold{\emph{w1}} accumulator 1 drift rate, stage 2
\item \bold{\emph{w2}} accumulator 2 drift rate, stage 2
\item \bold{\emph{sv1}} accumulator 1 drift rate standard deviation,
stage 1.
\item \bold{\emph{sv2}} accumulator 2 drift rate standard deviation,
stage 1.
\item \bold{\emph{sw1}} accumulator 1 drift rate standard deviation,
stage 2.
\item \bold{\emph{sw2}} accumulator 2 drift rate standard deviation,
stage 2.
\item \bold{\emph{rD}} the delay duration while stage 1 drift rate switches
to stage 2 drift rate
\item \bold{\emph{tD}} the delay duration while stage 1 threshold switches
to stage 2 threshold
\item \bold{\emph{swt}} switch time, usually determined by experimental
design.
\item \bold{\emph{t0}} non-decision time in second.
}
\code{rplba1} uses the following parameterisation:
\itemize{
\item \bold{\emph{A}} a common start-point interval for both accumulators.
\item \bold{\emph{b}} a common response threshold for both accumulators.
\item \bold{\emph{v1}} accumulator 1 drift rate, stage 1
\item \bold{\emph{v2}} accumulator 2 drift rate, stage 1
\item \bold{\emph{w1}} accumulator 1 drift rate, stage 2
\item \bold{\emph{w2}} accumulator 2 drift rate, stage 2
\item \bold{\emph{sv}} a common standard deviation for both accumulators
\item \bold{\emph{rD}} a delay period while drift rate switch to a
second stage process
\item \bold{\emph{swt}} switch time, usually determined by experimental
design
\item \bold{\emph{t0}} non-decision time in second.
}
\code{rplba2} uses the following parameterisation:
\itemize{
\item \bold{\emph{A1}} start-point interval of the accumulator 1.
\item \bold{\emph{A2}} start-point interval of the accumulator 2.
\item \bold{\emph{b1}} accumulator 1 response threshold.
\item \bold{\emph{b2}} accumulator 2 response threshold.
\item \bold{\emph{v1}} accumulator 1 drift rate, stage 1
\item \bold{\emph{v2}} accumulator 2 drift rate, stage 1
\item \bold{\emph{w1}} accumulator 1 drift rate, stage 2
\item \bold{\emph{w2}} accumulator 2 drift rate, stage 2
\item \bold{\emph{sv1}} the standard deviation of accumulator 1 drift rate
during stage 1.
\item \bold{\emph{sv2}} the standard deviation of accumulator 2 drift rate
during stage 1.
\item \bold{\emph{sw1}} the standard deviation of accumulator 1 drift rate
during stage 2.
\item \bold{\emph{sw2}} the standard deviation of accumulator 2 drift rate
during stage 2.
\item \bold{\emph{rD}} a delay period while drift rate switch to a
second stage process
\item \bold{\emph{swt}} switch time, usually determined by experimental
design
\item \bold{\emph{t0}} non-decision time in second.
}
}
\examples{
################
## Example 1 ##
################
pVec3.1 <- c(A1=1.51, A2=1.51, B1=1.2, B2=1.2, C1=.3, C2=.3, v1=3.32,
v2=2.24, w1=1.51, w2=3.69, sv1=1, sv2=1, sw1=1, sw2=1, rD=0.1,
tD=.1, swt=0.5, t0=0.08)
pVec3.2 <- c(A1=1.51, A2=1.51, B1=1.2, B2=1.2, C1=.3, C2=.3, v1=3.32,
v2=2.24, w1=1.51, w2=3.69, sv1=1, sv2=1, sw1=1, sw2=1, rD=0.1,
tD=.15, swt=0.5, t0=0.08)
pVec3.3 <- c(A1=1.51, A2=1.51, B1=1.2, B2=1.2, C1=.3, C2=.3, v1=3.32,
v2=2.24, w1=1.51, w2=3.69, sv1=1, sv2=1, sw1=1, sw2=1, rD=0.15,
tD=.1, swt=0.5, t0=0.08)
n <- 1e5
set.seed(123); system.time(dat5.1 <- cpda::rplbaR(n, pVec3.1))
set.seed(123); system.time(dat5.2 <- cpda::rplbaR(n, pVec3.2))
set.seed(123); system.time(dat5.3 <- cpda::rplbaR(n, pVec3.3))
set.seed(123); system.time(dat6.1 <- cpda::rplba( n, pVec3.1))
set.seed(123); system.time(dat6.2 <- cpda::rplba( n, pVec3.2))
set.seed(123); system.time(dat6.3 <- cpda::rplba( n, pVec3.3))
tmp5.1 <- data.frame(choice=factor(dat5.1[,1]), rt=dat5.1[,2])
tmp5.2 <- data.frame(choice=factor(dat5.2[,1]), rt=dat5.2[,2])
tmp5.3 <- data.frame(choice=factor(dat5.3[,1]), rt=dat5.3[,2])
tmp6.1 <- data.frame(choice=factor(dat6.1[,1]), rt=dat6.1[,2])
tmp6.2 <- data.frame(choice=factor(dat6.2[,1]), rt=dat6.2[,2])
tmp6.3 <- data.frame(choice=factor(dat6.3[,1]), rt=dat6.3[,2])
tmp5.1$fun <- "R"
tmp5.2$fun <- "R"
tmp5.3$fun <- "R"
tmp6.1$fun <- "C"
tmp6.2$fun <- "C"
tmp6.3$fun <- "C"
tmp5.1$vec <- "1"
tmp5.2$vec <- "2"
tmp5.3$vec <- "3"
tmp6.1$vec <- "1"
tmp6.2$vec <- "2"
tmp6.3$vec <- "3"
df <- rbind(tmp5.1, tmp5.2, tmp5.3, tmp6.1, tmp6.2, tmp6.3)
df$fun <- factor(df$fun)
## Show R and C functions produce almost identical distributions
\dontrun{
## Set up a colour palette
cb <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2",
"#D55E00", "#CC79A7")
require(ggplot2)
ggplot(data=df, aes(x = rt, fill=fun, color=fun)) +
geom_density(alpha=0.2) +
facet_grid(vec~ choice) +
scale_fill_manual(values=cb)
## Or you can use lattice or base graphics
require(lattice)
histogram( ~rt | vec+choice+fun, data=df, breaks="fd", type="density",
xlab="Response Time (s)",
panel=function(x, ...) {
panel.histogram(x, ...)
panel.densityplot(x, darg=list(kernel="gaussian"),...)
})
}
par(mfrow=c(3,2))
hist(tmp5.1[tmp5.1$choice==1,"rt"], breaks="fd", col="gray", freq=FALSE,
xlab="RT (s)", main="pLBA-Choice 1")
lines(density(tmp6.1[tmp6.1$choice==1,"rt"]), col="red", lty="dashed", lwd=1.5)
hist(tmp5.1[tmp5.1$choice==2,"rt"], breaks="fd", col="gray", freq=FALSE,
xlab="RT (s)", main="pLBA-Choice 2")
lines(density(tmp6.1[tmp6.1$choice==2,"rt"]), col="red", lty="dashed", lwd=1.5)
#############
hist(tmp5.2[tmp5.2$choice==1,"rt"], breaks="fd", col="gray", freq=FALSE,
xlab="RT (s)", main="pLBA-Choice 1")
lines(density(tmp6.2[tmp6.2$choice==1,"rt"]), col="red", lty="dashed", lwd=1.5)
hist(tmp5.2[tmp5.2$choice==2,"rt"], breaks="fd", col="gray", freq=FALSE,
xlab="RT (s)", main="pLBA-Choice 2")
lines(density(tmp6.2[tmp6.2$choice==2,"rt"]), col="red", lty="dashed", lwd=1.5)
#############
hist(tmp5.3[tmp5.3$choice==1,"rt"], breaks="fd", col="gray", freq=FALSE,
xlab="RT (s)", main="pLBA-Choice 1")
lines(density(tmp6.3[tmp6.3$choice==1,"rt"]), col="red", lty="dashed", lwd=1.5)
hist(tmp5.3[tmp5.3$choice==2,"rt"], breaks="fd", col="gray", freq=FALSE,
xlab="RT (s)", main="pLBA-Choice 2")
lines(density(tmp6.3[tmp6.3$choice==2,"rt"]), col="red", lty="dashed", lwd=1.5)
par(mfrow=c(1,1))
################
## Example 2 ##
################
pVec1 <- c(A=1.51, b=2.7, v1=3.32, v2=2.24, w1=1.51, w2=3.69,
sv=1, rD=0.31, swt=0.5, t0=0.08)
pVec2 <- c(A1=1.51, A2=1.51, b1=2.7, b2=2.7, v1=3.32, v2=2.24,
w1=1.51, w2=3.69, sv1=1, sv2=1, sw1=1, sw2=1, rD=0.31,
swt=0.5, t0=0.08)
system.time(dat1 <- cpda::rplba1( n, pVec1))
system.time(dat2 <- cpda::rplba2( n, pVec2))
system.time(dat3 <- cpda::rplbaR1(n, pVec1))
system.time(dat4 <- cpda::rplbaR2(n, pVec2))
tmp1 <- data.frame(choice=factor(dat1[,1]), rt=dat1[,2])
tmp2 <- data.frame(choice=factor(dat2[,1]), rt=dat2[,2])
tmp3 <- data.frame(choice=factor(dat3[,1]), rt=dat3[,2])
tmp4 <- data.frame(choice=factor(dat4[,1]), rt=dat4[,2])
tmp1$fun <- "rplba1"
tmp2$fun <- "rplba2"
tmp3$fun <- "rplba1-R"
tmp4$fun <- "rplba2-R"
tmp0 <- rbind(tmp1, tmp2, tmp3, tmp4)
tmp0$fun <- factor(tmp0$fun)
\dontrun{
require(ggplot2)
ggplot(data = tmp0, aes(x = rt, fill=fun, color=fun)) +
geom_density(alpha=0.2) +
facet_grid(.~ choice) +
scale_fill_manual(values=cb)
}
}
| /man/rplba.Rd | no_license | TasCL/cpda | R | false | true | 10,264 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R, R/cpda.R
\name{rplba}
\alias{rplba}
\alias{rplba1}
\alias{rplba2}
\alias{rplba3}
\alias{rplba4}
\alias{rplbaR1}
\alias{rplbaR2}
\alias{rplbaR3}
\title{Generate Random Choice Response Times using pLBA Model}
\usage{
rplba(n, pVec)
rplba1(n, pVec)
rplba2(n, pVec)
rplba3(n, pVec)
rplba4(n, pVec)
rplbaR1(n = 10, pVec = c(A = 1.51, b = 2.7, v1 = 3.32, v2 = 2.24, w1 = 1.51,
w2 = 3.69, sv = 1, rD = 0.31, swt = 0.5, t0 = 0.08))
rplbaR2(n = 10, pVec = c(A1 = 1.51, A2 = 1.51, b1 = 2.7, b2 = 2.7, v1 =
3.32, v2 = 2.24, w1 = 1.51, w2 = 3.69, sv1 = 1, sv2 = 1, sw1 = 1, sw2 = 1, rD
= 0.31, swt = 0.5, t0 = 0.08))
rplbaR3(n, pVec = c(A1 = 1.5, A2 = 1.5, B1 = 1.2, B2 = 1.2, C1 = 0.3, C2 =
0.3, v1 = 3.32, v2 = 2.24, w1 = 1.51, w2 = 3.69, sv1 = 1, sv2 = 1, sw1 = 1,
sw2 = 1, rD = 0.3, tD = 0.3, swt = 0.5, t0 = 0.08))
}
\arguments{
\item{n}{number of observations. Must be an integer}
\item{pVec}{a numeric vector storing pLBA model parameters. The sequence is
critical. See details for the sequence.}
}
\value{
A \code{n x 2} matrix with a first column storing choices and second
column storing response times.
}
\description{
This function uses two-accumulator piecewise LBA model to generate random
choice RTs. There are 3 variants: \code{rplba}, \code{rplba1}, and
\code{rplba2}. Each of them has a corresponding R version,
\code{rplbaR}, \code{rplbaR1}, and \code{rplbaR2}, for the purpose of
speed testing. Because of the difference between the random number generators
in C and R, they do not generate exactly identical RTs. When generating large
enough observations, the distributions generated by R and C will match.
}
\details{
The main function \code{rplba} implements a more flexible
version of pLBA random number generator than the other two. It uses the
following parameterisation (order matters):
\itemize{
\item \bold{\emph{A1}} accumulator 1 start-point upper bound. \code{A} is
the upper bound of the interval \code{[0, A]}, which is used by an uniform
distribution to generate a start-point. Average amount of
prior evidence (i.e., before accumulation process even begins) across trials
is \code{A/2}.
\item \bold{\emph{A2}} accumulator 2 start-point upper bound.
\item \bold{\emph{B1}} accumulator 1 traveling distance. Note this is not
a decision threshold!. LBA convention denotes decision threshold/caution as
b (lowercase) and traveling distance as B (uppercase). \code{B=b-A} is
the traveling distance, and \code{b-A/2} is a measure of average
\emph{decision caution}.
\item \bold{\emph{B2}} accumulator 2 traveling distance.
\item \bold{\emph{C1}} the amount of traveling distance change for
accumulator 1 at the stage 2.
\item \bold{\emph{C2}} the amount of traveling distance change for
accumulator 2 at the stage 2.
\item \bold{\emph{v1}} accumulator 1 drift rate, stage 1
\item \bold{\emph{v2}} accumulator 2 drift rate, stage 1
\item \bold{\emph{w1}} accumulator 1 drift rate, stage 2
\item \bold{\emph{w2}} accumulator 2 drift rate, stage 2
\item \bold{\emph{sv1}} accumulator 1 drift rate standard deviation,
stage 1.
\item \bold{\emph{sv2}} accumulator 2 drift rate standard deviation,
stage 1.
\item \bold{\emph{sw1}} accumulator 1 drift rate standard deviation,
stage 2.
\item \bold{\emph{sw2}} accumulator 2 drift rate standard deviation,
stage 2.
\item \bold{\emph{rD}} the delay duration while stage 1 drift rate switches
to stage 2 drift rate
\item \bold{\emph{tD}} the delay duration while stage 1 threshold switches
to stage 2 threshold
\item \bold{\emph{swt}} switch time, usually determined by experimental
design.
\item \bold{\emph{t0}} non-decision time in second.
}
\code{rplba1} uses the following parameterisation:
\itemize{
\item \bold{\emph{A}} a common start-point interval for both accumulators.
\item \bold{\emph{b}} a common response threshold for both accumulators.
\item \bold{\emph{v1}} accumulator 1 drift rate, stage 1
\item \bold{\emph{v2}} accumulator 2 drift rate, stage 1
\item \bold{\emph{w1}} accumulator 1 drift rate, stage 2
\item \bold{\emph{w2}} accumulator 2 drift rate, stage 2
\item \bold{\emph{sv}} a common standard deviation for both accumulators
\item \bold{\emph{rD}} a delay period while drift rate switch to a
second stage process
\item \bold{\emph{swt}} switch time, usually determined by experimental
design
\item \bold{\emph{t0}} non-decision time in second.
}
\code{rplba2} uses the following parameterisation:
\itemize{
\item \bold{\emph{A1}} start-point interval of the accumulator 1.
\item \bold{\emph{A2}} start-point interval of the accumulator 2.
\item \bold{\emph{b1}} accumulator 1 response threshold.
\item \bold{\emph{b2}} accumulator 2 response threshold.
\item \bold{\emph{v1}} accumulator 1 drift rate, stage 1
\item \bold{\emph{v2}} accumulator 2 drift rate, stage 1
\item \bold{\emph{w1}} accumulator 1 drift rate, stage 2
\item \bold{\emph{w2}} accumulator 2 drift rate, stage 2
\item \bold{\emph{sv1}} the standard deviation of accumulator 1 drift rate
during stage 1.
\item \bold{\emph{sv2}} the standard deviation of accumulator 2 drift rate
during stage 1.
\item \bold{\emph{sw1}} the standard deviation of accumulator 1 drift rate
during stage 2.
\item \bold{\emph{sw2}} the standard deviation of accumulator 2 drift rate
during stage 2.
\item \bold{\emph{rD}} a delay period while drift rate switch to a
second stage process
\item \bold{\emph{swt}} switch time, usually determined by experimental
design
\item \bold{\emph{t0}} non-decision time in second.
}
}
\examples{
################
## Example 1 ##
################
pVec3.1 <- c(A1=1.51, A2=1.51, B1=1.2, B2=1.2, C1=.3, C2=.3, v1=3.32,
v2=2.24, w1=1.51, w2=3.69, sv1=1, sv2=1, sw1=1, sw2=1, rD=0.1,
tD=.1, swt=0.5, t0=0.08)
pVec3.2 <- c(A1=1.51, A2=1.51, B1=1.2, B2=1.2, C1=.3, C2=.3, v1=3.32,
v2=2.24, w1=1.51, w2=3.69, sv1=1, sv2=1, sw1=1, sw2=1, rD=0.1,
tD=.15, swt=0.5, t0=0.08)
pVec3.3 <- c(A1=1.51, A2=1.51, B1=1.2, B2=1.2, C1=.3, C2=.3, v1=3.32,
v2=2.24, w1=1.51, w2=3.69, sv1=1, sv2=1, sw1=1, sw2=1, rD=0.15,
tD=.1, swt=0.5, t0=0.08)
n <- 1e5
set.seed(123); system.time(dat5.1 <- cpda::rplbaR(n, pVec3.1))
set.seed(123); system.time(dat5.2 <- cpda::rplbaR(n, pVec3.2))
set.seed(123); system.time(dat5.3 <- cpda::rplbaR(n, pVec3.3))
set.seed(123); system.time(dat6.1 <- cpda::rplba( n, pVec3.1))
set.seed(123); system.time(dat6.2 <- cpda::rplba( n, pVec3.2))
set.seed(123); system.time(dat6.3 <- cpda::rplba( n, pVec3.3))
tmp5.1 <- data.frame(choice=factor(dat5.1[,1]), rt=dat5.1[,2])
tmp5.2 <- data.frame(choice=factor(dat5.2[,1]), rt=dat5.2[,2])
tmp5.3 <- data.frame(choice=factor(dat5.3[,1]), rt=dat5.3[,2])
tmp6.1 <- data.frame(choice=factor(dat6.1[,1]), rt=dat6.1[,2])
tmp6.2 <- data.frame(choice=factor(dat6.2[,1]), rt=dat6.2[,2])
tmp6.3 <- data.frame(choice=factor(dat6.3[,1]), rt=dat6.3[,2])
tmp5.1$fun <- "R"
tmp5.2$fun <- "R"
tmp5.3$fun <- "R"
tmp6.1$fun <- "C"
tmp6.2$fun <- "C"
tmp6.3$fun <- "C"
tmp5.1$vec <- "1"
tmp5.2$vec <- "2"
tmp5.3$vec <- "3"
tmp6.1$vec <- "1"
tmp6.2$vec <- "2"
tmp6.3$vec <- "3"
df <- rbind(tmp5.1, tmp5.2, tmp5.3, tmp6.1, tmp6.2, tmp6.3)
df$fun <- factor(df$fun)
## Show R and C functions produce almost identical distributions
\dontrun{
## Set up a colour palette
cb <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2",
"#D55E00", "#CC79A7")
require(ggplot2)
ggplot(data=df, aes(x = rt, fill=fun, color=fun)) +
geom_density(alpha=0.2) +
facet_grid(vec~ choice) +
scale_fill_manual(values=cb)
## Or you can use lattice or base graphics
require(lattice)
histogram( ~rt | vec+choice+fun, data=df, breaks="fd", type="density",
xlab="Response Time (s)",
panel=function(x, ...) {
panel.histogram(x, ...)
panel.densityplot(x, darg=list(kernel="gaussian"),...)
})
}
par(mfrow=c(3,2))
hist(tmp5.1[tmp5.1$choice==1,"rt"], breaks="fd", col="gray", freq=FALSE,
xlab="RT (s)", main="pLBA-Choice 1")
lines(density(tmp6.1[tmp6.1$choice==1,"rt"]), col="red", lty="dashed", lwd=1.5)
hist(tmp5.1[tmp5.1$choice==2,"rt"], breaks="fd", col="gray", freq=FALSE,
xlab="RT (s)", main="pLBA-Choice 2")
lines(density(tmp6.1[tmp6.1$choice==2,"rt"]), col="red", lty="dashed", lwd=1.5)
#############
hist(tmp5.2[tmp5.2$choice==1,"rt"], breaks="fd", col="gray", freq=FALSE,
xlab="RT (s)", main="pLBA-Choice 1")
lines(density(tmp6.2[tmp6.2$choice==1,"rt"]), col="red", lty="dashed", lwd=1.5)
hist(tmp5.2[tmp5.2$choice==2,"rt"], breaks="fd", col="gray", freq=FALSE,
xlab="RT (s)", main="pLBA-Choice 2")
lines(density(tmp6.2[tmp6.2$choice==2,"rt"]), col="red", lty="dashed", lwd=1.5)
#############
hist(tmp5.3[tmp5.3$choice==1,"rt"], breaks="fd", col="gray", freq=FALSE,
xlab="RT (s)", main="pLBA-Choice 1")
lines(density(tmp6.3[tmp6.3$choice==1,"rt"]), col="red", lty="dashed", lwd=1.5)
hist(tmp5.3[tmp5.3$choice==2,"rt"], breaks="fd", col="gray", freq=FALSE,
xlab="RT (s)", main="pLBA-Choice 2")
lines(density(tmp6.3[tmp6.3$choice==2,"rt"]), col="red", lty="dashed", lwd=1.5)
par(mfrow=c(1,1))
################
## Example 2 ##
################
pVec1 <- c(A=1.51, b=2.7, v1=3.32, v2=2.24, w1=1.51, w2=3.69,
sv=1, rD=0.31, swt=0.5, t0=0.08)
pVec2 <- c(A1=1.51, A2=1.51, b1=2.7, b2=2.7, v1=3.32, v2=2.24,
w1=1.51, w2=3.69, sv1=1, sv2=1, sw1=1, sw2=1, rD=0.31,
swt=0.5, t0=0.08)
system.time(dat1 <- cpda::rplba1( n, pVec1))
system.time(dat2 <- cpda::rplba2( n, pVec2))
system.time(dat3 <- cpda::rplbaR1(n, pVec1))
system.time(dat4 <- cpda::rplbaR2(n, pVec2))
tmp1 <- data.frame(choice=factor(dat1[,1]), rt=dat1[,2])
tmp2 <- data.frame(choice=factor(dat2[,1]), rt=dat2[,2])
tmp3 <- data.frame(choice=factor(dat3[,1]), rt=dat3[,2])
tmp4 <- data.frame(choice=factor(dat4[,1]), rt=dat4[,2])
tmp1$fun <- "rplba1"
tmp2$fun <- "rplba2"
tmp3$fun <- "rplba1-R"
tmp4$fun <- "rplba2-R"
tmp0 <- rbind(tmp1, tmp2, tmp3, tmp4)
tmp0$fun <- factor(tmp0$fun)
\dontrun{
require(ggplot2)
ggplot(data = tmp0, aes(x = rt, fill=fun, color=fun)) +
geom_density(alpha=0.2) +
facet_grid(.~ choice) +
scale_fill_manual(values=cb)
}
}
|
#' @export
#' Fit per-statistic projection models from player game logs.
#'
#' Reads every game-log CSV under \code{data/gamelogs/}, aggregates the logs to
#' season totals per player, then for each statistic in \code{statsToPredict}
#' fits a linear model and a random forest that predict a season's value from
#' the player's (up to) three preceding seasons.
#'
#' @param testYear Season to generate predictions for; models are trained on
#'   seasons before \code{testYear}.
#' @return A named list with one element per predicted statistic, each holding
#'   the training data, prediction-feature data, fitted \code{lm} and
#'   \code{randomForest} models, and their predictions.
generate_predictions <- function(testYear = 2017) {
  statsToPredict <- c("rush_att", "rush_yds", "rush_td", "targets", "rec", "rec_yds", "rec_td",
                      "pass_cmp", "pass_att", "pass_yds", "pass_td", "pass_int")
  # Read and stack every game-log CSV, keeping only games 1-16 of each season.
  gamelogs <- plyr::ldply(list.files("data/gamelogs/"), function(x) {
    t <- read.csv(file = paste0("data/gamelogs/", x), header = TRUE, stringsAsFactors = FALSE)
    t <- t[, c("player", "game_num", "rush_att", "rush_yds", "rush_yds_per_att", "rush_td", "targets",
               "rec", "rec_yds", "rec_yds_per_rec", "rec_td", "catch_pct",
               "pass_cmp", "pass_att", "pass_yds", "pass_td", "pass_int", "pass_rating",
               "pass_yds_per_att", "year")]
    t[is.na(t)] <- 0
    t %>% filter(game_num >= 1 & game_num <= 16)
  })
  gamelogs <- unique(gamelogs)
  # Season totals per player/year; ratio stats are recomputed from the totals.
  yearly <- gamelogs %>% group_by(player, year) %>%
    summarise(rush_att = sum(rush_att), rush_yds = sum(rush_yds),
              rush_yds_per_att = sum(rush_yds)/sum(rush_att),
              rush_td = sum(rush_td), targets = sum(targets),
              rec = sum(rec), rec_yds = sum(rec_yds),
              rec_yds_per_rec = sum(rec_yds)/sum(rec),
              rec_td = sum(rec_td), catch_pct = sum(rec)/sum(targets),
              pass_cmp = sum(pass_cmp), pass_att = sum(pass_att),
              pass_pct = sum(pass_cmp)/sum(pass_att),
              pass_yds = sum(pass_yds), pass_td = sum(pass_td),
              pass_int = sum(pass_int), pass_rating = mean(pass_rating),
              pass_yds_per_att = sum(pass_yds)/sum(pass_att)
    ) %>% data.frame()
  # 0/0 in the ratio columns yields NaN; normalise to NA.  This replaces the
  # old apply() approach, which round-tripped the whole frame through a
  # character matrix and then re-coerced every column.
  num_cols <- vapply(yearly, is.numeric, logical(1))
  yearly[num_cols] <- lapply(yearly[num_cols], function(x) replace(x, is.nan(x), NA))
  yearly$player <- as.character(yearly$player)
  statmodels <- list()
  for (stat in seq_along(statsToPredict)) {
    print(statsToPredict[stat])
    currentStat <- statsToPredict[stat]
    category <- strsplit(currentStat, "_")[[1]][1]  # "rush", "rec" or "pass"
    currentStatData <- data.frame()
    for (yearToTest in 2013:(testYear - 1)) {
      print(yearToTest)
      # Train on up to the 3 seasons preceding yearToTest.
      train <- yearly %>% filter(year < yearToTest & (year >= (yearToTest - 3)))
      # Minimum-volume filters so ratio stats are meaningful.
      if (category == "pass") {
        train <- train %>% filter(pass_att > 100)
      } else if (category == "rush") {
        train <- train %>% filter(rush_att > 10)
      } else if (category == "rec") {
        train <- train %>% filter(targets > 10)
      }
      test <- yearly %>% filter(year == yearToTest)
      # Keep only players present on both sides of the split.
      train <- train %>% filter(player %in% test$player)
      test <- test %>% filter(player %in% train$player)
      temp_test <- test[c("player", "year", currentStat)]
      temp_train <- train[c("player", "year", colnames(train)[grep(category, colnames(train))])]
      # Reshape to one row per player with _1/_2/_3 suffixes for the up-to-
      # three prior seasons.
      temp_train <- temp_train %>%
        group_by(player) %>%
        mutate(suffix = paste0("_", 1:n())) %>%
        gather(var, val, -c(player, suffix)) %>%
        unite(var_group, var, suffix, sep = "") %>%
        spread(var_group, val) %>% data.frame()
      temp_train$PredictStat <- currentStat
      temp_train <- merge(temp_test, temp_train, by = "player")
      currentStatData <- rbind(currentStatData, temp_train)
    }
    # Prediction features for testYear: the three preceding seasons.
    test_data <- yearly %>% filter(year >= (testYear-3) & year < testYear)
    if (category == "pass") {
      test_data <- test_data %>% filter(pass_att > 100)
    } else if (category == "rush") {
      test_data <- test_data %>% filter(rush_att > 10)
    } else if (category == "rec") {
      test_data <- test_data %>% filter(targets > 10)
    }
    test_data <- test_data[c("player", "year", colnames(test_data)[grep(category, colnames(test_data))])]
    test_data <- test_data %>%
      group_by(player) %>%
      mutate(suffix = paste0("_", 1:n())) %>%
      gather(var, val, -c(player, suffix)) %>%
      unite(var_group, var, suffix, sep = "") %>%
      spread(var_group, val) %>% data.frame()
    completetest <- test_data[complete.cases(test_data), ]
    # Predictor columns: the suffixed history columns, skipping the id/response
    # columns at the front and the last 4 columns -- TODO confirm the intended
    # 4-column offsets against the reshaped layout.
    predictor_cols <- colnames(temp_train)[4:(ncol(temp_train) - 4)]
    # The old eval(expr = paste0(...)) wrapper was a no-op on a string.
    temp_formula <- paste0(currentStat, " ~ ", paste(predictor_cols, collapse = " + "))
    # NOTE(review): the models are fitted on temp_train, i.e. only the last
    # fold of the loop above; currentStatData accumulates all folds but is
    # never used -- confirm whether it was meant to be the training set.
    temp_lmmodel <- lm(formula = temp_formula, data = temp_train)
    temp_rfmodel <- randomForest::randomForest(formula = as.formula(temp_formula),
                                               data = temp_train,
                                               na.action = randomForest::na.roughfix)
    temp_lmpreds <- predict(temp_lmmodel, newdata = completetest)
    temp_rfpreds <- predict(temp_rfmodel, newdata = completetest)
    # Fix: was `statmodels[i] <- list(temp_list)`, where `i` was a stale index
    # left over from an earlier loop, so every statistic overwrote the same
    # element.  Key by statistic name instead.  `testyear` previously captured
    # the stale inner loop variable (testYear - 1).
    statmodels[[currentStat]] <- list(stat = currentStat,
                                      testyear = testYear,
                                      traindata = temp_train,
                                      testpredictdata = test_data,
                                      testdata = yearly %>% filter(year == testYear),
                                      lmmodel = temp_lmmodel,
                                      lmpreds = temp_lmpreds,
                                      rfmodel = temp_rfmodel,
                                      rfpreds = temp_rfpreds)
  }
  # Fix: the original ended on the for loop and returned invisible NULL.
  statmodels
}
| /R/generate_predictions.R | no_license | ctloftin/FantasyFootballData | R | false | false | 5,083 | r |
#' @export
#' Fit per-statistic projection models from player game logs.
#'
#' Reads every game-log CSV under \code{data/gamelogs/}, aggregates the logs to
#' season totals per player, then for each statistic in \code{statsToPredict}
#' fits a linear model and a random forest that predict a season's value from
#' the player's (up to) three preceding seasons.
#'
#' @param testYear Season to generate predictions for; models are trained on
#'   seasons before \code{testYear}.
#' @return A named list with one element per predicted statistic, each holding
#'   the training data, prediction-feature data, fitted \code{lm} and
#'   \code{randomForest} models, and their predictions.
generate_predictions <- function(testYear = 2017) {
  statsToPredict <- c("rush_att", "rush_yds", "rush_td", "targets", "rec", "rec_yds", "rec_td",
                      "pass_cmp", "pass_att", "pass_yds", "pass_td", "pass_int")
  # Read and stack every game-log CSV, keeping only games 1-16 of each season.
  gamelogs <- plyr::ldply(list.files("data/gamelogs/"), function(x) {
    t <- read.csv(file = paste0("data/gamelogs/", x), header = TRUE, stringsAsFactors = FALSE)
    t <- t[, c("player", "game_num", "rush_att", "rush_yds", "rush_yds_per_att", "rush_td", "targets",
               "rec", "rec_yds", "rec_yds_per_rec", "rec_td", "catch_pct",
               "pass_cmp", "pass_att", "pass_yds", "pass_td", "pass_int", "pass_rating",
               "pass_yds_per_att", "year")]
    t[is.na(t)] <- 0
    t %>% filter(game_num >= 1 & game_num <= 16)
  })
  gamelogs <- unique(gamelogs)
  # Season totals per player/year; ratio stats are recomputed from the totals.
  yearly <- gamelogs %>% group_by(player, year) %>%
    summarise(rush_att = sum(rush_att), rush_yds = sum(rush_yds),
              rush_yds_per_att = sum(rush_yds)/sum(rush_att),
              rush_td = sum(rush_td), targets = sum(targets),
              rec = sum(rec), rec_yds = sum(rec_yds),
              rec_yds_per_rec = sum(rec_yds)/sum(rec),
              rec_td = sum(rec_td), catch_pct = sum(rec)/sum(targets),
              pass_cmp = sum(pass_cmp), pass_att = sum(pass_att),
              pass_pct = sum(pass_cmp)/sum(pass_att),
              pass_yds = sum(pass_yds), pass_td = sum(pass_td),
              pass_int = sum(pass_int), pass_rating = mean(pass_rating),
              pass_yds_per_att = sum(pass_yds)/sum(pass_att)
    ) %>% data.frame()
  # 0/0 in the ratio columns yields NaN; normalise to NA.  This replaces the
  # old apply() approach, which round-tripped the whole frame through a
  # character matrix and then re-coerced every column.
  num_cols <- vapply(yearly, is.numeric, logical(1))
  yearly[num_cols] <- lapply(yearly[num_cols], function(x) replace(x, is.nan(x), NA))
  yearly$player <- as.character(yearly$player)
  statmodels <- list()
  for (stat in seq_along(statsToPredict)) {
    print(statsToPredict[stat])
    currentStat <- statsToPredict[stat]
    category <- strsplit(currentStat, "_")[[1]][1]  # "rush", "rec" or "pass"
    currentStatData <- data.frame()
    for (yearToTest in 2013:(testYear - 1)) {
      print(yearToTest)
      # Train on up to the 3 seasons preceding yearToTest.
      train <- yearly %>% filter(year < yearToTest & (year >= (yearToTest - 3)))
      # Minimum-volume filters so ratio stats are meaningful.
      if (category == "pass") {
        train <- train %>% filter(pass_att > 100)
      } else if (category == "rush") {
        train <- train %>% filter(rush_att > 10)
      } else if (category == "rec") {
        train <- train %>% filter(targets > 10)
      }
      test <- yearly %>% filter(year == yearToTest)
      # Keep only players present on both sides of the split.
      train <- train %>% filter(player %in% test$player)
      test <- test %>% filter(player %in% train$player)
      temp_test <- test[c("player", "year", currentStat)]
      temp_train <- train[c("player", "year", colnames(train)[grep(category, colnames(train))])]
      # Reshape to one row per player with _1/_2/_3 suffixes for the up-to-
      # three prior seasons.
      temp_train <- temp_train %>%
        group_by(player) %>%
        mutate(suffix = paste0("_", 1:n())) %>%
        gather(var, val, -c(player, suffix)) %>%
        unite(var_group, var, suffix, sep = "") %>%
        spread(var_group, val) %>% data.frame()
      temp_train$PredictStat <- currentStat
      temp_train <- merge(temp_test, temp_train, by = "player")
      currentStatData <- rbind(currentStatData, temp_train)
    }
    # Prediction features for testYear: the three preceding seasons.
    test_data <- yearly %>% filter(year >= (testYear-3) & year < testYear)
    if (category == "pass") {
      test_data <- test_data %>% filter(pass_att > 100)
    } else if (category == "rush") {
      test_data <- test_data %>% filter(rush_att > 10)
    } else if (category == "rec") {
      test_data <- test_data %>% filter(targets > 10)
    }
    test_data <- test_data[c("player", "year", colnames(test_data)[grep(category, colnames(test_data))])]
    test_data <- test_data %>%
      group_by(player) %>%
      mutate(suffix = paste0("_", 1:n())) %>%
      gather(var, val, -c(player, suffix)) %>%
      unite(var_group, var, suffix, sep = "") %>%
      spread(var_group, val) %>% data.frame()
    completetest <- test_data[complete.cases(test_data), ]
    # Predictor columns: the suffixed history columns, skipping the id/response
    # columns at the front and the last 4 columns -- TODO confirm the intended
    # 4-column offsets against the reshaped layout.
    predictor_cols <- colnames(temp_train)[4:(ncol(temp_train) - 4)]
    # The old eval(expr = paste0(...)) wrapper was a no-op on a string.
    temp_formula <- paste0(currentStat, " ~ ", paste(predictor_cols, collapse = " + "))
    # NOTE(review): the models are fitted on temp_train, i.e. only the last
    # fold of the loop above; currentStatData accumulates all folds but is
    # never used -- confirm whether it was meant to be the training set.
    temp_lmmodel <- lm(formula = temp_formula, data = temp_train)
    temp_rfmodel <- randomForest::randomForest(formula = as.formula(temp_formula),
                                               data = temp_train,
                                               na.action = randomForest::na.roughfix)
    temp_lmpreds <- predict(temp_lmmodel, newdata = completetest)
    temp_rfpreds <- predict(temp_rfmodel, newdata = completetest)
    # Fix: was `statmodels[i] <- list(temp_list)`, where `i` was a stale index
    # left over from an earlier loop, so every statistic overwrote the same
    # element.  Key by statistic name instead.  `testyear` previously captured
    # the stale inner loop variable (testYear - 1).
    statmodels[[currentStat]] <- list(stat = currentStat,
                                      testyear = testYear,
                                      traindata = temp_train,
                                      testpredictdata = test_data,
                                      testdata = yearly %>% filter(year == testYear),
                                      lmmodel = temp_lmmodel,
                                      lmpreds = temp_lmpreds,
                                      rfmodel = temp_rfmodel,
                                      rfpreds = temp_rfpreds)
  }
  # Fix: the original ended on the for loop and returned invisible NULL.
  statmodels
}
|
# Demonstration of neural networks using the iris data set.
# ref: http://www.di.fc.ul.pt/~jpn/r/neuralnets/neuralnets.html
# Fits a single-hidden-layer network (method = "nnet" via caret) on a 75/25
# stratified split of iris and reports test-set accuracy.
library(caret)
library(dplyr)
library(e1071)
library(neuralnet)

set.seed(101)
# 75/25 split stratified on the class label.  attach(iris) was removed: it
# polluted the search path and was never detached; reference the column
# explicitly instead.
in_train <- createDataPartition(iris$Species, p = 3/4)[[1]]
train_iris <- iris[in_train, ]
test_iris <- iris[-in_train, ]

modelFit <- train(Species ~ ., data = train_iris, method = "nnet", trace = FALSE)
confusionMatrix(test_iris$Species, predict(modelFit, test_iris))

# Manual calculation of accuracy (~0.972 with this seed).
mean(as.character(predict(modelFit, test_iris)) == as.character(test_iris$Species))
| /general_r/Tutorials/neuroNetwork.R | no_license | peterwu19881230/R_Utility | R | false | false | 684 | r | #Demonstration of neuro networks using iris dataset
# ref: http://www.di.fc.ul.pt/~jpn/r/neuralnets/neuralnets.html
# Fits a single-hidden-layer network (method = "nnet" via caret) on a 75/25
# stratified split of iris and reports test-set accuracy.
library(caret)
library(dplyr)
library(e1071)
library(neuralnet)

set.seed(101)
# 75/25 split stratified on the class label.  attach(iris) was removed: it
# polluted the search path and was never detached; reference the column
# explicitly instead.
in_train <- createDataPartition(iris$Species, p = 3/4)[[1]]
train_iris <- iris[in_train, ]
test_iris <- iris[-in_train, ]

modelFit <- train(Species ~ ., data = train_iris, method = "nnet", trace = FALSE)
confusionMatrix(test_iris$Species, predict(modelFit, test_iris))

# Manual calculation of accuracy (~0.972 with this seed).
mean(as.character(predict(modelFit, test_iris)) == as.character(test_iris$Species))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CorrBin.R
\name{fitCorrBin}
\alias{fitCorrBin}
\title{Fitting the Correlated Binomial Distribution when binomial
random variable, frequency, probability of success and covariance are given}
\usage{
fitCorrBin(x,obs.freq,p,cov)
}
\arguments{
\item{x}{vector of binomial random variables.}
\item{obs.freq}{vector of frequencies.}
\item{p}{single value for probability of success.}
\item{cov}{single value for covariance.}
}
\value{
The output of \code{fitCorrBin} gives the class format \code{fitCB} and \code{fit} consisting of a list
\code{bin.ran.var} binomial random variables.
\code{obs.freq} corresponding observed frequencies.
\code{exp.freq} corresponding expected frequencies.
\code{statistic} chi-squared test statistics.
\code{df} degree of freedom.
\code{p.value} probability value by chi-squared test statistic.
\code{corr} Correlation value.
\code{fitCB} fitted probability values of \code{dCorrBin}.
\code{NegLL} Negative Log Likelihood value.
\code{AIC} AIC value.
\code{call} the inputs of the function.
Methods \code{summary}, \code{print}, \code{AIC}, \code{residuals} and \code{fitted}
can be used to extract specific outputs.
}
\description{
The function will fit the Correlated Binomial Distribution
when random variables, corresponding frequencies, probability of success and covariance are given.
It will provide the expected frequencies, chi-squared test statistics value, p value,
and degree of freedom so that it can be seen if this distribution fits the data.
}
\details{
\deqn{obs.freq \ge 0}
\deqn{x = 0,1,2,..}
\deqn{0 < p < 1}
\deqn{-\infty < cov < +\infty}
\strong{NOTE} : If input parameters are not in given domain conditions
necessary error messages will be provided to go further.
}
\examples{
No.D.D <- 0:7 #assigning the random variables
Obs.fre.1 <- c(47,54,43,40,40,41,39,95) #assigning the corresponding frequencies
#estimating the parameters using maximum log likelihood value and assigning it
parameters <- EstMLECorrBin(x=No.D.D,freq=Obs.fre.1,p=0.5,cov=0.0050)
pCorrBin <- bbmle::coef(parameters)[1]
covCorrBin <- bbmle::coef(parameters)[2]
#fitting when the random variable,frequencies,probability and covariance are given
results <- fitCorrBin(No.D.D,Obs.fre.1,pCorrBin,covCorrBin)
results
#extracting the AIC value
AIC(results)
#extract fitted values
fitted(results)
}
\references{
Johnson, N. L., Kemp, A. W., & Kotz, S. (2005). Univariate discrete distributions (Vol. 444).
Hoboken, NJ: Wiley-Interscience.
L. L. Kupper, J.K.H., 1978. The Use of a Correlated Binomial Model for the Analysis of Certain Toxicological
Experiments. Biometrics, 34(1), pp.69-76.
Paul, S.R., 1985. A three-parameter generalization of the binomial distribution. Communications in Statistics
- Theory and Methods, 14(6), pp.1497-1506.
Available at: \doi{10.1080/03610928508828990}.
Jorge G. Morel and Nagaraj K. Neerchal. Overdispersion Models in SAS. SAS Institute, 2012.
}
| /man/fitCorrBin.Rd | no_license | cran/fitODBOD | R | false | true | 3,015 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CorrBin.R
\name{fitCorrBin}
\alias{fitCorrBin}
\title{Fitting the Correlated Binomial Distribution when binomial
random variable, frequency, probability of success and covariance are given}
\usage{
fitCorrBin(x,obs.freq,p,cov)
}
\arguments{
\item{x}{vector of binomial random variables.}
\item{obs.freq}{vector of frequencies.}
\item{p}{single value for probability of success.}
\item{cov}{single value for covariance.}
}
\value{
The output of \code{fitCorrBin} gives the class format \code{fitCB} and \code{fit} consisting of a list
\code{bin.ran.var} binomial random variables.
\code{obs.freq} corresponding observed frequencies.
\code{exp.freq} corresponding expected frequencies.
\code{statistic} chi-squared test statistics.
\code{df} degree of freedom.
\code{p.value} probability value by chi-squared test statistic.
\code{corr} Correlation value.
\code{fitCB} fitted probability values of \code{dCorrBin}.
\code{NegLL} Negative Log Likelihood value.
\code{AIC} AIC value.
\code{call} the inputs of the function.
Methods \code{summary}, \code{print}, \code{AIC}, \code{residuals} and \code{fitted}
can be used to extract specific outputs.
}
\description{
The function will fit the Correlated Binomial Distribution
when random variables, corresponding frequencies, probability of success and covariance are given.
It will provide the expected frequencies, chi-squared test statistics value, p value,
and degree of freedom so that it can be seen if this distribution fits the data.
}
\details{
\deqn{obs.freq \ge 0}
\deqn{x = 0,1,2,..}
\deqn{0 < p < 1}
\deqn{-\infty < cov < +\infty}
\strong{NOTE} : If input parameters are not in given domain conditions
necessary error messages will be provided to go further.
}
\examples{
No.D.D <- 0:7 #assigning the random variables
Obs.fre.1 <- c(47,54,43,40,40,41,39,95) #assigning the corresponding frequencies
#estimating the parameters using maximum log likelihood value and assigning it
parameters <- EstMLECorrBin(x=No.D.D,freq=Obs.fre.1,p=0.5,cov=0.0050)
pCorrBin <- bbmle::coef(parameters)[1]
covCorrBin <- bbmle::coef(parameters)[2]
#fitting when the random variable,frequencies,probability and covariance are given
results <- fitCorrBin(No.D.D,Obs.fre.1,pCorrBin,covCorrBin)
results
#extracting the AIC value
AIC(results)
#extract fitted values
fitted(results)
}
\references{
Johnson, N. L., Kemp, A. W., & Kotz, S. (2005). Univariate discrete distributions (Vol. 444).
Hoboken, NJ: Wiley-Interscience.
L. L. Kupper, J.K.H., 1978. The Use of a Correlated Binomial Model for the Analysis of Certain Toxicological
Experiments. Biometrics, 34(1), pp.69-76.
Paul, S.R., 1985. A three-parameter generalization of the binomial distribution. Communications in Statistics
- Theory and Methods, 14(6), pp.1497-1506.
Available at: \doi{10.1080/03610928508828990}.
Jorge G. Morel and Nagaraj K. Neerchal. Overdispersion Models in SAS. SAS Institute, 2012.
}
|
#!/usr/bin/env Rscript
# Do not run this test on a login node.
library(sna)
library(network)
#library(ergm) # sampson data set
library(foreach)
library(Rmpi)
library(snow)
library(doParallel)
dyn.load("../lib/bet.so")
# Assert that two numeric objects are element-wise equal within tolerance
# (summed absolute difference <= 1e-5).  `name` labels the comparison in the
# failure message.  On mismatch the offending values are printed and an error
# is signalled: the old print() + q() combination terminated the session with
# exit status 0, so a failed test looked like a success to the caller/CI.
asserteq <- function(a, b, name) {
  if (sum(abs(a - b)) > 0.00001) {
    print(a)
    print(b)
    stop("Not equal! ", name, call. = FALSE)
  }
  invisible(TRUE)
}
# Parallel betweenness centrality via the compiled helper in ../lib/bet.so.
# Splits the n vertices into p contiguous chunks, computes partial
# betweenness scores for each chunk on a separate foreach worker, and sums
# the partial score vectors (.combine = '+').
#
# dat: a graph in any format accepted by sna::as.edgelist.sna().
# p:   number of chunks (normally the number of workers registered with
#      the %dopar% backend).
# Returns the combined numeric vector of betweenness scores.
btn_wrapper_dopar <- function(dat, p) {
dat <- as.edgelist.sna(dat)
n <- attr(dat, "n")
# When p does not divide n, pad the range so every chunk gets an equal
# width delta; the final chunk's end is clamped back to n below.
if (n %% p != 0) {
m <- n + (p - n%%p)
delta <- m / p
}
else
delta <- n / p
c <- foreach(i=1:p, .combine='+', .inorder=FALSE) %dopar%
{
# Chunk i covers vertex indices st..end; the exact bound convention
# (inclusive/exclusive) is defined by the C routine -- assumed
# half-open here, TODO confirm against bet.c.
st <- as.integer((i-1) * delta)
end <- min(as.integer(st + delta), n)
.Call("betweenness_partial", dat, n, NROW(dat), 0, st, end)
}
return(c)
}
# --- Test driver --------------------------------------------------------
# Baseline: serial betweenness from the sna package on the Florentine
# families network.
data(flo)
a <- betweenness(flo)
# One MPI worker per slot in the universe, minus one for the master.
np <- mpi.universe.size() - 1
cl <- makeMPIcluster(np)
# Every worker must load the shared library before it can service .Call().
i <- clusterCall(cl, function() { dyn.load("../lib/bet.so") })
registerDoParallel(cl)
# The chunked parallel result must match the serial baseline for a range
# of chunk counts.
for (p in c(1,2,4,8,16)) {
b <- btn_wrapper_dopar(flo, p)
asserteq(a, b, paste("wrapper-dopar ",p))
}
stopCluster(cl)
mpi.exit()
| /betweenness/tests/bet-test-mpi.R | no_license | khanna7/Key_Player | R | false | false | 1,164 | r | #!/usr/bin/env Rscript
# Do not run this test on a login node.
library(sna)
library(network)
#library(ergm) # sampson data set
library(foreach)
library(Rmpi)
library(snow)
library(doParallel)
dyn.load("../lib/bet.so")
# Assert that two numeric objects are element-wise equal within tolerance
# (summed absolute difference <= 1e-5).  `name` labels the comparison in the
# failure message.  On mismatch the offending values are printed and an error
# is signalled: the old print() + q() combination terminated the session with
# exit status 0, so a failed test looked like a success to the caller/CI.
asserteq <- function(a, b, name) {
  if (sum(abs(a - b)) > 0.00001) {
    print(a)
    print(b)
    stop("Not equal! ", name, call. = FALSE)
  }
  invisible(TRUE)
}
# Parallel betweenness centrality via the compiled helper in ../lib/bet.so.
# Splits the n vertices into p contiguous chunks, computes partial
# betweenness scores for each chunk on a separate foreach worker, and sums
# the partial score vectors (.combine = '+').
#
# dat: a graph in any format accepted by sna::as.edgelist.sna().
# p:   number of chunks (normally the number of workers registered with
#      the %dopar% backend).
# Returns the combined numeric vector of betweenness scores.
btn_wrapper_dopar <- function(dat, p) {
dat <- as.edgelist.sna(dat)
n <- attr(dat, "n")
# When p does not divide n, pad the range so every chunk gets an equal
# width delta; the final chunk's end is clamped back to n below.
if (n %% p != 0) {
m <- n + (p - n%%p)
delta <- m / p
}
else
delta <- n / p
c <- foreach(i=1:p, .combine='+', .inorder=FALSE) %dopar%
{
# Chunk i covers vertex indices st..end; the exact bound convention
# (inclusive/exclusive) is defined by the C routine -- assumed
# half-open here, TODO confirm against bet.c.
st <- as.integer((i-1) * delta)
end <- min(as.integer(st + delta), n)
.Call("betweenness_partial", dat, n, NROW(dat), 0, st, end)
}
return(c)
}
# --- Test driver --------------------------------------------------------
# Baseline: serial betweenness from the sna package on the Florentine
# families network.
data(flo)
a <- betweenness(flo)
# One MPI worker per slot in the universe, minus one for the master.
np <- mpi.universe.size() - 1
cl <- makeMPIcluster(np)
# Every worker must load the shared library before it can service .Call().
i <- clusterCall(cl, function() { dyn.load("../lib/bet.so") })
registerDoParallel(cl)
# The chunked parallel result must match the serial baseline for a range
# of chunk counts.
for (p in c(1,2,4,8,16)) {
b <- btn_wrapper_dopar(flo, p)
asserteq(a, b, paste("wrapper-dopar ",p))
}
stopCluster(cl)
mpi.exit()
|
# UI for a single-column filter widget: a range slider for numeric columns,
# otherwise a checkbox group with every observed level pre-selected.
# `dataname` is accepted for signature symmetry with the server module but
# is not used here.
m_filter_single_var_ui <- function(id, dataset, dataname, colname) {
  input_id <- NS(id)("filter")
  values <- dataset[[colname]]
  if (is.numeric(values)) {
    bounds <- range(values)
    sliderInput(
      inputId = input_id,
      label = colname,
      min = bounds[1],
      max = bounds[2],
      value = bounds
    )
  } else {
    sorted_levels <- sort(unique(values))
    checkboxGroupInput(
      inputId = input_id,
      label = colname,
      choices = sorted_levels,
      selected = sorted_levels
    )
  }
}
# Server for a single-column filter: returns a reactive that yields an
# unevaluated filter expression (a `call`) suitable for dplyr::filter().
# The numeric-vs-discrete branch is decided once at module start-up from the
# *unfiltered* dataset, mirroring the widget built by the matching UI module.
m_filter_single_var_srv <- function(id, dataset, dataname, colname) {
moduleServer(
id,
function(input, output, session) {
if (is.numeric(dataset[[colname]])) {
# condition: colname >= slider_min & colname <= slider_max
reactive({
call("&", call(">=", as.name(colname), input$filter[1]), call("<=", as.name(colname), input$filter[2]))
})
} else {
# condition: colname %in% checked_choices
reactive({
call("%in%", as.name(colname), input$filter)
})
}
}
)
}
# UI for one dataset's filters: a bold dataset-name header followed by one
# filter widget per column.
# Duplicate column names are dropped: each column becomes a namespaced module
# id, so a repeated column would create duplicate Shiny input ids (one caller
# historically passed c("AETERM", "AETERM")).
m_filter_single_dataset_ui <- function(id, dataset, dataname, colnames) {
  ns <- NS(id)
  colnames <- unique(colnames)
  tagList(
    tags$b(dataname),
    lapply(
      colnames,
      function(colname) {
        m_filter_single_var_ui(ns(colname), dataset, dataname, colname)
      }
    )
  )
}
# Server for one dataset's filters: collects the per-column condition calls
# and returns a reactive holding the filtered dataset.
m_filter_single_dataset_srv <- function(id, dataset, dataname, colnames) {
  moduleServer(
    id,
    function(input, output, session) {
      # Dedupe to stay in lockstep with the UI module, which must also drop
      # repeated column names (duplicates collide on module ids).
      colnames <- unique(colnames)
      # One reactive per column, each yielding an unevaluated condition call.
      condition_calls <- lapply(
        colnames,
        function(colname) {
          m_filter_single_var_srv(colname, dataset, dataname, colname)
        }
      )
      reactive({
        # Evaluate every condition reactive and apply them in one go:
        # filter(dataset, cond1, cond2, ...) -- conditions are ANDed.
        do.call(
          dplyr::filter,
          args = c(list(dataset), lapply(condition_calls, function(x) x()))
        )
      })
    }
  )
}
# Sidebar panel with the filter widgets for the three datasets.
m_filter_ui <- function(id, data) {
  ns <- NS(id)
  tags$form(
    class = "well",
    role = "complementary",
    tagList(
      m_filter_single_dataset_ui(ns("ADSL"), data[["ADSL"]], "ADSL", c("SEX", "RACE", "AGE")),
      # Fix: "AETERM" was listed twice, producing two widgets with the same
      # input id (invalid HTML; only one value is ever observed).
      m_filter_single_dataset_ui(ns("ADAE"), data[["ADAE"]], "ADAE", c("AETERM")),
      m_filter_single_dataset_ui(ns("ADVS"), data[["ADVS"]], "ADVS", c("PARAMCD", "AVISIT"))
    )
  )
}
# Server: filter each dataset by its own widgets, then restrict ADAE/ADVS to
# the subjects that survive the subject-level (ADSL) filters.
m_filter_srv <- function(id, data) {
  moduleServer(
    id,
    function(input, output, session) {
      ADSL_FILTERED <- m_filter_single_dataset_srv("ADSL", data[["ADSL"]], "ADSL", c("SEX", "RACE", "AGE"))
      # Fix: "AETERM" was listed twice (duplicate Shiny input ids).
      ADAE_FILTERED <- m_filter_single_dataset_srv("ADAE", data[["ADAE"]], "ADAE", c("AETERM"))
      ADVS_FILTERED <- m_filter_single_dataset_srv("ADVS", data[["ADVS"]], "ADVS", c("PARAMCD", "AVISIT"))
      ADSL_FINAL <- reactive({
        ADSL_FILTERED()
      })
      # Subject-level filtering propagates to the event/vitals data through an
      # inner join on the subject keys.
      ADAE_FINAL <- reactive({
        dplyr::inner_join(ADAE_FILTERED(), ADSL_FILTERED(), by = c("STUDYID", "USUBJID"))
      })
      ADVS_FINAL <- reactive({
        dplyr::inner_join(ADVS_FILTERED(), ADSL_FILTERED(), by = c("STUDYID", "USUBJID"))
      })
      return(list(
        ADSL = ADSL_FINAL,
        ADAE = ADAE_FINAL,
        ADVS = ADVS_FINAL
      ))
    }
  )
}
| /programs/shiny/m_filter.R | no_license | openpharma/rinpharma_workshop_2021 | R | false | false | 3,143 | r | m_filter_single_var_ui <- function(id, dataset, dataname, colname) {
ns <- NS(id)
id <- ns("filter")
if (is.numeric(dataset[[colname]])) {
# slider
v_range <- range(dataset[[colname]])
sliderInput(
inputId = id,
label = colname,
min = v_range[1],
max = v_range[2],
value = v_range
)
} else {
# radio-button
v_choices <- sort(unique(dataset[[colname]]))
checkboxGroupInput(
inputId = id,
label = colname,
choices = v_choices,
selected = v_choices
)
}
}
# Server for a single-column filter: returns a reactive that yields an
# unevaluated filter expression (a `call`) suitable for dplyr::filter().
# The numeric-vs-discrete branch is decided once at module start-up from the
# *unfiltered* dataset, mirroring the widget built by the matching UI module.
m_filter_single_var_srv <- function(id, dataset, dataname, colname) {
moduleServer(
id,
function(input, output, session) {
if (is.numeric(dataset[[colname]])) {
# condition: colname >= slider_min & colname <= slider_max
reactive({
call("&", call(">=", as.name(colname), input$filter[1]), call("<=", as.name(colname), input$filter[2]))
})
} else {
# condition: colname %in% checked_choices
reactive({
call("%in%", as.name(colname), input$filter)
})
}
}
)
}
# UI for one dataset's filters: a bold dataset-name header followed by one
# filter widget per column.
# Duplicate column names are dropped: each column becomes a namespaced module
# id, so a repeated column would create duplicate Shiny input ids (one caller
# historically passed c("AETERM", "AETERM")).
m_filter_single_dataset_ui <- function(id, dataset, dataname, colnames) {
  ns <- NS(id)
  colnames <- unique(colnames)
  tagList(
    tags$b(dataname),
    lapply(
      colnames,
      function(colname) {
        m_filter_single_var_ui(ns(colname), dataset, dataname, colname)
      }
    )
  )
}
# Server for one dataset's filters: collects the per-column condition calls
# and returns a reactive holding the filtered dataset.
m_filter_single_dataset_srv <- function(id, dataset, dataname, colnames) {
  moduleServer(
    id,
    function(input, output, session) {
      # Dedupe to stay in lockstep with the UI module, which must also drop
      # repeated column names (duplicates collide on module ids).
      colnames <- unique(colnames)
      # One reactive per column, each yielding an unevaluated condition call.
      condition_calls <- lapply(
        colnames,
        function(colname) {
          m_filter_single_var_srv(colname, dataset, dataname, colname)
        }
      )
      reactive({
        # Evaluate every condition reactive and apply them in one go:
        # filter(dataset, cond1, cond2, ...) -- conditions are ANDed.
        do.call(
          dplyr::filter,
          args = c(list(dataset), lapply(condition_calls, function(x) x()))
        )
      })
    }
  )
}
# Sidebar panel with the filter widgets for the three datasets.
m_filter_ui <- function(id, data) {
  ns <- NS(id)
  tags$form(
    class = "well",
    role = "complementary",
    tagList(
      m_filter_single_dataset_ui(ns("ADSL"), data[["ADSL"]], "ADSL", c("SEX", "RACE", "AGE")),
      # Fix: "AETERM" was listed twice, producing two widgets with the same
      # input id (invalid HTML; only one value is ever observed).
      m_filter_single_dataset_ui(ns("ADAE"), data[["ADAE"]], "ADAE", c("AETERM")),
      m_filter_single_dataset_ui(ns("ADVS"), data[["ADVS"]], "ADVS", c("PARAMCD", "AVISIT"))
    )
  )
}
# Server for the whole filter sidebar.
#
# ADSL is filtered by its own widgets; ADAE and ADVS are filtered by their
# own widgets and then inner-joined to the filtered ADSL so subject-level
# filters propagate to all domains. Returns a named list of reactives
# (one per dataset), not their current values.
m_filter_srv <- function(id, data) {
  moduleServer(
    id,
    function(input, output, session) {
      ADSL_FILTERED <- m_filter_single_dataset_srv("ADSL", data[["ADSL"]], "ADSL", c("SEX", "RACE", "AGE"))
      # BUG FIX: "AETERM" was listed twice, creating two module servers with
      # the same id; deduplicated to match the single widget in m_filter_ui().
      ADAE_FILTERED <- m_filter_single_dataset_srv("ADAE", data[["ADAE"]], "ADAE", c("AETERM"))
      ADVS_FILTERED <- m_filter_single_dataset_srv("ADVS", data[["ADVS"]], "ADVS", c("PARAMCD", "AVISIT"))
      ADSL_FINAL <- reactive({
        ADSL_FILTERED()
      })
      ADAE_FINAL <- reactive({
        # keep only AE records of subjects that survive the ADSL filters
        dplyr::inner_join(ADAE_FILTERED(), ADSL_FILTERED(), by = c("STUDYID", "USUBJID"))
      })
      ADVS_FINAL <- reactive({
        # keep only VS records of subjects that survive the ADSL filters
        dplyr::inner_join(ADVS_FILTERED(), ADSL_FILTERED(), by = c("STUDYID", "USUBJID"))
      })
      return(list(
        ADSL = ADSL_FINAL,
        ADAE = ADAE_FINAL,
        ADVS = ADVS_FINAL
      ))
    }
  )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genoset-class.R
\name{show,GenoSet-method}
\alias{show,GenoSet-method}
\title{Print a GenoSet}
\usage{
\S4method{show}{GenoSet}(object)
}
\arguments{
\item{object}{a GenoSet}
}
\description{
Prints out a description of a GenoSet object
}
| /man/show-GenoSet-method.Rd | no_license | phaverty/genoset | R | false | true | 316 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genoset-class.R
\name{show,GenoSet-method}
\alias{show,GenoSet-method}
\title{Print a GenoSet}
\usage{
\S4method{show}{GenoSet}(object)
}
\arguments{
\item{object}{a GenoSet}
}
\description{
Prints out a description of a GenoSet object
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qcs.cusum.r
\name{qcs.cusum}
\alias{qcs.cusum}
\alias{qcs.cusum.default}
\alias{qcs.cusum.qcd}
\title{Function to plot the cusum chart}
\usage{
qcs.cusum(x, ...)
\method{qcs.cusum}{default}(
x,
var.index = 1,
sample.index = 2,
covar.index = NULL,
covar.names = NULL,
data.name = NULL,
sizes = NULL,
center = NULL,
std.dev = NULL,
decision.interval = 5,
se.shift = 1,
plot = FALSE,
...
)
\method{qcs.cusum}{qcd}(
x,
center = NULL,
std.dev = NULL,
decision.interval = 5,
se.shift = 1,
plot = FALSE,
...
)
}
\arguments{
\item{x}{Object qcd (Quality Control Data)}
\item{...}{arguments passed to or from methods.}
\item{var.index}{a scalar with the column number corresponding to the observed data for
the variable (the quality variable). Alternatively, it can be a string with the
name of the quality variable.}
\item{sample.index}{a scalar with the column number corresponding to the index each
group (sample).}
\item{covar.index}{optional. A scalar or numeric vector with the column number(s)
corresponding to the covariate(s). Alternatively, it can be a character vector with
the names of the covariates.}
\item{covar.names}{optional. A string or vector of strings with names for the
covariate columns. Only valid if there is more than one column of data. By
default, takes the names from the original object.}
\item{data.name}{a string specifying the name of the variable which appears on the
plots. If not provided it is taken from the object given as data.}
\item{sizes}{a value or a vector of values specifying the sample sizes
associated with each group.}
\item{center}{a value specifying the center of group statistics or the
''target'' value of the process.}
\item{std.dev}{a value or an available method specifying the within-group
standard deviation(s) of the process. \cr Several methods are available for
estimating the standard deviation.}
\item{decision.interval}{A numeric value specifying the number of standard
errors of the summary statistics at which the cumulative sum is out of
control.}
\item{se.shift}{The amount of shift to detect in the process, measured in
standard errors of the summary statistics.}
\item{plot}{a logical value indicating it should be plotted.}
}
\description{
This function is used to compute statistics required by the cusum chart.
}
\examples{
library(qcr)
data(pistonrings)
attach(pistonrings)
res.qcd <- qcd(pistonrings, type.data = "dependence")
res.qcs <- qcs.cusum(res.qcd, type = "cusum")
summary(res.qcs)
plot(res.qcs)
}
| /man/qcs.cusum.Rd | no_license | cran/qcr | R | false | true | 2,607 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qcs.cusum.r
\name{qcs.cusum}
\alias{qcs.cusum}
\alias{qcs.cusum.default}
\alias{qcs.cusum.qcd}
\title{Function to plot the cusum chart}
\usage{
qcs.cusum(x, ...)
\method{qcs.cusum}{default}(
x,
var.index = 1,
sample.index = 2,
covar.index = NULL,
covar.names = NULL,
data.name = NULL,
sizes = NULL,
center = NULL,
std.dev = NULL,
decision.interval = 5,
se.shift = 1,
plot = FALSE,
...
)
\method{qcs.cusum}{qcd}(
x,
center = NULL,
std.dev = NULL,
decision.interval = 5,
se.shift = 1,
plot = FALSE,
...
)
}
\arguments{
\item{x}{Object qcd (Quality Control Data)}
\item{...}{arguments passed to or from methods.}
\item{var.index}{a scalar with the column number corresponding to the observed data for
the variable (the quality variable). Alternatively, it can be a string with the
name of the quality variable.}
\item{sample.index}{a scalar with the column number corresponding to the index each
group (sample).}
\item{covar.index}{optional. A scalar or numeric vector with the column number(s)
corresponding to the covariate(s). Alternatively, it can be a character vector with
the names of the covariates.}
\item{covar.names}{optional. A string or vector of strings with names for the
covariate columns. Only valid if there is more than one column of data. By
default, takes the names from the original object.}
\item{data.name}{a string specifying the name of the variable which appears on the
plots. If not provided it is taken from the object given as data.}
\item{sizes}{a value or a vector of values specifying the sample sizes
associated with each group.}
\item{center}{a value specifying the center of group statistics or the
''target'' value of the process.}
\item{std.dev}{a value or an available method specifying the within-group
standard deviation(s) of the process. \cr Several methods are available for
estimating the standard deviation.}
\item{decision.interval}{A numeric value specifying the number of standard
errors of the summary statistics at which the cumulative sum is out of
control.}
\item{se.shift}{The amount of shift to detect in the process, measured in
standard errors of the summary statistics.}
\item{plot}{a logical value indicating it should be plotted.}
}
\description{
This function is used to compute statistics required by the cusum chart.
}
\examples{
library(qcr)
data(pistonrings)
attach(pistonrings)
res.qcd <- qcd(pistonrings, type.data = "dependence")
res.qcs <- qcs.cusum(res.qcd, type = "cusum")
summary(res.qcs)
plot(res.qcs)
}
|
# NetLogo ABM batch driver (nlrx): Dongjak district, "BAU" pollution scenario.
# Sweeps the AC parameter over three levels and writes tick-level metrics to CSV.
# JRE required by NetLogo/rJava; path is cluster-specific (Spack module).
Sys.setenv(JAVA_HOME='/usr/local/software/spack/spack-0.11.2/opt/spack/linux-rhel7-x86_64/gcc-5.4.0/jdk-8u141-b15-p4aaoptkqukgdix6dh5ey236kllhluvr/jre') #Ubuntu cluster
## Load packages
library(nlrx)
library(tidyverse)
library(rcartocolor)
library(ggthemes)
# Office
# Cluster-specific locations of the NetLogo install and the model/output dir.
netlogopath <- file.path("/usr/local/Cluster-Apps/netlogo/6.0.4")
outpath <- file.path("/home/hs621/github/chapter4")
## Step1: Create a nl object:
nl <- nl(nlversion = "6.0.4",
         nlpath = netlogopath,
         modelpath = file.path(outpath, "global_St111241_Dongjak.nlogo"),
         jvmmem = 1024)
## Step2: Add Experiment
# One repetition, metrics recorded at every tick for 8764 ticks.
# NOTE(review): 8764 suggests hourly steps over one year -- confirm the tick
# unit against the NetLogo model.
nl@experiment <- experiment(expname = "nlrx_spatial",
                           outpath = outpath,
                           repetition = 1,
                           tickmetrics = "true",
                           idsetup = "setup",
                           idgo = "go",
                           runtime = 8764,
                           evalticks=seq(1,8764),
                           # Outputs: deaths, overall danger, per-neighbourhood
                           # risk indices, and demographic group counts.
                           metrics = c(
                             "number-dead",
                             "danger",
                             'Daebang_risk',
                             'Heukseok_risk',
                             'Noryangjin1_risk',
                             'Noryangjin2_risk',
                             'Sadang1_risk',
                             'Sadang2_risk',
                             'Sadang3_risk',
                             'Sadang4_risk',
                             'Sadang5_risk',
                             'Sangdo1_risk',
                             'Sangdo2_risk',
                             'Sangdo3_risk',
                             'Sangdo4_risk',
                             'Shindaebang1_risk',
                             'Shindaebang2_risk',
                             "age15", "age1564", "age65", "eduhigh", "edulow"
                           ),
                           # Swept design variable: AC at three levels (one run each
                           # under simdesign_distinct below).
                           variables = list('AC' = list(values=c(100,150,200))),
                           constants = list("PM10-parameters" = 100,
                                            "Scenario" = "\"BAU\"",
                                            "scenario-percent" = "\"inc-sce\"")
)
# Evaluate if variables and constants are valid:
eval_variables_constants(nl)
# simdesign_distinct runs each distinct value of the design variables once.
nl@simdesign <- simdesign_distinct(nl = nl, nseeds = 1)
#nl@simdesign <- simdesign_simple(nl = nl, nseeds = 1)
# Step4: Run simulations (timed; this is the long-running step):
init <- Sys.time()
results <- run_nl_all(nl = nl)
Sys.time() - init
rm(nl)
# Persist results, tagging the filename with the run's random seed.
write.csv(results, paste("dongjak_BAU_", results$`random-seed`[1], ".csv", sep = ""), row.names = F)
| /scripts_bau/dongjak_bau.R | no_license | dataandcrowd/PollutionABM | R | false | false | 2,688 | r | Sys.setenv(JAVA_HOME='/usr/local/software/spack/spack-0.11.2/opt/spack/linux-rhel7-x86_64/gcc-5.4.0/jdk-8u141-b15-p4aaoptkqukgdix6dh5ey236kllhluvr/jre') #Ubuntu cluster
## Load packages
# NetLogo ABM batch driver (nlrx): Dongjak district, "BAU" pollution scenario.
# Sweeps the AC parameter over three levels and writes tick-level metrics to CSV.
library(nlrx)
library(tidyverse)
library(rcartocolor)
library(ggthemes)
# Office
# Cluster-specific locations of the NetLogo install and the model/output dir.
netlogopath <- file.path("/usr/local/Cluster-Apps/netlogo/6.0.4")
outpath <- file.path("/home/hs621/github/chapter4")
## Step1: Create a nl object:
nl <- nl(nlversion = "6.0.4",
         nlpath = netlogopath,
         modelpath = file.path(outpath, "global_St111241_Dongjak.nlogo"),
         jvmmem = 1024)
## Step2: Add Experiment
# One repetition, metrics recorded at every tick for 8764 ticks.
# NOTE(review): 8764 suggests hourly steps over one year -- confirm the tick
# unit against the NetLogo model.
nl@experiment <- experiment(expname = "nlrx_spatial",
                           outpath = outpath,
                           repetition = 1,
                           tickmetrics = "true",
                           idsetup = "setup",
                           idgo = "go",
                           runtime = 8764,
                           evalticks=seq(1,8764),
                           # Outputs: deaths, overall danger, per-neighbourhood
                           # risk indices, and demographic group counts.
                           metrics = c(
                             "number-dead",
                             "danger",
                             'Daebang_risk',
                             'Heukseok_risk',
                             'Noryangjin1_risk',
                             'Noryangjin2_risk',
                             'Sadang1_risk',
                             'Sadang2_risk',
                             'Sadang3_risk',
                             'Sadang4_risk',
                             'Sadang5_risk',
                             'Sangdo1_risk',
                             'Sangdo2_risk',
                             'Sangdo3_risk',
                             'Sangdo4_risk',
                             'Shindaebang1_risk',
                             'Shindaebang2_risk',
                             "age15", "age1564", "age65", "eduhigh", "edulow"
                           ),
                           # Swept design variable: AC at three levels (one run each
                           # under simdesign_distinct below).
                           variables = list('AC' = list(values=c(100,150,200))),
                           constants = list("PM10-parameters" = 100,
                                            "Scenario" = "\"BAU\"",
                                            "scenario-percent" = "\"inc-sce\"")
)
# Evaluate if variables and constants are valid:
eval_variables_constants(nl)
# simdesign_distinct runs each distinct value of the design variables once.
nl@simdesign <- simdesign_distinct(nl = nl, nseeds = 1)
#nl@simdesign <- simdesign_simple(nl = nl, nseeds = 1)
# Step4: Run simulations (timed; this is the long-running step):
init <- Sys.time()
results <- run_nl_all(nl = nl)
Sys.time() - init
rm(nl)
# Persist results, tagging the filename with the run's random seed.
write.csv(results, paste("dongjak_BAU_", results$`random-seed`[1], ".csv", sep = ""), row.names = F)
|
library(rethinking)
library(tidyverse)
data(WaffleDivorce)
d <- WaffleDivorce
d$A <- scale(d$MedianAgeMarriage)
d$D <- scale(d$Divorce)
m5.1 <- quap(
alist(
D ~ dnorm(mu, sigma),
mu <- a + b_a * A,
a ~ dnorm(0,0.2),
b_a ~ dnorm(0,0.5),
sigma ~ dexp(1)
), data=d
)
plot(d$A, d$D, xlab="Median Age Marriage", ylab="Divorce Rate")
title("Divorce Rate vs Median Age Marriage")
samples <- extract.samples(m5.1)
curve(mean(samples$a) + mean(samples$b_a) * (x - mean(d$A)), add=TRUE)
library(dagitty)
dag5.1 <- dagitty("dag{
A -> D
A -> M
M -> D
}")
coordinates(dag5.1) <- list(x=c(A=0, D=1, M=2), y=c(A=0, D=1, M=0))
drawdag(dag5.1)
d$M <- scale(d$Marriage)
# Multiple regression of divorce rate on BOTH marriage rate and median age
# at marriage (additive, no interaction).
# BUG FIX: the original had an unbalanced extra ")" after the quap() call,
# which is a syntax error in R; removed.
m5.3 <- quap(
  alist(
    D ~ dnorm(mu, sigma),
    mu <- a + bM * M + bA * A,
    a ~ dnorm(0, 0.2),
    bM ~ dnorm(0, 0.5),
    bA ~ dnorm(0, 0.5),
    sigma ~ dexp(1)
  ), data = d
)
precis(m5.3)
plot(coeftab(m5.1, m5.3), par=c("bA", "bM"))
# Predictor residual plots
# To compute predictor residuals for either, we just use the other predictor to model it
# so for marriage rate, we use median marriage age to model it
m5.4 <- quap(
alist(
M ~ dnorm(mu, sigma),
mu <- a + bAM * A,
a ~ dnorm(0,0.2),
bAM ~ dnorm(0,0.5),
sigma ~ dexp(1)
), data = d
)
mu <- link(m5.4)
mu_mean <- apply(mu, 2, mean)
mu_resid <- d$M - mu_mean
d$M_res <- mu_resid
samples <- extract.samples(m5.4)
# Now do another linear regression with x as the marriage rate residuals and
# y as the standardized divorce rate
m_res <- quap(
alist(
D ~ dnorm(mu, sigma),
mu <- a + b * ( M_res - mean(M_res)),
a ~ dnorm(0,0.2),
b ~ dnorm(0,0.5),
sigma ~ dexp(1)
), data = d
)
samples <- extract.samples(m_res)
a <- mean(samples$a)
b <- mean(samples$b)
x_bar <- mean(d$M_res)
ggplot(data = d, aes(M_res, D)) +
geom_point() +
geom_abline(aes(intercept=a - b*x_bar, slope=b)) +
xlab("Marriage rate residuals") +
ylab("Divorce rate (std)")
# Now, turn to Posterior prediction plots
mu <- link(m5.3)
mu_mean <- apply(mu, 2, mean)
mu_PI <- apply(mu, 2, PI)
D_sim <- sim(m5.3, n=1e4)
D_PI <- apply(D_sim, 2, PI)
plot(mu_mean ~ d$D, col=rangi2, ylim=range(mu_PI),
xlab="Observed divorce", ylab="Predicted divorce")
abline(a=0, b=1, lty=2)
for(i in 1:nrow(d)) lines(rep(d$D[i],2), mu_PI[,i], col=rangi2)
identify(x=d$D, y=mu_mean, labels=d$Loc)
# counterfactual model
data("WaffleDivorce")
d <- list()
d$A <- standardize(WaffleDivorce$MedianAgeMarriage)
d$D <- standardize(WaffleDivorce$Divorce)
d$M <- standardize(WaffleDivorce$Marriage)
m5.3_A <- quap(
alist(
## A -> D <- M
D ~ dnorm(mu, sigma),
mu <- a + bM*M + bA*A, # because there is no interaction between A and M
a ~ dnorm(0, 0.2),
bM ~ dnorm(0, 0.5),
bA ~ dnorm(0, 0.5),
sigma ~ dexp(1),
## A -> M
M ~ dnorm(mu_M, sigma_M),
mu_M <- aM + bAM*A,
aM ~ dnorm(0,0.2),
bAM ~ dnorm(0, 0.5),
sigma_M ~ dexp(1)
), data =d )
A_seq <- seq(from=-2, to=2, length.out=30)
sim_dat <- data.frame( A=A_seq)
s <- sim(m5.3_A, data=sim_dat, var=c("M", "D"))
str(s)
# display counterfactual predictions
# Plot the counterfactual effect of manipulating A on D, with PI shading.
# FIX: corrected xlab typo "mnipulated" -> "manipulated".
plot(sim_dat$A, colMeans(s$D), ylim=c(-2,2), type="l",
     xlab="manipulated A", ylab="counterfactual D")
shade(apply(s$D, 2, PI), sim_dat$A)
mtext("Total counterfactual effect of A on D")
plot(sim_dat$A, colMeans(s$M), ylim=c(-2,2), type="l",
xlab="manipulated A", ylab="counterfactual M")
shade(apply(s$M, 2, PI), sim_dat$A)
mtext("Counterfactual effect A -> M")
# simulate the effect of A -> D <- M, but assume no relationship between
# A and M
sim_dat <- data.frame(M=seq(from=-2, to=2, length.out=30), A=0)
s <- sim(m5.3_A, data=sim_dat, vars="D")
plot(sim_dat$M, colMeans(s), ylim=c(-2,2), type="l",
xlab="manipulated M", ylab="counterfactual D")
shade(apply(s,2,PI), sim_dat$M)
mtext("Total counterfactual effect of M on D")
######################### 5.2 Masked relationship
library(rethinking)
data(milk)
d <- milk
str(d)
d$K <- scale(d$kcal.per.g)
d$N <- scale(d$neocortex.perc)
d$M <- scale(log(d$mass))
# first model to consider -- bivariate regression
# deal with NA value, use complete.cases
dcc <- d[complete.cases(d$K, d$N, d$M), ]
m5.5_draft <- quap(
alist(
K ~ dnorm(mu, sigma),
mu <- a + bN * N,
a ~ dnorm(0,1),
bN ~ dnorm(0,1),
sigma ~ dexp(1)
), data=dcc )
# do prior prediction to see if things makes sense
prior <- extract.prior(m5.5_draft)
xseq <- c(-2,2)
mu <- link(m5.5_draft, post=prior, data=list(N=xseq))
plot(NULL, xlim=xseq, ylim=xseq)
for(i in 1:50) lines(xseq, mu[i, ], col=col.alpha("black", 0.3))
# but the prior graph above is absolute awful. Change the prior so that it
# looks better
m5.5 <- quap(
alist(
K ~ dnorm(mu, sigma),
mu <- a + bN * N,
a ~ dnorm(0,0.2),
bN ~ dnorm(0,0.5),
sigma ~ dexp(1)
), data=dcc )
# do prior prediction to see if things makes sense
prior <- extract.prior(m5.5)
xseq <- c(-2,2)
mu <- link(m5.5_draft, post=prior, data=list(N=xseq))
plot(NULL, xlim=xseq, ylim=xseq)
for(i in 1:50) lines(xseq, mu[i, ], col=col.alpha("black", 0.3))
precis(m5.5)
xseq <- seq(from=min(dcc$N)-0.15, to=max(dcc$N)+0.15, length.out=30)
mu <- link(m5.5, data=list(N=xseq))
mu_mean <- apply(mu, 2, mean)
mu_PI <- apply(mu, 2, PI)
plot(K ~ N, data=dcc)
lines(xseq, mu_mean, lwd=2)
shade(mu_PI, xseq)
m5.6 <- quap(
alist(
K ~ dnorm(mu, sigma),
mu <- a + bM * M ,
a ~ dnorm(0, 0.2) ,
bM ~ dnorm(0, 0.5),
sigma ~ dexp(1)
) , data=dcc
)
precis(m5.6)
# this graph has a negative slope
xseq <- seq(from=min(dcc$M)-0.15, to=max(dcc$M)+0.15, length.out=30)
mu <- link(m5.6, data=list(M=xseq))
mu_mean <- apply(mu, 2, mean)
mu_PI <- apply(mu, 2, PI)
plot(K ~ M, data=dcc)
lines(xseq, mu_mean, lwd=2)
shade(mu_PI, xseq)
# Now let's see what happens when we add both predictors
m5.7 <- quap(
alist(
K ~ dnorm(mu, sigma),
mu <- a + bN * N + bM * M,
a ~ dnorm(0, 0.2),
bN ~ dnorm(0, 0.5),
bM ~ dnorm(0, 0.5),
sigma ~ dexp(1)
), data=dcc
)
precis(m5.7)
plot(coeftab(m5.5, m5.6, m5.7), pars=c("bM", "bN"))
pairs( ~K + M + N, dcc)
# Categorical data analysis
data("Howell1")
d <- Howell1
d$sex <- ifelse(d$male==1, 2, 1)
str(d$sex)
m5.8 <- quap(
alist(
height ~ dnorm(mu, sigma),
mu <- a[sex],
a[sex] ~ dnorm(178, 20),
sigma ~ dunif(0, 50)
), data=d )
precis(m5.8, depth=2)
post <- extract.samples(m5.8)
post$diff_fm <- post$a[,1] - post$a[,2]
precis(post, depth=2, hist=FALSE)
# multipel categories using index method
data(milk)
d <- milk
unique(d$clade)
d$clade_id <- as.integer(d$clade )
d$K <- scale(d$kcal.per.g)
m5.9 <- quap(
alist(
K ~ dnorm(mu, sigma),
mu <- a[clade_id],
a[clade_id] ~ dnorm(0,0.5),
sigma ~ dexp(1)
), data=d )
labels <- paste( "a[" , 1:4 , "]:" , levels(d$clade) , sep="" )
plot( precis( m5.9 , depth=2 , pars="a" ) , labels=labels ,
xlab="expected kcal (std)" )
| /Follow_through/Chapter5_follow_through.R | no_license | nichi97/Statistical_rethinking- | R | false | false | 7,036 | r | library(rethinking)
library(tidyverse)
data(WaffleDivorce)
d <- WaffleDivorce
d$A <- scale(d$MedianAgeMarriage)
d$D <- scale(d$Divorce)
m5.1 <- quap(
alist(
D ~ dnorm(mu, sigma),
mu <- a + b_a * A,
a ~ dnorm(0,0.2),
b_a ~ dnorm(0,0.5),
sigma ~ dexp(1)
), data=d
)
plot(d$A, d$D, xlab="Median Age Marriage", ylab="Divorce Rate")
title("Divorce Rate vs Median Age Marriage")
samples <- extract.samples(m5.1)
curve(mean(samples$a) + mean(samples$b_a) * (x - mean(d$A)), add=TRUE)
library(dagitty)
dag5.1 <- dagitty("dag{
A -> D
A -> M
M -> D
}")
coordinates(dag5.1) <- list(x=c(A=0, D=1, M=2), y=c(A=0, D=1, M=0))
drawdag(dag5.1)
d$M <- scale(d$Marriage)
# Multiple regression of divorce rate on BOTH marriage rate and median age
# at marriage (additive, no interaction).
# BUG FIX: the original had an unbalanced extra ")" after the quap() call,
# which is a syntax error in R; removed.
m5.3 <- quap(
  alist(
    D ~ dnorm(mu, sigma),
    mu <- a + bM * M + bA * A,
    a ~ dnorm(0, 0.2),
    bM ~ dnorm(0, 0.5),
    bA ~ dnorm(0, 0.5),
    sigma ~ dexp(1)
  ), data = d
)
precis(m5.3)
plot(coeftab(m5.1, m5.3), par=c("bA", "bM"))
# Predictor residual plots
# To compute predictor residuals for either, we just use the other predictor to model it
# so for marriage rate, we use median marriage age to model it
m5.4 <- quap(
alist(
M ~ dnorm(mu, sigma),
mu <- a + bAM * A,
a ~ dnorm(0,0.2),
bAM ~ dnorm(0,0.5),
sigma ~ dexp(1)
), data = d
)
mu <- link(m5.4)
mu_mean <- apply(mu, 2, mean)
mu_resid <- d$M - mu_mean
d$M_res <- mu_resid
samples <- extract.samples(m5.4)
# Now do another linear regression with x as the marriage rate residuals and
# y as the standardized divorce rate
m_res <- quap(
alist(
D ~ dnorm(mu, sigma),
mu <- a + b * ( M_res - mean(M_res)),
a ~ dnorm(0,0.2),
b ~ dnorm(0,0.5),
sigma ~ dexp(1)
), data = d
)
samples <- extract.samples(m_res)
a <- mean(samples$a)
b <- mean(samples$b)
x_bar <- mean(d$M_res)
ggplot(data = d, aes(M_res, D)) +
geom_point() +
geom_abline(aes(intercept=a - b*x_bar, slope=b)) +
xlab("Marriage rate residuals") +
ylab("Divorce rate (std)")
# Now, turn to Posterior prediction plots
mu <- link(m5.3)
mu_mean <- apply(mu, 2, mean)
mu_PI <- apply(mu, 2, PI)
D_sim <- sim(m5.3, n=1e4)
D_PI <- apply(D_sim, 2, PI)
plot(mu_mean ~ d$D, col=rangi2, ylim=range(mu_PI),
xlab="Observed divorce", ylab="Predicted divorce")
abline(a=0, b=1, lty=2)
for(i in 1:nrow(d)) lines(rep(d$D[i],2), mu_PI[,i], col=rangi2)
identify(x=d$D, y=mu_mean, labels=d$Loc)
# counterfactual model
data("WaffleDivorce")
d <- list()
d$A <- standardize(WaffleDivorce$MedianAgeMarriage)
d$D <- standardize(WaffleDivorce$Divorce)
d$M <- standardize(WaffleDivorce$Marriage)
m5.3_A <- quap(
alist(
## A -> D <- M
D ~ dnorm(mu, sigma),
mu <- a + bM*M + bA*A, # because there is no interaction between A and M
a ~ dnorm(0, 0.2),
bM ~ dnorm(0, 0.5),
bA ~ dnorm(0, 0.5),
sigma ~ dexp(1),
## A -> M
M ~ dnorm(mu_M, sigma_M),
mu_M <- aM + bAM*A,
aM ~ dnorm(0,0.2),
bAM ~ dnorm(0, 0.5),
sigma_M ~ dexp(1)
), data =d )
A_seq <- seq(from=-2, to=2, length.out=30)
sim_dat <- data.frame( A=A_seq)
s <- sim(m5.3_A, data=sim_dat, var=c("M", "D"))
str(s)
# display counterfactual predictions
# Plot the counterfactual effect of manipulating A on D, with PI shading.
# FIX: corrected xlab typo "mnipulated" -> "manipulated".
plot(sim_dat$A, colMeans(s$D), ylim=c(-2,2), type="l",
     xlab="manipulated A", ylab="counterfactual D")
shade(apply(s$D, 2, PI), sim_dat$A)
mtext("Total counterfactual effect of A on D")
plot(sim_dat$A, colMeans(s$M), ylim=c(-2,2), type="l",
xlab="manipulated A", ylab="counterfactual M")
shade(apply(s$M, 2, PI), sim_dat$A)
mtext("Counterfactual effect A -> M")
# simulate the effect of A -> D <- M, but assume no relationship between
# A and M
sim_dat <- data.frame(M=seq(from=-2, to=2, length.out=30), A=0)
s <- sim(m5.3_A, data=sim_dat, vars="D")
plot(sim_dat$M, colMeans(s), ylim=c(-2,2), type="l",
xlab="manipulated M", ylab="counterfactual D")
shade(apply(s,2,PI), sim_dat$M)
mtext("Total counterfactual effect of M on D")
######################### 5.2 Masked relationship
library(rethinking)
data(milk)
d <- milk
str(d)
d$K <- scale(d$kcal.per.g)
d$N <- scale(d$neocortex.perc)
d$M <- scale(log(d$mass))
# first model to consider -- bivariate regression
# deal with NA value, use complete.cases
dcc <- d[complete.cases(d$K, d$N, d$M), ]
m5.5_draft <- quap(
alist(
K ~ dnorm(mu, sigma),
mu <- a + bN * N,
a ~ dnorm(0,1),
bN ~ dnorm(0,1),
sigma ~ dexp(1)
), data=dcc )
# do prior prediction to see if things makes sense
prior <- extract.prior(m5.5_draft)
xseq <- c(-2,2)
mu <- link(m5.5_draft, post=prior, data=list(N=xseq))
plot(NULL, xlim=xseq, ylim=xseq)
for(i in 1:50) lines(xseq, mu[i, ], col=col.alpha("black", 0.3))
# but the prior graph above is absolute awful. Change the prior so that it
# looks better
m5.5 <- quap(
alist(
K ~ dnorm(mu, sigma),
mu <- a + bN * N,
a ~ dnorm(0,0.2),
bN ~ dnorm(0,0.5),
sigma ~ dexp(1)
), data=dcc )
# do prior prediction to see if things makes sense
prior <- extract.prior(m5.5)
xseq <- c(-2,2)
mu <- link(m5.5_draft, post=prior, data=list(N=xseq))
plot(NULL, xlim=xseq, ylim=xseq)
for(i in 1:50) lines(xseq, mu[i, ], col=col.alpha("black", 0.3))
precis(m5.5)
xseq <- seq(from=min(dcc$N)-0.15, to=max(dcc$N)+0.15, length.out=30)
mu <- link(m5.5, data=list(N=xseq))
mu_mean <- apply(mu, 2, mean)
mu_PI <- apply(mu, 2, PI)
plot(K ~ N, data=dcc)
lines(xseq, mu_mean, lwd=2)
shade(mu_PI, xseq)
m5.6 <- quap(
alist(
K ~ dnorm(mu, sigma),
mu <- a + bM * M ,
a ~ dnorm(0, 0.2) ,
bM ~ dnorm(0, 0.5),
sigma ~ dexp(1)
) , data=dcc
)
precis(m5.6)
# this graph has a negative slope
xseq <- seq(from=min(dcc$M)-0.15, to=max(dcc$M)+0.15, length.out=30)
mu <- link(m5.6, data=list(M=xseq))
mu_mean <- apply(mu, 2, mean)
mu_PI <- apply(mu, 2, PI)
plot(K ~ M, data=dcc)
lines(xseq, mu_mean, lwd=2)
shade(mu_PI, xseq)
# Now let's see what happens when we add both predictors
m5.7 <- quap(
alist(
K ~ dnorm(mu, sigma),
mu <- a + bN * N + bM * M,
a ~ dnorm(0, 0.2),
bN ~ dnorm(0, 0.5),
bM ~ dnorm(0, 0.5),
sigma ~ dexp(1)
), data=dcc
)
precis(m5.7)
plot(coeftab(m5.5, m5.6, m5.7), pars=c("bM", "bN"))
pairs( ~K + M + N, dcc)
# Categorical data analysis
data("Howell1")
d <- Howell1
d$sex <- ifelse(d$male==1, 2, 1)
str(d$sex)
m5.8 <- quap(
alist(
height ~ dnorm(mu, sigma),
mu <- a[sex],
a[sex] ~ dnorm(178, 20),
sigma ~ dunif(0, 50)
), data=d )
precis(m5.8, depth=2)
post <- extract.samples(m5.8)
post$diff_fm <- post$a[,1] - post$a[,2]
precis(post, depth=2, hist=FALSE)
# multipel categories using index method
data(milk)
d <- milk
unique(d$clade)
d$clade_id <- as.integer(d$clade )
d$K <- scale(d$kcal.per.g)
m5.9 <- quap(
alist(
K ~ dnorm(mu, sigma),
mu <- a[clade_id],
a[clade_id] ~ dnorm(0,0.5),
sigma ~ dexp(1)
), data=d )
labels <- paste( "a[" , 1:4 , "]:" , levels(d$clade) , sep="" )
plot( precis( m5.9 , depth=2 , pars="a" ) , labels=labels ,
xlab="expected kcal (std)" )
|
# NIO daily close price: exploratory time-series analysis and AR(1)
# forecasting on log returns (diff of log prices).
library(devtools)
library(tidyquant)
library(crypto)
library(ggplot2)
library(tseries)
library(zoo)
library(dplyr)
library(xts)
# Silence quantmod/getSymbols deprecation chatter.
options("getSymbols.warning4.0"=FALSE)
options("getSymbols.yahoo.warning"=FALSE)
# ===============================================================================
#We first collect our data
getSymbols("NIO", from= '2011-01-01', to = '2021-06-21', warnings= FALSE, auto.assign = TRUE)
# Keep only the 4th column (close price) and re-attach the dates.
nio <- as.data.frame(date = index(NIO), coredata(NIO))
nio<-nio[4]
nio$date <- index(NIO)
#================================================================================
#Then we check for N/A values in our dataset
# NOTE(review): the NA count is only printed, not acted on -- downstream code
# assumes zero NAs; verify interactively.
sum(is.na(nio))
dtaset <- ts(data = nio$NIO.Close, frequency = 1)
plot.ts(dtaset)
# ===============================================================================
###################################################################
# Linear trend of close price on date, overlaid on the scatter plot.
reg_1<-lm(nio$NIO.Close~nio$date)
windows()
chart_Series(NIO)
plot(x=nio$date, y=nio$NIO.Close)
abline(reg_1)
#From the plot, we see that the series is not covariance stationary, as it's mean
#and variance do not stay the same over a long period of time. So we will use first
#order differencing to check if we remove non-stationarity by detrending. Because
#out time series is exponential, we will differantiate the logarithm of our time-series.
#================================================================================
# First difference of log prices = log returns.
frst_diff<-diff(log(nio$NIO.Close))
ts.plot(frst_diff)
#From the first order differentiation we have managed to detrend the time series,
#while the existence of outliers remains.
#================================================================================
#We will start by applying the AR(1) model and test if the residuals have any serial
#correlation
ar1<- arima(frst_diff, order=c(1,0,0))
ar2<- arima(frst_diff, order=c(2,0,0))
ar1
ar2#To extract the residuals use: ar1_resid<- residuals(ar1)
#To extract the fitted values, use: ar1_fitted <- ar1 - ar1_resid
ar1_resid<- residuals(ar1)
#We use the t-test to check for error autocorrelation. Because we
#will use an autoregressive model, the Durbin watson test is invalid (because the
#independent variables include past values of the dependent variable). Hence we
#will use a t-test involving a residual autocorrelation and the std error of the
#residual autocorrelation.
ttest <- t.test(ar1_resid, mu = 0)
# NOTE(review): ttest[1] is the t statistic; the 695 degrees of freedom are
# hard-coded rather than derived from length(ar1_resid) - 1 -- verify.
ttest[1]>qt(0.025,695,lower.tail= FALSE)
#Since the critical value is higher than the t-statistic of our test, we assume that
#our model is correctly specified and that we can use OLS. Since no significant serial
#correlation is found, then there is no need for the AR(2) model and we can proceed with
#the AR(1) model.
#From the summary table, we see that the t-statistic is t = -0.0263939 which is included
#in the 95% confidence interval. Since p-value = 0.979 > t-statistic, we cannot reject H0
#and we accept that our mean equals to zero.
#Plotting our time series along with our fitted values.
ar1_fitted <- frst_diff - ar1_resid
windows()
ts.plot(frst_diff)
points(ar1_fitted)
#Now we need to check the autocorrelations of the residuals from the model
windows()
ts.plot(ar1_resid)
acf(ar1_resid)
#================================================================================
#We will now try to use our model to predict 2 periods later
predictions <- predict(ar1, n.ahead = 3)
#Our models values are in a logarithmic scale and differenced. So we will need
#to apply reverse difference and then use the exponential.
# NOTE(review): applying exp() to the log-return forecasts and then diffinv()
# ADDS exp(return) to the last price instead of compounding
# (price_t * exp(return)); the back-transformation order looks wrong -- verify.
predictions$pred <- exp(predictions$pred)
predictions$pred <- diffinv(predictions$pred, lag = 1, xi = nio$NIO.Close[length(nio$NIO.Close)])
#Below we plot our data with the predictions of our model.
windows()
plot.ts(nio$NIO.Close, xlim = c(0,700))
points(predictions$pred, col = 2)
#================================================================================
| /NIO - Stock Movement.R | no_license | ltheod01/stock---time-series-analysis- | R | false | false | 3,995 | r | library(devtools)
# NIO daily close price: exploratory time-series analysis and AR(1)
# forecasting on log returns (diff of log prices).
library(tidyquant)
library(crypto)
library(ggplot2)
library(tseries)
library(zoo)
library(dplyr)
library(xts)
# Silence quantmod/getSymbols deprecation chatter.
options("getSymbols.warning4.0"=FALSE)
options("getSymbols.yahoo.warning"=FALSE)
# ===============================================================================
#We first collect our data
getSymbols("NIO", from= '2011-01-01', to = '2021-06-21', warnings= FALSE, auto.assign = TRUE)
# Keep only the 4th column (close price) and re-attach the dates.
nio <- as.data.frame(date = index(NIO), coredata(NIO))
nio<-nio[4]
nio$date <- index(NIO)
#================================================================================
#Then we check for N/A values in our dataset
# NOTE(review): the NA count is only printed, not acted on -- downstream code
# assumes zero NAs; verify interactively.
sum(is.na(nio))
dtaset <- ts(data = nio$NIO.Close, frequency = 1)
plot.ts(dtaset)
# ===============================================================================
###################################################################
# Linear trend of close price on date, overlaid on the scatter plot.
reg_1<-lm(nio$NIO.Close~nio$date)
windows()
chart_Series(NIO)
plot(x=nio$date, y=nio$NIO.Close)
abline(reg_1)
#From the plot, we see that the series is not covariance stationary, as it's mean
#and variance do not stay the same over a long period of time. So we will use first
#order differencing to check if we remove non-stationarity by detrending. Because
#out time series is exponential, we will differantiate the logarithm of our time-series.
#================================================================================
# First difference of log prices = log returns.
frst_diff<-diff(log(nio$NIO.Close))
ts.plot(frst_diff)
#From the first order differentiation we have managed to detrend the time series,
#while the existence of outliers remains.
#================================================================================
#We will start by applying the AR(1) model and test if the residuals have any serial
#correlation
ar1<- arima(frst_diff, order=c(1,0,0))
ar2<- arima(frst_diff, order=c(2,0,0))
ar1
ar2#To extract the residuals use: ar1_resid<- residuals(ar1)
#To extract the fitted values, use: ar1_fitted <- ar1 - ar1_resid
ar1_resid<- residuals(ar1)
#We use the t-test to check for error autocorrelation. Because we
#will use an autoregressive model, the Durbin watson test is invalid (because the
#independent variables include past values of the dependent variable). Hence we
#will use a t-test involving a residual autocorrelation and the std error of the
#residual autocorrelation.
ttest <- t.test(ar1_resid, mu = 0)
# NOTE(review): ttest[1] is the t statistic; the 695 degrees of freedom are
# hard-coded rather than derived from length(ar1_resid) - 1 -- verify.
ttest[1]>qt(0.025,695,lower.tail= FALSE)
#Since the critical value is higher than the t-statistic of our test, we assume that
#our model is correctly specified and that we can use OLS. Since no significant serial
#correlation is found, then there is no need for the AR(2) model and we can proceed with
#the AR(1) model.
#From the summary table, we see that the t-statistic is t = -0.0263939 which is included
#in the 95% confidence interval. Since p-value = 0.979 > t-statistic, we cannot reject H0
#and we accept that our mean equals to zero.
#Plotting our time series along with our fitted values.
ar1_fitted <- frst_diff - ar1_resid
windows()
ts.plot(frst_diff)
points(ar1_fitted)
#Now we need to check the autocorrelations of the residuals from the model
windows()
ts.plot(ar1_resid)
acf(ar1_resid)
#================================================================================
#We will now try to use our model to predict 2 periods later
predictions <- predict(ar1, n.ahead = 3)
#Our models values are in a logarithmic scale and differenced. So we will need
#to apply reverse difference and then use the exponential.
# NOTE(review): applying exp() to the log-return forecasts and then diffinv()
# ADDS exp(return) to the last price instead of compounding
# (price_t * exp(return)); the back-transformation order looks wrong -- verify.
predictions$pred <- exp(predictions$pred)
predictions$pred <- diffinv(predictions$pred, lag = 1, xi = nio$NIO.Close[length(nio$NIO.Close)])
#Below we plot our data with the predictions of our model.
windows()
plot.ts(nio$NIO.Close, xlim = c(0,700))
points(predictions$pred, col = 2)
#================================================================================
|
# Some important notes:
#
# Windows Users:
#
# 1. You may need to install RTools as well.
#
# 2. You may get stuck in a loop where RStudio asks if you
# want to restart R repeatedly. Try completely closing RStudio
# if you have this problem, or answering no in the prompt.
#
# 3. During the install_github() commands I found that updating rlang
# made a mess, so do not choose to update it when prompted.
#
# Linux users: Running this will make your computer very upset (these
# packages compile from source). Run it while you make breakfast or something.
#
# The following are all required packages and dependencies of packages
# for Analyzing Baseball Data with R.
install.packages('tidyverse')
install.packages('doParallel')
install.packages('DBI')
install.packages('RMySQL')
install.packages('latticeExtra')
install.packages('cli')
install.packages('gh')
install.packages('usethis')
install.packages('devtools')
install.packages('xml2')
install.packages('pitchRx')
install.packages('mlbgameday')
install.packages('Lahman')
install.packages("RSQLite")
# Development versions from GitHub (require devtools, installed above).
devtools::install_github("BillPetti/baseballr")
devtools::install_github("keberwein/mlbgameday")
devtools::install_github("beanumber/retro") # Added 3/23/21
# The next set is packages I found necessary for other projects. For the Aging
# Curves project we've had in the past, here is the hyperlink to the article
# we found and the packages in the code that was required to run their R tools:
#
# https://www.baseballprospectus.com/news/article/59972/the-delta-method-revisited/
install.packages("pacman")
pacman::p_install_gh("datarootsio/artyfarty")
pacman::p_load(tidyverse,artyfarty,ggthemes,
               mgcv,radiant.data,doFuture,future.apply)
# For the fielding optimization project, here is the library used to produce the
# final spray chart as well as other packages that were found helpful:
devtools::install_github("bdilday/GeomMLBStadiums")
install.packages("plot3D")
install.packages("rgl")
install.packages("xlsx")
| /R Tools & Tutorials/installing_packages.R | no_license | JHDatz/Big-Problems-Tools | R | false | false | 2,013 | r | # Some important notes:
#
# Windows Users:
#
# 1.You may need to install RTools as well.
#
# 2. You may get stuck in a loop where RStudio asks if you
# want to restart R repeatedly. Try completely closing RStudio
# if you have this problem, or answering no in the prompt.
#
# 3. During the install_github() commands I found that updating rlang
# made a mess, so do not choose to update it when prompted.
#
# Linux users: Running this will make your computer very upset. Run it
# while you make breakfast or something.
#
# The following are all required packages and dependencies of packages
# for Analyzing Baseball Data with R.
install.packages('tidyverse')
install.packages('doParallel')
install.packages('DBI')
install.packages('RMySQL')
install.packages('latticeExtra')
install.packages('cli')
install.packages('gh')
install.packages('usethis')
install.packages('devtools')
install.packages('xml2')
install.packages('pitchRx')
install.packages('mlbgameday')
install.packages('Lahman')
install.packages("RSQLite")
devtools::install_github("BillPetti/baseballr")
devtools::install_github("keberwein/mlbgameday")
devtools::install_github("beanumber/retro") # Added 3/23/21
# The next set is packages I found necessary for other projects. For the Aging
# Curves project we've had in the past, here is the hyperlink to the article
# we found and the packages in the code that was required to run their R tools:
#
# https://www.baseballprospectus.com/news/article/59972/the-delta-method-revisited/
install.packages("pacman")
pacman::p_install_gh("datarootsio/artyfarty")
pacman::p_load(tidyverse,artyfarty,ggthemes,
mgcv,radiant.data,doFuture,future.apply)
# For the fielding optimization project, here is the library used to produce the
# final spray chart as well as other packages that were found helpful:
devtools::install_github("bdilday/GeomMLBStadiums")
install.packages("plot3D")
install.packages("rgl")
install.packages("xlsx")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/31_broyden_band.R
\name{broyden_band}
\alias{broyden_band}
\title{Broyden Banded Function}
\usage{
broyden_band()
}
\value{
A list containing:
\itemize{
\item \code{fn} Objective function which calculates the value given input
parameter vector.
\item \code{gr} Gradient function which calculates the gradient vector
given input parameter vector.
\item \code{he} If available, the hessian matrix (second derivatives)
of the function w.r.t. the parameters at the given values.
\item \code{fg} A function which, given the parameter vector, calculates
both the objective value and gradient, returning a list with members
\code{fn} and \code{gr}, respectively.
\item \code{x0} Function returning the standard starting point, given
\code{n}, the number of variables desired.
}
}
\description{
Test function 31 from the More', Garbow and Hillstrom paper.
}
\details{
The objective function is the sum of \code{m} functions, each of \code{n}
parameters.
\itemize{
\item Dimensions: Number of parameters \code{n} variable, number of summand
functions \code{m = n}.
\item Minima: \code{f = 0}.
}
The number of parameters, \code{n}, in the objective function is not
specified when invoking this function. It is implicitly set by the length of
the parameter vector passed to the objective and gradient functions that this
function creates. See the 'Examples' section.
}
\examples{
btri <- broyden_band()
# 6 variable problem using the standard starting point
x0_6 <- btri$x0(6)
res_6 <- stats::optim(x0_6, btri$fn, btri$gr, method = "L-BFGS-B")
# Standard starting point with 8 variables
res_8 <- stats::optim(btri$x0(8), btri$fn, btri$gr, method = "L-BFGS-B")
# Create your own 4 variable starting point
res_4 <- stats::optim(c(0.1, 0.2, 0.3, 0.4), btri$fn, btri$gr,
method = "L-BFGS-B")
}
\references{
More', J. J., Garbow, B. S., & Hillstrom, K. E. (1981).
Testing unconstrained optimization software.
\emph{ACM Transactions on Mathematical Software (TOMS)}, \emph{7}(1), 17-41.
\doi{10.1145/355934.355936}
Broyden, C. G. (1971).
The convergence of an algorithm for solving sparse nonlinear systems.
\emph{Mathematics of Computation}, \emph{25}(114), 285-294.
\doi{10.1090/S0025-5718-1971-0297122-5}
}
| /man/broyden_band.Rd | permissive | jlmelville/funconstrain | R | false | true | 2,340 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/31_broyden_band.R
\name{broyden_band}
\alias{broyden_band}
\title{Broyden Banded Function}
\usage{
broyden_band()
}
\value{
A list containing:
\itemize{
\item \code{fn} Objective function which calculates the value given input
parameter vector.
\item \code{gr} Gradient function which calculates the gradient vector
given input parameter vector.
\item \code{he} If available, the hessian matrix (second derivatives)
of the function w.r.t. the parameters at the given values.
\item \code{fg} A function which, given the parameter vector, calculates
both the objective value and gradient, returning a list with members
\code{fn} and \code{gr}, respectively.
\item \code{x0} Function returning the standard starting point, given
\code{n}, the number of variables desired.
}
}
\description{
Test function 31 from the More', Garbow and Hillstrom paper.
}
\details{
The objective function is the sum of \code{m} functions, each of \code{n}
parameters.
\itemize{
\item Dimensions: Number of parameters \code{n} variable, number of summand
functions \code{m = n}.
\item Minima: \code{f = 0}.
}
The number of parameters, \code{n}, in the objective function is not
specified when invoking this function. It is implicitly set by the length of
the parameter vector passed to the objective and gradient functions that this
function creates. See the 'Examples' section.
}
\examples{
btri <- broyden_band()
# 6 variable problem using the standard starting point
x0_6 <- btri$x0(6)
res_6 <- stats::optim(x0_6, btri$fn, btri$gr, method = "L-BFGS-B")
# Standard starting point with 8 variables
res_8 <- stats::optim(btri$x0(8), btri$fn, btri$gr, method = "L-BFGS-B")
# Create your own 4 variable starting point
res_4 <- stats::optim(c(0.1, 0.2, 0.3, 0.4), btri$fn, btri$gr,
method = "L-BFGS-B")
}
\references{
More', J. J., Garbow, B. S., & Hillstrom, K. E. (1981).
Testing unconstrained optimization software.
\emph{ACM Transactions on Mathematical Software (TOMS)}, \emph{7}(1), 17-41.
\doi{10.1145/355934.355936}
Broyden, C. G. (1971).
The convergence of an algorithm for solving sparse nonlinear systems.
\emph{Mathematics of Computation}, \emph{25}(114), 285-294.
\doi{10.1090/S0025-5718-1971-0297122-5}
}
|
# K-Means Clustering

# Importing the dataset
dataset <- read.csv('Mall_Customers.csv')
X <- dataset[4:5]  # Annual Income and Spending Score columns

# Using the Elbow method to find the optimal number of clusters.
# set.seed makes the random centroid initialisations reproducible.
set.seed(6)
wcss <- vector()
for (i in 1:10) wcss[i] <- sum(kmeans(X, i)$withinss)
# FIX: the elbow plot was mistitled 'Clusters of clients' (copy-paste from the
# cluster plot below); it shows WCSS vs. number of clusters.
plot(1:10, wcss, type = 'b', main = paste('The Elbow Method'),
     xlab = 'Number of clusters', ylab = 'WCSS')

# Applying k-means with the 5 clusters suggested by the elbow.
# FIX: the result was previously assigned to `kmeans`, shadowing
# stats::kmeans() and breaking any later call to the function in this session.
set.seed(29)
kmeans_result <- kmeans(X, 5, iter.max = 300, nstart = 10)

# Visualising the clusters
library(cluster)
clusplot(X,
         kmeans_result$cluster,
         lines = 0,
         shade = TRUE,
         color = TRUE,
         labels = 2,
         plotchar = FALSE,
         span = TRUE,
         main = paste('Clusters of clients'),
         xlab = 'Annual Income',
         ylab = 'Spending Score')
| /Part 4 - Clustering/Section 24 - K-Means Clustering/kmeans_R.R | no_license | Itsu004/Machine-Learning | R | false | false | 817 | r | # K-Means Clustering
# Importing the dataset
dataset <- read.csv('Mall_Customers.csv')
X <-dataset[4:5]
# Using the Elbow method to find the optimal number of clusters
set.seed(6)
wcss <- vector()
for(i in 1:10) wcss[i] <- sum(kmeans(X,i)$withinss)
plot(1:10,wcss, type = 'b', main = paste('Clusters of clients'),xlab = 'Number of clusters', ylab = 'WCSS')
# Applying k-means to the dataset
set.seed(29)
kmeans <- kmeans(X,5,iter.max = 300, nstart = 10)
# Visualising the clusters
library(cluster)
clusplot(X,
kmeans$cluster,
lines = 0,
shade = TRUE,
color = TRUE,
labels = 2,
plotchar = FALSE,
span = TRUE,
main = paste('Clusters of clients'),
xlab = 'Annual Income',
ylab = 'Spending Score')
|
#' Connect to bety using current PEcAn configuration
#'
#' Reads database credentials from the web interface's `config.php` and opens
#' a dplyr source against the BETY Postgres database.
#' @param php.config Path to `config.php`
#' @return A dplyr Postgres source usable with `dplyr::tbl()`
#' @export
#'
betyConnect <- function(php.config = "../../web/config.php") {
  ## Read PHP config file for webserver
  config.list <- PEcAn.utils::read_web_config(php.config)
  ## Database connection
  # TODO: The latest version of dplyr/dbplyr works with standard DBI-based
  # objects, so we should replace this with a standard `db.open` call.
  # (dplyr::src_postgres() is deprecated upstream.)
  dplyr::src_postgres(dbname = config.list$db_bety_database,
                      host = config.list$db_bety_hostname,
                      user = config.list$db_bety_username,
                      password = config.list$db_bety_password)
} # betyConnect
#' Convert number to scientific notation pretty expression
#'
#' Produces a plotmath expression (e.g. `'1.5' %*% 10^+06`) suitable for use
#' as an axis label formatter in ggplot2.
#' @param l Number (or numeric vector) to convert to scientific notation
#' @return An expression for plotmath rendering; zero is rendered as plain "0"
#' @export
fancy_scientific <- function(l) {
  # Raise the scipen penalty only for the duration of this call and restore
  # the previous value on exit (the original permanently changed the option).
  old_opts <- options(scipen = 12)
  on.exit(options(old_opts), add = TRUE)
  # turn in to character string in scientific notation
  l <- format(l, scientific = TRUE)
  # keep 0 as 0 — this must run BEFORE the exponent markup below; in the
  # original it ran last, after "e" had been rewritten to "%*%10^", so the
  # pattern could never match and zero rendered as 0 %*% 10^+00.
  l <- gsub("^0e\\+00$", "0", l)
  # quote the part before the exponent to keep all the digits
  l <- gsub("^(.*)e", "'\\1'e", l)
  # turn the 'e+' into plotmath format
  l <- gsub("e", "%*%10^", l)
  # return this as an expression
  return(parse(text = l))
} # fancy_scientific
#' Count rows of a data frame
#'
#' Works for both local data frames and lazy (remote) tbls: the tally is
#' computed first, then collected locally.
#' @param df Data frame (or lazy tbl) whose rows should be counted
#' @return Integer row count
#' @export
dplyr.count <- function(df) {
  tallied <- dplyr::collect(dplyr::tally(df))
  tallied[["n"]]
} # dplyr.count
#' Convert netcdf number of days to date
#'
#' @param time Numeric time offsets as stored in the netCDF file
#' @param unit Units string, e.g. "days since 2010-01-01 00:00:00"
#' @return POSIXct timestamps in UTC
#' @export
ncdays2date <- function(time, unit) {
  # Parse the reference date out of the units string.
  origin_dt <- lubridate::parse_date_time(unit, c("ymd_hms", "ymd_h", "ymd"))
  # Normalise whatever the file's time unit is to days, then to seconds.
  day_offset <- udunits2::ud.convert(time, unit, paste("days since ", origin_dt))
  sec_offset <- udunits2::ud.convert(day_offset, "days", "seconds")
  as.POSIXct.numeric(sec_offset, origin = origin_dt, tz = "UTC")
} # ncdays2date
#' Database host information
#'
#' Determines this BETY instance's host ID and the [start, end] range of
#' record IDs it owns for cross-host synchronization.
#' @param bety BETYdb connection, as opened by `betyConnect()`
#' @return List with elements `hostid`, `start`, and `end`
#' @export
dbHostInfo <- function(bety) {
  # get host id
  # NOTE(review): nextval() advances the users ID sequence as a side effect;
  # the host is identified by which billion-block its sequence values fall in.
  result <- db.query(query = "select cast(floor(nextval('users_id_seq') / 1e9) as bigint);", con = bety$con)
  hostid <- result[["floor"]]
  # get machine start and end based on hostid
  machine <- dplyr::tbl(bety, "machines") %>%
    dplyr::filter(sync_host_id == !!hostid) %>%
    dplyr::select(sync_start, sync_end)
  # nrow() on a lazy tbl can be NA (row count unknown before collecting),
  # hence the is.na() guard; in either case fall back to the billion-block
  # range implied by hostid.
  if (is.na(nrow(machine)) || nrow(machine) == 0) {
    return(list(hostid = hostid,
                start = 1e+09 * hostid,
                end = 1e+09 * (hostid + 1) - 1))
  } else {
    return(list(hostid = hostid,
                start = machine$sync_start,
                end = machine$sync_end))
  }
} # dbHostInfo
#' list of workflows that exist
#'
#' @param ensemble Logical. Use workflows from ensembles table.
#' @inheritParams dbHostInfo
#' @return Lazy tbl of workflows (with `ensemble_id` when `ensemble = TRUE`),
#'   restricted to this host's ID range.
#' @export
workflows <- function(bety, ensemble = FALSE) {
  hostinfo <- dbHostInfo(bety)
  # Choose between the ensemble view and the plain workflows table.
  # NOTE(review): the ensemble query has no explicit join condition between
  # ensembles and workflows — confirm this is intended.
  if (ensemble) {
    sql_text <- paste("SELECT ensembles.id AS ensemble_id, ensembles.workflow_id, workflows.folder",
                      "FROM ensembles, workflows WHERE runtype = 'ensemble'")
  } else {
    sql_text <- "SELECT id AS workflow_id, folder FROM workflows"
  }
  all_rows <- dplyr::tbl(bety, dbplyr::sql(sql_text))
  # Keep only workflow IDs that belong to this host's sync range.
  dplyr::filter(all_rows,
                workflow_id >= !!hostinfo$start & workflow_id <= !!hostinfo$end)
} # workflows
#' Get single workflow by workflow_id
#'
#' @param workflow_id Workflow ID
#' @inheritParams dbHostInfo
#' @return Lazy tbl with at most one workflow row
#' @export
workflow <- function(bety, workflow_id) {
  host_workflows <- workflows(bety)
  # !! injects the local value so dbplyr compares against a constant rather
  # than the identically-named column.
  dplyr::filter(host_workflows, workflow_id == !!workflow_id)
} # workflow
#' Get table of runs corresponding to a workflow
#'
#' @inheritParams dbHostInfo
#' @inheritParams workflow
#' @return Lazy tbl with `run_id` and `folder` columns
#' @export
runs <- function(bety, workflow_id) {
  # Workflow of interest, keeping only the columns needed for the joins below.
  workflow_tbl <- dplyr::select(workflow(bety, workflow_id), workflow_id, folder)
  # Ensembles belonging to that workflow.
  ensemble_tbl <- dplyr::inner_join(
    dplyr::select(dplyr::tbl(bety, "ensembles"), ensemble_id = id, workflow_id),
    workflow_tbl,
    by = "workflow_id")
  # Runs belonging to those ensembles.
  run_tbl <- dplyr::inner_join(
    dplyr::select(dplyr::tbl(bety, "runs"), run_id = id, ensemble_id),
    ensemble_tbl,
    by = "ensemble_id")
  # Drop the join keys before returning.
  dplyr::select(run_tbl, -workflow_id, -ensemble_id)
} # runs
#' Get vector of workflow IDs
#' @inheritParams dbHostInfo
#' @param query Named vector or list of URL query parameters; entries named
#'   "workflow_id" are used when `all.ids = FALSE`.
#' @param all.ids Logical. If `TRUE`, ignore `query` and return every
#'   workflow ID known to this host.
#' @return Character/numeric vector of workflow IDs (sorted descending when
#'   fetched from the database)
#' @export
get_workflow_ids <- function(bety, query, all.ids = FALSE) {
  # If we dont want all workflow ids but only workflow id from the user url query
  if (!all.ids && "workflow_id" %in% names(query)) {
    ids <- unlist(query[names(query) == "workflow_id"], use.names = FALSE)
  } else {
    # Get all workflow IDs
    ids <- workflows(bety, ensemble = FALSE) %>%
      dplyr::distinct(workflow_id) %>%
      dplyr::collect() %>%
      dplyr::pull(workflow_id) %>%
      sort(decreasing = TRUE)
  }
  return(ids)
} # get_workflow_ids
#' Get data frame of users and IDs
#'
#' Returns a lazy tbl of `id` and `login` for users whose IDs fall inside
#' this host's sync ID range.
#' @inheritParams dbHostInfo
#' @return Lazy tbl with `id` and `login` columns
#' @export
get_users <- function(bety) {
  hostinfo <- dbHostInfo(bety)
  query <- "SELECT id, login FROM users"
  # FIX: inject the local bounds with !! so dbplyr evaluates them before
  # building the SQL, matching how workflows() filters on the same range.
  out <- dplyr::tbl(bety, dbplyr::sql(query)) %>%
    dplyr::filter(id >= !!hostinfo$start & id <= !!hostinfo$end)
  return(out)
} # get_users
#' Get vector of run IDs for a given workflow ID
#'
#' @inheritParams dbHostInfo
#' @inheritParams workflow
#' @return Sorted vector of run IDs, or the string "No runs found"
#' @export
get_run_ids <- function(bety, workflow_id) {
  # Guard clauses: no workflow selected, or workflow has no runs.
  if (workflow_id == "") {
    return(c("No runs found"))
  }
  run_tbl <- runs(bety, workflow_id)
  if (dplyr.count(run_tbl) == 0) {
    return(c("No runs found"))
  }
  sort(dplyr::pull(run_tbl, run_id))
} # get_run_ids
#' Get vector of variable names for a particular workflow and run ID
#'
#' Scans every netCDF output file of the run and collects the variable names,
#' keyed by their long names.
#' @inheritParams dbHostInfo
#' @inheritParams workflow
#' @param run_id Run ID
#' @param remove_pool logical: ignore variables with 'pool' in their names?
#' @return Named character vector (names = long names, values = variable
#'   names), or the string "No variables found"
#' @export
get_var_names <- function(bety, workflow_id, run_id, remove_pool = TRUE) {
  var_names <- character(0)
  if (workflow_id != "" && run_id != "") {
    workflow <- dplyr::collect(workflow(bety, workflow_id))
    if (nrow(workflow) > 0) {
      outputfolder <- file.path(workflow$folder, "out", run_id)
      if (utils::file_test("-d", outputfolder)) {
        files <- list.files(outputfolder, "*.nc$", full.names = TRUE)
        for (file in files) {
          nc <- ncdf4::nc_open(file)
          # `<<-` writes into var_names in the enclosing function scope from
          # inside the anonymous function; lapply is used for its side effect.
          lapply(nc$var, function(x) {
            if (x$name != "") {
              var_names[[x$longname]] <<- x$name
            }
          })
          ncdf4::nc_close(nc)
        }
      }
    }
    if (length(var_names) == 0) {
      var_names <- "No variables found"
    }
    if (remove_pool) {
      var_names <- var_names[!grepl("pool", var_names, ignore.case = TRUE)] ## Ignore 'poolnames' and 'carbon pools' variables
    }
  }
  return(var_names)
} # get_var_names
#' Get vector of variable names for a particular workflow and run ID
#'
#' Same as [get_var_names()], minus bookkeeping variables that should not be
#' offered to the user.
#' @inheritParams get_var_names
#' @param run_id Run ID
#' @param workflow_id Workflow ID
#' @return Named character vector of user-facing variable names
#' @export
var_names_all <- function(bety, workflow_id, run_id) {
  # Bookkeeping variables hidden from the user interface.
  hidden_vars <- c('Year', 'FracJulianDay')
  all_vars <- get_var_names(bety, workflow_id, run_id)
  all_vars[!all_vars %in% hidden_vars]
} # var_names_all
#' Load data for a single run of the model
#'
#' For a particular combination of workflow and run id, loads all variables
#' from all output files into one long data frame.
#' @inheritParams var_names_all
#' @param run_id Run ID
#' @param workflow_id Workflow ID
#' @return Data frame with columns dates, var_name, vals, workflow_id,
#'   run_id, xlab, ylab, title for one run
#' @export
load_data_single_run <- function(bety, workflow_id, run_id) {
  # Adapted from earlier code in pecan/shiny/workflowPlot/server.R
  globalDF <- data.frame()
  workflow <- dplyr::collect(workflow(bety, workflow_id))
  # Use the function 'var_names_all' to get all variables
  var_names <- var_names_all(bety, workflow_id, run_id)
  # lat/lon often cause trouble (like with JULES) but aren't needed for this basic plotting
  var_names <- setdiff(var_names, c("lat", "latitude", "lon", "longitude"))
  outputfolder <- file.path(workflow$folder, 'out', run_id)
  out <- read.output(runid = run_id, outdir = outputfolder, variables = var_names, dataframe = TRUE)
  # Open the first netCDF file only to read units/long names for axis labels.
  ncfile <- list.files(path = outputfolder, pattern = "\\.nc$", full.names = TRUE)[1]
  nc <- ncdf4::nc_open(ncfile)
  # FIX: always release the netCDF handle, even on error — the original
  # never called nc_close() and leaked the file handle.
  on.exit(ncdf4::nc_close(nc), add = TRUE)
  # Reshape to long format: one row per (timestamp, variable).
  globalDF <- tidyr::gather(out, key = var_name, value = vals, names(out)[names(out) != "posix"]) %>%
    dplyr::rename(dates = posix)
  globalDF$workflow_id <- workflow_id
  globalDF$run_id <- run_id
  globalDF$xlab <- "Time"
  # y-axis label from the variable's netCDF units attribute ("" if absent).
  globalDF$ylab <- unlist(sapply(globalDF$var_name, function(x){
    if(!is.null(nc$var[[x]]$units)){
      return(nc$var[[x]]$units)
    }else{
      return("")
    }
  } ))
  # Plot title: the variable's long name when known, otherwise its short name.
  globalDF$title <- unlist(lapply(globalDF$var_name, function(x){
    long_name <- names(which(var_names == x))
    ifelse(length(long_name) > 0, long_name, x)
  }
  ))
  return(globalDF)
} #load_data_single_run
| /base/db/R/query.dplyr.R | permissive | dlebauer/pecan | R | false | false | 8,963 | r | #' Connect to bety using current PEcAn configuration
#' @param php.config Path to `config.php`
#' @export
#'
betyConnect <- function(php.config = "../../web/config.php") {
## Read PHP config file for webserver
config.list <- PEcAn.utils::read_web_config(php.config)
## Database connection
# TODO: The latest version of dplyr/dbplyr works with standard DBI-based
# objects, so we should replace this with a standard `db.open` call.
dplyr::src_postgres(dbname = config.list$db_bety_database,
host = config.list$db_bety_hostname,
user = config.list$db_bety_username,
password = config.list$db_bety_password)
} # betyConnect
#' Convert number to scientific notation pretty expression
#' @param l Number to convert to scientific notation
#' @export
fancy_scientific <- function(l) {
options(scipen = 12)
# turn in to character string in scientific notation
l <- format(l, scientific = TRUE)
# quote the part before the exponent to keep all the digits
l <- gsub("^(.*)e", "'\\1'e", l)
# turn the 'e+' into plotmath format
l <- gsub("e", "%*%10^", l)
# keep 0 as 0
l <- gsub("0e\\+00", "0", l)
# return this as an expression
return(parse(text = l))
} # fancy_scientific
#' Count rows of a data frame
#' @param df Data frame of which to count length
#' @export
dplyr.count <- function(df) {
return(dplyr::collect(dplyr::tally(df))[["n"]])
} # dplyr.count
#' Convert netcdf number of days to date
#' @export
ncdays2date <- function(time, unit) {
date <- lubridate::parse_date_time(unit, c("ymd_hms", "ymd_h", "ymd"))
days <- udunits2::ud.convert(time, unit, paste("days since ", date))
seconds <- udunits2::ud.convert(days, "days", "seconds")
return(as.POSIXct.numeric(seconds, origin = date, tz = "UTC"))
} # ncdays2date
#' Database host information
#'
#' @param bety BETYdb connection, as opened by `betyConnect()`
#' @export
dbHostInfo <- function(bety) {
# get host id
result <- db.query(query = "select cast(floor(nextval('users_id_seq') / 1e9) as bigint);", con = bety$con)
hostid <- result[["floor"]]
# get machine start and end based on hostid
machine <- dplyr::tbl(bety, "machines") %>%
dplyr::filter(sync_host_id == !!hostid) %>%
dplyr::select(sync_start, sync_end)
if (is.na(nrow(machine)) || nrow(machine) == 0) {
return(list(hostid = hostid,
start = 1e+09 * hostid,
end = 1e+09 * (hostid + 1) - 1))
} else {
return(list(hostid = hostid,
start = machine$sync_start,
end = machine$sync_end))
}
} # dbHostInfo
#' list of workflows that exist
#' @param ensemble Logical. Use workflows from ensembles table.
#' @inheritParams dbHostInfo
#' @export
workflows <- function(bety, ensemble = FALSE) {
hostinfo <- dbHostInfo(bety)
if (ensemble) {
query <- paste("SELECT ensembles.id AS ensemble_id, ensembles.workflow_id, workflows.folder",
"FROM ensembles, workflows WHERE runtype = 'ensemble'")
} else {
query <- "SELECT id AS workflow_id, folder FROM workflows"
}
dplyr::tbl(bety, dbplyr::sql(query)) %>%
dplyr::filter(workflow_id >= !!hostinfo$start & workflow_id <= !!hostinfo$end) %>%
return()
} # workflows
#' Get single workflow by workflow_id
#' @param workflow_id Workflow ID
#' @inheritParams dbHostInfo
#' @export
workflow <- function(bety, workflow_id) {
workflows(bety) %>%
dplyr::filter(workflow_id == !!workflow_id)
} # workflow
#' Get table of runs corresponding to a workflow
#' @inheritParams dbHostInfo
#' @inheritParams workflow
#' @export
runs <- function(bety, workflow_id) {
Workflows <- workflow(bety, workflow_id) %>%
dplyr::select(workflow_id, folder)
Ensembles <- dplyr::tbl(bety, "ensembles") %>%
dplyr::select(ensemble_id = id, workflow_id) %>%
dplyr::inner_join(Workflows, by = "workflow_id")
Runs <- dplyr::tbl(bety, "runs") %>%
dplyr::select(run_id = id, ensemble_id) %>%
dplyr::inner_join(Ensembles, by = "ensemble_id")
dplyr::select(Runs, -workflow_id, -ensemble_id) %>%
return()
} # runs
#' Get vector of workflow IDs
#' @inheritParams dbHostInfo
#' @param query Named vector or list of workflow IDs
#' @export
get_workflow_ids <- function(bety, query, all.ids = FALSE) {
# If we dont want all workflow ids but only workflow id from the user url query
if (!all.ids && "workflow_id" %in% names(query)) {
ids <- unlist(query[names(query) == "workflow_id"], use.names = FALSE)
} else {
# Get all workflow IDs
ids <- workflows(bety, ensemble = FALSE) %>%
dplyr::distinct(workflow_id) %>%
dplyr::collect() %>%
dplyr::pull(workflow_id) %>%
sort(decreasing = TRUE)
}
return(ids)
} # get_workflow_ids
#' Get data frame of users and IDs
#' @inheritParams dbHostInfo
#' @export
get_users <- function(bety) {
hostinfo <- dbHostInfo(bety)
query <- "SELECT id, login FROM users"
out <- dplyr::tbl(bety, dbplyr::sql(query)) %>%
dplyr::filter(id >= hostinfo$start & id <= hostinfo$end)
return(out)
} # get_workflow_ids
#' Get vector of run IDs for a given workflow ID
#' @inheritParams dbHostInfo
#' @inheritParams workflow
#' @export
get_run_ids <- function(bety, workflow_id) {
run_ids <- c("No runs found")
if (workflow_id != "") {
runs <- runs(bety, workflow_id)
if (dplyr.count(runs) > 0) {
run_ids <- dplyr::pull(runs, run_id) %>% sort()
}
}
return(run_ids)
} # get_run_ids
#' Get vector of variable names for a particular workflow and run ID
#' @inheritParams dbHostInfo
#' @inheritParams workflow
#' @param run_id Run ID
#' @param remove_pool logical: ignore variables with 'pools' in their names?
#' @export
get_var_names <- function(bety, workflow_id, run_id, remove_pool = TRUE) {
var_names <- character(0)
if (workflow_id != "" && run_id != "") {
workflow <- dplyr::collect(workflow(bety, workflow_id))
if (nrow(workflow) > 0) {
outputfolder <- file.path(workflow$folder, "out", run_id)
if (utils::file_test("-d", outputfolder)) {
files <- list.files(outputfolder, "*.nc$", full.names = TRUE)
for (file in files) {
nc <- ncdf4::nc_open(file)
lapply(nc$var, function(x) {
if (x$name != "") {
var_names[[x$longname]] <<- x$name
}
})
ncdf4::nc_close(nc)
}
}
}
if (length(var_names) == 0) {
var_names <- "No variables found"
}
if (remove_pool) {
var_names <- var_names[!grepl("pool", var_names, ignore.case = TRUE)] ## Ignore 'poolnames' and 'carbon pools' variables
}
}
return(var_names)
} # get_var_names
#' Get vector of variable names for a particular workflow and run ID
#' @inheritParams get_var_names
#' @param run_id Run ID
#' @param workflow_id Workflow ID
#' @export
var_names_all <- function(bety, workflow_id, run_id) {
# @return List of variable names
# Get variables for a particular workflow and run id
var_names <- get_var_names(bety, workflow_id, run_id)
# Remove variables which should not be shown to the user
removeVarNames <- c('Year','FracJulianDay')
var_names <- var_names[!var_names %in% removeVarNames]
return(var_names)
} # var_names_all
#' Load data for a single run of the model
#' @inheritParams var_names_all
#' @inheritParams workflow
#' @param run_id Run ID
#' @param workflow_id Workflow ID
#' @export
load_data_single_run <- function(bety, workflow_id, run_id) {
# For a particular combination of workflow and run id, loads
# all variables from all files.
# @return Dataframe for one run
# Adapted from earlier code in pecan/shiny/workflowPlot/server.R
globalDF <- data.frame()
workflow <- dplyr::collect(workflow(bety, workflow_id))
# Use the function 'var_names_all' to get all variables
var_names <- var_names_all(bety, workflow_id, run_id)
# lat/lon often cause trouble (like with JULES) but aren't needed for this basic plotting
var_names <- setdiff(var_names, c("lat", "latitude", "lon", "longitude"))
outputfolder <- file.path(workflow$folder, 'out', run_id)
out <- read.output(runid = run_id, outdir = outputfolder, variables = var_names, dataframe = TRUE)
ncfile <- list.files(path = outputfolder, pattern = "\\.nc$", full.names = TRUE)[1]
nc <- ncdf4::nc_open(ncfile)
globalDF <- tidyr::gather(out, key = var_name, value = vals, names(out)[names(out) != "posix"]) %>%
dplyr::rename(dates = posix)
globalDF$workflow_id <- workflow_id
globalDF$run_id <- run_id
globalDF$xlab <- "Time"
globalDF$ylab <- unlist(sapply(globalDF$var_name, function(x){
if(!is.null(nc$var[[x]]$units)){
return(nc$var[[x]]$units)
}else{
return("")
}
} ))
globalDF$title <- unlist(lapply(globalDF$var_name, function(x){
long_name <- names(which(var_names == x))
ifelse(length(long_name) > 0, long_name, x)
}
))
return(globalDF)
} #load_data_single_run
|
# Load county-level HTS model predictions, combine them into one data set,
# and compute per-county test positivity.
library('tidyverse')
library('ggplot2')
library('kableExtra')
library('Hmisc') #This Library will help in dividing the data into groups using the cut function
# Helper functions (Set_RiskOutcome, Set_Risk_Summary, get_HighRisk, ...).
# NOTE(review): "Functiions" in the sourced file name looks like a typo but
# must match the actual file on disk — confirm before renaming.
source("C:/Users/Admin/OneDrive/Analytics/HTS ML Output/Functiions - Combi.R")
machakos<-read.csv('C:/Users/Admin/OneDrive/Analytics/HTS ML Output/machakos_preds.csv')
nairobi <-read.csv('C:/Users/Admin/OneDrive/Analytics/HTS ML Output/nairobi_preds.csv')
siaya <-read.csv('C:/Users/Admin/OneDrive/Analytics/HTS ML Output/siaya_preds.csv')
homabay <-read.csv('C:/Users/Admin/OneDrive/Analytics/HTS ML Output/homabay_preds.csv')
#For purposes of joining the data later, add the column county
machakos <-machakos %>% mutate(county='Machakos')
nairobi <-nairobi %>% mutate(county='Nairobi')
siaya <-siaya %>% mutate(county='Siaya')
homabay <-homabay %>% mutate(county='Homabay')
# Compute the Risk Outcomes for the different counties.
# NOTE(review): the three numbers per county are presumably county-specific
# probability cut-offs passed to Set_RiskOutcome — confirm their meaning in
# "Functiions - Combi.R".
machakos <-Set_RiskOutcome(machakos,0.372,0.25,0.094)
nairobi <-Set_RiskOutcome(nairobi,0.562,0.275,0.088)
siaya <-Set_RiskOutcome(siaya,0.508,0.26,0.076)
homabay <-Set_RiskOutcome(homabay,0.362,0.188,0.05)
# Combine the four county data sets (all share the same columns).
combi <- rbind(machakos,nairobi,siaya,homabay)
# Reordering group factor levels
combi$county <- factor(combi$county, levels = c("Siaya", "Homabay", "Nairobi", "Machakos"))
# Free the per-county copies; only the combined data set is used from here on.
rm(machakos)
rm(nairobi)
rm(siaya)
rm(homabay)
# Rank rows by predicted probability of a positive result (descending).
combi <- combi %>%
  arrange(desc(Positive)) %>%
  mutate(rowNum=row_number())
# Put the rows into deciles by rank, labelled 10, 20, ..., 100.
combi$RowGroup<-as.numeric(cut2(combi$rowNum, g=10))
combi$RowGroup<-combi$RowGroup*10
# Row number within each (test result, decile) cell.
combi <- combi %>% group_by(FinalTestResult,RowGroup)%>% mutate(ResRowNum=row_number()) %>% ungroup()
table(combi$RiskOutcome,combi$county)
# Output Positivity for Each County
posit <- combi %>% group_by(county,FinalTestResult) %>% summarise(num=n()) %>% ungroup()
# Pivot wider: FinalTestResult values become the Positive/Negative columns.
posit <- posit %>% pivot_wider(names_from = FinalTestResult, values_from = num)
posit <-posit %>% mutate(TotalTested=Positive+Negative,
                         positivity=(Positive/TotalTested))
# Create a binary age group category (under / over 15 years).
combi <- combi %>% mutate(AgeGroup = ifelse(AgeAtTest < 15, 'Under 15 Yrs', 'Over 15 Yrs'))
# Create DATIM-style age bands.
# BUG FIX: the 10-14 band previously tested `AgeAtTest >= 10 & AgeAtTest <= 10`,
# so only exact age 10 was labelled '10 to 14 Yrs' and ages 11-14 fell through
# to "Over 20 Years". The upper bound is now 14.
combi <- combi %>% mutate(Age_Grp = case_when(
  AgeAtTest <= 9                    ~ 'Under 10 Yrs',
  AgeAtTest >= 10 & AgeAtTest <= 14 ~ '10 to 14 Yrs',
  AgeAtTest >= 15 & AgeAtTest <= 19 ~ '15 to 19 Yrs',
  TRUE                              ~ "Over 20 Years"
))
# Combine the 2 top risk outcomes into a 3-level ordered factor.
combi <- combi %>% mutate(HHRiskOutcome = ifelse(RiskOutcome =='Highest Risk'| RiskOutcome =='High Risk','High Risk',
                                          ifelse(RiskOutcome =='Medium Risk','Medium Risk','Low Risk')))
combi$HHRiskOutcome<- factor(combi$HHRiskOutcome,levels = c("Low Risk", "Medium Risk", "High Risk"))
# Combine the Medium and High risks into a 2-level factor.
combi <- combi %>% mutate(HHMRiskOutcome = ifelse(RiskOutcome =='Highest Risk'| RiskOutcome =='High Risk' | RiskOutcome =='Medium Risk',
                                                  'High Risk','Low Risk'))
combi$HHMRiskOutcome<- factor(combi$HHMRiskOutcome,levels = c("Low Risk","High Risk"))
# Collapse every risk level into a single 'All Risk' bucket.
# NOTE(review): if RiskOutcome only takes the four values tested, the 'Aii'
# else branch is unreachable; 'Aii' also looks like a typo (for 'All'?) —
# confirm the full set of RiskOutcome values before changing it.
combi <- combi %>% mutate(HHMLRiskOutcome = ifelse(RiskOutcome =='Highest Risk'| RiskOutcome =='High Risk' | RiskOutcome =='Medium Risk' |RiskOutcome =='Low Risk',
                                                   'All Risk','Aii'))
saveRDS(combi, file = "combi.rds")
# Summarize all four risk groupings and stack them into one table.
combiRisks<-rbind(Set_Risk_Summary(combi,HHMLRiskOutcome,'All Risks'),
                  Set_Risk_Summary(combi,RiskOutcome,'Highest Risk'),
                  Set_Risk_Summary(combi,HHRiskOutcome,'HighestHigh Risks'),
                  Set_Risk_Summary(combi,HHMRiskOutcome,'MediumHighestHigh Risks'))
combiHighRisk<-get_HighRisk(combiRisks)
saveRDS(combiHighRisk, file = "combiHighRisk.rds")
# Summarize the Risk Outcomes by Final Test Result and Age Group.
# NOTE(review): the label for the RiskOutcome summary is 'All Risk' here but
# 'Highest Risk' in the non-age summary above — confirm which is intended.
combiRisksAge<-rbind(Set_Risk_Age_Summary(combi,HHMLRiskOutcome,AgeGroup,'All Risks'),
                     Set_Risk_Age_Summary(combi,RiskOutcome,AgeGroup,'All Risk'),
                     Set_Risk_Age_Summary(combi,HHRiskOutcome,AgeGroup,'HighestHigh Risks'),
                     Set_Risk_Age_Summary(combi,HHMRiskOutcome,AgeGroup,'MediumHighestHigh Risks'))
combiHighRisksAge<-get_HighRisk(combiRisksAge)
saveRDS(combiHighRisksAge, file = "combiHighRisksAge.rds")
# Summarize the Risk Outcomes by Final Test Result and Age Group (Including 10-14, 15-19)
combiRisksAgeGrp<-rbind(Set_Risk_Age_Summary(combi,HHMLRiskOutcome,Age_Grp,'All Risks'),
                        Set_Risk_Age_Summary(combi,RiskOutcome,Age_Grp,'All Risk'),
                        Set_Risk_Age_Summary(combi,HHRiskOutcome,Age_Grp,'HighestHigh Risks'),
                        Set_Risk_Age_Summary(combi,HHMRiskOutcome,Age_Grp,'MediumHighestHigh Risks'))
combiHighRisksAgeGrp<-get_HighRisk(combiRisksAgeGrp)
# Keep only children and adolescents for the adolescent-focused output.
combiHighRisksAgeGrp_Adole <- combiHighRisksAgeGrp %>%
  filter(Age_Grp %in% c("Under 10 Yrs","10 to 14 Yrs","15 to 19 Yrs"))
saveRDS(combiHighRisksAgeGrp_Adole, file = "combiHighRisksAgeGrp_Adole.rds")
| /HTS ML Output/HTS ML App County Predictions.R | no_license | MaringaM/Analytics | R | false | false | 5,280 | r | library('tidyverse')
library('ggplot2')
library('kableExtra')
library('Hmisc') #This Library will help in dividing the data into groups using the cut function
source("C:/Users/Admin/OneDrive/Analytics/HTS ML Output/Functiions - Combi.R")
machakos<-read.csv('C:/Users/Admin/OneDrive/Analytics/HTS ML Output/machakos_preds.csv')
nairobi <-read.csv('C:/Users/Admin/OneDrive/Analytics/HTS ML Output/nairobi_preds.csv')
siaya <-read.csv('C:/Users/Admin/OneDrive/Analytics/HTS ML Output/siaya_preds.csv')
homabay <-read.csv('C:/Users/Admin/OneDrive/Analytics/HTS ML Output/homabay_preds.csv')
#For purposes of joining the data later, add the column county
machakos <-machakos %>% mutate(county='Machakos')
nairobi <-nairobi %>% mutate(county='Nairobi')
siaya <-siaya %>% mutate(county='Siaya')
homabay <-homabay %>% mutate(county='Homabay')
#Compute the Risk Outcomes for the different Counties
machakos <-Set_RiskOutcome(machakos,0.372,0.25,0.094)
nairobi <-Set_RiskOutcome(nairobi,0.562,0.275,0.088)
siaya <-Set_RiskOutcome(siaya,0.508,0.26,0.076)
homabay <-Set_RiskOutcome(homabay,0.362,0.188,0.05)
#Combine the Nairobi and Machakos and Siaya Datasets
combi <- rbind(machakos,nairobi,siaya,homabay)
# Reordering group factor levels
combi$county <- factor(combi$county, levels = c("Siaya", "Homabay", "Nairobi", "Machakos"))
rm(machakos)
rm(nairobi)
rm(siaya)
rm(homabay)
# Include RowNumber in Combined Dataset
combi <- combi %>%
arrange(desc(Positive)) %>%
mutate(rowNum=row_number())
#Put the Testing Data into Groups
combi$RowGroup<-as.numeric(cut2(combi$rowNum, g=10))
combi$RowGroup<-combi$RowGroup*10
combi <- combi %>% group_by(FinalTestResult,RowGroup)%>% mutate(ResRowNum=row_number()) %>% ungroup()
table(combi$RiskOutcome,combi$county)
# Output Positivity for Each County
posit <- combi %>% group_by(county,FinalTestResult) %>% summarise(num=n()) %>% ungroup()
# Pivot Wider Final Test Result becomes the Columns, Risk Outcome becomes the Rows
posit <- posit %>% pivot_wider(names_from = FinalTestResult, values_from = num)
posit <-posit %>% mutate(TotalTested=Positive+Negative,
positivity=(Positive/TotalTested))
#Create Age Group Category
combi <-combi %>% mutate(AgeGroup=ifelse(AgeAtTest<15,'Under 15 Yrs','Over 15 Yrs'))
# Create Age Group category based on DATIM bands (Under 10 / 10-14 / 15-19 / 20+).
# Bug fix: the 10-14 band previously tested `AgeAtTest <= 10`, so ages 11-14
# matched no branch and were mislabelled "Over 20 Years"; the upper bound
# must be 14.
combi <- combi %>% mutate(Age_Grp = ifelse(AgeAtTest <= 9, 'Under 10 Yrs',
                                    ifelse(AgeAtTest >= 10 & AgeAtTest <= 14, '10 to 14 Yrs',
                                    ifelse(AgeAtTest >= 15 & AgeAtTest <= 19, '15 to 19 Yrs', "Over 20 Years"))))
# Combine the 2 top risk outcomes
combi <- combi %>% mutate(HHRiskOutcome = ifelse(RiskOutcome =='Highest Risk'| RiskOutcome =='High Risk','High Risk',
ifelse(RiskOutcome =='Medium Risk','Medium Risk','Low Risk')))
combi$HHRiskOutcome<- factor(combi$HHRiskOutcome,levels = c("Low Risk", "Medium Risk", "High Risk"))
#Combine the Medium and High Risks
combi <- combi %>% mutate(HHMRiskOutcome = ifelse(RiskOutcome =='Highest Risk'| RiskOutcome =='High Risk' | RiskOutcome =='Medium Risk',
'High Risk','Low Risk'))
combi$HHMRiskOutcome<- factor(combi$HHMRiskOutcome,levels = c("Low Risk","High Risk"))
combi <- combi %>% mutate(HHMLRiskOutcome = ifelse(RiskOutcome =='Highest Risk'| RiskOutcome =='High Risk' | RiskOutcome =='Medium Risk' |RiskOutcome =='Low Risk',
'All Risk','Aii'))
saveRDS(combi, file = "combi.rds")
# Combine the 4 Risks
combiRisks<-rbind(Set_Risk_Summary(combi,HHMLRiskOutcome,'All Risks'),
Set_Risk_Summary(combi,RiskOutcome,'Highest Risk'),
Set_Risk_Summary(combi,HHRiskOutcome,'HighestHigh Risks'),
Set_Risk_Summary(combi,HHMRiskOutcome,'MediumHighestHigh Risks'))
combiHighRisk<-get_HighRisk(combiRisks)
saveRDS(combiHighRisk, file = "combiHighRisk.rds")
# Summarize the Risk Outcomes by Final Test Result and Age Group
combiRisksAge<-rbind(Set_Risk_Age_Summary(combi,HHMLRiskOutcome,AgeGroup,'All Risks'),
Set_Risk_Age_Summary(combi,RiskOutcome,AgeGroup,'All Risk'),
Set_Risk_Age_Summary(combi,HHRiskOutcome,AgeGroup,'HighestHigh Risks'),
Set_Risk_Age_Summary(combi,HHMRiskOutcome,AgeGroup,'MediumHighestHigh Risks'))
combiHighRisksAge<-get_HighRisk(combiRisksAge)
saveRDS(combiHighRisksAge, file = "combiHighRisksAge.rds")
# Summarize the Risk Outcomes by Final Test Result and Age Group (Including 10-14, 15-19)
combiRisksAgeGrp<-rbind(Set_Risk_Age_Summary(combi,HHMLRiskOutcome,Age_Grp,'All Risks'),
Set_Risk_Age_Summary(combi,RiskOutcome,Age_Grp,'All Risk'),
Set_Risk_Age_Summary(combi,HHRiskOutcome,Age_Grp,'HighestHigh Risks'),
Set_Risk_Age_Summary(combi,HHMRiskOutcome,Age_Grp,'MediumHighestHigh Risks'))
combiHighRisksAgeGrp<-get_HighRisk(combiRisksAgeGrp)
combiHighRisksAgeGrp_Adole <- combiHighRisksAgeGrp %>%
filter(Age_Grp %in% c("Under 10 Yrs","10 to 14 Yrs","15 to 19 Yrs"))
saveRDS(combiHighRisksAgeGrp_Adole, file = "combiHighRisksAgeGrp_Adole.rds")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AutoGeneratedDefinitions.R
\name{detectCohortsByName}
\alias{detectCohortsByName}
\title{Detect the presence of string matched Cohort definitions.}
\usage{
detectCohortsByName(pattern, negate = FALSE, baseUrl)
}
\arguments{
\item{pattern}{A pattern to look for. See \link[stringr]{str_detect} for details.}
\item{negate}{If TRUE, return non-matching elements. See \link[stringr]{str_detect} for details.}
\item{baseUrl}{The base URL for the WebApi instance, for example:
"http://server.org:80/WebAPI".}
}
\value{
FALSE if no matches. If matched - output from \link[ROhdsiWebApi]{getCohortDefinitionsMetaData}
}
\description{
Detect the presence of string matched Cohort definitions.
}
\details{
Detect string matched Cohort definition names from the WebApi, and retrieve metadata definitions.
}
\examples{
\dontrun{
detectCohortsByName(pattern = "this text string to search in pattern",
baseUrl = "http://server.org:80/WebAPI")
}
}
| /man/detectCohortsByName.Rd | permissive | OHDSI/ROhdsiWebApi | R | false | true | 1,020 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AutoGeneratedDefinitions.R
\name{detectCohortsByName}
\alias{detectCohortsByName}
\title{Detect the presence of string matched Cohort definitions.}
\usage{
detectCohortsByName(pattern, negate = FALSE, baseUrl)
}
\arguments{
\item{pattern}{A pattern to look for. See \link[stringr]{str_detect} for details.}
\item{negate}{If TRUE, return non-matching elements. See \link[stringr]{str_detect} for details.}
\item{baseUrl}{The base URL for the WebApi instance, for example:
"http://server.org:80/WebAPI".}
}
\value{
FALSE if no matches. If matched - output from \link[ROhdsiWebApi]{getCohortDefinitionsMetaData}
}
\description{
Detect the presence of string matched Cohort definitions.
}
\details{
Detect string matched Cohort definition names from the WebApi, and retrieve metadata definitions.
}
\examples{
\dontrun{
detectCohortsByName(pattern = "this text string to search in pattern",
baseUrl = "http://server.org:80/WebAPI")
}
}
|
# Read the household power-consumption data; '?' marks missing values.
# skip = 1 drops the header row, so column names are assigned manually below.
Data <- read.table("household_power_consumption.txt", sep = ";", skip = 1, na.strings = "?")
colnames(Data) <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
# Keep only the two days of interest: 1-2 February 2007 (dates are d/m/yyyy).
NeededData <- subset(Data, Data$Date == "1/2/2007" | Data$Date == "2/2/2007")
# Bug fix: every time-series plot below references NeededData$datetime, but
# no such column was ever created, so those panels plotted against NULL.
# Build it from the Date and Time text columns.
NeededData$datetime <- strptime(paste(NeededData$Date, NeededData$Time), format = "%d/%m/%Y %H:%M:%S")
# 2 x 2 panel layout.
par(mfrow = c(2, 2))
hist(NeededData$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "Red")
plot(NeededData$datetime, NeededData$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
plot(NeededData$datetime, NeededData$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
# lines() overlays onto the existing panel; equivalent to points(type = "l").
lines(NeededData$datetime, NeededData$Sub_metering_2, col = "red")
lines(NeededData$datetime, NeededData$Sub_metering_3, col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(NeededData$datetime, NeededData$Global_active_power, type = "l", xlab = "datetime", ylab = "Global_active_power")
# Copy the finished on-screen figure to a 480x480 PNG.
dev.copy(png, file = "plot4.png", height = 480, width = 480)
dev.off()
| /plot4.r | no_license | AlaaElDinDarwish/datasciencecoursera | R | false | false | 1,207 | r | Data<-read.table("household_power_consumption.txt",sep=";",skip=1,na.strings="?")
colnames(Data)<-c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
NeededData<-subset(Data,Data$Date=="1/2/2007" | Data$Date =="2/2/2007")
par(mfrow = c(2, 2))
hist(NeededData$Global_active_power, main = "Global Active Power",xlab = "Global Active Power (kilowatts)", col = "Red")
plot(NeededData$datetime,NeededData$Voltage, type = "l",xlab="datetime",ylab="Voltage")
plot(NeededData$datetime,NeededData$Sub_metering_1,type = "l", ylab = "Energy sub metering", xlab = "")
points(NeededData$datetime,NeededData$Sub_metering_2,type = "l", ylab = "Energy sub metering", xlab = "",col="red")
points(NeededData$datetime,NeededData$Sub_metering_3,type = "l", ylab = "Energy sub metering", xlab = "",col="blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(NeededData$datetime,NeededData$Global_active_power, type = "l",xlab="datetime",ylab="Global_active_power")
dev.copy(png, file = "plot4.png", height = 480, width = 480)
dev.off()
|
/ADB/pls/man/kernelpls.fit.Rd | no_license | Etjean/M1stuff | R | false | false | 3,411 | rd | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_expressions.R
\name{getValue}
\alias{getValue}
\title{Calculate the value of an expression or reference}
\usage{
getValue(expression, model = getCurrentModel())
}
\arguments{
\item{expression}{Expressions to calculate, as character, finite numeric, or logical vector.}
\item{model}{a model object}
}
\value{
a numeric vector of values
}
\description{
\code{getValue} calculates the value of a given expression or reference.
}
\seealso{
\code{\link{getInitialValue}}
Other expression functions:
\code{\link{getInitialValue}()}
}
\concept{expression functions}
| /man/getValue.Rd | permissive | jpahle/CoRC | R | false | true | 646 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_expressions.R
\name{getValue}
\alias{getValue}
\title{Calculate the value of an expression or reference}
\usage{
getValue(expression, model = getCurrentModel())
}
\arguments{
\item{expression}{Expressions to calculate, as character, finite numeric, or logical vector.}
\item{model}{a model object}
}
\value{
a numeric vector of values
}
\description{
\code{getValue} calculates the value of a given expression or reference.
}
\seealso{
\code{\link{getInitialValue}}
Other expression functions:
\code{\link{getInitialValue}()}
}
\concept{expression functions}
|
library(data.table)
library(dplyr)

# Read the full dataset ('?' marks missing values) and keep only the two
# target days, 1-2 February 2007.
hpc3 <- fread(input = "./household_power_consumption.txt",
              na.strings = "?",
              sep = ";",
              stringsAsFactors = TRUE) %>%
  filter(strptime(Date, "%d/%m/%Y") == "2007-02-01" |
         strptime(Date, "%d/%m/%Y") == "2007-02-02")

# Combine the Date and Time columns into a single timestamp for the x axis.
hpc3 <- cbind(hpc3, datetime = strptime(paste(hpc3$Date,
                                              hpc3$Time),
                                        format = "%d/%m/%Y %H:%M:%S"))

# Axis labels should be in English regardless of the user's locale
# (e.g. LC_TIME=fr_FR.UTF-8); remember the current setting so it can be
# restored afterwards.
timeLocale <- Sys.getlocale(category = "LC_TIME")
Sys.setlocale(category = "LC_TIME", "en_US.UTF-8")

# Draw the three sub-metering series on one panel.  The first series sets up
# the axes; lines() overlays the remaining two, replacing the fragile
# par(new = TRUE) + repeated plot() pattern that redrew the axes each time.
plot(hpc3$datetime,
     hpc3$Sub_metering_1,
     ylim = c(0, 40),
     type = "l",
     xlab = "",
     ylab = "Energy sub metering",
     col = "black")
lines(hpc3$datetime, hpc3$Sub_metering_2, col = "red")
lines(hpc3$datetime, hpc3$Sub_metering_3, col = "blue")
# Use a proper line key (lty = 1) instead of a unicode-dash plotting symbol.
legend("topright",
       lty = 1,
       col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Copy the finished figure to a 480x480 PNG.
dev.copy(png,
         filename = "plot3.png",
         width = 480,
         height = 480,
         units = "px")
dev.off()

# Restore the user's locale and tidy up.
Sys.setlocale(category = "LC_TIME", timeLocale)
rm(hpc3, timeLocale)
| /plot3.R | no_license | alex1sc/ExData_Plotting1 | R | false | false | 1,597 | r | library(data.table)
library(dplyr)
hpc3 <- fread( input = "./household_power_consumption.txt",
na.strings = "?",
sep = ";",
stringsAsFactors = TRUE) %>%
filter( strptime( Date, "%d/%m/%Y") == "2007-02-01" |
strptime( Date, "%d/%m/%Y") == "2007-02-02")
hpc3 <- cbind( hpc3, datetime = strptime( paste( hpc3$Date,
hpc3$Time),
format = "%d/%m/%Y %H:%M:%S"))
timeLocale <- Sys.getlocale( category = "LC_TIME") ## LC_TIME=fr_FR.UTF-8
Sys.setlocale( category = "LC_TIME", "en_US.UTF-8")
plot( hpc3$datetime,
hpc3$Sub_metering_1,
ylim = c( 0, 40),
type = "l",
xlab = "",
ylab = "Energy sub metering",
col = "black")
par( new = TRUE)
plot( hpc3$datetime,
hpc3$Sub_metering_2,
ylim = c( 0, 40),
type = "l",
xlab = "",
ylab = "Energy sub metering",
col = "red")
par( new = TRUE)
plot( hpc3$datetime,
hpc3$Sub_metering_3,
ylim = c( 0, 40),
type = "l",
xlab = "",
ylab = "Energy sub metering",
col = "blue")
legend( "topright",
pch = "―",
col = c( "black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
par( new = FALSE)
dev.copy( "plot3.png",
device = png,
width = 480,
height = 480,
units = "px" )
dev.off( which = dev.cur())
Sys.setlocale( category = "LC_TIME", timeLocale)
rm( hpc3, timeLocale)
|
#' Threshold fitting with a maximum-likelihood criterion
#'
#' Fits the truncation threshold of the second Gaussian by Monte-Carlo
#' simulation, which avoids computing the bivariate Gaussian integral
#' directly (increase \code{iterations} for a more precise, less noisy fit).
#'
#' Bug fix: the original log-likelihood
#' \code{sum(x)*log(pF2) + (iterations-sum(x))*log(1-pF2)} is linear in
#' \code{sum(x)}, so it was always maximised at an extreme threshold and the
#' fitted proportion never converged (the "BUG" noted by the original
#' author).  The corrected Bernoulli log-likelihood below scores the target
#' proportion \code{pF2} against the simulated frequency and is maximised
#' when the simulated proportion of facies 2 matches \code{pF2}.
#'
#' @param pF1 is the proportion of facies 1
#' @param pF2 is the proportion of facies 2
#' @param rho is the correlation between the two gaussian functions
#' @param iterations is the number of iterations to match with the proportions
#' @return t2 is the truncation for the second gaussian function
threshold_fitting_ml <- function(pF1, pF2, rho, iterations) {
  t1 <- qnorm(pF1)
  # Random bivariate-normal simulation with correlation rho
  # (rmvnorm comes from the mvtnorm package).
  gauss <- rmvnorm(iterations, c(0, 0), matrix(c(1, rho, rho, 1), 2, 2))
  logml_max <- -Inf
  t2_fit <- 0
  for (inc1 in seq_len(iterations - 1)) {
    t2 <- qnorm(inc1 / iterations)
    # Simulated frequency of facies 2 at this candidate threshold.
    p_hat <- mean(gauss[, 1] > t1 & gauss[, 2] > t2)
    # Bernoulli log-likelihood of the target proportion under the simulated
    # frequency; it is -Inf when p_hat is 0 or 1, which automatically
    # excludes degenerate thresholds.
    logml <- iterations * (pF2 * log(p_hat) + (1 - pF2) * log(1 - p_hat))
    if (is.finite(logml) && logml > logml_max) {
      logml_max <- logml
      t2_fit <- t2
    }
  }
  return(t2_fit)
}
| /R/threshold_fitting_ml.R | permissive | tleblevecIMP/TransPGS | R | false | false | 1,109 | r | #' threshold fitting with a maximul likelood algorithm
#' it avoids to compute the gaussian integral
#' but requires to do random simulations with the same package
#' test should be done to see which method is the quickest
#'
#'The proportion does not converge for now, BUG
#'
#' @param pF1 is the proportion of facies 1
#' @param pF2 is the proportion of facies 2
#' @param rho is the correlation between the two gaussian functions
#' @param iterations is the number of iterations to match with the proportions
#' @return t2 is the trunction for the second gaussian function
threshold_fitting_ml<-function(pF1,pF2,rho,iterations){
t1 = qnorm(pF1)
# random simulation with correlation rho
gauss<-rmvnorm(iterations,c(0,0),matrix(c(1,rho,rho,1),2,2))
logml_max=-Inf
t2_fit=0
for(inc1 in 1:(iterations-1)){
t2 = qnorm(inc1 /iterations)
x= gauss[,1]>t1 & gauss[,2]>t2
# maximum likelihood for a bernoulli distribution
logml = sum(x) *log(pF2) + (iterations-sum(x))*log(1-pF2)
if (logml > logml_max){
logml_max = logml
t2_fit = t2
}
}
return(t2_fit)
}
|
library(shiny)
# UI definition: a page with a sidebar slider and a single plot panel.
shinyUI(fluidPage(
  # Page title shown at the top of the app.
  titlePanel("Hello World!"),
  sidebarLayout(
    sidebarPanel(
      # Slider with input id "bins": integer from 5 to 50, default 30.
      sliderInput("bins",
                  "Number of bins:",
                  min = 5,
                  max = 50,
                  value = 30)
    ),
    mainPanel(
      # Placeholder for the plot rendered under the output id "distPlot".
      plotOutput("distPlot")
    )
  )
))
| /shiny/L1.hello/ui.R | no_license | yama1968/Spikes | R | false | false | 389 | r | library(shiny)
shinyUI(fluidPage(
titlePanel("Hello World!"),
sidebarLayout(
sidebarPanel(
sliderInput("bins",
"Number of bins:",
min = 5,
max = 50,
value = 30)
),
mainPanel(
plotOutput("distPlot")
)
)
))
|
#library(editrules)
#if (require(testthat)){
# test_package("editrules")
#}
| /editrules/tests/test_all.R | no_license | ingted/R-Examples | R | false | false | 83 | r | #library(editrules)
#if (require(testthat)){
# test_package("editrules")
#}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{model.idJobPlatform}
\alias{model.idJobPlatform}
\title{Title}
\usage{
model.idJobPlatform(reponseAdd)
}
\arguments{
\item{reponseAdd}{}
}
\description{
Title
}
| /man/model.idJobPlatform.Rd | no_license | saagie/rstudio-saagie-addin | R | false | true | 254 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{model.idJobPlatform}
\alias{model.idJobPlatform}
\title{Title}
\usage{
model.idJobPlatform(reponseAdd)
}
\arguments{
\item{reponseAdd}{}
}
\description{
Title
}
|
testlist <- list(b = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), p1 = c(8.57286299609258e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161), p2 = -1.72131968218895e+83)
result <- do.call(metacoder:::intersect_line_rectangle,testlist)
str(result) | /metacoder/inst/testfiles/intersect_line_rectangle/AFL_intersect_line_rectangle/intersect_line_rectangle_valgrind_files/1615769459-test.R | permissive | akhikolla/updatedatatype-list3 | R | false | false | 728 | r | testlist <- list(b = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), p1 = c(8.57286299609258e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161), p2 = -1.72131968218895e+83)
result <- do.call(metacoder:::intersect_line_rectangle,testlist)
str(result) |
# Check that required packages are running
devtools::install_github("thomasp85/patchwork")
library(patchwork)
if (!require("pacman")) install.packages("pacman")
pacman::p_load(tidyverse, extrafont, sf)
# Get working directory
wd <- getwd()
# I found a Google Maps point dataset of English Cathedrals, downloaded as a KML
cathedrals <- st_read("English Cathedrals.kml")
cathedrals <- st_transform(cathedrals, 27700)
cathedrals <- st_zm(cathedrals)
st_crs(cathedrals) = 27700
cathedrals <- subset(cathedrals, cathedrals$Name != "Placemark 22")
# Make a bounding box of the English Cathedrals, add a buffer, so that we don't import more of the OS OpenMap Local ImportantBuilding layer than we need to.
cathedral.bbox <- st_as_sf(st_as_sfc(st_bbox(cathedrals)), 27700)
cathedral.bbox <- st_buffer(cathedral.bbox, 500)
wkt <- st_as_text(st_geometry(cathedral.bbox))
important.sf <- st_read(paste0(wd, "/water/data/opmplc_gb.gpkg"), layer = "ImportantBuilding", wkt_filter = wkt)
st_crs(important.sf) = 27700
important.sf <- subset(important.sf, important.sf$buildingTheme == "Religious Buildings")
# Not all of the OS ImportantBuildings data says whether it is a cathedral, and no mention of the denomination. So, I loop through a subset of all religious ImportantBuildings within a 1km buffer, filtering for keywords (Ely is called "St Mary's Chapel" for some reason).
cathedral.list <- list()
for(i in 1:nrow(cathedrals)){
buffer <- st_buffer(cathedrals[i,], 1000)
clip.sf <- st_intersection(important.sf, buffer)
clip.sf <- subset(clip.sf, grepl("Cathedral", clip.sf$distinctiveName) |
grepl("Abbey", clip.sf$distinctiveName) |
grepl("Minster", clip.sf$distinctiveName) |
grepl("St Mary's Chapel", clip.sf$distinctiveName))
cathedral.list[[i]] <- clip.sf
print(i)
}
cathedrals <- mapedit:::combine_list_of_sf(cathedral.list)
st_crs(cathedrals) = 27700
# Now that I have a list of potentials, I subset out the unwanted ones (RC instead of CofE etc.). Note that some cathedrals will have multiple cathedrals, but others will include neighbouring cathedrals (St Pauls and Southwark, Sheffield RC and CofE) - clean that up.
cathedrals <- subset(cathedrals, cathedrals$distinctiveName != "Westminster Cathedral")
cathedrals <- subset(cathedrals, cathedrals$distinctiveName != "The Metropolitan Cathedral Church of St Chad")
cathedrals <- subset(cathedrals, cathedrals$distinctiveName != "Metropolitan Cathedral of Christ the King")
cathedrals <- subset(cathedrals, !(cathedrals$distinctiveName == "The Guild Church of St Nicholas Cole Abbey" & cathedrals$Name == "Southwark Cathedral"))
cathedrals <- subset(cathedrals, !(cathedrals$distinctiveName == "St Paul's Cathedral" & cathedrals$Name == "Southwark Cathedral"))
cathedrals <- subset(cathedrals, !(cathedrals$distinctiveName == "The Guild Church of St Nicholas Cole Abbey" & cathedrals$Name == "St Paul's Cathedral"))
cathedrals <- subset(cathedrals, !(cathedrals$distinctiveName == "The Cathedral and Collegiate Church of St Saviour and St Mary Overie, Southwark" & cathedrals$Name == "St Paul's Cathedral"))
cathedrals <- subset(cathedrals, cathedrals$distinctiveName != "St Mary's Roman Catholic Cathedral")
cathedrals <- subset(cathedrals, cathedrals$distinctiveName != "Cathedral Church of St Marie")
rm(buffer, cathedral.bbox, cathedral.list, clip.sf, important.sf, i, wkt)
# Cathedral is superflous in the label now
cathedrals$Name <- gsub("Cathedral", "", cathedrals$Name)
# I want to use Transport as a font, so set up additional fonts
font_import(prompt = FALSE)
loadfonts()
# Make a list of cathedrals alphabetical
namelist <- sort(unique(cathedrals$Name))
# Set font size
f <- 6
# Build one ggplot per cathedral footprint and assemble them into a grid with
# patchwork.  Collecting the plots in a list replaces the original
# assign("p1") ... assign("p42") pattern and the 42 hand-written
# `p1 + p2 + ...` terms with a single wrap_plots() call, and seq_along() is
# used instead of 1:length(), which misbehaves on an empty vector.
plot_list <- lapply(seq_along(namelist), function(i) {
  message(sprintf("plotting %d/%d: %s", i, length(namelist), namelist[i]))
  ggplot() +
    geom_sf(data = cathedrals[cathedrals$Name == namelist[i], ], fill = "black", color = "black") +
    theme_classic(base_size = f, base_family = "Transport") +
    # Strip all axes: each panel is just the building outline plus a title.
    theme(axis.title.x = element_blank(),
          axis.text.x = element_blank(),
          axis.ticks.x = element_blank(),
          axis.line.x = element_blank(),
          axis.title.y = element_blank(),
          axis.text.y = element_blank(),
          axis.ticks.y = element_blank(),
          axis.line.y = element_blank(),
          plot.title = element_text(hjust = 0.5),
          legend.position = "none",
          plot.margin = grid::unit(c(2, 2, 2, 2), "mm")) +
    ggtitle(namelist[i])
})

# Arrange all panels in a single patchwork grid and save at retina quality.
map.gg <- wrap_plots(plot_list) + plot_layout()
ggsave("Monochrome_9.jpg", map.gg, scale = 2, dpi = 'retina')
| /Monochrome_9.R | no_license | wengraf/30DayMapChallenge2020 | R | false | false | 5,093 | r | # Check that required packages are running
devtools::install_github("thomasp85/patchwork")
library(patchwork)
if (!require("pacman")) install.packages("pacman")
pacman::p_load(tidyverse, extrafont, sf)
# Get working directory
wd <- getwd()
# I found a Google Maps point dataset of English Cathedrals, downloaded as a KML
cathedrals <- st_read("English Cathedrals.kml")
cathedrals <- st_transform(cathedrals, 27700)
cathedrals <- st_zm(cathedrals)
st_crs(cathedrals) = 27700
cathedrals <- subset(cathedrals, cathedrals$Name != "Placemark 22")
# Make a bounding box of the English Cathedrals, add a buffer, so that we don't import more of the OS OpenMap Local ImportantBuilding layer than we need to.
cathedral.bbox <- st_as_sf(st_as_sfc(st_bbox(cathedrals)), 27700)
cathedral.bbox <- st_buffer(cathedral.bbox, 500)
wkt <- st_as_text(st_geometry(cathedral.bbox))
important.sf <- st_read(paste0(wd, "/water/data/opmplc_gb.gpkg"), layer = "ImportantBuilding", wkt_filter = wkt)
st_crs(important.sf) = 27700
important.sf <- subset(important.sf, important.sf$buildingTheme == "Religious Buildings")
# Not all of the OS ImportantBuildings data says whether it is a cathedral, and no mention of the denomination. So, I loop through a subset of all religious ImportantBuildings within a 1km buffer, filtering for keywords (Ely is called "St Mary's Chapel" for some reason).
cathedral.list <- list()
for(i in 1:nrow(cathedrals)){
buffer <- st_buffer(cathedrals[i,], 1000)
clip.sf <- st_intersection(important.sf, buffer)
clip.sf <- subset(clip.sf, grepl("Cathedral", clip.sf$distinctiveName) |
grepl("Abbey", clip.sf$distinctiveName) |
grepl("Minster", clip.sf$distinctiveName) |
grepl("St Mary's Chapel", clip.sf$distinctiveName))
cathedral.list[[i]] <- clip.sf
print(i)
}
cathedrals <- mapedit:::combine_list_of_sf(cathedral.list)
st_crs(cathedrals) = 27700
# Now that I have a list of potentials, I subset out the unwanted ones (RC instead of CofE etc.). Note that some cathedrals will have multiple cathedrals, but others will include neighbouring cathedrals (St Pauls and Southwark, Sheffield RC and CofE) - clean that up.
cathedrals <- subset(cathedrals, cathedrals$distinctiveName != "Westminster Cathedral")
cathedrals <- subset(cathedrals, cathedrals$distinctiveName != "The Metropolitan Cathedral Church of St Chad")
cathedrals <- subset(cathedrals, cathedrals$distinctiveName != "Metropolitan Cathedral of Christ the King")
cathedrals <- subset(cathedrals, !(cathedrals$distinctiveName == "The Guild Church of St Nicholas Cole Abbey" & cathedrals$Name == "Southwark Cathedral"))
cathedrals <- subset(cathedrals, !(cathedrals$distinctiveName == "St Paul's Cathedral" & cathedrals$Name == "Southwark Cathedral"))
cathedrals <- subset(cathedrals, !(cathedrals$distinctiveName == "The Guild Church of St Nicholas Cole Abbey" & cathedrals$Name == "St Paul's Cathedral"))
cathedrals <- subset(cathedrals, !(cathedrals$distinctiveName == "The Cathedral and Collegiate Church of St Saviour and St Mary Overie, Southwark" & cathedrals$Name == "St Paul's Cathedral"))
cathedrals <- subset(cathedrals, cathedrals$distinctiveName != "St Mary's Roman Catholic Cathedral")
cathedrals <- subset(cathedrals, cathedrals$distinctiveName != "Cathedral Church of St Marie")
rm(buffer, cathedral.bbox, cathedral.list, clip.sf, important.sf, i, wkt)
# Cathedral is superflous in the label now
cathedrals$Name <- gsub("Cathedral", "", cathedrals$Name)
# I want to use Transport as a font, so set up additional fonts
font_import(prompt = FALSE)
loadfonts()
# Make a list of cathedrals alphabetical
namelist <- sort(unique(cathedrals$Name))
# Set font size
f <- 6
# The plots will be turned into a grid using the patchwork package. This loop makes a ggplot object of each in turn.
for(i in 1:length(namelist)){
print("starting...")
g2 <- ggplot() +
geom_sf(data = cathedrals[cathedrals$Name == namelist[i],], fill = "black", color = "black") +
theme_classic(base_size = f, base_family = "Transport") +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.line.x=element_blank(),
axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
axis.line.y=element_blank(),
plot.title = element_text(hjust = 0.5),
legend.position = "none",
plot.margin=grid::unit(c(2,2,2,2), "mm")) +
ggtitle(namelist[i])
assign(paste0("p", i), g2)
print(i/length(namelist))
print(i)
}
# Set this up for the patchwork package
map.gg <- p1 +
p2 +
p3 +
p4 +
p5 +
p6 +
p7 +
p8 +
p9 +
p10 +
p11 +
p12 +
p13 +
p14 +
p15 +
p16 +
p17 +
p18 +
p19 +
p20 +
p21 +
p22 +
p23 +
p24 +
p25 +
p26 +
p27 +
p28 +
p29 +
p30 +
p31 +
p32 +
p33 +
p34 +
p35 +
p36 +
p37 +
p38 +
p39 +
p40 +
p41 +
p42 +
plot_layout()
ggsave("Monochrome_9.jpg", map.gg, scale = 2, dpi = 'retina')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createBinMatrix.R
\name{createBinMatrix}
\alias{createBinMatrix}
\title{Create Matrix from calculated Bins}
\usage{
createBinMatrix(data, pval)
}
| /man/createBinMatrix.Rd | no_license | liangdp1984/cnAnalysis450k | R | false | true | 225 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createBinMatrix.R
\name{createBinMatrix}
\alias{createBinMatrix}
\title{Create Matrix from calculated Bins}
\usage{
createBinMatrix(data, pval)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/editing.R
\name{update_object}
\alias{update_object}
\title{Update an object with a new file}
\usage{
update_object(mn, pid, path, format_id = NULL, new_pid = NULL, sid = NULL)
}
\arguments{
\item{mn}{(MNode) The Member Node to update the object on.}
\item{pid}{(character) The PID of the object to update.}
\item{path}{(character) The full path to the file to update with.}
\item{format_id}{(character) Optional. The format ID to set for the object.
When not set, \code{\link[=guess_format_id]{guess_format_id()}} will be used to guess the format ID.
Should be a \href{https://cn.dataone.org/cn/v2/formats}{DataONE format ID}.}
\item{new_pid}{(character) Optional. Specify the PID for the new object.
Defaults to automatically generating a new, random UUID-style PID.}
\item{sid}{(character) Optional. Specify a Series ID (SID) to use for the new object.}
}
\value{
(character) The PID of the updated object.
}
\description{
This is a convenience wrapper around \code{\link[dataone:updateObject]{dataone::updateObject()}} which copies in
fields from the old object's System Metadata such as the rightsHolder and
accessPolicy and updates only what needs to be changed.
}
\examples{
\dontrun{
cn <- CNode("STAGING2")
mn <- getMNode(cn,"urn:node:mnTestKNB")
pid <- "urn:uuid:23c7cae4-0fc8-4241-96bb-aa8ed94d71fe"
my_path <- "/home/Documents/myfile.csv"
new_pid <- update_object(mn, pid, my_path, format_id = "text/csv")
}
}
| /man/update_object.Rd | permissive | NCEAS/arcticdatautils | R | false | true | 1,505 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/editing.R
\name{update_object}
\alias{update_object}
\title{Update an object with a new file}
\usage{
update_object(mn, pid, path, format_id = NULL, new_pid = NULL, sid = NULL)
}
\arguments{
\item{mn}{(MNode) The Member Node to update the object on.}
\item{pid}{(character) The PID of the object to update.}
\item{path}{(character) The full path to the file to update with.}
\item{format_id}{(character) Optional. The format ID to set for the object.
When not set, \code{\link[=guess_format_id]{guess_format_id()}} will be used to guess the format ID.
Should be a \href{https://cn.dataone.org/cn/v2/formats}{DataONE format ID}.}
\item{new_pid}{(character) Optional. Specify the PID for the new object.
Defaults to automatically generating a new, random UUID-style PID.}
\item{sid}{(character) Optional. Specify a Series ID (SID) to use for the new object.}
}
\value{
(character) The PID of the updated object.
}
\description{
This is a convenience wrapper around \code{\link[dataone:updateObject]{dataone::updateObject()}} which copies in
fields from the old object's System Metadata such as the rightsHolder and
accessPolicy and updates only what needs to be changed.
}
\examples{
\dontrun{
cn <- CNode("STAGING2")
mn <- getMNode(cn,"urn:node:mnTestKNB")
pid <- "urn:uuid:23c7cae4-0fc8-4241-96bb-aa8ed94d71fe"
my_path <- "/home/Documents/myfile.csv"
new_pid <- update_object(mn, pid, my_path, format_id = "text/csv")
}
}
|
#' @title Extract CIFTI data from file
#' @description Extracts the data after the CIFTI XML information
#' @param fname Filename of CIFTI
#' @param nim NIfTI-2 header, if already parsed.
#' If \code{NULL}, \code{\link{nifti_2_hdr}}
#' will be run on the CIFTI.
#'
#' @return Array of values
#' @export
cifti_data = function(fname, nim = NULL) {
  # Parse the NIfTI-2 header ourselves if the caller did not supply one.
  if (is.null(nim)) {
    nim = nifti_2_hdr(fname = fname)
  }
  # Open the file in binary mode; the connection is closed on exit even if
  # an error is raised below.
  fid = file(fname, "rb")
  on.exit({
    close(fid)
  })
  # Seek to the end to record the total file size in bytes, then jump to
  # vox_offset: the byte offset where the data matrix begins (past the
  # header and the embedded CIFTI XML extension).
  seek(fid, where = 0, origin = "end")
  filesize = seek(fid)
  seek(fid, where = nim@vox_offset, origin = "start");
  dtype = as.character(nim@datatype)
  img_dim = nim@dim_
  # dim_[1] holds the number of dimensions; elements 2+ are the extents, so
  # their product is the expected number of data values.
  n_items = prod(img_dim[2:length(img_dim)])
  # Map the NIfTI datatype code to the R storage mode readBin() should
  # return (per the standard NIfTI-1/2 datatype codes)...
  what = switch(dtype,
                "2" = integer(),
                "4" = numeric(),
                "8" = integer(),
                "16" = numeric(),
                "64" = double(),
                "512" = numeric(),
                "768" = integer()
  )
  # ...and to the on-disk size of one value in bytes.
  size = switch(dtype,
                "2" = 1,
                "4" = 2,
                "8" = 4,
                "16" = 4,
                "64" = 8,
                "512" = 2,
                "768" = 4
  )
  if (is.null(what) || is.null(size)) {
    stop("Unsupported data type indicated by NIfTI-2 header!")
  }
  # Read to end of file: filesize * 2 is a generous upper bound on the item
  # count, so readBin simply stops at EOF.
  vals = readBin(con = fid,
                 what = what,
                 n = filesize * 2, size = size)
  # Sanity checks: the number of values read must match the header's dims.
  if (n_items > length(vals)) {
    stop("Not all CIFTI data read!")
  }
  if (n_items < length(vals)) {
    stop("More data read than header indicated - header or data problem!")
  }
  # The CIFTI matrix dimensions live in NIfTI dims 6-8.
  cifti_dim = img_dim[6:8]
  ## === myc 20190207
  # some cifti (e.g., MSC) has dense data (60k+ vertex) comes first:
  # e.g., Dimension : 1 x 1 x 1 x 1 x 65890 x 818
  # Whereas standard cifti has rows * dense for dtseries https://www.humanconnectome.org/software/workbench-command/-cifti-help
  if (cifti_dim[1] > cifti_dim[2]) {
    # Dense dimension stored first: fill with the dims swapped, then
    # transpose each slice back so the result matches the header's dims.
    temp_vals = array(vals, dim = img_dim[c(7,6,8)]) # fill array based on dense coming first
    vals = array(t(as.matrix(temp_vals[,,])), dim = cifti_dim) # transform array back to original dimension to match header
  } else{
    vals = array(vals, dim = cifti_dim)
  }
  # Reference MATLAB logic kept for comparison (not executed):
  # [m, n] = size(voxdata);
  # if m>n
  # dat = nan(Ngreynodes,n);
  # dat(greynodeIndex(dataIndex),:) = voxdata;
  # else
  # dat = nan(Ngreynodes,m);
  # dat(greynodeIndex(dataIndex),:) = transpose(voxdata);
  # end
  return(vals)
}
#' @title Extract CIFTI data from file
#' @description Extracts the data after the CIFTI XML information
#' @param fname Filename of CIFTI
#' @param nim NIfTI-2 header, if already parsed.
#' If \code{NULL}, \code{\link{nifti_2_hdr}}
#' will be run on the CIFTI.
#'
#' @return Array of values
#' @export
cifti_data = function(fname, nim = NULL) {
  if (is.null(nim)) {
    nim = nifti_2_hdr(fname = fname)
  }
  # Binary connection; on.exit guarantees it closes even on error.
  fid = file(fname, "rb")
  on.exit({
    close(fid)
  })
  # Record total file size, then jump to the start of the voxel data blob.
  seek(fid, where = 0, origin = "end")
  filesize = seek(fid)
  seek(fid, where = nim@vox_offset, origin = "start")
  dtype = as.character(nim@datatype)
  img_dim = nim@dim_
  # Expected item count: product of all dims after dim_[1] (dim_[1] is the
  # number of dimensions, not a size).
  n_items = prod(img_dim[2:length(img_dim)])
  # NIfTI datatype code -> readBin() target type:
  # 2 = UINT8, 4 = INT16, 8 = INT32, 16 = FLOAT32, 64 = FLOAT64,
  # 512 = UINT16, 768 = UINT32.
  # Integer codes must be read as integer(): readBin() cannot read 2-byte
  # reals, so mapping INT16/UINT16 to numeric() (as before) always errored.
  what = switch(dtype,
                "2" = integer(),
                "4" = integer(),
                "8" = integer(),
                "16" = numeric(),
                "64" = double(),
                "512" = integer(),
                "768" = integer()
  )
  # Bytes per item for each datatype code.
  size = switch(dtype,
                "2" = 1,
                "4" = 2,
                "8" = 4,
                "16" = 4,
                "64" = 8,
                "512" = 2,
                "768" = 4
  )
  # Unsigned reads are only supported by readBin() for 1- and 2-byte
  # integers, so UINT8/UINT16 are read unsigned; UINT32 (768) is still read
  # as signed, meaning values >= 2^31 would wrap negative (readBin limit).
  signed = switch(dtype,
                  "2" = FALSE,
                  "512" = FALSE,
                  TRUE)
  if (is.null(what) || is.null(size)) {
    stop("Unsupported data type indicated by NIfTI-2 header!")
  }
  # n is only an upper bound on items to read; readBin() stops at EOF.
  vals = readBin(con = fid,
                 what = what,
                 n = filesize * 2, size = size,
                 signed = signed)
  if (n_items > length(vals)) {
    stop("Not all CIFTI data read!")
  }
  if (n_items < length(vals)) {
    stop("More data read than header indicated - header or data problem!")
  }
  # The CIFTI matrix dimensions are taken from header dim slots 6-8.
  cifti_dim = img_dim[6:8]
  ## === myc 20190207
  # some cifti (e.g., MSC) has dense data (60k+ vertex) comes first:
  # e.g., Dimension : 1 x 1 x 1 x 1 x 65890 x 818
  # Whereas standard cifti has rows * dense for dtseries https://www.humanconnectome.org/software/workbench-command/-cifti-help
  if (cifti_dim[1] > cifti_dim[2]) {
    temp_vals = array(vals, dim = img_dim[c(7,6,8)]) # fill array based on dense coming first
    vals = array(t(as.matrix(temp_vals[,,])), dim = cifti_dim) # transform array back to original dimension to match header
  } else {
    vals = array(vals, dim = cifti_dim)
  }
  return(vals)
}
# Stack annual FLUXCOM GPP fields (1980-2013) into one lon x lat x year
# array and write the result to a single NetCDF file.
# NOTE(review): `myhome` is not defined here -- presumably set by
# ~/.Rprofile; verify before running outside the author's environment.
source("~/.Rprofile")
library(dplyr)
library(ncdf4)
library(abind)
## ALTERNATIVELY, USE DATA DOWNLOADED FROM FLUXCOM WEBSITE:
yearstart = 1980
yearend = 2013
idx <- 0
for (year in yearstart:yearend){
  idx <- idx + 1
  filn <- paste0( myhome, "/data/gpp_mte/GPP.RF.CRUNCEPv6.annual.", as.character(year), ".nc" )
  nc <- nc_open( filn )
  tmp <- ncvar_get( nc, varid="GPP" )
  tmp <- tmp * 365 # convert to totals, given in units per day
  # First year initialises the array and the coordinate vectors; later
  # years are appended along a new 3rd (time) dimension.  (Appending with
  # abind() each iteration re-copies the growing array every year.)
  if (idx>1) {
    gpp <- abind( gpp, tmp, along = 3 )
  } else {
    gpp <- tmp
    lon <- nc$dim$lon$vals
    lat <- nc$dim$lat$vals
  }
  nc_close(nc)
}
## write annual GPP to file
# cdf.write() is a project helper (not on CRAN); it writes `gpp` with the
# lon/lat axes and a yearly time dimension plus metadata attributes.
outfilnam <- paste0(myhome, "/data/gpp_mte/gpp_mte_fluxcom_ANN.nc")
cdf.write( gpp, "gpp",
           lon, lat,
           filnam = outfilnam,
           nvars = 1,
           time = yearstart:yearend,
           make.tdim = TRUE,
           units_time = "year",
           long_name_var1 = "Gross primary productivity",
           units_var1 = "gC m-2 year-1",
           glob_hist = "file created by emcon_iav/preprocess_gpp_mte_fluxcom.R based on GPP.RF.CRUNCEPv6.annual.<year>.nc, downloaded from ftp://ftp.bgc-jena.mpg.de/pub/outgoing/FluxCom/CarbonFluxes/RS+METEO/CRUNCEPv6/raw/annual/ (17.11.2017)."
           )
| /preproc_mte_fluxcom.R | no_license | yangxhcaf/emcon_iav | R | false | false | 1,242 | r | source("~/.Rprofile")
library(dplyr)
library(ncdf4)
library(abind)
## ALTERNATIVELY, USE DATA DOWNLOADED FROM FLUXCOM WEBSITE:
# Read annual FLUXCOM GPP fields for 1980-2013, convert per-day values to
# annual totals, and stack them into a lon x lat x year array.
yearstart <- 1980
yearend <- 2013
for (yr in yearstart:yearend) {
  filn <- paste0( myhome, "/data/gpp_mte/GPP.RF.CRUNCEPv6.annual.", as.character(yr), ".nc" )
  nc <- nc_open( filn )
  slice <- ncvar_get( nc, varid="GPP" ) * 365 # convert to totals, given in units per day
  if (yr == yearstart) {
    # First year initialises the stack and grabs the coordinate vectors.
    gpp <- slice
    lon <- nc$dim$lon$vals
    lat <- nc$dim$lat$vals
  } else {
    # Subsequent years are appended along a new 3rd (time) dimension.
    gpp <- abind( gpp, slice, along = 3 )
  }
  nc_close(nc)
}
## write annual GPP to file
outfilnam <- paste0(myhome, "/data/gpp_mte/gpp_mte_fluxcom_ANN.nc")
cdf.write( gpp, "gpp",
           lon, lat,
           filnam = outfilnam,
           nvars = 1,
           time = yearstart:yearend,
           make.tdim = TRUE,
           units_time = "year",
           long_name_var1 = "Gross primary productivity",
           units_var1 = "gC m-2 year-1",
           glob_hist = "file created by emcon_iav/preprocess_gpp_mte_fluxcom.R based on GPP.RF.CRUNCEPv6.annual.<year>.nc, downloaded from ftp://ftp.bgc-jena.mpg.de/pub/outgoing/FluxCom/CarbonFluxes/RS+METEO/CRUNCEPv6/raw/annual/ (17.11.2017)."
           )
|
context("FacileData API: assay-level query and retrieval")
# Bioconductor container classes exercised by these tests.
.classes <- c("DESeqDataSet", "DGEList", "EList", "ExpressionSet",
              "SummarizedExperiment")
# EList is excluded from the tests that require RNA-seq (count) data.
.rnaseq.class <- setdiff(.classes, "EList")
# Build the example FacileDataSet and base DGEList once per session; the
# exists() guards avoid re-running the expensive constructors.
if (!exists("FDS")) FDS <- FacileData::exampleFacileDataSet()
if (!exists("Y")) Y <- example_bioc_data("DGEList", efds = FDS)
# Named list with one example container per class, all derived from Y.
BIOC <- sapply(.classes, example_bioc_data, Y = Y, simplify = FALSE)
test_that("assay_names is defined for base Bioconductor classes", {
  # Every container class must ship an S3 assay_names method and report
  # at least one assay name.
  for (container in names(BIOC)) {
    instance <- BIOC[[container]]
    method_name <- paste0("assay_names.", class(instance)[1L])
    checkmate::expect_function(getFunction(method_name), info = container)
    checkmate::expect_character(assay_names(instance), min.len = 1L,
                                info = container)
  }
})
test_that("assay_names returns names of assays in the facilitated container", {
  # Facilitation must neither add nor drop assays relative to the raw
  # container.
  for (container in names(BIOC)) {
    raw <- BIOC[[container]]
    facile <- facilitate(raw, run_vst = FALSE)
    checkmate::expect_set_equal(assay_names(facile), assay_names(raw),
                                info = container)
  }
})
test_that("assay_info returns legit metadata for all containers", {
  # Required assay_info() columns and their expected storage types.
  required_types <- c(
    assay = "character",
    assay_type = "character",
    feature_type = "character",
    description = "character",
    nfeatures = "integer",
    storage_mode = "character")
  for (container in names(BIOC)) {
    raw <- BIOC[[container]]
    facile <- facilitate(raw, run_vst = FALSE)
    meta <- assay_info(facile)
    expect_s3_class(meta, "data.frame")
    # Every required column must exist and hold the expected type.
    for (column in names(required_types)) {
      wanted <- required_types[[column]]
      msg <- sprintf("column '%s' is not type '%s' from container '%s'",
                     column, wanted, container)
      expect_is(meta[[column]], wanted, info = msg)
    }
    expect_equal(meta[["feature_type"]][1L], "entrez", info = container)
    expect_equal(meta[["nfeatures"]][1L], unname(nrow(raw)), info = container)
    checkmate::expect_set_equal(meta[["assay"]], assay_names(facile),
                                info = container)
  }
})
test_that("(fetch|with)_assay_data retrieval works across containers", {
  # This test is restricted to rnaseq containers for now
  features.all <- features(FDS)
  features.some <- dplyr::sample_n(features.all, 5)
  samples.all <- samples(FDS) %>% collect()
  samples.some <- dplyr::sample_n(samples.all, 10)
  # Reference results computed straight from the FacileDataSet; each entry
  # below is later compared against the equivalent query run on a
  # facilitated Bioc container.
  # The names of the assay will differ accross bioc data container types,
  # so we remove that column from these results
  fds.res <- list(
    tidy.all = FDS %>%
      fetch_assay_data(features.some, samples.all) %>%
      select(-assay) %>%
      arrange(sample_id, feature_id),
    tidy.some = FDS %>%
      fetch_assay_data(features.some, samples.some) %>%
      select(-assay) %>%
      arrange(sample_id, feature_id),
    tidy.some.fids = FDS %>%
      fetch_assay_data(features.some$feature_id, samples.some) %>%
      select(-assay) %>%
      arrange(sample_id, feature_id),
    # Exercising the `with_` call here simultaneously tests the with_
    # decoration functionality as well as the normalization procedure, since
    # the default for `with_assay_data` is `normalized = TRUE`
    tidy.with = samples.some %>%
      with_assay_data(features.some) %>%
      arrange(sample_id),
    matrix.all = FDS %>%
      fetch_assay_data(features.some, samples.all, as.matrix = TRUE),
    matrix.some.norm = FDS %>%
      fetch_assay_data(features.some, samples.some, as.matrix = TRUE,
                       normalized = TRUE))
  # See the next test for SummarizedExperiment
  for (bclass in setdiff(.rnaseq.class, "SummarizedExperiment")) {
    obj <- BIOC[[bclass]]
    f <- facilitate(obj, assay_type = "rnaseq", run_vst = FALSE)
    bsamples.all <- samples(f)
    bsamples.some <- semi_join(bsamples.all, samples.some,
                               by = c("dataset", "sample_id"))
    # NOTE(review): DESeqDataSet gets an explicit "cpm" normalization
    # method while the others use the logical default -- presumably so its
    # normalized values match the FDS reference; confirm.
    if (bclass == "DESeqDataSet") {
      normalized <- "cpm"
    } else {
      normalized <- TRUE
    }
    bioc.res <- list(
      # exclude the assay name the tidied reuslts because they will differ
      # across containers
      tidy.all = f %>%
        fetch_assay_data(features.some, bsamples.all) %>%
        select(-assay) %>%
        arrange(sample_id, feature_id),
      tidy.some = f %>%
        fetch_assay_data(features.some, bsamples.some) %>%
        select(-assay) %>%
        arrange(sample_id, feature_id),
      tidy.some.fids = f %>%
        fetch_assay_data(features.some$feature_id, bsamples.some) %>%
        select(-assay) %>%
        arrange(sample_id, feature_id),
      tidy.with = bsamples.some %>%
        with_assay_data(features.some, normalized = normalized) %>%
        arrange(sample_id),
      matrix.all = f %>%
        fetch_assay_data(features.some, bsamples.all, as.matrix = TRUE),
      matrix.some.norm = f %>%
        fetch_assay_data(features.some, bsamples.some, normalized = normalized,
                         as.matrix = TRUE))
    # Compare each FDS reference result with its Bioc-derived counterpart.
    for (comp in names(bioc.res)) {
      bres <- bioc.res[[comp]]
      fres <- fds.res[[comp]]
      is.tidy <- grepl("tidy\\.", comp)
      info <- sprintf("[%s] %s (%s)", bclass, sub("^.*?\\.", "", comp),
                      sub("\\..*$", "", comp))
      expect_is(fds(bres), is(f), info = info) # Ensure results were generated
      expect_is(fds(bres), is(obj)) # from correct container type.
      expect_is(bres, is(fres), info = info) # Results are the same type.
      if (is.tidy) {
        # expect_equal(bres, fres, info = info)
        # if this is from deseqdataset, normfactor and libsize came along
        # for the ride
        ecols <- colnames(fres)
        checkmate::expect_subset(ecols, colnames(bres), info = info)
        # For some reason the default expect_equal.tbl_df would randomly break
        # on SummarizedExperiment results. Manually expect_equal()-ing the
        # columns of the tbl's against each other always returned TRUE, though,
        # and I think it boils down to this:
        # https://github.com/tidyverse/dplyr/issues/2751
        # expect_equal.tbl_df has been removed from dplyr 1.0, but we needed
        # this to work before that, so .....................
        expect_equal(
          as.data.frame(bres[, ecols]),
          as.data.frame(fres),
          info = info,
          check.attributes = FALSE)
      } else {
        checkmate::expect_set_equal(colnames(bres), colnames(fres))
        checkmate::expect_set_equal(rownames(bres), rownames(fres))
        bres <- bres[rownames(fres), colnames(fres)]
        expect_equal(
          as.data.frame(bres),
          as.data.frame(fres),
          check.attributes = FALSE,
          info = info)
      }
    }
  }
})
# test_that("Stress test facile api against SummarizedExperiment", {
# # Tests against SummarizedExperiment seems to randomly fail on line 150
# # in the "tidy" tests ... need to loop on this to find it. The other
# # containers never seem to fail,
#
# obj <- BIOC[["SummarizedExperiment"]]
# f <- facilitate(obj, assay_type = "rnaseq")
# normalized <- TRUE
#
# features.all <- features(FDS)
# samples.all <- samples(FDS) %>% collect()
#
# for (i in 1:10) {
# features.some <- dplyr::sample_n(features.all, 5)
# samples.some <- dplyr::sample_n(samples.all, 10)
#
# # The names of the assay will differ accross bioc data container types,
# # so we remove that column from these results
# fds.res <- list(
# tidy.all = FDS %>%
# fetch_assay_data(features.some, samples.all) %>%
# select(-assay) %>%
# arrange(sample_id, feature_id),
# tidy.some = FDS %>%
# fetch_assay_data(features.some, samples.some) %>%
# select(-assay) %>%
# arrange(sample_id, feature_id),
# tidy.some.fids = FDS %>%
# fetch_assay_data(features.some$feature_id, samples.some) %>%
# select(-assay) %>%
# arrange(sample_id, feature_id),
# # Exercising the `with_` call here simultaneously tests the with_
# # decoration functionality as well as the normalization procedure, since
# # the default for `with_assay_data` is `normalized = TRUE`
# tidy.with = samples.some %>%
# with_assay_data(features.some) %>%
# arrange(sample_id),
# matrix.all = FDS %>%
# fetch_assay_data(features.some, samples.all, as.matrix = TRUE),
# matrix.some.norm = FDS %>%
# fetch_assay_data(features.some, samples.some, as.matrix = TRUE,
# normalized = TRUE))
#
#
# bsamples.all <- samples(f)
# bsamples.some <- semi_join(bsamples.all, samples.some,
# by = c("dataset", "sample_id"))
#
# bioc.res <- list(
# # exclude the assay name the tidied reuslts because they will differ
# # across containers
# tidy.all = f %>%
# fetch_assay_data(features.some, bsamples.all) %>%
# select(-assay) %>%
# arrange(sample_id, feature_id),
# tidy.some = f %>%
# fetch_assay_data(features.some, bsamples.some) %>%
# select(-assay) %>%
# arrange(sample_id, feature_id),
# tidy.some.fids = f %>%
# fetch_assay_data(features.some$feature_id, bsamples.some) %>%
# select(-assay) %>%
# arrange(sample_id, feature_id),
# tidy.with = bsamples.some %>%
# with_assay_data(features.some, normalized = normalized) %>%
# arrange(sample_id),
# matrix.all = f %>%
# fetch_assay_data(features.some, bsamples.all, as.matrix = TRUE),
# matrix.some.norm = f %>%
# fetch_assay_data(features.some, bsamples.some, normalized = normalized,
# as.matrix = TRUE))
#
# for (comp in names(bioc.res)) {
# bres <- bioc.res[[comp]]
# fres <- fds.res[[comp]]
# is.tidy <- grepl("tidy\\.", comp)
# info <- sprintf("[%s] %s (%s)", bclass, sub("^.*?\\.", "", comp),
# sub("\\..*$", "", comp))
#
# expect_is(fds(bres), is(f), info = info) # Ensure results were generated
# expect_is(fds(bres), is(obj)) # from correct container type.
# expect_is(bres, is(fres), info = info) # Results are the same type.
# if (is.tidy) {
# ecols <- colnames(fres)
# checkmate::expect_subset(ecols, colnames(bres), info = info)
# expect_equal(
# as.data.frame(bres[, ecols]),
# as.data.frame(fres),
# info = info,
# check.attributes = FALSE)
# } else {
# checkmate::expect_set_equal(colnames(bres), colnames(fres))
# checkmate::expect_set_equal(rownames(bres), rownames(fres))
# bres <- bres[rownames(fres), colnames(fres)]
# expect_equal(bres, fres, check.attributes = FALSE, info = info)
# }
# }
# }
#
# })
| /tests/testthat/test-facile-api-assay.R | permissive | RagnarDanneskjold/FacileBiocData | R | false | false | 10,792 | r | context("FacileData API: assay-level query and retrieval")
# Bioconductor container classes exercised by these tests.
.classes <- c("DESeqDataSet", "DGEList", "EList", "ExpressionSet",
              "SummarizedExperiment")
# EList is excluded from the tests that require RNA-seq (count) data.
.rnaseq.class <- setdiff(.classes, "EList")
# Build the example FacileDataSet and base DGEList once per session; the
# exists() guards avoid re-running the expensive constructors.
if (!exists("FDS")) FDS <- FacileData::exampleFacileDataSet()
if (!exists("Y")) Y <- example_bioc_data("DGEList", efds = FDS)
# Named list with one example container per class, all derived from Y.
BIOC <- sapply(.classes, example_bioc_data, Y = Y, simplify = FALSE)
test_that("assay_names is defined for base Bioconductor classes", {
  # Every container class must ship an S3 assay_names method and report
  # at least one assay name.
  for (container in names(BIOC)) {
    instance <- BIOC[[container]]
    method_name <- paste0("assay_names.", class(instance)[1L])
    checkmate::expect_function(getFunction(method_name), info = container)
    checkmate::expect_character(assay_names(instance), min.len = 1L,
                                info = container)
  }
})
test_that("assay_names returns names of assays in the facilitated container", {
  # Facilitation must neither add nor drop assays relative to the raw
  # container.
  for (container in names(BIOC)) {
    raw <- BIOC[[container]]
    facile <- facilitate(raw, run_vst = FALSE)
    checkmate::expect_set_equal(assay_names(facile), assay_names(raw),
                                info = container)
  }
})
test_that("assay_info returns legit metadata for all containers", {
  # Required assay_info() columns and their expected storage types.
  required_types <- c(
    assay = "character",
    assay_type = "character",
    feature_type = "character",
    description = "character",
    nfeatures = "integer",
    storage_mode = "character")
  for (container in names(BIOC)) {
    raw <- BIOC[[container]]
    facile <- facilitate(raw, run_vst = FALSE)
    meta <- assay_info(facile)
    expect_s3_class(meta, "data.frame")
    # Every required column must exist and hold the expected type.
    for (column in names(required_types)) {
      wanted <- required_types[[column]]
      msg <- sprintf("column '%s' is not type '%s' from container '%s'",
                     column, wanted, container)
      expect_is(meta[[column]], wanted, info = msg)
    }
    expect_equal(meta[["feature_type"]][1L], "entrez", info = container)
    expect_equal(meta[["nfeatures"]][1L], unname(nrow(raw)), info = container)
    checkmate::expect_set_equal(meta[["assay"]], assay_names(facile),
                                info = container)
  }
})
test_that("(fetch|with)_assay_data retrieval works across containers", {
  # This test is restricted to rnaseq containers for now
  features.all <- features(FDS)
  features.some <- dplyr::sample_n(features.all, 5)
  samples.all <- samples(FDS) %>% collect()
  samples.some <- dplyr::sample_n(samples.all, 10)
  # Reference results computed straight from the FacileDataSet; each entry
  # below is later compared against the equivalent query run on a
  # facilitated Bioc container.
  # The names of the assay will differ accross bioc data container types,
  # so we remove that column from these results
  fds.res <- list(
    tidy.all = FDS %>%
      fetch_assay_data(features.some, samples.all) %>%
      select(-assay) %>%
      arrange(sample_id, feature_id),
    tidy.some = FDS %>%
      fetch_assay_data(features.some, samples.some) %>%
      select(-assay) %>%
      arrange(sample_id, feature_id),
    tidy.some.fids = FDS %>%
      fetch_assay_data(features.some$feature_id, samples.some) %>%
      select(-assay) %>%
      arrange(sample_id, feature_id),
    # Exercising the `with_` call here simultaneously tests the with_
    # decoration functionality as well as the normalization procedure, since
    # the default for `with_assay_data` is `normalized = TRUE`
    tidy.with = samples.some %>%
      with_assay_data(features.some) %>%
      arrange(sample_id),
    matrix.all = FDS %>%
      fetch_assay_data(features.some, samples.all, as.matrix = TRUE),
    matrix.some.norm = FDS %>%
      fetch_assay_data(features.some, samples.some, as.matrix = TRUE,
                       normalized = TRUE))
  # See the next test for SummarizedExperiment
  for (bclass in setdiff(.rnaseq.class, "SummarizedExperiment")) {
    obj <- BIOC[[bclass]]
    f <- facilitate(obj, assay_type = "rnaseq", run_vst = FALSE)
    bsamples.all <- samples(f)
    bsamples.some <- semi_join(bsamples.all, samples.some,
                               by = c("dataset", "sample_id"))
    # NOTE(review): DESeqDataSet gets an explicit "cpm" normalization
    # method while the others use the logical default -- presumably so its
    # normalized values match the FDS reference; confirm.
    if (bclass == "DESeqDataSet") {
      normalized <- "cpm"
    } else {
      normalized <- TRUE
    }
    bioc.res <- list(
      # exclude the assay name the tidied reuslts because they will differ
      # across containers
      tidy.all = f %>%
        fetch_assay_data(features.some, bsamples.all) %>%
        select(-assay) %>%
        arrange(sample_id, feature_id),
      tidy.some = f %>%
        fetch_assay_data(features.some, bsamples.some) %>%
        select(-assay) %>%
        arrange(sample_id, feature_id),
      tidy.some.fids = f %>%
        fetch_assay_data(features.some$feature_id, bsamples.some) %>%
        select(-assay) %>%
        arrange(sample_id, feature_id),
      tidy.with = bsamples.some %>%
        with_assay_data(features.some, normalized = normalized) %>%
        arrange(sample_id),
      matrix.all = f %>%
        fetch_assay_data(features.some, bsamples.all, as.matrix = TRUE),
      matrix.some.norm = f %>%
        fetch_assay_data(features.some, bsamples.some, normalized = normalized,
                         as.matrix = TRUE))
    # Compare each FDS reference result with its Bioc-derived counterpart.
    for (comp in names(bioc.res)) {
      bres <- bioc.res[[comp]]
      fres <- fds.res[[comp]]
      is.tidy <- grepl("tidy\\.", comp)
      info <- sprintf("[%s] %s (%s)", bclass, sub("^.*?\\.", "", comp),
                      sub("\\..*$", "", comp))
      expect_is(fds(bres), is(f), info = info) # Ensure results were generated
      expect_is(fds(bres), is(obj)) # from correct container type.
      expect_is(bres, is(fres), info = info) # Results are the same type.
      if (is.tidy) {
        # expect_equal(bres, fres, info = info)
        # if this is from deseqdataset, normfactor and libsize came along
        # for the ride
        ecols <- colnames(fres)
        checkmate::expect_subset(ecols, colnames(bres), info = info)
        # For some reason the default expect_equal.tbl_df would randomly break
        # on SummarizedExperiment results. Manually expect_equal()-ing the
        # columns of the tbl's against each other always returned TRUE, though,
        # and I think it boils down to this:
        # https://github.com/tidyverse/dplyr/issues/2751
        # expect_equal.tbl_df has been removed from dplyr 1.0, but we needed
        # this to work before that, so .....................
        expect_equal(
          as.data.frame(bres[, ecols]),
          as.data.frame(fres),
          info = info,
          check.attributes = FALSE)
      } else {
        checkmate::expect_set_equal(colnames(bres), colnames(fres))
        checkmate::expect_set_equal(rownames(bres), rownames(fres))
        bres <- bres[rownames(fres), colnames(fres)]
        expect_equal(
          as.data.frame(bres),
          as.data.frame(fres),
          check.attributes = FALSE,
          info = info)
      }
    }
  }
})
# test_that("Stress test facile api against SummarizedExperiment", {
# # Tests against SummarizedExperiment seems to randomly fail on line 150
# # in the "tidy" tests ... need to loop on this to find it. The other
# # containers never seem to fail,
#
# obj <- BIOC[["SummarizedExperiment"]]
# f <- facilitate(obj, assay_type = "rnaseq")
# normalized <- TRUE
#
# features.all <- features(FDS)
# samples.all <- samples(FDS) %>% collect()
#
# for (i in 1:10) {
# features.some <- dplyr::sample_n(features.all, 5)
# samples.some <- dplyr::sample_n(samples.all, 10)
#
# # The names of the assay will differ accross bioc data container types,
# # so we remove that column from these results
# fds.res <- list(
# tidy.all = FDS %>%
# fetch_assay_data(features.some, samples.all) %>%
# select(-assay) %>%
# arrange(sample_id, feature_id),
# tidy.some = FDS %>%
# fetch_assay_data(features.some, samples.some) %>%
# select(-assay) %>%
# arrange(sample_id, feature_id),
# tidy.some.fids = FDS %>%
# fetch_assay_data(features.some$feature_id, samples.some) %>%
# select(-assay) %>%
# arrange(sample_id, feature_id),
# # Exercising the `with_` call here simultaneously tests the with_
# # decoration functionality as well as the normalization procedure, since
# # the default for `with_assay_data` is `normalized = TRUE`
# tidy.with = samples.some %>%
# with_assay_data(features.some) %>%
# arrange(sample_id),
# matrix.all = FDS %>%
# fetch_assay_data(features.some, samples.all, as.matrix = TRUE),
# matrix.some.norm = FDS %>%
# fetch_assay_data(features.some, samples.some, as.matrix = TRUE,
# normalized = TRUE))
#
#
# bsamples.all <- samples(f)
# bsamples.some <- semi_join(bsamples.all, samples.some,
# by = c("dataset", "sample_id"))
#
# bioc.res <- list(
# # exclude the assay name the tidied reuslts because they will differ
# # across containers
# tidy.all = f %>%
# fetch_assay_data(features.some, bsamples.all) %>%
# select(-assay) %>%
# arrange(sample_id, feature_id),
# tidy.some = f %>%
# fetch_assay_data(features.some, bsamples.some) %>%
# select(-assay) %>%
# arrange(sample_id, feature_id),
# tidy.some.fids = f %>%
# fetch_assay_data(features.some$feature_id, bsamples.some) %>%
# select(-assay) %>%
# arrange(sample_id, feature_id),
# tidy.with = bsamples.some %>%
# with_assay_data(features.some, normalized = normalized) %>%
# arrange(sample_id),
# matrix.all = f %>%
# fetch_assay_data(features.some, bsamples.all, as.matrix = TRUE),
# matrix.some.norm = f %>%
# fetch_assay_data(features.some, bsamples.some, normalized = normalized,
# as.matrix = TRUE))
#
# for (comp in names(bioc.res)) {
# bres <- bioc.res[[comp]]
# fres <- fds.res[[comp]]
# is.tidy <- grepl("tidy\\.", comp)
# info <- sprintf("[%s] %s (%s)", bclass, sub("^.*?\\.", "", comp),
# sub("\\..*$", "", comp))
#
# expect_is(fds(bres), is(f), info = info) # Ensure results were generated
# expect_is(fds(bres), is(obj)) # from correct container type.
# expect_is(bres, is(fres), info = info) # Results are the same type.
# if (is.tidy) {
# ecols <- colnames(fres)
# checkmate::expect_subset(ecols, colnames(bres), info = info)
# expect_equal(
# as.data.frame(bres[, ecols]),
# as.data.frame(fres),
# info = info,
# check.attributes = FALSE)
# } else {
# checkmate::expect_set_equal(colnames(bres), colnames(fres))
# checkmate::expect_set_equal(rownames(bres), rownames(fres))
# bres <- bres[rownames(fres), colnames(fres)]
# expect_equal(bres, fres, check.attributes = FALSE, info = info)
# }
# }
# }
#
# })
|
# Fit a regression tree (rpart) of `roe` on columns 2:31 of X1.csv,
# using a caret-generated 70% training partition.
# NOTE(review): install.packages() runs unconditionally on every execution;
# consider guarding with requireNamespace() so packages install only once.
install.packages("rpart")
library(rpart)
install.packages("rpart.plot")
library(rpart.plot)
install.packages("caret")
# lattice and ggplot2 are loaded explicitly before caret itself.
library(lattice)
library(ggplot2)
library(caret)
# NOTE(review): hard-coded, machine-specific path; breaks anywhere else.
setwd("C:/Users/lianxiang/Desktop")
X1<- read.csv("X1.csv")
# Fix the RNG seed so the train/test partition is reproducible.
set.seed(1000)
# Row indices of a 70% training subset, chosen based on the response `roe`.
t.idx <- createDataPartition(X1$roe, p=0.7, list = FALSE)
# Regression tree of roe on predictor columns 2:31, training rows only.
bfit <- rpart(roe ~ ., data = X1[t.idx,2:31])
bfit
plot(bfit) | /cuijiaru/123.R | permissive | Sallysjy/tutoring | R | false | false | 375 | r | install.packages("rpart")
library(rpart)
# Install missing packages only when absent, instead of re-installing on
# every run of the script.
if (!requireNamespace("rpart.plot", quietly = TRUE)) {
  install.packages("rpart.plot")
}
library(rpart.plot)
if (!requireNamespace("caret", quietly = TRUE)) {
  install.packages("caret")
}
library(lattice)
library(ggplot2)
library(caret)
# NOTE(review): absolute, machine-specific path; parameterize if this
# script is ever shared.
setwd("C:/Users/lianxiang/Desktop")
X1 <- read.csv("X1.csv")
# Fix the RNG seed so the train/test partition is reproducible.
set.seed(1000)
# Row indices of a 70% training subset, chosen based on the response `roe`.
t.idx <- createDataPartition(X1$roe, p = 0.7, list = FALSE)
# Regression tree of roe on predictor columns 2:31, training rows only.
bfit <- rpart(roe ~ ., data = X1[t.idx, 2:31])
bfit
plot(bfit) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{store_resid_var}
\alias{store_resid_var}
\title{store residual variance}
\usage{
store_resid_var(object)
}
\arguments{
\item{object}{The sema output, a list containing the model parameters.}
}
\value{
A data frame with the number of observations and the residual
variance.
}
\description{
This function extracts the residual variance and saves
it in a data frame with the number of data points seen so far.
}
\examples{
## First we create a dataset, consisting of 2500 observations from 20
## units. The fixed effects have the coefficients 1, 2, 3, 4, and 5. The
## variance of the random effects equals 1, 4, and 9. Lastly the
## residual variance equals 4:
test_data <- build_dataset(n = 2500,
j = 20,
fixed_coef = 1:5,
random_coef_sd = 1:3,
resid_sd = 2)
## Next, we fit a simple model to these data
m1 <- sema_fit_df(formula = y ~ 1 + V3 + V4 + V5 + V6 + (1 + V4 + V5 | id),
data_frame = test_data,
intercept = TRUE)
## to extract the residual variance from the m1 object:
store_resid_var(m1)
}
\seealso{
\code{\link{ranef}}, \code{\link{store_fixed_coef}},
\code{\link{store_random_var}}
}
\keyword{coefficients}
\keyword{save}
| /man/store_resid_var.Rd | no_license | L-Ippel/SEMA | R | false | true | 1,448 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{store_resid_var}
\alias{store_resid_var}
\title{store residual variance}
\usage{
store_resid_var(object)
}
\arguments{
\item{object}{The sema output, a list containing the model parameters.}
}
\value{
A data frame with the number of observations and the residual
variance.
}
\description{
This function extracts the residual variance and saves
it in a data frame with the number of data points seen so far.
}
\examples{
## First we create a dataset, consisting of 2500 observations from 20
## units. The fixed effects have the coefficients 1, 2, 3, 4, and 5. The
## variance of the random effects equals 1, 4, and 9. Lastly the
## residual variance equals 4:
test_data <- build_dataset(n = 2500,
j = 20,
fixed_coef = 1:5,
random_coef_sd = 1:3,
resid_sd = 2)
## Next, we fit a simple model to these data
m1 <- sema_fit_df(formula = y ~ 1 + V3 + V4 + V5 + V6 + (1 + V4 + V5 | id),
data_frame = test_data,
intercept = TRUE)
## to extract the residual variance from the m1 object:
store_resid_var(m1)
}
\seealso{
\code{\link{ranef}}, \code{\link{store_fixed_coef}},
\code{\link{store_random_var}}
}
\keyword{coefficients}
\keyword{save}
|
#!/usr/bin/Rscript
# Snakemake-driven dada2 step: filter/trim paired-end fastq files and
# record per-sample read counts before ("raw") and after ("filtered").
# All paths and parameters come from the enclosing Snakemake rule via the
# `snakemake` S4 object.
suppressPackageStartupMessages(library(dada2))
# filterAndTrim processes R1/R2 together and returns a per-file matrix of
# input/output read counts.
track.filt <- filterAndTrim(snakemake@input[['r1']],snakemake@output[['r1']],
                   snakemake@input[['r2']],snakemake@output[['r2']],
                   maxEE=snakemake@config[["maxEE"]],
                   compress=TRUE,
                   verbose=TRUE,
                   multithread=snakemake@threads)
# Label the tracking matrix: one row per sample, raw vs filtered counts.
row.names(track.filt) <- snakemake@params[["samples"]]
colnames(track.filt) = c('raw','filtered')
write.table(track.filt,snakemake@output[['nreads']], sep='\t') | /scripts/dada2/.ipynb_checkpoints/01_filter_dada-checkpoint.R | no_license | lvelosuarez/Snakemake_amplicon | R | false | false | 593 | r | #!/usr/bin/Rscript
# Snakemake-driven dada2 step: filter/trim paired-end fastq files and
# record per-sample read counts before ("raw") and after ("filtered").
# All paths and parameters come from the enclosing Snakemake rule via the
# `snakemake` S4 object.
suppressPackageStartupMessages(library(dada2))
# filterAndTrim processes R1/R2 together and returns a per-file matrix of
# input/output read counts.
track.filt <- filterAndTrim(snakemake@input[['r1']],snakemake@output[['r1']],
                   snakemake@input[['r2']],snakemake@output[['r2']],
                   maxEE=snakemake@config[["maxEE"]],
                   compress=TRUE,
                   verbose=TRUE,
                   multithread=snakemake@threads)
# Label the tracking matrix: one row per sample, raw vs filtered counts.
row.names(track.filt) <- snakemake@params[["samples"]]
colnames(track.filt) = c('raw','filtered')
write.table(track.filt,snakemake@output[['nreads']], sep='\t') |
## Author: Michael Creegan ##
## Date: March 4, 2021 ##
###############################################
### FOMC Sentiment Analysis & Insights / NLP ##
###############################################
library(magrittr)
# Build a labelled data.frame of the ciphers this machine's openssl
# supports. Wrapped in local() so the intermediates do not leak into the
# global environment; the result still auto-prints at top level.
local({
  raw_lines <- system("openssl ciphers -v", intern = TRUE)
  # Drop any "name=" prefixes so each field is a bare value.
  cleaned <- gsub("[[:alpha:]]+=", "", raw_lines)
  cipher_tbl <- read.table(text = cleaned)
  # NOTE: the "protoccol_version" spelling is kept as-is to preserve the
  # original column name.
  setNames(cipher_tbl, c("ciphername", "protoccol_version", "key_exchange",
                         "authentication", "symmetric_encryption_method",
                         "message_authentication_code"))
})
##import libraries##
library(dplyr)
library(SentimentAnalysis)
library(lubridate)
library(ggplot2)
library(tidyr)
library(stringr)
library(rlang)
library(tidyverse)
library(tidytext)
#library(xlsx)
library(readxl)
library(openxlsx)
library(RCurl)
library(XML)
library(kableExtra)
library(tm)
library(ngram)
library(wordcloud)
library(ggridges)
library(gridExtra)
library(rcompanion)
library(ggcorrplot)
library(caret)
library(e1071)
library(R.utils)
library(DT)
library(lattice)
library(kernlab)
library(mlbench)
library(caretEnsemble)
library(nnet)
library(LiblineaR)
library(knitr)
##import FOMC statements from 2007 - forward##
links<-c("https://www.federalreserve.gov/newsevents/pressreleases/monetary20190130a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20190320a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20190501a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20180131a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20180321a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20180502a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20180613a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20180801a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20180926a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20181108a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20181219a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20170201a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20170315a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20170503a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20170614a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20170726a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20170920a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20171101a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20171213a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20160127a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20160316a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20160427a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20160615a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20160727a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20160921a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20161102a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20161214a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20150128a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20150318a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20150429a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20150617a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20150729a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20150917a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20151028a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20151216a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20140129a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20140319a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20140430a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20140618a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20140730a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20140917a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20141029a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20141217a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20130130a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20130320a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20130501a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20130619a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20130731a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20130918a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20131030a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20131218a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20120125a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20120313a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20120425a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20120620a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20120801a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20120913a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20121024a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20121212a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20110126a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20110315a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20110427a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20110622a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20110809a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20110921a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20111102a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20111213a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20100127a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20100316a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20100428a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20100623a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20100810a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20100921a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20101103a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20101214a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20090128a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20090318a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20090429a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20090624a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20090812a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20090923a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20091104a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20091216a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20080122b.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20080130a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20080318a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20080430a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20080625a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20080805a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20080916a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20081008a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20081029a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20081216b.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20070131a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20070321a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20070509a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20070618a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20070807a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20070817b.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20070918a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20071031a.htm",
"https://www.federalreserve.gov/newsevents/pressreleases/monetary20071211a.htm"
)
##Prepare metadata for extraction and create dataframe##
##extract year of publication from statement release date, create data frame w date and URL ##
# str_extract() and substr() are vectorized, so the original element-by-element
# for-loop (which also grew statement.dates/year one entry at a time) is
# replaced by two whole-vector calls that produce identical results.
statement.dates <- str_extract(links, "[[:digit:]]+")  # "yyyymmdd" pulled from each URL
year <- substr(statement.dates, 1, 4)                  # publication year
reports <- data.frame(year, statement.dates, links)
# Convert factors to characters (a no-op on R >= 4.0, kept for portability)
# and order the statements chronologically
reports %<>% mutate_if(is.factor, as.character) %>% arrange(statement.dates)
## Data extraction via web-scraping ##
## loop through statement links and scrape contents from Fed website ##
## Discard fluff from content like prelim paragraphs ##
library(httr)
# Commented-out workaround for servers rejecting the default cipher list:
#httr_config <- config(ssl_cipher_list = "DEFAULT@SECLEVEL=1")
#res <- with_config(config = httr_config, GET(reports$links[2]))
statement.content<-NULL
statement.length<-NULL
# Network-dependent loop: one GET per statement URL. For each page it extracts
# every <p> node, then keeps the paragraphs between the first one mentioning
# "release" (exclusive) and the second-to-last one -- a heuristic that strips
# the press-release boilerplate before and after the statement body.
for(i in seq(from=1, to=length(reports$links))) {
  stm.url<-content(GET(reports$links[i]))
  stm.tree<-htmlTreeParse(stm.url,useInternal=TRUE )
  stm.tree.parse<-unlist(xpathApply(stm.tree, path="//p", fun=xmlValue))
  # index of the paragraph after the first containing "release";
  # NA if nothing matches -- TODO confirm every page has such a paragraph
  n<-(which(!is.na(str_locate(stm.tree.parse, "release")))+1)[1]
  l<-length(stm.tree.parse)-1
  # Condense separate paragraphs into one element per statement date
  reports$statement.content[i]<-paste(stm.tree.parse[n:l], collapse = "")
  # Remove line breaks
  reports$statement.content[i]<-gsub("\r?\n|\r"," ",reports$statement.content[i])
  #reports$statement.content[i]<-gsub("\\.+\\;+\\,+","",reports$statement.content[i])
  # Count number of characters per statement
  reports$statement.length[i]<-nchar(reports$statement.content[i])
  #reports$statement.length[i]<-wordcount(reports$statement.content[i], sep = " ", count.function = sum)
}
str(reports)
# Create R data object
saveRDS(reports, file = "fomc_data.rds")
#data cleansing#
# Correct the date for one statement, because the URL is not in sync with the actual date inside the statement content
reports$statement.dates[match(c("20070618"),reports$statement.dates)]<-"20070628"
##merge fomc dataset with reports dataset##
# NOTE(review): the date fix above is applied AFTER saveRDS(), so the d4 copy
# re-read from fomc_data.rds still carries "20070618" -- verify the
# classification file's Date column matches, otherwise the inner join below
# silently drops that statement.
d4<-readRDS(file = "fomc_data.rds")
dim(d4)
str(d4)
classificationFile = "https://raw.githubusercontent.com/completegraph/DATA607FINAL/master/Code/Classification_FOMC_Statements.csv"
# Hand-labelled classification of each statement; Date is kept as a character
# "yyyymmdd" string so it can join against statement.dates
cls = read_csv(classificationFile , col_types = cols( Date = col_character() ) )
# Replace spaces in the label column names with dots for formula/$ access
cls %>% rename( Economic.Growth = "Economic Growth", Employment.Growth = "Employment Growth", Medium.Term.Rate = "Medium Term Rate", Policy.Rate = "Policy Rate") -> cls
str(cls)
#merge the FOMC data and classification data#
# Inner join statements to their hand-labelled classes on the yyyymmdd string;
# Date2 is presumably an m/d/Y-formatted date column in cls -- TODO confirm.
d4 %>% inner_join( cls , by = c("statement.dates" = "Date")) %>%
  mutate( date_mdy = mdy(Date2)) %>%
  select(Index,
         year ,
         statement.dates,
         links,
         statement.content,
         statement.length ,
         date_mdy,
         Economic.Growth,
         Employment.Growth,
         Inflation,
         Medium.Term.Rate,
         Policy.Rate ) -> mgData
str(mgData)
# HTML preview table of the classification attributes (for the rendered report)
mgData %>% select( Index, date_mdy, Economic.Growth, Employment.Growth, Inflation, Medium.Term.Rate, Policy.Rate) %>% kable() %>% kable_styling(bootstrap_options = c("hover", "striped")) %>%
  scroll_box(width = "90%", height = "300px")
## export the merged data frame! ##
rds_filename = "fomc_merged_data_v2.rds"
saveRDS(mgData, file = rds_filename)
## EXPLORATORY ANALYSIS ##
##Analyze FOMC statement word lengths and frequency##
# Compute total statement length per year by aggregating across individual statements
# (statement.length is nchar(), so "words.per.year" actually counts characters)
yearly.length<-reports%>% group_by(year) %>% summarize(words.per.year=sum(statement.length))
yearly.length
# NOTE(review): df$col inside aes() works here but is an anti-pattern --
# prefer bare column names (breaks with faceting/layered data otherwise).
ggplot(yearly.length, aes(x=yearly.length$year,y=yearly.length$words.per.year))+
  geom_bar(stat="identity",fill="darkblue", colour="black") +
  coord_flip()+xlab("Year")+ylab("Statement Length")
# Spot-check one statement (column 4 = statement.content)
sample<-reports%>%filter(reports$statement.dates=="20140319")
sample[,4]
# NOTE(review): str_count() is applied to the whole one-row data frame, which
# coerces each column to a string -- presumably only the content column's
# count is meaningful; verify.
str_count(sample, pattern="inflation")
# Scatter of statement length by year, coloured by statement date.
# BUG FIX: the original passed `color = statement.dates` OUTSIDE aes(), which
# hands raw "yyyymmdd" strings to the graphics device as literal colour names
# and errors at draw time; the column must be mapped inside aes().
p<-ggplot(reports, aes(x=year,y=statement.length))+
  geom_point(aes(color=statement.dates))+
  scale_fill_brewer(palette="Pastel1")+
  theme(legend.position="right")+xlab("Year") + ylab("Length of Statement")
p
# Overlay the scatter with Fed-chair tenure labels and hand-placed trend
# arrows (Bernanke / Yellen / Powell eras). Coordinates are manual, chosen
# to match the 2007-2019 x-axis of the plot above.
p + ggplot2::annotate("text", x = 4,y = 5000,
                      label = "Bernanke", family="serif", fontface="bold",
                      colour="blue", size=4)+
  ggplot2::annotate("text", x=10, y=5500, label="Yellen", family="serif", fontface="bold",
                    colour="darkred",size=4)+
  ggplot2::annotate("text", x=13, y=3600, label="Powell", family="serif",
                    fontface="bold", colour="black",size=4)+
  ggplot2::annotate("segment", x = 0, xend = 8.1, y = 2700, yend = 6500, colour = "blue",
                    size=1, arrow=arrow(ends="both"))+
  ggplot2::annotate("segment", x = 8.1, xend = 12.1, y = 6500, yend = 3200,
                    colour = "darkred", size=1, arrow=arrow(ends="both"))+
  ggplot2::annotate("segment", x = 12.1, xend = 14, y = 3200, yend = 3200,
                    colour = "black", size=1, arrow=arrow(ends="both"))
##custom stop words like "committee" that do not provide insight into sentiment##
# Build the custom rows directly with character columns: data.frame() no
# longer creates factors on R >= 4.0, so the original construct-then-rename-
# then-as.character round-trip is unnecessary. The resulting new.stop_words
# has the same (word, lexicon) content as before.
my.stop_words <- data.frame(
  word = c("committee", "ben", "geithner", "bernanke",
           "timothy", "hoenig", "thomas", "donald", "kevin", "mishkin",
           "kroszner", "kohn", "charles", "frederic"),
  lexicon = "Custom",
  stringsAsFactors = FALSE
)
# Append the custom rows to tidytext's built-in stop word lexicons
new.stop_words <- rbind(my.stop_words, stop_words)
head(new.stop_words)
##cleanse data## remove punctuaction, white space, stop words, etc...##
# Tokenize each statement into words, strip non-alphabetic characters,
# drop stop words (built-in + custom), and count word frequency per
# statement date. Output columns: date, year, word, frequency.
report.words<-reports %>%
  mutate(date = statement.dates, year = year, text= statement.content) %>%
  unnest(text) %>% unnest_tokens(word, text) %>%
  mutate(word = stripWhitespace(gsub("[^A-Za-z ]"," ",word))) %>%
  filter(word != "") %>% filter(word != " ") %>%
  anti_join(new.stop_words)%>%
  count(date, year, word, sort = TRUE)%>%
  mutate(frequency = n) %>%
  select(date, year, word, frequency)
# Verify the count for the word "inflation" during the statements published in 2007
report.words%>%filter(year=='2007', word=='inflation')
# Rank most frequent words by year
# (after summarize() the frame is still grouped by year, so row_number()
# ranks words within each year)
f_text<-report.words%>% group_by(year,word) %>%
  summarize(total=sum(frequency))%>%
  arrange(year,desc(total),word)%>%
  mutate(rank=row_number())%>%
  ungroup() %>%
  arrange(rank,year)
# Select the top 10 ranked words per year
topWords <- f_text %>%
  filter(rank<11)%>%
  arrange(year,rank)
print(topWords)
# Graph top 10 most frequent words by year
# (head(..., 130) = 13 years x 10 words, i.e. the whole topWords frame)
gg <- ggplot(head(topWords, 130), aes(y=total,x=reorder(word,rank))) +
  geom_col(fill="#27408b") +
  facet_wrap(~year,scales="free", ncol=3)+
  coord_flip()+theme_ridges(font_size=11) +
  labs(x="",y="",title="Most Frequent Words in FOMC Statements grouped by years (2007 - 2019)")
gg
##explore economic attributes##
# Bar charts of the five hand-labelled classification attributes,
# arranged in a 3x2 grid.
mgData<-readRDS(file = "fomc_merged_data_v2.rds")
gEcon <- ggplot(data=mgData, aes(x=Economic.Growth, fill=Economic.Growth)) +
  geom_bar() + theme(legend.position = "none")
gEmp <- ggplot(data=mgData, aes(x=Employment.Growth, fill=Employment.Growth)) +
  geom_bar() + theme(legend.position = "none")
gInf <- ggplot(data=mgData, aes(x=Inflation, fill=Inflation)) +
  geom_bar() + theme(legend.position = "none")
gRate <- ggplot(data=mgData, aes(x=Medium.Term.Rate, fill=Medium.Term.Rate)) +
  geom_bar() + theme(legend.position = "none")
gPolicy <- ggplot(data=mgData, aes(x=Policy.Rate, fill=Policy.Rate)) +
  geom_bar() + theme(legend.position = "none")
grid.arrange(gEcon, gEmp, gInf, gRate, gPolicy, ncol=3, nrow=2 )
##correlation/covariation of attributes##
# Pairwise Cramer's V between the five categorical classification attributes,
# collected into a 5x5 matrix for the heatmap below.
mgData %>% select(Economic.Growth:Policy.Rate) -> catData # categorical data
cv = matrix(rep(0,25), nrow=5, ncol=5)
for(idx in 1:5){
  for(jdx in 1:5){
    # ROBUSTNESS FIX: [[ ]] extraction yields a plain vector whether catData
    # is a data.frame or a tibble; catData[, idx] on a tibble returns a
    # one-column tibble, which rcompanion::cramerV does not accept.
    cv[idx,jdx] = cramerV(catData[[idx]], catData[[jdx]])
  }
}
rownames( cv ) = colnames(catData)
colnames( cv ) = colnames(catData)
ggcorrplot(cv, lab=TRUE, ggtheme = ggplot2::theme_classic(), colors=c("violet", "white", "lightgreen")) +
  ggtitle("CramerV Matrix", subtitle="Classification Attributes Comparison")
##time series##
# Market context series pulled from GitHub mirrors of FRED data.
# NOTE(review): as.numeric() on these columns silently turns FRED's "."
# missing-value markers into NA (with a coercion warning) -- presumably
# intentional; confirm.
DGS10<-read.csv("https://raw.githubusercontent.com/DataScienceAR/Cuny-Assignments/master/Data-607/Data-Sets/DGS10.csv",stringsAsFactors = FALSE)
str(DGS10)
DGS10$DATE<- as_date(DGS10$DATE)
DGS10$DGS10<-as.numeric(DGS10$DGS10)
# Analysis of 10-Year Treasury Constant Maturity Rate
ggplot(data = DGS10)+
  aes(x=DATE,y=`DGS10`)+
  geom_line(size=.98,color="steelblue")+
  labs(x="Date",y="Percent",title="10 Year Constant Maturity Rate")+
  theme(panel.background = element_rect(fill = "white"))
# Analysis of Russell 3000® Total Market Index
RU3000TR<-read.csv("https://raw.githubusercontent.com/DataScienceAR/Cuny-Assignments/master/Data-607/Data-Sets/RU3000TR.csv",stringsAsFactors = FALSE)
str(RU3000TR)
RU3000TR$DATE<- as_date(RU3000TR$DATE)
RU3000TR$RU3000TR<-as.numeric(RU3000TR$RU3000TR)
ggplot(data = RU3000TR)+
  aes(x=DATE,y=`RU3000TR`)+
  geom_line(size=.98,color="steelblue")+
  labs(x="Date",y="Percent",title="Russell 3000® Total Market Index")+
  theme(panel.background = element_rect(fill = "white"))
# Analysis of Russell 1000® Total Market Index
RU1000TR<-read.csv("https://raw.githubusercontent.com/DataScienceAR/Cuny-Assignments/master/Data-607/Data-Sets/RU1000TR.csv",stringsAsFactors = FALSE)
str(RU1000TR)
RU1000TR$DATE<- as_date(RU1000TR$DATE)
RU1000TR$RU1000TR<-as.numeric(RU1000TR$RU1000TR)
ggplot(data = RU1000TR)+
  aes(x=DATE,y=`RU1000TR`)+
  geom_line(size=.98,color="steelblue")+
  labs(x="Date",y="Percent",title="Russell 1000® Total Market Index")+
  theme(panel.background = element_rect(fill = "white"))
# Analysis of Federal Funds Target Range
# (this file uses m/d/Y dates and has a Type column: upper/lower bound)
FEDTARGET<-read.csv("https://raw.githubusercontent.com/DataScienceAR/Cuny-Assignments/master/Data-607/Data-Sets/FEDTARGET.csv",stringsAsFactors = FALSE)
str(FEDTARGET)
FEDTARGET$DATE<- as.Date(strptime(FEDTARGET$DATE,format="%m/%d/%Y"),format="%Y-%m-%d")
FEDTARGET$Percent<-as.numeric(FEDTARGET$Percent)
ggplot(data = FEDTARGET)+
  aes(x=DATE,y=`Percent`,color=Type)+
  geom_line(size=.98)+
  labs(x="Date",y="Percent",title="Federal Funds Target Range")+
  theme(panel.background = element_rect(fill = "white"))
##Text Classification##
# Reload the merged statement + classification data for supervised modeling
fomc_data <-readRDS(file = "fomc_merged_data_v2.rds")
head(select(fomc_data, Index,year,statement.dates,statement.length,date_mdy,
            Employment.Growth,Economic.Growth,Inflation,Medium.Term.Rate,Policy.Rate))
# randomize data #
set.seed(1234567)
fomc_Rand <- fomc_data[sample(nrow(fomc_data)),]
# BUG FIX: str_replace_all() with an unnamed length-2 pattern vector tries to
# recycle the patterns against the ~100-element statement vector and fails;
# collapse the phrases into a single alternation regex instead (longest phrase
# first, so "the federal open market committee" is removed before plain
# "committee").
customStopWords <- c("the federal open market committee", "committee")
fomc_dataX <- fomc_Rand %>%
  mutate(statement.content = tolower(statement.content))%>%
  mutate(statement.content = str_replace_all(statement.content,
                                             paste(customStopWords, collapse = "|"), ""))
# form a corpus
# Standard tm preprocessing: punctuation/numbers/case/stop words/stemming/
# whitespace, then a document-term matrix of the cleaned statements.
corpus <- VCorpus(VectorSource(fomc_dataX$statement.content))
# Remove Punctuation
corpus <- tm_map(corpus, content_transformer(removePunctuation))
# Remove numbers
corpus <- tm_map(corpus, removeNumbers)
# Convert to lower case
corpus <- tm_map(corpus, content_transformer(tolower))
# Remove stop words
corpus <- tm_map(corpus, content_transformer(removeWords), stopwords("english"))
##Stemming
corpus <- tm_map(corpus, stemDocument)
# Remove Whitespace
corpus <- tm_map(corpus, stripWhitespace)
# Create Document Term Matrix
dtm <- DocumentTermMatrix(corpus)
# handle sparsity
# 0.30 is aggressive: only terms present in at least ~70% of statements survive
corpusX <- removeSparseTerms(dtm, 0.30)
# convert to matrix
data_matrix <- as.matrix(corpusX)
#medium term rate classification#
mRate <- data_matrix
# attach the 'medium.term.rate' column
# NOTE(review): cbind of a numeric matrix with a character vector coerces the
# WHOLE matrix to character, so the term-count predictors become strings in
# mRateData below -- convert them back to numeric before training, or build
# the data frame first and add the label column to it.
mRate_matrix <- cbind(mRate, fomc_dataX$Medium.Term.Rate)
# rename it to 'tone'
colnames(mRate_matrix)[ncol(mRate_matrix)] <- "tone"
# convert to data frame
mRateData <- as.data.frame(mRate_matrix)
# convert 'tone' to lower case and make it a factor column as well
mRateData$tone <- as.factor(tolower(mRateData$tone))
#partition into test and train data#
# 70/30 train/test split with a fixed seed for reproducibility
mRate_n <- nrow(mRateData)
mRateTrainVolume <- round(mRate_n * 0.7)
set.seed(314)
mRateTrainIndex <- sample(mRate_n, mRateTrainVolume)
mRateTrain <- mRateData[mRateTrainIndex,]
mRateTest <- mRateData[-mRateTrainIndex,]
names(mRateTrain)
##need to work on this model##
mRateModel <- train(tone ~., data = mRateTrain, method = 'svmLinear3')
##Sentiment Analysis##
# Score each statement with the finance-specific Loughran-McDonald dictionary
# and build a tidy Sentiment frame (date, year, score, direction).
fomcStatements <-readRDS(file = "fomc_merged_data_v2.rds") %>%
  select(statement.dates, statement.content)
fomcX <- fomcStatements %>%
  mutate(date = statement.dates, year = as.numeric(str_extract(statement.dates,'\\d{4}')),
         text= statement.content)%>%
  select(date, year, text)
# Sentiment analysis with Loughran-Mcdonald dictionary
sentiment <- analyzeSentiment(fomcX$text, language = "english", aggregate = fomcX$year,
                              removeStopwords = TRUE, stemming = TRUE,
                              rules=list("SentimentLM"=list(ruleSentiment,
                                                            loadDictionaryLM())))
summary(sentiment)
# Table showing breakdown of Sentiments
table(convertToDirection(sentiment$SentimentLM))
# Line plot to visualize the evolution of sentiment scores.
plotSentiment(sentiment, xlab="Tone")
Sentiment<-data.frame(fomcX$date,fomcX$year,sentiment$SentimentLM,
                      convertToDirection(sentiment$SentimentLM))
names(Sentiment)<-(c("FOMC_Date","FOMC_Year","Sentiment_Score","Sentiment"))
str(Sentiment)
# Change the date format to Ymd
Sentiment$FOMC_Date<- ymd(Sentiment$FOMC_Date)
Sentiment$FOMC_Year<- as.numeric(Sentiment$FOMC_Year)
str(Sentiment)
# Distribution of Sentiment Score for period of 2007 to 2019
ggplot(Sentiment,aes(x=Sentiment_Score))+
  geom_histogram(binwidth =.0125,color="black",fill="lightblue")+
  labs(x="Setiment Score",y="Frequency",title="Sentiment Score Distribution from 2007 to 2019")+
  theme(panel.background = element_rect(fill = "white"))
# Sentiment Score Trend
ggplot(data = Sentiment)+
  aes(x=FOMC_Date,y=Sentiment_Score)+
  geom_line(size=.98,color="firebrick")+
  labs(x="FOMC Date",y="Sentiment Score",title="Sentiment Score trend over the period of 2007 to 2019")+
  theme(panel.background = element_rect(fill = "white"))
# Scatter plot of score vs Date (Grouped)
ggplot(Sentiment,aes(x=FOMC_Date,y=Sentiment_Score,color=Sentiment))+
  geom_point()+
  labs(x="FOMC Date",y="Sentiment Score",title="Sentiments spread over the period of 2007 to 2019")+
  theme(panel.background = element_rect(fill = "white"))
# Exporting data frame to RDS
## Changing the Date format
# Back to the "yyyymmdd" string form so downstream joins on statement.dates work
Sentiment$FOMC_Date<-format(Sentiment$FOMC_Date, format = "%Y%m%d")
## Exporting to .RDS
saveRDS(Sentiment,"SentimentDF.rds")
##Financial Impact of Sentiment##
#load all data frames
mgData<-readRDS(file = "fomc_merged_data_v2.rds")
sData <- readRDS( file = "SentimentDF.rds")
file_fred_ru1000tr = "https://raw.githubusercontent.com/completegraph/DATA607FINAL/master/DATA/FRED_RU1000TR.csv"
ru1000tr = read_csv(file_fred_ru1000tr,
                    col_types = cols(DATE=col_character(),
                                     RU1000TR = col_double() ) )
# Generate a lubridate date column to join with the FOMC data.
# -----------------------------------------------------------------
ru1000tr %>% mutate( date_mdy = lubridate::ymd( DATE ) )-> ruData
#z_ru_daily = (RU1000TR - mean(RU1000TR, na.rm=TRUE))/sd(RU1000TR, na.rm = TRUE )
# Second, join the data:
# Since this is a 2-way inner join, we start with the FOMC statement data
# and join it to the sentiment data by date string (yyyymmdd)
# -------------------------------------------------------------------------
mgData %>% inner_join(sData, by = c( "statement.dates" = "FOMC_Date")) -> msData
# Join the sentiment-FOMC data to the Russell 1000 Index data from FRED
# Make sure to add a Z-score for each of the time series: sentiment and Rusell index values
# Save the raw data and normalized data by FOMC data.
# NOTE(review): logEquity / z_logEquity below are computed WITHOUT na.rm, so a
# single FOMC date missing from the index series propagates NA through every
# z-score -- confirm RU1000TR is complete on all statement dates.
# ----------------------------------------------------------------------------------
msEQdata = msData %>% left_join(ruData, by = c("date_mdy" = "date_mdy") ) %>%
  select( date_mdy, Sentiment_Score, RU1000TR ) %>%
  mutate( z_ru_fomc = (RU1000TR - mean(RU1000TR, na.rm = TRUE) ) / sd( RU1000TR, na.rm=TRUE ) ,
          z_sentiment = ( Sentiment_Score - mean( Sentiment_Score, na.rm = TRUE) ) /
            sd( Sentiment_Score, na.rm=TRUE) )
msEQdata %>% mutate( logEquity = log(RU1000TR) ) %>%
  mutate( z_logEquity = ( logEquity - mean(logEquity) )/ sd( logEquity ) ) -> msEQdata
msEQdata %>% kable() %>% scroll_box(width="100%", height="200px")
#Sentiment vs Russell 1000
# Three passes at the comparison: raw levels (scales incompatible), both
# z-scored, and z-scored sentiment vs z-scored log index (the usable one).
ggplot() +
  geom_line(data=msEQdata, aes(x=date_mdy, y=Sentiment_Score) , color = "red" ) +
  geom_line(data=msEQdata, aes(x=date_mdy, y=RU1000TR), color="green") +
  ggtitle("Sentiment vs. Russell 1000 Equity Level", subtitle="Not usable without fixes")
ggplot() +
  geom_line(data=msEQdata, aes(x=date_mdy, y=z_sentiment) , color = "red" ) +
  geom_line(data=msEQdata, aes(x=date_mdy, y=z_ru_fomc), color="green") +
  ggtitle("Scaled Sentiment vs. Scaled Equity Index", subtitle = "Nearly There...")
ggplot() +
  geom_line(data=msEQdata, aes(x=date_mdy, y=z_sentiment) , color = "red" ) +
  geom_line(data=msEQdata, aes(x=date_mdy, y=z_logEquity), color="green") +
  ggtitle("Scaled-Sentiment vs. Scaled Log Equity Price")
##regression of sentiment##
# NOTE(review): rows 2:102 are hard-coded -- presumably to drop a row with a
# missing index value; confirm rather than relying on position.
mod1 = lm( z_logEquity ~ z_sentiment, data=msEQdata[2:102,])
summary(mod1)
#scatterplot of regressed values with regression line to study the model fit##
ggplot(data=msEQdata[2:102,], aes(x=z_sentiment, y=z_logEquity) ) +
  geom_point() +
  geom_smooth(method=lm) +
  ggtitle("ScatterPlot of Fitted Regression Model", subtitle="X=Z-Sentiment, Y=Z-LogRussell 1000 (2007-2019)")
###oil analysis##
# NOTE(review): setwd() to a user-specific path makes this section machine-
# dependent -- the CSVs below only resolve on this one machine; prefer
# absolute paths or project-relative paths.
setwd('C:/Users/creeg/Downloads')
wti <- read.csv("wtid.csv", header = TRUE)
wti$Date <- ymd(wti$Date)
wti %>% mutate( date_mdy = lubridate::ymd( Date ) )-> wtiData
##oil prices, equity prices, sentiment score (all standardized to z-score) in one table##
# BUG FIX: `by = ("date_mdy" = "date_mdy")` relied on an accidental
# parenthesised assignment (which also created a stray global `date_mdy`);
# use the documented plain-string key form instead.
msEQWTIdata <- msEQdata %>% inner_join(wtiData, by = "date_mdy") %>%
  select( date_mdy, Sentiment_Score, z_ru_fomc, z_sentiment, logEquity, z_logEquity
          ,Price) %>%
  mutate(z_wti_price = (Price - mean(Price, na.rm = TRUE) ) / sd( Price, na.rm = TRUE))
msEQWTIdata %>% mutate( logWTI = log(Price) ) %>%
  mutate( z_log_wti_price = ( logWTI - mean(logWTI) )/ sd( logWTI ) ) -> msEQWTIdata
##add 10 year treasury rates##
# Rebuild a yyyymmdd string key from the DGS10 series, then join the 10-year
# Treasury rate onto the sentiment/equity/oil table and z-score it.
TSY10 <- DGS10
TSY10$DATE <- ymd(TSY10$DATE)
TSY10$DATE <- format(TSY10$DATE, format = "%Y%m%d")
TSY10 %>% mutate( date_mdy = lubridate::ymd( DATE ) )-> TSY10data
names(msEQWTIdata)
names(TSY10data)
# BUG FIX: `by = ("date_mdy" = "date_mdy")` was an accidental parenthesised
# assignment (side effect: stray global `date_mdy`); replaced with the
# documented plain-string key form.
msTSYdata <- msEQWTIdata %>% inner_join(TSY10data, by = "date_mdy") %>%
  select( date_mdy, Sentiment_Score, z_ru_fomc, z_sentiment, logEquity, z_logEquity
          ,z_wti_price, logWTI, z_log_wti_price, DGS10) %>%
  mutate(z_tsy10 = (DGS10 - mean(DGS10, na.rm = TRUE) ) / sd( DGS10, na.rm = TRUE))
msTSYdata %>% mutate( logTSY10 = log(DGS10) ) %>%
  mutate( z_log_TSY10 = ( logTSY10 - mean(logTSY10) )/ sd( logTSY10 ) ) -> msTSYdata
# Quick sanity checks on the underlying series
mean(TSY10$DGS10)
sd(TSY10$DGS10)
mean(wti$Price)
sd(wti$Price)
##add 3M LIBOR rates USD##
# Join the 3-month USD LIBOR series (FRED column USD3MTD156N) and z-score it.
libor <- read.csv("3MLIBOR.csv", header = TRUE)
head(libor)
libor
libor$USD3MTD156N <- as.numeric(libor$USD3MTD156N)
libor$DATE <- ymd(libor$DATE)
libor %>% mutate( date_mdy = lubridate::ymd( DATE ) )-> libordata
names(msTSYdata)
# BUG FIX: the original called names(liborData) -- object `liborData` does not
# exist (the frame is `libordata`), which stopped the script here.
names(libordata)
# BUG FIX: by = ("date_mdy" = "date_mdy") replaced with the documented
# plain-string key form (the original was an accidental assignment).
msfinaldata <- msTSYdata %>% inner_join(libordata, by = "date_mdy") %>%
  select( date_mdy, Sentiment_Score, z_ru_fomc, z_sentiment, logEquity, z_logEquity
          ,z_wti_price, logWTI, z_log_wti_price, z_tsy10, logTSY10, z_log_TSY10, USD3MTD156N) %>%
  mutate(z_libor = (USD3MTD156N - mean(USD3MTD156N, na.rm = TRUE) ) / sd( USD3MTD156N, na.rm = TRUE))
msfinaldata %>% mutate( log3mLibor = log(USD3MTD156N) ) %>%
  mutate( z_log_3M_Libor = ( log3mLibor - mean(log3mLibor) )/ sd( log3mLibor ) ) -> msfinaldata
##brent oil test##
# Join the Brent crude series, z-score it, and export the final merged table.
brent <- read.csv("brent.csv", header = TRUE)
names(brent)
class(brent$Price)
brent$brent <- brent$Price
class(brent$brent)
brent$Date <- ymd(brent$Date)
brent %>% mutate( date_mdy = lubridate::ymd( Date ) )-> brentdata
names(msfinaldata)
# BUG FIX: by = ("date_mdy" = "date_mdy") replaced with the documented
# plain-string key form (the original was an accidental assignment).
msfinaldata1 <- msfinaldata %>% inner_join(brentdata, by = "date_mdy") %>%
  select( date_mdy, Sentiment_Score, z_ru_fomc, z_sentiment, logEquity, z_logEquity
          ,z_wti_price, logWTI, z_log_wti_price, z_tsy10, logTSY10, z_log_TSY10
          ,z_libor, log3mLibor, z_log_3M_Libor, brent) %>%
  mutate(z_brent = (brent - mean(brent, na.rm = TRUE) ) / sd( brent, na.rm = TRUE))
# BUG FIXES: (1) log(Price) referenced a column dropped by the select() above
# -- use the retained `brent` column; (2) the result was assigned to
# `msfinaldata` while the next line read msfinaldata1$z_log_brent (NULL).
msfinaldata1 %>% mutate( logbrent = log(brent) ) %>%
  mutate( z_log_brent = ( logbrent - mean(logbrent) )/ sd( logbrent ) ) -> msfinaldata1
msfinaldata1$z_log_brent
head(msfinaldata1)
#library(writexl)
write_xlsx(msfinaldata1, "msfinal1.xlsx")
| /FOMC Statement Analysis.R | no_license | creeganmi/FOMC | R | false | false | 31,294 | r | ## Author: Michael Creegan ##
## Date: March 4, 2021 ##
###############################################
### FOMC Sentiment Analysis & Insights / NLP ##
###############################################
library(magrittr)
read.table(
text = system("openssl ciphers -v", intern=TRUE) %>%
gsub("[[:alpha:]]+=", "", .)
) %>%
setNames(
c("ciphername", "protoccol_version", "key_exchange", "authentication",
"symmetric_encryption_method", "message_authentication_code")
)
##import libraries##
library(dplyr)
library(SentimentAnalysis)
library(lubridate)
library(ggplot2)
library(tidyr)
library(stringr)
library(rlang)
library(tidyverse)
library(tidytext)
#library(xlsx)
library(readxl)
library(openxlsx)
library(RCurl)
library(XML)
library(kableExtra)
library(tm)
library(ngram)
library(wordcloud)
library(ggridges)
library(gridExtra)
library(rcompanion)
library(ggcorrplot)
library(caret)
library(e1071)
library(R.utils)
library(DT)
library(lattice)
library(kernlab)
library(mlbench)
library(caretEnsemble)
library(nnet)
library(LiblineaR)
library(knitr)
##import FOMC statements from 2007 - forward##
# Every FOMC press-release URL in this analysis shares one prefix and differs
# only in its release ID (yyyymmdd + page suffix: usually "a", a few "b"), so
# the links are generated from the IDs instead of hand-writing 100+ literals.
# IDs are listed newest-first within each year, 2019 back to 2007, preserving
# the original vector's order exactly.
links <- paste0(
  "https://www.federalreserve.gov/newsevents/pressreleases/monetary",
  c(
    "20190130a", "20190320a", "20190501a",
    "20180131a", "20180321a", "20180502a", "20180613a", "20180801a",
    "20180926a", "20181108a", "20181219a",
    "20170201a", "20170315a", "20170503a", "20170614a", "20170726a",
    "20170920a", "20171101a", "20171213a",
    "20160127a", "20160316a", "20160427a", "20160615a", "20160727a",
    "20160921a", "20161102a", "20161214a",
    "20150128a", "20150318a", "20150429a", "20150617a", "20150729a",
    "20150917a", "20151028a", "20151216a",
    "20140129a", "20140319a", "20140430a", "20140618a", "20140730a",
    "20140917a", "20141029a", "20141217a",
    "20130130a", "20130320a", "20130501a", "20130619a", "20130731a",
    "20130918a", "20131030a", "20131218a",
    "20120125a", "20120313a", "20120425a", "20120620a", "20120801a",
    "20120913a", "20121024a", "20121212a",
    "20110126a", "20110315a", "20110427a", "20110622a", "20110809a",
    "20110921a", "20111102a", "20111213a",
    "20100127a", "20100316a", "20100428a", "20100623a", "20100810a",
    "20100921a", "20101103a", "20101214a",
    "20090128a", "20090318a", "20090429a", "20090624a", "20090812a",
    "20090923a", "20091104a", "20091216a",
    "20080122b", "20080130a", "20080318a", "20080430a", "20080625a",
    "20080805a", "20080916a", "20081008a", "20081029a", "20081216b",
    "20070131a", "20070321a", "20070509a", "20070618a", "20070807a",
    "20070817b", "20070918a", "20071031a", "20071211a"
  ),
  ".htm"
)
##Prepare metadata for extraction and create dataframe##
##extract year of publication from statement release date, create data frame w date and URL ##
# str_extract() and substr() are vectorized, so the original element-by-element
# loop (which also grew statement.dates/year one entry at a time) is replaced
# with two whole-vector calls producing identical values.
statement.dates <- str_extract(links, "[[:digit:]]+")  # first digit run = yyyymmdd
year <- substr(statement.dates, 1, 4)                  # publication year
reports <- data.frame(year, statement.dates, links)
# Convert factors to characters
reports %<>% mutate_if(is.factor, as.character) %>% arrange(statement.dates)
## Data extraction via web-scraping ##
## loop through statement links and scrape contents from Fed website ##
## Discard fluff from content like prelim paragraphs ##
library(httr)
#httr_config <- config(ssl_cipher_list = "DEFAULT@SECLEVEL=1")
#res <- with_config(config = httr_config, GET(reports$links[2]))
statement.content<-NULL
statement.length<-NULL
for(i in seq(from=1, to=length(reports$links))) {
stm.url<-content(GET(reports$links[i]))
stm.tree<-htmlTreeParse(stm.url,useInternal=TRUE )
stm.tree.parse<-unlist(xpathApply(stm.tree, path="//p", fun=xmlValue))
n<-(which(!is.na(str_locate(stm.tree.parse, "release")))+1)[1]
l<-length(stm.tree.parse)-1
# Condense separate paragraphs into one element per statement date
reports$statement.content[i]<-paste(stm.tree.parse[n:l], collapse = "")
# Remove line breaks
reports$statement.content[i]<-gsub("\r?\n|\r"," ",reports$statement.content[i])
#reports$statement.content[i]<-gsub("\\.+\\;+\\,+","",reports$statement.content[i])
# Count number of characters per statement
reports$statement.length[i]<-nchar(reports$statement.content[i])
#reports$statement.length[i]<-wordcount(reports$statement.content[i], sep = " ", count.function = sum)
}
str(reports)
# Create R data object
saveRDS(reports, file = "fomc_data.rds")
#data cleansing#
# Correct the date for one statement, because the URL is not in sync with the actual date inside the statement content
reports$statement.dates[match(c("20070618"),reports$statement.dates)]<-"20070628"
##merge fomc dataset with reports dataset##
d4<-readRDS(file = "fomc_data.rds")
dim(d4)
str(d4)
classificationFile = "https://raw.githubusercontent.com/completegraph/DATA607FINAL/master/Code/Classification_FOMC_Statements.csv"
cls = read_csv(classificationFile , col_types = cols( Date = col_character() ) )
cls %>% rename( Economic.Growth = "Economic Growth", Employment.Growth = "Employment Growth", Medium.Term.Rate = "Medium Term Rate", Policy.Rate = "Policy Rate") -> cls
str(cls)
#merge the FOMC data and classification data#
d4 %>% inner_join( cls , by = c("statement.dates" = "Date")) %>%
mutate( date_mdy = mdy(Date2)) %>%
select(Index,
year ,
statement.dates,
links,
statement.content,
statement.length ,
date_mdy,
Economic.Growth,
Employment.Growth,
Inflation,
Medium.Term.Rate,
Policy.Rate ) -> mgData
str(mgData)
mgData %>% select( Index, date_mdy, Economic.Growth, Employment.Growth, Inflation, Medium.Term.Rate, Policy.Rate) %>% kable() %>% kable_styling(bootstrap_options = c("hover", "striped")) %>%
scroll_box(width = "90%", height = "300px")
## export the merged data frame! ##
rds_filename = "fomc_merged_data_v2.rds"
saveRDS(mgData, file = rds_filename)
## EXPLORATORY ANALYSIS ##
##Analyze FOMC statement word lengths and frequency##
# Compute total statement length per year by aggregating across individual statements
yearly.length<-reports%>% group_by(year) %>% summarize(words.per.year=sum(statement.length))
yearly.length
ggplot(yearly.length, aes(x=yearly.length$year,y=yearly.length$words.per.year))+
geom_bar(stat="identity",fill="darkblue", colour="black") +
coord_flip()+xlab("Year")+ylab("Statement Length")
sample<-reports%>%filter(reports$statement.dates=="20140319")
sample[,4]
str_count(sample, pattern="inflation")
p<-ggplot(reports, aes(x=year,y=statement.length))+
geom_point(stat="identity",color=statement.dates)+
scale_fill_brewer(palette="Pastel1")+
theme(legend.position="right")+xlab("Year") + ylab("Length of Statement")
p
p + ggplot2::annotate("text", x = 4,y = 5000,
label = "Bernanke", family="serif", fontface="bold",
colour="blue", size=4)+
ggplot2::annotate("text", x=10, y=5500, label="Yellen", family="serif", fontface="bold",
colour="darkred",size=4)+
ggplot2::annotate("text", x=13, y=3600, label="Powell", family="serif",
fontface="bold", colour="black",size=4)+
ggplot2::annotate("segment", x = 0, xend = 8.1, y = 2700, yend = 6500, colour = "blue",
size=1, arrow=arrow(ends="both"))+
ggplot2::annotate("segment", x = 8.1, xend = 12.1, y = 6500, yend = 3200,
colour = "darkred", size=1, arrow=arrow(ends="both"))+
ggplot2::annotate("segment", x = 12.1, xend = 14, y = 3200, yend = 3200,
colour = "black", size=1, arrow=arrow(ends="both"))
##custom stop words like "committee" that do not provide insight into sentiment##
words<-c("committee", "ben", "geithner", "bernanke",
"timothy", "hoenig", "thomas", "donald", "kevin", "mishkin",
"kroszner", "kohn", "charles", "frederic")
lexicon<-c("Custom")
my.stop_words<-data.frame(words, lexicon)
colnames(my.stop_words)<-c("word","lexicon")
new.stop_words <- rbind(my.stop_words, stop_words)
new.stop_words$word<-as.character(new.stop_words$word)
new.stop_words$lexicon<-as.character(new.stop_words$lexicon)
head(new.stop_words)
##cleanse data## remove punctuaction, white space, stop words, etc...##
report.words<-reports %>%
mutate(date = statement.dates, year = year, text= statement.content) %>%
unnest(text) %>% unnest_tokens(word, text) %>%
mutate(word = stripWhitespace(gsub("[^A-Za-z ]"," ",word))) %>%
filter(word != "") %>% filter(word != " ") %>%
anti_join(new.stop_words)%>%
count(date, year, word, sort = TRUE)%>%
mutate(frequency = n) %>%
select(date, year, word, frequency)
# Verify the count for the word "inflation" during the statements published in 2007
report.words%>%filter(year=='2007', word=='inflation')
# Rank most frequent words by year
f_text<-report.words%>% group_by(year,word) %>%
summarize(total=sum(frequency))%>%
arrange(year,desc(total),word)%>%
mutate(rank=row_number())%>%
ungroup() %>%
arrange(rank,year)
# Select the top 10 ranked words per year
topWords <- f_text %>%
filter(rank<11)%>%
arrange(year,rank)
print(topWords)
# Graph top 10 most frequent words by year
gg <- ggplot(head(topWords, 130), aes(y=total,x=reorder(word,rank))) +
geom_col(fill="#27408b") +
facet_wrap(~year,scales="free", ncol=3)+
coord_flip()+theme_ridges(font_size=11) +
labs(x="",y="",title="Most Frequent Words in FOMC Statements grouped by years (2007 - 2019)")
gg
##explore economic attributes##
mgData<-readRDS(file = "fomc_merged_data_v2.rds")
gEcon <- ggplot(data=mgData, aes(x=Economic.Growth, fill=Economic.Growth)) +
geom_bar() + theme(legend.position = "none")
gEmp <- ggplot(data=mgData, aes(x=Employment.Growth, fill=Employment.Growth)) +
geom_bar() + theme(legend.position = "none")
gInf <- ggplot(data=mgData, aes(x=Inflation, fill=Inflation)) +
geom_bar() + theme(legend.position = "none")
gRate <- ggplot(data=mgData, aes(x=Medium.Term.Rate, fill=Medium.Term.Rate)) +
geom_bar() + theme(legend.position = "none")
gPolicy <- ggplot(data=mgData, aes(x=Policy.Rate, fill=Policy.Rate)) +
geom_bar() + theme(legend.position = "none")
grid.arrange(gEcon, gEmp, gInf, gRate, gPolicy, ncol=3, nrow=2 )
##correlation/covariation of attributes##
mgData %>% select(Economic.Growth:Policy.Rate) -> catData # categorical data
cv = matrix(rep(0,25), nrow=5, ncol=5)
for(idx in 1:5){
for(jdx in 1:5){
cv[idx,jdx] = cramerV(catData[,idx], catData[,jdx])
}
}
rownames( cv ) = colnames(catData)
colnames( cv ) = colnames(catData)
ggcorrplot(cv, lab=TRUE, ggtheme = ggplot2::theme_classic(), colors=c("violet", "white", "lightgreen")) +
ggtitle("CramerV Matrix", subtitle="Classification Attributes Comparison")
##time series##
DGS10<-read.csv("https://raw.githubusercontent.com/DataScienceAR/Cuny-Assignments/master/Data-607/Data-Sets/DGS10.csv",stringsAsFactors = FALSE)
str(DGS10)
DGS10$DATE<- as_date(DGS10$DATE)
DGS10$DGS10<-as.numeric(DGS10$DGS10)
# Analysis of 10-Year Treasury Constant Maturity Rate
ggplot(data = DGS10)+
aes(x=DATE,y=`DGS10`)+
geom_line(size=.98,color="steelblue")+
labs(x="Date",y="Percent",title="10 Year Constant Maturity Rate")+
theme(panel.background = element_rect(fill = "white"))
# Analysis of Russell 3000® Total Market Index
RU3000TR<-read.csv("https://raw.githubusercontent.com/DataScienceAR/Cuny-Assignments/master/Data-607/Data-Sets/RU3000TR.csv",stringsAsFactors = FALSE)
str(RU3000TR)
RU3000TR$DATE<- as_date(RU3000TR$DATE)
RU3000TR$RU3000TR<-as.numeric(RU3000TR$RU3000TR)
ggplot(data = RU3000TR)+
aes(x=DATE,y=`RU3000TR`)+
geom_line(size=.98,color="steelblue")+
labs(x="Date",y="Percent",title="Russell 3000® Total Market Index")+
theme(panel.background = element_rect(fill = "white"))
# Analysis of Russell 1000® Total Market Index
RU1000TR<-read.csv("https://raw.githubusercontent.com/DataScienceAR/Cuny-Assignments/master/Data-607/Data-Sets/RU1000TR.csv",stringsAsFactors = FALSE)
str(RU1000TR)
RU1000TR$DATE<- as_date(RU1000TR$DATE)
RU1000TR$RU1000TR<-as.numeric(RU1000TR$RU1000TR)
ggplot(data = RU1000TR)+
aes(x=DATE,y=`RU1000TR`)+
geom_line(size=.98,color="steelblue")+
labs(x="Date",y="Percent",title="Russell 1000® Total Market Index")+
theme(panel.background = element_rect(fill = "white"))
# Analysis of Federal Funds Target Range
FEDTARGET<-read.csv("https://raw.githubusercontent.com/DataScienceAR/Cuny-Assignments/master/Data-607/Data-Sets/FEDTARGET.csv",stringsAsFactors = FALSE)
str(FEDTARGET)
FEDTARGET$DATE<- as.Date(strptime(FEDTARGET$DATE,format="%m/%d/%Y"),format="%Y-%m-%d")
FEDTARGET$Percent<-as.numeric(FEDTARGET$Percent)
ggplot(data = FEDTARGET)+
aes(x=DATE,y=`Percent`,color=Type)+
geom_line(size=.98)+
labs(x="Date",y="Percent",title="Federal Funds Target Range")+
theme(panel.background = element_rect(fill = "white"))
##Text Classification##
fomc_data <-readRDS(file = "fomc_merged_data_v2.rds")
head(select(fomc_data, Index,year,statement.dates,statement.length,date_mdy,
Employment.Growth,Economic.Growth,Inflation,Medium.Term.Rate,Policy.Rate))
#randomize data#
# Shuffle rows (seeded for reproducibility) before the train/test split below.
set.seed(1234567)
fomc_Rand <- fomc_data[sample(nrow(fomc_data)),]
customStopWords <- c("the federal open market committee", "committee")
# Fix: str_replace_all() recycles an unnamed length-2 pattern vector across the
# rows of statement.content, so each statement had only ONE of the two phrases
# removed (with a recycling warning). Collapsing the phrases into a single
# alternation removes both from every row; the longer phrase is listed first so
# it takes precedence over the bare "committee".
fomc_dataX <- fomc_Rand %>%
  mutate(statement.content = tolower(statement.content)) %>%
  mutate(statement.content = str_replace_all(statement.content,
                                             paste(customStopWords, collapse = "|"),
                                             ""))
# form a corpus
corpus <- VCorpus(VectorSource(fomc_dataX$statement.content))
# Remove Punctuation
corpus <- tm_map(corpus, content_transformer(removePunctuation))
# Remove numbers
corpus <- tm_map(corpus, removeNumbers)
# Convert to lower case
corpus <- tm_map(corpus, content_transformer(tolower))
# Remove stop words
corpus <- tm_map(corpus, content_transformer(removeWords), stopwords("english"))
##Stemming
corpus <- tm_map(corpus, stemDocument)
# Remove Whitespace
corpus <- tm_map(corpus, stripWhitespace)
# Create Document Term Matrix
dtm <- DocumentTermMatrix(corpus)
# handle sparsity
corpusX <- removeSparseTerms(dtm, 0.30)
# convert to matrix
data_matrix <- as.matrix(corpusX)
#medium term rate classification#
mRate <- data_matrix
# attach the 'medium.term.rate' column
mRate_matrix <- cbind(mRate, fomc_dataX$Medium.Term.Rate)
# rename it to 'tone'
colnames(mRate_matrix)[ncol(mRate_matrix)] <- "tone"
# convert to data frame
mRateData <- as.data.frame(mRate_matrix)
# convert 'tone' to lower case and make it a factor column as well
mRateData$tone <- as.factor(tolower(mRateData$tone))
#partition into test and train data#
mRate_n <- nrow(mRateData)
mRateTrainVolume <- round(mRate_n * 0.7)
set.seed(314)
mRateTrainIndex <- sample(mRate_n, mRateTrainVolume)
mRateTrain <- mRateData[mRateTrainIndex,]
mRateTest <- mRateData[-mRateTrainIndex,]
names(mRateTrain)
##need to work on this model##
mRateModel <- train(tone ~., data = mRateTrain, method = 'svmLinear3')
##Sentiment Analysis##
fomcStatements <-readRDS(file = "fomc_merged_data_v2.rds") %>%
select(statement.dates, statement.content)
fomcX <- fomcStatements %>%
mutate(date = statement.dates, year = as.numeric(str_extract(statement.dates,'\\d{4}')),
text= statement.content)%>%
select(date, year, text)
# Sentiment analysis with Loughran-Mcdonald dictionary
sentiment <- analyzeSentiment(fomcX$text, language = "english", aggregate = fomcX$year,
removeStopwords = TRUE, stemming = TRUE,
rules=list("SentimentLM"=list(ruleSentiment,
loadDictionaryLM())))
summary(sentiment)
# Table showing breakdown of Sentiments
table(convertToDirection(sentiment$SentimentLM))
# Line plot to visualize the evolution of sentiment scores.
plotSentiment(sentiment, xlab="Tone")
Sentiment<-data.frame(fomcX$date,fomcX$year,sentiment$SentimentLM,
convertToDirection(sentiment$SentimentLM))
names(Sentiment)<-(c("FOMC_Date","FOMC_Year","Sentiment_Score","Sentiment"))
str(Sentiment)
# Change the date format to Ymd
Sentiment$FOMC_Date<- ymd(Sentiment$FOMC_Date)
Sentiment$FOMC_Year<- as.numeric(Sentiment$FOMC_Year)
str(Sentiment)
# Distribution of Sentiment Score for period of 2007 to 2019
ggplot(Sentiment,aes(x=Sentiment_Score))+
  geom_histogram(binwidth =.0125,color="black",fill="lightblue")+
  # axis label typo fixed: "Setiment" -> "Sentiment"
  labs(x="Sentiment Score",y="Frequency",title="Sentiment Score Distribution from 2007 to 2019")+
  theme(panel.background = element_rect(fill = "white"))
# Sentiment Score Trend
ggplot(data = Sentiment)+
aes(x=FOMC_Date,y=Sentiment_Score)+
geom_line(size=.98,color="firebrick")+
labs(x="FOMC Date",y="Sentiment Score",title="Sentiment Score trend over the period of 2007 to 2019")+
theme(panel.background = element_rect(fill = "white"))
# Scatter plot of score vs Date (Grouped)
ggplot(Sentiment,aes(x=FOMC_Date,y=Sentiment_Score,color=Sentiment))+
geom_point()+
labs(x="FOMC Date",y="Sentiment Score",title="Sentiments spread over the period of 2007 to 2019")+
theme(panel.background = element_rect(fill = "white"))
# Exporting data frame to RDS
## Changing the Date format
Sentiment$FOMC_Date<-format(Sentiment$FOMC_Date, format = "%Y%m%d")
## Exporting to .RDS
saveRDS(Sentiment,"SentimentDF.rds")
##Financial Impact of Sentiment##
#load all data frames
mgData<-readRDS(file = "fomc_merged_data_v2.rds")
sData <- readRDS( file = "SentimentDF.rds")
file_fred_ru1000tr = "https://raw.githubusercontent.com/completegraph/DATA607FINAL/master/DATA/FRED_RU1000TR.csv"
ru1000tr = read_csv(file_fred_ru1000tr,
col_types = cols(DATE=col_character(),
RU1000TR = col_double() ) )
# Generate a lubridate date column to join with the FOMC data.
# -----------------------------------------------------------------
ru1000tr %>% mutate( date_mdy = lubridate::ymd( DATE ) )-> ruData
#z_ru_daily = (RU1000TR - mean(RU1000TR, na.rm=TRUE))/sd(RU1000TR, na.rm = TRUE )
# Second, join the data:
# Since this is a 2-way inner join, we start with the FOMC statement data
# and join it to the sentiment data by date string (yyyymmdd)
# -------------------------------------------------------------------------
mgData %>% inner_join(sData, by = c( "statement.dates" = "FOMC_Date")) -> msData
# Join the sentiment-FOMC data to the Russell 1000 Index data from FRED
# Make sure to add a Z-score for each of the time series: sentiment and Rusell index values
# Save the raw data and normalized data by FOMC data.
# ----------------------------------------------------------------------------------
msEQdata = msData %>% left_join(ruData, by = c("date_mdy" = "date_mdy") ) %>%
select( date_mdy, Sentiment_Score, RU1000TR ) %>%
mutate( z_ru_fomc = (RU1000TR - mean(RU1000TR, na.rm = TRUE) ) / sd( RU1000TR, na.rm=TRUE ) ,
z_sentiment = ( Sentiment_Score - mean( Sentiment_Score, na.rm = TRUE) ) /
sd( Sentiment_Score, na.rm=TRUE) )
msEQdata %>% mutate( logEquity = log(RU1000TR) ) %>%
mutate( z_logEquity = ( logEquity - mean(logEquity) )/ sd( logEquity ) ) -> msEQdata
msEQdata %>% kable() %>% scroll_box(width="100%", height="200px")
#Sentiment vs Russell 1000
ggplot() +
geom_line(data=msEQdata, aes(x=date_mdy, y=Sentiment_Score) , color = "red" ) +
geom_line(data=msEQdata, aes(x=date_mdy, y=RU1000TR), color="green") +
ggtitle("Sentiment vs. Russell 1000 Equity Level", subtitle="Not usable without fixes")
ggplot() +
geom_line(data=msEQdata, aes(x=date_mdy, y=z_sentiment) , color = "red" ) +
geom_line(data=msEQdata, aes(x=date_mdy, y=z_ru_fomc), color="green") +
ggtitle("Scaled Sentiment vs. Scaled Equity Index", subtitle = "Nearly There...")
ggplot() +
geom_line(data=msEQdata, aes(x=date_mdy, y=z_sentiment) , color = "red" ) +
geom_line(data=msEQdata, aes(x=date_mdy, y=z_logEquity), color="green") +
ggtitle("Scaled-Sentiment vs. Scaled Log Equity Price")
##regression of sentiment##
mod1 = lm( z_logEquity ~ z_sentiment, data=msEQdata[2:102,])
summary(mod1)
#scatterplot of regressed values with regression line to study the model fit##
ggplot(data=msEQdata[2:102,], aes(x=z_sentiment, y=z_logEquity) ) +
geom_point() +
geom_smooth(method=lm) +
ggtitle("ScatterPlot of Fitted Regression Model", subtitle="X=Z-Sentiment, Y=Z-LogRussell 1000 (2007-2019)")
###oil analysis##
# NOTE(review): hard-coded absolute setwd() makes the script machine-specific;
# kept for behavior, but a relative path would be preferable.
setwd('C:/Users/creeg/Downloads')
# Daily WTI spot prices; Date parsed to a proper Date column for joining.
wti <- read.csv("wtid.csv", header = TRUE)
wti$Date <- ymd(wti$Date)
wti %>% mutate( date_mdy = lubridate::ymd( Date ) )-> wtiData
##oil prices, equity prices, sentiment score (all standardized to z-score) in one table##
# Fix: by = ("x" = "y") was an accidental assignment-in-parentheses (it created
# a stray global variable and degenerated to by = "date_mdy"); c("x" = "y") is
# the intended named join specification.
msEQWTIdata <- msEQdata %>% inner_join(wtiData, by = c("date_mdy" = "date_mdy")) %>%
  select( date_mdy, Sentiment_Score, z_ru_fomc, z_sentiment, logEquity, z_logEquity
          ,Price) %>%
  mutate(z_wti_price = (Price - mean(Price, na.rm = TRUE) ) / sd( Price, na.rm = TRUE))
# Log price and its z-score, appended to the same table.
msEQWTIdata %>% mutate( logWTI = log(Price) ) %>%
  mutate( z_log_wti_price = ( logWTI - mean(logWTI) )/ sd( logWTI ) ) -> msEQWTIdata
##add 10 year treasury rates##
# Re-derive a join key from the DGS10 series loaded earlier. The Date column
# takes a round trip through a "%Y%m%d" string before being re-parsed; this is
# redundant but preserved because it yields the same date_mdy values.
TSY10 <- DGS10
TSY10$DATE <- ymd(TSY10$DATE)
TSY10$DATE <- format(TSY10$DATE, format = "%Y%m%d")
TSY10 %>% mutate( date_mdy = lubridate::ymd( DATE ) )-> TSY10data
names(msEQWTIdata)
names(TSY10data)
# Fix: proper named join spec c("x" = "y") instead of the accidental
# assignment-in-parentheses form ("x" = "y").
msTSYdata <- msEQWTIdata %>% inner_join(TSY10data, by = c("date_mdy" = "date_mdy")) %>%
  select( date_mdy, Sentiment_Score, z_ru_fomc, z_sentiment, logEquity, z_logEquity
          ,z_wti_price, logWTI, z_log_wti_price, DGS10) %>%
  mutate(z_tsy10 = (DGS10 - mean(DGS10, na.rm = TRUE) ) / sd( DGS10, na.rm = TRUE))
msTSYdata %>% mutate( logTSY10 = log(DGS10) ) %>%
  mutate( z_log_TSY10 = ( logTSY10 - mean(logTSY10) )/ sd( logTSY10 ) ) -> msTSYdata
# Diagnostics; these print NA if the coerced series contain missing values.
mean(TSY10$DGS10)
sd(TSY10$DGS10)
mean(wti$Price)
sd(wti$Price)
##add 3M LIBOR rates USD##
libor <- read.csv("3MLIBOR.csv", header = TRUE)
head(libor)
libor
# Coercion turns any non-numeric entries in the series into NA.
libor$USD3MTD156N <- as.numeric(libor$USD3MTD156N)
libor$DATE <- ymd(libor$DATE)
libor %>% mutate( date_mdy = lubridate::ymd( DATE ) )-> libordata
names(msTSYdata)
# Fix: the object created above is `libordata`; `liborData` did not exist and
# this line errored at run time.
names(libordata)
# Fix: proper named join spec c("x" = "y") instead of the accidental
# assignment-in-parentheses form ("x" = "y").
msfinaldata <- msTSYdata %>% inner_join(libordata, by = c("date_mdy" = "date_mdy")) %>%
  select( date_mdy, Sentiment_Score, z_ru_fomc, z_sentiment, logEquity, z_logEquity
          ,z_wti_price, logWTI, z_log_wti_price, z_tsy10, logTSY10, z_log_TSY10, USD3MTD156N) %>%
  mutate(z_libor = (USD3MTD156N - mean(USD3MTD156N, na.rm = TRUE) ) / sd( USD3MTD156N, na.rm = TRUE))
msfinaldata %>% mutate( log3mLibor = log(USD3MTD156N) ) %>%
  mutate( z_log_3M_Libor = ( log3mLibor - mean(log3mLibor) )/ sd( log3mLibor ) ) -> msfinaldata
##brent oil test##
brent <- read.csv("brent.csv", header = TRUE)
names(brent)
class(brent$Price)
# Duplicate Price under the name `brent` — the column kept by the select() below.
brent$brent <- brent$Price
class(brent$brent)
brent$Date <- ymd(brent$Date)
brent %>% mutate( date_mdy = lubridate::ymd( Date ) )-> brentdata
names(msfinaldata)
# Fix: proper named join spec c("x" = "y") instead of the accidental
# assignment-in-parentheses form ("x" = "y").
msfinaldata1 <- msfinaldata %>% inner_join(brentdata, by = c("date_mdy" = "date_mdy")) %>%
  select( date_mdy, Sentiment_Score, z_ru_fomc, z_sentiment, logEquity, z_logEquity
          ,z_wti_price, logWTI, z_log_wti_price, z_tsy10, logTSY10, z_log_TSY10
          ,z_libor, log3mLibor, z_log_3M_Libor, brent) %>%
  mutate(z_brent = (brent - mean(brent, na.rm = TRUE) ) / sd( brent, na.rm = TRUE))
# Fixes: the select() above dropped `Price`, so log() must use `brent`, and the
# result must flow back into msfinaldata1 (previously it clobbered msfinaldata,
# leaving msfinaldata1$z_log_brent NULL before the export). write_xlsx() comes
# from writexl, which was commented out.
library(writexl)
msfinaldata1 %>% mutate( logbrent = log(brent) ) %>%
  mutate( z_log_brent = ( logbrent - mean(logbrent) )/ sd( logbrent ) ) -> msfinaldata1
msfinaldata1$z_log_brent
head(msfinaldata1)
write_xlsx(msfinaldata1, "msfinal1.xlsx")
|
library(reshape2)
# Tidy the UCI HAR dataset: keep mean/std measurements, merge train+test,
# label activities, and average each measurement per subject/activity.
# Reading activity labels and features
labels <- read.table("UCI HAR Dataset/activity_labels.txt")
labels[,2] <- as.character(labels[,2])
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2])
# Extracts only the measurements on the mean and standard deviation for each measurement.
featuresWanted <- grep(".*mean.*|.*std.*", features[,2])
featuresWanted.names <- features[featuresWanted,2]
# Normalize the names (consistent `<-`; the original mixed `=` and `<-` for the
# same variable).
featuresWanted.names <- gsub('-mean', 'Mean', featuresWanted.names)
featuresWanted.names <- gsub('-std', 'Std', featuresWanted.names)
featuresWanted.names <- gsub('[-()]', '', featuresWanted.names)
# Reading the datasets; the [featuresWanted] subset keeps only mean/std columns.
train <- read.table("UCI HAR Dataset/train/X_train.txt")[featuresWanted]
trainActivities <- read.table("UCI HAR Dataset/train/Y_train.txt")
trainSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind(trainSubjects, trainActivities, train)
test <- read.table("UCI HAR Dataset/test/X_test.txt")[featuresWanted]
testActivities <- read.table("UCI HAR Dataset/test/Y_test.txt")
testSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(testSubjects, testActivities, test)
# Merging + labeling
allData <- rbind(train, test)
colnames(allData) <- c("subject", "activity", featuresWanted.names)
# Activities and subjects as factors
allData$activity <- factor(allData$activity, levels = labels[,1], labels = labels[,2])
allData$subject <- as.factor(allData$subject)
# Long format, then the mean of every measurement per subject/activity pair.
allData.melted <- melt(allData, id = c("subject", "activity"))
allData.mean <- dcast(allData.melted, subject + activity ~ variable, mean)
write.table(allData.mean, "tidy.txt", row.names = FALSE, quote = FALSE) | /run_analysis.R | no_license | ianashkin/Getting-and-Cleaning-Data | R | false | false | 1,702 | r | library(reshape2)
# Reading activity labels and features
labels <- read.table("UCI HAR Dataset/activity_labels.txt")
labels[,2] <- as.character(labels[,2])
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2])
# Extracts only the measurements on the mean and standard deviation for each measurement.
featuresWanted <- grep(".*mean.*|.*std.*", features[,2])
featuresWanted.names <- features[featuresWanted,2]
# Normalize feature names: camel-case Mean/Std and strip '-', '(' and ')'.
featuresWanted.names = gsub('-mean', 'Mean', featuresWanted.names)
featuresWanted.names = gsub('-std', 'Std', featuresWanted.names)
featuresWanted.names <- gsub('[-()]', '', featuresWanted.names)
# Reading the datasets
# The [featuresWanted] column subset keeps only the mean/std measurements.
train <- read.table("UCI HAR Dataset/train/X_train.txt")[featuresWanted]
trainActivities <- read.table("UCI HAR Dataset/train/Y_train.txt")
trainSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind(trainSubjects, trainActivities, train)
test <- read.table("UCI HAR Dataset/test/X_test.txt")[featuresWanted]
testActivities <- read.table("UCI HAR Dataset/test/Y_test.txt")
testSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(testSubjects, testActivities, test)
# Merging + labeling
allData <- rbind(train, test)
colnames(allData) <- c("subject", "activity", featuresWanted.names)
# Activities and subjects as factors
allData$activity <- factor(allData$activity, levels = labels[,1], labels = labels[,2])
allData$subject <- as.factor(allData$subject)
# Average every retained measurement per subject/activity and write the tidy set.
allData.melted <- melt(allData, id = c("subject", "activity"))
allData.mean <- dcast(allData.melted, subject + activity ~ variable, mean)
write.table(allData.mean, "tidy.txt", row.names = FALSE, quote = FALSE)
# Quiz3ProgrammingAssignment2
#CODE:
# Pair of functions that cache the inverse of a matrix
## Usage: Pass the result of a makeCacheMatrix call to cacheSolve
## CACHE MATRIX FUNCTION
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse of `x`; reset to NULL whenever the matrix is replaced.
  cached_inv <- NULL
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cached_inv <<- inverse
  getinverse <- function() cached_inv
  # Expose the four accessors as a named list (the "cache matrix" object).
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
#' Compute and cache the inverse of a matrix
# CACHE SOLVE FUNCTION
cacheSolve <- function(x, ...) {
  # Return the cached inverse when one has already been computed for `x`.
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached matrix inverse")
  }
  cached
}
#[makeCacheMatrix.zip](https://github.com/anyaborces/Quiz3ProgrammingAssignment2/files/6798333/makeCacheMatrix.zip)
#Write the following functions:
#makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
#cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
| /makeCacheMatrix.R | no_license | anyaborces/Quiz3ProgrammingAssignment2 | R | false | false | 1,277 | r | # Quiz3ProgrammingAssignment2
#CODE:
# Pair of functions that cache the inverse of a matrix
## Usage: Pass the result of a makeCacheMatrix call to cacheSolve
## CACHE MATRIX FUNCTION
makeCacheMatrix <- function(x = matrix()) {
  # Holds the memoised inverse of `x`; NULL means "not computed yet".
  inverse_store <- NULL
  # Replace the stored matrix and drop any stale cached inverse.
  set <- function(y) {
    x <<- y
    inverse_store <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) inverse_store <<- inverse
  getinverse <- function() inverse_store
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
#' Compute and cache the inverse of a matrix
# CACHE SOLVE FUNCTION
cacheSolve <- function(x, ...) {
  # Look up a previously computed inverse first.
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached matrix inverse")
    return(inv)
  }
  # Nothing cached yet: invert the stored matrix and memoise the result.
  result <- solve(x$get(), ...)
  x$setinverse(result)
  result
}
#[makeCacheMatrix.zip](https://github.com/anyaborces/Quiz3ProgrammingAssignment2/files/6798333/makeCacheMatrix.zip)
#Write the following functions:
#makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
#cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
|
#' Geom qq
#'
#' @rdname geom_qq
#' @export
#'
#' @param mapping Parameter to ggplot
#' @param data Parameter to ggplot
#' @param geom Parameter to ggplot
#' @param position Parameter to ggplot
#' @param ... Parameter to ggplot
#' @param distribution Quantile function for the theoretical distribution,
#'   defaults to `stats::qnorm`.
#' @param dparams List of additional parameters passed on to `distribution`.
#' @param line.p Vector of quantiles to use when fitting the Q-Q line;
#'   defaults to `c(.25, .75)`.
#' @param fullrange Should the q-q line span the full range of the plot, or just
#' the data
#' @param na.rm Parameter to ggplot
#' @param show.legend Parameter to ggplot
#' @param inherit.aes Parameter to ggplot
geom_qq_line <- function(mapping = NULL,
                         data = NULL,
                         geom = "path",
                         position = "identity",
                         ...,
                         distribution = stats::qnorm,
                         dparams = list(),
                         line.p = c(.25, .75),
                         fullrange = FALSE, na.rm = FALSE,
                         show.legend = NA,
                         inherit.aes = TRUE) {
  # Parameters forwarded to StatQqLine$compute_group (plus extras in `...`).
  stat_params <- list(
    distribution = distribution,
    dparams = dparams,
    na.rm = na.rm,
    line.p = line.p,
    fullrange = fullrange,
    ...
  )
  # Assemble the ggplot2 layer around the Q-Q line stat.
  layer(
    data = data,
    mapping = mapping,
    stat = StatQqLine,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = stat_params
  )
}
#' @export
#' @rdname geom_qq
# stat_qq_line is an alias for geom_qq_line, mirroring ggplot2's convention
# of pairing each geom_* constructor with a stat_* counterpart.
stat_qq_line <- geom_qq_line
#' ggplot2-ggproto
#'
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
StatQqLine <- ggproto("StatQqLine", Stat,
  default_aes = aes(x = ..x.., y = ..y..),
  required_aes = c("sample"),
  # Computes the two endpoints of a Q-Q reference line for one group:
  # the line through the theoretical/sample quantile pairs given by line.p.
  compute_group = function(data,
                           scales,
                           quantiles = NULL,
                           distribution = stats::qnorm,
                           dparams = list(),
                           na.rm = FALSE,
                           line.p = c(.25, .75),
                           fullrange = FALSE) {
    sample <- sort(data$sample)
    n <- length(sample)
    # Compute theoretical quantiles (evenly spaced probability points
    # when none are supplied explicitly).
    if (is.null(quantiles)) {
      quantiles <- stats::ppoints(n)
    } else {
      stopifnot(length(quantiles) == n)
    }
    theoretical <- do.call(
      distribution,
      c(list(p = quote(quantiles)), dparams)
    )
    if (length(line.p) != 2) {
      stop(
        "Cannot fit line quantiles ", line.p,
        ". Parameter line.p must have length 2.",
        call. = FALSE)
    }
    # Fit the line: theoretical quantiles on x, sample quantiles on y.
    x_coords <- do.call(distribution, c(list(p = line.p), dparams))
    y_coords <- stats::quantile(sample, line.p)
    slope <- diff(y_coords) / diff(x_coords)
    intercept <- y_coords[1L] - slope * x_coords[1L]
    # `&&` instead of `&`: both operands are scalars, and R >= 4.3 errors
    # on length > 1 inputs to `&&`/`||` anyway.
    if (fullrange && !is.null(scales$x$dimension)) {
      x <- scales$x$dimension()
    } else {
      x <- range(theoretical)
    }
    data.frame(x = x, y = slope * x + intercept)
  }
)
| /R/stat-qq-line.R | no_license | snewhouse/CEMiTool | R | false | false | 2,676 | r | #' Geom qq
#'
#' @rdname geom_qq
#' @export
#'
#' @param mapping Parameter to ggplot
#' @param data Parameter to ggplot
#' @param geom Parameter to ggplot
#' @param position Parameter to ggplot
#' @param ... Parameter to ggplot
#' @param distribution Quantile function for the theoretical distribution,
#'   defaults to `stats::qnorm`.
#' @param dparams List of additional parameters passed on to `distribution`.
#' @param line.p Vector of quantiles to use when fitting the Q-Q line;
#'   defaults to `c(.25, .75)`.
#' @param fullrange Should the q-q line span the full range of the plot, or just
#' the data
#' @param na.rm Parameter to ggplot
#' @param show.legend Parameter to ggplot
#' @param inherit.aes Parameter to ggplot
geom_qq_line <- function(mapping = NULL,
                         data = NULL,
                         geom = "path",
                         position = "identity",
                         ...,
                         distribution = stats::qnorm,
                         dparams = list(),
                         line.p = c(.25, .75),
                         fullrange = FALSE, na.rm = FALSE,
                         show.legend = NA,
                         inherit.aes = TRUE) {
  # Collect everything StatQqLine$compute_group needs, plus extras in `...`.
  qq_params <- list(
    distribution = distribution,
    dparams = dparams,
    na.rm = na.rm,
    line.p = line.p,
    fullrange = fullrange,
    ...
  )
  # Build the layer using the Q-Q line stat and the requested geom.
  layer(
    data = data,
    mapping = mapping,
    stat = StatQqLine,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = qq_params
  )
}
#' @export
#' @rdname geom_qq
# Alias: stat_qq_line and geom_qq_line are the same constructor, matching
# ggplot2's paired geom_*/stat_* naming convention.
stat_qq_line <- geom_qq_line
#' ggplot2-ggproto
#'
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
StatQqLine <- ggproto("StatQqLine", Stat,
  default_aes = aes(x = ..x.., y = ..y..),
  required_aes = c("sample"),
  # Computes the two endpoints of a Q-Q reference line for one group:
  # the line through the theoretical/sample quantile pairs given by line.p.
  compute_group = function(data,
                           scales,
                           quantiles = NULL,
                           distribution = stats::qnorm,
                           dparams = list(),
                           na.rm = FALSE,
                           line.p = c(.25, .75),
                           fullrange = FALSE) {
    sample <- sort(data$sample)
    n <- length(sample)
    # Compute theoretical quantiles (evenly spaced probability points
    # when none are supplied explicitly).
    if (is.null(quantiles)) {
      quantiles <- stats::ppoints(n)
    } else {
      stopifnot(length(quantiles) == n)
    }
    theoretical <- do.call(
      distribution,
      c(list(p = quote(quantiles)), dparams)
    )
    if (length(line.p) != 2) {
      stop(
        "Cannot fit line quantiles ", line.p,
        ". Parameter line.p must have length 2.",
        call. = FALSE)
    }
    # Fit the line: theoretical quantiles on x, sample quantiles on y.
    x_coords <- do.call(distribution, c(list(p = line.p), dparams))
    y_coords <- stats::quantile(sample, line.p)
    slope <- diff(y_coords) / diff(x_coords)
    intercept <- y_coords[1L] - slope * x_coords[1L]
    # `&&` instead of `&`: both operands are scalars, and R >= 4.3 errors
    # on length > 1 inputs to `&&`/`||` anyway.
    if (fullrange && !is.null(scales$x$dimension)) {
      x <- scales$x$dimension()
    } else {
      x <- range(theoretical)
    }
    data.frame(x = x, y = slope * x + intercept)
  }
)
|
# Demonstrates R's basic atomic types via typeof().
#integer (the L suffix makes the literal an integer)
x <- 2L
typeof(x)
#double
y <- 2.5
typeof(y)
#integer (repeat of the example above)
x <- 2L
typeof(x)
#double
y <- 2.5
typeof(y)
#double (3.0 is still a double; a decimal point alone does not change that)
t <- 3.0
typeof(t)
#complex (the `i` suffix creates an imaginary part)
z <- 3 + 2i
typeof(z)
#character
a <- "h"
typeof(a)
#logical (note: prefer TRUE/FALSE over the reassignable T/F shortcuts)
q <- T
typeof(q)
q2 <- F
typeof(q2)
#double -- 5 + 4 evaluates to the double 9, not a complex number
aa <- 5 + 4
typeof(aa)
typeof(z)
| /R-Beginner/tutorialR.R | no_license | Kayan-dev/R | R | false | false | 335 | r |
# Demonstrates R's basic atomic types via typeof().
#integer (the L suffix makes the literal an integer)
x <- 2L
typeof(x)
#double
y <- 2.5
typeof(y)
#integer (repeat of the example above)
x <- 2L
typeof(x)
#double
y <- 2.5
typeof(y)
#double (3.0 is still a double; a decimal point alone does not change that)
t <- 3.0
typeof(t)
#complex (the `i` suffix creates an imaginary part)
z <- 3 + 2i
typeof(z)
#character
a <- "h"
typeof(a)
#logical (note: prefer TRUE/FALSE over the reassignable T/F shortcuts)
q <- T
typeof(q)
q2 <- F
typeof(q2)
#double -- 5 + 4 evaluates to the double 9, not a complex number
aa <- 5 + 4
typeof(aa)
typeof(z)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tools.R
\name{char_to_int}
\alias{char_to_int}
\title{Convert a character into integer vector}
\usage{
char_to_int(v)
}
\arguments{
\item{v}{a character vector.}
}
\value{
Return an integer vector.
}
\description{
Take the character vector \code{v} and convert it
into an integer vector.
}
\details{
None.
}
\keyword{internal}
| /man/char_to_int.Rd | no_license | cran/functClust | R | false | true | 425 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tools.R
\name{char_to_int}
\alias{char_to_int}
\title{Convert a character into integer vector}
\usage{
char_to_int(v)
}
\arguments{
\item{v}{a character vector.}
}
\value{
Return an integer vector.
}
\description{
Take the character vector \code{v} and convert it
into an integer vector.
}
\details{
None.
}
\keyword{internal}
|
# Aggregating Tagging Coverage for NOAA final report Maps
# 10/17/2019
# Bins tag-release coordinates onto a coarser grid and maps tagging effort
# along the Gulf coast (overview map plus Texas close-ups).
#Packages
library(tidyverse)
library(here)
library(sf)
library(rnaturalearth)
library(gridExtra)
#Data
tagging <- read_csv(here("data", "noaa_report", "tags_clean_final.csv"))
recaps <- read_csv(here("data", "noaa_report", "tags_recaps_clean_final.csv"))
# NOTE(review): `recaps` is loaded but never used below -- confirm whether
# it is still needed or can be dropped.
usa_poly <- ne_states("united states of america") %>% st_as_sf() %>% filter(region == "South")
mexico_poly <- ne_states("mexico") %>% st_as_sf()
#Reduce resolution and then group by lat/long coordinates, count the rows for each for number tagged
# Rounding to 2 decimal places pools nearby releases into a single point.
tag_bins <- tagging %>%
  mutate(long_decdeg = round(long_decdeg, 2),
         lat_decdeg = round(lat_decdeg, 2)) %>%
  group_by(long_decdeg, lat_decdeg) %>%
  summarise(n_tagged = n()) %>%
  st_as_sf(coords = c("long_decdeg", "lat_decdeg"), crs = 4326)
# Overview map of the binned tagging effort (point size = number tagged).
ggplot() +
  geom_sf(data = usa_poly) +
  geom_sf(data = tag_bins, aes(size = n_tagged), color = "royalblue", fill = "royalblue", alpha = 0.6) +
  coord_sf(xlim = c(-97.5, -82.5), ylim = c(25.8, 30.8)) +
  theme_bw() +
  ggtitle("Rounded to 2-digit(s)")
#Will want to do custom bins and shape sizes and add them to the dataframe
#Side-by-side comparison
# Left panel: raw (unbinned) release points for one slice of the coast.
no_agg <- tagging %>%
  filter(between(long_decdeg, left = -91.2, right = -89)) %>%
  st_as_sf(coords = c("long_decdeg", "lat_decdeg"), crs = 4326) %>%
  ggplot() +
  geom_sf(data = usa_poly) +
  geom_sf(color = "royalblue", fill = "royalblue", alpha = 0.6) +
  coord_sf(xlim = c(-91.2, -89), ylim = c(29, 30.5)) +
  theme_bw() + theme(legend.position = "bottom") +
  ggtitle("Original Data")
# Right panel: the same slice after the 2-decimal binning above.
agg_plot <- ggplot() +
  geom_sf(data = usa_poly) +
  geom_sf(data = tag_bins, aes(size = n_tagged), color = "royalblue", fill = "royalblue", alpha = 0.6, show.legend = FALSE) +
  coord_sf(xlim = c(-91.2, -89), ylim = c(29, 30.5)) +
  theme_bw() + theme(legend.position = "bottom") +
  ggtitle("Rounded to 100th of a Degree")
agg_plot
grid.arrange(grobs = list(no_agg, agg_plot), nrow = 1)
#### Tagging Areas ####
# Regional close-ups along the Texas coast, south to north.
s_texas <- ggplot() +
  geom_sf(data = usa_poly) +
  geom_sf(data = mexico_poly) +
  geom_sf(data = tag_bins, aes(size = n_tagged), alpha = 0.8, show.legend = FALSE) +
  coord_sf(xlim = c(-97.9, -96.4), ylim = c(26, 27.6)) +
  theme_bw() +
  ggtitle("South Texas")
s_texas
mid_texas <- ggplot() +
  geom_sf(data = usa_poly) +
  geom_sf(data = tag_bins, aes(size = n_tagged), alpha = 0.8, show.legend = FALSE) +
  coord_sf(xlim = c(-97.5, -96), ylim = c(27.6, 28.8)) +
  theme_bw() +
  ggtitle("Mid-Texas")
mid_texas
north_texas <- ggplot() +
  geom_sf(data = usa_poly) +
  geom_sf(data = tag_bins, aes(size = n_tagged), alpha = 0.8, show.legend = FALSE) +
  coord_sf(xlim = c(-96, -94), ylim = c(28.2, 30.2)) +
  theme_bw() +
  ggtitle("North Texas")
north_texas
#Better Shapefile
# NOTE(review): readOGR() is called with no arguments and will error at run
# time; rgdal is also retired (2023) -- use sf::st_read() with a path. TODO fix.
usa_coastline <- rgdal::readOGR()
| /2019_edits/R/mapping/tags_spatial_bins.R | no_license | adamkemberling/Seamap_offshore_modeling | R | false | false | 2,865 | r | # Aggregating Tagging Coverage for NOAA final report Maps
# 10/17/2019
# Bins tag-release coordinates onto a coarser grid and maps tagging effort
# along the Gulf coast (overview map plus Texas close-ups).
#Packages
library(tidyverse)
library(here)
library(sf)
library(rnaturalearth)
library(gridExtra)
#Data
tagging <- read_csv(here("data", "noaa_report", "tags_clean_final.csv"))
recaps <- read_csv(here("data", "noaa_report", "tags_recaps_clean_final.csv"))
# NOTE(review): `recaps` is loaded but never used below -- confirm whether
# it is still needed or can be dropped.
usa_poly <- ne_states("united states of america") %>% st_as_sf() %>% filter(region == "South")
mexico_poly <- ne_states("mexico") %>% st_as_sf()
#Reduce resolution and then group by lat/long coordinates, count the rows for each for number tagged
# Rounding to 2 decimal places pools nearby releases into a single point.
tag_bins <- tagging %>%
  mutate(long_decdeg = round(long_decdeg, 2),
         lat_decdeg = round(lat_decdeg, 2)) %>%
  group_by(long_decdeg, lat_decdeg) %>%
  summarise(n_tagged = n()) %>%
  st_as_sf(coords = c("long_decdeg", "lat_decdeg"), crs = 4326)
# Overview map of the binned tagging effort (point size = number tagged).
ggplot() +
  geom_sf(data = usa_poly) +
  geom_sf(data = tag_bins, aes(size = n_tagged), color = "royalblue", fill = "royalblue", alpha = 0.6) +
  coord_sf(xlim = c(-97.5, -82.5), ylim = c(25.8, 30.8)) +
  theme_bw() +
  ggtitle("Rounded to 2-digit(s)")
#Will want to do custom bins and shape sizes and add them to the dataframe
#Side-by-side comparison
# Left panel: raw (unbinned) release points for one slice of the coast.
no_agg <- tagging %>%
  filter(between(long_decdeg, left = -91.2, right = -89)) %>%
  st_as_sf(coords = c("long_decdeg", "lat_decdeg"), crs = 4326) %>%
  ggplot() +
  geom_sf(data = usa_poly) +
  geom_sf(color = "royalblue", fill = "royalblue", alpha = 0.6) +
  coord_sf(xlim = c(-91.2, -89), ylim = c(29, 30.5)) +
  theme_bw() + theme(legend.position = "bottom") +
  ggtitle("Original Data")
# Right panel: the same slice after the 2-decimal binning above.
agg_plot <- ggplot() +
  geom_sf(data = usa_poly) +
  geom_sf(data = tag_bins, aes(size = n_tagged), color = "royalblue", fill = "royalblue", alpha = 0.6, show.legend = FALSE) +
  coord_sf(xlim = c(-91.2, -89), ylim = c(29, 30.5)) +
  theme_bw() + theme(legend.position = "bottom") +
  ggtitle("Rounded to 100th of a Degree")
agg_plot
grid.arrange(grobs = list(no_agg, agg_plot), nrow = 1)
#### Tagging Areas ####
# Regional close-ups along the Texas coast, south to north.
s_texas <- ggplot() +
  geom_sf(data = usa_poly) +
  geom_sf(data = mexico_poly) +
  geom_sf(data = tag_bins, aes(size = n_tagged), alpha = 0.8, show.legend = FALSE) +
  coord_sf(xlim = c(-97.9, -96.4), ylim = c(26, 27.6)) +
  theme_bw() +
  ggtitle("South Texas")
s_texas
mid_texas <- ggplot() +
  geom_sf(data = usa_poly) +
  geom_sf(data = tag_bins, aes(size = n_tagged), alpha = 0.8, show.legend = FALSE) +
  coord_sf(xlim = c(-97.5, -96), ylim = c(27.6, 28.8)) +
  theme_bw() +
  ggtitle("Mid-Texas")
mid_texas
north_texas <- ggplot() +
  geom_sf(data = usa_poly) +
  geom_sf(data = tag_bins, aes(size = n_tagged), alpha = 0.8, show.legend = FALSE) +
  coord_sf(xlim = c(-96, -94), ylim = c(28.2, 30.2)) +
  theme_bw() +
  ggtitle("North Texas")
north_texas
#Better Shapefile
# NOTE(review): readOGR() is called with no arguments and will error at run
# time; rgdal is also retired (2023) -- use sf::st_read() with a path. TODO fix.
usa_coastline <- rgdal::readOGR()
|
# Balancing.R
# Oversamples the minority class of the training time series with OSTSC,
# scales the balanced features, and writes the result to CSV.
library(data.table)
library(OSTSC)

# Load train/test splits and drop the first (row-index) column.
# Idiom fix: use `<-` for assignment instead of `=` throughout.
train <- fread("E:/USA/Projects/Research/Conferance/Individual_subjects/train_wi1.csv")
test <- fread("E:/USA/Projects/Research/Conferance/Individual_subjects/test_final.csv")
train <- train[, -1]
test <- test[, -1]

# Balance the classes: columns 4-186 hold the feature series, column 3 the
# label. (Despite its name, `test_balanced` holds balanced *training* data.)
test_balanced <- OSTSC(sample = train[, 4:186], label = train[, 3])
x_train <- as.data.table(test_balanced$sample)
colMeans(x_train[, 5])  # spot-check one column mean before scaling

# Standardise the features and verify the means are ~0 afterwards.
x_train_scale <- scale(x_train)
x_train_scale <- as.data.table(x_train_scale)
colMeans(x_train_scale)

# Recombine labels with the scaled features and persist.
y_train <- as.data.table(test_balanced$label)
train_balanced_scaled <- cbind(y_train, x_train_scale)
write.csv(train_balanced_scaled, "train_balanced_scaled.csv")
| /w2/Balancing.R | no_license | jayshah5696/drowsy_driving | R | false | false | 618 | r | library(data.table)
library(OSTSC)
# Oversamples the minority class of the training time series with OSTSC,
# scales the balanced features, and writes the result to CSV.

# Load train/test splits and drop the first (row-index) column.
# Idiom fix: use `<-` for assignment instead of `=` throughout.
train <- fread("E:/USA/Projects/Research/Conferance/Individual_subjects/train_wi1.csv")
test <- fread("E:/USA/Projects/Research/Conferance/Individual_subjects/test_final.csv")
train <- train[, -1]
test <- test[, -1]

# Balance the classes: columns 4-186 hold the feature series, column 3 the
# label. (Despite its name, `test_balanced` holds balanced *training* data.)
test_balanced <- OSTSC(sample = train[, 4:186], label = train[, 3])
x_train <- as.data.table(test_balanced$sample)
colMeans(x_train[, 5])  # spot-check one column mean before scaling

# Standardise the features and verify the means are ~0 afterwards.
x_train_scale <- scale(x_train)
x_train_scale <- as.data.table(x_train_scale)
colMeans(x_train_scale)

# Recombine labels with the scaled features and persist.
y_train <- as.data.table(test_balanced$label)
train_balanced_scaled <- cbind(y_train, x_train_scale)
write.csv(train_balanced_scaled, "train_balanced_scaled.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-maps.R
\name{plot_facet_map}
\alias{plot_facet_map}
\title{Raster map with year facets}
\usage{
plot_facet_map(
df,
column = "est",
X = "X",
Y = "Y",
viridis_option = "C",
white_zero = FALSE,
low_fill = "Steel Blue 4",
mid_fill = "white",
high_fill = "Red 3",
na_colour = "red",
transform_col = no_trans,
raster_limits = NULL,
text_size = 4,
legend_position = "right"
)
}
\arguments{
\item{df}{Dataframe.}
\item{column}{Name of the column to be plotted.}
}
\description{
Raster map with year facets
}
| /man/plot_facet_map.Rd | no_license | pbs-assess/gfvelocities | R | false | true | 606 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-maps.R
\name{plot_facet_map}
\alias{plot_facet_map}
\title{Raster map with year facets}
\usage{
plot_facet_map(
df,
column = "est",
X = "X",
Y = "Y",
viridis_option = "C",
white_zero = FALSE,
low_fill = "Steel Blue 4",
mid_fill = "white",
high_fill = "Red 3",
na_colour = "red",
transform_col = no_trans,
raster_limits = NULL,
text_size = 4,
legend_position = "right"
)
}
\arguments{
\item{df}{Dataframe.}
\item{column}{Name column to be plotted.}
}
\description{
Raster map with year facets
}
|
# Plot 3: the three energy sub-metering channels across 2007-02-01/02.
library("data.table")
setwd("~/data/")
# Read the full power-consumption file; "?" encodes missing observations.
power_dt <- data.table::fread(input = "household_power_consumption.txt", na.strings = "?")
# Build a POSIXct timestamp from the separate Date and Time columns so the
# x axis is chronological (time zones handled by POSIXct).
power_dt[, dateTime := as.POSIXct(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S")]
# Restrict to the two target days (1st and 2nd of February 2007).
feb_days <- power_dt[(dateTime >= "2007-02-01") & (dateTime < "2007-02-03")]
png("plot3.png", width = 480, height = 480)
# Draw the three sub-metering series on one set of axes.
plot(feb_days[, dateTime], feb_days[, Sub_metering_1], type = "l", xlab = "", ylab = "Energy sub metering")
lines(feb_days[, dateTime], feb_days[, Sub_metering_2], col = "red")
lines(feb_days[, dateTime], feb_days[, Sub_metering_3], col = "blue")
legend("topright", col = c("black", "red", "blue"),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1), lwd = c(1, 1))
dev.off()
| /plot3.R | no_license | jatinder1979/ExData_Plotting1 | R | false | false | 842 | r | # To create plot 3: Submetering vs. datetimezones
library("data.table")
# Produces plot3.png: sub-metering channels 1-3 over 2007-02-01/02.
setwd("~/data/")
# Read the full power-consumption file; "?" encodes missing observations.
power_dt <- data.table::fread(input = "household_power_consumption.txt", na.strings = "?")
# Build a POSIXct timestamp from the separate Date and Time columns so the
# x axis is chronological (time zones handled by POSIXct).
power_dt[, dateTime := as.POSIXct(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S")]
# Restrict to the two target days (1st and 2nd of February 2007).
feb_days <- power_dt[(dateTime >= "2007-02-01") & (dateTime < "2007-02-03")]
png("plot3.png", width = 480, height = 480)
# Draw the three sub-metering series on one set of axes.
plot(feb_days[, dateTime], feb_days[, Sub_metering_1], type = "l", xlab = "", ylab = "Energy sub metering")
lines(feb_days[, dateTime], feb_days[, Sub_metering_2], col = "red")
lines(feb_days[, dateTime], feb_days[, Sub_metering_3], col = "blue")
legend("topright", col = c("black", "red", "blue"),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1), lwd = c(1, 1))
dev.off()
|
#' Generate the JDOQL part of a filter.
#'
#' This function is to generate the string in the JDOQL query for some given value of a filter.
#'
#' @param filtername A character which is for the responding variable name in Wildbook framework.
#'
#' @param filtervalues A vector of the value for the filter.
#'
#' @param logic A parameter which can be "&&" for the logical AND or "||" for the logical OR.
#'
#' @param bridge An operator to connect the name and the default value is "==".
#'
filterstring <-
  function(filtername, filtervalues, logic = "||", bridge = "==") {
    # Build the JDOQL filter clause for one variable.
    # filtername: variable name in the Wildbook framework.
    # filtervalues: vector of values to match; non-numeric values are quoted.
    # logic: "||" (logical OR) or "&&" (logical AND) joining per-value clauses.
    # bridge: comparison operator between name and value (default "==").
    # Returns NULL when no values are supplied.
    #
    # Bug fix: the missing()/NULL guard must run *before* touching
    # `filtervalues`; the old code evaluated is.numeric(filtervalues) first,
    # which errored when the argument was missing.
    if (missing(filtervalues) || is.null(filtervalues)) {
      return(NULL)
    }
    if (!is.numeric(filtervalues)) {
      # Quote non-numeric values so they appear as JDOQL string literals.
      filtervalues <- paste0("'", filtervalues, "'")
    }
    clause <- paste("(", filtername, bridge, filtervalues, ")", collapse = logic)
    paste0("(", clause, ")")
  }
#examples
#filterstring(filtername = "locationID", filtervalues = c("1","1a"))
| /R/filterstring.R | no_license | cran/RWildbook | R | false | false | 1,414 | r | #' Generate the JDOQL part of a filter.
#'
#' This function is to generate the string in the JDOQL query for some given value of a filter.
#'
#' @param filtername A character which is for the responding variable name in Wildbook framework.
#'
#' @param filtervalues A vector of the value for the filter.
#'
#' @param logic A parameter which can be "&&" for the logical AND or "||" for the logical OR.
#'
#' @param bridge An operator to connect the name and the default value is "==".
#'
filterstring <-
  function(filtername, filtervalues, logic = "||", bridge = "==") {
    # Build the JDOQL filter clause for one variable.
    # filtername: variable name in the Wildbook framework.
    # filtervalues: vector of values to match; non-numeric values are quoted.
    # logic: "||" (logical OR) or "&&" (logical AND) joining per-value clauses.
    # bridge: comparison operator between name and value (default "==").
    # Returns NULL when no values are supplied.
    #
    # Bug fix: the missing()/NULL guard must run *before* touching
    # `filtervalues`; the old code evaluated is.numeric(filtervalues) first,
    # which errored when the argument was missing.
    if (missing(filtervalues) || is.null(filtervalues)) {
      return(NULL)
    }
    if (!is.numeric(filtervalues)) {
      # Quote non-numeric values so they appear as JDOQL string literals.
      filtervalues <- paste0("'", filtervalues, "'")
    }
    clause <- paste("(", filtername, bridge, filtervalues, ")", collapse = logic)
    paste0("(", clause, ")")
  }
#examples
#filterstring(filtername = "locationID", filtervalues = c("1","1a"))
|
# Used to load the data, check the data and make some basic plots
hosp.data <- read.csv("hospital-data.csv", stringsAsFactors = FALSE)
outcome <- read.csv("outcome-of-care-measures.csv",colClasses = "character", stringsAsFactors = FALSE)
# Coerce column 11 to numeric; non-numeric entries become NA (a coercion
# warning is expected here).
outcome[,11]<-as.numeric(outcome[,11])
hist(outcome[,11])
# Quick structural checks: column names and the distinct states present.
names(hosp.data)
names(outcome)
valid.states <- unique(hosp.data[,"State"])
| /ProgAssignment3/Load.R | no_license | rpadebet/Programming-with-R | R | false | false | 377 | r |
# Used to load the data, check the data and make some basic plots
hosp.data <- read.csv("hospital-data.csv", stringsAsFactors = FALSE)
outcome <- read.csv("outcome-of-care-measures.csv",colClasses = "character", stringsAsFactors = FALSE)
outcome[,11]<-as.numeric(outcome[,11])
hist(outcome[,11])
names(hosp.data)
names(outcome)
valid.states <- unique(hosp.data[,"State"])
|
# Remap GTEx variant IDs in the mashr Whole Blood prediction model to their
# UK Biobank equivalents, then write an updated model database and a remapped
# covariance file.
library(RSQLite)
library(tidyverse)
sqlite <- dbDriver("SQLite")
dbname <- "mashr_Whole_Blood.db"
# Create a connection to new database, mashr_Whole_Blood.db
db = dbConnect(sqlite, dbname)
# View tables included in mashr_Whole_Blood.db
dbListTables(db)
# View columns of weights and extra tables
dbListFields(db, "weights")
dbListFields(db, "extra")
# View weights and extra tables from mashr_Whole_Blood.db
weights <- dbGetQuery(db, 'select * from weights')
extra <- dbGetQuery(db, 'select * from extra')
# Load mashr_Whole_Blood and snps_matched dataframes
mashr_Whole_Blood <- read.csv("mashr_Whole_Blood.txt", sep="")
snps_matched <- read.delim("snps_matched.txt")
# Merges weights and snps_matched table by inner joining with varID in weights and panel_variant_id in snps_matched
# each variant with GTEx prediction model (panel_variant_id) is matched to a UK Biobank variant (variant)
# NOTE(review): inner joins silently drop model variants with no UK Biobank
# match -- confirm that losing those weights is intended.
merged <- weights %>% inner_join(snps_matched %>% select(variant, panel_variant_id), by=c("varID"="panel_variant_id"))
merged <- merged %>% mutate(varID = variant) %>% select(-variant)
# Update gene and n.snps.in.model column from counts of gene in matched
extra_n.snps <- merged %>% group_by(gene) %>% summarise(n.snps.in.model = n())
updated_extra <- inner_join(extra_n.snps, extra %>% select(-n.snps.in.model), by="gene")
# Write new model
output <- dbConnect(RSQLite::SQLite(), "whole-blood-output.db")
dbWriteTable(output, "weights", merged)
dbWriteTable(output, "extra", updated_extra)
# Update GTEx variant in mashr_Whole_Blood (RSID1 and RSID2) to UKBiobank definition
# NOTE(review): `merged` no longer has a `variant` column after the
# select(-variant) above, so mutate(RSID1 = variant) below will error at run
# time; the RSID remapping likely needs the pre-rename mapping table. TODO fix.
mapped_Whole_Blood <- inner_join(mashr_Whole_Blood, merged, by=c("RSID1" = "varID")) %>% mutate(RSID1 = variant) %>% select(-variant)
mapped_Whole_Blood <- inner_join(mapped_Whole_Blood, merged, by=c("RSID2" = "varID")) %>% mutate(RSID2 = variant) %>% select(-variant)
mapped_Whole_Blood <- mapped_Whole_Blood %>% select(GENE, variant.x, variant.y, VALUE) %>% rename(RSID1 = variant.x, RSID2 = variant.y)
write.table(mapped_Whole_Blood, file="mapped_Whole_Blood.txt")
dbDisconnect(db)
dbDisconnect(output)
| /sabrina/Whole_Blood_variants.r | permissive | hakyimlab/lab-tools | R | false | false | 2,084 | r | library(RSQLite)
library(tidyverse)
# Remaps GTEx variant IDs in the mashr Whole Blood prediction model to their
# UK Biobank equivalents, writing an updated model database and a remapped
# covariance file.
sqlite <- dbDriver("SQLite")
dbname <- "mashr_Whole_Blood.db"
# Create a connection to new database, mashr_Whole_Blood.db
db = dbConnect(sqlite, dbname)
# View tables included in mashr_Whole_Blood.db
dbListTables(db)
# View columns of weights and extra tables
dbListFields(db, "weights")
dbListFields(db, "extra")
# View weights and extra tables from mashr_Whole_Blood.db
weights <- dbGetQuery(db, 'select * from weights')
extra <- dbGetQuery(db, 'select * from extra')
# Load mashr_Whole_Blood and snps_matched dataframes
mashr_Whole_Blood <- read.csv("mashr_Whole_Blood.txt", sep="")
snps_matched <- read.delim("snps_matched.txt")
# Merges weights and snps_matched table by inner joining with varID in weights and panel_variant_id in snps_matched
# each variant with GTEx prediction model (panel_variant_id) is matched to a UK Biobank variant (variant)
# NOTE(review): inner joins silently drop model variants with no UK Biobank
# match -- confirm that losing those weights is intended.
merged <- weights %>% inner_join(snps_matched %>% select(variant, panel_variant_id), by=c("varID"="panel_variant_id"))
merged <- merged %>% mutate(varID = variant) %>% select(-variant)
# Update gene and n.snps.in.model column from counts of gene in matched
extra_n.snps <- merged %>% group_by(gene) %>% summarise(n.snps.in.model = n())
updated_extra <- inner_join(extra_n.snps, extra %>% select(-n.snps.in.model), by="gene")
# Write new model
output <- dbConnect(RSQLite::SQLite(), "whole-blood-output.db")
dbWriteTable(output, "weights", merged)
dbWriteTable(output, "extra", updated_extra)
# Update GTEx variant in mashr_Whole_Blood (RSID1 and RSID2) to UKBiobank definition
# NOTE(review): `merged` no longer has a `variant` column after the
# select(-variant) above, so mutate(RSID1 = variant) below will error at run
# time; the RSID remapping likely needs the pre-rename mapping table. TODO fix.
mapped_Whole_Blood <- inner_join(mashr_Whole_Blood, merged, by=c("RSID1" = "varID")) %>% mutate(RSID1 = variant) %>% select(-variant)
mapped_Whole_Blood <- inner_join(mapped_Whole_Blood, merged, by=c("RSID2" = "varID")) %>% mutate(RSID2 = variant) %>% select(-variant)
mapped_Whole_Blood <- mapped_Whole_Blood %>% select(GENE, variant.x, variant.y, VALUE) %>% rename(RSID1 = variant.x, RSID2 = variant.y)
write.table(mapped_Whole_Blood, file="mapped_Whole_Blood.txt")
dbDisconnect(db)
dbDisconnect(output)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pvalues.r
\name{pvisual}
\alias{pvisual}
\title{P-value calculations.}
\usage{
pvisual(
x,
K,
m = 20,
N = 10000,
type = "scenario3",
xp = 1,
target = 1,
upper.tail = TRUE
)
}
\arguments{
\item{x}{number of observed picks of the data plot}
\item{K}{number of evaluations}
\item{m}{size of the lineup}
\item{N}{MC parameter: number of replicates on which MC probabilities are based. Higher number of replicates will decrease MC variability.}
\item{type}{type of simulation used: scenario 3 assumes that the same lineup is shown in all K evaluations}
\item{xp}{exponent used, defaults to 1}
\item{target}{integer value identifying the location of the data plot}
\item{upper.tail}{compute probabilities P(X >= x). Be aware that the use of this parameter is not consistent with the other distribution functions in base. There, a value of P(X > x) is computed for upper.tail=TRUE.}
}
\value{
Vector/data frame. For comparison a p value based on a binomial distribution is provided as well.
}
\description{
This set of functions allows the user to calculate a p-value from a lineup after
it has been evaluated by K independent observers. The different functions
accommodate different ways of constructing lineups and showing them to observers.
Details are in the papers Majumder et al (2012) JASA, and Hofmann et al (2015).
We distinguish between three different scenarios:
\itemize{
\item Scenario I: in each of K evaluations a different data set and a different set of (m-1) null plots is shown.
\item Scenario II: in each of K evaluations the same data set but a different set of (m-1) null plots is shown.
\item Scenario III: the same lineup, i.e. same data and same set of null plots, is shown to K different observers.
}
}
\examples{
pvisual(15, 20, m=3) # triangle test
}
| /man/pvisual.Rd | no_license | dicook/nullabor | R | false | true | 1,861 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pvalues.r
\name{pvisual}
\alias{pvisual}
\title{P-value calculations.}
\usage{
pvisual(
x,
K,
m = 20,
N = 10000,
type = "scenario3",
xp = 1,
target = 1,
upper.tail = TRUE
)
}
\arguments{
\item{x}{number of observed picks of the data plot}
\item{K}{number of evaluations}
\item{m}{size of the lineup}
\item{N}{MC parameter: number of replicates on which MC probabilities are based. Higher number of replicates will decrease MC variability.}
\item{type}{type of simulation used: scenario 3 assumes that the same lineup is shown in all K evaluations}
\item{xp}{exponent used, defaults to 1}
\item{target}{integer value identifying the location of the data plot}
\item{upper.tail}{compute probabilities P(X >= x). Be aware that the use of this parameter is not consistent with the other distribution functions in base. There, a value of P(X > x) is computed for upper.tail=TRUE.}
}
\value{
Vector/data frame. For comparison a p value based on a binomial distribution is provided as well.
}
\description{
This set of functions allows the user to calculate a p-value from the lineup after
it has been evaluated by K independent observers. The different functions
accommodate different lineup construction and showing to observers.
Details are in the papers Majumder et al (2012) JASA, and Hofmann et al (2015).
We distinguish between three different scenarios:
\itemize{
\item Scenario I: in each of K evaluations a different data set and a different set of (m-1) null plots is shown.
\item Scenario II: in each of K evaluations the same data set but a different set of (m-1) null plots is shown.
\item Scenario III: the same lineup, i.e. same data and same set of null plots, is shown to K different observers.
}
}
\examples{
pvisual(15, 20, m=3) # triangle test
}
|
#' @title Identify branches (including tips) descended from a node (internal function).
#' @description Internal function to get presence absence of descendent branches from a vector of node numbers. The descendents include the branch leading to the focal node (i.e. node defines the stem group, not the crown group).
#' @param phy An object of class "phylo" (see ape package).
#' @param nodeIDs Vector of node numbers (positive integers).
#' @param cladeMembersObj Matrix of clade membership
#' @details The function returns a matrix of unique presences given the selected node. If the selected nodes are nested then presences are only recorded for the least inclusive node.
#' @return matrix Matrix of unique presences for each node id
#' @author Gavin Thomas
#' @examples
#' ## Read in phylogeny and data from Thomas et al. (2009)
#' data(anolis.tree)
#' data(anolis.data)
#'
#' cladeIdentityMatrix <- cladeIdentity(phy=anolis.tree, nodeIDs=170)
#' @export
cladeIdentity <- function(phy, nodeIDs, cladeMembersObj = NULL) {
    nNodes <- length(nodeIDs)
    if (is.null(cladeMembersObj)) {
        ## Build the edge-by-node indicator matrix from the tree: an edge is
        ## marked 1 when its child node (phy$edge[, 2]) is the focal node or
        ## one of its descendants, so the stem branch itself is included.
        cladeMembers <- matrix(NA, nrow = length(phy$edge[, 1]), ncol = nNodes)
        for (j in seq_len(nNodes)) {
            withStem <- c(nodeIDs[j], node.descendents(x = nodeIDs[j], phy = phy))
            cladeMembers[, j] <- as.numeric(phy$edge[, 2] %in% withStem)
        }
    } else {
        ## Reuse a precomputed membership matrix, selecting the requested
        ## columns; the node id sequence skips the Ntip(phy) + 1 slot.
        allNodes <- c(1:Ntip(phy), (Ntip(phy) + 2):(length(phy$edge.length) + 1))
        cladeMembers <- as.matrix(cladeMembersObj[, match(nodeIDs, allNodes)])
    }
    ## Sort columns from the smallest clade to the largest, then subtract each
    ## earlier (nested) clade from the later ones so each edge is credited to
    ## its least inclusive selected node only.
    bySize <- sort(colSums(cladeMembers), decreasing = FALSE, index.return = TRUE)
    ordered <- matrix(cladeMembers[, bySize$ix], ncol = nNodes)
    if (nNodes > 1) {
        for (j in 2:nNodes) {
            if (j == 2) {
                ordered[, j] <- ordered[, j] - ordered[, 1]
            } else {
                ordered[, j] <- ordered[, j] - rowSums(ordered[, 1:(j - 1)])
            }
        }
    }
    ## Restore the caller's column order before returning.
    cladeMembers <- ordered[, sort(bySize$ix, index.return = TRUE)$ix]
    matrix(cladeMembers, ncol = nNodes)
}
| /R/cladeIdentity.R | no_license | cran/motmot.2.0 | R | false | false | 2,202 | r | #' @title Identify branches (including tips) descended from a node (internal function).
#' @description Internal function to get presence absence of descendent branches from a vector of node numbers. The descendents include the branch leading to the focal node (i.e. node defines the stem group no crown group
#' @param phy An object of class "phylo" (see ape package).
#' @param nodeIDs Vector of node numbers (positive integers).
#' @param cladeMembersObj Matrix of clade membership
#' @details The function returns a matrix of unique presences given the selected node. If the selected nodes are nested then presences are only recorded for the least inclusive node.
#' @return matrix Matrix of unique presences for each node id
#' @author Gavin Thomas
#' @examples
#' ## Read in phylogeny and data from Thomas et al. (2009)
#' data(anolis.tree)
#' data(anolis.data)
#'
#' cladeIdentityMatrix <- cladeIdentity(phy=anolis.tree, nodeIDs=170)
#' @export
cladeIdentity <- function(phy, nodeIDs, cladeMembersObj = NULL) {
    ## Returns an edge-by-node 0/1 matrix of clade membership; nested clades
    ## are disjoint (each edge belongs to its least inclusive selected node).
    nNodes <- length(nodeIDs)
    if (is.null(cladeMembersObj)) {
        ## Mark an edge when its child node (phy$edge[, 2]) is the focal node
        ## or a descendant of it (the stem branch is therefore included).
        cladeMembers <- matrix(NA, nrow = length(phy$edge[, 1]), ncol = nNodes)
        for (j in seq_len(nNodes)) {
            withStem <- c(nodeIDs[j], node.descendents(x = nodeIDs[j], phy = phy))
            cladeMembers[, j] <- as.numeric(phy$edge[, 2] %in% withStem)
        }
    } else {
        ## Reuse a precomputed membership matrix, selecting the requested
        ## columns; the node id sequence skips the Ntip(phy) + 1 slot.
        allNodes <- c(1:Ntip(phy), (Ntip(phy) + 2):(length(phy$edge.length) + 1))
        cladeMembers <- as.matrix(cladeMembersObj[, match(nodeIDs, allNodes)])
    }
    ## Order columns by clade size (ascending), subtract each earlier nested
    ## clade from the later ones, then restore the caller's column order.
    bySize <- sort(colSums(cladeMembers), decreasing = FALSE, index.return = TRUE)
    ordered <- matrix(cladeMembers[, bySize$ix], ncol = nNodes)
    if (nNodes > 1) {
        for (j in 2:nNodes) {
            if (j == 2) {
                ordered[, j] <- ordered[, j] - ordered[, 1]
            } else {
                ordered[, j] <- ordered[, j] - rowSums(ordered[, 1:(j - 1)])
            }
        }
    }
    cladeMembers <- ordered[, sort(bySize$ix, index.return = TRUE)$ix]
    matrix(cladeMembers, ncol = nNodes)
}
|
#' qvalue2
#'
#' Estimate the q-values for a given set of p-values, in a safe manner.
#' The most frequent error that I have seen in using \code{qvalue}, is that pertaining
#' to the \dQuote{pi_0 estimate is less than zero} which is thrown as a
#' result of not being able to estimate the lambda parameter.
#' This function first attempts to generate qvalues using the default \dQuote{smoother}
#' method. If that fails due to pi_0 estimate, then this is repeated using the
#' \dQuote{bootstrap} method. If the same error is still thrown, then a fallback
#' FDR estimation method is used, which defaults to \dQuote{BH}, the Benjamini-Hochberg
#' FDR
#'
#' In addition, when running \code{library(qvalue)} in a headless server, you often
#' get a warning: \code{In fun(libname, pkgname) : no DISPLAY variable so Tk is not available}
#' which is actually thrown by tcltk. qvalue2 suppresses that message. See references.
#'
#' @param p a vector of p-values
#' @param lambda The value of the tuning parameter to estimate pi_0. Must be
#' in [0,1). Optional, see Storey (2002).
#' @param fallback The fallback p.adjust method. Default = \dQuote{BH}, but can
#' be one of the other FDR methods from \code{\link{p.adjust.methods}}
#' @return An object of class \code{qvalue}, even if the the FDR was estimated using
#' the \code{fallback} option, where the latter has \code{pi0=1.0}.
#'
#' @author Mark Cowley, 2011-10-25
#' @export
#' @references \url{http://stackoverflow.com/a/11554019/178297}
#'
#' @examples
#' p <- runif(100,0,1)
#' qvalue2(p)
#' # qv
#' pbad <- c( 2e-04, 3e-04, 5e-04, 6e-04, 8e-04, 8e-04, 9e-04, 9e-04, 0.001,
#' 0.001, 0.0013, 0.0013, 0.0013, 0.0014, 0.0016, 0.0017, 0.0019,
#' 0.0019, 0.0023, 0.0023, 0.0023, 0.0024, 0.0028, 0.0029, 0.0031,
#' 0.0032, 0.0032, 0.0032, 0.0034, 0.0034, 0.0035, 0.0037, 0.0038,
#' 0.0038, 0.0042, 0.0043, 0.0044, 0.0044, 0.0044, 0.0045 )
#' qvalue2(pbad)
#' # These will now throw errors:
#' \dontrun{
#' qvalue2(p=c(-0.1,0,0.5,0.9,1.0))
#' qvalue2(p, lambda=seq(-0.1,1,0.05))
#' qvalue2(p, lambda=c(0,0.2,0.5))
#' }
qvalue2 <- function(p, lambda=seq(0,0.90,0.05), fallback=c("BH", "fdr", "BY")[1]) {
	# Loading tcltk quietly up front avoids the "no DISPLAY variable so Tk is
	# not available" warning triggered on headless servers; only attempted
	# when tcltk support is compiled into this R build.
	if(capabilities("tcltk")) suppressPackageStartupMessages(suppressWarnings(library(tcltk)))
	require(qvalue) || stop("required package 'qvalue' is not installed")

	# qvalue reports its pi0-estimation failure via print("ERROR: ..."),
	# not stop(), so the failure must be detected by capturing printed output.
	MSG <- "[1] \"ERROR: The estimated pi0 <= 0. Check that you have valid p-values or use another lambda method.\""

	# First attempt: the default "smoother" pi0 estimator.
	out <- capture.output(
		q <- qvalue( as.numeric(p), lambda=lambda, pi0.method="smoother" )
	)
	if( is(q, "qvalue") ) {
		# success was achieved using the smoother method.
		return( q )
	}

	# 'out' may be empty or multi-line; only a single captured line can equal
	# the known pi0 message. Guarding on length avoids an invalid non-scalar
	# condition in if() (an error since R 4.2).
	if( length(out) == 1 && out == MSG ) {
		# Second attempt: the "bootstrap" pi0 estimator.
		out <- capture.output(
			q <- qvalue( as.numeric(p), lambda=lambda, pi0.method="bootstrap" )
		)
		if( is(q, "qvalue") ) {
			cat("Q-values could not be estimated using the 'smoother' method -- 'bootstrap' method used instead.\n")
			return( q )
		}
		if( length(out) == 1 && out == MSG ) {
			# Both pi0 estimators failed: fall back to a classical FDR
			# adjustment, wrapped in a qvalue-classed object (pi0 = 1.0) so
			# callers always receive a consistent return type.
			cat(sprintf("Couldn't estimate q-values by 'smoother', or 'bootstrap'; generated FDR using %s\n", fallback))
			q <- list(
				call = match.call(),
				pi0=1.0,
				qvalues=p.adjust(p, method=fallback),
				pvalues=p,
				lambda=lambda
			)
			class(q) <- "qvalue"
			return( q )
		}
		# an error was thrown, but not relating to pi_0
		stop(simpleError(out))
	}
	# an error was thrown, but not relating to pi_0
	stop(simpleError(out))
}
# CHANGELOG
# 2012-10-19: added capabilties() check to avoid attempting to load tcltk in case of certain failure.
| /R/qvalue2.R | no_license | drmjc/mjcstats | R | false | false | 4,761 | r | #' qvalue2
#'
#' Estimate the q-values for a given set of p-values, in a safe manner.
#' The most frequent error that I have seen in using \code{qvalue}, is that pertaining
#' to the \dQuote{pi_0 estimate is less than zero} which is thrown as a
#' result of not being able to estimate the lambda parameter.
#' This function first attempts to generate qvalues using the default \dQuote{smoother}
#' method. If that fails due to pi_0 estimate, then this is repeated using the
#' \dQuote{bootstrap} method. If the same error is still thrown, then a fallback
#' FDR estimation method is used, which defaults to \dQuote{BH}, the Benjamini-Hochberg
#' FDR
#'
#' In addition, when running \code{library(qvalue)} in a headless server, you often
#' get a warning: \code{In fun(libname, pkgname) : no DISPLAY variable so Tk is not available}
#' which is actually thrown by tcltk. qvalue2 suppresses that message. See references.
#'
#' @param p a vector of p-values
#' @param lambda The value of the tuning parameter to estimate pi_0. Must be
#' in [0,1). Optional, see Storey (2002).
#' @param fallback The fallback p.adjust method. Default = \dQuote{BH}, but can
#' be one of the other FDR methods from \code{\link{p.adjust.methods}}
#' @return An object of class \code{qvalue}, even if the the FDR was estimated using
#' the \code{fallback} option, where the latter has \code{pi0=1.0}.
#'
#' @author Mark Cowley, 2011-10-25
#' @export
#' @references \url{http://stackoverflow.com/a/11554019/178297}
#'
#' @examples
#' p <- runif(100,0,1)
#' qvalue2(p)
#' # qv
#' pbad <- c( 2e-04, 3e-04, 5e-04, 6e-04, 8e-04, 8e-04, 9e-04, 9e-04, 0.001,
#' 0.001, 0.0013, 0.0013, 0.0013, 0.0014, 0.0016, 0.0017, 0.0019,
#' 0.0019, 0.0023, 0.0023, 0.0023, 0.0024, 0.0028, 0.0029, 0.0031,
#' 0.0032, 0.0032, 0.0032, 0.0034, 0.0034, 0.0035, 0.0037, 0.0038,
#' 0.0038, 0.0042, 0.0043, 0.0044, 0.0044, 0.0044, 0.0045 )
#' qvalue2(pbad)
#' # These will now throw errors:
#' \dontrun{
#' qvalue2(p=c(-0.1,0,0.5,0.9,1.0))
#' qvalue2(p, lambda=seq(-0.1,1,0.05))
#' qvalue2(p, lambda=c(0,0.2,0.5))
#' }
qvalue2 <- function(p, lambda=seq(0,0.90,0.05), fallback=c("BH", "fdr", "BY")[1]) {
	# Loading tcltk quietly up front avoids the "no DISPLAY variable so Tk is
	# not available" warning triggered on headless servers; only attempted
	# when tcltk support is compiled into this R build.
	if(capabilities("tcltk")) suppressPackageStartupMessages(suppressWarnings(library(tcltk)))
	require(qvalue) || stop("required package 'qvalue' is not installed")

	# qvalue reports its pi0-estimation failure via print("ERROR: ..."),
	# not stop(), so the failure must be detected by capturing printed output.
	MSG <- "[1] \"ERROR: The estimated pi0 <= 0. Check that you have valid p-values or use another lambda method.\""

	# First attempt: the default "smoother" pi0 estimator.
	out <- capture.output(
		q <- qvalue( as.numeric(p), lambda=lambda, pi0.method="smoother" )
	)
	if( is(q, "qvalue") ) {
		# success was achieved using the smoother method.
		return( q )
	}

	# 'out' may be empty or multi-line; only a single captured line can equal
	# the known pi0 message. Guarding on length avoids an invalid non-scalar
	# condition in if() (an error since R 4.2).
	if( length(out) == 1 && out == MSG ) {
		# Second attempt: the "bootstrap" pi0 estimator.
		out <- capture.output(
			q <- qvalue( as.numeric(p), lambda=lambda, pi0.method="bootstrap" )
		)
		if( is(q, "qvalue") ) {
			cat("Q-values could not be estimated using the 'smoother' method -- 'bootstrap' method used instead.\n")
			return( q )
		}
		if( length(out) == 1 && out == MSG ) {
			# Both pi0 estimators failed: fall back to a classical FDR
			# adjustment, wrapped in a qvalue-classed object (pi0 = 1.0) so
			# callers always receive a consistent return type.
			cat(sprintf("Couldn't estimate q-values by 'smoother', or 'bootstrap'; generated FDR using %s\n", fallback))
			q <- list(
				call = match.call(),
				pi0=1.0,
				qvalues=p.adjust(p, method=fallback),
				pvalues=p,
				lambda=lambda
			)
			class(q) <- "qvalue"
			return( q )
		}
		# an error was thrown, but not relating to pi_0
		stop(simpleError(out))
	}
	# an error was thrown, but not relating to pi_0
	stop(simpleError(out))
}
# CHANGELOG
# 2012-10-19: added capabilties() check to avoid attempting to load tcltk in case of certain failure.
|
# Case 3: Writing functions
# Part 1: Correlation between two vectors
standardizefunc <- function(vec) {
  # Standardize a numeric vector to z-scores: (x - mean(x)) / sd(x).
  # Fix: the original expression (vec-mean(vec)/sd(vec)) computed
  # vec - (mean(vec)/sd(vec)) because of operator precedence; the mean must
  # be subtracted before dividing by the standard deviation.
  stvec <- (vec - mean(vec)) / sd(vec)
  return(stvec)
}
correlationfunc <- function(xVec, yVec) {
  # Pearson correlation of two pre-standardized (z-score) vectors: the cross
  # product of the scores divided by n - 1. Returns a 1x1 matrix, exactly as
  # the %*% form does.
  rho <- crossprod(xVec, yVec) / (length(xVec) - 1)
  return(rho)
}
# Worked example: read the delivery-time data, standardize both columns with
# the helper above, and compute their correlation by hand.
data <- read.csv("Datasets/deliverytime.csv", header=T, sep=",")
deltimestand <- standardizefunc(data$deltime)
ncasesstand <- standardizefunc(data$ncases)
rho <- correlationfunc(deltimestand,ncasesstand)
# Auto-print the correlation (a 1x1 matrix).
rho
# 2nd way
cor2 <- function (xVec, yVec) {
  # NOTE(review): this "2nd way" is unfinished -- it only standardizes xVec
  # (returned invisibly as the value of the assignment); yVec is never used
  # and no correlation is computed.
  stxvec = (xVec-mean(xVec))/sd(xVec)
}
# Part 2
tmpFn <- function(vec) {
  # Apply a piecewise polynomial to each element of a numeric vector:
  #   x < 0       -> x^2 + 2x + 3
  #   0 <= x < 2  -> x + 3
  #   x >= 2      -> x^2 + 4x - 7
  # Fix: the original looped over 1:length(vec), which evaluates to c(1, 0)
  # for empty input and then errors; vapply() handles length-zero input
  # correctly and preserves any names on 'vec'.
  piecewise <- function(x) {
    if (x < 0) {
      (x ^ 2) + (2 * x) + 3
    } else if (x < 2) {
      x + 3
    } else {
      (x ^ 2) + (4 * x) - 7
    }
  }
  vapply(vec, piecewise, numeric(1))
}
# Sanity check across all three branches: expect 2, 3, 5, 38.
tmpFn(c(-1,0,2,5))
tmpFn2 <- function(x) {
  # Scalar version of the piecewise polynomial used in tmpFn:
  #   x < 0       -> x^2 + 2x + 3
  #   0 <= x < 2  -> x + 3
  #   x >= 2      -> x^2 + 4x - 7
  if (x < 0) {
    return((x ^ 2) + (2 * x) + 3)
  }
  if (x < 2) {
    return(x + 3)
  }
  (x ^ 2) + (4 * x) - 7
}
# Spot-check tmpFn2 on each branch of the piecewise function.
tmpFn2(-1)
tmpFn2(0)
tmpFn2(2)
tmpFn2(5)
# Part 3
library(dplyr)
# Stack three yearly call-center exports into one data frame.
call_center_data_2013 <- read.csv("Datasets/Call Center Data/2013.csv", header=T, sep=",")
call_center_data_2014 <- read.csv("Datasets/Call Center Data/2014.csv", header=T, sep=",")
call_center_data_2015 <- read.csv("Datasets/Call Center Data/2015.csv", header=T, sep=",")
all_data <- rbind(call_center_data_2013, call_center_data_2014, call_center_data_2015)
# Recode the integer month index into a factor of month names.
all_data$MONTH <- month.name[all_data$MONTH]
all_data$MONTH <- as.factor(all_data$MONTH)
# Interactive sanity checks of the combined data.
levels(all_data$MONTH)
str(all_data)
View(all_data)
summy <- function(data, questionNum){
  # Dispatch a canned summary of the call-center data.
  # data:        data frame with YEAR and MONTH columns (dplyr must be loaded
  #              for questions 1 and 2)
  # questionNum: integer 1-5 selecting the question; anything else prints
  #              "Invalid question"
  if (questionNum == 1) {
    print("Answer 1")
    # Call count per year/month combination.
    return(data %>% group_by(YEAR, MONTH) %>% summarise(count=n()))
  } else if (questionNum == 2) {
    # Fix: print before returning -- the original print() sat after return()
    # and was unreachable. This also matches the pattern of branch 1.
    print("Answer 2")
    # Average monthly call count per year.
    countofcalls <- data %>% group_by(YEAR, MONTH) %>% summarise(count=n())
    return(countofcalls %>% summarise(average=mean(count)))
  } else if (questionNum == 3) {
    print("Answer 3")
  } else if (questionNum == 4) {
    print("Answer 4")
  } else if (questionNum == 5) {
    print("Answer 5")
  } else {
    print("Invalid question")
  }
}
# Run the canned summaries. Fix: the second View() referenced the
# non-existent object "anwser2" while the result was assigned to "answer2";
# it now views answer2. ("anwser1" is misspelled but used consistently.)
anwser1 <- summy(all_data, 1)
View(anwser1)
answer2 <- summy(all_data, 2)
View(answer2)
warnings()
summy(all_data, 3)
summy(all_data, 4)
summy(all_data, 5)
| /Module-6-R-for-Business-Intelligence/workspace/case_3.r | no_license | Cbanzaime23/up-nec-business-intelligence | R | false | false | 2,456 | r | # Case 3: Writing functions
# Part 1: Correlation between two vectors
standardizefunc <- function(vec) {
  # Standardize a numeric vector to z-scores: (x - mean(x)) / sd(x).
  # Fix: the original expression (vec-mean(vec)/sd(vec)) computed
  # vec - (mean(vec)/sd(vec)) because of operator precedence; the mean must
  # be subtracted before dividing by the standard deviation.
  stvec <- (vec - mean(vec)) / sd(vec)
  return(stvec)
}
correlationfunc <- function(xVec, yVec) {
  # Pearson correlation of two pre-standardized (z-score) vectors: the cross
  # product of the scores divided by n - 1. Returns a 1x1 matrix, exactly as
  # the %*% form does.
  rho <- crossprod(xVec, yVec) / (length(xVec) - 1)
  return(rho)
}
# Worked example: read the delivery-time data, standardize both columns with
# the helper above, and compute their correlation by hand.
data <- read.csv("Datasets/deliverytime.csv", header=T, sep=",")
deltimestand <- standardizefunc(data$deltime)
ncasesstand <- standardizefunc(data$ncases)
rho <- correlationfunc(deltimestand,ncasesstand)
# Auto-print the correlation (a 1x1 matrix).
rho
# 2nd way
cor2 <- function (xVec, yVec) {
  # NOTE(review): this "2nd way" is unfinished -- it only standardizes xVec
  # (returned invisibly as the value of the assignment); yVec is never used
  # and no correlation is computed.
  stxvec = (xVec-mean(xVec))/sd(xVec)
}
# Part 2
tmpFn <- function(vec) {
  # Apply a piecewise polynomial to each element of a numeric vector:
  #   x < 0       -> x^2 + 2x + 3
  #   0 <= x < 2  -> x + 3
  #   x >= 2      -> x^2 + 4x - 7
  # Fix: the original looped over 1:length(vec), which evaluates to c(1, 0)
  # for empty input and then errors; vapply() handles length-zero input
  # correctly and preserves any names on 'vec'.
  piecewise <- function(x) {
    if (x < 0) {
      (x ^ 2) + (2 * x) + 3
    } else if (x < 2) {
      x + 3
    } else {
      (x ^ 2) + (4 * x) - 7
    }
  }
  vapply(vec, piecewise, numeric(1))
}
# Sanity check across all three branches: expect 2, 3, 5, 38.
tmpFn(c(-1,0,2,5))
tmpFn2 <- function(x) {
  # Scalar version of the piecewise polynomial used in tmpFn:
  #   x < 0       -> x^2 + 2x + 3
  #   0 <= x < 2  -> x + 3
  #   x >= 2      -> x^2 + 4x - 7
  if (x < 0) {
    return((x ^ 2) + (2 * x) + 3)
  }
  if (x < 2) {
    return(x + 3)
  }
  (x ^ 2) + (4 * x) - 7
}
# Spot-check tmpFn2 on each branch of the piecewise function.
tmpFn2(-1)
tmpFn2(0)
tmpFn2(2)
tmpFn2(5)
# Part 3
library(dplyr)
# Stack three yearly call-center exports into one data frame.
call_center_data_2013 <- read.csv("Datasets/Call Center Data/2013.csv", header=T, sep=",")
call_center_data_2014 <- read.csv("Datasets/Call Center Data/2014.csv", header=T, sep=",")
call_center_data_2015 <- read.csv("Datasets/Call Center Data/2015.csv", header=T, sep=",")
all_data <- rbind(call_center_data_2013, call_center_data_2014, call_center_data_2015)
# Recode the integer month index into a factor of month names.
all_data$MONTH <- month.name[all_data$MONTH]
all_data$MONTH <- as.factor(all_data$MONTH)
# Interactive sanity checks of the combined data.
levels(all_data$MONTH)
str(all_data)
View(all_data)
summy <- function(data, questionNum){
  # Dispatch a canned summary of the call-center data.
  # data:        data frame with YEAR and MONTH columns (dplyr must be loaded
  #              for questions 1 and 2)
  # questionNum: integer 1-5 selecting the question; anything else prints
  #              "Invalid question"
  if (questionNum == 1) {
    print("Answer 1")
    # Call count per year/month combination.
    return(data %>% group_by(YEAR, MONTH) %>% summarise(count=n()))
  } else if (questionNum == 2) {
    # Fix: print before returning -- the original print() sat after return()
    # and was unreachable. This also matches the pattern of branch 1.
    print("Answer 2")
    # Average monthly call count per year.
    countofcalls <- data %>% group_by(YEAR, MONTH) %>% summarise(count=n())
    return(countofcalls %>% summarise(average=mean(count)))
  } else if (questionNum == 3) {
    print("Answer 3")
  } else if (questionNum == 4) {
    print("Answer 4")
  } else if (questionNum == 5) {
    print("Answer 5")
  } else {
    print("Invalid question")
  }
}
# Run the canned summaries. Fix: the second View() referenced the
# non-existent object "anwser2" while the result was assigned to "answer2";
# it now views answer2. ("anwser1" is misspelled but used consistently.)
anwser1 <- summy(all_data, 1)
View(anwser1)
answer2 <- summy(all_data, 2)
View(answer2)
warnings()
summy(all_data, 3)
summy(all_data, 4)
summy(all_data, 5)
|
## Necessary Libraries
library(shiny)
library(shinydashboard)
library(tidyr)
library(dplyr)
library(purrr)
library(ggplot2)
library(conflicted)
library(plotly)
## Resolve package conflicts
# Prefer dplyr's filter() over other attached packages' filter().
conflict_prefer("filter", "dplyr")
# Prefer shinydashboard's box() UI builder.
conflict_prefer("box", "shinydashboard")
# Prefer plotly's layout() over other attached packages' layout().
conflict_prefer("layout", "plotly")
| /R/00_project_init.R | permissive | jakedilliott/ltcf_covid_dashboard | R | false | false | 302 | r | ## Necessary Libraries
## Necessary Libraries
library(shiny)
library(shinydashboard)
library(tidyr)
library(dplyr)
library(purrr)
library(ggplot2)
library(conflicted)
library(plotly)
## Resolve package conflicts
# Prefer dplyr's filter() over other attached packages' filter().
conflict_prefer("filter", "dplyr")
# Prefer shinydashboard's box() UI builder.
conflict_prefer("box", "shinydashboard")
# Prefer plotly's layout() over other attached packages' layout().
conflict_prefer("layout", "plotly")
|
.onLoad <- function(libname, pkgname) {
    ## Package-load hook: seed every rapport-related option with its default.

    ## default output file name pattern and directory
    options("rp.file.name" = "rapport-%T-%N-%n")
    options("rp.file.path" = tempdir())

    ## template username: keep a user-set value, otherwise fall back to
    ## "Anonymous" (is.empty treats whitespace-only values as unset)
    current_user <- getOption("tpl.user")
    if (is.empty(current_user, TRUE, leading = TRUE)) {
        current_user <- "Anonymous"
    }
    options("tpl.user" = current_user)

    ## use labels
    ## IMO, this should be implemented for all functions (sometimes you may not want labels)
    options("rp.use.labels" = TRUE)

    ## no extra template search paths by default
    options("tpl.paths" = NULL)

    ## pander options: turn on unified graph styling
    evals_opts <- getOption("evals")
    evals_opts$graph.unify <- TRUE
    options("evals" = evals_opts)

    ## image format/envir settings
    options("graph.replay" = FALSE) # TODO: fix on Win platform!

    ## regexes marking rapport template tags
    ## TODO: user customized "brew" tags should be added here
    options("rp.tags" = c(
        header.open   = "^<!--head$",
        header.close  = "^head-->$",
        comment.open  = "<!--",
        comment.close = "-->"
    ))
}
| /R/init.R | no_license | tothg/rapport | R | false | false | 1,094 | r | .onLoad <- function(libname, pkgname)
{
    ## Package-load hook body: seeds every rapport-related option.
    ## file path and name settings
    options('rp.file.name' = 'rapport-%T-%N-%n')
    options('rp.file.path' = tempdir())
    ## tpl username: keep a user-set value, otherwise fall back to 'Anonymous'
    options('tpl.user' = {
        if (is.empty(getOption('tpl.user'), TRUE, leading = TRUE))
            'Anonymous'
        else
            getOption('tpl.user')
    })
    ## use labels
    ## IMO, this should be implemented for all functions (sometimes you may not want labels)
    options('rp.use.labels' = TRUE)
    ## paths settings (no extra template search paths by default)
    options('tpl.paths' = NULL)
    ## pander options: turn on unified graph styling
    eO <- getOption('evals')
    eO$graph.unify <- TRUE
    options('evals' = eO)
    ## image format/envir settings
    options('graph.replay' = FALSE) # TODO: fix on Win platform!
    ## tag regexes marking rapport template tags
    ## TODO: user customized "brew" tags should be added here
    options('rp.tags' = c(
        header.open = '^<!--head$',
        header.close = '^head-->$',
        comment.open = '<!--',
        comment.close = '-->'
    ))
}
|
# Unit tests for GlmSimulatoR::simulate_gaussian().
library(GlmSimulatoR)
set.seed(1)
###############################################
# Run code
###############################################
# Simulate the default gaussian data set and refit it with glm(); the tests
# below expect the recovered coefficients to be c(max(weights), weights).
default <- simulate_gaussian()
model <- glm(formula = Y ~ X1 + X2 + X3, data = default, family = gaussian())
params <- c(1, 2, 3)
params <- c(max(params), params)
# Default run: tibble structure, 10000 rows, predictors in [1, 2], and glm
# coefficients within 0.1 of the true values.
test_that("Run default. Check structure.", {
  expect_true(all(class(default) == c("tbl_df", "tbl", "data.frame")))
  expect_true(nrow(default) == 10000)
  expect_true(all(colnames(default) == c("Y", "X1", "X2", "X3")))
  expect_true(min(default$X1) >= 1)
  expect_true(max(default$X1) <= 2)
  expect_true(min(default$X2) >= 1)
  expect_true(max(default$X2) <= 2)
  expect_true(min(default$X3) >= 1)
  expect_true(max(default$X3) <= 2)
  expect_true(all(max(abs(model$coefficients - params)) <= .1))
})
rm(default, model, params)
# N controls the number of simulated rows.
test_that("Returns the correct number of rows.", {
  expect_equal(nrow(simulate_gaussian(N = 10)), 10)
  expect_equal(nrow(simulate_gaussian(N = 100)), 100)
  expect_equal(nrow(simulate_gaussian(N = 1000)), 1000)
  expect_equal(nrow(simulate_gaussian(N = 10000)), 10000)
})
# One predictor column per weight, plus the response column Y.
test_that("Returns the correct number of predictors.", {
  expect_equal(ncol(simulate_gaussian(weights = 1)), 2)
  expect_equal(ncol(simulate_gaussian(weights = 1:2)), 3)
  expect_equal(ncol(simulate_gaussian(weights = 1:3)), 4)
  expect_equal(ncol(simulate_gaussian(weights = 1:4)), 5)
})
# Predictors are expected to fall inside [1, 1 + xrange].
test_that("Returns the correct range for x.", {
  expect_true(max(simulate_gaussian(weights = 1, xrange = 0)[, 2]) <= 1)
  expect_true(min(simulate_gaussian(weights = 1, xrange = 0)[, 2]) >= 1)
  expect_true(max(simulate_gaussian(weights = 1, xrange = 2)[, 2]) <= 3)
  expect_true(min(simulate_gaussian(weights = 1, xrange = 2)[, 2]) >= 1)
  expect_true(max(simulate_gaussian(weights = 1, xrange = 3)[, 2]) <= 4)
  expect_true(min(simulate_gaussian(weights = 1, xrange = 3)[, 2]) >= 1)
  expect_true(max(simulate_gaussian(weights = c(1, 2), xrange = 0)[, 3]) <= 1)
  expect_true(min(simulate_gaussian(weights = c(1, 2), xrange = 0)[, 3]) >= 1)
  expect_true(max(simulate_gaussian(weights = c(1, 2), xrange = 2)[, 3]) <= 3)
  expect_true(min(simulate_gaussian(weights = c(1, 2), xrange = 2)[, 3]) >= 1)
  expect_true(max(simulate_gaussian(weights = c(1, 2), xrange = 3)[, 3]) <= 4)
  expect_true(min(simulate_gaussian(weights = c(1, 2), xrange = 3)[, 3]) >= 1)
})
# 'unrelated' adds that many extra columns on top of Y and the predictors.
test_that("Returns the correct number of unrelated variables.", {
  expect_equal(ncol(simulate_gaussian(weights = 1, unrelated = 0)), 2)
  expect_equal(ncol(simulate_gaussian(weights = 1, unrelated = 1)), 3)
  expect_equal(ncol(simulate_gaussian(weights = 1, unrelated = 2)), 4)
  expect_equal(ncol(simulate_gaussian(weights = 1, unrelated = 3)), 5)
  expect_equal(ncol(simulate_gaussian(weights = 1:2, unrelated = 0)), 3)
  expect_equal(ncol(simulate_gaussian(weights = 1:2, unrelated = 1)), 4)
  expect_equal(ncol(simulate_gaussian(weights = 1:2, unrelated = 2)), 5)
  expect_equal(ncol(simulate_gaussian(weights = 1:2, unrelated = 3)), 6)
})
# Every supported link function should run and return a tibble.
test_that("All links execute", {
  expect_true(all(class(simulate_gaussian(link = "identity")) == c("tbl_df", "tbl", "data.frame")))
  expect_true(all(class(simulate_gaussian(link = "log")) == c("tbl_df", "tbl", "data.frame")))
  expect_true(all(class(simulate_gaussian(link = "inverse")) == c("tbl_df", "tbl", "data.frame")))
})
# A larger ancillary parameter should increase the spread (sd) of Y.
test_that("Ancillary parameter works as expected", {
  expect_true(simulate_gaussian()$Y %>% sd() < simulate_gaussian(ancillary = 5)$Y %>% sd())
})
###############################################
# Input checking
###############################################
# Invalid arguments must raise errors (message text is not checked).
test_that("Confirm input checing works.", {
  expect_error(simulate_gaussian(N = -1), NULL)
  expect_error(simulate_gaussian(N = c(100, 200)), NULL)
  expect_error(simulate_gaussian(link = "sqrt"), NULL)
  expect_error(simulate_gaussian(weights = c()), NULL)
  expect_error(simulate_gaussian(xrange = "asdf"), NULL)
  expect_error(simulate_gaussian(xrange = c()), NULL)
  expect_error(simulate_gaussian(xrange = c(1, 2)), NULL)
  expect_error(simulate_gaussian(xrange = -1), NULL)
  expect_error(simulate_gaussian(unrelated = -1), NULL)
  expect_error(simulate_gaussian(unrelated = c(10, 20)), NULL)
  expect_error(simulate_gaussian(ancillary = -1), NULL)
  expect_error(simulate_gaussian(ancillary = c(10, 20)), NULL)
})
| /tests/testthat/test_simullate_gaussian.R | no_license | cran/GlmSimulatoR | R | false | false | 4,424 | r | library(GlmSimulatoR)
# Unit tests for GlmSimulatoR::simulate_gaussian().
set.seed(1)
###############################################
# Run code
###############################################
# Simulate the default gaussian data set and refit it with glm(); the tests
# below expect the recovered coefficients to be c(max(weights), weights).
default <- simulate_gaussian()
model <- glm(formula = Y ~ X1 + X2 + X3, data = default, family = gaussian())
params <- c(1, 2, 3)
params <- c(max(params), params)
# Default run: tibble structure, 10000 rows, predictors in [1, 2], and glm
# coefficients within 0.1 of the true values.
test_that("Run default. Check structure.", {
  expect_true(all(class(default) == c("tbl_df", "tbl", "data.frame")))
  expect_true(nrow(default) == 10000)
  expect_true(all(colnames(default) == c("Y", "X1", "X2", "X3")))
  expect_true(min(default$X1) >= 1)
  expect_true(max(default$X1) <= 2)
  expect_true(min(default$X2) >= 1)
  expect_true(max(default$X2) <= 2)
  expect_true(min(default$X3) >= 1)
  expect_true(max(default$X3) <= 2)
  expect_true(all(max(abs(model$coefficients - params)) <= .1))
})
rm(default, model, params)
# N controls the number of simulated rows.
test_that("Returns the correct number of rows.", {
  expect_equal(nrow(simulate_gaussian(N = 10)), 10)
  expect_equal(nrow(simulate_gaussian(N = 100)), 100)
  expect_equal(nrow(simulate_gaussian(N = 1000)), 1000)
  expect_equal(nrow(simulate_gaussian(N = 10000)), 10000)
})
# One predictor column per weight, plus the response column Y.
test_that("Returns the correct number of predictors.", {
  expect_equal(ncol(simulate_gaussian(weights = 1)), 2)
  expect_equal(ncol(simulate_gaussian(weights = 1:2)), 3)
  expect_equal(ncol(simulate_gaussian(weights = 1:3)), 4)
  expect_equal(ncol(simulate_gaussian(weights = 1:4)), 5)
})
# Predictors are expected to fall inside [1, 1 + xrange].
test_that("Returns the correct range for x.", {
  expect_true(max(simulate_gaussian(weights = 1, xrange = 0)[, 2]) <= 1)
  expect_true(min(simulate_gaussian(weights = 1, xrange = 0)[, 2]) >= 1)
  expect_true(max(simulate_gaussian(weights = 1, xrange = 2)[, 2]) <= 3)
  expect_true(min(simulate_gaussian(weights = 1, xrange = 2)[, 2]) >= 1)
  expect_true(max(simulate_gaussian(weights = 1, xrange = 3)[, 2]) <= 4)
  expect_true(min(simulate_gaussian(weights = 1, xrange = 3)[, 2]) >= 1)
  expect_true(max(simulate_gaussian(weights = c(1, 2), xrange = 0)[, 3]) <= 1)
  expect_true(min(simulate_gaussian(weights = c(1, 2), xrange = 0)[, 3]) >= 1)
  expect_true(max(simulate_gaussian(weights = c(1, 2), xrange = 2)[, 3]) <= 3)
  expect_true(min(simulate_gaussian(weights = c(1, 2), xrange = 2)[, 3]) >= 1)
  expect_true(max(simulate_gaussian(weights = c(1, 2), xrange = 3)[, 3]) <= 4)
  expect_true(min(simulate_gaussian(weights = c(1, 2), xrange = 3)[, 3]) >= 1)
})
# 'unrelated' adds that many extra columns on top of Y and the predictors.
test_that("Returns the correct number of unrelated variables.", {
  expect_equal(ncol(simulate_gaussian(weights = 1, unrelated = 0)), 2)
  expect_equal(ncol(simulate_gaussian(weights = 1, unrelated = 1)), 3)
  expect_equal(ncol(simulate_gaussian(weights = 1, unrelated = 2)), 4)
  expect_equal(ncol(simulate_gaussian(weights = 1, unrelated = 3)), 5)
  expect_equal(ncol(simulate_gaussian(weights = 1:2, unrelated = 0)), 3)
  expect_equal(ncol(simulate_gaussian(weights = 1:2, unrelated = 1)), 4)
  expect_equal(ncol(simulate_gaussian(weights = 1:2, unrelated = 2)), 5)
  expect_equal(ncol(simulate_gaussian(weights = 1:2, unrelated = 3)), 6)
})
# Every supported link function should run and return a tibble.
test_that("All links execute", {
  expect_true(all(class(simulate_gaussian(link = "identity")) == c("tbl_df", "tbl", "data.frame")))
  expect_true(all(class(simulate_gaussian(link = "log")) == c("tbl_df", "tbl", "data.frame")))
  expect_true(all(class(simulate_gaussian(link = "inverse")) == c("tbl_df", "tbl", "data.frame")))
})
# A larger ancillary parameter should increase the spread (sd) of Y.
test_that("Ancillary parameter works as expected", {
  expect_true(simulate_gaussian()$Y %>% sd() < simulate_gaussian(ancillary = 5)$Y %>% sd())
})
###############################################
# Input checking
###############################################
# Invalid arguments must raise errors (message text is not checked).
test_that("Confirm input checing works.", {
  expect_error(simulate_gaussian(N = -1), NULL)
  expect_error(simulate_gaussian(N = c(100, 200)), NULL)
  expect_error(simulate_gaussian(link = "sqrt"), NULL)
  expect_error(simulate_gaussian(weights = c()), NULL)
  expect_error(simulate_gaussian(xrange = "asdf"), NULL)
  expect_error(simulate_gaussian(xrange = c()), NULL)
  expect_error(simulate_gaussian(xrange = c(1, 2)), NULL)
  expect_error(simulate_gaussian(xrange = -1), NULL)
  expect_error(simulate_gaussian(unrelated = -1), NULL)
  expect_error(simulate_gaussian(unrelated = c(10, 20)), NULL)
  expect_error(simulate_gaussian(ancillary = -1), NULL)
  expect_error(simulate_gaussian(ancillary = c(10, 20)), NULL)
})
|
#Exploratory Data Analysis - Course Project 1
#Plot 1
#Download the Dataset (NOTE: destfile/unzip paths are machine-specific)
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURL, destfile = "/Users/danieltheng/Desktop/Learning R/Exploratory Data Analysis/ExData_Plotting1/household_power_consumption.zip",
              method = "curl")
unzip("/Users/danieltheng/Desktop/Learning R/Exploratory Data Analysis/ExData_Plotting1/household_power_consumption.zip")
#Load the Data into R
library(rio) #load the rio package for importing data
library(tidyr) #fix: unite() used below comes from tidyr, which was never loaded
power_consumption <-
        import("/Users/danieltheng/Desktop/Learning R/Exploratory Data Analysis/ExData_Plotting1/household_power_consumption.txt")
View(power_consumption)
#Subset the dates 2007-02-01 and 2007-02-02
#Extract only the date and times
dates <- data.frame(power_consumption$Date, power_consumption$Time, row.names = 1:2075259)
#Find which rows have the dates we want
index <- which(dates$power_consumption.Date %in% "1/2/2007")
index2 <- which(dates$power_consumption.Date %in% "2/2/2007")
#Merge the Date and Time into 1 variable
dates <- unite(dates, col = "Date/Time", sep = " ")
#Combine the row vector into 1
index <- c(index, index2)
dates <- dates[index, ] #Subset the dates we want
library(lubridate) #Load lubridate to work with dates
Dates.time <- dmy_hms(dates) #Choose correct function to convert into dates
power_consumption <- power_consumption[index, ] #Subset only the observations we want
power_consumption <- data.frame(Dates.time, power_consumption[ , 3:9]) #Add the new dates/time column
#Convert all the columns to numerics
for(i in 2:8){
        power_consumption[ ,i] <- as.numeric(power_consumption[ ,i])
}
#Constructing the Plot
View(power_consumption)
#Open png file
png("plot1.png", width = 480, height = 480, units = "px")
#Make Plot
with(power_consumption, hist(Global_active_power, col = "red", main = "Global Active Power",
                             xlab = "Global Active Power (kilowatts)"))
#Close the png file
dev.off()
| /plot1.R | no_license | dantheng/ExData_Plotting1 | R | false | false | 2,067 | r | #Exploratory Data Analysis - Course Project 1
# Plot 1 (Exploratory Data Analysis - Course Project 1):
# histogram of Global Active Power for 1-2 Feb 2007.
# Writes plot1.png (480x480 px) to the working directory.

# The data directory; defined once instead of repeating the absolute path
# in every call below.
data_dir <- "/Users/danieltheng/Desktop/Learning R/Exploratory Data Analysis/ExData_Plotting1"
zip_path <- file.path(data_dir, "household_power_consumption.zip")

# Download and unpack the dataset
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURL, destfile = zip_path, method = "curl")
unzip(zip_path)

# Load the data into R
library(rio)        # rio package for importing data
library(tidyr)      # BUG FIX: unite() below is a tidyr function, which was never loaded
library(lubridate)  # for dmy_hms() date-time parsing

power_consumption <-
  import(file.path(data_dir, "household_power_consumption.txt"))
View(power_consumption)

# Subset the dates 2007-02-01 and 2007-02-02
# Extract only the dates and times (row names generalized from the
# hard-coded 2075259 to whatever the file actually contains)
dates <- data.frame(power_consumption$Date, power_consumption$Time,
                    row.names = seq_len(nrow(power_consumption)))
# Find which rows have the dates we want
index  <- which(dates$power_consumption.Date %in% "1/2/2007")
index2 <- which(dates$power_consumption.Date %in% "2/2/2007")
# Merge the Date and Time into 1 variable
dates <- unite(dates, col = "Date/Time", sep = " ")
# Combine the row vectors into 1
index <- c(index, index2)
dates <- dates[index, ]       # one-column data frame drops to a character vector
Dates.time <- dmy_hms(dates)  # choose correct function to convert into date-times
power_consumption <- power_consumption[index, ]  # subset only the observations we want
power_consumption <- data.frame(Dates.time, power_consumption[ , 3:9])  # add the date/time column
# Convert all the measurement columns to numerics
for (i in 2:8) {
  power_consumption[ , i] <- as.numeric(power_consumption[ , i])
}
View(power_consumption)

# Constructing the plot: open the png device, draw, close the device
png("plot1.png", width = 480, height = 480, units = "px")
with(power_consumption, hist(Global_active_power, col = "red",
                             main = "Global Active Power",
                             xlab = "Global Active Power (kilowatts)"))
dev.off()
|
# Build the ui for a single "view" (one display unit of the problem set).
#
# view.ind: index of the view. View 1 additionally receives all rows with
#           view.ind == 0 (content that precedes the first explicit view).
# ps:       problem-set object holding shiny.dt (one row per display
#           element) and cdt (the chunk table).
# Returns a fluidRow wrapping a column that contains all rendered elements.
make.view.ui = function(view.ind, ps=get.ps()) {
  restore.point("make.view.ui")
  shiny.dt = ps$shiny.dt
  cdt = ps$cdt
  # View 1 also owns the rows that belong to no view (view.ind == 0)
  if (view.ind==1) {
    rows = which(shiny.dt$view.ind == view.ind | shiny.dt$view.ind == 0)
  } else {
    rows = which(shiny.dt$view.ind == view.ind)
  }
  # Translate each row of shiny.dt into a ui element, dispatching on type
  ui.li = lapply(rows, function(i) {
    #restore.point("hdkgkdhighoifhg")
    if (shiny.dt$type[i]=="chunk") {
      # Code chunk: `ui` is assigned but ps$cdt$ui[[chunk.ind]] is what gets
      # returned -- presumably make.initial.chunk.ui stores the built ui
      # there as a side effect; confirm in its definition.
      chunk.ind = which(cdt$chunk.name==shiny.dt$chunk.name[i])
      ui=make.initial.chunk.ui(chunk.ind)
      award.name = cdt$award.name[chunk.ind]
      # If solving the chunk grants an award, append a container for it
      if (!is.na(award.name)) {
        award.ui.id = get.award.ui.id(award.name)
        return(list(ps$cdt$ui[[chunk.ind]],uiOutput(award.ui.id)))
      }
      return(ps$cdt$ui[[chunk.ind]])
    } else if (shiny.dt$type[i]=="widget") {
      # Widget: rendered by its type-specific shiny.ui.fun
      wid = ps$rps$widgets[[ shiny.dt$widget.id[i] ]]
      Widget = ps$rps$Widgets[[wid$rta$type]]
      html = Widget$shiny.ui.fun(wid)
      row = which(ps$rps$wid.dt$id == wid$rta$id)
      award.name = ps$rps$wid.dt$award.name[row]
      if (!is.na(award.name)) {
        award.ui.id = get.award.ui.id(award.name)
        return(list(mathJaxRTutor(html),uiOutput(award.ui.id)))
      }
      return(mathJaxRTutor(html))
    } else {
      # Plain html content, wrapped so math formulas are (re-)rendered
      #return(shiny.dt$html[[i]])
      return(mathJaxRTutor(shiny.dt$html[[i]]))
    }
  })
  # Fold rows belonging to collapsible "notes" into collapse panels
  ui.li = adapt.view.li.for.notes(ui.li,shiny.dt, rows)
  #do.call("fluidRow", ui.li)
  # Center the content in the 12-unit bootstrap grid using the margins
  w = 12-ps$left.margin-ps$right.margin
  my.ui = do.call("column", c(list(width=w, offset=ps$left.margin),ui.li))
  fluidRow(my.ui)
}
# Make the default ui for each view and store the list as ps$view.ui.li.
#
# view.inds: indices of the views to build; defaults to every view index
#            found in ps$shiny.dt (index 0 marks rows outside any view).
# ps:        problem-set object; the built list is attached to it
#            (assignment persists, as relied on throughout this file).
# Returns the list of view uis invisibly.
make.view.ui.li = function(view.inds = NULL,ps=get.ps()) {
  restore.point("make.view.ui.li")
  if (is.null(view.inds))
    view.inds = setdiff(unique(ps$shiny.dt$view.ind),0)
  # (removed unused local `shiny.dt` and dead commented-out call)
  view.ui.li = lapply(view.inds, make.view.ui)
  ps$view.ui.li = view.ui.li
  invisible(view.ui.li)
}
# Build the tabPanel for a single exercise, containing all of its views.
#
# ex.ind:            index of the exercise. Exercise 1 also receives the
#                    rows with ex.ind == 0 (content before exercise 1).
# view.in.container: if TRUE, emit uiOutput placeholders ("viewUI<ind>")
#                    that are filled later via show.view.ui; otherwise the
#                    pre-built view uis are embedded directly.
# Returns a shiny tabPanel with value "exPanel<ex.ind>".
make.ex.ui = function(ex.ind, ps = get.ps(), session=ps$session, view.in.container=isTRUE(ps$view.in.container)) {
  restore.point("make.ex.ui")
  shiny.dt = ps$shiny.dt
  cdt = ps$cdt
  if (ex.ind==1) {
    rows = which(shiny.dt$ex.ind == ex.ind | shiny.dt$ex.ind == 0)
  } else {
    rows = which(shiny.dt$ex.ind == ex.ind)
  }
  view.inds = unique(shiny.dt$view.ind[rows])
  ex.name = ps$edt$ex.name[ex.ind]
  if (view.in.container) {
    # Placeholders only; content is pushed later into "viewUI<ind>"
    li = lapply(view.inds, function(view.ind) {
      outputName = paste0("viewUI",view.ind)
      uiOutput(outputName)
    })
  } else {
    li = ps$view.ui.li[view.inds]
  }
  # Button for next exercise (omitted on the last exercise)
  if (ex.ind < max(ps$cdt$ex.ind)) {
    btnId = paste0("nextExBtn", ex.ind)
    nextExBtn = actionButton(btnId,"Go to next exercise...")
    li = c(li, list(nextExBtn))
    # Clicking switches the tabset to the following exercise's panel
    buttonHandler(btnId, ex.ind=ex.ind, function(session,ex.ind,...) {
      cat("\nnextExBtn pressed...")
      updateTabsetPanel(session, inputId="exTabsetPanel", selected = paste0("exPanel",ex.ind+1))
    })
  }
  do.call("tabPanel",
    c(list(title=ex.name, value=paste0("exPanel",ex.ind)), li)
  )
}
# Fold consecutive view elements that belong to the same "note" into one
# collapsible bsCollapse panel (shinyBS): the note's first element is
# replaced by the panel and the remaining elements are removed.
#
# view.li:  list of ui elements as built by make.view.ui
# shiny.dt: element table; note.ind == 0 marks rows outside any note
# rows:     row indices of shiny.dt corresponding to view.li
# Returns the adapted (possibly shorter) list of ui elements.
adapt.view.li.for.notes = function(view.li, shiny.dt, rows) {
  note.inds = setdiff(unique(shiny.dt$note.ind[rows]),0)
  if (length(note.inds)==0) return(view.li)
  restore.point("adapt.view.li.for.notes")
  remove.rows = NULL
  note.ind = 1
  for (note.ind in note.inds) {
    nrows = which(shiny.dt$note.ind[rows] == note.ind)
    # Panel title comes from the note label of the note's first row
    note.name = shiny.dt$note.label[rows[nrows[1]]]
    #nli = view.li[nrows]
    collapseId = paste0("collapse_note_",note.ind)
    collapsePanelId = paste0("collapse_panel_note_",note.ind)
    panel = do.call("bsCollapsePanel",
      c(list(title=note.name, value = collapsePanelId),
        view.li[nrows]))
    ui = bsCollapse(open = NULL, id = collapseId, panel)
    # First element of the note becomes the collapse ui ...
    view.li[[nrows[1]]] = ui
    # ... the remaining elements are marked for removal
    remove.rows = c(remove.rows, nrows[-1])
  }
  if (length(remove.rows)>0)
    view.li = view.li[-remove.rows]
  view.li
}
# Make the tabPanel ui for each exercise and store the list as ps$ex.ui.li.
#
# ex.inds: indices of the exercises to build; defaults to all exercise
#          indices in ps$edt (optionally restricted to ps$shiny.ex.inds).
# Returns the list of exercise uis invisibly.
make.ex.ui.li = function(ex.inds = NULL, ps = get.ps()) {
  restore.point("make.ex.ui.li")
  # (removed unused locals `shiny.dt` and `cdt`)
  edt = ps$edt
  if (is.null(ex.inds)) {
    ex.inds = setdiff(unique(edt$ex.ind),0)
    if (!is.null(ps$shiny.ex.inds))
      ex.inds = intersect(ex.inds, ps$shiny.ex.inds)
  }
  ps$ex.ui.li = lapply(ex.inds, make.ex.ui)
  invisible(ps$ex.ui.li)
}
# Wrap the inner problem-set ui into a full navbar page and register the
# packaged highlight.js assets as a shiny static resource.
#
# inner: the inner ui (typically the tabsetPanel built by make.rtutor.ui)
# title: title of the navbar page
# Returns the navbarPage ui object (invisibly, matching the original
# assignment-as-last-expression behavior).
make.rtutor.page.ui = function(inner, ps = get.ps(), title="RTutor") {
  # WARNING: If highlightjs cannot be loaded, whole problem set
  # fails to work (very hard to detect bug)
  # Link to local highlightjs version.
  # NOTE(review): rtutor.html.ressources() resolves the same assets from
  # package 'RTutor3' -- confirm which package name is correct.
  dir = paste0(system.file('www', package='RTutor2'),"/highlightjs")
  # Reuse the computed path instead of recomputing the same expression
  addResourcePath('highlightjs', dir)
  ret = navbarPage(title, header=
    tags$head(
      tags$script(src = 'highlightjs/highlight.min.js',
                  type = 'text/javascript'),
      tags$script(src = 'highlightjs/languages/r.min.js',
                  type = 'text/javascript'),
      tags$link(rel = 'stylesheet', type = 'text/css',
                href = 'highlightjs/styles/github.min.css')
    ),
    tabPanel(ps$ps.name, mathJaxRTutor(inner))
  )
  invisible(ret)
}
# JavaScript snippet that applies highlight.js syntax highlighting to all
# R code blocks (<pre><code class="r">) currently on the page.
highlight.code.script = function() {
  tags$script("$('pre code.r').each(function(i, e) {hljs.highlightBlock(e)});")
}
# Register the packaged highlight.js assets under the 'highlightjs' url
# prefix and return the <head> tags that load the stylesheet and scripts.
#
# NOTE(review): the original computed `dir` from package 'RTutor2' but never
# used it, while addResourcePath used 'RTutor3'. The effective runtime path
# ('RTutor3') is kept and used consistently -- confirm the package name;
# make.rtutor.page.ui() still uses 'RTutor2'.
rtutor.html.ressources = function() {
  # WARNING: If highlightjs cannot be loaded, whole problem set
  # fails to work (very hard to detect bug)
  # Link to local highlightjs version
  dir = paste0(system.file('www', package='RTutor3'),"/highlightjs")
  addResourcePath('highlightjs', dir)
  tagList(
    tags$head(tags$link(rel = 'stylesheet', type = 'text/css',href = 'highlightjs/styles/mycode.css')),
    tags$head(tags$script(src = 'highlightjs/highlight.min.js',type = 'text/javascript')),
    tags$head(tags$script(src = 'highlightjs/languages/r.min.js',type = 'text/javascript'))
  )
}
# Assemble the complete problem-set ui: one tab per exercise plus the
# optional data-explorer, statistics, load/save and export tabs.
#
# just.inner: if TRUE return only the tabsetPanel without the surrounding
#             navbar page (for callers that embed the ui themselves).
make.rtutor.ui = function(shiny.dt = ps$shiny.dt,cdt=ps$cdt, ps=get.ps(), just.inner=FALSE) {
  restore.point("make.rtutor.ui")
  # Building these also attaches them to ps (ps$view.ui.li, ps$ex.ui.li)
  view.ui.li = make.view.ui.li(ps=ps)
  ex.ui.li = make.ex.ui.li(ps=ps)
  #shown.html(view.ui.li[[3]])
  li = list()
  if (isTRUE(ps$show.data.exp)) {
    li[[length(li)+1]] = tabPanel("Data Explorer",value="dataExplorerTabPanel", data.explorer.ui())
  }
  # Statistics tab is always shown (icon-only title)
  li[[length(li)+1]] = tabPanel(" ",value="statsPanel", uiOutput("uiProblemSetStats"), icon=icon(name="tasks", lib="font-awesome"))
  # NOTE(review): the option is spelled "show.load.savel.panel" -- possibly a
  # typo for "show.load.save.panel"; confirm against where the flag is set.
  if (isTRUE(ps$show.load.savel.panel)) {
    li[[length(li)+1]] = tabPanel("",value="loadSaveTabPanel", load.save.ui(), icon=icon(name="folder-open", lib="font-awesome"))
  }
  if (isTRUE(ps$show.export.panel)) {
    li[[length(li)+1]] = tabPanel("",value="exportTabPanel", export.ui(), icon=icon(name="download", lib="font-awesome"))
  }
  doc= do.call("tabsetPanel", c(
    list(id="exTabsetPanel"),ex.ui.li,li
  ))
  inner = doc
  if (just.inner) return(inner)
  ret = make.rtutor.page.ui(inner,ps=ps)
  return(ret)
}
# Push the pre-built ui of a single view into its uiOutput container.
#
# view.ind: index into ps$view.ui.li; container ids are "viewUI<ind>".
# session:  shiny session used by updateUI to push the content.
show.view.ui = function(view.ind, ps = get.ps(), session=ps$session) {
  restore.point("show.view.ui")
  # (removed leftover debugging: a special restore.point for view.ind == 3
  # and a commented-out browser() call)
  id = paste0("viewUI",view.ind)
  ui = ps$view.ui.li[[view.ind]]
  updateUI(session,id, ui)
}
# Return the list of pre-built view uis that belong to one exercise.
# Exercise 1 additionally owns all rows with ex.ind == 0 (content that
# appears before the first exercise).
get.view.ui.of.ex = function(ex.ind, ps=get.ps()) {
  restore.point("get.view.ui.of.ex")
  ex.col = ps$shiny.dt$ex.ind
  is.match = ex.col == ex.ind
  if (ex.ind==1) is.match = is.match | ex.col == 0
  rows = which(is.match)
  view.inds = setdiff(unique(ps$shiny.dt$view.ind[rows]),0)
  ps$view.ui.li[view.inds]
}
# Push every view ui belonging to exercise ex.ind into its container.
# Exercise 1 additionally owns all rows with ex.ind == 0.
show.view.ui.of.ex = function(ex.ind, ps = get.ps()) {
  restore.point("show.view.ui.of.ex")
  ex.col = ps$shiny.dt$ex.ind
  is.match = ex.col == ex.ind
  if (ex.ind==1) is.match = is.match | ex.col == 0
  view.inds = setdiff(unique(ps$shiny.dt$view.ind[which(is.match)]),0)
  for (view.ind in view.inds) {
    show.view.ui(view.ind, ps)
  }
}
show.ex.ui = function(ex.ind, ps=get.ps(), view.in.container=isTRUE(ps$view.in.container)) {
restore.point("show.ex.ui")
if (!view.in.container)
show.view.ui.of.ex(ex.ind)
chunk.inds = which(ps$cdt$ex.ind == ex.ind)
for (chunk.ind in chunk.inds) {
update.chunk.ui(chunk.ind)
}
} | /R/shiny_ui.r | no_license | skranz/RTutor3 | R | false | false | 8,804 | r | make.view.ui = function(view.ind, ps=get.ps()) {
restore.point("make.view.ui")
shiny.dt = ps$shiny.dt
cdt = ps$cdt
if (view.ind==1) {
rows = which(shiny.dt$view.ind == view.ind | shiny.dt$view.ind == 0)
} else {
rows = which(shiny.dt$view.ind == view.ind)
}
ui.li = lapply(rows, function(i) {
#restore.point("hdkgkdhighoifhg")
if (shiny.dt$type[i]=="chunk") {
chunk.ind = which(cdt$chunk.name==shiny.dt$chunk.name[i])
ui=make.initial.chunk.ui(chunk.ind)
award.name = cdt$award.name[chunk.ind]
if (!is.na(award.name)) {
award.ui.id = get.award.ui.id(award.name)
return(list(ps$cdt$ui[[chunk.ind]],uiOutput(award.ui.id)))
}
return(ps$cdt$ui[[chunk.ind]])
} else if (shiny.dt$type[i]=="widget") {
wid = ps$rps$widgets[[ shiny.dt$widget.id[i] ]]
Widget = ps$rps$Widgets[[wid$rta$type]]
html = Widget$shiny.ui.fun(wid)
row = which(ps$rps$wid.dt$id == wid$rta$id)
award.name = ps$rps$wid.dt$award.name[row]
if (!is.na(award.name)) {
award.ui.id = get.award.ui.id(award.name)
return(list(mathJaxRTutor(html),uiOutput(award.ui.id)))
}
return(mathJaxRTutor(html))
} else {
#return(shiny.dt$html[[i]])
return(mathJaxRTutor(shiny.dt$html[[i]]))
}
})
ui.li = adapt.view.li.for.notes(ui.li,shiny.dt, rows)
#do.call("fluidRow", ui.li)
w = 12-ps$left.margin-ps$right.margin
my.ui = do.call("column", c(list(width=w, offset=ps$left.margin),ui.li))
fluidRow(my.ui)
}
# Make the default ui for each view and add it view.ui.li to ps
make.view.ui.li = function(view.inds = NULL,ps=get.ps()) {
restore.point("make.view.ui.li")
shiny.dt = ps$shiny.dt
if (is.null(view.inds))
view.inds = setdiff(unique(ps$shiny.dt$view.ind),0)
#make.view.ui(1)
view.ui.li = lapply(view.inds, make.view.ui)
ps$view.ui.li = view.ui.li
invisible(view.ui.li)
}
make.ex.ui = function(ex.ind, ps = get.ps(), session=ps$session, view.in.container=isTRUE(ps$view.in.container)) {
restore.point("make.ex.ui")
shiny.dt = ps$shiny.dt
cdt = ps$cdt
if (ex.ind==1) {
rows = which(shiny.dt$ex.ind == ex.ind | shiny.dt$ex.ind == 0)
} else {
rows = which(shiny.dt$ex.ind == ex.ind)
}
view.inds = unique(shiny.dt$view.ind[rows])
ex.name = ps$edt$ex.name[ex.ind]
if (view.in.container) {
li = lapply(view.inds, function(view.ind) {
outputName = paste0("viewUI",view.ind)
uiOutput(outputName)
})
} else {
li = ps$view.ui.li[view.inds]
}
# Button for next exercise
if (ex.ind < max(ps$cdt$ex.ind)) {
btnId = paste0("nextExBtn", ex.ind)
nextExBtn = actionButton(btnId,"Go to next exercise...")
li = c(li, list(nextExBtn))
buttonHandler(btnId, ex.ind=ex.ind, function(session,ex.ind,...) {
cat("\nnextExBtn pressed...")
updateTabsetPanel(session, inputId="exTabsetPanel", selected = paste0("exPanel",ex.ind+1))
})
}
do.call("tabPanel",
c(list(title=ex.name, value=paste0("exPanel",ex.ind)), li)
)
}
adapt.view.li.for.notes = function(view.li, shiny.dt, rows) {
note.inds = setdiff(unique(shiny.dt$note.ind[rows]),0)
if (length(note.inds)==0) return(view.li)
restore.point("adapt.view.li.for.notes")
remove.rows = NULL
note.ind = 1
for (note.ind in note.inds) {
nrows = which(shiny.dt$note.ind[rows] == note.ind)
note.name = shiny.dt$note.label[rows[nrows[1]]]
#nli = view.li[nrows]
collapseId = paste0("collapse_note_",note.ind)
collapsePanelId = paste0("collapse_panel_note_",note.ind)
panel = do.call("bsCollapsePanel",
c(list(title=note.name, value = collapsePanelId),
view.li[nrows]))
ui = bsCollapse(open = NULL, id = collapseId, panel)
view.li[[nrows[1]]] = ui
remove.rows = c(remove.rows, nrows[-1])
}
if (length(remove.rows)>0)
view.li = view.li[-remove.rows]
view.li
}
make.ex.ui.li = function(ex.inds = NULL, ps = get.ps()) {
restore.point("make.ex.ui.li")
shiny.dt = ps$shiny.dt
cdt = ps$cdt
edt = ps$edt
if (is.null(ex.inds)) {
ex.inds = setdiff(unique(edt$ex.ind),0)
if (!is.null(ps$shiny.ex.inds))
ex.inds = intersect(ex.inds, ps$shiny.ex.inds)
}
ps$ex.ui.li = lapply(ex.inds, make.ex.ui)
invisible(ps$ex.ui.li)
}
make.rtutor.page.ui = function(inner, ps = get.ps(), title="RTutor") {
# WARNING: If highlightjs cannot be loaded, whole problem set
# fails to work (very hard to detect bug)
# Link to local highlightjs version
dir = paste0(system.file('www', package='RTutor2'),"/highlightjs")
addResourcePath('highlightjs', paste0(system.file('www', package='RTutor2'),"/highlightjs"))
ret = navbarPage(title, header=
tags$head(
tags$script(src = 'highlightjs/highlight.min.js',
type = 'text/javascript'),
tags$script(src = 'highlightjs/languages/r.min.js',
type = 'text/javascript'),
tags$link(rel = 'stylesheet', type = 'text/css',
href = 'highlightjs/styles/github.min.css')
),
tabPanel(ps$ps.name, mathJaxRTutor(inner))
)
}
highlight.code.script = function() {
tags$script("$('pre code.r').each(function(i, e) {hljs.highlightBlock(e)});")
}
rtutor.html.ressources = function() {
# WARNING: If highlightjs cannot be loaded, whole problem set
# fails to work (very hard to detect bug)
# Link to local highlightjs version
dir = paste0(system.file('www', package='RTutor2'),"/highlightjs")
addResourcePath('highlightjs', paste0(system.file('www', package='RTutor3'),"/highlightjs"))
tagList(
tags$head(tags$link(rel = 'stylesheet', type = 'text/css',href = 'highlightjs/styles/mycode.css')),
tags$head(tags$script(src = 'highlightjs/highlight.min.js',type = 'text/javascript')),
tags$head(tags$script(src = 'highlightjs/languages/r.min.js',type = 'text/javascript'))
#tags$head(tags$script(src = 'highlightjs/highlight.pack.js',type = 'text/javascript')),
#tags$head(tags$script("hljs.initHighlightingOnLoad();"))
# HTML('
# <link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.1.0/styles/default.min.css">
# <script src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.1.0/highlight.min.js"></script>
# <script>hljs.initHighlightingOnLoad();</script>
# ')
)
}
make.rtutor.ui = function(shiny.dt = ps$shiny.dt,cdt=ps$cdt, ps=get.ps(), just.inner=FALSE) {
restore.point("make.rtutor.ui")
view.ui.li = make.view.ui.li(ps=ps)
ex.ui.li = make.ex.ui.li(ps=ps)
#shown.html(view.ui.li[[3]])
li = list()
if (isTRUE(ps$show.data.exp)) {
li[[length(li)+1]] = tabPanel("Data Explorer",value="dataExplorerTabPanel", data.explorer.ui())
}
li[[length(li)+1]] = tabPanel(" ",value="statsPanel", uiOutput("uiProblemSetStats"), icon=icon(name="tasks", lib="font-awesome"))
if (isTRUE(ps$show.load.savel.panel)) {
li[[length(li)+1]] = tabPanel("",value="loadSaveTabPanel", load.save.ui(), icon=icon(name="folder-open", lib="font-awesome"))
}
if (isTRUE(ps$show.export.panel)) {
li[[length(li)+1]] = tabPanel("",value="exportTabPanel", export.ui(), icon=icon(name="download", lib="font-awesome"))
}
doc= do.call("tabsetPanel", c(
list(id="exTabsetPanel"),ex.ui.li,li
))
inner = doc
if (just.inner) return(inner)
ret = make.rtutor.page.ui(inner,ps=ps)
return(ret)
}
# Show a view ui
show.view.ui = function(view.ind, ps = get.ps(), session=ps$session) {
restore.point("show.view.ui")
if (view.ind==3)
restore.point("show.view.ui.3")
id = paste0("viewUI",view.ind)
ui = ps$view.ui.li[[view.ind]]
#browser()
updateUI(session,id, ui)
}
get.view.ui.of.ex = function(ex.ind, ps=get.ps()) {
restore.point("get.view.ui.of.ex")
if (ex.ind==1) {
rows = which(ps$shiny.dt$ex.ind == ex.ind | ps$shiny.dt$ex.ind == 0)
} else {
rows = which(ps$shiny.dt$ex.ind == ex.ind)
}
view.inds = setdiff(unique(ps$shiny.dt$view.ind[rows]),0)
ps$view.ui.li[view.inds]
}
show.view.ui.of.ex = function(ex.ind, ps = get.ps()) {
restore.point("show.view.ui.of.ex")
if (ex.ind==1) {
rows = which(ps$shiny.dt$ex.ind == ex.ind | ps$shiny.dt$ex.ind == 0)
} else {
rows = which(ps$shiny.dt$ex.ind == ex.ind)
}
view.inds = setdiff(unique(ps$shiny.dt$view.ind[rows]),0)
for (view.ind in view.inds)
show.view.ui(view.ind, ps)
}
# Show an exercise: render its views (unless views live in their own
# output containers and are filled elsewhere) and refresh the ui of every
# chunk that belongs to the exercise.
show.ex.ui = function(ex.ind, ps=get.ps(), view.in.container=isTRUE(ps$view.in.container)) {
  restore.point("show.ex.ui")
  if (!view.in.container) {
    show.view.ui.of.ex(ex.ind)
  }
  for (chunk.ind in which(ps$cdt$ex.ind == ex.ind)) {
    update.chunk.ui(chunk.ind)
  }
}
#####################################################################
# moments.R
#
# Contains the functions for creating the moments of the data to target
# 09jun2017
# Philip Barrett, Washington DC
#####################################################################
target.moms <- function( cty, breaks=NULL, labs=NULL, bo.dta=TRUE ){
  # Creates the target moments for the country.
  #
  # cty:    country code; data is read from ./data/<cty>.csv
  # breaks: period boundaries (Dates); default splits 1960-2018 into
  #         decades, with the 2000s cut at 2009
  # labs:   labels for the periods (one fewer than breaks)
  # bo.dta: if TRUE also return the cleaned data frame
  # Returns the moments matrix (periods x moments), or
  # list(moms=, dta=) when bo.dta is TRUE.
  if( is.null(breaks) )
    breaks <- as.Date( c( '1960/01/01', '1970/01/01', '1980/01/01',
                          '1990/01/01', '2000/01/01', '2009/01/01', '2018/01/01' ) )
  if( is.null(labs) ) labs <- c('60s', '70s', '80s', '90s', '2000s pre-09', 'post-09')
  # Default breaks and labels
  dta <- read.csv(paste0('./data/',cty, '.csv'))
  dta$date <- as.Date( dta$date, "%m/%d/%Y" )
  # Read in and clean the data
  # Spreads of the 1y/5y rates over the current risk-free rate; the /400
  # presumably converts annualized percent to a quarterly decimal rate --
  # TODO confirm units
  dta$spd.1yr <- ( dta$Int_1y - dta$rfrA ) / 400
  dta$spd.5yr <- ( dta$Int_5y - dta$rfrA ) / 400
  # The spreads over the current risk free rate
  v.moms <- c('spd.1yr','spd.5yr','cnlb_gdp','pb_gdp')
  # Per-period means of the four target series
  dec.ave <- do.call( rbind, by( dta[,v.moms], cut(dta$date,breaks,labs),
                                 function(x) apply( x, 2, mean, na.rm=TRUE ) ) )
  # Per-period standard deviation of the primary balance
  s.var <- c(by( dta[,'pb_gdp'], cut(dta$date,breaks,labs), sd, na.rm=TRUE ) )
  # Per-period correlation of the primary balance with the risk-free rate
  r.cor <- c(by( dta, cut(dta$date,breaks,labs),
                 function(x) cor(x$pb_gdp, x$rfrA, use='complete.obs' ) ) )
  # ... and with nominal GDP growth
  g.cor <- c(by( dta, cut(dta$date,breaks,labs),
                 function(x) cor(x$pb_gdp, x$ngdp_pch, use='complete.obs' ) ) )
  moms <- cbind( dec.ave, s.var, r.cor, g.cor )
  # The moments
  if(bo.dta){
    return(list(moms=moms, dta=dta))
  }
  return( moms )
}
mom.series.plot <- function( cty ){
  # Plot the four target moment series for country `cty` in a 2x2 grid:
  # each series as a blue line against the date, with a zero reference line.
  # Reads ./data/<cty>.csv, as in target.moms().
  dta <- read.csv(paste0('./data/', cty, '.csv'))
  dta$date <- as.Date(dta$date, "%m/%d/%Y")
  # Spreads over the current risk-free rate (same /400 scaling as in
  # target.moms)
  dta$spd.1yr <- (dta$Int_1y - dta$rfrA) / 400
  dta$spd.5yr <- (dta$Int_5y - dta$rfrA) / 400
  series.names <- c('spd.1yr', 'spd.5yr', 'cnlb_gdp', 'pb_gdp')
  par(mfrow = c(2, 2))
  for (nm in series.names) {
    plot(dta$date, dta[[nm]], type = 'l', lwd = 2, main = nm, col = 'blue')
    abline(h = 0)
  }
  # Restore the single-panel layout
  par(mfrow = c(1, 1))
}
surp.fit <- function( sol.o, interp, params, dta, debt='cnlb_gdp' ){
  # Creates the sequence of surpluses required to fit the specified debt
  # series in the interpreted simulation.
  #
  # sol.o:  outer model solution (d.bar, d.grid, P, Q)
  # interp: interpreted data; interp$s.idx is the exogenous state index
  #         for each period
  # dta:    data frame containing the debt series named by `debt`
  # Returns the vector of fitted surpluses, one per transition.
  pds <- nrow(dta) # Number of periods
  s.init <- dta[[debt]][-pds] * ( params$R / params$G )[interp$s.idx] - dta[[debt]][-1]
  # Initial guess, just from debt levels
  s <- s.init
  # Initialize the solution
  f.sim.i <- function(s.i){
    # Residual of period i's simulated debt when the i-th surplus is set
    # to s.i. Reads the loop variable `i` and vector `s` from the
    # enclosing frame; earlier entries of `s` are already fitted.
    s[i] <- s.i
    return( sim_core( c(1,interp$s.idx), sol.o$d.bar, sol.o$d.grid, sol.o$P, sol.o$Q,
                      params, dta[[debt]][1], TRUE, c(0,s) )[i+1,'d.prime'] -
              dta[[debt]][1+i] )
  }
  # The error on the debt series
  # Solve period by period with a one-dimensional root finder
  for( i in 1:length(interp$s.idx) ){
    sol <- nleqslv( s[i], f.sim.i )
    s[i] <- sol$x
  }
  return(s)
}
rf.fit <- function( params, dta, rf.debt=600, rf.level=12, v.surp=NULL,
                    d.init=NULL, sol=NULL, sol.o=NULL, init=NULL, print.level=0, ... ){
  # Fits the parameters for the shifts and the lower part of the surplus
  # function based on a risk-free model.
  #
  # rf.debt / rf.level: upper coefficients of the surplus function
  #   (maximum debt / maximum surplus) held fixed during the fit.
  # v.surp: fitted surplus series; computed via surp.fit() when NULL.
  # init:   starting point for the optimizer (lower coeffs + shifts).
  # Returns a list with the fitted v.s.coeff, s.shift, surplus sd and
  # mean-abs deviation, plus p/d/err from the inner solution.
  #
  # NOTE(review): this function reads `interp` (here and inside err.fn)
  # without taking it as an argument, so it must exist in the calling or
  # global environment -- consider passing it explicitly.
  ### 1. Solve the risk-free model ###
  params$v.s.coeff <- c( 0, rf.debt/2, rf.debt, rf.level, rf.level )
  n.X <- length(params$R)
  params$s.shift <- rep(0,n.X)
  # A surplus function that puts no restriction on debt
  if( is.null(init) ) init <- c( 0, rf.debt/4, 0, rep(0,n.X-1) )
  # plot.z( sol$p, sol$d, params, xlim=c(0,max(sol$p)*1.2), ylim=c(0,max(sol$p)*1.2) )
  # plot.sol( sol.o )
  # Plots
  if(is.null(sol)) sol <- sol.wrapper(params, cbind(0,rep(rf.debt,n.X)), plot.on = FALSE )
  # Solve for the (approximate) debt levels
  ### 2. Create the surplus series ###
  if( is.null(v.surp) ){
    if(is.null(sol.o)) sol.o <- outer.wrapper( sol, params, print_level = 1 )
    # Outer solution and errors
    v.surp <- surp.fit( sol.o, interp, params, dta, ... )
    # Create the first pass at the surplus series
  }else{
    d.max <- 10000
    # NOTE(review): d.max is assigned but never used below.
    # Trivial risk-free outer solution: repayment prob 1, price 1/R
    sol.o <- list( d.bar=sol$d,
                   d.grid=c(0,max(sol$d)), P=matrix(1,ncol=2,nrow=length(params$R)),
                   Q=matrix( 1/params$R, ncol=2, nrow=length(params$R) ) )
  }
  if( is.null(d.init) ) d.init <- dta$cnlb_gdp_lag[1]
  ### 3. Create the error function ###
  params.cpy <- params
  # Probability distribution of states: fraction of the sample spent in
  # each exogenous state.
  # NOTE(review): nrow() is applied to interp$s.idx, which is indexed like
  # a vector elsewhere -- confirm it is a one-column matrix here.
  s.idx.p <- sapply(1:n.X, function(i) sum( interp$s.idx == i ) ) / nrow(interp$s.idx)
  err.fn <- function(x, s.dev=FALSE, aad=FALSE ){
    # The error function: x = (3 lower surplus coefficients, shifts for
    # states 2..n.X). Returns sum of squared residuals by default, or the
    # residual sd / mean abs deviation when requested.
    params.cpy$v.s.coeff <- c( max(x[1],0), max( x[1], min( x[2], rf.debt ) ),
                               rf.debt, min(x[3],rf.level) , rf.level )
    # Lower parameters for surplus function
    params.cpy$s.shift <- c( -sum(x[-(1:3)] * s.idx.p[-1] ) / s.idx.p[1], x[-(1:3)] )
    # Shift parameters average to zero over the sample
    sim <- sim_core( c(1,interp$s.idx), sol.o$d.bar, sol.o$d.grid, sol.o$P, sol.o$Q,
                     params.cpy, d.init, TRUE, c(0,0,v.surp) )
    # Create the simulation (just need surplus function values but this is
    # fine too)
    if(s.dev) return(sd(sim[-1,'eps']))
    if(aad) return(mean(abs(sim[-1,'eps'])))
    return( sum( sim[-1,'eps'] ^ 2 ) )
  }
  ### 4. Minimize the error function ###
  control <- list(trace=print.level, maxit=100000)
  opt <- optim( init, err.fn, control=control )
  ## NEED TO FIT THE SHIFTS BETTER ##
  # control <- list(print_level=print.level, maxeval=1000, tol.abs=1e-04, algorithm="NLOPT_LN_COBYLA")
  # sol <- nloptr( init, err.fn, opts=control )
  # Minimize the error
  out <- list()
  # Re-apply the same clamping as inside err.fn to the optimum
  out$v.s.coeff <- c( max(opt$par[1],0), max( opt$par[1], min( opt$par[2], rf.debt ) ),
                      rf.debt, min( opt$par[3], rf.level), rf.level )
  out$s.shift <- c( -sum(opt$par[-(1:3)] * s.idx.p[-1] ) / s.idx.p[1], opt$par[-(1:3)] )
  out$s.shift[s.idx.p==0] <- 0
  out$surp.sd <- err.fn(opt$par, s.dev=TRUE )
  out$aad <- err.fn(opt$par, aad=TRUE )
  out$p <- sol$p
  out$d <- sol$d
  out$v.surp <- v.surp
  out$err <- sol$err
  # out$v.s.coeff <- c( max(sol$solution[1],0), max( sol$solution[1], min( sol$solution[2], rf.debt ) ),
  #                     rf.debt, min( sol$solution[3], rf.level), rf.level )
  # out$s.shift <- c( -sum(sol$solution[-(1:3)] * s.idx.p[-1] ) / s.idx.p[1], sol$solution[-(1:3)] )
  # out$surp.sd <- sqrt( sol$objective )
  # Format output
  return(out)
}
price.diff.1.5 <- function( sol.o, params, sim, dta ){
  # Computes the price difference between the 1 and 5 year bonds and
  # compares it to the data analogue.
  #
  # Returns a data frame with the data prices (q.dta), simulated prices
  # (q.sim) and the 5y-1y difference for both (diff.dta, diff.sim),
  # one row per date.
  q.dta <- sapply( c(1,5), function(i) ( 1 + dta[,paste0('Int_',i,'y')] / 100 ) ^ -i )
  # The price of the assets in the data
  l.QQ <- lapply( c(1,5),
                  function(i){
                    # lambda: bond decay parameter; presumably chosen so the
                    # bond mimics an i-year maturity at a quarterly
                    # frequency -- TODO confirm
                    lambda <- 1 - 1 / (4*i)
                    q_hat_mat( sol.o$P, sol.o$d.bar, sol.o$Q, sol.o$Q, sol.o$d.grid, params$R,
                               params$G, params$s.shift, lambda, params$phi, sol.o$e.grid,
                               params$v.s.coeff, params$tri, matrix(0), FALSE, params$trans, 0 )
                  } )
  # Model price schedules, interpolated at each simulated debt choice
  q.sim <- sapply( l.QQ, function(QQ)
    apply( sim, 1, function(x)
      approx( sol.o$d.grid, QQ[x['idx'],], x['d.prime'] )$y ) )
  # Create the simulation of prices
  out <- data.frame( date=dta$date, q.dta, diff.dta=apply(q.dta,1,diff),
                     q.sim, diff.sim=apply(q.sim,1,diff) )
  # Create the output
  return(out)
}
price.diff.1.5.err <- function( sol.o, params, sim, dta ){
  # Sum of squared deviations between the observed and simulated
  # 1y/5y bond-price differences (NA rows are dropped).
  pd.tab <- price.diff.1.5(sol.o, params, sim, dta)
  sq.dev <- (pd.tab$diff.dta - pd.tab$diff.sim) ^ 2
  sum(sq.dev, na.rm = TRUE)
}
price.diff.min <- function( params, interp, dta, sol, sol.o, h.0=c(25,-4), maxit=50 ){
  # Minimizes the price difference error.
  #
  # Coordinate-descent-style search over the upper surplus-function
  # parameters v.s.coeff[3] (indexed j=1) and v.s.coeff[5] (j=2):
  # perturb one by step h[j], re-solve the model, refit the lower
  # parameters via rf.fit(), and accept the move only if the 1y/5y
  # price-difference error does not increase. Steps are halved on
  # failure and reset when a parameter is abandoned.
  #
  # h.0:   initial steps for (v.s.coeff[3], v.s.coeff[5])
  # maxit: maximum number of outer iterations
  # Returns list(sol=, sol.o=): the last accepted inner/outer solutions.
  #
  # NOTE(review): reads `tgt$dta` in step 4 without `tgt` being an
  # argument (free variable from the calling/global environment), even
  # though `dta` is passed -- confirm they are meant to be the same data.
  # Also: params.out and i.j.max are assigned but never used.
  #### 1. Set up ####
  trial.diff <- rep( Inf, 3 ) ; trial.diff.tol <- 1e-04
  it <- 1 ; h <- h.0
  v.s.coeff <- params$v.s.coeff ; j <- 2 ; # Start by reducing the level first
  i.j <- 1 ; i.j.max <- 5 ; err.old <- Inf ; fail.idx <- 0
  sol.out <- sol ; sol.o.out <- sol.o
  while( it < maxit & all(trial.diff > trial.diff.tol ) ){
    #### 2. Create the trial parameters ####
    params$v.s.coeff[c(3,5)][j] <- v.s.coeff[c(3,5)][j] + h[j]
    params$v.s.coeff[2] <- min( params$v.s.coeff[c(2,3)])
    #### 3. Solve the model & check solution ####
    sol <- tryCatch( sol.wrapper( params, cbind(sol.out$p,sol.out$d) ),
                     error=function(e) list(p='fail', err=1) )
    if( any( sol$p == 'fail' ) | max(abs(sol$err)) > 1e-05 ){
      # Solution failed or is inaccurate: halve the step and report
      h[j] <- h[j] / 2
      message("\n********************************************")
      message(" Parameter iteration report: ")
      message(" Iteration ", it )
      message(" j = ", j, ", i.j = ", i.j )
      message(" Model solution failed" )
      message(" h = ", paste0( round(h,2), sep=', ' ) )
      message(" max(abs(sol$err)) = ", max(abs(sol$err)) )
      message("********************************************\n")
      if( h[j] < .1 * h.0[j] ){ # Escape
        h[j] <- h.0[j] # Reset step length
        j <- if( j==2 ) 1 else j + 1 # Increment j
        fail.idx <- fail.idx + 1
        # Give up entirely after three consecutive abandoned parameters
        if( fail.idx == 3 ){
          message("\n********************************************")
          message(" Failing out")
          message("********************************************")
          return(list( sol=sol.out, sol.o=sol.o.out ))
        }
        i.j <- i.j + 1
      }
    }else{
      #### 4. Create outer solution and simulate ####
      sol.o <- outer.wrapper( sol, params, Q.init = sol.o.out$Q,
                              d.grid.init=sol.o.out$d.grid )
      v.surp <- surp.fit(sol.o,interp,params,tgt$dta)
      # Create the fitted surpluses
      sim <- sim_core( c(1,interp$s.idx), sol.o$d.bar, sol.o$d.grid,
                       sol.o$P, sol.o$Q, params, tgt$dta$cnlb_gdp_lag[1],
                       TRUE, c(0,0,v.surp) )
      # The simulation
      #### 5. Create prices, measure error ####
      err <- price.diff.1.5.err( sol.o, params, sim, dta )
      #### 6. Accept or reject trial parameters ####
      bo.improve <- err <= err.old + 1e-8
      # trial.diff[j] <-
      #### 7. Create new step or move on to next parameter ####
      if( bo.improve ){
        trial.diff[j] <- abs( v.s.coeff[c(3,5)][j] - params$v.s.coeff[c(3,5)][j] )
        # Measure the difference in the updated coefficients.
        v.s.coeff[c(3,5)][j] <- params$v.s.coeff[c(3,5)][j]
        # Store the improved coeffs
        message("\n********************************************")
        message(" Parameter iteration report: ")
        message(" Iteration ", it )
        message(" j = ", j, ", i.j = ", i.j )
        message(" bo.improve = ", if(bo.improve) 'True' else 'False' )
        message(" v.s.coeff = ", paste0( round(v.s.coeff,2), sep=', ' ) )
        message(" h = ", paste0( round(h,2), sep=', ' ) )
        message(" err = ", err )
        message(" err.old = ", err.old )
        message(" trial.diff[j] = ", round(trial.diff[j]) )
        message("********************************************\n")
        fitted <- rf.fit( params, dta, v.s.coeff[3], v.s.coeff[5], v.surp,
                          sol, sol.o, init=c( params$v.s.coeff[c(1,2,4)], params$s.shift[-1] ) )
        # Refit the lower parameters
        params$v.s.coeff <- v.s.coeff <- fitted$v.s.coeff
        params$s.shift <- fitted$s.shift
        params$surp.sd <- fitted$surp.sd
        # Paste to parameters
        sol.out <- sol ; sol.o.out <- sol.o ; params.out <- params
        message("\n********************************************")
        message(" Refitting lower parameters: ")
        message(" v.s.coeff = ", paste0( round(v.s.coeff,2), sep=', ' ) )
        message(" s.shift = ", paste0( round(params$s.shift,2), sep=', ' ) )
        message(" sd.surp = ", round( params$surp.sd, 4 ) )
        message("********************************************\n")
        # Diagnostic plot: fitted surplus function with simulated points
        plot.surp( params, TRUE, c(0,1.2*params$v.s.coeff[3]), TRUE,
                   ylim=range( c( params$v.s.coeff[4:5], sim[,'s'] ) ) )
        points( dta$cnlb_gdp_lag, sim[,'s'], pch=16, cex=.75, col=sim[,'idx'] )
        i.j <- 1 # Reset j-trial counter
        h[j] <- h.0[j] # Update h[j]
        j <- if( j==2 ) 1 else j + 1 # Increment j
        err.old <- price.diff.1.5.err( sol.o, params, sim, dta )
        # Store error with new lower parameters
        fail.idx <- 0
      }else{
        message("\n********************************************")
        message(" Parameter iteration report: ")
        message(" Iteration ", it )
        message(" j = ", j, ", i.j = ", i.j )
        message(" bo.improve = ", if(bo.improve) 'True' else 'False' )
        message(" h = ", paste0( round(h,2), sep=', ' ) )
        message(" err = ", err )
        message(" err.old = ", err.old )
        message("********************************************\n")
        h[j] <- h[j] / 2 # Decrease the step
        i.j <- i.j + 1 # Increase j-trial counter
        if( h[j] < .1 * h.0[j] ){ # Escape
          h[j] <- h.0[j] # Reset step length
          j <- if( j==2 ) 1 else j + 1 # Increment j
        }
      }
    }
    it <- it + 1
  }
  return( list( sol=sol.out, sol.o=sol.o.out ) )
}
| /R/moments.R | no_license | philipbarrett/debtLimit | R | false | false | 14,199 | r | #####################################################################
# moments.R
#
# Contains the functions for creating the moments of the data to target
# 09jun2017
# Philip Barrett, Washington DC
#####################################################################
target.moms <- function( cty, breaks=NULL, labs=NULL, bo.dta=TRUE ){
# Creates the target moments for the country
if( is.null(breaks) )
breaks <- as.Date( c( '1960/01/01', '1970/01/01', '1980/01/01',
'1990/01/01', '2000/01/01', '2009/01/01', '2018/01/01' ) )
if( is.null(labs) ) labs <- c('60s', '70s', '80s', '90s', '2000s pre-09', 'post-09')
# Default breaks and labels
dta <- read.csv(paste0('./data/',cty, '.csv'))
dta$date <- as.Date( dta$date, "%m/%d/%Y" )
# Read in and clean the data
dta$spd.1yr <- ( dta$Int_1y - dta$rfrA ) / 400
dta$spd.5yr <- ( dta$Int_5y - dta$rfrA ) / 400
# The spreads over the current risk free rate
v.moms <- c('spd.1yr','spd.5yr','cnlb_gdp','pb_gdp')
dec.ave <- do.call( rbind, by( dta[,v.moms], cut(dta$date,breaks,labs),
function(x) apply( x, 2, mean, na.rm=TRUE ) ) )
s.var <- c(by( dta[,'pb_gdp'], cut(dta$date,breaks,labs), sd, na.rm=TRUE ) )
r.cor <- c(by( dta, cut(dta$date,breaks,labs),
function(x) cor(x$pb_gdp, x$rfrA, use='complete.obs' ) ) )
g.cor <- c(by( dta, cut(dta$date,breaks,labs),
function(x) cor(x$pb_gdp, x$ngdp_pch, use='complete.obs' ) ) )
moms <- cbind( dec.ave, s.var, r.cor, g.cor )
# The moments
if(bo.dta){
return(list(moms=moms, dta=dta))
}
return( moms )
}
mom.series.plot <- function( cty ){
# Plots the four target moment series (1y spread, 5y spread, debt/GDP and
# primary balance/GDP) for a country in a 2x2 panel layout.
#
# Arguments:
#   cty  country code; used to locate the data file ./data/<cty>.csv
#
# Returns: invisibly NULL; called for its plotting side effect.
  dta <- read.csv(paste0('./data/',cty, '.csv'))
  dta$date <- as.Date( dta$date, "%m/%d/%Y" )
      # Read in and clean the data
  dta$spd.1yr <- ( dta$Int_1y - dta$rfrA ) / 400
  dta$spd.5yr <- ( dta$Int_5y - dta$rfrA ) / 400
      # The spreads over the current risk free rate
  v.moms <- c('spd.1yr','spd.5yr','cnlb_gdp','pb_gdp')
  old.par <- par(mfrow=c(2,2))
  on.exit(par(old.par), add=TRUE)
      # Restore the caller's graphics layout even if a plot errors part-way
      # (the original hard-reset to mfrow=c(1,1) was skipped on error and
      # clobbered any pre-existing layout)
  for(vbl in v.moms){
    plot( dta$date, dta[[vbl]], type='l', lwd=2, main=vbl, col='blue' )
    abline(h=0)
  }
  invisible(NULL)
}
surp.fit <- function( sol.o, interp, params, dta, debt='cnlb_gdp' ){
# Creates the sequence of surpluses required to fit the specified debt series in
# the interpreted simulation.
#
# Arguments:
#   sol.o   outer solution object (uses d.bar, d.grid, P, Q)
#   interp  interpolation object; interp$s.idx holds the exogenous state index
#           for each period of the data
#   params  model parameter list (uses R, G, plus whatever sim_core reads)
#   dta     data frame containing the debt series to match
#   debt    name of the debt column in dta (default 'cnlb_gdp')
#
# Returns: numeric vector of surpluses, one per state index, such that the
#   simulated debt path reproduces dta[[debt]] period by period.
  pds <- nrow(dta)    # Number of periods
  s.init <- dta[[debt]][-pds] * ( params$R / params$G )[interp$s.idx] - dta[[debt]][-1]
      # Initial guess, just from debt levels (growth-adjusted accumulation
      # minus the observed change in debt)
  s <- s.init
      # Initialize the solution
  f.sim.i <- function(s.i){
    # Debt error in period i when the i-th surplus is set to s.i.
    # NOTE: reads the loop index `i` and the running surplus vector `s` from
    # the enclosing environment, so this closure is only valid inside the
    # loop below, after `i` has been bound.
    s[i] <- s.i
    return( sim_core( c(1,interp$s.idx), sol.o$d.bar, sol.o$d.grid, sol.o$P, sol.o$Q,
                      params, dta[[debt]][1], TRUE, c(0,s) )[i+1,'d.prime'] -
              dta[[debt]][1+i] )
  }
      # The error on the debt series
  for( i in 1:length(interp$s.idx) ){
    # Solve sequentially: period i's surplus is pinned down taking the
    # already-solved surpluses for earlier periods as given.
    sol <- nleqslv( s[i], f.sim.i )
    s[i] <- sol$x
  }
  return(s)
}
rf.fit <- function( params, dta, rf.debt=600, rf.level=12, v.surp=NULL,
                    d.init=NULL, sol=NULL, sol.o=NULL, init=NULL, print.level=0, ... ){
# Fits the parameters for the shifts and the lower part of the surplus function
# based on a risk-free model.
#
# Arguments:
#   params       model parameter list (uses R, plus whatever the solvers read)
#   dta          data frame with the observed series (uses cnlb_gdp_lag)
#   rf.debt      upper-knot debt level of the surplus function
#   rf.level     upper surplus level of the surplus function
#   v.surp       optional pre-computed surplus series; when NULL it is
#                computed via surp.fit from a full outer solution
#   d.init       initial debt level (defaults to the first lagged debt in dta)
#   sol, sol.o   optional pre-computed inner/outer solution objects
#   init         initial guess for the optimizer (lower coeffs then shifts)
#   print.level  verbosity forwarded to optim's trace control
#   ...          forwarded to surp.fit
#
# Returns: list with fitted v.s.coeff and s.shift, the fit diagnostics
#   surp.sd (s.d. of residuals) and aad (average absolute deviation), the
#   solution components p/d/err, and the surplus series v.surp.
#
# NOTE(review): this function reads `interp` from the calling/global
# environment -- it is not an argument. Confirm the caller defines it.
  ### 1. Solve the risk-free model ###
  params$v.s.coeff <- c( 0, rf.debt/2, rf.debt, rf.level, rf.level )
  n.X <- length(params$R)
  params$s.shift <- rep(0,n.X)
      # A surplus function that puts no restriction on debt
  if( is.null(init) ) init <- c( 0, rf.debt/4, 0, rep(0,n.X-1) )
  # plot.z( sol$p, sol$d, params, xlim=c(0,max(sol$p)*1.2), ylim=c(0,max(sol$p)*1.2) )
  # plot.sol( sol.o )
      # Plots
  if(is.null(sol)) sol <- sol.wrapper(params, cbind(0,rep(rf.debt,n.X)), plot.on = FALSE )
      # Solve for the (approximate) debt levels
  ### 2. Create the surplus series ###
  if( is.null(v.surp) ){
    if(is.null(sol.o)) sol.o <- outer.wrapper( sol, params, print_level = 1 )
        # Outer solution and errors
    v.surp <- surp.fit( sol.o, interp, params, dta, ... )
        # Create the first pass at the surplus series
  }else{
    # Surpluses supplied: build a stub risk-free outer solution whose prices
    # are just 1/R on a two-point debt grid
    d.max <- 10000
    sol.o <- list( d.bar=sol$d,
                   d.grid=c(0,max(sol$d)), P=matrix(1,ncol=2,nrow=length(params$R)),
                   Q=matrix( 1/params$R, ncol=2, nrow=length(params$R) ) )
  }
  if( is.null(d.init) ) d.init <- dta$cnlb_gdp_lag[1]
  ### 3. Create the error function ###
  params.cpy <- params
  s.idx.p <- sapply(1:n.X, function(i) sum( interp$s.idx == i ) ) / nrow(interp$s.idx)
      # Probability distribution of states
      # (assumes interp$s.idx has rows -- TODO confirm it is a matrix here)
  err.fn <- function(x, s.dev=FALSE, aad=FALSE ){
  # The error function: x = (lower coeff 1, lower coeff 2, lower level,
  # shifts for states 2..n.X). Returns the sum of squared simulation
  # residuals by default, their s.d. when s.dev, or mean |residual| when aad.
    params.cpy$v.s.coeff <- c( max(x[1],0), max( x[1], min( x[2], rf.debt ) ),
                               rf.debt, min(x[3],rf.level) , rf.level )
        # Lower parameters for surplus function, clamped to keep the knots
        # ordered and below the fixed upper knot
    params.cpy$s.shift <- c( -sum(x[-(1:3)] * s.idx.p[-1] ) / s.idx.p[1], x[-(1:3)] )
        # Shift parameters average to zero over the sample
    sim <- sim_core( c(1,interp$s.idx), sol.o$d.bar, sol.o$d.grid, sol.o$P, sol.o$Q,
                     params.cpy, d.init, TRUE, c(0,0,v.surp) )
        # Create the simulation (just need surplus function values but this is
        # fine too)
    if(s.dev) return(sd(sim[-1,'eps']))
    if(aad) return(mean(abs(sim[-1,'eps'])))
    return( sum( sim[-1,'eps'] ^ 2 ) )
  }
  ### 4. Minimize the error function ###
  control <- list(trace=print.level, maxit=100000)
  opt <- optim( init, err.fn, control=control )
  ## NEED TO FIT THE SHIFTS BETTER ##
  # control <- list(print_level=print.level, maxeval=1000, tol.abs=1e-04, algorithm="NLOPT_LN_COBYLA")
  # sol <- nloptr( init, err.fn, opts=control )
      # Minimize the error
  out <- list()
  out$v.s.coeff <- c( max(opt$par[1],0), max( opt$par[1], min( opt$par[2], rf.debt ) ),
                      rf.debt, min( opt$par[3], rf.level), rf.level )
  out$s.shift <- c( -sum(opt$par[-(1:3)] * s.idx.p[-1] ) / s.idx.p[1], opt$par[-(1:3)] )
  out$s.shift[s.idx.p==0] <- 0
      # Zero out shifts for states that never occur in the sample
  out$surp.sd <- err.fn(opt$par, s.dev=TRUE )
  out$aad <- err.fn(opt$par, aad=TRUE )
  out$p <- sol$p
  out$d <- sol$d
  out$v.surp <- v.surp
  out$err <- sol$err
  # out$v.s.coeff <- c( max(sol$solution[1],0), max( sol$solution[1], min( sol$solution[2], rf.debt ) ),
  #                     rf.debt, min( sol$solution[3], rf.level), rf.level )
  # out$s.shift <- c( -sum(sol$solution[-(1:3)] * s.idx.p[-1] ) / s.idx.p[1], sol$solution[-(1:3)] )
  # out$surp.sd <- sqrt( sol$objective )
      # Format output
  return(out)
}
price.diff.1.5 <- function( sol.o, params, sim, dta ){
# Computes the price difference between the 1 and 5 year bonds and compares it
# to the data analogue.
#
# Arguments:
#   sol.o   outer solution (uses P, d.bar, Q, d.grid, e.grid)
#   params  model parameters (uses R, G, s.shift, phi, v.s.coeff, tri, trans)
#   sim     simulation matrix with columns 'idx' (state) and 'd.prime' (debt)
#   dta     data frame with columns date, Int_1y, Int_5y (annual yields in %)
#
# Returns: data frame with one row per observation: data prices for the two
#   maturities, their difference, simulated prices, and their difference.
  q.dta <- sapply( c(1,5), function(i) ( 1 + dta[,paste0('Int_',i,'y')] / 100 ) ^ -i )
      # The price of the assets in the data (discounting the quoted annual
      # yield over i years)
  l.QQ <- lapply( c(1,5),
                  function(i){
                    lambda <- 1 - 1 / (4*i)
                        # decay parameter passed to q_hat_mat; presumably sets
                        # the bond's maturity to i years in a quarterly model
                        # -- TODO confirm against q_hat_mat's definition
                    q_hat_mat( sol.o$P, sol.o$d.bar, sol.o$Q, sol.o$Q, sol.o$d.grid, params$R,
                               params$G, params$s.shift, lambda, params$phi, sol.o$e.grid,
                               params$v.s.coeff, params$tri, matrix(0), FALSE, params$trans, 0 )
                  } )
      # Model price schedules on the debt grid for the two maturities
  q.sim <-sapply( l.QQ, function(QQ)
    apply( sim, 1, function(x)
      approx( sol.o$d.grid, QQ[x['idx'],], x['d.prime'] )$y ) )
      # Create the simulation of prices: interpolate each period's price
      # schedule (row = that period's state) at its end-of-period debt
  out <- data.frame( date=dta$date, q.dta, diff.dta=apply(q.dta,1,diff),
                     q.sim, diff.sim=apply(q.sim,1,diff) )
      # Create the output
  return(out)
}
price.diff.1.5.err <- function( sol.o, params, sim, dta ){
# Sum of squared deviations between the observed and simulated 1y-5y bond
# price differences (the term-premium fit error). NAs in the data are
# skipped. Arguments are forwarded unchanged to price.diff.1.5.
  prices <- price.diff.1.5( sol.o, params, sim, dta )
  resid <- prices$diff.dta - prices$diff.sim
  sum( resid ^ 2, na.rm = TRUE )
}
price.diff.min <- function( params, interp, dta, sol, sol.o, h.0=c(25,-4), maxit=50 ){
# Minimizes the price difference error by a coordinate search over the two
# upper surplus-function parameters (v.s.coeff[3], the upper debt knot, and
# v.s.coeff[5], the upper surplus level), alternating coordinates and halving
# the step after failures/rejections; the lower parameters are refit with
# rf.fit after each accepted move.
#
# Arguments:
#   params  model parameter list (v.s.coeff, s.shift, ...)
#   interp  interpolation object (interp$s.idx = per-period state indices)
#   dta     observed data (uses cnlb_gdp_lag)
#   sol     initial inner solution
#   sol.o   initial outer solution
#   h.0     initial step sizes for the two searched coordinates
#   maxit   maximum number of coordinate-search iterations
#
# Returns: list( sol=, sol.o= ), the solutions at the best accepted
#   parameters.
#
# NOTE(review): also reads `tgt$dta` from the calling/global environment while
# taking `dta` as an argument -- confirm both refer to the same data.
# NOTE(review): `params.out` is assigned on acceptance but never returned.
  #### 1. Set up ####
  trial.diff <- rep( Inf, 3 ) ; trial.diff.tol <- 1e-04
  it <- 1 ; h <- h.0
  v.s.coeff <- params$v.s.coeff ; j <- 2 ; # Start by reducing the level first
  i.j <- 1 ; i.j.max <- 5 ; err.old <- Inf ; fail.idx <- 0
  sol.out <- sol ; sol.o.out <- sol.o
      # sol.out/sol.o.out always hold the best accepted solutions
  while( it < maxit & all(trial.diff > trial.diff.tol ) ){
    #### 2. Create the trial parameters ####
    params$v.s.coeff[c(3,5)][j] <- v.s.coeff[c(3,5)][j] + h[j]
    params$v.s.coeff[2] <- min( params$v.s.coeff[c(2,3)])
        # Keep the middle knot weakly below the upper debt knot
    #### 3. Solve the model & check solution ####
    sol <- tryCatch( sol.wrapper( params, cbind(sol.out$p,sol.out$d) ),
                     error=function(e) list(p='fail', err=1) )
    if( any( sol$p == 'fail' ) | max(abs(sol$err)) > 1e-05 ){
      # Solver failed or converged poorly: halve the step and report
      h[j] <- h[j] / 2
      message("\n********************************************")
      message(" Parameter iteration report: ")
      message(" Iteration ", it )
      message(" j = ", j, ", i.j = ", i.j )
      message(" Model solution failed" )
      message(" h = ", paste0( round(h,2), sep=', ' ) )
      message(" max(abs(sol$err)) = ", max(abs(sol$err)) )
      message("********************************************\n")
      if( h[j] < .1 * h.0[j] ){ # Escape
        h[j] <- h.0[j] # Reset step length
        j <- if( j==2 ) 1 else j + 1 # Increment j
        fail.idx <- fail.idx + 1
        if( fail.idx == 3 ){
          # Three consecutive coordinate failures: give up and return the
          # best solutions found so far
          message("\n********************************************")
          message(" Failing out")
          message("********************************************")
          return(list( sol=sol.out, sol.o=sol.o.out ))
        }
        i.j <- i.j + 1
      }
    }else{
      #### 4. Create outer solution and simulate ####
      sol.o <- outer.wrapper( sol, params, Q.init = sol.o.out$Q,
                              d.grid.init=sol.o.out$d.grid )
      v.surp <- surp.fit(sol.o,interp,params,tgt$dta)
      # Create the fitted surpluses
      sim <- sim_core( c(1,interp$s.idx), sol.o$d.bar, sol.o$d.grid,
                       sol.o$P, sol.o$Q, params, tgt$dta$cnlb_gdp_lag[1],
                       TRUE, c(0,0,v.surp) )
      # The simulation
      #### 5. Create prices, measure error ####
      err <- price.diff.1.5.err( sol.o, params, sim, dta )
      #### 6. Accept or reject trial parameters ####
      bo.improve <- err <= err.old + 1e-8
      # trial.diff[j] <-
      #### 7. Create new step or move on to next parameter ####
      if( bo.improve ){
        trial.diff[j] <- abs( v.s.coeff[c(3,5)][j] - params$v.s.coeff[c(3,5)][j] )
        # Measure the difference in the updated coefficients.
        v.s.coeff[c(3,5)][j] <- params$v.s.coeff[c(3,5)][j]
        # Store the improved coeffs
        message("\n********************************************")
        message(" Parameter iteration report: ")
        message(" Iteration ", it )
        message(" j = ", j, ", i.j = ", i.j )
        message(" bo.improve = ", if(bo.improve) 'True' else 'False' )
        message(" v.s.coeff = ", paste0( round(v.s.coeff,2), sep=', ' ) )
        message(" h = ", paste0( round(h,2), sep=', ' ) )
        message(" err = ", err )
        message(" err.old = ", err.old )
        message(" trial.diff[j] = ", round(trial.diff[j]) )
        message("********************************************\n")
        fitted <- rf.fit( params, dta, v.s.coeff[3], v.s.coeff[5], v.surp,
                          sol, sol.o, init=c( params$v.s.coeff[c(1,2,4)], params$s.shift[-1] ) )
        # Refit the lower parameters
        params$v.s.coeff <- v.s.coeff <- fitted$v.s.coeff
        params$s.shift <- fitted$s.shift
        params$surp.sd <- fitted$surp.sd
        # Paste to parameters
        sol.out <- sol ; sol.o.out <- sol.o ; params.out <- params
            # NOTE(review): params.out is never used after this assignment
        message("\n********************************************")
        message(" Refitting lower parameters: ")
        message(" v.s.coeff = ", paste0( round(v.s.coeff,2), sep=', ' ) )
        message(" s.shift = ", paste0( round(params$s.shift,2), sep=', ' ) )
        message(" sd.surp = ", round( params$surp.sd, 4 ) )
        message("********************************************\n")
        plot.surp( params, TRUE, c(0,1.2*params$v.s.coeff[3]), TRUE,
                   ylim=range( c( params$v.s.coeff[4:5], sim[,'s'] ) ) )
        points( dta$cnlb_gdp_lag, sim[,'s'], pch=16, cex=.75, col=sim[,'idx'] )
            # Diagnostic plot: fitted surplus function with simulated points
        i.j <- 1 # Reset j-trial counter
        h[j] <- h.0[j] # Update h[j]
        j <- if( j==2 ) 1 else j + 1 # Increment j
        err.old <- price.diff.1.5.err( sol.o, params, sim, dta )
        # Store error with new lower parameters
        fail.idx <- 0
      }else{
        # Rejected trial: report, shrink the step, maybe switch coordinate
        message("\n********************************************")
        message(" Parameter iteration report: ")
        message(" Iteration ", it )
        message(" j = ", j, ", i.j = ", i.j )
        message(" bo.improve = ", if(bo.improve) 'True' else 'False' )
        message(" h = ", paste0( round(h,2), sep=', ' ) )
        message(" err = ", err )
        message(" err.old = ", err.old )
        message("********************************************\n")
        h[j] <- h[j] / 2 # Decrease the step
        i.j <- i.j + 1 # Increase j-trial counter
        if( h[j] < .1 * h.0[j] ){ # Escape
          h[j] <- h.0[j] # Reset step length
          j <- if( j==2 ) 1 else j + 1 # Increment j
        }
      }
    }
    it <- it + 1
  }
  return( list( sol=sol.out, sol.o=sol.o.out ) )
}
|
\name{nondominated_points}
\alias{nondominated_points}
\title{Nondominated points}
\usage{
nondominated_points(points)
}
\arguments{
\item{points}{Matrix of points, one per column.}
}
\value{
Those points in \code{points} which are not dominated by
another point.
}
\description{
Return those points which are not dominated by another
point in \code{points}. This is the Pareto front
approximation of the point set.
}
\author{
Olaf Mersmann \email{olafm@statistik.tu-dortmund.de}
}
\keyword{optimize}
| /man/nondominated_points.Rd | no_license | cran/emoa | R | false | false | 518 | rd | \name{nondominated_points}
\alias{nondominated_points}
\title{Nondominated points}
\usage{
nondominated_points(points)
}
\arguments{
\item{points}{Matrix of points, one per column.}
}
\value{
Those points in \code{points} which are not dominated by
another point.
}
\description{
Return those points which are not dominated by another
point in \code{points}. This is the Pareto front
approximation of the point set.
}
\author{
Olaf Mersmann \email{olafm@statistik.tu-dortmund.de}
}
\keyword{optimize}
|
# Quick interactive exploration of the FIFA data set.
getwd()
    # Show the working directory; fifa.csv must live here
library(xlsx)
    # NOTE(review): xlsx does not appear to be used below (read.csv is used)
library(dplyr)
library(ISLR)
df=read.csv('fifa.csv')
    # Load the data
head(df)
View(df)
    # Opens the spreadsheet viewer (interactive sessions only)
str(df)
    # Column types
dim(df)
    # Rows x columns
head(df)
is.na(df)
    # Logical matrix of missing values; prints the whole matrix to the console
| /Fifa.R | no_license | kumavard/git-github | R | false | false | 143 | r | getwd()
# Quick interactive exploration of the FIFA data set.
library(xlsx)
    # NOTE(review): xlsx does not appear to be used below (read.csv is used)
library(dplyr)
library(ISLR)
df=read.csv('fifa.csv')
    # Load the data
head(df)
View(df)
    # Opens the spreadsheet viewer (interactive sessions only)
str(df)
    # Column types
dim(df)
    # Rows x columns
head(df)
is.na(df)
    # Logical matrix of missing values; prints the whole matrix to the console
|
\name{ta}
\alias{ta}
\title{Temporal Aggregation of Time Series}
\usage{
ta(x, conversion = "sum", to = "annual")
}
\arguments{
\item{x}{a time series object of class \code{"ts"} or
\code{"mts"}.}
\item{conversion}{type of conversion: \code{"sum"},
\code{"average"}, \code{"first"} or \code{"last"}.}
\item{to}{(low-frequency) destination frequency as a
character string (\code{"annual"} or \code{"quarterly"})
or as a scalar (e.g. \code{1}, \code{2}, \code{4}).}
}
\value{
\code{ta} returns an object of class \code{"ts"} or
\code{"mts"}, depending on the class of the input series.
}
\description{
Performs temporal aggregation of high to low frequency
time series. Currently, \code{ta} only works with
\code{ts} or \code{mts} time series objects.
}
\details{
\code{ta} is used to aggregate a high frequency time
series into a low frequency series, while the latter is
either the sum, the average, the first or the last value
of the high-frequency series. \code{ta} is the inverse
function of \code{\link{td}}. If applied to an output
series of \code{td}, \code{ta} yields the original
series.
}
\examples{
data(swisspharma)
sales.q.a <- ta(sales.q, conversion = "sum", to = "annual")
all.equal(sales.a, sales.q.a)
}
\seealso{
\code{\link{td}} for the main function for temporal
disaggregation.
}
\keyword{models}
\keyword{ts}
| /man/ta.Rd | no_license | petersteiner/tempdisagg | R | false | false | 1,378 | rd | \name{ta}
\alias{ta}
\title{Temporal Aggregation of Time Series}
\usage{
ta(x, conversion = "sum", to = "annual")
}
\arguments{
\item{x}{a time series object of class \code{"ts"} or
\code{"mts"}.}
\item{conversion}{type of conversion: \code{"sum"},
\code{"average"}, \code{"first"} or \code{"last"}.}
\item{to}{(low-frequency) destination frequency as a
character string (\code{"annual"} or \code{"quarterly"})
or as a scalar (e.g. \code{1}, \code{2}, \code{4}).}
}
\value{
\code{ta} returns an object of class \code{"ts"} or
\code{"mts"}, depending on the class of the input series.
}
\description{
Performs temporal aggregation of high to low frequency
time series. Currently, \code{ta} only works with
\code{ts} or \code{mts} time series objects.
}
\details{
\code{ta} is used to aggregate a high frequency time
series into a low frequency series, while the latter is
either the sum, the average, the first or the last value
of the high-frequency series. \code{ta} is the inverse
function of \code{\link{td}}. If applied to an output
series of \code{td}, \code{ta} yields the original
series.
}
\examples{
data(swisspharma)
sales.q.a <- ta(sales.q, conversion = "sum", to = "annual")
all.equal(sales.a, sales.q.a)
}
\seealso{
\code{\link{td}} for the main function for temporal
disaggregation.
}
\keyword{models}
\keyword{ts}
|
# Fixture test: runs expectations both in forked child processes (via
# parallel::mcparallel/mccollect) and in the parent process.
test_that("test_me works", {
  library(parallel)
  # NOTE(review): expectations evaluated inside mcparallel() run in a forked
  # child; a failure there is returned as a collected result rather than
  # raised in the parent -- confirm this is the intended coverage exercise.
  mccollect(mcparallel(
    expect_equal(test1(2, 2), 4)
  ))
  mccollect(mcparallel(
    expect_equal(test2(2, 2), 4)
  ))
  # This expectation runs in the parent process as usual.
  expect_equal(test3(2, 2), 0)
})
})
| /tests/testthat/TestParallel/tests/testthat/test-TestParallel.R | permissive | r-lib/covr | R | false | false | 212 | r | test_that("test_me works", {
library(parallel)
mccollect(mcparallel(
expect_equal(test1(2, 2), 4)
))
mccollect(mcparallel(
expect_equal(test2(2, 2), 4)
))
expect_equal(test3(2, 2), 0)
})
|
# Summarizes per-chromosome mutation-rate prediction files
# (<analysisdir>/output/predicted/chr*.txt) in parallel and writes a single
# explore.csv with per-file summary statistics.
library("tidyverse")
library("stringr")
library("yaml")
library("parallel")
initial.options <- commandArgs(trailingOnly = FALSE)
script.basename <- sub("--file=", "", initial.options[grep("--file=", initial.options)]) %>%
  dirname
    # Directory containing this script, recovered from Rscript's --file= flag
setwd(script.basename)
args <- yaml.load_file("../_config.yaml")
attach(args)
    # NOTE(review): attach() puts the config entries (e.g. analysisdir) on the
    # search path; explicit args$... access would be safer
pred.files.dir <- paste0(analysisdir, "/output/predicted")
setwd(pred.files.dir)
pred.files <- Sys.glob("chr*.txt")
print(str_c("We are parsing ", length(pred.files), " files"))
#for(pred.name in pred.files){
get_sum <- function(pred.name){
  # One-row tibble of summary statistics for a single prediction file.
  # What do we want to get? Min, Max, Var, Number > 0.5
  print(str_c("Processing file: ", pred.name))
  # load the data (headerless TSV; column X3 holds the predicted rate)
  suppressMessages(dat <- read_tsv(pred.name, col_names = FALSE))
  chr <- pred.name %>% str_extract("chr[0-9]+") %>% str_extract("[0-9]+") %>% as.numeric()
      # Chromosome number parsed from the file name
  type <- str_extract(pred.name, "[A,C,G,T]{2}_[A,C,G,T]{2}")
      # Mutation type token, e.g. "AT_CG", parsed from the file name
  return(
    tibble(chr = chr,
           type = type,
           min.rate = min(dat$X3),
           max.rate = max(dat$X3),
           median.rate = median(dat$X3),
           mean.rate = mean(dat$X3),
           var.rate = var(dat$X3),
           nSites = length(dat$X3),
           nMutableSites = sum(dat$X3 > 0.5)
    ))
}
res <- mclapply(pred.files, get_sum, mc.cores = 10)
    # One summary tibble per file, computed on up to 10 cores
res.table <- bind_rows(res)
write.csv(res.table, file = "explore.csv", row.names=FALSE)
| /R/evalPred.R | permissive | theandyb/smaug-redux | R | false | false | 1,418 | r | library("tidyverse")
# Summarizes per-chromosome mutation-rate prediction files
# (<analysisdir>/output/predicted/chr*.txt) in parallel and writes a single
# explore.csv with per-file summary statistics.
library("stringr")
library("yaml")
library("parallel")
initial.options <- commandArgs(trailingOnly = FALSE)
script.basename <- sub("--file=", "", initial.options[grep("--file=", initial.options)]) %>%
  dirname
    # Directory containing this script, recovered from Rscript's --file= flag
setwd(script.basename)
args <- yaml.load_file("../_config.yaml")
attach(args)
    # NOTE(review): attach() puts the config entries (e.g. analysisdir) on the
    # search path; explicit args$... access would be safer
pred.files.dir <- paste0(analysisdir, "/output/predicted")
setwd(pred.files.dir)
pred.files <- Sys.glob("chr*.txt")
print(str_c("We are parsing ", length(pred.files), " files"))
#for(pred.name in pred.files){
get_sum <- function(pred.name){
  # One-row tibble of summary statistics for a single prediction file.
  # What do we want to get? Min, Max, Var, Number > 0.5
  print(str_c("Processing file: ", pred.name))
  # load the data (headerless TSV; column X3 holds the predicted rate)
  suppressMessages(dat <- read_tsv(pred.name, col_names = FALSE))
  chr <- pred.name %>% str_extract("chr[0-9]+") %>% str_extract("[0-9]+") %>% as.numeric()
      # Chromosome number parsed from the file name
  type <- str_extract(pred.name, "[A,C,G,T]{2}_[A,C,G,T]{2}")
      # Mutation type token, e.g. "AT_CG", parsed from the file name
  return(
    tibble(chr = chr,
           type = type,
           min.rate = min(dat$X3),
           max.rate = max(dat$X3),
           median.rate = median(dat$X3),
           mean.rate = mean(dat$X3),
           var.rate = var(dat$X3),
           nSites = length(dat$X3),
           nMutableSites = sum(dat$X3 > 0.5)
    ))
}
res <- mclapply(pred.files, get_sum, mc.cores = 10)
    # One summary tibble per file, computed on up to 10 cores
res.table <- bind_rows(res)
write.csv(res.table, file = "explore.csv", row.names=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Ca.R
\name{Ca}
\alias{Ca}
\title{Coefficient of Association}
\usage{
Ca(traj1, traj2, tc = 0, dc = 0)
}
\arguments{
\item{traj1}{an object of the class \code{ltraj} which contains the time-stamped
movement fixes of the first object. Note this object must be a \code{type II
ltraj} object. For more information on objects of this type see \code{
help(ltraj)}.}
\item{traj2}{same as \code{traj1}.}
\item{tc}{temporal tolerance limit (in seconds) for defining when two fixes
are simultaneous or together. Parameter passed to function \code{GetSimultaneous}.}
\item{dc}{distance tolerance limit (in appropriate units) for defining when
two fixes are spatially together.}
}
\value{
This function returns a numeric result of the Ca statistic.
}
\description{
This function measures the dynamic interaction between two moving objects following
the methods first described by Cole (1949), and more recently employed by Bauman (1998).
}
\details{
This function can be used to calculate the Cole (1949) measure of dynamic
interaction between two animals. Termed a coefficient of association, the Ca
statistic tests the number of fixes the animals are observed together against the
total number of fixes following:
\deqn{Ca = \frac{2AB}{A+B}}{2AB/(A+B)}
where \eqn{A} (respectively \eqn{B}) is the number of times animal 1 (resp. 2) are
observed, and \eqn{AB} is the number of times the two animals are observed together.
Several works, including Bauman (1998) have suggested that Ca > 0.5 indicates
affiliation or fidelity, while Ca < 0.5 indicates no association between the
two animals. Note that this function calls \code{GetSimultaneous} to identify the temporal
component of identifying when fixes together.
}
\examples{
data(deer)
deer37 <- deer[1]
deer38 <- deer[2]
#tc = 7.5 minutes, dc = 50 meters
Ca(deer37, deer38, tc = 7.5*60, dc = 50)
}
\references{
Bauman, P.J. (1998) The Wind Cave National Park elk herd: home ranges, seasonal movements, and alternative control methods.
M.S. Thesis. South Dakota State University, Brookings, South Dakota, USA. \cr\cr
Cole, L.C. (1949) The measurement of interspecific association. \emph{Ecology}. \bold{30}, 411--424.
}
\seealso{
GetSimultaneous, Prox, HAI
}
\keyword{indices}
| /man/Ca.Rd | no_license | jedalong/wildlifeDI | R | false | true | 2,318 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Ca.R
\name{Ca}
\alias{Ca}
\title{Coefficient of Association}
\usage{
Ca(traj1, traj2, tc = 0, dc = 0)
}
\arguments{
\item{traj1}{an object of the class \code{ltraj} which contains the time-stamped
movement fixes of the first object. Note this object must be a \code{type II
ltraj} object. For more information on objects of this type see \code{
help(ltraj)}.}
\item{traj2}{same as \code{traj1}.}
\item{tc}{temporal tolerance limit (in seconds) for defining when two fixes
are simultaneous or together. Parameter passed to function \code{GetSimultaneous}.}
\item{dc}{distance tolerance limit (in appropriate units) for defining when
two fixes are spatially together.}
}
\value{
This function returns a numeric result of the Ca statistic.
}
\description{
This function measures the dynamic interaction between two moving objects following
the methods first described by Cole (1949), and more recently employed by Bauman (1998).
}
\details{
This function can be used to calculate the Cole (1949) measure of dynamic
interaction between two animals. Termed a coefficient of association, the Ca
statistic tests the number of fixes the animals are observed together against the
total number of fixes following:
\deqn{Ca = \frac{2AB}{A+B}}{2AB/(A+B)}
where \eqn{A} (respectively \eqn{B}) is the number of times animal 1 (resp. 2) are
observed, and \eqn{AB} is the number of times the two animals are observed together.
Several works, including Bauman (1998) have suggested that Ca > 0.5 indicates
affiliation or fidelity, while Ca < 0.5 indicates no association between the
two animals. Note that this function calls \code{GetSimultaneous} to identify the temporal
component of identifying when fixes together.
}
\examples{
data(deer)
deer37 <- deer[1]
deer38 <- deer[2]
#tc = 7.5 minutes, dc = 50 meters
Ca(deer37, deer38, tc = 7.5*60, dc = 50)
}
\references{
Bauman, P.J. (1998) The Wind Cave National Park elk herd: home ranges, seasonal movements, and alternative control methods.
M.S. Thesis. South Dakota State University, Brookings, South Dakota, USA. \cr\cr
Cole, L.C. (1949) The measurement of interspecific association. \emph{Ecology}. \bold{30}, 411--424.
}
\seealso{
GetSimultaneous, Prox, HAI
}
\keyword{indices}
|
match_length <- function(pattern, text) {
  # Total number of characters of `text` covered by matches of `pattern`,
  # or 0 when the pattern does not match at all.
  hits <- gregexpr(pattern, text)[[1]]
  if (hits[1] == -1) {
    0
  } else {
    sum(attr(hits, "match.length"))
  }
}
#' @include expressions.R
get_top_script <- function(text) {
  # Name of the script in `expressions` whose character class covers the
  # largest part of `text`; NULL when no script matches at all.
  coverage <- vapply(expressions, match_length, numeric(1), text = text)
  if (!any(coverage > 0)) {
    return(NULL)
  }
  names(which.max(coverage))
}
| /R/script.R | permissive | gaborcsardi/franc | R | false | false | 361 | r |
match_length <- function(pattern, text) {
mat <- gregexpr(pattern, text)[[1]]
if (mat[1] == -1) 0 else sum(attr(mat, "match.length"))
}
#' @include expressions.R
get_top_script <- function(text) {
num_letters <- vapply(expressions, match_length, 1, text = text)
if (any(num_letters > 0)) {
names(which.max(num_letters))
} else {
NULL
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rearrangePartners.R
\name{rearrangePartners}
\alias{rearrangePartners}
\title{Rearrange partners}
\usage{
rearrangePartners(x, perm)
}
\arguments{
\item{x}{An IndexedRelations object.}
\item{perm}{An integer vector specifying the permutation of the partners.}
}
\value{
An IndexedRelations object with rearranged partners.
}
\description{
Rearrange partners in an \linkS4class{IndexedRelations} object.
}
\details{
This function facilitates the task of switching the order of partners across all relationships in \code{x}.
Note that the length of \code{perm} need not be the as \code{ncol(partners(x))};
partners can be dropped, duplicated, etc.
}
\examples{
library(GenomicRanges)
genomic <- GRanges("chrA", IRanges(1:10*20, 1:10*20+10))
intervals <- IRanges(1:20*10, 1:20*10+10)
rel <- IndexedRelations(list(1:2, 1:2), list(genomic, intervals))
rearrangePartners(rel, c(2, 1))
# Or even drop a partner completely:
rearrangePartners(rel, 2)
}
\author{
Aaron Lun
}
| /man/rearrangePartners.Rd | no_license | LTLA/IndexedRelations | R | false | true | 1,048 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rearrangePartners.R
\name{rearrangePartners}
\alias{rearrangePartners}
\title{Rearrange partners}
\usage{
rearrangePartners(x, perm)
}
\arguments{
\item{x}{An IndexedRelations object.}
\item{perm}{An integer vector specifying the permutation of the partners.}
}
\value{
An IndexedRelations object with rearranged partners.
}
\description{
Rearrange partners in an \linkS4class{IndexedRelations} object.
}
\details{
This function facilitates the task of switching the order of partners across all relationships in \code{x}.
Note that the length of \code{perm} need not be the as \code{ncol(partners(x))};
partners can be dropped, duplicated, etc.
}
\examples{
library(GenomicRanges)
genomic <- GRanges("chrA", IRanges(1:10*20, 1:10*20+10))
intervals <- IRanges(1:20*10, 1:20*10+10)
rel <- IndexedRelations(list(1:2, 1:2), list(genomic, intervals))
rearrangePartners(rel, c(2, 1))
# Or even drop a partner completely:
rearrangePartners(rel, 2)
}
\author{
Aaron Lun
}
|
\name{perDistance}
\alias{perDistance}
\title{
Periodogram based dissimilarity
}
\description{
Calculates the dissimilarity between two numerical series of the same length based on the distance between their periodograms.
}
\usage{
perDistance(x, y, ...)
}
\arguments{
\item{x}{
Numeric vector containing the first time series.
}
\item{y}{
Numeric vector containing the second time series.
}
\item{...}{
Additional parameters for the function. See \code{\link[TSclust]{diss.PER}} for more
information.
}
}
\details{
This is simply a wrapper for the \code{\link[TSclust]{diss.PER}} function of package \pkg{TSclust}. As such, all the functionalities of the \code{\link[TSclust]{diss.PER}} function are also available when using this function.
}
\value{
\item{d}{
The computed distance between the pair of series.
}
}
\references{
Pablo Montero, José A. Vilar (2014). TSclust: An R Package for Time Series
Clustering. Journal of Statistical Software, 62(1), 1-43. URL
http://www.jstatsoft.org/v62/i01/.
}
\author{
Usue Mori, Alexander Mendiburu, Jose A. Lozano.
}
\seealso{
To calculate this distance measure using \code{ts}, \code{zoo} or \code{xts} objects see \code{\link{tsDistances}}. To calculate distance matrices of time series databases using this measure see \code{\link{tsDatabaseDistances}}.
}
\examples{
#The objects example.series1 and example.series2 are two
#numeric series of length 100.
data(example.series1)
data(example.series2)
#For information on their generation and shape see
#help page of example.series.
help(example.series)
#Calculate the periodogram based distance between the two series using
#the default parameters.
perDistance(example.series1, example.series2)
}
| /man/perDistance.Rd | no_license | Kevin-Jin/TSdist | R | false | false | 1,736 | rd | \name{perDistance}
\alias{perDistance}
\title{
Periodogram based dissimilarity
}
\description{
Calculates the dissimilarity between two numerical series of the same length based on the distance between their periodograms.
}
\usage{
perDistance(x, y, ...)
}
\arguments{
\item{x}{
Numeric vector containing the first time series.
}
\item{y}{
Numeric vector containing the second time series.
}
\item{...}{
Additional parameters for the function. See \code{\link[TSclust]{diss.PER}} for more
information.
}
}
\details{
This is simply a wrapper for the \code{\link[TSclust]{diss.PER}} function of package \pkg{TSclust}. As such, all the functionalities of the \code{\link[TSclust]{diss.PER}} function are also available when using this function.
}
\value{
\item{d}{
The computed distance between the pair of series.
}
}
\references{
Pablo Montero, José A. Vilar (2014). TSclust: An R Package for Time Series
Clustering. Journal of Statistical Software, 62(1), 1-43. URL
http://www.jstatsoft.org/v62/i01/.
}
\author{
Usue Mori, Alexander Mendiburu, Jose A. Lozano.
}
\seealso{
To calculate this distance measure using \code{ts}, \code{zoo} or \code{xts} objects see \code{\link{tsDistances}}. To calculate distance matrices of time series databases using this measure see \code{\link{tsDatabaseDistances}}.
}
\examples{
#The objects example.series1 and example.series2 are two
#numeric series of length 100.
data(example.series1)
data(example.series2)
#For information on their generation and shape see
#help page of example.series.
help(example.series)
#Calculate the periodogram based distance between the two series using
#the default parameters.
perDistance(example.series1, example.series2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conos.r
\name{quickCPCA}
\alias{quickCPCA}
\title{Perform cpca on two samples}
\usage{
quickCPCA(r.n, data.type = "counts", k = 30, ncomps = 100,
n.odgenes = NULL, var.scale = TRUE, verbose = TRUE,
neighborhood.average = FALSE)
}
\arguments{
\item{r.n}{list of p2 objects}
\item{data.type}{type of data to use (default="counts")}
\item{k}{neighborhood size to use}
\item{ncomps}{number of components to calculate (default=100)}
\item{n.odgenes}{number of overdispersed genes to take from each dataset}
\item{var.scale}{whether to scale variance (default=TRUE)}
\item{verbose}{whether to be verbose}
\item{neighborhood.average}{use neighborhood average values}
\item{n.cores}{number of cores to use}
}
\description{
Perform cpca on two samples
}
| /man/quickCPCA.Rd | no_license | camiel-m/conos | R | false | true | 777 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conos.r
\name{quickCPCA}
\alias{quickCPCA}
\title{Perform cpca on two samples}
\usage{
quickCPCA(r.n, data.type = "counts", k = 30, ncomps = 100,
n.odgenes = NULL, var.scale = TRUE, verbose = TRUE,
neighborhood.average = FALSE)
}
\arguments{
\item{r.n}{list of p2 objects}
\item{data.type}{type of data to use (default="counts")}
\item{k}{neighborhood size to use}
\item{ncomps}{number of components to calculate (default=100)}
\item{n.odgenes}{number of overdispersed genes to take from each dataset}
\item{var.scale}{whether to scale variance (default=TRUE)}
\item{verbose}{whether to be verbose}
\item{neighborhood.average}{use neighborhood average values}
\item{n.cores}{number of cores to use}
}
\description{
Perform cpca on two samples
}
|
# Copyright 2019 Battelle Memorial Institute; see the LICENSE file.
#' module_gcamusa_LA101.EIA_SEDS
#'
#' Produce two output tables from the EIA state energy database:
#' \itemize{
#' \item{L101.inEIA_EJ_state_S_F: Energy data by GCAM sector and fuel, state, and year; energy units in EJ, years from 1971-2010, includes only rows that have a defined sector and fuel}
#' \item{L101.EIA_use_all_Bbtu: Energy data by EIA sector and fuel code, GCAM sector and fuel, MSN, state, and year; energy units in Billion BTU, years from 1960-2011, includes all original data}
#' }
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{L101.EIA_use_all_Bbtu}, \code{L101.inEIA_EJ_state_S_F}. The corresponding file in the
#' original data system was \code{LA101.EIA_SEDS.R} (gcam-usa level1).
#' @details See above
#' @importFrom assertthat assert_that
#' @importFrom dplyr arrange bind_rows filter group_by left_join mutate select summarise
#' @importFrom tidyr gather spread fill
#' @author AS April 2017
module_gcamusa_LA101.EIA_SEDS <- function(command, ...) {
if(command == driver.DECLARE_INPUTS) {
return(c(FILE = "gcam-usa/EIA_SEDS_fuels",
FILE = "gcam-usa/EIA_SEDS_sectors",
FILE = "gcam-usa/EIA_use_all_Bbtu",
FILE = "gcam-usa/A_fuel_conv"))
} else if(command == driver.DECLARE_OUTPUTS) {
return(c("L101.EIA_use_all_Bbtu",
"L101.inEIA_EJ_state_S_F"))
} else if(command == driver.MAKE) {
# Pre-declare the column names referenced via non-standard evaluation in
# the dplyr pipelines below so R CMD check does not flag them as
# undefined global variables.
year <- value <- Data_Status <- State <- MSN <- GCAM_fuel <- GCAM_sector <-
state <- sector <- fuel <- conv_Bbtu_EJ <- EIA_fuel <- EIA_sector <-
description.x <- description.y <- NULL # silence package check.
all_data <- list(...)[[1]]
# Load required inputs
EIA_SEDS_fuels <- get_data(all_data, "gcam-usa/EIA_SEDS_fuels")
EIA_SEDS_sectors <- get_data(all_data, "gcam-usa/EIA_SEDS_sectors")
EIA_use_all_Bbtu <- get_data(all_data, "gcam-usa/EIA_use_all_Bbtu")
A_fuel_conv <- get_data(all_data, "gcam-usa/A_fuel_conv")
# ===================================================
# Prep for output tables - add columns for GCAM sector and fuel names, using the substrings of the Mnemonic Series Name (MSN) code, and filter out U.S.
EIA_use_all_Bbtu %>%
gather_years %>%
mutate(EIA_fuel = substr(MSN, 1, 2), # First and second digits of MSN is energy code
EIA_sector = substr(MSN, 3, 4)) %>% # Third and fourth digits of MSN is sector code
# Attach GCAM fuel/sector names via the mapping tables; codes with no
# GCAM mapping get NA names here and are dropped later where required.
left_join(EIA_SEDS_fuels, by = "EIA_fuel") %>%
left_join(EIA_SEDS_sectors, by = "EIA_sector") %>%
filter(State != "US") %>% # Drop the national-total rows; keep states only
mutate(state = State, fuel = GCAM_fuel, sector = GCAM_sector) ->
Bbtu_with_GCAM_names
# Create 1 of the 2 output tables: narrow years from 1971-2010, convert billion BTU to EJ (fuel specific), remove rows that have no defined sector or fuel name
Bbtu_with_GCAM_names %>%
select(state, sector, fuel, year, value) %>%
filter(year %in% HISTORICAL_YEARS, !is.na(fuel), !is.na(sector)) %>%
left_join(A_fuel_conv, by = "fuel") %>% # conv_Bbtu_EJ is a fuel-specific factor
mutate(value = value * conv_Bbtu_EJ) %>%
# Several EIA codes can map to the same GCAM sector/fuel; sum them.
group_by(state, sector, fuel, year) %>%
summarise(value = sum(value)) %>%
arrange(fuel, sector) %>%
ungroup() ->
L101.inEIA_EJ_state_S_F
# Create other output table: leave units as billion BTU, getting rid of missing values: prior to 1980, lots are missing. These data are only used for state-wise allocations
Bbtu_with_GCAM_names %>%
select(Data_Status, state, MSN, year, value, EIA_fuel, EIA_sector, sector, fuel, -State, -description.x, -description.y) %>%
arrange(Data_Status, state, MSN, EIA_fuel, EIA_sector, sector, fuel, -year) -> # Year needs to be in descending order to use fill function
Bbtu_with_GCAM_names_intermediate
# To create this second output table, I need to split the dataframe and recombine
Bbtu_with_GCAM_names_intermediate %>%
filter(year %in% 1971:2011) %>% # Custom year range, want to keep NAs in 1960-1970
# tidyr::fill fills downward by default; with years sorted descending this
# copies the most recent available value into older NA years within a series.
fill(value) %>% # Replace NAs in 1971-1979 with values from one year more recent
bind_rows(filter(Bbtu_with_GCAM_names_intermediate, year %in% 1960:1970)) %>% # Reattaching 1960-1970 rows
arrange(Data_Status, state, MSN, EIA_fuel, EIA_sector, sector, fuel, -year) ->
L101.EIA_use_all_Bbtu
# ===================================================
# Attach metadata (title, units, comments, legacy name, precursors) required
# by the gcamdata output contract.
L101.EIA_use_all_Bbtu %>%
add_title("State Energy Data in Bbtu by Year, GCAM-Sector, and GCAM-Fuel") %>%
add_units("Billion BTU") %>%
add_comments("GCAM sector and fuel names were added, NAs for years 1971-1979 were replaced with most recent year's data available") %>%
add_legacy_name("L101.EIA_use_all_Bbtu") %>%
add_precursors("gcam-usa/EIA_use_all_Bbtu", "gcam-usa/EIA_SEDS_fuels",
"gcam-usa/EIA_SEDS_sectors") ->
L101.EIA_use_all_Bbtu
L101.inEIA_EJ_state_S_F %>%
add_title("State Energy Data in EJ by Year, GCAM-Sector, and GCAM-Fuel") %>%
add_units("EJ") %>%
add_comments("GCAM sector and fuel names were added, units converted to EJ, data with no GCAM fuel or sector name removed") %>%
add_legacy_name("L101.inEIA_EJ_state_S_F") %>%
add_precursors("gcam-usa/EIA_use_all_Bbtu", "gcam-usa/EIA_SEDS_fuels",
"gcam-usa/EIA_SEDS_sectors", "gcam-usa/A_fuel_conv") ->
L101.inEIA_EJ_state_S_F
return_data(L101.EIA_use_all_Bbtu, L101.inEIA_EJ_state_S_F)
} else {
stop("Unknown command")
}
}
| /R/zchunk_LA101.EIA_SEDS.R | permissive | almondtea/gcamdata | R | false | false | 5,747 | r | # Copyright 2019 Battelle Memorial Institute; see the LICENSE file.
#' module_gcamusa_LA101.EIA_SEDS
#'
#' Produce two output tables from the EIA state energy database:
#' \itemize{
#' \item{L101.inEIA_EJ_state_S_F: Energy data by GCAM sector and fuel, state, and year; energy units in EJ, years from 1971-2010, includes only rows that have a defined sector and fuel}
#' \item{L101.EIA_use_all_Bbtu: Energy data by EIA sector and fuel code, GCAM sector and fuel, MSN, state, and year; energy units in Billion BTU, years from 1960-2011, includes all original data}
#' }
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{L101.EIA_use_all_Bbtu}, \code{L101.inEIA_EJ_state_S_F}. The corresponding file in the
#' original data system was \code{LA101.EIA_SEDS.R} (gcam-usa level1).
#' @details See above
#' @importFrom assertthat assert_that
#' @importFrom dplyr arrange bind_rows filter group_by left_join mutate select summarise
#' @importFrom tidyr gather spread fill
#' @author AS April 2017
module_gcamusa_LA101.EIA_SEDS <- function(command, ...) {
if(command == driver.DECLARE_INPUTS) {
return(c(FILE = "gcam-usa/EIA_SEDS_fuels",
FILE = "gcam-usa/EIA_SEDS_sectors",
FILE = "gcam-usa/EIA_use_all_Bbtu",
FILE = "gcam-usa/A_fuel_conv"))
} else if(command == driver.DECLARE_OUTPUTS) {
return(c("L101.EIA_use_all_Bbtu",
"L101.inEIA_EJ_state_S_F"))
} else if(command == driver.MAKE) {
# Pre-declare the column names referenced via non-standard evaluation in
# the dplyr pipelines below so R CMD check does not flag them as
# undefined global variables.
year <- value <- Data_Status <- State <- MSN <- GCAM_fuel <- GCAM_sector <-
state <- sector <- fuel <- conv_Bbtu_EJ <- EIA_fuel <- EIA_sector <-
description.x <- description.y <- NULL # silence package check.
all_data <- list(...)[[1]]
# Load required inputs
EIA_SEDS_fuels <- get_data(all_data, "gcam-usa/EIA_SEDS_fuels")
EIA_SEDS_sectors <- get_data(all_data, "gcam-usa/EIA_SEDS_sectors")
EIA_use_all_Bbtu <- get_data(all_data, "gcam-usa/EIA_use_all_Bbtu")
A_fuel_conv <- get_data(all_data, "gcam-usa/A_fuel_conv")
# ===================================================
# Prep for output tables - add columns for GCAM sector and fuel names, using the substrings of the Mnemonic Series Name (MSN) code, and filter out U.S.
EIA_use_all_Bbtu %>%
gather_years %>%
mutate(EIA_fuel = substr(MSN, 1, 2), # First and second digits of MSN is energy code
EIA_sector = substr(MSN, 3, 4)) %>% # Third and fourth digits of MSN is sector code
# Attach GCAM fuel/sector names via the mapping tables; codes with no
# GCAM mapping get NA names here and are dropped later where required.
left_join(EIA_SEDS_fuels, by = "EIA_fuel") %>%
left_join(EIA_SEDS_sectors, by = "EIA_sector") %>%
filter(State != "US") %>% # Drop the national-total rows; keep states only
mutate(state = State, fuel = GCAM_fuel, sector = GCAM_sector) ->
Bbtu_with_GCAM_names
# Create 1 of the 2 output tables: narrow years from 1971-2010, convert billion BTU to EJ (fuel specific), remove rows that have no defined sector or fuel name
Bbtu_with_GCAM_names %>%
select(state, sector, fuel, year, value) %>%
filter(year %in% HISTORICAL_YEARS, !is.na(fuel), !is.na(sector)) %>%
left_join(A_fuel_conv, by = "fuel") %>% # conv_Bbtu_EJ is a fuel-specific factor
mutate(value = value * conv_Bbtu_EJ) %>%
# Several EIA codes can map to the same GCAM sector/fuel; sum them.
group_by(state, sector, fuel, year) %>%
summarise(value = sum(value)) %>%
arrange(fuel, sector) %>%
ungroup() ->
L101.inEIA_EJ_state_S_F
# Create other output table: leave units as billion BTU, getting rid of missing values: prior to 1980, lots are missing. These data are only used for state-wise allocations
Bbtu_with_GCAM_names %>%
select(Data_Status, state, MSN, year, value, EIA_fuel, EIA_sector, sector, fuel, -State, -description.x, -description.y) %>%
arrange(Data_Status, state, MSN, EIA_fuel, EIA_sector, sector, fuel, -year) -> # Year needs to be in descending order to use fill function
Bbtu_with_GCAM_names_intermediate
# To create this second output table, I need to split the dataframe and recombine
Bbtu_with_GCAM_names_intermediate %>%
filter(year %in% 1971:2011) %>% # Custom year range, want to keep NAs in 1960-1970
# tidyr::fill fills downward by default; with years sorted descending this
# copies the most recent available value into older NA years within a series.
fill(value) %>% # Replace NAs in 1971-1979 with values from one year more recent
bind_rows(filter(Bbtu_with_GCAM_names_intermediate, year %in% 1960:1970)) %>% # Reattaching 1960-1970 rows
arrange(Data_Status, state, MSN, EIA_fuel, EIA_sector, sector, fuel, -year) ->
L101.EIA_use_all_Bbtu
# ===================================================
# Attach metadata (title, units, comments, legacy name, precursors) required
# by the gcamdata output contract.
L101.EIA_use_all_Bbtu %>%
add_title("State Energy Data in Bbtu by Year, GCAM-Sector, and GCAM-Fuel") %>%
add_units("Billion BTU") %>%
add_comments("GCAM sector and fuel names were added, NAs for years 1971-1979 were replaced with most recent year's data available") %>%
add_legacy_name("L101.EIA_use_all_Bbtu") %>%
add_precursors("gcam-usa/EIA_use_all_Bbtu", "gcam-usa/EIA_SEDS_fuels",
"gcam-usa/EIA_SEDS_sectors") ->
L101.EIA_use_all_Bbtu
L101.inEIA_EJ_state_S_F %>%
add_title("State Energy Data in EJ by Year, GCAM-Sector, and GCAM-Fuel") %>%
add_units("EJ") %>%
add_comments("GCAM sector and fuel names were added, units converted to EJ, data with no GCAM fuel or sector name removed") %>%
add_legacy_name("L101.inEIA_EJ_state_S_F") %>%
add_precursors("gcam-usa/EIA_use_all_Bbtu", "gcam-usa/EIA_SEDS_fuels",
"gcam-usa/EIA_SEDS_sectors", "gcam-usa/A_fuel_conv") ->
L101.inEIA_EJ_state_S_F
return_data(L101.EIA_use_all_Bbtu, L101.inEIA_EJ_state_S_F)
} else {
stop("Unknown command")
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.