content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# library loading
library(rio)
library(dplyr)
library(ggplot2)
library(energy)
# import cash_flows dataset
cash_flow_report <- import("data/cash_flow.csv")
# five-number summary (min, Q1, median, Q3, max) of cash_flow
cash_flow_report %>%
  select(cash_flow) %>%
  unlist() %>%
  fivenum()
# interquartile range
cash_flow_report %>%
  select(cash_flow) %>%
  unlist() %>%
  IQR()
# FIX: the original stored these in variables named `mean` and `median`,
# masking the base R functions of the same name; use distinct names instead.
cash_flow_report %>%
  select(cash_flow) %>%
  unlist() %>%
  mean() -> mean_cash_flow
cash_flow_report %>%
  select(cash_flow) %>%
  unlist() %>%
  median() -> median_cash_flow
# mean minus median: quick skew indicator (positive suggests right skew)
mean_cash_flow - median_cash_flow
# variance
cash_flow_report %>%
  select(cash_flow) %>%
  unlist() %>%
  var()
# standard deviation
cash_flow_report %>%
  select(cash_flow) %>%
  unlist() %>%
  sd()
# skewness: Bowley quartile skewness ((Q3 - Q2) - (Q2 - Q1)) / (Q3 - Q1)
cash_flow_report %>%
  select(cash_flow) %>%
  unlist() %>%
  fivenum() -> quartiles
q <- quartiles[2:4]  # Q1, median, Q3
skewness <- ((q[3] - q[2]) - (q[2] - q[1])) / (q[3] - q[1])
# relation among variables
# NOTE(review): assumes column `y` (reporting quarter) imports as a type
# cor() accepts (numeric/Date) -- confirm against the CSV.
cor(x = cash_flow_report$y, y = cash_flow_report$cash_flow)
# time variable transformation: we have to assign a progressive number to each
# unique value of quarter (days elapsed since the oldest quarter)
oldest <- min(cash_flow_report$y)
cash_flow_report %>%
  mutate(delays = difftime(cash_flow_report$y, oldest, units = "days")) -> cash_flow_report_mutation
# linear (Pearson) correlation of elapsed time vs cash flow
cor(x = as.numeric(cash_flow_report_mutation$delays),
    y = cash_flow_report_mutation$cash_flow)
# distance correlation: also captures non-linear association
dcor(cash_flow_report_mutation$delays, cash_flow_report_mutation$cash_flow)
# graphical eda
# FIX: geom_histogram(stat = 'count') is deprecated for discrete variables;
# geom_bar() draws the same count-per-category chart.
ggplot(data = cash_flow_report, aes(x)) +
  geom_bar()
ggplot(data = cash_flow_report, aes(y)) +
  geom_bar()
ggplot(data = cash_flow_report, aes(cash_flow)) +
  geom_histogram()
ggplot(data = cash_flow_report, aes(cash_flow)) +
  geom_histogram(bins = 70)
# boxplot and outlier extraction
boxplot(x = cash_flow_report$cash_flow, horizontal = TRUE)
boxplot.stats(x = cash_flow_report$cash_flow)
stats <- boxplot.stats(x = cash_flow_report$cash_flow)
outliers <- stats$out
# inspect the record matching the third outlier value
cash_flow_report %>%
  filter(cash_flow == outliers[3])
# scatterplots
cash_flow_report %>%
  ggplot(aes(x = y, y = cash_flow)) +
  geom_point()
cash_flow_report %>%
  ggplot(aes(x = y, y = cash_flow, group = x, colour = x)) +
  geom_point()
# annotated time-series plot, one line per region
cash_flow_report %>%
  ggplot(aes(x = y, y = cash_flow, group = x, colour = x)) +
  geom_point() +
  geom_line() +
  labs(title = "cash flows over time by region",
       subtitle="quarterly data from 2014 to Q2 2017",
       caption = "source: cash_flow_report") +
  xlab("quarter of reporting") +
  ylab("recorded cash flows (euro)") +
  annotate("text", label = "the middle east cash flow series \n shows a unprecedent drop on the Q2 2017",
           x = "2017-07-01", y = 40000, hjust = 1, vjust = 0)
# highlight the middle east series in red, all other regions in grey
cash_flow_report %>%
  ggplot(aes(x = y, y = cash_flow, group = x, colour = x == "middle_east")) +
  geom_point() +
  geom_line(alpha = .2) +
  labs(title = "cash flows over time by region",
       subtitle="quarterly data from 2014 to Q2 2017, middle east data in red",
       caption = "source: cash_flow_report") +
  xlab("quarter of reporting") +
  ylab("recorded cash flows (euro)") +
  annotate("text", label = "the middle east cash flow series \n shows a unprecedent drop on the Q2 2017",
           x = "2017-07-01", y = 40000, hjust = 1, vjust = 0) +
  scale_colour_manual(values = c("grey70", "red")) +
  theme_minimal() +
  theme(legend.position = "none")
# total cash flow aggregated by quarter
cash_flow_report %>%
  group_by(y) %>%
  summarise(cash_flow = sum(cash_flow)) %>%
  rename(date = y) %>%
  ggplot(aes(x = date, y = cash_flow, group = 1)) +
  geom_line() +
  geom_point() +
  labs(title = "cash flows by quarter")
| /R-Data-Mining-master/Chapter06/chapter_6.R | permissive | cyrsis/RSandBox | R | false | false | 3,483 | r | #library loading
library(rio)
library(dplyr)
library(ggplot2)
library(energy)
# import cash_flows dataset
cash_flow_report <- import("data/cash_flow.csv")
# five-number summary (min, Q1, median, Q3, max) of cash_flow
cash_flow_report %>%
  select(cash_flow) %>%
  unlist() %>%
  fivenum()
# interquartile range
cash_flow_report %>%
  select(cash_flow) %>%
  unlist() %>%
  IQR()
# FIX: the original stored these in variables named `mean` and `median`,
# masking the base R functions of the same name; use distinct names instead.
cash_flow_report %>%
  select(cash_flow) %>%
  unlist() %>%
  mean() -> mean_cash_flow
cash_flow_report %>%
  select(cash_flow) %>%
  unlist() %>%
  median() -> median_cash_flow
# mean minus median: quick skew indicator (positive suggests right skew)
mean_cash_flow - median_cash_flow
# variance
cash_flow_report %>%
  select(cash_flow) %>%
  unlist() %>%
  var()
# standard deviation
cash_flow_report %>%
  select(cash_flow) %>%
  unlist() %>%
  sd()
# skewness: Bowley quartile skewness ((Q3 - Q2) - (Q2 - Q1)) / (Q3 - Q1)
cash_flow_report %>%
  select(cash_flow) %>%
  unlist() %>%
  fivenum() -> quartiles
q <- quartiles[2:4]  # Q1, median, Q3
skewness <- ((q[3] - q[2]) - (q[2] - q[1])) / (q[3] - q[1])
# relation among variables
# NOTE(review): assumes column `y` (reporting quarter) imports as a type
# cor() accepts (numeric/Date) -- confirm against the CSV.
cor(x = cash_flow_report$y, y = cash_flow_report$cash_flow)
# time variable transformation: we have to assign a progressive number to each
# unique value of quarter (days elapsed since the oldest quarter)
oldest <- min(cash_flow_report$y)
cash_flow_report %>%
  mutate(delays = difftime(cash_flow_report$y, oldest, units = "days")) -> cash_flow_report_mutation
# linear (Pearson) correlation of elapsed time vs cash flow
cor(x = as.numeric(cash_flow_report_mutation$delays),
    y = cash_flow_report_mutation$cash_flow)
# distance correlation: also captures non-linear association
dcor(cash_flow_report_mutation$delays, cash_flow_report_mutation$cash_flow)
# graphical eda
# FIX: geom_histogram(stat = 'count') is deprecated for discrete variables;
# geom_bar() draws the same count-per-category chart.
ggplot(data = cash_flow_report, aes(x)) +
  geom_bar()
ggplot(data = cash_flow_report, aes(y)) +
  geom_bar()
ggplot(data = cash_flow_report, aes(cash_flow)) +
  geom_histogram()
ggplot(data = cash_flow_report, aes(cash_flow)) +
  geom_histogram(bins = 70)
# boxplot and outlier extraction
boxplot(x = cash_flow_report$cash_flow, horizontal = TRUE)
boxplot.stats(x = cash_flow_report$cash_flow)
stats <- boxplot.stats(x = cash_flow_report$cash_flow)
outliers <- stats$out
# inspect the record matching the third outlier value
cash_flow_report %>%
  filter(cash_flow == outliers[3])
# scatterplots
cash_flow_report %>%
  ggplot(aes(x = y, y = cash_flow)) +
  geom_point()
cash_flow_report %>%
  ggplot(aes(x = y, y = cash_flow, group = x, colour = x)) +
  geom_point()
# annotated time-series plot, one line per region
cash_flow_report %>%
  ggplot(aes(x = y, y = cash_flow, group = x, colour = x)) +
  geom_point() +
  geom_line() +
  labs(title = "cash flows over time by region",
       subtitle="quarterly data from 2014 to Q2 2017",
       caption = "source: cash_flow_report") +
  xlab("quarter of reporting") +
  ylab("recorded cash flows (euro)") +
  annotate("text", label = "the middle east cash flow series \n shows a unprecedent drop on the Q2 2017",
           x = "2017-07-01", y = 40000, hjust = 1, vjust = 0)
# highlight the middle east series in red, all other regions in grey
cash_flow_report %>%
  ggplot(aes(x = y, y = cash_flow, group = x, colour = x == "middle_east")) +
  geom_point() +
  geom_line(alpha = .2) +
  labs(title = "cash flows over time by region",
       subtitle="quarterly data from 2014 to Q2 2017, middle east data in red",
       caption = "source: cash_flow_report") +
  xlab("quarter of reporting") +
  ylab("recorded cash flows (euro)") +
  annotate("text", label = "the middle east cash flow series \n shows a unprecedent drop on the Q2 2017",
           x = "2017-07-01", y = 40000, hjust = 1, vjust = 0) +
  scale_colour_manual(values = c("grey70", "red")) +
  theme_minimal() +
  theme(legend.position = "none")
# total cash flow aggregated by quarter
cash_flow_report %>%
  group_by(y) %>%
  summarise(cash_flow = sum(cash_flow)) %>%
  rename(date = y) %>%
  ggplot(aes(x = date, y = cash_flow, group = 1)) +
  geom_line() +
  geom_point() +
  labs(title = "cash flows by quarter")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setdiff2.R
\name{setdiff2}
\alias{setdiff2}
\title{Differences between sets a and b}
\usage{
setdiff2(a, b)
}
\arguments{
\item{a}{Required vector}
\item{b}{Required vector}
}
\value{
Vector of elements
}
\description{
Returns the elements that are in a or b but not in both (i.e., the symmetric difference of sets a and b)
}
\examples{
setdiff2(1:10, 3:12)
setdiff2(c('a','b','c'), c('b','c','d'))
}
\seealso{
\code{\link[=setdiff]{setdiff()}}, which is asymmetric: it returns only the elements of a that are not in b
}
| /man/setdiff2.Rd | no_license | ejanalysis/analyze.stuff | R | false | true | 543 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setdiff2.R
\name{setdiff2}
\alias{setdiff2}
\title{Differences between sets a and b}
\usage{
setdiff2(a, b)
}
\arguments{
\item{a}{Required vector}
\item{b}{Required vector}
}
\value{
Vector of elements
}
\description{
Returns the elements that in a or b but not in both (i.e., the differences between sets a and b)
}
\examples{
setdiff2(1:10, 3:12)
setdiff2(c('a','b','c'), c('b','c','d'))
}
\seealso{
\code{\link[=setdiff]{setdiff()}}, which is asymmetric: it returns only the elements of a that are not in b
}
|
c6bb0c0247ec032a9f824b42d8c5c4e6 query04_query25_1344n.qdimacs 441 1259 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query04_query25_1344n/query04_query25_1344n.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 71 | r | c6bb0c0247ec032a9f824b42d8c5c4e6 query04_query25_1344n.qdimacs 441 1259 |
library(ggplot2)
# Per-sample alpha-diversity values across rarefaction levels/metrics/reps.
# FIX: replaced the reassignable shorthands T/F with TRUE/FALSE throughout.
data = read.table("/lustre/scr/y/o/yourston/pitcher_plant/mttoolbox_otus/rarefy_alpha_div_test.txt", header = TRUE)
metric = "PD_whole_tree"
#metric = "chao1"
# restrict to one rarefaction depth, one metric, one replicate
data = data[data$rare_level == 50000,]
data = data[data$metric == metric,]
data = data[data$rep == 1,]
meta = read.table("/lustre/scr/y/o/yourston/pitcher_plant/metadata.txt", header = TRUE)
merged = merge(data, meta, by.x = "sample", by.y = "SampleID")
# t-test (Welch two-sample) comparing diversity between plant species
hooded_vals = merged[merged$Plant_species == "hooded", "value"]
yellow_vals = merged[merged$Plant_species == "yellow", "value"]
t = t.test(hooded_vals, yellow_vals)
t.pval = round(t$p.value, digits=3)
t.pval.str = paste("p-val=", t.pval, sep="")
# make plot (italicised Latin species names for the x-axis labels)
flava_title = expression(italic("Sarracenia flava")) #yellow
minor_title = expression(italic("Sarracenia minor")) #hooded
p = ggplot(merged, aes(x=Plant_species, y= value, color=Plant_species)) +
  geom_boxplot() +
  geom_jitter(size=3) +
  ggtitle("Alpha Diversity") +
  ylab("PD whole tree metric") +
  xlab("Plant Species") +
  theme(plot.title = element_text(hjust = 0.5)) +
  scale_x_discrete(limits = c("hooded", "yellow"),
                   labels = c(minor_title, flava_title)) +
  scale_color_manual(limits = c("hooded", "yellow"),
                     values = c("red", "lightblue"),
                     guide = FALSE) +
  theme_bw() +
  theme(plot.title = element_text(hjust=.5, size = 22),
        axis.text = element_text(size = 16),
        axis.title = element_text(size = 16),
        legend.title = element_text(size=18),
        legend.text = element_text(size = 16),
        legend.text.align = 0
  ) +
  # print the t-test p-value directly on the figure
  annotate("text", x=2.3, y=11.5, label=t.pval.str, size=6)
ggsave("alpha_div_test.pdf", plot=p)
| /make_alpha_div_fig.R | no_license | islandhopper81/pitcher_plant_utils | R | false | false | 1,698 | r |
library(ggplot2)
# Per-sample alpha-diversity values across rarefaction levels/metrics/reps.
# FIX: replaced the reassignable shorthands T/F with TRUE/FALSE throughout.
data = read.table("/lustre/scr/y/o/yourston/pitcher_plant/mttoolbox_otus/rarefy_alpha_div_test.txt", header = TRUE)
metric = "PD_whole_tree"
#metric = "chao1"
# restrict to one rarefaction depth, one metric, one replicate
data = data[data$rare_level == 50000,]
data = data[data$metric == metric,]
data = data[data$rep == 1,]
meta = read.table("/lustre/scr/y/o/yourston/pitcher_plant/metadata.txt", header = TRUE)
merged = merge(data, meta, by.x = "sample", by.y = "SampleID")
# t-test (Welch two-sample) comparing diversity between plant species
hooded_vals = merged[merged$Plant_species == "hooded", "value"]
yellow_vals = merged[merged$Plant_species == "yellow", "value"]
t = t.test(hooded_vals, yellow_vals)
t.pval = round(t$p.value, digits=3)
t.pval.str = paste("p-val=", t.pval, sep="")
# make plot (italicised Latin species names for the x-axis labels)
flava_title = expression(italic("Sarracenia flava")) #yellow
minor_title = expression(italic("Sarracenia minor")) #hooded
p = ggplot(merged, aes(x=Plant_species, y= value, color=Plant_species)) +
  geom_boxplot() +
  geom_jitter(size=3) +
  ggtitle("Alpha Diversity") +
  ylab("PD whole tree metric") +
  xlab("Plant Species") +
  theme(plot.title = element_text(hjust = 0.5)) +
  scale_x_discrete(limits = c("hooded", "yellow"),
                   labels = c(minor_title, flava_title)) +
  scale_color_manual(limits = c("hooded", "yellow"),
                     values = c("red", "lightblue"),
                     guide = FALSE) +
  theme_bw() +
  theme(plot.title = element_text(hjust=.5, size = 22),
        axis.text = element_text(size = 16),
        axis.title = element_text(size = 16),
        legend.title = element_text(size=18),
        legend.text = element_text(size = 16),
        legend.text.align = 0
  ) +
  # print the t-test p-value directly on the figure
  annotate("text", x=2.3, y=11.5, label=t.pval.str, size=6)
ggsave("alpha_div_test.pdf", plot=p)
|
# Module UI
#' @title mod_about_page_ui and mod_about_page_server
#' @description A shiny Module.
#'
#' @param id shiny id
#' @param input internal
#' @param output internal
#' @param session internal
#'
#' @rdname mod_about_page
#'
#' @keywords internal
#' @export
#' @importFrom shiny NS tagList
mod_about_page_ui <- function(id){
  # Namespace function: prefixes every output id so the module can be
  # embedded multiple times without id collisions.
  ns <- shiny::NS(id)
  shiny::tagList(
    shinydashboard::dashboardPage(
      # Header and sidebar are disabled: this module only supplies body content.
      shinydashboard::dashboardHeader(disable = T),
      shinydashboard::dashboardSidebar(disable = T),
      shinydashboard::dashboardBody(
        shiny::fluidPage(
          # Banner info box, rendered by output$about in the server function.
          shinydashboard::infoBoxOutput(ns('about'), width = 12),
          shinydashboard::box(
            title = "Funding Partner",
            width = 12,
            solidHeader = T,
            status = "primary",
            # Introductory text, rendered by output$group in the server function.
            shiny::textOutput(ns('group'))
          )
        ))))
}
# Module Server
#' @rdname mod_about_page
#' @param syn A logged-in Synapse client object (used for user lookup and file reads).
#' @param data_config List with a "data_files" entry mapping names to Synapse ids.
#' @export
#' @keywords internal
mod_about_page_server <- function(input, output, session, syn, data_config){
  ns <- session$ns
  # Synapse owner id of the currently logged-in user.
  current_user_synapse_id <- shiny::reactive({
    # code to get the synapse id of the current user here
    user <- syn$getUserProfile()[['ownerId']]
    return(user)
  })
  # Static banner describing the application.
  output$about <- shinydashboard::renderInfoBox({
    shinydashboard::infoBox(
      " ",
      print("projectLive: Track the progress and impact of your funding initiatives in real time"),
      icon = shiny::icon("university", "fa-1x"),
      color = "light-blue",
      fill = TRUE
    )
  })
  output$group <- shiny::renderText({
    txt <- "Navigate to the tabs at the top of the page to get more information about the participating investigators and the various resources that they have generated."
    # Dismiss the app-wide loading screen once this landing page renders.
    waiter::waiter_hide()
    txt
  })
  # Downloads every configured data file from Synapse.
  # NOTE(review): read_rds_file_from_synapse is defined elsewhere in the
  # package, and this reactive is not referenced by any output in this module.
  data <- shiny::reactive({
    shiny::req(syn, data_config)
    tables <- data_config %>%
      purrr::pluck("data_files") %>%
      purrr::map_chr("synapse_id") %>%
      purrr::map(read_rds_file_from_synapse, syn)
    list(
      "tables" = tables
    )
  })
}
| /R/mod_about_page.R | no_license | Sage-Bionetworks/projectLive_HTAN | R | false | false | 2,061 | r | # Module UI
#' @title mod_about_page_ui and mod_about_page_server
#' @description A shiny Module.
#'
#' @param id shiny id
#' @param input internal
#' @param output internal
#' @param session internal
#'
#' @rdname mod_about_page
#'
#' @keywords internal
#' @export
#' @importFrom shiny NS tagList
mod_about_page_ui <- function(id){
  # Namespace function: prefixes every output id so the module can be
  # embedded multiple times without id collisions.
  ns <- shiny::NS(id)
  shiny::tagList(
    shinydashboard::dashboardPage(
      # Header and sidebar are disabled: this module only supplies body content.
      shinydashboard::dashboardHeader(disable = T),
      shinydashboard::dashboardSidebar(disable = T),
      shinydashboard::dashboardBody(
        shiny::fluidPage(
          # Banner info box, rendered by output$about in the server function.
          shinydashboard::infoBoxOutput(ns('about'), width = 12),
          shinydashboard::box(
            title = "Funding Partner",
            width = 12,
            solidHeader = T,
            status = "primary",
            # Introductory text, rendered by output$group in the server function.
            shiny::textOutput(ns('group'))
          )
        ))))
}
# Module Server
#' @rdname mod_about_page
#' @param syn A logged-in Synapse client object (used for user lookup and file reads).
#' @param data_config List with a "data_files" entry mapping names to Synapse ids.
#' @export
#' @keywords internal
mod_about_page_server <- function(input, output, session, syn, data_config){
  ns <- session$ns
  # Synapse owner id of the currently logged-in user.
  current_user_synapse_id <- shiny::reactive({
    # code to get the synapse id of the current user here
    user <- syn$getUserProfile()[['ownerId']]
    return(user)
  })
  # Static banner describing the application.
  output$about <- shinydashboard::renderInfoBox({
    shinydashboard::infoBox(
      " ",
      print("projectLive: Track the progress and impact of your funding initiatives in real time"),
      icon = shiny::icon("university", "fa-1x"),
      color = "light-blue",
      fill = TRUE
    )
  })
  output$group <- shiny::renderText({
    txt <- "Navigate to the tabs at the top of the page to get more information about the participating investigators and the various resources that they have generated."
    # Dismiss the app-wide loading screen once this landing page renders.
    waiter::waiter_hide()
    txt
  })
  # Downloads every configured data file from Synapse.
  # NOTE(review): read_rds_file_from_synapse is defined elsewhere in the
  # package, and this reactive is not referenced by any output in this module.
  data <- shiny::reactive({
    shiny::req(syn, data_config)
    tables <- data_config %>%
      purrr::pluck("data_files") %>%
      purrr::map_chr("synapse_id") %>%
      purrr::map(read_rds_file_from_synapse, syn)
    list(
      "tables" = tables
    )
  })
}
|
# Multiple Linear Regression: predicting labour cost (lc) for vehicle repairs
getwd()
vehicle <- read.csv('vehicle.csv')
str(vehicle)
library(psych)
# Scatterplot matrices: full dataset, then two focused variable subsets.
pairs.panels(vehicle)
subset_mileage <- vehicle[, c('Mileage', 'lh', 'lc')]
pairs.panels(subset_mileage)
subset_state <- vehicle[, c('State', 'lh', 'lc')]
pairs.panels(subset_state)
names(vehicle)
# Reduced model: labour cost explained by state and labour hours.
model_reduced <- lm(lc ~ State + lh, vehicle)
model_reduced
summary(model_reduced)
# Full model: labour cost against every remaining column.
model_full <- lm(lc ~ ., vehicle)
model_full
summary(model_full)
# Partial F-test: does the full model improve significantly on the reduced one?
anova(model_reduced, model_full)
# Confidence interval for a Hawaii repair taking 10 labour hours.
estimate <- predict(model_reduced, data.frame(State = 'HI', lh = 10), interval = 'confidence')
estimate
| /Multiple Linear Regression - Vehicle.R | no_license | Abhi062017/Uptill-Nov | R | false | false | 561 | r | #Multiple Linear Regression
# Multiple Linear Regression: predicting labour cost (lc) for vehicle repairs
getwd()
vehicle <- read.csv('vehicle.csv')
str(vehicle)
library(psych)
# Scatterplot matrices: full dataset, then two focused variable subsets.
pairs.panels(vehicle)
subset_mileage <- vehicle[, c('Mileage', 'lh', 'lc')]
pairs.panels(subset_mileage)
subset_state <- vehicle[, c('State', 'lh', 'lc')]
pairs.panels(subset_state)
names(vehicle)
# Reduced model: labour cost explained by state and labour hours.
model_reduced <- lm(lc ~ State + lh, vehicle)
model_reduced
summary(model_reduced)
# Full model: labour cost against every remaining column.
model_full <- lm(lc ~ ., vehicle)
model_full
summary(model_full)
# Partial F-test: does the full model improve significantly on the reduced one?
anova(model_reduced, model_full)
# Confidence interval for a Hawaii repair taking 10 labour hours.
estimate <- predict(model_reduced, data.frame(State = 'HI', lh = 10), interval = 'confidence')
estimate
|
# AutoReport Typed Report Panel Set
# Left-hand navigation for the typed-response report. Each output id here
# (e.g. "summaryStatement") must be rendered by matching server-side code.
navlistPanel("Student Response Scoring",
             # Summary tab: overall score distribution plus per-response
             # scoring probabilities.
             tabPanel("Score Summary",
                      tabsetPanel(
                        tabPanel("Score Distribution",
                                 textOutput("summaryStatement"),
                                 plotlyOutput("typedDistributionPlot"),
                                 tableOutput("displayRubric")
                        ),
                        tabPanel("Scoring Probabilities",
                                 uiOutput("chooseHistogram"),
                                 plotlyOutput("probHistogram"),
                                 textOutput("probStatement")
                        )
                      )
             ),
             # Raw student responses grouped by their predicted rubric bin.
             tabPanel("Responses Sorted by Rubric Bin",
                      textOutput("scoringTypedStatement"),
                      dataTableOutput(outputId="exampleResponses")
             ),
             "Analysis of Term Usage",
             tabPanel("Most Important Terms",
                      tabsetPanel(
                        tabPanel("Important Terms",
                                 uiOutput("chooseImportant"),
                                 plotlyOutput("importantTermsPlot"),
                                 textOutput("importantTermsStatement")
                        ),
                        tabPanel("Table Important Terms",
                                 textOutput("ngOverStatement"),
                                 dataTableOutput(outputId="overabundanceTable"),
                                 # Static figure defining the overabundance metric.
                                 img(src="overabundanceDef.png")
                        )
                      )
             ),
             tabPanel("Web Diagrams of Important Terms",
                      textOutput("wdStatement"),
                      uiOutput("chooseWebDiagram"),
                      plotOutput("webDiagramPlot"),
                      uiOutput("webDiagramPredConf")
             ),
             tabPanel("Term Usage and Association Map",
                      textOutput("mapStatement"),
                      uiOutput("chooseTermMap"),
                      plotOutput("termMapPlot"),
                      # Control for the minimum term frequency shown on the map.
                      uiOutput("chooseMinFreq")
             ),
             "Question Reference Material",
             # Cross-validation performance of the automated scoring model.
             tabPanel("Scoring Model Performance",
                      htmlOutput("cvDisclaimer"),
                      uiOutput("select_cv_level"),
                      tableOutput("modelListTable"),
                      verbatimTextOutput("cvDataOut")
                      # uiOutput("cvDataDisplay")
             )
) | /AutoReport/UIPanels/reportPanelsTyped.R | no_license | BeyondMultipleChoice/AACRAutoReport | R | false | false | 2,717 | r | # AutoReport Typed Report Panel Set
# AutoReport Typed Report Panel Set
# Left-hand navigation for the typed-response report. Each output id here
# (e.g. "summaryStatement") must be rendered by matching server-side code.
navlistPanel("Student Response Scoring",
             # Summary tab: overall score distribution plus per-response
             # scoring probabilities.
             tabPanel("Score Summary",
                      tabsetPanel(
                        tabPanel("Score Distribution",
                                 textOutput("summaryStatement"),
                                 plotlyOutput("typedDistributionPlot"),
                                 tableOutput("displayRubric")
                        ),
                        tabPanel("Scoring Probabilities",
                                 uiOutput("chooseHistogram"),
                                 plotlyOutput("probHistogram"),
                                 textOutput("probStatement")
                        )
                      )
             ),
             # Raw student responses grouped by their predicted rubric bin.
             tabPanel("Responses Sorted by Rubric Bin",
                      textOutput("scoringTypedStatement"),
                      dataTableOutput(outputId="exampleResponses")
             ),
             "Analysis of Term Usage",
             tabPanel("Most Important Terms",
                      tabsetPanel(
                        tabPanel("Important Terms",
                                 uiOutput("chooseImportant"),
                                 plotlyOutput("importantTermsPlot"),
                                 textOutput("importantTermsStatement")
                        ),
                        tabPanel("Table Important Terms",
                                 textOutput("ngOverStatement"),
                                 dataTableOutput(outputId="overabundanceTable"),
                                 # Static figure defining the overabundance metric.
                                 img(src="overabundanceDef.png")
                        )
                      )
             ),
             tabPanel("Web Diagrams of Important Terms",
                      textOutput("wdStatement"),
                      uiOutput("chooseWebDiagram"),
                      plotOutput("webDiagramPlot"),
                      uiOutput("webDiagramPredConf")
             ),
             tabPanel("Term Usage and Association Map",
                      textOutput("mapStatement"),
                      uiOutput("chooseTermMap"),
                      plotOutput("termMapPlot"),
                      # Control for the minimum term frequency shown on the map.
                      uiOutput("chooseMinFreq")
             ),
             "Question Reference Material",
             # Cross-validation performance of the automated scoring model.
             tabPanel("Scoring Model Performance",
                      htmlOutput("cvDisclaimer"),
                      uiOutput("select_cv_level"),
                      tableOutput("modelListTable"),
                      verbatimTextOutput("cvDataOut")
                      # uiOutput("cvDataDisplay")
             )
) |
### MixSIAR Model for southern California stable isotope data
### Andrew D. Somerville
### Using r package, MixSIAR
### Stock, B. C., Jackson, A. L., Ward, E. J., Parnell, A. C., Phillips, D. L., & Semmens, B. X. (2018). Analyzing mixing systems using a new generation of Bayesian tracer mixing models. PeerJ, 6, e5096.
### Stock, B. C. and B. X. Semmens (2016). MixSIAR GUI User Manual.Version 3.1. https://github.com/brianstock/MixSIAR/. doi:10.5281/zenodo.47719.
library(MixSIAR)
# Load mix (consumer) data: d13C/d15N with provenience as a fixed factor
mix <- load_mix_data(filename="CONSUMER_noSNI.csv",
                     iso_names=c("d13C","d15N"),
                     factors="Prov",
                     fac_random=FALSE,
                     fac_nested=FALSE,
                     cont_effects=NULL)
# Load source data (means, concentration-dependent mixing)
source <- load_source_data(filename="SOURCE_20201012.csv",
                           source_factors=NULL,
                           conc_dep=TRUE,
                           data_type="means",
                           mix)
# Load discrimination/TDF data
discr <- load_discr_data(filename="TEF.csv", mix)
# Isospace plot
plot_data(filename="isospace_plot",
          plot_save_pdf=TRUE,
          plot_save_png=FALSE,
          mix,source,discr)
# Calculate standardized convex hull area
#if(mix$n.iso==2) calc_area(source=source,mix=mix,discr=discr)
################################################################################
# # PRIORS (construct alpha from geographic assumptions [island, coastal, inland])
################################################################################
# Resource importance scored as high = 3, med = 2, low = 1
# Uninformed Prior
Cali.alpha <- c(1,1,1,1)
# Generate alpha hyperparameters scaling sum(alpha)=n.sources
Cali.alpha <- Cali.alpha*length(Cali.alpha)/sum(Cali.alpha)
# the Dirichlet hyperparameters for the alpha.prior cannot be 0 (but can set = .01)
Cali.alpha[which(Cali.alpha==0)] <- 0.01
# Plot informative prior
plot_prior(alpha.prior=Cali.alpha,
           source=source,
           plot_save_pdf=FALSE,
           plot_save_png=FALSE,
           filename="prior_plot_Cali_inf")
# Define model structure and write JAGS model file
model_filename <- "MixSIAR_model.txt"
resid_err <- TRUE
process_err <- FALSE
write_JAGS_model(model_filename, resid_err, process_err, mix, source)
# Run the JAGS model ("test" first to check setup, then "short")
jags.1 <- run_model(run="test", mix, source, discr, model_filename, alpha.prior=1)
#jags.1 <- run_model(run="short", mix, source, discr, model_filename, alpha.prior=1)
# Process diagnostics, summary stats, and posterior plots
output_JAGS(jags.1, mix, source)
###################################################
##            OUTPUT PLOTTING                    ##
library(MASS)
library(R2jags)
library(tidyverse)
library(RColorBrewer)
library(lattice)
library(dplyr)
library(grid)
attach.jags(jags.1)
# Posterior diet proportions p.fac1[draw, prov, source] reshaped to one data
# frame per provenience (source order: marine high, marine low, plants, mammal)
post.Coastal <- data.frame(Prov = "2 Coastal", Marine.High = p.fac1[,2,1], Marine.Low = p.fac1[,2,2], Plants = p.fac1[,2,3], T.Mammal = p.fac1[,2,4])
post.Inland <- data.frame(Prov = "1 Inland",Marine.High = p.fac1[,1,1], Marine.Low = p.fac1[,1,2], Plants = p.fac1[,1,3], T.Mammal = p.fac1[,1,4])
post.San.Clemente <- data.frame(Prov = "5 San Clemente", Marine.High = p.fac1[,5,1], Marine.Low = p.fac1[,5,2], Plants = p.fac1[,5,3], T.Mammal = p.fac1[,5,4])
post.Santa.Cruz <- data.frame(Prov = "4 Santa Cruz", Marine.High = p.fac1[,4,1], Marine.Low = p.fac1[,4,2], Plants = p.fac1[,4,3], T.Mammal = p.fac1[,4,4])
post.Santa.Rosa <- data.frame(Prov = "3 Santa Rosa", Marine.High = p.fac1[,3,1], Marine.Low = p.fac1[,3,2], Plants = p.fac1[,3,3], T.Mammal = p.fac1[,3,4])
# long format: one row per posterior draw x source
Coastal <- post.Coastal %>% gather(source,value, 2:5)
Inland <-post.Inland %>% gather(source,value, 2:5)
San_Clemente <- post.San.Clemente %>% gather(source,value, 2:5)
Santa_Cruz <- post.Santa.Cruz %>% gather(source,value, 2:5)
Santa_Rosa <- post.Santa.Rosa %>% gather(source,value, 2:5)
all <- rbind(Coastal, Inland, San_Clemente, Santa_Cruz, Santa_Rosa)
########### BOX PLOTS ###########
# FIX: ggsave() is not a ggplot layer and cannot be chained with `+`;
# build the plot object, then save it with an explicit `plot =` argument.
p_box <- ggplot(aes(y = value, x = source, fill=source), data = all) +
  geom_boxplot(outlier.colour = NA) +
  coord_cartesian(ylim = c(0,1)) +
  theme_bw() +
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1))+
  scale_fill_manual(values=c("#225ea8", "#41b6c4", "#7fcdbb", "#ffffd9"))+
  xlab("Source") +
  ylab("Diet proportion") +
  facet_wrap(~Prov, ncol=2) +
  theme(legend.position=c(.80, .15))
ggsave(filename = "Cali_BoxPlots_CONSUMER_noSNI_facet.tiff", plot = p_box, width=8, height=11)
########## OVERALL VIOLIN PLOTS ###########
p_violin <- ggplot(aes(x = value, y=source, fill=source),data=all)+
  geom_violin(trim = FALSE,
              alpha = 0.8)+
  geom_boxplot(alpha=0.3, color="black", width=.1)+
  theme_bw() +
  # FIX: removed the trailing comma that passed an empty argument to theme()
  theme(legend.position="none")+
  scale_fill_manual(values=c("#225ea8", "#41b6c4", "#7fcdbb", "#ffffd9"))+
  facet_wrap(~Prov, ncol=2, scales="free") +
  coord_flip()+
  xlab("Diet Proportions") +
  ylab("Source")
ggsave(filename = "Cali_BoxPlots_CONSUMER_noSNI_violin.tiff", plot = p_violin, width=8, height=11)
| /MixSIAR_SoCal_CONSUMER_20201019.R | no_license | ISU-LunchinatoRs/S21_California_Paleodiet | R | false | false | 5,212 | r | ### MixSIAR Model for southern California stable isotope data
### Andrew D. Somerville
### Using r package, MixSIAR
### Stock, B. C., Jackson, A. L., Ward, E. J., Parnell, A. C., Phillips, D. L., & Semmens, B. X. (2018). Analyzing mixing systems using a new generation of Bayesian tracer mixing models. PeerJ, 6, e5096.
### Stock, B. C. and B. X. Semmens (2016). MixSIAR GUI User Manual.Version 3.1. https://github.com/brianstock/MixSIAR/. doi:10.5281/zenodo.47719.
library(MixSIAR)
# Load mix (consumer) data: d13C/d15N with provenience as a fixed factor
mix <- load_mix_data(filename="CONSUMER_noSNI.csv",
                     iso_names=c("d13C","d15N"),
                     factors="Prov",
                     fac_random=FALSE,
                     fac_nested=FALSE,
                     cont_effects=NULL)
# Load source data (means, concentration-dependent mixing)
source <- load_source_data(filename="SOURCE_20201012.csv",
                           source_factors=NULL,
                           conc_dep=TRUE,
                           data_type="means",
                           mix)
# Load discrimination/TDF data
discr <- load_discr_data(filename="TEF.csv", mix)
# Isospace plot
plot_data(filename="isospace_plot",
          plot_save_pdf=TRUE,
          plot_save_png=FALSE,
          mix,source,discr)
# Calculate standardized convex hull area
#if(mix$n.iso==2) calc_area(source=source,mix=mix,discr=discr)
################################################################################
# # PRIORS (construct alpha from geographic assumptions [island, coastal, inland])
################################################################################
# Resource importance scored as high = 3, med = 2, low = 1
# Uninformed Prior
Cali.alpha <- c(1,1,1,1)
# Generate alpha hyperparameters scaling sum(alpha)=n.sources
Cali.alpha <- Cali.alpha*length(Cali.alpha)/sum(Cali.alpha)
# the Dirichlet hyperparameters for the alpha.prior cannot be 0 (but can set = .01)
Cali.alpha[which(Cali.alpha==0)] <- 0.01
# Plot informative prior
plot_prior(alpha.prior=Cali.alpha,
           source=source,
           plot_save_pdf=FALSE,
           plot_save_png=FALSE,
           filename="prior_plot_Cali_inf")
# Define model structure and write JAGS model file
model_filename <- "MixSIAR_model.txt"
resid_err <- TRUE
process_err <- FALSE
write_JAGS_model(model_filename, resid_err, process_err, mix, source)
# Run the JAGS model ("test" first to check setup, then "short")
jags.1 <- run_model(run="test", mix, source, discr, model_filename, alpha.prior=1)
#jags.1 <- run_model(run="short", mix, source, discr, model_filename, alpha.prior=1)
# Process diagnostics, summary stats, and posterior plots
output_JAGS(jags.1, mix, source)
###################################################
##            OUTPUT PLOTTING                    ##
library(MASS)
library(R2jags)
library(tidyverse)
library(RColorBrewer)
library(lattice)
library(dplyr)
library(grid)
attach.jags(jags.1)
# Posterior diet proportions p.fac1[draw, prov, source] reshaped to one data
# frame per provenience (source order: marine high, marine low, plants, mammal)
post.Coastal <- data.frame(Prov = "2 Coastal", Marine.High = p.fac1[,2,1], Marine.Low = p.fac1[,2,2], Plants = p.fac1[,2,3], T.Mammal = p.fac1[,2,4])
post.Inland <- data.frame(Prov = "1 Inland",Marine.High = p.fac1[,1,1], Marine.Low = p.fac1[,1,2], Plants = p.fac1[,1,3], T.Mammal = p.fac1[,1,4])
post.San.Clemente <- data.frame(Prov = "5 San Clemente", Marine.High = p.fac1[,5,1], Marine.Low = p.fac1[,5,2], Plants = p.fac1[,5,3], T.Mammal = p.fac1[,5,4])
post.Santa.Cruz <- data.frame(Prov = "4 Santa Cruz", Marine.High = p.fac1[,4,1], Marine.Low = p.fac1[,4,2], Plants = p.fac1[,4,3], T.Mammal = p.fac1[,4,4])
post.Santa.Rosa <- data.frame(Prov = "3 Santa Rosa", Marine.High = p.fac1[,3,1], Marine.Low = p.fac1[,3,2], Plants = p.fac1[,3,3], T.Mammal = p.fac1[,3,4])
# long format: one row per posterior draw x source
Coastal <- post.Coastal %>% gather(source,value, 2:5)
Inland <-post.Inland %>% gather(source,value, 2:5)
San_Clemente <- post.San.Clemente %>% gather(source,value, 2:5)
Santa_Cruz <- post.Santa.Cruz %>% gather(source,value, 2:5)
Santa_Rosa <- post.Santa.Rosa %>% gather(source,value, 2:5)
all <- rbind(Coastal, Inland, San_Clemente, Santa_Cruz, Santa_Rosa)
########### BOX PLOTS ###########
# FIX: ggsave() is not a ggplot layer and cannot be chained with `+`;
# build the plot object, then save it with an explicit `plot =` argument.
p_box <- ggplot(aes(y = value, x = source, fill=source), data = all) +
  geom_boxplot(outlier.colour = NA) +
  coord_cartesian(ylim = c(0,1)) +
  theme_bw() +
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1))+
  scale_fill_manual(values=c("#225ea8", "#41b6c4", "#7fcdbb", "#ffffd9"))+
  xlab("Source") +
  ylab("Diet proportion") +
  facet_wrap(~Prov, ncol=2) +
  theme(legend.position=c(.80, .15))
ggsave(filename = "Cali_BoxPlots_CONSUMER_noSNI_facet.tiff", plot = p_box, width=8, height=11)
########## OVERALL VIOLIN PLOTS ###########
p_violin <- ggplot(aes(x = value, y=source, fill=source),data=all)+
  geom_violin(trim = FALSE,
              alpha = 0.8)+
  geom_boxplot(alpha=0.3, color="black", width=.1)+
  theme_bw() +
  # FIX: removed the trailing comma that passed an empty argument to theme()
  theme(legend.position="none")+
  scale_fill_manual(values=c("#225ea8", "#41b6c4", "#7fcdbb", "#ffffd9"))+
  facet_wrap(~Prov, ncol=2, scales="free") +
  coord_flip()+
  xlab("Diet Proportions") +
  ylab("Source")
ggsave(filename = "Cali_BoxPlots_CONSUMER_noSNI_violin.tiff", plot = p_violin, width=8, height=11)
|
/code/name_shuju_fin_result_model3.R | no_license | MIALAB-RUC/Trimer | R | false | false | 4,210 | r | ||
# download data
setwd("/Users/FTS/Desktop/Coursera/04_Exploratory_Data_Analysis/scripts")
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile="./data/download001.zip", method="curl")
list.files("./data")
# unzip data
unzip("./data/download001.zip", list=FALSE)
list.files("./")
# load data and subset data
# FIX: the raw file encodes missing readings as "?"; declaring na.strings
# imports them as NA instead of literal "?" text (previously triggering
# "NAs introduced by coercion" warnings in as.double below).
rawdata <- read.csv("household_power_consumption.txt", sep=";", header=TRUE, na.strings = "?")
# keep only 1-2 Feb 2007 (dates are stored as d/m/Y strings)
subdata <- rawdata[which(as.Date(rawdata$Date, "%d/%m/%Y") == "2007-02-01" | as.Date(rawdata$Date, "%d/%m/%Y") == "2007-02-02"),]
# combine Date + Time into POSIX timestamps; coerce power readings to numeric
plotdata <- cbind(data.frame(strptime(paste(subdata$Date, subdata$Time), "%d/%m/%Y %H:%M:%S")), data.frame(as.double(as.character(subdata$Global_active_power))))
names(plotdata) <- c("date_time", "global_active_power")
# plot data: line chart of global active power across the two days
plot(x=plotdata$date_time, y=plotdata$global_active_power , type= "l", main = "", xlab="" , ylab = "Global Active Power (kilowatts)")
# copy to png
dev.copy(png, file = "plot2.png", width=480, height=480)
dev.off()
| /Scripts/plot2.R | no_license | mbernstein9/datasciencecoursera4 | R | false | false | 1,042 | r | # download data
# download data
setwd("/Users/FTS/Desktop/Coursera/04_Exploratory_Data_Analysis/scripts")
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile="./data/download001.zip", method="curl")
list.files("./data")
# unzip data
unzip("./data/download001.zip", list=FALSE)
list.files("./")
# load data and subset data
# FIX: the raw file encodes missing readings as "?"; declaring na.strings
# imports them as NA instead of literal "?" text (previously triggering
# "NAs introduced by coercion" warnings in as.double below).
rawdata <- read.csv("household_power_consumption.txt", sep=";", header=TRUE, na.strings = "?")
# keep only 1-2 Feb 2007 (dates are stored as d/m/Y strings)
subdata <- rawdata[which(as.Date(rawdata$Date, "%d/%m/%Y") == "2007-02-01" | as.Date(rawdata$Date, "%d/%m/%Y") == "2007-02-02"),]
# combine Date + Time into POSIX timestamps; coerce power readings to numeric
plotdata <- cbind(data.frame(strptime(paste(subdata$Date, subdata$Time), "%d/%m/%Y %H:%M:%S")), data.frame(as.double(as.character(subdata$Global_active_power))))
names(plotdata) <- c("date_time", "global_active_power")
# plot data: line chart of global active power across the two days
plot(x=plotdata$date_time, y=plotdata$global_active_power , type= "l", main = "", xlab="" , ylab = "Global Active Power (kilowatts)")
# copy to png
dev.copy(png, file = "plot2.png", width=480, height=480)
dev.off()
|
## ORGANIZE DATA ####
# Duration
data_duration_so_ana = data_duration_so %>%
# Filter to only English-English and Japanese-Japanese tokens
filter((lang_pre == "eng" & lang_post == "eng") | (lang_pre == "jap" & lang_post == "jap")) %>%
# Center percent exposure on 50%
mutate(eng_percent_centered = eng_percent - 0.5) %>%
# Contrast code context
mutate(context_contrast = if_else(lang_pre == "eng", -0.5, 0.5))
# Formants
data_formants_so_o_ana = data_formants_so_o %>%
# Filter to only English-English and Japanese-Japanese tokens
filter((lang_pre == "eng" & lang_post == "eng") | (lang_pre == "jap" & lang_post == "jap")) %>%
# Center percent exposure on 50%
mutate(eng_percent_centered = eng_percent - 0.5) %>%
# Contrast code context
mutate(context_contrast = if_else(lang_pre == "eng", -0.5, 0.5))
## RUN MODELS ON DURATION ####
# Strategy: fit a full mixed model by maximum likelihood (REML = F is
# required for likelihood-ratio comparison of fixed effects), then drop one
# fixed effect at a time with update() and test each reduced model against
# the full model with anova() (likelihood-ratio test).
# Full model: exposure x context interaction, with random intercepts and
# random context slopes for speakers nested within pairs.
duration_so_exp.full.lme = lmer(duration_so ~ eng_percent_centered * context_contrast +
                                  (1 + context_contrast| pair/speaker),
                                data_duration_so_ana,
                                REML = F)
summary(duration_so_exp.full.lme)
# Removing percent exposure - n.s.
duration_so_exp.nope.lme = update(duration_so_exp.full.lme, . ~ .
                                  - eng_percent_centered)
anova(duration_so_exp.full.lme, duration_so_exp.nope.lme)
# Removing context - n.s.
duration_so_exp.nocx.lme = update(duration_so_exp.full.lme, . ~ .
                                  - context_contrast)
anova(duration_so_exp.full.lme, duration_so_exp.nocx.lme)
# Removing interaction - SIGNIFICANT
duration_so_exp.noint.lme = update(duration_so_exp.full.lme, . ~ .
                                   - eng_percent_centered:context_contrast)
anova(duration_so_exp.full.lme, duration_so_exp.noint.lme)
# Follow-up: the interaction was significant, so test the exposure effect
# separately within each context (context_contrast == -0.5 is the English
# context, +0.5 the Japanese context; random slopes dropped in the subsets).
duration_so_eng_exp.full.lme = lmer(duration_so ~ eng_percent_centered +
                                      (1 | pair/speaker),
                                    data_duration_so_ana[data_duration_so_ana$context_contrast==-0.5,],
                                    REML = F)
summary(duration_so_eng_exp.full.lme)
duration_so_eng_exp.nope.lme = update(duration_so_eng_exp.full.lme, . ~ .
                                      - eng_percent_centered)
anova(duration_so_eng_exp.full.lme, duration_so_eng_exp.nope.lme)
duration_so_ja_exp.full.lme = lmer(duration_so ~ eng_percent_centered +
                                     (1 | pair/speaker),
                                   data_duration_so_ana[data_duration_so_ana$context_contrast==0.5,],
                                   REML = F)
summary(duration_so_ja_exp.full.lme)
duration_so_ja_exp.nope.lme = update(duration_so_ja_exp.full.lme, . ~ .
                                     - eng_percent_centered)
anova(duration_so_ja_exp.full.lme, duration_so_ja_exp.nope.lme)
## RUN MODELS ON FORMANTS - F1 ####
# Same likelihood-ratio testing strategy as for duration, applied to
# normalized F1; 'percentage' (an additional continuous predictor in the
# formant data) enters both the fixed effects and the random slopes.
# Full model
formants_so_f1_exp.full.lme = lmer(f1_norm_sum ~ eng_percent_centered * context_contrast + percentage +
                                     (1 + context_contrast + percentage | pair/speaker),
                                   data_formants_so_o_ana,
                                   REML = F)
summary(formants_so_f1_exp.full.lme)
# Removing percent exposure - n.s.
formants_so_f1_exp.nope.lme = update(formants_so_f1_exp.full.lme, . ~ .
                                     - eng_percent_centered)
anova(formants_so_f1_exp.full.lme, formants_so_f1_exp.nope.lme)
# Removing context - n.s.
formants_so_f1_exp.nocx.lme = update(formants_so_f1_exp.full.lme, . ~ .
                                     - context_contrast)
anova(formants_so_f1_exp.full.lme, formants_so_f1_exp.nocx.lme)
# Removing percentage - n.s.
formants_so_f1_exp.nopct.lme = update(formants_so_f1_exp.full.lme, . ~ .
                                      - percentage)
anova(formants_so_f1_exp.full.lme, formants_so_f1_exp.nopct.lme)
# Removing interaction - SIGNIFICANT
formants_so_f1_exp.noint.lme = update(formants_so_f1_exp.full.lme, . ~ .
                                      - eng_percent_centered:context_contrast)
anova(formants_so_f1_exp.full.lme, formants_so_f1_exp.noint.lme)
## RUN MODELS ON FORMANTS - F2 ####
# Same model-comparison strategy as for F1, applied to normalized F2.
# NOTE(review): "MODEL FAIL" below appears to record that the reduced model
# did not fit/converge rather than a significance result - confirm with the
# original analyst.
# Full model
formants_so_f2_exp.full.lme = lmer(f2_norm_sum ~ eng_percent_centered * context_contrast + percentage +
                                     (1 + context_contrast + percentage | pair/speaker),
                                   data_formants_so_o_ana,
                                   REML = F)
summary(formants_so_f2_exp.full.lme)
# Removing percent exposure - MODEL FAIL
formants_so_f2_exp.nope.lme = update(formants_so_f2_exp.full.lme, . ~ .
                                     - eng_percent_centered)
anova(formants_so_f2_exp.full.lme, formants_so_f2_exp.nope.lme)
# Removing context - MODEL FAIL
formants_so_f2_exp.nocx.lme = update(formants_so_f2_exp.full.lme, . ~ .
                                     - context_contrast)
anova(formants_so_f2_exp.full.lme, formants_so_f2_exp.nocx.lme)
# Removing percentage - MODEL FAIL
formants_so_f2_exp.nopct.lme = update(formants_so_f2_exp.full.lme, . ~ .
                                      - percentage)
anova(formants_so_f2_exp.full.lme, formants_so_f2_exp.nopct.lme)
# Removing interaction - n.s.
formants_so_f2_exp.noint.lme = update(formants_so_f2_exp.full.lme, . ~ .
                                      - eng_percent_centered:context_contrast)
anova(formants_so_f2_exp.full.lme, formants_so_f2_exp.noint.lme)
| /phonetic_analysis/scripts/so/phonetics_so_analyses_exposure.R | no_license | pagepiccinini/cs-japanese-english | R | false | false | 5,518 | r | ## ORGANIZE DATA ####
# Duration
data_duration_so_ana = data_duration_so %>%
# Filter to only English-English and Japanese-Japanese tokens
filter((lang_pre == "eng" & lang_post == "eng") | (lang_pre == "jap" & lang_post == "jap")) %>%
# Center percent exposure on 50%
mutate(eng_percent_centered = eng_percent - 0.5) %>%
# Contrast code context
mutate(context_contrast = if_else(lang_pre == "eng", -0.5, 0.5))
# Formants
data_formants_so_o_ana = data_formants_so_o %>%
# Filter to only English-English and Japanese-Japanese tokens
filter((lang_pre == "eng" & lang_post == "eng") | (lang_pre == "jap" & lang_post == "jap")) %>%
# Center percent exposure on 50%
mutate(eng_percent_centered = eng_percent - 0.5) %>%
# Contrast code context
mutate(context_contrast = if_else(lang_pre == "eng", -0.5, 0.5))
## RUN MODELS ON DURATION ####
# Full model
duration_so_exp.full.lme = lmer(duration_so ~ eng_percent_centered * context_contrast +
(1 + context_contrast| pair/speaker),
data_duration_so_ana,
REML = F)
summary(duration_so_exp.full.lme)
# Removing percent exposure - n.s.
duration_so_exp.nope.lme = update(duration_so_exp.full.lme, . ~ .
- eng_percent_centered)
anova(duration_so_exp.full.lme, duration_so_exp.nope.lme)
# Removing context - n.s.
duration_so_exp.nocx.lme = update(duration_so_exp.full.lme, . ~ .
- context_contrast)
anova(duration_so_exp.full.lme, duration_so_exp.nocx.lme)
# Removing interaction - SIGNIFICANT
duration_so_exp.noint.lme = update(duration_so_exp.full.lme, . ~ .
- eng_percent_centered:context_contrast)
anova(duration_so_exp.full.lme, duration_so_exp.noint.lme)
#follow up
duration_so_eng_exp.full.lme = lmer(duration_so ~ eng_percent_centered +
(1 | pair/speaker),
data_duration_so_ana[data_duration_so_ana$context_contrast==-0.5,],
REML = F)
summary(duration_so_eng_exp.full.lme)
duration_so_eng_exp.nope.lme = update(duration_so_eng_exp.full.lme, . ~ .
- eng_percent_centered)
anova(duration_so_eng_exp.full.lme, duration_so_eng_exp.nope.lme)
duration_so_ja_exp.full.lme = lmer(duration_so ~ eng_percent_centered +
(1 | pair/speaker),
data_duration_so_ana[data_duration_so_ana$context_contrast==0.5,],
REML = F)
summary(duration_so_ja_exp.full.lme)
duration_so_ja_exp.nope.lme = update(duration_so_ja_exp.full.lme, . ~ .
- eng_percent_centered)
anova(duration_so_ja_exp.full.lme, duration_so_ja_exp.nope.lme)
## RUN MODELS ON FORMANTS - F1 ####
# Full model
formants_so_f1_exp.full.lme = lmer(f1_norm_sum ~ eng_percent_centered * context_contrast + percentage +
(1 + context_contrast + percentage | pair/speaker),
data_formants_so_o_ana,
REML = F)
summary(formants_so_f1_exp.full.lme)
# Removing percent exposure - n.s.
formants_so_f1_exp.nope.lme = update(formants_so_f1_exp.full.lme, . ~ .
- eng_percent_centered)
anova(formants_so_f1_exp.full.lme, formants_so_f1_exp.nope.lme)
# Removing context - n.s.
formants_so_f1_exp.nocx.lme = update(formants_so_f1_exp.full.lme, . ~ .
- context_contrast)
anova(formants_so_f1_exp.full.lme, formants_so_f1_exp.nocx.lme)
# Removing percentage - n.s.
formants_so_f1_exp.nopct.lme = update(formants_so_f1_exp.full.lme, . ~ .
- percentage)
anova(formants_so_f1_exp.full.lme, formants_so_f1_exp.nopct.lme)
# Removing interaction - SIGNIFICANT
formants_so_f1_exp.noint.lme = update(formants_so_f1_exp.full.lme, . ~ .
- eng_percent_centered:context_contrast)
anova(formants_so_f1_exp.full.lme, formants_so_f1_exp.noint.lme)
## RUN MODELS ON FORMANTS - F2 ####
# Full model
formants_so_f2_exp.full.lme = lmer(f2_norm_sum ~ eng_percent_centered * context_contrast + percentage +
(1 + context_contrast + percentage | pair/speaker),
data_formants_so_o_ana,
REML = F)
summary(formants_so_f2_exp.full.lme)
# Removing percent exposure - MODEL FAIL
formants_so_f2_exp.nope.lme = update(formants_so_f2_exp.full.lme, . ~ .
- eng_percent_centered)
anova(formants_so_f2_exp.full.lme, formants_so_f2_exp.nope.lme)
# Removing context - MODEL FAIL
formants_so_f2_exp.nocx.lme = update(formants_so_f2_exp.full.lme, . ~ .
- context_contrast)
anova(formants_so_f2_exp.full.lme, formants_so_f2_exp.nocx.lme)
# Removing percentage - MODEL FAIL
formants_so_f2_exp.nopct.lme = update(formants_so_f2_exp.full.lme, . ~ .
- percentage)
anova(formants_so_f2_exp.full.lme, formants_so_f2_exp.nopct.lme)
# Removing interaction - n.s.
formants_so_f2_exp.noint.lme = update(formants_so_f2_exp.full.lme, . ~ .
- eng_percent_centered:context_contrast)
anova(formants_so_f2_exp.full.lme, formants_so_f2_exp.noint.lme)
|
\name{qtime}
\alias{qtime}
\title{Draw a time plot}
\usage{
qtime(time, y, data, period = NULL, group = NULL,
wrap = TRUE, shift = c(1, 7, 12, 24), size = 2, alpha = 1,
asp = NULL, main = NULL, xlab = NULL, ylab = NULL)
}
\arguments{
\item{time}{The variable indicating time, which is
displayed on the horizontal axis}
\item{y}{The variable(s) displayed on the vertical axis.
It must be a formula with only right hand side at the
moment. See examples.}
\item{data}{Mutaframe data to use}
  \item{period}{The variable used to group the time series,
  typically at a time resolution such as 'year' or 'month'.
  Defaults to NULL. When it is not NULL, the keys U and D
  can be pressed to separate the groups or to overlap them
  again so that the patterns can be compared.}
\item{group}{Similar to period, but is used for
longitudinal data grouping.}
\item{wrap}{The switch for wrapping or not when zooming
in/out by hitting right arrow or left arrow. Default to
be TRUE.}
\item{shift}{Wrapping speed selector. The default
possible speeds are 1,7(for days a week),12(for
months),24(for hours).}
\item{size}{Point size, default to be 2.}
\item{alpha}{Transparency level, 1=completely opaque,
default to be 1.}
\item{asp}{Ratio between width and height of the plot.}
\item{main}{main title for the plot.}
\item{xlab}{label on horizontal axis, default is name of
x variable}
\item{ylab}{label on vertical axis, default is name of y
variable}
}
\description{
Draw a time-series plot.
}
\details{
Arrow up/down: in-/de-crease size of points. Arrow
left/right: wrap the time series when wrap=TRUE, while
zoom in/out with the center of the last clicked dot when
wrap=FALSE. Shift + right: when wrap=TRUE, the time
series will be folded directly to the width of maximal
value in argument shift. Shift + left: the time series is
moved back to its original x-axis position, no matter whether
wrap is TRUE or FALSE. Key '+'/'-': de-/in-crease alpha level
(starts at alpha=1 by default). Key 'u'/'d': separate/mix
the series groups by shifting them up and down. Shift +
'u'/'d': for multivariate y's, separate/mix them by
shifting up and down. Key 'g': change the wrapping speed
circularly in the values of parameter 'shift'. Key 'm':
Switch the mode for series selecting. Default to be off.
When the argument 'group' is not null, users can turn it
on to hold a series and shift the series horizontally by
dragging with the mouse. When the wrapping mode is FALSE,
turning on the series selecting mode will make it
possible to pan the series which is zoomed in by dragging
with the mouse or pressing left/right arrows. Key 'w':
Switch the wrapping mode between TRUE and FALSE. When it
is TRUE, an indicator of 'wrapping period' will be shown
at the bottom right of the graph; otherwise there is not
any indicator on the bottom right.
}
\examples{
library(cranvas)
## example 1: NASA temperature data
data(nasa)
nasa11 <- subset(nasa, Gridx == 22 & Gridy == 21)
qnasa <- qdata(nasa11)
qtime(TimeIndx, ~ts, qnasa, shift = c(1, 12))
qtime(TimeIndx, ~ts, qnasa, wrap = FALSE)
qtime(TimeIndx, ~ts, qnasa, Year, shift = 1)
qtime(TimeIndx, ~ts, qnasa, Year, wrap = FALSE)
qtime(TimeIndx, ~ts + ps_tovs + ca_med, qnasa, shift = c(1,
12))
qtime(TimeIndx, ~ts + ps_tovs + ca_med, qnasa, wrap = FALSE)
qtime(TimeIndx, ~ts + ps_tovs + ca_med, qnasa, Year)
qtime(TimeIndx, ~ts + ps_tovs + ca_med, qnasa, Year,
wrap = FALSE)
library(reshape)
nasaTsCa <- nasa11[, c(6, 9, 14)]
nasaTsCa[, 2:3] <- rescaler(nasaTsCa[, 2:3])
nasaTsCa <- melt(nasaTsCa, 1)
qnasaTsCa <- qdata(nasaTsCa)
qtime(TimeIndx, ~value, qnasaTsCa, group = variable,
shift = c(1, 12))
## example 2: Remifentanil in the nlme package
require(nlme)
Rem <- qdata(Remifentanil[complete.cases(Remifentanil) &
Remifentanil$ID == 1, ])
Remi <- Remifentanil[complete.cases(Remifentanil),
]
Remi$ID <- factor(Remi$ID)
qRemi <- qdata(Remi)
qtime(Time, ~conc, Rem)
qtime(Time, ~conc, qRemi, group = ID)
qtime(Time, ~conc, qRemi, group = ID, wrap = FALSE)
# for categorical brushing self-link dataset by ID:
id <- link_cat(qRemi, "ID")
# remove_link(qRemi, id)
## example 3: Wages
data(wages)
wage <- qdata(wages[as.integer(as.character(wages$id)) <
2000, 1:3])
qtime(exper, ~lnw, wage, group = id)
id <- link_cat(wage, "id")
remove_link(wage, id)
## example 4: Lynx - for posterity
# Good to show off wrapping to investigate irregular series
data(lynx)
qlynx <- qdata(data.frame(Time = 1:114, lynx))
qtime(Time, ~lynx, qlynx, shift = 1:12)
## example 5: Sunspots - for posterity
# Good to show off wrapping to investigate irregular series
data(sunspots)
qsun <- qdata(data.frame(Time = 1:2820, sunspots))
qtime(Time, ~sunspots, qsun, shift = c(1, (1:10) *
10))
## example 6: Pigs
data(pigs)
qpig <- qdata(pigs)
qtime(TIME, ~GILTS + PROFIT + PRODUCTION + HERDSZ,
qpig, shift = c(1, 4))
library(reshape)
pigGP <- pigs[, c(1, 7, 8)]
pigGP[, 2:3] <- rescaler(pigGP[, 2:3])
pigGP <- melt(pigGP, 1)
qpigGP <- qdata(pigGP)
qtime(TIME, ~value, qpigGP, group = variable, shift = c(1,
4))
id <- link_cat(qpigGP, "variable")
# remove_link(qpigGP, id)
}
\seealso{
Other plots: \code{\link{qbar}}, \code{\link{qboxplot}},
\code{\link{qdensity}}, \code{\link{qhist}},
\code{\link{qmval}}, \code{\link{qparallel}},
\code{\link{qspine}}
}
| /man/qtime.Rd | no_license | NickSpyrison/cranvas | R | false | false | 5,397 | rd | \name{qtime}
\alias{qtime}
\title{Draw a time plot}
\usage{
qtime(time, y, data, period = NULL, group = NULL,
wrap = TRUE, shift = c(1, 7, 12, 24), size = 2, alpha = 1,
asp = NULL, main = NULL, xlab = NULL, ylab = NULL)
}
\arguments{
\item{time}{The variable indicating time, which is
displayed on the horizontal axis}
\item{y}{The variable(s) displayed on the vertical axis.
It must be a formula with only right hand side at the
moment. See examples.}
\item{data}{Mutaframe data to use}
\item{period}{The variable to group the time series.
Better to be 'year','month', or other time resolutions.
Default to be null. When it is not null, the key U and D
can be hit to separate the groups or overlap them
together to watch the patterns.}
\item{group}{Similar to period, but is used for
longitudinal data grouping.}
\item{wrap}{The switch for wrapping or not when zooming
in/out by hitting right arrow or left arrow. Default to
be TRUE.}
\item{shift}{Wrapping speed selector. The default
possible speeds are 1,7(for days a week),12(for
months),24(for hours).}
\item{size}{Point size, default to be 2.}
\item{alpha}{Transparency level, 1=completely opaque,
default to be 1.}
\item{asp}{Ratio between width and height of the plot.}
\item{main}{main title for the plot.}
\item{xlab}{label on horizontal axis, default is name of
x variable}
\item{ylab}{label on vertical axis, default is name of y
variable}
}
\description{
Draw a time-series plot.
}
\details{
Arrow up/down: in-/de-crease size of points. Arrow
left/right: wrap the time series when wrap=TRUE, while
zoom in/out with the center of the last clicked dot when
wrap=FALSE. Shift + right: when wrap=TRUE, the time
series will be folded directly to the width of maximal
value in argument shift. Shift + left: time series will
be backed to the original xaxis position, no matter wrap
is TRUE or FALSE. Key '+'/'-': de-/in-crease alpha level
(starts at alpha=1 by default). Key 'u'/'d': separate/mix
the series groups by shifting them up and down. Shift +
'u'/'d': for multivariate y's, separate/mix them by
shifting up and down. Key 'g': change the wrapping speed
circularly in the values of parameter 'shift'. Key 'm':
Switch the mode for series selecting. Default to be off.
When the argument 'group' is not null, users can turn it
on to hold a series and shift the series horizontally by
dragging with the mouse. When the wrapping mode is FALSE,
turning on the series selecting mode will make it
possible to pan the series which is zoomed in by dragging
with the mouse or pressing left/right arrows. Key 'w':
Switch the wrapping mode between TRUE and FALSE. When it
is TRUE, an indicator of 'wrapping period' will be shown
at the bottom right of the graph; otherwise there is not
any indicator on the bottom right.
}
\examples{
library(cranvas)
## example 1: NASA temperature data
data(nasa)
nasa11 <- subset(nasa, Gridx == 22 & Gridy == 21)
qnasa <- qdata(nasa11)
qtime(TimeIndx, ~ts, qnasa, shift = c(1, 12))
qtime(TimeIndx, ~ts, qnasa, wrap = FALSE)
qtime(TimeIndx, ~ts, qnasa, Year, shift = 1)
qtime(TimeIndx, ~ts, qnasa, Year, wrap = FALSE)
qtime(TimeIndx, ~ts + ps_tovs + ca_med, qnasa, shift = c(1,
12))
qtime(TimeIndx, ~ts + ps_tovs + ca_med, qnasa, wrap = FALSE)
qtime(TimeIndx, ~ts + ps_tovs + ca_med, qnasa, Year)
qtime(TimeIndx, ~ts + ps_tovs + ca_med, qnasa, Year,
wrap = FALSE)
library(reshape)
nasaTsCa <- nasa11[, c(6, 9, 14)]
nasaTsCa[, 2:3] <- rescaler(nasaTsCa[, 2:3])
nasaTsCa <- melt(nasaTsCa, 1)
qnasaTsCa <- qdata(nasaTsCa)
qtime(TimeIndx, ~value, qnasaTsCa, group = variable,
shift = c(1, 12))
## example 2: Remifentanil in the nlme package
require(nlme)
Rem <- qdata(Remifentanil[complete.cases(Remifentanil) &
Remifentanil$ID == 1, ])
Remi <- Remifentanil[complete.cases(Remifentanil),
]
Remi$ID <- factor(Remi$ID)
qRemi <- qdata(Remi)
qtime(Time, ~conc, Rem)
qtime(Time, ~conc, qRemi, group = ID)
qtime(Time, ~conc, qRemi, group = ID, wrap = FALSE)
# for categorical brushing self-link dataset by ID:
id <- link_cat(qRemi, "ID")
# remove_link(qRemi, id)
## example 3: Wages
data(wages)
wage <- qdata(wages[as.integer(as.character(wages$id)) <
2000, 1:3])
qtime(exper, ~lnw, wage, group = id)
id <- link_cat(wage, "id")
remove_link(wage, id)
## example 4: Lynx - for posterity
# Good to show off wrapping to investigate irregular series
data(lynx)
qlynx <- qdata(data.frame(Time = 1:114, lynx))
qtime(Time, ~lynx, qlynx, shift = 1:12)
## example 5: Sunspots - for posterity
# Good to show off wrapping to investigate irregular series
data(sunspots)
qsun <- qdata(data.frame(Time = 1:2820, sunspots))
qtime(Time, ~sunspots, qsun, shift = c(1, (1:10) *
10))
## example 6: Pigs
data(pigs)
qpig <- qdata(pigs)
qtime(TIME, ~GILTS + PROFIT + PRODUCTION + HERDSZ,
qpig, shift = c(1, 4))
library(reshape)
pigGP <- pigs[, c(1, 7, 8)]
pigGP[, 2:3] <- rescaler(pigGP[, 2:3])
pigGP <- melt(pigGP, 1)
qpigGP <- qdata(pigGP)
qtime(TIME, ~value, qpigGP, group = variable, shift = c(1,
4))
id <- link_cat(qpigGP, "variable")
# remove_link(qpigGP, id)
}
\seealso{
Other plots: \code{\link{qbar}}, \code{\link{qboxplot}},
\code{\link{qdensity}}, \code{\link{qhist}},
\code{\link{qmval}}, \code{\link{qparallel}},
\code{\link{qspine}}
}
|
library("knitr")
library("rgl")
# Alternative pipeline kept for reference: knit to Markdown first, then
# convert to HTML with markdownToHTML() and on to PDF via pandoc.
#knit("HOCN.Rmd")
#markdownToHTML('HOCN.md', 'HOCN.html', options=c("use_xhml"))
#system("pandoc -s HOCN.html -o HOCN.pdf")
# Render HOCN.Rmd straight to HTML in a single step.
knit2html('HOCN.Rmd')
| /FDA_Pesticide_Glossary/HOCN.R | permissive | andrewdefries/andrewdefries.github.io | R | false | false | 180 | r | library("knitr")
library("rgl")
#knit("HOCN.Rmd")
#markdownToHTML('HOCN.md', 'HOCN.html', options=c("use_xhml"))
#system("pandoc -s HOCN.html -o HOCN.pdf")
knit2html('HOCN.Rmd')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcbdltcmt.R
\docType{data}
\name{rcbdltcmt}
\alias{rcbdltcmt}
\title{Line x Tester data (only Crosses) in Randomized Complete Block design.}
\format{
A data frame of 15 crosses derived from five lines and three testers.
\describe{
\item{replication}{four replications}
\item{line}{five inbred genotype}
\item{tester}{three inbred genotype}
\item{ph}{plant height}
\item{eh}{ear height}
}
}
\usage{
data(rcbdltcmt)
}
\description{
Line x Tester data containing only crosses, laid out in a Randomized Complete Block design.
}
\examples{
result = ltcmt(rcbdltcmt, replication, line, tester, rcbdltcmt[,4:5])
}
\seealso{
\code{\link{rcbdltc}}
,\code{\link{alphaltcchk}}
,\code{\link{rcbdltcchk}}
,\code{\link{alphaltcmt}}
}
\keyword{datasets}
| /man/rcbdltcmt.Rd | no_license | nandp1/gpbStat | R | false | true | 836 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcbdltcmt.R
\docType{data}
\name{rcbdltcmt}
\alias{rcbdltcmt}
\title{Line x Tester data (only Crosses) in Randomized Complete Block design.}
\format{
A data frame of 15 crosses derived from five lines and three testers.
\describe{
\item{replication}{four replications}
\item{line}{five inbred genotype}
\item{tester}{three inbred genotype}
\item{ph}{plant height}
\item{eh}{ear height}
}
}
\usage{
data(rcbdltcmt)
}
\description{
The Line x Tester data of containing only crosses laid out in Randomized Complete Block design.
}
\examples{
result = ltcmt(rcbdltcmt, replication, line, tester, rcbdltcmt[,4:5])
}
\seealso{
\code{\link{rcbdltc}}
,\code{\link{alphaltcchk}}
,\code{\link{rcbdltcchk}}
,\code{\link{alphaltcmt}}
}
\keyword{datasets}
|
# Apply FAbatch adjustment parameters estimated on training data to new
# ("add-on") data.
#
# Args:
#   params: object of class "fabatch" produced at training time; the fields
#           used here are the logistic model coefficients (b0, b), the class
#           means (m1, m2), badvariables, pooledsds, meanoverall, nbfinput
#           and minerr.
#   x:      numeric matrix of new data (observations in rows, variables in
#           columns); must have the same variables as the training data.
#   batch:  factor giving the batch membership of each row of x.
#
# Returns: the batch-effect-adjusted version of x (same dimensions).
#
# Per batch the procedure is: (1) center and scale the batch, (2) predict
# class probabilities with the training logistic model, (3) estimate latent
# factors on probability-weighted class-centered data and remove their
# influence, (4) re-standardize and transform back to the training data's
# location/scale.
fabatchaddon <-
function(params, x, batch) {
  # --- input validation -------------------------------------------------
  if (any(is.na(x)))
    stop("Data contains missing values.")
  if (!is.factor(batch))
    stop("'batch' has to be of class 'factor'.")
  if (!is.matrix(x))
    stop("'x' has to be of class 'matrix'.")
  if (max(table(batch)) >= ncol(x))
    stop("The sample size within each batch has to be smaller than the number of variables.")
  if (!inherits(params, "fabatch"))
    stop("Input parameter 'params' has to be of class 'fabatch'.")
  if (ncol(params$xadj) != ncol(x))
    stop("Number of variables in test data matrix different to that of training data matrix.")

  batches <- levels(batch)
  nbatches <- length(batches)
  nvars <- ncol(x)

  # Per-batch standard deviations of every variable.
  sdb <- vector("list", nbatches)
  for (i in seq_len(nbatches)) {
    sdb[[i]] <- apply(x[batch == batches[i], ], 2, sd)
  }

  # Variables that are constant (SD == 0) within a batch cannot be scaled
  # there; they are tracked per batch and handled separately.
  badvariableslisthere <- lapply(sdb, function(s) which(s == 0))

  if (nbatches > 1) {
    # Remove batch-specific means via a linear model of x on batch:
    adjustmentmod <- lm(x ~ batch)
    design <- model.matrix(~batch)
    adjustmentcoef <- coef(adjustmentmod)
    xb <- x - design %*% adjustmentcoef
  } else {
    xb <- scale(x, scale = FALSE)
  }

  # Pooled SDs of the mean-removed new data; used at the end to restore
  # variables that were unscalable in the training data.
  pooledsds <- apply(xb, 2, sd)

  # 'scaledxb' is x centered and scaled per batch (variables constant in a
  # batch stay centered only).
  scaledxb <- xb
  for (i in seq_len(nbatches)) {
    goodhere <- setdiff(seq_len(nvars), badvariableslisthere[[i]])
    scaledxb[batch == batches[i], goodhere] <-
      scale(xb[batch == batches[i], goodhere],
            center = rep(0, length(goodhere)),
            scale = sdb[[i]][goodhere])
  }

  # Predict class probabilities with the logistic model estimated on the
  # training data (restricted to variables scalable in training).
  p1x <- as.vector(1 / (1 + exp(-params$b0 -
    as.matrix(scaledxb)[, setdiff(seq_len(nvars), params$badvariables)] %*% params$b)))

  # For each batch: positions (within the training-good variables) that are
  # also scalable here, and the corresponding variable indices.
  herealsogood <- lapply(badvariableslisthere,
                         function(bv) which(setdiff(seq_len(nvars), params$badvariables) %in%
                                              setdiff(seq_len(nvars), bv)))
  goodinboth <- lapply(badvariableslisthere,
                       function(bv) intersect(setdiff(seq_len(nvars), params$badvariables),
                                              setdiff(seq_len(nvars), bv)))

  scaledxbfa <- scaledxb
  for (i in seq_len(nbatches)) {
    # Class-centered data for this batch: each observation is centered by a
    # probability-weighted mixture of the training class means m1 and m2.
    # (Computed once here; the original recomputed it for nbfactors() and
    # again for emfahighdim().)
    centereddata <-
      sweep(scale(scaledxb[batch == batches[i], goodinboth[[i]]],
                  center = params$m1[herealsogood[[i]]], scale = FALSE),
            1, 1 - p1x[batch == batches[i]], "*") +
      sweep(scale(scaledxb[batch == batches[i], goodinboth[[i]]],
                  center = params$m2[herealsogood[[i]]], scale = FALSE),
            1, p1x[batch == batches[i]], "*")

    # Determine the number of latent factors unless fixed by the user:
    if (is.null(params$nbfinput)) {
      nbf <- nbfactors(scale(centereddata),
                       maxnbfactors = min(c(floor(sum(batch == batches[i]) / 2), 12)),
                       minerr = params$minerr)$optimalnbfactors
    } else {
      nbf <- params$nbfinput
    }
    if (is.na(nbf)) {
      warning("There occured an issue in the factor estimation. Number of factors set to zero.")
      nbf <- 0
    }

    if (nbf > 0) {
      # Estimate the factors on the class-centered data and remove their
      # influence from the batch-scaled data:
      fa <- emfahighdim(centereddata, nbf = nbf, minerr = params$minerr)
      scaledxbfa[batch == batches[i], goodinboth[[i]]] <-
        scaledxb[batch == batches[i], goodinboth[[i]]] - fa$Factors %*% t(fa$B)
    } else {
      scaledxbfa[batch == batches[i], goodinboth[[i]]] <-
        scaledxb[batch == batches[i], goodinboth[[i]]]
    }
  }

  # Re-standardize per batch; SDs of zero are replaced by 1 so that the
  # division is a no-op for variables that became constant.
  means2batch <- sd2batch <- matrix(nrow = nbatches, ncol = ncol(scaledxbfa))
  for (i in seq_len(nbatches)) {
    means2batch[i, ] <- colMeans(scaledxbfa[batch == batches[i], ])
    sd2batch[i, ] <- apply(scaledxbfa[batch == batches[i], ], 2, sd)
    sd2batch[i, ][sd2batch[i, ] == 0] <- 1
    scaledxbfa[batch == batches[i], ] <- scale(scaledxbfa[batch == batches[i], ],
                                               center = means2batch[i, ],
                                               scale = sd2batch[i, ])
  }

  # Transform back to the location/scale of the adjusted training data.
  xadj <- sweep(sweep(scaledxbfa, 2, params$pooledsds, "*"), 2, params$meanoverall, "+")

  # Variables unscalable in the training data but scalable in (some batches
  # of) the new data: standardize them per batch, then map them onto the
  # new data's pooled SDs while keeping their batch means.
  if (length(params$badvariables) > 0) {
    for (i in seq_len(nbatches)) {
      heregood <- setdiff(params$badvariables, badvariableslisthere[[i]])
      if (length(heregood) > 0) {
        meanssave <- colMeans(xadj[batch == batches[i], heregood, drop = FALSE])
        xadj[batch == batches[i], heregood] <-
          scale(xadj[batch == batches[i], heregood, drop = FALSE],
                center = TRUE, scale = TRUE)
        xadj[batch == batches[i], heregood] <-
          sweep(sweep(xadj[batch == batches[i], heregood, drop = FALSE],
                      2, pooledsds[heregood], "*"),
                2, meanssave, "+")
      }
    }
  }

  return(xadj)
}
| /R/fabatchaddon.R | no_license | cran/bapred | R | false | false | 5,341 | r | fabatchaddon <-
function(params, x, batch) {
if(any(is.na(x)))
stop("Data contains missing values.")
if(!is.factor(batch))
stop("'batch' has to be of class 'factor'.")
if(!is.matrix(x))
stop("'x' has to be of class 'matrix'.")
if(max(table(batch)) >= ncol(x))
stop("The sample size within each batch has to be smaller than the number of variables.")
if(!inherits(params, "fabatch"))
stop("Input parameter 'params' has to be of class 'fabatch'.")
if(ncol(params$xadj) != ncol(x))
stop("Number of variables in test data matrix different to that of training data matrix.")
batches = levels(batch)
nbatches = length(batches)
nvars <- ncol(x)
sdb = as.list(rep(0,nbatches))
for (i in 1:nbatches) {
sdb[[i]] = apply(x[batch==batches[i],],2,sd)
}
sdb0=sdb
badvariableslisthere <- lapply(sdb, function(x) which(x==0))
badvariableshere <- sort(unique(unlist(badvariableslisthere)))
goodvariableshere <- setdiff(1:nvars, badvariableshere)
if(nbatches > 1) {
# Remove batch specific means:
adjustmentmod = lm(x~batch)
design = model.matrix(~batch)
adjustmentcoef = coef(adjustmentmod)
xb = x-design%*%adjustmentcoef
adjustmentcoef0 = adjustmentcoef
}
else
xb = scale(x, scale=FALSE)
pooledsds <- apply(xb, 2, sd)
# 'scaledxb' is X centered and scaled per batch:
scaledxb = xb
for (i in 1:nbatches) {
scaledxb[batch==batches[i],setdiff(1:nvars, badvariableslisthere[[i]])] =
scale(xb[batch==batches[i],setdiff(1:nvars, badvariableslisthere[[i]])],center=rep(0,nvars-length(badvariableslisthere[[i]])),scale=sdb[[i]][setdiff(1:nvars, badvariableslisthere[[i]])])
}
# Predict probabilities using the model estimated on the training data:
p1x = as.vector(1/(1+exp(-params$b0-as.matrix(scaledxb)[,setdiff(1:nvars, params$badvariables)]%*%params$b))) # mean(ifelse(p1x>0.5,2,1)!=ytest)
herealsogood <- lapply(badvariableslisthere,
function(x) which(setdiff(1:nvars, params$badvariables) %in% setdiff(1:nvars, x)))
goodinboth <- lapply(badvariableslisthere,
function(x) intersect(setdiff(1:nvars, params$badvariables), setdiff(1:nvars, x)))
scaledxbfa <- scaledxb
for (i in 1:nbatches) {
# Determine number of factors if not given:
if (is.null(params$nbfinput)){
##require("mnormt")
nbf = nbfactors(scale(sweep(scale(scaledxb[batch==batches[i],goodinboth[[i]]],center=params$m1[herealsogood[[i]]],scale=FALSE), 1, 1-p1x[batch==batches[i]], "*") +
sweep(scale(scaledxb[batch==batches[i],goodinboth[[i]]],center=params$m2[herealsogood[[i]]],scale=FALSE), 1, p1x[batch==batches[i]], "*")), maxnbfactors=min(c(floor(sum(batch==batches[i])/2), 12)), minerr=params$minerr)$optimalnbfactors
}
else
nbf <- params$nbfinput
if(is.na(nbf)) {
warning("There occured an issue in the factor estimation. Number of factors set to zero.")
nbf <- 0
}
if(nbf > 0) {
# Calculate the factors on 'cdta' (batch-centered-scaled and class-removed):
fa = emfahighdim(sweep(scale(scaledxb[batch==batches[i],goodinboth[[i]]],center=params$m1[herealsogood[[i]]],scale=FALSE), 1, 1-p1x[batch==batches[i]], "*") +
sweep(scale(scaledxb[batch==batches[i],goodinboth[[i]]],center=params$m2[herealsogood[[i]]],scale=FALSE), 1, p1x[batch==batches[i]], "*"),nbf=nbf,minerr=params$minerr)
# Remove the factor influences:
scaledxbfa[batch==batches[i],goodinboth[[i]]] = scaledxb[batch==batches[i],goodinboth[[i]]] - fa$Factors%*%t(fa$B)
}
else {
scaledxbfa[batch==batches[i],goodinboth[[i]]] <- scaledxb[batch==batches[i],goodinboth[[i]]]
fa <- NULL
}
}
means2batch <- sd2batch <- matrix(nrow=length(levels(batch)), ncol=ncol(scaledxbfa))
# scale again:
for (i in 1:nbatches) {
means2batch[i,] <- colMeans(scaledxbfa[batch==batches[i],])
sd2batch[i,] <- apply(scaledxbfa[batch==batches[i],], 2, sd)
sd2batch[i,][sd2batch[i,]==0] <- 1
scaledxbfa[batch==batches[i],] = scale(scaledxbfa[batch==batches[i],], center=means2batch[i,], scale=sd2batch[i,])
}
xadj <- sweep(sweep(scaledxbfa, 2, params$pooledsds, "*"), 2, params$meanoverall, "+")
if(length(params$badvariables) > 0) {
for(i in 1:nbatches) {
if(length(setdiff(params$badvariables, badvariableslisthere[[i]])) > 0) {
heregood <- setdiff(params$badvariables, badvariableslisthere[[i]])
meanssave <- colMeans(xadj[batch==batches[i],heregood, drop=FALSE])
xadj[batch==batches[i],heregood] <- scale(xadj[batch==batches[i],heregood, drop=FALSE], center=TRUE, scale=TRUE)
xadj[batch==batches[i],heregood] <- sweep(sweep(xadj[batch==batches[i],heregood, drop=FALSE], 2, pooledsds[heregood], "*"), 2, meanssave, "+")
}
}
}
return(xadj)
}
|
# Fluorescent spectra analysis
# Copyright © 2020 Borys Olifirov
require(ggplot2)
require(gridExtra)
require(dplyr)
require(magrittr)
require(wavelength2colour)
setwd('/home/astria/Bio/Note/diff/fluo')
# Spectrum tables; columns used below: w (wavelength, nm), ex (excitation
# intensity) and em (emission intensity).
fluo.1 <- import("fluo_4.csv") # NOTE(review): original uses read.csv
fluo.2 <- import("mTFP1.csv")  # NOTE(review): original uses read.csv
# Detection pass bands (nm) drawn as shaded rectangles on the plot.
pass.band.1 <- c(475, 500)
pass.band.2 <- c(540, 600)
# Clean one spectrum table: replace NAs with 0, keep wavelengths >= 300 nm,
# and - when intensities look fractional (max excitation < 90) - rescale
# both channels to percent. Extracted to remove the copy-pasted duplication
# between fluo.1 and fluo.2.
#
# Args:    spec - data.frame with columns w, ex, em.
# Returns: the cleaned data.frame.
clean_spectrum <- function(spec) {
  spec[is.na(spec)] <- 0
  spec <- subset(spec, w >= 300)
  if (max(spec$ex) < 90) {
    spec$ex <- spec$ex * 100
    spec$em <- spec$em * 100
  }
  spec
}

fluo.1 <- clean_spectrum(fluo.1)
fluo.2 <- clean_spectrum(fluo.2)
# Line colours taken from the wavelength of each spectrum's peak.
fluo.1.ex.col <- wavelength2hex(fluo.1$w[fluo.1$ex == max(fluo.1$ex)])
# NOTE(review): emission colour for fluo.1 is hard-coded to 530 nm instead
# of the emission peak (original expression kept in the trailing comment) -
# confirm this is intentional.
fluo.1.em.col <- wavelength2hex(530) # fluo.1$w[fluo.1$em == max(fluo.1$em)])
fluo.2.ex.col <- wavelength2hex(fluo.2$w[fluo.2$ex == max(fluo.2$ex)])
fluo.2.em.col <- wavelength2hex(fluo.2$w[fluo.2$em == max(fluo.2$em)])
# Overlay the four spectra as semi-transparent ribbons (thin outline for
# excitation, thick for emission) plus the two pass bands as red rectangles.
ggplot() +
  # fluo.1 excitation
  geom_ribbon(data = fluo.1, mapping = aes(x = w,
                                           ymin = 0,
                                           ymax = ex),
              colour = fluo.1.ex.col,
              fill = fluo.1.ex.col,
              size = 0.2,
              alpha = 0.25) +
  # fluo.1 emission
  geom_ribbon(data = fluo.1, mapping = aes(x = w,
                                           ymin = 0,
                                           ymax = em),
              colour = fluo.1.em.col,
              fill = fluo.1.em.col,
              size = 1,
              alpha = 0.25) +
  # fluo.2 excitation
  geom_ribbon(data = fluo.2, mapping = aes(x = w,
                                           ymin = 0,
                                           ymax = ex),
              colour = fluo.2.ex.col,
              fill = fluo.2.ex.col,
              size = 0.2,
              alpha = 0.25) +
  # fluo.2 emission
  geom_ribbon(data = fluo.2, mapping = aes(x = w,
                                           ymin = 0,
                                           ymax = em),
              colour = fluo.2.em.col, # '#808080',
              fill = fluo.2.em.col,
              size = 1,
              alpha = 0.25) +
  # detection pass bands
  geom_rect(aes(xmin = pass.band.1[1],
                xmax = pass.band.1[2],
                ymin = 0, ymax = 110),
            fill = 'red',
            alpha = 0.3) +
  geom_rect(aes(xmin = pass.band.2[1],
                xmax = pass.band.2[2],
                ymin = 0, ymax = 110),
            fill = 'red',
            alpha = 0.3) +
  scale_x_continuous(limits = c(300, 700),
                     breaks = seq(300, 700, 50)) +
  scale_y_continuous(limits = c(0, 110),
                     breaks = seq(0, 100, 20)) +
  labs(y ='Intensity (%)', x = 'Wavelength (nm)') +
  theme_minimal(base_size = 18,
                base_family = 'oswald')
# Fraction of fluo.2's total emission that falls inside pass band 1
# (presumably a cross-talk/bleed-through estimate - TODO confirm).
sum(fluo.2$em[fluo.2$w >= pass.band.1[1] & fluo.2$w <= pass.band.1[2]]) / sum(fluo.2$em)
| /diff/fluo/fluo.R | no_license | sonyann/Labnote | R | false | false | 2,814 | r | # Flyorescent spectra anslysis
# Copyright © 2020 Borys Olifirov
require(ggplot2)
require(gridExtra)
require(dplyr)
require(magrittr)
require(wavelength2colour)
setwd('/home/astria/Bio/Note/diff/fluo')
fluo.1 <- read.csv('fluo_4.csv')
fluo.2 <- read.csv('mTFP1.csv')
pass.band.1 <- c(475, 500)
pass.band.2 <- c(540, 600)
fluo.1[is.na(fluo.1)] <- 0
fluo.1 <- subset(fluo.1, w >= 300)
if (max(fluo.1$ex) < 90) {
fluo.1$ex <- fluo.1$ex * 100
fluo.1$em <- fluo.1$em * 100
}
fluo.2[is.na(fluo.2)] <- 0
fluo.2 <- subset(fluo.2, w >= 300)
if (max(fluo.2$ex) < 90) {
fluo.2$ex <- fluo.2$ex * 100
fluo.2$em <- fluo.2$em * 100
}
fluo.1.ex.col <- wavelength2hex(fluo.1$w[fluo.1$ex == max(fluo.1$ex)])
fluo.1.em.col <- wavelength2hex(530) # fluo.1$w[fluo.1$em == max(fluo.1$em)])
fluo.2.ex.col <- wavelength2hex(fluo.2$w[fluo.2$ex == max(fluo.2$ex)])
fluo.2.em.col <- wavelength2hex(fluo.2$w[fluo.2$em == max(fluo.2$em)])
# Overlay figure: excitation/emission spectra of both fluorophores plus the
# two emission-filter pass bands defined above.
ggplot() +
  # fluorophore 1 excitation (thin outline, colour from excitation peak)
  geom_ribbon(data = fluo.1, mapping = aes(x = w,
                                           ymin = 0,
                                           ymax = ex),
              colour = fluo.1.ex.col,
              fill = fluo.1.ex.col,
              size = 0.2,
              alpha = 0.25) +
  # fluorophore 1 emission (thick outline)
  geom_ribbon(data = fluo.1, mapping = aes(x = w,
                                           ymin = 0,
                                           ymax = em),
              colour = fluo.1.em.col,
              fill = fluo.1.em.col,
              size = 1,
              alpha = 0.25) +
  # fluorophore 2 excitation
  geom_ribbon(data = fluo.2, mapping = aes(x = w,
                                           ymin = 0,
                                           ymax = ex),
              colour = fluo.2.ex.col,
              fill = fluo.2.ex.col,
              size = 0.2,
              alpha = 0.25) +
  # fluorophore 2 emission
  geom_ribbon(data = fluo.2, mapping = aes(x = w,
                                           ymin = 0,
                                           ymax = em),
              colour = fluo.2.em.col, # '#808080',
              fill = fluo.2.em.col,
              size = 1,
              alpha = 0.25) +
  # emission-filter pass bands drawn as translucent red rectangles
  geom_rect(aes(xmin = pass.band.1[1],
                xmax = pass.band.1[2],
                ymin = 0, ymax = 110),
            fill = 'red',
            alpha = 0.3) +
  geom_rect(aes(xmin = pass.band.2[1],
                xmax = pass.band.2[2],
                ymin = 0, ymax = 110),
            fill = 'red',
            alpha = 0.3) +
  scale_x_continuous(limits = c(300, 700),
                     breaks = seq(300, 700, 50)) +
  scale_y_continuous(limits = c(0, 110),
                     breaks = seq(0, 100, 20)) +
  labs(y ='Intensity (%)', x = 'Wavelength (nm)') +
  theme_minimal(base_size = 18,
                base_family = 'oswald')
# Fraction of fluorophore 2 emission falling inside pass band 1
# (presumably a bleed-through estimate -- confirm with the experiment notes).
sum(fluo.2$em[fluo.2$w >= pass.band.1[1] & fluo.2$w <= pass.band.1[2]]) / sum(fluo.2$em)
|
context("Watershed Topology")
library("WatershedTools")
# Small fixture watershed shipped with the package.
ws <- readRDS(system.file("testdata/testWS.rds", package="WatershedTools"))
# Pixel ids used throughout: the single outlet and one headwater.
outletID <- outlets(ws)$id
hwID <- headwaters(ws)[1,'id']
test_that("Accumulation works", {
  # `regexp = NA` is the testthat idiom asserting the call raises NO error.
  # Spelled out in full: the original `regex=` only worked via partial
  # argument matching.
  expect_error(accum_outlet_us <-
    accumulate(ws, upstream = Inf, downstream = outletID, direction = "up"), regexp = NA)
  expect_error(accum_outlet_us_parallel <-
    accumulate(ws, upstream = Inf, downstream = outletID, direction = "up", parallel=TRUE),
    regexp = NA)
  expect_error(accum_outlet_ds <-
    accumulate(ws, upstream = Inf, downstream = outletID, direction = "down"), regexp = NA)
  expect_error(accum_hw_ds <-
    accumulate(ws, upstream = hwID, downstream = Inf, direction = "down"), regexp = NA)
  expect_error(accum_hw_us <-
    accumulate(ws, upstream = hwID, downstream = Inf, direction = "up"), regexp = NA)
  # NOTE(review): accum_both_us is only checked for running error-free; the
  # name says "us" but direction is "down" -- confirm intent.
  expect_error(accum_both_us <-
    accumulate(ws, upstream = Inf, downstream = c(outletID, hwID), direction = "down"),
    regexp = NA)
  expect_error(accum_out_hw_ds <-
    accumulate(ws, upstream = hwID, downstream = outletID, direction = "down"), regexp = NA)
  expect_error(accum_out_hw_us <-
    accumulate(ws, upstream = hwID, downstream = outletID, direction = "up"), regexp = NA)
  # results shouldn't depend on parallel computing or not
  expect_identical(accum_outlet_us, accum_outlet_us_parallel)
  # from outlet to all headwaters should recover all points
  expect_identical(sort(accum_outlet_us[,1]), sort(ws[,'id']))
  expect_identical(sort(accum_outlet_ds[,1]), sort(ws[,'id']))
  # from hw to outlet should produce same pixels no matter which direction
  expect_identical(sort(accum_hw_ds[,1]), sort(accum_hw_us[,1]))
  expect_identical(sort(accum_out_hw_ds[,1]), sort(accum_out_hw_us[,1]))
  expect_identical(accum_out_hw_ds, accum_hw_ds)
  expect_identical(accum_out_hw_us, accum_hw_us)
  ## upstream always negative, downstream always positive
  expect_gt(0, sum(accum_outlet_us[,2]))
  expect_gt(0, sum(accum_hw_us[,2]))
  expect_gt(sum(accum_outlet_ds[,2]), 0)
  expect_gt(sum(accum_hw_ds[,2]), 0)
})
test_that("Watershed Distance", {
  # `regexp = NA` asserts no error (was `regex=`, relying on partial matching).
  expect_error(dmat <- wsDistance(ws, c(outletID, hwID)), regexp = NA)
  # distance from a site to itself is zero
  expect_equal(dmat[1, outletID], 0)
  expect_equal(dmat[2, hwID], 0)
  # distances between the two sites are equal in magnitude, opposite in sign
  expect_equal(dmat[1, hwID], -1*dmat[2, outletID])
  # everything is upstream of (or at) the outlet
  expect_true(all(dmat[1,] <= 0))
  # one column per pixel in the watershed
  expect_equal(ncol(dmat), nrow(ws$data))
})
test_that("Site By Reach", {
  # `regexp = NA` asserts no error (was `regex=`, relying on partial matching).
  expect_error(sbyr <- siteByReach(ws, c(outletID, hwID)), regexp = NA)
  expect_equal(ncol(sbyr), max(ws[,'reachID']))
  # outlet is connected to all reaches, headwater to itself only
  expect_true(all(sbyr[1,] == 1))
  expect_equal(sum(sbyr[2,]), 1)
  expect_equal(unname(which(sbyr[2,] == 1)), ws[hwID, 'reachID'])
})
test_that("Downstream Neighbor", {
  # the pixel immediately upstream of the outlet in the adjacency matrix
  ussite <- which(ws$adjacency[outletID,]==1)
  # `regexp = NA` asserts no error (was `regex=`, relying on partial matching).
  expect_error(ndsnb <- nearestDownstreamNeighbor(ws, c(outletID, ussite, hwID)), regexp = NA)
  # the outlet has no downstream neighbor, so it never appears as a source
  expect_true(!outletID %in% ndsnb[,1])
  expect_equal(unname(ndsnb[match(ussite, ndsnb[,1]),2]), outletID)
  expect_equal(unname(ndsnb[match(hwID, ndsnb[,1]),2]), unname(ussite))
})
test_that("Slope", {
  skip_on_cran()
  # Build a watershed copy with a known elevation profile around pixel 358
  # so that the expected slope is exactly delev/dx.
  # (Assignments switched from `=` to `<-` per R style; behavior unchanged.)
  ws2 <- ws
  pix <- 358
  ws2$data$elevation[pix] <- 795
  delev <- 5
  dx <- 10
  uspix <- WatershedTools:::us(pix, ws2)
  dspix <- WatershedTools:::ds(pix, ws2)
  ws2$data$elevation[uspix] <- ws2$data$elevation[pix] + delev
  ws2$data$elevation[dspix] <- ws2$data$elevation[pix] - delev
  ws2$data$length[c(uspix, pix, dspix)] <- dx
  # `regexp = NA` asserts no error (was `regex=`, relying on partial matching).
  # The first call's result is deliberately overwritten; both call forms are
  # exercised for errors, only the by='pixel' result is checked numerically.
  expect_error(ws2$data$wsslope <- ws_slope(ws2), regexp = NA)
  expect_error(ws2$data$wsslope <- ws_slope(ws2, by='pixel'), regexp = NA)
  expect_equal(ws2[pix, 'wsslope'], delev/dx)
})
# test_that("Nearest Neighbors", {
# expect_error(nnsnb <- nearestNeighbors(ws, c(outletID, ussite, hwID),
# sites=c(outletID, ussite, hwID)), regex=NA)
# })
| /tests/testthat/test-topology.R | permissive | mtalluto/WatershedTools | R | false | false | 3,764 | r | context("Watershed Topology")
# (Second copy of the topology test suite in this dump; code kept byte-identical,
# comments only.)
library("WatershedTools")
# Small fixture watershed shipped with the package.
ws <- readRDS(system.file("testdata/testWS.rds", package="WatershedTools"))
outletID <- outlets(ws)$id
hwID <- headwaters(ws)[1,'id']
# NOTE(review): `regex=NA` throughout relies on partial matching of testthat's
# `regexp` argument; `regexp = NA` asserts the call raises no error.
test_that("Accumulation works", {
  expect_error(accum_outlet_us <-
    accumulate(ws, upstream = Inf, downstream = outletID, direction = "up"), regex=NA)
  expect_error(accum_outlet_us_parallel <-
    accumulate(ws, upstream = Inf, downstream = outletID, direction = "up", parallel=TRUE),
    regex=NA)
  expect_error(accum_outlet_ds <-
    accumulate(ws, upstream = Inf, downstream = outletID, direction = "down"), regex=NA)
  expect_error(accum_hw_ds <-
    accumulate(ws, upstream = hwID, downstream = Inf, direction = "down"), regex=NA)
  expect_error(accum_hw_us <-
    accumulate(ws, upstream = hwID, downstream = Inf, direction = "up"), regex=NA)
  # NOTE(review): accum_both_us is never asserted on below -- confirm intent.
  expect_error(accum_both_us <-
    accumulate(ws, upstream = Inf, downstream = c(outletID, hwID), direction = "down"),
    regex=NA)
  expect_error(accum_out_hw_ds <-
    accumulate(ws, upstream = hwID, downstream = outletID, direction = "down"), regex=NA)
  expect_error(accum_out_hw_us <-
    accumulate(ws, upstream = hwID, downstream = outletID, direction = "up"), regex=NA)
  # results shouldn't depend on parallel computing or not
  expect_identical(accum_outlet_us, accum_outlet_us_parallel)
  # from outlet to all headwaters should recover all points
  expect_identical(sort(accum_outlet_us[,1]), sort(ws[,'id']))
  expect_identical(sort(accum_outlet_ds[,1]), sort(ws[,'id']))
  # from hw to outlet should produce same pixels no matter which direction
  expect_identical(sort(accum_hw_ds[,1]), sort(accum_hw_us[,1]))
  expect_identical(sort(accum_out_hw_ds[,1]), sort(accum_out_hw_us[,1]))
  expect_identical(accum_out_hw_ds, accum_hw_ds)
  expect_identical(accum_out_hw_us, accum_hw_us)
  ## upstream always negative, downstream always positive
  expect_gt(0, sum(accum_outlet_us[,2]))
  expect_gt(0, sum(accum_hw_us[,2]))
  expect_gt(sum(accum_outlet_ds[,2]), 0)
  expect_gt(sum(accum_hw_ds[,2]), 0)
})
test_that("Watershed Distance", {
  expect_error(dmat <- wsDistance(ws, c(outletID, hwID)), regex=NA)
  # distance from a site to itself is zero; all pixels are upstream of outlet
  expect_equal(dmat[1, outletID], 0)
  expect_equal(dmat[2, hwID], 0)
  expect_equal(dmat[1, hwID], -1*dmat[2, outletID])
  expect_true(all(dmat[1,] <= 0))
  expect_equal(ncol(dmat), nrow(ws$data))
})
test_that("Site By Reach", {
  expect_error(sbyr <- siteByReach(ws, c(outletID, hwID)), regex=NA)
  expect_equal(ncol(sbyr), max(ws[,'reachID']))
  # outlet is connected to all reaches, headwater to itself only
  expect_true(all(sbyr[1,] == 1))
  expect_equal(sum(sbyr[2,]), 1)
  expect_equal(unname(which(sbyr[2,] == 1)), ws[hwID, 'reachID'])
})
test_that("Downstream Neighbor", {
  # pixel immediately upstream of the outlet in the adjacency matrix
  ussite <- which(ws$adjacency[outletID,]==1)
  expect_error(ndsnb <- nearestDownstreamNeighbor(ws, c(outletID, ussite, hwID)), regex=NA)
  expect_true(!outletID %in% ndsnb[,1])
  expect_equal(unname(ndsnb[match(ussite, ndsnb[,1]),2]), outletID)
  expect_equal(unname(ndsnb[match(hwID, ndsnb[,1]),2]), unname(ussite))
})
test_that("Slope", {
  skip_on_cran()
  # Known elevation profile around pixel 358 so expected slope is delev/dx.
  # NOTE(review): `=` used for assignment here; house style elsewhere is `<-`.
  ws2 = ws
  pix = 358
  ws2$data$elevation[pix] = 795
  delev = 5
  dx = 10
  uspix = WatershedTools:::us(pix, ws2)
  dspix = WatershedTools:::ds(pix, ws2)
  ws2$data$elevation[uspix] = ws2$data$elevation[pix] + delev
  ws2$data$elevation[dspix] = ws2$data$elevation[pix] - delev
  ws2$data$length[c(uspix, pix, dspix)] = dx
  expect_error(ws2$data$wsslope <- ws_slope(ws2), regex=NA)
  expect_error(ws2$data$wsslope <- ws_slope(ws2, by='pixel'), regex=NA)
  expect_equal(ws2[pix, 'wsslope'], delev/dx)
})
# test_that("Nearest Neighbors", {
#   expect_error(nnsnb <- nearestNeighbors(ws, c(outletID, ussite, hwID),
#     sites=c(outletID, ussite, hwID)), regex=NA)
# })
|
#! /usr/bin/Rscript
### Argument parsing ###########################################################
# Usage: viral_genomes_plot.R <per-cell mutation csv> <output plot file>
args <- commandArgs(trailingOnly=TRUE)
# Fail fast with a usage message instead of propagating NA filenames.
if (length(args) < 2) {
  stop("Usage: viral_genomes_plot.R <input.csv> <output.plot>", call. = FALSE)
}
csv.filename <- args[1]
plot.filename <- args[2]
### Packages ###################################################################
library(ggplot2)
library(gggenes)     # gene-arrow glyphs for flu segments
library(ggnewscale)  # multiple fill/colour scales in one plot
library(patchwork)   # combining plot columns
### Helper Fxns ################################################################
# Extract the mutations for all segments for each cell
#
# @param data data.frame containing mutation information
# @param segments vector of segment names to parse
# @param segment.df data.frame containing the coordinates of each segment
# @param pattern prefix pattern for each data.frame column for each segment
#
# @return Returns A data.frame containing mutations, insertions, and deletion
# locations for each segment.
# Collect mutation records for every segment of every cell into one long
# data.frame (per-segment parsing is delegated to ExtractSegmentMutations).
#
# @param data data.frame with one row per cell and one mutation-string column
#   per segment, named paste0(pattern, segment).
# @param segments Character vector of segment names to parse.
# @param segment.df data.frame containing the coordinates of each segment.
# @param pattern Prefix of the mutation columns in `data`.
#
# @return data.frame of mutation/indel locations with a factor `type` column
#   and a `cell` index column.
ParseMutations <- function(data, segments, segment.df, pattern = "mutations_") {
  rows.per.cell <- lapply(X = seq_len(nrow(data)), FUN = function(cell.idx) {
    rows.per.segment <- lapply(X = segments, FUN = function(seg) {
      ExtractSegmentMutations(x = data[cell.idx, paste0(pattern, seg)],
                              segment = seg,
                              segment.df = segment.df,
                              cell = cell.idx)
    })
    do.call(what = rbind, args = rows.per.segment)
  })
  combined <- do.call(what = rbind, args = rows.per.cell)
  combined$type <- factor(x = combined$type)
  combined
}
# Extract the mutations on a given segment for a given cell
#
# @param x A vector of mutations
# @param segment Name of the segment mutation is present on
# @param segment.df Data.frame containing the coordinates of each segment
# @param cell Index of cell - added into returned data.frame
#
# @return Returns a data.frame of mutations, insertions, and deletion locations
# Parse the space-separated mutation string for one segment of one cell into a
# data.frame of plot coordinates.
#
# @param x Mutation string, e.g. "A5G del10to20 ins105AC"; "WT" or
#   "Not Detected" yields NULL.
# @param segment Name of the segment the mutations are present on.
# @param segment.df data.frame with per-segment plot coordinates (`start`).
# @param cell Index of cell - added into the returned data.frame.
#
# @return data.frame with columns start, end, type, cell (or NULL).
ExtractSegmentMutations <- function(x, segment, segment.df, cell) {
  if (x == "WT" | x == "Not Detected") {
    return(NULL)
  }
  mutations <- strsplit(x = x, split = " ")[[1]]
  locations <- NULL
  mutation.df <- list()
  deletion.df <- list()
  insertion.df <- list()
  # plot-coordinate offset of this segment (segments are laid end to end)
  seg.start <- segment.df[segment.df$segment == segment, "start"]
  for (mutation in mutations) {
    if (grepl(pattern = "^del", x = mutation)) {
      # "del<from>to<to>" -> deletion span, shifted into plot coordinates
      del.loc <- as.numeric(strsplit(x = gsub(pattern = "del", x = mutation, replacement = ""), split = "to")[[1]])
      del.loc <- del.loc + seg.start
      deletion.df[[length(deletion.df)+1]] <- data.frame(start = del.loc[1], end = del.loc[2], type = "Deletion")
    } else if (grepl(pattern = "^ins", x = mutation)) {
      # "ins<pos><bases>" -> insertion of nchar(<bases>) at <pos>.
      # BUGFIX: the digit class was "[1-9]+", which left any '0' in the
      # position behind and inflated the insertion length; use "[0-9]+".
      # Also "[A-Z,a-z]" contained a stray comma; "[A-Za-z]" is intended.
      ins.loc <- gsub(pattern = "ins", x = mutation, replacement = "")
      ins.length <- nchar(gsub(pattern = "[0-9]+", x = ins.loc, replacement = ""))
      ins.loc <- as.numeric(gsub(pattern = "[A-Za-z]+", x = ins.loc, replacement = "")) + seg.start
      insertion.df[[length(insertion.df) + 1]] <- data.frame(start = ins.loc, end = ins.loc + ins.length, type = "Insertion")
    } else {
      # Point mutations joined by "_", optionally tagged "synonymous" /
      # "noncoding"; a tag applies to the immediately preceding mutation.
      all_mutations <- strsplit(x = mutation, split = "_")[[1]]
      locations <- as.numeric(sapply(X = all_mutations, FUN = gsub, pattern = "[A-Za-z]+", replacement = ""))
      locations <- locations[!is.na(locations)]
      locations <- data.frame(start = locations + seg.start,
                              type = "Non-synonymous")
      if (any(all_mutations == "synonymous")) {
        locations[which(all_mutations == "synonymous") - 1, "type"] <- "Synonymous"
      }
      if (any(all_mutations == "noncoding")) {
        locations[which(all_mutations == "noncoding") - 1, "type"] <- "Non-coding"
      }
      locations$end <- locations$start
      mutation.df[[length(x = mutation.df) + 1]] <- locations
    }
  }
  return.df <- do.call(what = rbind, args = c(mutation.df, deletion.df, insertion.df))
  return.df$cell <- cell
  return(return.df)
}
# Create long data.frame of segment presence for each cell
#
# @param data data.frame containing segment presence information
# @param segment.df data.frame containing the coordinates of each segment
# @param pattern prefix pattern to match segment presence columns
#
# @return Returns a data.frame where each row contains a detected segment, its
# positional information, and cell index
# Expand the wide per-cell presence flags into a long data.frame: one row per
# (cell, detected segment), carrying the segment's plot coordinates.
#
# @param data data.frame containing segment presence information.
# @param segment.df data.frame containing the coordinates of each segment.
# @param pattern Prefix used to match the segment-presence columns.
#
# @return data.frame where each row is a detected segment with its positional
#   information and the owning cell index.
ExtractSegmentPresence <- function(data, segment.df, pattern) {
  flag.cols <- paste0(pattern, segment.df$segment)
  per.cell <- lapply(X = seq_len(nrow(data)), FUN = function(cell.idx) {
    detected <- as.logical(unlist(data[cell.idx, flag.cols]))
    found <- segment.df[detected, ]
    found$cell <- cell.idx
    found
  })
  do.call(what = rbind, args = per.cell)
}
# Main ggplot2 theme definition
# Shared ggplot2 theme for the per-cell genotype plot: blank panel/plot
# background, only the x axis line kept, enlarged text, extra bottom margin.
PacBioTheme <- function() {
  theme(
    # axes: no y ticks/titles, keep sized tick labels and the x axis line
    axis.ticks.y = element_blank(),
    axis.title.y = element_blank(),
    axis.title.x = element_blank(),
    axis.text.y = element_text(size = 12),
    axis.text.x = element_text(size = 12),
    axis.line.x = element_line(),
    # panel / plot chrome stripped bare
    panel.grid = element_blank(),
    panel.background = element_blank(),
    plot.background = element_blank(),
    plot.margin = margin(0, 20, 90, 0),
    # legend styling
    legend.key = element_rect(fill = NA),
    legend.title = element_text(size = 15),
    legend.text = element_text(size = 12)
  )
}
### Main plotting function #####################################################
# Build the per-cell influenza genotype figure: one horizontal track per cell
# with gene arrows for detected segments, points for point mutations, thick
# bars for indels, and optional numeric label boxes left of each row.
#
# @param data Per-cell data.frame with logical `present_<segment>` columns,
#   character `mutations_<segment>` columns, and the numeric columns named by
#   `color.segments.by`, `order.segments.by` and `box`.
# @param segment.df data.frame of segments with `start`/`end`/`len` plot
#   coordinates (segments laid end to end).
# @param color.segments.by Column stored as `segment_coloring`
#   (NOTE(review): not referenced when drawing -- confirm intent).
# @param segment.color Fill colour for the gene arrows.
# @param order.segments.by Column used to order cells (decreasing).
# @param order.name Display name for the ordering variable
#   (NOTE(review): currently unused).
# @param box,box.name,box.color.high Parallel vectors: columns shown as label
#   boxes, their display names, and gradient high colours.
# @param ncol Number of plot columns the cells are split across.
# @param arrow_height Vertical space per arrow in inches.
# @param arrow_frac_height Fraction of that space the arrow body occupies.
#
# @return A patchwork object combining the per-column ggplots.
PacBioPlot <- function(
  data,
  segment.df,
  color.segments.by = "percent_viral_UMIs",
  segment.color = '#0072B2',
  order.segments.by = "percent_viral_UMIs",
  order.name = "% mRNA from flu",
  box = 'percent_supernatant',
  box.name ='% supernatant',
  box.color.high = '#009E73', # BUGFIX: was '009E73' (missing '#'), not a valid colour
  ncol = 1,
  arrow_height = 0.325, # vertical space per arrow in inches
  arrow_frac_height = 0.55 # arrow takes up this much of available height
) {
  ncells <- nrow(x = data)
  segments <- segment.df$segment
  # Long tables: one row per detected segment / per mutation, tagged by cell.
  segment.df.cells <- ExtractSegmentPresence(data = data, segment.df = segment.df, pattern = "present_")
  mutations.df <- ParseMutations(data = data, segments = segments, segment.df = segment.df, pattern = "mutations_")
  segment.df.cells <- segment.df.cells[segment.df.cells$cell <= ncells, ]
  mutations.df <- mutations.df[mutations.df$cell <= ncells, ]
  # BUGFIX: the three lookups below previously read the global `dat` instead
  # of the `data` argument, silently ignoring the caller's data.
  segment.df.cells[['segment_coloring']] <- data[[color.segments.by]][segment.df.cells$cell]
  if (!is.null(x = box)) {
    if (length(x = box) != length(x = box.name)) {
      stop("Please provide equal length vectors for the parameters box and box.name.")
    }
    box.col.names <- paste0('box_', seq_along(box))
    for (i in seq_along(box)) {
      segment.df.cells[[box.col.names[i]]] <- data[[box[i]]][segment.df.cells$cell]
    }
  }
  # Reorder cells so the largest `order.segments.by` value becomes cell 1.
  segment.df.cells[['segment_ordering']] <- data[[order.segments.by]][segment.df.cells$cell]
  cell.remap <- seq_len(ncells)
  names(x = cell.remap) <- order(unique(x = segment.df.cells[, c("cell", "segment_ordering")])$segment_ordering, decreasing = TRUE)
  segment.df.cells$cell_ordered <- sapply(X = segment.df.cells$cell, FUN = function(x) cell.remap[as.character(x = x)])
  mutations.df$cell_ordered <- sapply(X = mutations.df$cell, FUN = function(x) cell.remap[as.character(x = x)])
  # dummy data.frame for plotting a grey baseline for missing segments
  missing.segment.df <- data.frame(y = seq_len(ncells))
  # Split indels (drawn as bars) from point mutations (drawn as points).
  indel.df <- mutations.df[mutations.df$type %in% c("Insertion", "Deletion"), ]
  indel.df$type <- droplevels(indel.df$type)
  mutations.df <- mutations.df[!mutations.df$type %in% c("Insertion", "Deletion"), ]
  mutations.df$type <- droplevels(x = mutations.df$type)
  # NOTE(review): box.col.names only exists when `box` is non-NULL; the label
  # code below would fail for box = NULL -- confirm whether that path is used.
  box.labels <- unique(x = segment.df.cells[, c(box.col.names, "cell_ordered")])
  box.labels[, 1:length(x = box.col.names)] <- apply(box.labels[, 1:length(x = box.col.names), drop = FALSE], MARGIN = 2, FUN = round, digits = 1)
  # Split cell indices into `ncol` roughly equal plot columns.
  cell.cols <- split(seq_len(ncells), ceiling(seq_len(ncells)/(ncells/ncol)))
  plots <- lapply(X = cell.cols, FUN = function(x) {
    ncells.plot <- length(x = x)
    # Flip row indices within this column so cell 1 is drawn at the top.
    segment.df.cells.sub <- segment.df.cells[segment.df.cells$cell_ordered %in% x, ]
    segment.df.cells.sub$cell_ordered <- abs(segment.df.cells.sub[segment.df.cells.sub$cell_ordered %in% x, "cell_ordered"] - (max(x) + 1))
    missing.segment.df.sub <- missing.segment.df[missing.segment.df$y %in% x, ,drop=FALSE]
    missing.segment.df.sub$y <- abs(missing.segment.df.sub$y - (max(x) + 1))
    mutations.df.sub <- mutations.df[mutations.df$cell_ordered %in% x, ]
    mutations.df.sub$cell_ordered <- abs(mutations.df.sub[mutations.df.sub$cell_ordered %in% x, "cell_ordered"] - (max(x) + 1))
    indel.df.sub <- indel.df[indel.df$cell_ordered %in% x, ]
    indel.df.sub$cell_ordered <- abs(indel.df.sub[indel.df.sub$cell_ordered %in% x, "cell_ordered"] - (max(x) + 1))
    box.labels.sub <- box.labels[box.labels$cell_ordered %in% x, ]
    box.labels.sub$cell_ordered <- abs(box.labels.sub[box.labels.sub$cell_ordered %in% x, "cell_ordered"] - (max(x) + 1))
    plot <- ggplot(segment.df.cells.sub) +
      geom_segment(data = missing.segment.df.sub, aes_string(y = "y", yend = "y", x = 0, xend= max(segment.df$end)), color = "grey") +
      geom_gene_arrow(aes_string(xmin = "start", xmax = "end", y = "cell_ordered"),
                      arrow_body_height = unit(arrow_frac_height * arrow_height, 'in'),
                      arrowhead_height = unit(arrow_frac_height * arrow_height, 'in'),
                      arrowhead_width = unit(0.3 * arrow_height, 'in'),
                      fill = segment.color,
                      size = 0.4) +
      geom_segment(data = indel.df.sub, aes_string(x = "start", xend = "end", y = "cell_ordered", yend = "cell_ordered", color = "type"), size = 2.5) +
      scale_color_manual(values = c('#CC79A7', '#F0E442'), name = "Indel class", drop = FALSE, guide = guide_legend(order = 2)) +
      new_scale_color() +
      geom_point(data = mutations.df.sub, aes_string(x = "start", y = "cell_ordered", fill = "type"), color = "black", size = 3.5, pch = 21) +
      scale_fill_manual(values = c("#999999", '#E69F00', "#009E73"), name = "Mutation class", drop = FALSE, guide = guide_legend(order = 1)) +
      new_scale_fill()
    x.pos <- NULL
    if (!is.null(x = box)) {
      # One label-box column per `box` entry, stacked left of the tracks.
      for (i in 1:length(x = box)) {
        x.pos[i] <- -500 - ((i-1) * 500)
        box.labels.sub[is.na(box.labels.sub)] <- "NA"
        plot <- plot + geom_tile(aes_string(x = x.pos[i], y = "cell_ordered", width = 400, height = arrow_frac_height + 0.1, fill = box.col.names[i]), color = "black") +
          geom_text(data = box.labels.sub, aes_string(x = x.pos[i], y = "cell_ordered", label = box.col.names[i]), size = 3) +
          scale_fill_gradient(low = 'white', high = box.color.high[i], name = box.name[i], limits = c(min(segment.df.cells[[box.col.names[i]]]), max(segment.df.cells[[box.col.names[i]]]))) +
          new_scale_fill()
        if (length(x = box) > 1) {
          plot <- plot + coord_cartesian(clip = 'off') +
            annotate(geom = "text", x = x.pos[i], y = 0, label = box.name[i], angle = 90, size = 4, hjust = 1)
        }
      }
    }
    if (length(x = box) > 1) {
      plot <- plot +
        scale_x_continuous("", breaks = c(x.pos, segment.df$start + segment.df$len/2), labels = c(rep("", times = length(x = box)), gsub(pattern = "flu", replacement = "", x = segment.df$segment)), limits = c(min(x.pos)-200, max(segment.df$end)), expand = c(0.01, 0.01)) +
        scale_y_continuous("", breaks = 1:ncells.plot, labels = paste0('Cell ', rev(x))) +
        coord_cartesian(clip = "off", ylim = c(0.3, ncells.plot + 0.5), expand = FALSE) +
        PacBioTheme()
    } else {
      plot <- plot +
        scale_x_continuous("", breaks = c(x.pos, segment.df$start + segment.df$len/2), labels = c(box.name, gsub(pattern = "flu", replacement = "", x = segment.df$segment)), limits = c(min(x.pos)-200, max(segment.df$end)), expand = c(0.01, 0.01)) +
        scale_y_continuous("", breaks = 1:ncells.plot, labels = paste0('Cell ', rev(x)), expand = c(0.01, 0.01), limits = c(0.5, ncells.plot + 0.5)) +
        PacBioTheme() + theme(plot.margin = margin(0, 20, 0, 0))
    }
  })
  wrap_plots(plots) + plot_layout(guides = 'collect')
}
### Setup ######################################################################
# Read in the data and generate influenza segment data.frame with positioning
# information.
dat <- read.csv(file = csv.filename)
# Convert fractional columns to percentages for display.
dat$percent_viral_UMIs <- dat$frac_viral_UMIs * 100
dat$percent_supernatant <- dat$freq_supernatant * 100
# The eight flu segments with their lengths; laid end to end on one x axis.
segment.df <- data.frame(
  segment = c("fluPB2", "fluPB1", "fluPA", "fluHA", "fluNP", "fluNA", "fluM", "fluNS"),
  len = c(2341, 2341, 2233, 2035, 1565, 1735, 1027, 890)
)
# Cumulative coordinates: each segment starts right after the previous one.
segment.df$end <- cumsum(segment.df$len)
segment.df$start <- segment.df$end - segment.df$len + 1
### Generate/Save plot #########################################################
# Two label boxes per cell: % of mRNA that is viral and % in supernatant.
# NOTE(review): order.name says "% supernatant" while order.segments.by is
# percent_viral_UMIs; confirm which ordering label was intended.
plot <- PacBioPlot(
  dat,
  segment.df,
  box = c("percent_viral_UMIs", "percent_supernatant"),
  box.name = c("% mRNA from flu", "% Supernatant"),
  box.color.high = c("#009E73", "#E69F00"),
  order.segments.by = "percent_viral_UMIs",
  order.name = "% supernatant",
  color.segments.by = 'percent_supernatant',
  ncol = 1
)
ggsave(plot, filename = plot.filename, width = 14, height = 30)
| /scripts/viral_genomes_plot.R | no_license | jbloomlab/barcoded_flu_pdmH1N1 | R | false | false | 13,234 | r | #! /usr/bin/Rscript
### Argument parsing ###########################################################
args <- commandArgs(trailingOnly=TRUE)
csv.filename <- args[1]
plot.filename <- args[2]
### Packages ###################################################################
library(ggplot2)
library(gggenes)
library(ggnewscale)
library(patchwork)
### Helper Fxns ################################################################
# Extract the mutations for all segments for each cell
#
# @param data data.frame containing mutation information
# @param segments vector of segment names to parse
# @param segment.df data.frame containing the coordinates of each segment
# @param pattern prefix pattern for each data.frame column for each segment
#
# @return Returns A data.frame containing mutations, insertions, and deletion
# locations for each segment.
ParseMutations <- function(data, segments, segment.df, pattern = "mutations_") {
  # One list entry per cell; each entry rbinds the per-segment parses.
  mutations.df <- list()
  for (i in 1:nrow(x = data)) {
    mutations <- lapply(X = segments, FUN = function(x) {
      ExtractSegmentMutations(
        x = data[i, paste0(pattern, x)],
        segment = x,
        segment.df = segment.df,
        cell = i)
    })
    mutations.df[[i]] <- do.call(what = rbind, args = mutations)
  }
  # Flatten across cells and make the mutation class a factor for plotting.
  mutations.df <- do.call(what = rbind, args = mutations.df)
  mutations.df$type <- factor(x = mutations.df$type)
  return(mutations.df)
}
# Extract the mutations on a given segment for a given cell
#
# @param x A vector of mutations
# @param segment Name of the segment mutation is present on
# @param segment.df Data.frame containing the coordinates of each segment
# @param cell Index of cell - added into returned data.frame
#
# @return Returns a data.frame of mutations, insertions, and deletion locations
ExtractSegmentMutations <- function(x, segment, segment.df, cell) {
  # "WT"/"Not Detected" carry no mutation records.
  if (x == "WT" | x == "Not Detected") {
    return(NULL)
  }
  mutations <- strsplit(x = x, split = " ")[[1]]
  locations <- NULL
  mutation.df <- list()
  deletion.df <- list()
  insertion.df <- list()
  for (mutation in mutations) {
    if (grepl(pattern = "^del", x = mutation)) {
      # "del<from>to<to>" -> deletion span, shifted into plot coordinates.
      del.loc <- as.numeric(strsplit(x = gsub(pattern = "del", x = mutation, replacement = ""), split = "to")[[1]])
      del.loc <- del.loc + segment.df[segment.df$segment == segment, "start"]
      deletion.df[[length(deletion.df)+1]] <- data.frame(start = del.loc[1], end = del.loc[2], type = "Deletion")
    } else if (grepl(pattern = "^ins", x = mutation)) {
      # "ins<pos><bases>" -> insertion of nchar(<bases>) at <pos>.
      # NOTE(review): "[1-9]+" omits '0', so a '0' in the position survives
      # and inflates ins.length; "[A-Z,a-z]" also contains a stray comma.
      # Likely should be "[0-9]+" and "[A-Za-z]" -- confirm and fix.
      ins.loc <- gsub(pattern = "ins", x = mutation, replacement = "")
      ins.length <- nchar(gsub(pattern = "([1-9]+)", x = ins.loc, replacement = ""))
      ins.loc <- as.numeric(gsub(pattern = "([A-Z,a-z]+)", x = ins.loc, replacement = "")) + segment.df[segment.df$segment == segment, "start"]
      insertion.df[[length(insertion.df) + 1]] <- data.frame(start = ins.loc, end = ins.loc + ins.length, type = "Insertion")
    } else {
      # Point mutations joined by "_", optionally tagged "synonymous" /
      # "noncoding"; a tag applies to the immediately preceding mutation.
      all_mutations <- strsplit(x = mutation, split = "_")[[1]]
      locations <- as.numeric(sapply(X = all_mutations, FUN = gsub, pattern = "([A-Z,a-z]+)", replacement = ""))
      locations <- locations[!is.na(locations)]
      locations <- data.frame(start = locations + segment.df[segment.df$segment == segment, "start"],
                              type = "Non-synonymous")
      if (any(all_mutations == "synonymous")) {
        locations[which(all_mutations == "synonymous") - 1, "type"] <- "Synonymous"
      }
      if (any(all_mutations == "noncoding")) {
        locations[which(all_mutations == "noncoding") - 1, "type"] <- "Non-coding"
      }
      locations$end <- locations$start
      mutation.df[[length(x = mutation.df) + 1]] <- locations
    }
  }
  # Point mutations first, then deletions, then insertions; tag with cell.
  return.df <- do.call(what = rbind, args = c(mutation.df, deletion.df, insertion.df))
  return.df$cell <- cell
  return(return.df)
}
# Create long data.frame of segment presence for each cell
#
# @param data data.frame with one row per cell; segment-presence flags live
#   in columns named paste0(pattern, segment name)
# @param segment.df data.frame containing the coordinates of each segment
# @param pattern prefix pattern to match segment presence columns
#
# @return Returns a data.frame where each row contains a detected segment, its
#   positional information, and cell index
ExtractSegmentPresence <- function(data, segment.df, pattern) {
  # seq_len() is safe for zero-row input, unlike 1:nrow(data) which would
  # iterate over c(1, 0).
  segment.df.per.cell <- lapply(X = seq_len(nrow(x = data)), FUN = function(x) {
    segment.df$cell <- x
    segment.df$present <- as.logical(x = unlist(x = data[x, paste0(pattern, segment.df$segment)]))
    # Keep only the segments flagged present in this cell.
    segment.df[segment.df$present, ]
  })
  segment.df.per.cell <- do.call(what = rbind, args = segment.df.per.cell)
  # The helper column is no longer needed in the long output.
  segment.df.per.cell$present <- NULL
  return(segment.df.per.cell)
}
# Main ggplot2 theme shared by every PacBio panel: blank panel/plot
# backgrounds and grid, x-axis line only, no axis titles, 12pt tick labels,
# 15pt/12pt legend title/text, and extra bottom margin (90) for the rotated
# box-column labels drawn outside the panel.
PacBioTheme <- function() {
  theme(
    axis.ticks.y = element_blank(),
    axis.title.y = element_blank(),
    axis.title.x = element_blank(),
    axis.text.y = element_text(size = 12),
    axis.text.x = element_text(size = 12),
    panel.grid = element_blank(),
    plot.background = element_blank(),
    panel.background = element_blank(),
    axis.line.x = element_line(),
    plot.margin = margin(0, 20, 90, 0),
    legend.key = element_rect(fill = NA),
    legend.title = element_text(size = 15),
    legend.text = element_text(size = 12)
  )
}
### Main plotting function #####################################################
# Main plotting function: for each cell draws the detected influenza segments
# as gene arrows on one shared coordinate axis, overlays point mutations
# (dots) and indels (coloured bars), and optionally adds per-cell metadata
# value boxes to the left of the arrows. Panels (ncol of them) are combined
# into a single figure.
#
# NOTE(review): several lines below index the global `dat` rather than the
# `data` argument (marked inline); this works only because the enclosing
# script defines `dat` -- confirm whether `data` was intended.
PacBioPlot <- function(
data,
segment.df,
color.segments.by = "percent_viral_UMIs",
segment.color = '#0072B2',
order.segments.by = "percent_viral_UMIs",
order.name = "% mRNA from flu",
box = 'percent_supernatant',
box.name ='% supernatant',
# NOTE(review): default lacks the leading "#" of a hex colour ("#009E73");
# the script below overrides it, so the defect is latent -- confirm and fix.
box.color.high = '009E73',
ncol = 1,
arrow_height = 0.325, # vertical space per arrow in inches
arrow_frac_height = 0.55 # arrow takes up this much of available height
) {
ncells <- nrow(x = data)
segments <- segment.df$segment
# Long-format tables: one row per detected segment / per parsed mutation.
segment.df.cells <- ExtractSegmentPresence(data = data, segment.df = segment.df, pattern = "present_")
mutations.df <- ParseMutations(data = data, segments = segments, segment.df = segment.df, pattern = "mutations_")
segment.df.cells <- segment.df.cells[segment.df.cells$cell <= ncells, ]
mutations.df <- mutations.df[mutations.df$cell <= ncells, ]
# NOTE(review): global `dat` used here (and below) instead of `data`.
segment.df.cells[['segment_coloring']] <- dat[[color.segments.by]][segment.df.cells$cell]
if (!is.null(x = box)) {
if (length(x = box) != length(x = box.name)) {
stop("Please provide equal length vectors for the parameters box and box.name.")
}
# One helper column (box_1, box_2, ...) per requested metadata box.
box.col.names <- paste0('box_', 1:length(x = box))
for (i in 1:length(x = box)) {
segment.df.cells[[box.col.names[i]]] <- dat[[box[i]]][segment.df.cells$cell]
}
}
# reorder cells by the chosen metric (descending); cell.remap maps an
# original cell index to its display row
segment.df.cells[['segment_ordering']] <- dat[[order.segments.by]][segment.df.cells$cell]
cell.remap <- 1:ncells
names(x = cell.remap) <- order(unique(x = segment.df.cells[, c("cell", "segment_ordering")])$segment_ordering, decreasing = TRUE)
segment.df.cells$cell_ordered <- sapply(X = segment.df.cells$cell, FUN = function(x) cell.remap[as.character(x = x)])
mutations.df$cell_ordered <- sapply(X = mutations.df$cell, FUN = function(x) cell.remap[as.character(x = x)])
# dummy data.frame for plotting missing segments (grey baseline per cell)
missing.segment.df <- data.frame(y = 1:ncells)
# Split indels from point mutations; they get separate geoms and scales.
indel.df <- mutations.df[mutations.df$type %in% c("Insertion", "Deletion"), ]
indel.df$type <- droplevels(indel.df$type)
mutations.df <- mutations.df[!mutations.df$type %in% c("Insertion", "Deletion"), ]
mutations.df$type <- droplevels(x = mutations.df$type)
# NOTE(review): box.col.names is only defined when box is non-NULL, so the
# next two lines error for box = NULL -- confirm whether NULL is a
# supported value for `box`.
box.labels <- unique(x = segment.df.cells[, c(box.col.names, "cell_ordered")])
box.labels[, 1:length(x = box.col.names)] <- apply(box.labels[, 1:length(x = box.col.names), drop = FALSE], MARGIN = 2, FUN = round, digits = 1)
# Partition display rows into ncol roughly equal panels.
cell.cols <- split(1:ncells, ceiling(seq_along(1:ncells)/(ncells/ncol)))
plots <- lapply(X = cell.cols, FUN = function(x) {
ncells.plot <- length(x = x)
# Flip row indices within this panel so the best-ranked cell plots on top.
segment.df.cells.sub <- segment.df.cells[segment.df.cells$cell_ordered %in% x, ]
segment.df.cells.sub$cell_ordered <- abs(segment.df.cells.sub[segment.df.cells.sub$cell_ordered %in% x, "cell_ordered"] - (max(x) + 1))
missing.segment.df.sub <- missing.segment.df[missing.segment.df$y %in% x, ,drop=FALSE]
missing.segment.df.sub$y <- abs(missing.segment.df.sub$y - (max(x) + 1))
mutations.df.sub <- mutations.df[mutations.df$cell_ordered %in% x, ]
mutations.df.sub$cell_ordered <- abs(mutations.df.sub[mutations.df.sub$cell_ordered %in% x, "cell_ordered"] - (max(x) + 1))
indel.df.sub <- indel.df[indel.df$cell_ordered %in% x, ]
indel.df.sub$cell_ordered <- abs(indel.df.sub[indel.df.sub$cell_ordered %in% x, "cell_ordered"] - (max(x) + 1))
box.labels.sub <- box.labels[box.labels$cell_ordered %in% x, ]
box.labels.sub$cell_ordered <- abs(box.labels.sub[box.labels.sub$cell_ordered %in% x, "cell_ordered"] - (max(x) + 1))
# Layer order: grey baselines, gene arrows, indel bars, mutation points;
# new_scale_color()/new_scale_fill() reset scales between layers.
plot <- ggplot(segment.df.cells.sub) +
geom_segment(data = missing.segment.df.sub, aes_string(y = "y", yend = "y", x = 0, xend= max(segment.df$end)), color = "grey") +
geom_gene_arrow(aes_string(xmin = "start", xmax = "end", y = "cell_ordered"),
arrow_body_height = unit(arrow_frac_height * arrow_height, 'in'),
arrowhead_height = unit(arrow_frac_height * arrow_height, 'in'),
arrowhead_width = unit(0.3 * arrow_height, 'in'),
fill = segment.color,
size = 0.4) +
geom_segment(data = indel.df.sub, aes_string(x = "start", xend = "end", y = "cell_ordered", yend = "cell_ordered", color = "type"), size = 2.5) +
scale_color_manual(values = c('#CC79A7', '#F0E442'), name = "Indel class", drop = FALSE, guide = guide_legend(order = 2)) +
new_scale_color() +
geom_point(data = mutations.df.sub, aes_string(x = "start", y = "cell_ordered", fill = "type"), color = "black", size = 3.5, pch = 21) +
scale_fill_manual(values = c("#999999", '#E69F00', "#009E73"), name = "Mutation class", drop = FALSE, guide = guide_legend(order = 1)) +
new_scale_fill()
x.pos <- NULL
if (!is.null(x = box)) {
# Each metadata box column is drawn a further 500 units left of x = 0.
for (i in 1:length(x = box)) {
x.pos[i] <- -500 - ((i-1) * 500)
box.labels.sub[is.na(box.labels.sub)] <- "NA"
plot <- plot + geom_tile(aes_string(x = x.pos[i], y = "cell_ordered", width = 400, height = arrow_frac_height + 0.1, fill = box.col.names[i]), color = "black") +
geom_text(data = box.labels.sub, aes_string(x = x.pos[i], y = "cell_ordered", label = box.col.names[i]), size = 3) +
scale_fill_gradient(low = 'white', high = box.color.high[i], name = box.name[i], limits = c(min(segment.df.cells[[box.col.names[i]]]), max(segment.df.cells[[box.col.names[i]]]))) +
new_scale_fill()
if (length(x = box) > 1) {
plot <- plot + coord_cartesian(clip = 'off') +
annotate(geom = "text", x = x.pos[i], y = 0, label = box.name[i], angle = 90, size = 4, hjust = 1)
}
}
}
# Axis setup differs for one vs several boxes: with several boxes the box
# labels are drawn as rotated annotations instead of x-axis labels.
if (length(x = box) > 1) {
plot <- plot +
scale_x_continuous("", breaks = c(x.pos, segment.df$start + segment.df$len/2), labels = c(rep("", times = length(x = box)), gsub(pattern = "flu", replacement = "", x = segment.df$segment)), limits = c(min(x.pos)-200, max(segment.df$end)), expand = c(0.01, 0.01)) +
scale_y_continuous("", breaks = 1:ncells.plot, labels = paste0('Cell ', rev(x))) +
coord_cartesian(clip = "off", ylim = c(0.3, ncells.plot + 0.5), expand = FALSE) +
PacBioTheme()
} else {
plot <- plot +
scale_x_continuous("", breaks = c(x.pos, segment.df$start + segment.df$len/2), labels = c(box.name, gsub(pattern = "flu", replacement = "", x = segment.df$segment)), limits = c(min(x.pos)-200, max(segment.df$end)), expand = c(0.01, 0.01)) +
scale_y_continuous("", breaks = 1:ncells.plot, labels = paste0('Cell ', rev(x)), expand = c(0.01, 0.01), limits = c(0.5, ncells.plot + 0.5)) +
PacBioTheme() + theme(plot.margin = margin(0, 20, 0, 0))
}
})
# Combine the per-column panels and merge their legends.
wrap_plots(plots) + plot_layout(guides = 'collect')
}
### Setup ######################################################################
# Read in the data and generate influenza segment data.frame with positioning
# information.
# NOTE(review): `csv.filename` and `plot.filename` are not defined in this
# section -- presumably set earlier in the file or by the caller; confirm
# before running standalone.
dat <- read.csv(file = csv.filename)
# Convert fractions to the percentage columns referenced by PacBioPlot's
# defaults and by the call below.
dat$percent_viral_UMIs <- dat$frac_viral_UMIs * 100
dat$percent_supernatant <- dat$freq_supernatant * 100
# The eight influenza genome segments with their lengths; start/end lay them
# out head-to-tail on a single shared coordinate axis.
segment.df <- data.frame(
segment = c("fluPB2", "fluPB1", "fluPA", "fluHA", "fluNP", "fluNA", "fluM", "fluNS"),
len = c(2341, 2341, 2233, 2035, 1565, 1735, 1027, 890)
)
segment.df$end <- cumsum(segment.df$len)
segment.df$start <- segment.df$end - segment.df$len + 1
### Generate/Save plot #########################################################
plot <- PacBioPlot(
dat,
segment.df,
box = c("percent_viral_UMIs", "percent_supernatant"),
box.name = c("% mRNA from flu", "% Supernatant"),
box.color.high = c("#009E73", "#E69F00"),
order.segments.by = "percent_viral_UMIs",
order.name = "% supernatant",
color.segments.by = 'percent_supernatant',
ncol = 1
)
ggsave(plot, filename = plot.filename, width = 14, height = 30)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/data.R
\name{download_microbiome}
\alias{download_microbiome}
\title{Download microbiome data sets}
\usage{
download_microbiome(id = "datasets")
}
\arguments{
\item{id}{Data set name. For options, see download_microbiome()}
}
\value{
Data set
}
\description{
Download microbiome data sets
}
\examples{
# x <- download_microbiome("peerj32")
}
\author{
Contact: Leo Lahti \email{microbiome-admin@googlegroups.com}
}
\references{
To cite the microbiome R package, see citation('microbiome')
}
\keyword{utilities}
| /man/download_microbiome.Rd | no_license | hjanime/microbiome | R | false | false | 597 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/data.R
\name{download_microbiome}
\alias{download_microbiome}
\title{Download microbiome data sets}
\usage{
download_microbiome(id = "datasets")
}
\arguments{
\item{id}{Data set name. For options, see download_microbiome()}
}
\value{
Data set
}
\description{
Download microbiome data sets
}
\examples{
# x <- download_microbiome("peerj32")
}
\author{
Contact: Leo Lahti \email{microbiome-admin@googlegroups.com}
}
\references{
To cite the microbiome R package, see citation('microbiome')
}
\keyword{utilities}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/de_solver.R
\name{solve_de}
\alias{solve_de}
\title{solve_de}
\usage{
solve_de(sim, params, inits, Tmax, numsteps = 10000, solver = "ode",
sizestep = NULL, verbose = FALSE, data.times = NULL,
method = "lsoda", ...)
}
\arguments{
\item{sim}{function; solver compatible specification of the DE}
\item{params}{numeric; named vector of parameter values}
\item{inits}{numeric; initial values. Must be in the same order as specified within sim!}
\item{Tmax}{numeric; maximum timestep}
\item{numsteps}{numeric}
\item{solver}{Choice of solver to use 1 or "ode" = deSolve::ode, 2 or "dde" = PBSddesolve::dde, 3 or "dede" = deSolve::dede}
\item{sizestep}{for solver}
\item{verbose}{passed to deSolve::ode}
\item{data.times}{numeric; a vector of times at which the ODE is to be evaluated. Defaults to NULL. If a value is supplied it takes precedence over any value supplied to \code{numsteps} or \code{sizestep}.}
\item{method}{solver method}
\item{...}{additional arguments to solver}
}
\value{
integrated ode object. Data structure depends on the employed solver.
}
\description{
solve_de
}
| /man/solve_de.Rd | no_license | waternk/debinfer | R | false | true | 1,172 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/de_solver.R
\name{solve_de}
\alias{solve_de}
\title{solve_de}
\usage{
solve_de(sim, params, inits, Tmax, numsteps = 10000, solver = "ode",
sizestep = NULL, verbose = FALSE, data.times = NULL,
method = "lsoda", ...)
}
\arguments{
\item{sim}{function; solver compatible specification of the DE}
\item{params}{numeric; named vector of parameter values}
\item{inits}{numeric; initial values. Must be in the same order as specified within sim!}
\item{Tmax}{numeric; maximum timestep}
\item{numsteps}{numeric}
\item{solver}{Choice of solver to use 1 or "ode" = deSolve::ode, 2 or "dde" = PBSddesolve::dde, 3 or "dede" = deSolve::dede}
\item{sizestep}{for solver}
\item{verbose}{passed to deSolve::ode}
\item{data.times}{numeric a vector of times at which the ODE is to be evaluated. Defaults to NULL. If value is supplied it takes precedence over any value supplied to \code{numsteps} or \code{sizesteps}.}
\item{method}{solver method}
\item{...}{additional arguments to solver}
}
\value{
integrated ode object. Data structure depends on the employed solver.
}
\description{
solve_de
}
|
testlist <- list(a = 0L, b = 0L, x = c(-1073790465L, -1088728832L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610056550-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 518 | r | testlist <- list(a = 0L, b = 0L, x = c(-1073790465L, -1088728832L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
################################################################################
#
# File name: Expr_CN_Profile.R
#
# Authors: Jacek Marzec ( j.marzec@qmul.ac.uk )
#
# Barts Cancer Institute,
# Queen Mary, University of London
# Charterhouse Square, London EC1M 6BQ
#
################################################################################
################################################################################
#
# Description: Script generating box-plots and bar-plots to visualise expression measurements across samples and groups (as indicated in the target file) from normalised expression data for a user-defined gene. NOTE: the script allows processing of gene matrices with duplicated gene IDs.
#
# Command line use example: R --file=./Expr_CN_Profile.R --args "CCLE_PC_processed_mRNA.txt" "CCLE_PC_processed_CN.txt" "CCLE_target.txt" "Target" "KRAS" "Example_results/PC_Expr_CN_Profile"
#
# First arg: Full path with name of the normalised expression matrix
# Second arg: Full path with name of the relative linear copy-number matrix
# Third arg: Full path with name of the text file with samples annotation. The file is expected to include the following columns: sample name (1st column) and annotation (3rd column)
# Forth arg: Variable from the samples annotation file to be used for samples colouring
# Fifth arg: ID of gene/probe of interest
# Six arg: Full path with name of the output folder
#
################################################################################
# silent warnings
options(warn=-1)
##### Clear workspace
# NOTE(review): rm(list=ls()) wipes the caller's workspace if this script is
# ever source()'d interactively; acceptable only for command-line use.
rm(list=ls())
##### Close any open graphics devices
graphics.off()
### Setting environment for pandoc
# htmlwidgets::saveWidget (used below) shells out to pandoc, which reads HOME;
# here HOME is blanked -- presumably for the server environment. TODO confirm.
Sys.setenv(HOME = "")
#===============================================================================
# Functions
#===============================================================================
##### Create 'not in' operator
"%!in%" <- function(x,table) match(x,table, nomatch = 0) == 0
##### Assign colours to analysed groups
# @param targets Character/factor vector of group labels, one per sample.
# @return list(group colours in level order, per-sample colours).
getTargetsColours <- function(targets) {
  ##### Predefined selection of colours for groups
  ##### NOTE(review): only 16 colours -- more than 16 groups yields NA colours.
  targets.colours <- c("red","blue","green","darkgoldenrod","darkred","deepskyblue", "coral", "cornflowerblue", "chartreuse4", "bisque4", "chocolate3", "cadetblue3", "darkslategrey", "lightgoldenrod4", "mediumpurple4", "orangered3")
  f.targets <- factor(targets)
  ##### One colour per group level; seq_len() keeps this correct for empty
  ##### input (1:0 would select c("red"))
  vec.targets <- targets.colours[seq_len(nlevels(f.targets))]
  ##### Vectorised lookup replaces the element-wise loop: each sample gets
  ##### the colour of its factor level
  targets.colour <- vec.targets[as.integer(f.targets)]
  return( list(vec.targets, targets.colour) )
}
##### Deal with the duplicated genes: make the IDs in the first column unique
##### and promote them to row names
# @param expData Data.frame whose first column holds (possibly duplicated)
#   gene IDs and whose remaining columns hold per-sample values.
# @return The input without the ID column; repeated IDs become row names
#   suffixed "-2", "-3", ... in order of appearance.
duplGenes <- function(expData) {
  genesList <- NULL
  # Per-gene duplicate counters, e.g. genesRepl[["TP53"]] == 3 after the
  # third occurrence of "TP53".
  genesRepl <- NULL
  # seq_len() is safe for zero-row input (1:0 would iterate c(1, 0)).
  for ( i in seq_len(nrow(expData)) ) {
    geneName <- expData[i, 1]
    ##### Distinguish duplicated genes by appending a duplicate number
    # NOTE(review): an input ID that already ends in "-2" could collide with
    # a generated name; same limitation as the original implementation.
    if ( geneName %in% genesList ) {
      if ( geneName %in% names(genesRepl) ) {
        genesRepl[[ geneName ]] <- genesRepl[[ geneName ]] + 1
        geneName <- paste0(geneName, "-", genesRepl[[ geneName ]])
      } else {
        genesRepl[[ geneName ]] <- 2
        geneName <- paste0(geneName, "-2")
      }
    }
    genesList <- c(genesList, geneName)
  }
  rownames(expData) <- genesList
  ##### Remove the ID column; drop = FALSE keeps a data.frame even when only
  ##### one sample column remains (plain [, -1] would collapse it to a vector)
  expData <- expData[, -1, drop = FALSE]
  return(expData)
}
#===============================================================================
# Load libraries
#===============================================================================
suppressMessages(library(plotly))
suppressMessages(library(optparse))
#===============================================================================
# Catching the arguments
#===============================================================================
# Define the command-line interface (optparse); each flag maps to one input
# of the analysis below.
option_list = list(
make_option(c("-e", "--exp_file"), action="store", default=NA, type='character',
help="File containing experimental data"),
make_option(c("-n", "--cn_file"), action="store", default=NA, type='character',
help="File containing CN data"),
make_option(c("-t", "--target"), action="store", default=NA, type='character',
help="Clinical data saved in tab-delimited format"),
make_option(c("-c", "--colouring"), action="store", default=NA, type='character',
help="Variable from the samples annotation file to be used for samples colouring"),
make_option(c("-p", "--gene"), action="store", default=NA, type='character',
help="ID of gene/probe of interest"),
make_option(c("-d", "--dir"), action="store", default=NA, type='character',
help="Default directory"),
make_option(c("-x", "--hexcode"), action="store", default=NA, type='character',
help="unique_id to save temporary plots")
)
opt = parse_args(OptionParser(option_list=option_list))
# Unpack the parsed options into the short names used throughout the script.
expFile <- opt$exp_file
cnFile <- opt$cn_file
annFile <- opt$target
target <- opt$colouring
gene <- opt$gene
outFolder <- opt$dir
hexcode <- opt$hexcode
#===============================================================================
# Main
#===============================================================================
# Read file with expression data (genes in rows, first column = gene IDs,
# samples in columns)
expData <- read.table(expFile,sep="\t",as.is=TRUE,header=TRUE,row.names=NULL)
# Deal with the duplicated genes
expData <- duplGenes(expData)
# Read file with CN data
cnData <- read.table(cnFile,sep="\t",as.is=TRUE,header=TRUE,row.names=NULL)
# Deal with the duplicated genes
cnData <- duplGenes(cnData)
# Keep only samples present in both the expression and CN datasets
# (record the dropped sample names first for the optional reporting below)
absentSamples.cnData <- colnames(expData)[colnames(expData) %!in% colnames(cnData)]
absentSamples.expData <- colnames(cnData)[colnames(cnData) %!in% colnames(expData)]
expData <- expData[,colnames(expData) %in% colnames(cnData)]
cnData <- cnData[,colnames(cnData) %in% colnames(expData)]
# Make sure that the samples order in the expression and CN matrices are the same
cnData <- cnData[, colnames(expData)]
# Retrieve the expression data file name (basename of the path)
coreName <- strsplit(expFile, "/")
coreName <- coreName[[1]][length(coreName[[1]])]
# Read sample annotation file; sample names become row names, and "-" is
# replaced by "." to match R-mangled column names of the data matrices
annData <- read.table(annFile,sep="\t",as.is=TRUE,header=TRUE,row.names=1)
rownames(annData) <- gsub("-", ".", rownames(annData))
# Keep only samples with annotation info
expData <- expData[,colnames(expData) %in% rownames(annData)]
cnData <- cnData[,colnames(cnData) %in% rownames(annData)]
annData <- subset(annData, rownames(annData) %in% colnames(expData))
# Make sure that the samples order in the data matrix and annotation file is the same
annData <- annData[colnames(expData),]
# Check if the queried gene is present in the expression data; quit silently
# when it is not
genes <- rownames(expData)
if ( gene %!in% rownames(expData) ) {
#cat("The gene/probe", gene, "is not present in the data!", sep=" ")
q()
# ... and extract the expression of the gene of interest
} else {
gene.expr <- data.matrix(expData[gene, ])
gene.cn <- data.matrix(cnData[gene, ])
}
# Change working directory to the project workspace (all HTML outputs below
# are written relative to outFolder)
setwd(outFolder)
# # Report samples not present in the the expression or CN matrices
# if ( length(absentSamples.expData) > 0 ) {
#
# write(absentSamples.expData, file = paste(coreName, gene, "absent_in_mRNA_data.txt", sep = "_"), append = FALSE, sep="\t")
# }
#
# if ( length(absentSamples.cnData) > 0 ) {
#
# write(absentSamples.cnData, file = paste(coreName, gene, "absent_in_CN_data.txt", sep = "_"), append = FALSE, sep="\t")
# }
#===============================================================================
# Generate mRNA expression vs DNA copy-number scatterplot
#===============================================================================
targets <- annData[,target]
targets.colour <- getTargetsColours(targets)
# Calculate Pearson correlation coefficient (rounded to 2 decimals for the
# plot title)
expr_cn.corr <- round(
cor.test( as.numeric(gene.expr), as.numeric(gene.cn), method = "pearson" )$estimate, digits=2
)
# Generate scatter plot (PLOTLY)
# Prepare data frame: one row per sample with its group, CN and mRNA values
gene.df <- data.frame(targets, as.numeric(gene.cn), as.numeric(gene.expr))
colnames(gene.df) <- c("Target", "CN", "mRNA")
p <- plot_ly(gene.df, x = ~CN, y = ~mRNA, color = ~Target, text=colnames(gene.expr), colors = targets.colour[[1]], type='scatter', mode = "markers", marker = list(size=10, symbol="circle"), width = 800, height = 600) %>%
layout(title = paste0("Pearson's r = ", expr_cn.corr), xaxis = list(title = paste0(gene, " relative linear copy-number values")), yaxis = list(title = paste0(gene, " mRNA expression")), margin = list(l=50, r=50, b=50, t=50, pad=4), autosize = F, legend = list(orientation = 'h', y = 1))
# Save the scatter plot as html (PLOTLY); hexcode keeps per-run files unique
htmlwidgets::saveWidget(p, paste0(hexcode, "_mRNA_vs_CN_plot.html"))
#===============================================================================
# Calculate putative copy-number alterations
#===============================================================================
# Draw histogram of the gene's relative linear CN values across samples (PLOTLY)
p <- plot_ly(x = ~as.numeric(gene.cn), type = 'histogram', width = 800, height = 500) %>%
layout(xaxis = list( title = paste0(gene, " relative linear copy-number values")), yaxis = list( title = "Frequency"), margin = list(l=50, r=50, b=50, t=50, pad=4), autosize = F)
# Save the histogram as html (PLOTLY)
htmlwidgets::saveWidget(p, paste0(hexcode, "_corr_hist.html"))
# Assign gain for linear CN values above 0.5 and loss for linear CN values below -0.5
gene.cn[ gene.cn > 0.5 ] <- 1
gene.cn[ gene.cn < -0.5 ] <- -1
gene.cn[ gene.cn <= 0.5 & gene.cn >= -0.5 ] <- 0
#===============================================================================
# Generate mRNA expression vs putative DNA copy-number alterations box-plot
#===============================================================================
# Prepare dataframe
# NOTE(review): this first gene.df is overwritten a few lines below and looks
# redundant -- confirm before removing.
gene.df <- data.frame(targets, rep(unique(targets)[1],length(targets)), as.numeric(gene.cn), as.numeric(gene.expr))
colnames(gene.df) <- c("Target", "Box", "CN", "mRNA")
# Relabel the -1/0/1 calls with readable categories; after the first
# assignment gene.cn becomes character, so the later comparisons coerce
gene.cn[ gene.cn == 1 ] <- "(1) Gain"
gene.cn[ gene.cn == -1 ] <- "(-1) Loss"
gene.cn[ gene.cn == 0 ] <- "(0) Diploid"
gene.df <- data.frame(targets, rep(unique(targets)[1],length(targets)), data.frame(t(gene.cn)), as.numeric(gene.expr))
colnames(gene.df) <- c("Target", "Box", "CN", "mRNA")
# Generate box-plot (PLOTLY): grey per-category boxes overlaid with
# per-sample points coloured by group
p <- plot_ly(
gene.df,
x = ~CN,
y = ~mRNA,
color = ~Target,
colors = targets.colour[[1]],
type='scatter',
mode = "markers",
marker = list(size=10, symbol="circle"),
width = 800,
height = 600,
text=colnames(gene.expr)
) %>%
add_boxplot(
gene.df, x= ~CN, y= ~mRNA, color = ~Box, key=FALSE, line = list(color = "grey"), showlegend=FALSE
) %>%
layout(
title = "",
xaxis = list(title = paste0(gene, " relative linear copy-number values")),
yaxis = list(title = paste0(gene, " mRNA expression")),
margin = list(l=50, r=50, b=50, t=50, pad=4), autosize = F, legend = list(orientation = 'h', y = 1)
)
# Save the box-plot as html (PLOTLY)
htmlwidgets::saveWidget(p, paste0(hexcode, "_mRNA_vs_CN_boxplot.html"))
##### Clear workspace
rm(list=ls())
##### Close any open graphics devices
graphics.off()
| /pages/scripts/LiveExprCN.R | no_license | JMarzec/BCN_Analytics | R | false | false | 11,464 | r | ################################################################################
#
# File name: Expr_CN_Profile.R
#
# Authors: Jacek Marzec ( j.marzec@qmul.ac.uk )
#
# Barts Cancer Institute,
# Queen Mary, University of London
# Charterhouse Square, London EC1M 6BQ
#
################################################################################
################################################################################
#
# Description: Script generating box-plots and bar-plots to visualise expression measurements across samples and groups (as indicated in the target file) from normalised expression data for a user-defined gene. NOTE: the script allows processing of gene matrices with duplicated gene IDs.
#
# Command line use example: R --file=./Expr_CN_Profile.R --args "CCLE_PC_processed_mRNA.txt" "CCLE_PC_processed_CN.txt" "CCLE_target.txt" "Target" "KRAS" "Example_results/PC_Expr_CN_Profile"
#
# First arg: Full path with name of the normalised expression matrix
# Second arg: Full path with name of the relative linear copy-number matrix
# Third arg: Full path with name of the text file with samples annotation. The file is expected to include the following columns: sample name (1st column) and annotation (3rd column)
# Forth arg: Variable from the samples annotation file to be used for samples colouring
# Fifth arg: ID of gene/probe of interest
# Six arg: Full path with name of the output folder
#
################################################################################
# silent warnings
options(warn=-1)
##### Clear workspace
rm(list=ls())
##### Close any open graphics devices
graphics.off()
### Setting environment for pandoc
Sys.setenv(HOME = "")
#===============================================================================
# Functions
#===============================================================================
##### Create 'not in' operator
"%!in%" <- function(x,table) match(x,table, nomatch = 0) == 0
##### Assign colours to analysed groups
# @param targets Character/factor vector of group labels, one per sample.
# @return list(group colours in level order, per-sample colours).
getTargetsColours <- function(targets) {
  ##### Predefined selection of colours for groups
  ##### NOTE(review): only 16 colours -- more than 16 groups yields NA colours.
  targets.colours <- c("red","blue","green","darkgoldenrod","darkred","deepskyblue", "coral", "cornflowerblue", "chartreuse4", "bisque4", "chocolate3", "cadetblue3", "darkslategrey", "lightgoldenrod4", "mediumpurple4", "orangered3")
  f.targets <- factor(targets)
  ##### One colour per group level; seq_len() keeps this correct for empty
  ##### input (1:0 would select c("red"))
  vec.targets <- targets.colours[seq_len(nlevels(f.targets))]
  ##### Vectorised lookup replaces the element-wise loop: each sample gets
  ##### the colour of its factor level
  targets.colour <- vec.targets[as.integer(f.targets)]
  return( list(vec.targets, targets.colour) )
}
##### Deal with the duplicated genes: make the IDs in the first column unique
##### and promote them to row names
# @param expData Data.frame whose first column holds (possibly duplicated)
#   gene IDs and whose remaining columns hold per-sample values.
# @return The input without the ID column; repeated IDs become row names
#   suffixed "-2", "-3", ... in order of appearance.
duplGenes <- function(expData) {
  genesList <- NULL
  # Per-gene duplicate counters, e.g. genesRepl[["TP53"]] == 3 after the
  # third occurrence of "TP53".
  genesRepl <- NULL
  # seq_len() is safe for zero-row input (1:0 would iterate c(1, 0)).
  for ( i in seq_len(nrow(expData)) ) {
    geneName <- expData[i, 1]
    ##### Distinguish duplicated genes by appending a duplicate number
    # NOTE(review): an input ID that already ends in "-2" could collide with
    # a generated name; same limitation as the original implementation.
    if ( geneName %in% genesList ) {
      if ( geneName %in% names(genesRepl) ) {
        genesRepl[[ geneName ]] <- genesRepl[[ geneName ]] + 1
        geneName <- paste0(geneName, "-", genesRepl[[ geneName ]])
      } else {
        genesRepl[[ geneName ]] <- 2
        geneName <- paste0(geneName, "-2")
      }
    }
    genesList <- c(genesList, geneName)
  }
  rownames(expData) <- genesList
  ##### Remove the ID column; drop = FALSE keeps a data.frame even when only
  ##### one sample column remains (plain [, -1] would collapse it to a vector)
  expData <- expData[, -1, drop = FALSE]
  return(expData)
}
#===============================================================================
# Load libraries
#===============================================================================
suppressMessages(library(plotly))
suppressMessages(library(optparse))
#===============================================================================
# Catching the arguments
#===============================================================================
option_list = list(
make_option(c("-e", "--exp_file"), action="store", default=NA, type='character',
help="File containing experimental data"),
make_option(c("-n", "--cn_file"), action="store", default=NA, type='character',
help="File containing CN data"),
make_option(c("-t", "--target"), action="store", default=NA, type='character',
help="Clinical data saved in tab-delimited format"),
make_option(c("-c", "--colouring"), action="store", default=NA, type='character',
help="Variable from the samples annotation file to be used for samples colouring"),
make_option(c("-p", "--gene"), action="store", default=NA, type='character',
help="ID of gene/probe of interest"),
make_option(c("-d", "--dir"), action="store", default=NA, type='character',
help="Default directory"),
make_option(c("-x", "--hexcode"), action="store", default=NA, type='character',
help="unique_id to save temporary plots")
)
opt = parse_args(OptionParser(option_list=option_list))
expFile <- opt$exp_file
cnFile <- opt$cn_file
annFile <- opt$target
target <- opt$colouring
gene <- opt$gene
outFolder <- opt$dir
hexcode <- opt$hexcode
#===============================================================================
# Main
#===============================================================================
# Read file with expression data
expData <- read.table(expFile,sep="\t",as.is=TRUE,header=TRUE,row.names=NULL)
# Deal with the duplicated genes
expData <- duplGenes(expData)
# Read file with CN data
cnData <- read.table(cnFile,sep="\t",as.is=TRUE,header=TRUE,row.names=NULL)
# Deal with the duplicated genes
cnData <- duplGenes(cnData)
# Keep only samples present in both the expression and CN datasets
absentSamples.cnData <- colnames(expData)[colnames(expData) %!in% colnames(cnData)]
absentSamples.expData <- colnames(cnData)[colnames(cnData) %!in% colnames(expData)]
expData <- expData[,colnames(expData) %in% colnames(cnData)]
cnData <- cnData[,colnames(cnData) %in% colnames(expData)]
# Make sure that the samples order in the expression and CN matrices are the same
cnData <- cnData[, colnames(expData)]
# Retrieve the expression data file name
coreName <- strsplit(expFile, "/")
coreName <- coreName[[1]][length(coreName[[1]])]
# Read sample annotation file
annData <- read.table(annFile,sep="\t",as.is=TRUE,header=TRUE,row.names=1)
rownames(annData) <- gsub("-", ".", rownames(annData))
# Keep only samples with annotation info
expData <- expData[,colnames(expData) %in% rownames(annData)]
cnData <- cnData[,colnames(cnData) %in% rownames(annData)]
annData <- subset(annData, rownames(annData) %in% colnames(expData))
# Make sure that the samples order in the data matrix and annotation file is the same
annData <- annData[colnames(expData),]
# Check whether the queried gene is present in the expression data; quit
# quietly if it is not, otherwise extract its expression and copy-number
# profiles as 1 x samples matrices.
if ( gene %!in% rownames(expData) ) {
#cat("The gene/probe", gene, "is not present in the data!", sep=" ")
q()
# ... and extract the expression of the gene of interest
} else {
gene.expr <- data.matrix(expData[gene, ])
gene.cn <- data.matrix(cnData[gene, ])
}
# Change working directory to the project workspace so subsequent saveWidget
# calls write there.
# NOTE(review): setwd() is a global side effect and is not restored on exit;
# acceptable for a one-shot Rscript, but fragile if this file is source()'d.
setwd(outFolder)
# # Report samples not present in the the expression or CN matrices
# if ( length(absentSamples.expData) > 0 ) {
#
# write(absentSamples.expData, file = paste(coreName, gene, "absent_in_mRNA_data.txt", sep = "_"), append = FALSE, sep="\t")
# }
#
# if ( length(absentSamples.cnData) > 0 ) {
#
# write(absentSamples.cnData, file = paste(coreName, gene, "absent_in_CN_data.txt", sep = "_"), append = FALSE, sep="\t")
# }
#===============================================================================
# Generate mRNA expression vs DNA copy-number scatterplot
#===============================================================================
# Colour samples by the selected annotation column; getTargetsColours is a
# project helper returning a list whose first element is the colour palette.
targets <- annData[,target]
targets.colour <- getTargetsColours(targets)
# Calculate Pearson correlation coefficient between expression and linear CN
expr_cn.corr <- round(
cor.test( as.numeric(gene.expr), as.numeric(gene.cn), method = "pearson" )$estimate, digits=2
)
# Generate scatter plot (PLOTLY)
# Prepare data frame: one row per sample (target group, CN value, expression)
gene.df <- data.frame(targets, as.numeric(gene.cn), as.numeric(gene.expr))
colnames(gene.df) <- c("Target", "CN", "mRNA")
# hover text = sample names (column names of the 1 x samples expression matrix)
p <- plot_ly(gene.df, x = ~CN, y = ~mRNA, color = ~Target, text=colnames(gene.expr), colors = targets.colour[[1]], type='scatter', mode = "markers", marker = list(size=10, symbol="circle"), width = 800, height = 600) %>%
layout(title = paste0("Pearson's r = ", expr_cn.corr), xaxis = list(title = paste0(gene, " relative linear copy-number values")), yaxis = list(title = paste0(gene, " mRNA expression")), margin = list(l=50, r=50, b=50, t=50, pad=4), autosize = F, legend = list(orientation = 'h', y = 1))
# Save the box-plot as html (PLOTLY)
htmlwidgets::saveWidget(p, paste0(hexcode, "_mRNA_vs_CN_plot.html"))
#===============================================================================
# Calculate putative copy-number alterations
#===============================================================================
# Draw histogram of the gene's linear CN values across samples (PLOTLY)
p <- plot_ly(x = ~as.numeric(gene.cn), type = 'histogram', width = 800, height = 500) %>%
layout(xaxis = list( title = paste0(gene, " relative linear copy-number values")), yaxis = list( title = "Frequency"), margin = list(l=50, r=50, b=50, t=50, pad=4), autosize = F)
# Save the histogram as html (PLOTLY)
htmlwidgets::saveWidget(p, paste0(hexcode, "_corr_hist.html"))
# Assign gain for linear CN values above 0.5 and loss for linear CN values below -0.5
# (order matters: the assigned 1/-1 sentinels fall outside [-0.5, 0.5], so the
# final statement only zeroes the genuinely diploid range)
gene.cn[ gene.cn > 0.5 ] <- 1
gene.cn[ gene.cn < -0.5 ] <- -1
gene.cn[ gene.cn <= 0.5 & gene.cn >= -0.5 ] <- 0
#===============================================================================
# Generate mRNA expression vs putative DNA copy-number alterations box-plot
#===============================================================================
# Relabel the discretised CN calls (-1/0/1) with human-readable categories.
# The first assignment coerces the numeric matrix to character; the later
# comparisons against -1 and 0 still match via implicit coercion to "-1"/"0".
gene.cn[ gene.cn == 1 ] <- "(1) Gain"
gene.cn[ gene.cn == -1 ] <- "(-1) Loss"
gene.cn[ gene.cn == 0 ] <- "(0) Diploid"
# Prepare dataframe. "Box" is a constant dummy grouping column so add_boxplot
# draws one grey box per CN category instead of one per target group.
# (A previous redundant construction of gene.df from the still-numeric CN
# values was dead code — it was immediately overwritten — and has been removed.)
gene.df <- data.frame(targets, rep(unique(targets)[1],length(targets)), data.frame(t(gene.cn)), as.numeric(gene.expr))
colnames(gene.df) <- c("Target", "Box", "CN", "mRNA")
# Generate box-plot (PLOTLY): per-sample markers coloured by target group,
# overlaid on per-category box plots.
p <- plot_ly(
gene.df,
x = ~CN,
y = ~mRNA,
color = ~Target,
colors = targets.colour[[1]],
type='scatter',
mode = "markers",
marker = list(size=10, symbol="circle"),
width = 800,
height = 600,
text=colnames(gene.expr)
) %>%
add_boxplot(
gene.df, x= ~CN, y= ~mRNA, color = ~Box, key=FALSE, line = list(color = "grey"), showlegend=FALSE
) %>%
layout(
title = "",
xaxis = list(title = paste0(gene, " relative linear copy-number values")),
yaxis = list(title = paste0(gene, " mRNA expression")),
margin = list(l=50, r=50, b=50, t=50, pad=4), autosize = F, legend = list(orientation = 'h', y = 1)
)
# Save the box-plot as html (PLOTLY)
htmlwidgets::saveWidget(p, paste0(hexcode, "_mRNA_vs_CN_boxplot.html"))
##### Clear workspace
# NOTE(review): rm(list=ls()) wipes the entire global environment — fine at
# the end of a standalone Rscript, but destructive if this file is source()'d
# into an interactive session.
rm(list=ls())
##### Close any open graphics devices
graphics.off()
# ---- end of script ----
# Fixture: fit MASS::mca on a subset of the farms data and attach
# supplementary rows/variables, then run the project's prepare_results()
# (the function under test) on the augmented object.
skip_if_not(require("MASS"))
context("prepare_results.mca")
data(farms)
# Active analysis: rows 4-20, columns 2-4, keeping 5 factors
mca <- MASS::mca(farms[4:20, 2:4], nf = 5)
# Supplementary individuals: rows 1-3 projected onto the fitted axes
supi_df <- farms[1:3, 2:4]
supi <- predict(mca, supi_df, type = "row")
rownames(supi) <- rownames(supi_df)
mca$supi <- supi
# Supplementary variable: column 1 projected as factor levels
mca$supv <- predict(mca, farms[4:20, 1, drop = FALSE], type = "factor")
res <- prepare_results(mca)
# Eigenvalue percentages reported by prepare_results() must match the values
# computed directly from the mca object.
test_that("Eigenvalues are equals", {
  expected_percent <- 100 * mca$d / (mca$p - 1)
  expect_equal(expected_percent, res$eig$percent)
})
# Active variable coordinates on axis 1 must match mca$cs (rounded to 3 dp).
test_that("Variables results are equal", {
  active_axis1 <- res$vars$Type == "Active" & res$vars$Axis == "1"
  expect_equal(
    as.vector(round(mca$cs[, 1], 3)),
    res$vars[active_axis1, "Coord"]
  )
})
# Supplementary variable coordinates on axis 4 must match mca$supv.
# Fixed: `res$var` only worked through `$` partial matching against the
# element actually named `vars` (as used in the sibling tests); the name is
# now spelled out explicitly.
test_that("Supplementary variables results are equal", {
  expect_equal(
    as.vector(round(mca$supv[, 4], 3)),
    data.frame(res$vars)[res$vars$Type == "Supplementary" & res$vars$Axis == "4", "Coord"]
  )
})
# Active individual coordinates on axis 1 must match mca$rs (rounded to 3 dp).
test_that("Individuals results are equal", {
  ind <- data.frame(res$ind)
  expect_equal(
    as.vector(round(mca$rs[, 1], 3)),
    ind[ind$Type == "Active" & ind$Axis == "1", "Coord"]
  )
})
# Supplementary individual coordinates on axis 4 must match mca$supi.
test_that("Supplementary individuals results are equal", {
  ind <- data.frame(res$ind)
  expect_equal(
    as.vector(round(mca$supi[, 4], 3)),
    ind[ind$Type == "Supplementary" & ind$Axis == "4", "Coord"]
  )
})
test_that("Qualitative data are equal", {
ids <- c("5", "11", "14", "16", "20")
data <- eval(as.list(mca$call)$df)
data$Name <- rownames(data)
expect_equal(
as.character(res$quali_data$Use[res$quali_data$Name %in% ids]),
as.character(data[ids, "Use"])
)
})
skip_if_not(require("MASS"))
# Fixture: fit MASS::mca on a subset of the farms data and attach
# supplementary rows/variables, then run the project's prepare_results()
# (the function under test) on the augmented object.
context("prepare_results.mca")
data(farms)
# Active analysis: rows 4-20, columns 2-4, keeping 5 factors
mca <- MASS::mca(farms[4:20, 2:4], nf = 5)
# Supplementary individuals: rows 1-3 projected onto the fitted axes
supi_df <- farms[1:3, 2:4]
supi <- predict(mca, supi_df, type = "row")
rownames(supi) <- rownames(supi_df)
mca$supi <- supi
# Supplementary variable: column 1 projected as factor levels
mca$supv <- predict(mca, farms[4:20, 1, drop = FALSE], type = "factor")
res <- prepare_results(mca)
# Eigenvalue percentages reported by prepare_results() must match the values
# computed directly from the mca object.
test_that("Eigenvalues are equals", {
  expected_percent <- 100 * mca$d / (mca$p - 1)
  expect_equal(expected_percent, res$eig$percent)
})
# Active variable coordinates on axis 1 must match mca$cs (rounded to 3 dp).
test_that("Variables results are equal", {
  active_axis1 <- res$vars$Type == "Active" & res$vars$Axis == "1"
  expect_equal(
    as.vector(round(mca$cs[, 1], 3)),
    res$vars[active_axis1, "Coord"]
  )
})
# Supplementary variable coordinates on axis 4 must match mca$supv.
# Fixed: `res$var` only worked through `$` partial matching against the
# element actually named `vars` (as used in the sibling tests); the name is
# now spelled out explicitly.
test_that("Supplementary variables results are equal", {
  expect_equal(
    as.vector(round(mca$supv[, 4], 3)),
    data.frame(res$vars)[res$vars$Type == "Supplementary" & res$vars$Axis == "4", "Coord"]
  )
})
# Active individual coordinates on axis 1 must match mca$rs (rounded to 3 dp).
test_that("Individuals results are equal", {
  ind <- data.frame(res$ind)
  expect_equal(
    as.vector(round(mca$rs[, 1], 3)),
    ind[ind$Type == "Active" & ind$Axis == "1", "Coord"]
  )
})
# Supplementary individual coordinates on axis 4 must match mca$supi.
test_that("Supplementary individuals results are equal", {
  ind <- data.frame(res$ind)
  expect_equal(
    as.vector(round(mca$supi[, 4], 3)),
    ind[ind$Type == "Supplementary" & ind$Axis == "4", "Coord"]
  )
})
# Qualitative data carried through by prepare_results() must match the
# original input for a sample of row ids. The input data frame is recovered
# by re-evaluating the `df` argument recorded in the mca call.
# Fixed: a stray trailing `|` on the closing line (extraction artifact) left
# the expression syntactically unfinished; it has been removed.
test_that("Qualitative data are equal", {
  ids <- c("5", "11", "14", "16", "20")
  data <- eval(as.list(mca$call)$df)
  data$Name <- rownames(data)
  expect_equal(
    as.character(res$quali_data$Use[res$quali_data$Name %in% ids]),
    as.character(data[ids, "Use"])
  )
})
library(shiny)
library(shinythemes)
library(readr)
library(ggplot2)
library(stringr)
library(dplyr)
library(DT)
library(tools)
library(prophet)
library(forecast)
# Define UI for application that plots features of movies
# Layout: sidebar with file upload, variable pickers and model parameters;
# main panel with one tab per forecasting method.
ui <- fluidPage(
  titlePanel("Forecasting", windowTitle = "Forecasting"),
  # Sidebar layout with a input and output definitions
  sidebarLayout(
    # Inputs
    sidebarPanel(
      fileInput("file1", "Load File (Only csv files and only time series per day)",
                accept = c(
                  "text/csv",
                  "text/comma-separated-values,text/plain",
                  ".csv")),
      checkboxInput("header", "Header", TRUE),
      h4("Select Variables"), # Third level header: Plotting
      # Select variable for date
      # NOTE(review): `names(df)` at UI build time resolves `df` in the global
      # environment (or falls back to stats::df, giving NULL choices);
      # presumably these selectInputs are repopulated server-side via
      # updateSelectInput once a file is uploaded — confirm.
      selectInput(inputId = "datevar",
                  label = "Date (format:YYYY-MM-DD):",
                  choices = names(df)),
      # Select variable for Time Series
      selectInput(inputId = "tsvar",
                  label = "Time Series:",
                  choices = names(df)),
      # Set forecasting horizon (days ahead of the last observation)
      sliderInput(inputId = "horizon",
                  label = "Days ahead for forecasting:",
                  min = 0, max = 100,
                  value = 30),
      #Parameters
      h4("Select Parameters"),
      # Share of the series used for training; the rest is the test window
      sliderInput(inputId = "train.size",
                  label = "Training sample size(%):",
                  min = 0, max = 100,
                  value = 80),
      # Set Zoom in (percent range of the series shown on all plots)
      sliderInput(inputId = "zoomin",
                  label = "Choose time period to zoom in on all the plots(%)",
                  min = 0, max = 100,
                  value = c(0,100))
    ),
    # Output: one tab per method, each with model stats, plot and forecasts
    mainPanel(
      tabsetPanel(id = "tabspanel", type = "tabs",
                  tabPanel(title = "File Content",
                           tableOutput("contents")),
                  tabPanel(title = "Data Visualization",
                           plotOutput(outputId = "tseriesplot")),
                  tabPanel(title = "Regression",
                           fluidRow(h4('The result of the model'),
                                    tableOutput("regressionresult")),
                           fluidRow(h4('Plot of testing data set (red & black) and forecast values (blue)'),
                                    plotOutput(outputId = 'regressionplot')),
                           fluidRow(h4('The forecast values'),
                                    tableOutput("regressionforecast"))),
                  tabPanel(title = "Classical Decomposition",
                           fluidRow(h4('The result of the model'),
                                    tableOutput("decompositionresult")),
                           fluidRow(h4('Plot of testing data set (red & black) and forecast values (blue)'),
                                    plotOutput(outputId = 'decompositionplot')),
                           fluidRow(h4('The forecast values'),
                                    tableOutput("decompositionforecast"))),
                  tabPanel(title = "Smoothing Method",
                           fluidRow(h4('The result of the model'),
                                    tableOutput("smoothresult")),
                           fluidRow(h4('Plot of testing data set (red & black) and forecast values (blue)'),
                                    plotOutput(outputId = 'smoothplot')),
                           fluidRow(h4('The forecast values'),
                                    tableOutput("smoothforecast"))
                  ),
                  tabPanel(title = "Arima",
                           fluidRow(h4('The result of the model'),
                                    tableOutput("arimaresult")),
                           fluidRow(h4('Plot of testing data set (red & black) and forecast values (blue)'),
                                    plotOutput(outputId = 'arimaplot')),
                           fluidRow(h4('The forecast values'),
                                    tableOutput("arimaforecast"))
                  ),
                  tabPanel(title = "Prophet",
                           fluidRow(h4('The result of the model'),
                                    tableOutput("prophetresult")),
                           fluidRow(h4('Plot of testing data set (red & black) and forecast values (blue)'),
                                    plotOutput(outputId = 'prophetplot')),
                           fluidRow(h4('The forecast values'),
                                    tableOutput("prophetforecast"))
                  ),
                  tabPanel(title = "Summary",
                           h4('The results of the models'),
                           tableOutput("summary"))
      )
    )
  )
)
# Define server function required to create the scatterplot
server <- function(input, output, session) {
# Create plot object the plotOutput function is expecting:
# line chart of the user-selected series restricted to the zoom window.
output$tseriesplot <- renderPlot({
  data = df()
  data[, input$datevar] = as.Date(data[, input$datevar])
  # Rows inside the zoom range (zoomin is expressed as % of the series)
  zoom_rows <- round(input$zoomin[1] * nrow(data) / 100):round(input$zoomin[2] * nrow(data) / 100)
  # aes_string() is deprecated in ggplot2; map the user-selected column
  # names through the .data pronoun instead (behaviour-equivalent).
  ggplot(data = data[zoom_rows, ],
         aes(x = .data[[input$datevar]], y = .data[[input$tsvar]])) +
    geom_line() + scale_x_date(date_labels = "%b %y")
})
#Create file content table: echo the uploaded CSV back to the user.
output$contents <- renderTable({
  # input$file1 will be NULL initially. After the user selects
  # and uploads a file, it will be a data frame with 'name',
  # 'size', 'type', and 'datapath' columns. The 'datapath'
  # column will contain the local filenames where the data can
  # be found.
  #inFile <- input$file1
  # df() is a reactive defined elsewhere in this file (not visible in this
  # chunk) that reads the uploaded CSV.
  if (is.null(df()))
    return(NULL)
  df()
})
# For regression method: fit a linear model with trend, quarter and weekday
# dummies plus lagged values on the training split, then roll one-step-ahead
# predictions recursively across the test window (each prediction is fed back
# as lagged input) and report accuracy metrics on the test set.
output$regressionresult <- renderTable({
  data <- df()[,c(input$datevar,input$tsvar)]
  names(data)=c("ds","y")
  data$ds=as.Date(data$ds)
  data$trend = c(1:nrow(df()))
  # Training split = first train.size% of rows
  train.data=data[1:ceiling(nrow(df())*input$train.size/100),]
  # Calendar features (element-wise; could be vectorised)
  for (i in 1:length(train.data$ds)){
    train.data$quarter[i]=(quarters(train.data$ds[i]))
  }
  for (i in 1:length(train.data$ds)){
    train.data$weekday[i]=(weekdays(train.data$ds[i]))
  }
  # One-hot dummies for quarter (Q4 is the baseline) and weekday (Sunday baseline)
  train.data$q1=0
  train.data$q1[train.data$quarter=='Q1']=1
  train.data$q2=0
  train.data$q2[train.data$quarter=='Q2']=1
  train.data$q3=0
  train.data$q3[train.data$quarter=='Q3']=1
  train.data$mon=0
  train.data$mon[train.data$weekday=='Monday']=1
  train.data$tue=0
  train.data$tue[train.data$weekday=='Tuesday']=1
  train.data$wed=0
  train.data$wed[train.data$weekday=='Wednesday']=1
  train.data$thu=0
  train.data$thu[train.data$weekday=='Thursday']=1
  train.data$fri=0
  train.data$fri[train.data$weekday=='Friday']=1
  train.data$sat=0
  train.data$sat[train.data$weekday=='Saturday']=1
  # Lagged target features (dplyr::lag — stats::lag is masked here)
  train.data$lag1=lag(train.data$y,1)
  train.data$lag2=lag(train.data$y,2)
  train.data$lag3=lag(train.data$y,3)
  train.data$lag4=lag(train.data$y,4)
  train.data$lag5=lag(train.data$y,5)
  train.data$lag6=lag(train.data$y,6)
  train.data$lag7=lag(train.data$y,7)
  train.data$lag8=lag(train.data$y,8)
  train.data$lag9=lag(train.data$y,9)
  train.data$lag10=lag(train.data$y,10)
  train.data$lag14=lag(train.data$y,14)
  # NOTE(review): `rm` shadows base::rm within this reactive scope
  rm=lm(y~trend+q1+q2+q3+mon+tue+wed+thu+fri+sat+lag1+lag2+lag6+lag7+lag8+lag14, data = train.data)
  # Recursive one-step-ahead prediction over the test window.
  # NOTE(review): growing train.data with rbind() each iteration is O(n^2);
  # acceptable for short series, preallocate if this becomes slow.
  for (i in 1:(nrow(df())-ceiling(nrow(df())*input$train.size/100))){
    newrow=train.data[nrow(train.data),]
    newrow$trend=newrow$trend+1
    newrow$ds=newrow$ds+1
    newrow$quarter=quarters(newrow$ds)
    newrow$weekday=weekdays(newrow$ds)
    newrow$q1=0
    newrow$q1[newrow$quarter=='Q1']=1
    newrow$q2=0
    newrow$q2[newrow$quarter=='Q2']=1
    newrow$q3=0
    newrow$q3[newrow$quarter=='Q3']=1
    newrow$mon=0
    newrow$mon[newrow$weekday=='Monday']=1
    newrow$tue=0
    newrow$tue[newrow$weekday=='Tuesday']=1
    newrow$wed=0
    newrow$wed[newrow$weekday=='Wednesday']=1
    newrow$thu=0
    newrow$thu[newrow$weekday=='Thursday']=1
    newrow$fri=0
    newrow$fri[newrow$weekday=='Friday']=1
    newrow$sat=0
    newrow$sat[newrow$weekday=='Saturday']=1
    train.data=rbind(train.data,newrow)
    rownames(train.data) = 1:nrow(train.data)
    # Refresh lag features from the (partly predicted) series before predicting
    train.data[nrow(train.data),]$lag1=train.data[nrow(train.data)-1,]$y
    train.data[nrow(train.data),]$lag2=train.data[nrow(train.data)-2,]$y
    train.data[nrow(train.data),]$lag3=train.data[nrow(train.data)-3,]$y
    train.data[nrow(train.data),]$lag4=train.data[nrow(train.data)-4,]$y
    train.data[nrow(train.data),]$lag5=train.data[nrow(train.data)-5,]$y
    train.data[nrow(train.data),]$lag6=train.data[nrow(train.data)-6,]$y
    train.data[nrow(train.data),]$lag7=train.data[nrow(train.data)-7,]$y
    train.data[nrow(train.data),]$lag8=train.data[nrow(train.data)-8,]$y
    train.data[nrow(train.data),]$lag9=train.data[nrow(train.data)-9,]$y
    train.data[nrow(train.data),]$lag10=train.data[nrow(train.data)-10,]$y
    train.data[nrow(train.data),]$lag14=train.data[nrow(train.data)-14,]$y
    train.data[nrow(train.data),]$y=predict(rm, newdata = train.data[nrow(train.data),], interval = "prediction")[1,'fit']
  }
  #restore
  test.data=train.data[(ceiling(nrow(df())*input$train.size/100)+1):nrow(train.data),]
  train.data=data[1:ceiling(nrow(df())*input$train.size/100),]
  # NOTE(review): forecast::accuracy(object, x) expects forecasts first and
  # actuals second; here actuals are passed first — RMSE/MAE are unaffected
  # but MAPE would be scaled by the predictions rather than the actuals.
  # Confirm intent before changing.
  error=accuracy(data[ceiling(nrow(df())*input$train.size/100+1):nrow(df()), 'y'], test.data$y)
  results=data.frame("RMSE"=error['Test set','RMSE'],"MAE"=error['Test set','MAE'],
                     "MAPE"=paste0(round(error['Test set','MAPE'],2), "%"),
                     "Total Records"=nrow(df()),
                     "Training Percentage"=paste0(input$train.size,"%"),
                     "Count Train"=nrow(train.data), "Count Test"=nrow(test.data),
                     "Count Forecast"=input$horizon)
})
# Plot for the regression tab: rebuilds the same feature set as
# regressionresult, produces (a) recursive test-window predictions from a
# model fit on the training split and (b) recursive out-of-sample forecasts
# from a model fit on the full series, then overlays both on the true series.
output$regressionplot <- renderPlot({
  data <- df()[,c(input$datevar,input$tsvar)]
  names(data)=c("ds","y")
  data$ds=as.Date(data$ds)
  data$trend = c(1:nrow(df()))
  dataall = data  # untouched copy of the raw series for plotting
  # Calendar features
  for (i in 1:length(data$ds)){
    data$quarter[i]=(quarters(data$ds[i]))
  }
  for (i in 1:length(data$ds)){
    data$weekday[i]=(weekdays(data$ds[i]))
  }
  # One-hot dummies for quarter and weekday
  data$q1=0
  data$q1[data$quarter=='Q1']=1
  data$q2=0
  data$q2[data$quarter=='Q2']=1
  data$q3=0
  data$q3[data$quarter=='Q3']=1
  data$mon=0
  data$mon[data$weekday=='Monday']=1
  data$tue=0
  data$tue[data$weekday=='Tuesday']=1
  data$wed=0
  data$wed[data$weekday=='Wednesday']=1
  data$thu=0
  data$thu[data$weekday=='Thursday']=1
  data$fri=0
  data$fri[data$weekday=='Friday']=1
  data$sat=0
  data$sat[data$weekday=='Saturday']=1
  # Lagged target features (dplyr::lag)
  data$lag1=lag(data$y,1)
  data$lag2=lag(data$y,2)
  data$lag3=lag(data$y,3)
  data$lag4=lag(data$y,4)
  data$lag5=lag(data$y,5)
  data$lag6=lag(data$y,6)
  data$lag7=lag(data$y,7)
  data$lag8=lag(data$y,8)
  data$lag9=lag(data$y,9)
  data$lag10=lag(data$y,10)
  data$lag14=lag(data$y,14) ## For all data
  train.data=data[1:ceiling(nrow(df())*input$train.size/100),]
  # NOTE(review): `rm` shadows base::rm within this reactive scope
  rm=lm(y~trend+q1+q2+q3+mon+tue+wed+thu+fri+sat+lag1+lag2+lag6+lag7+lag8+lag14, data = train.data)
  # Recursive one-step-ahead predictions over the test window
  for (i in 1:(nrow(df())-ceiling(nrow(df())*input$train.size/100))){
    newrow=train.data[nrow(train.data),]
    newrow$trend=newrow$trend+1
    newrow$ds=newrow$ds+1
    newrow$quarter=quarters(newrow$ds)
    newrow$weekday=weekdays(newrow$ds)
    newrow$q1=0
    newrow$q1[newrow$quarter=='Q1']=1
    newrow$q2=0
    newrow$q2[newrow$quarter=='Q2']=1
    newrow$q3=0
    newrow$q3[newrow$quarter=='Q3']=1
    newrow$mon=0
    newrow$mon[newrow$weekday=='Monday']=1
    newrow$tue=0
    newrow$tue[newrow$weekday=='Tuesday']=1
    newrow$wed=0
    newrow$wed[newrow$weekday=='Wednesday']=1
    newrow$thu=0
    newrow$thu[newrow$weekday=='Thursday']=1
    newrow$fri=0
    newrow$fri[newrow$weekday=='Friday']=1
    newrow$sat=0
    newrow$sat[newrow$weekday=='Saturday']=1
    train.data=rbind(train.data,newrow)
    rownames(train.data) = 1:nrow(train.data)
    train.data[nrow(train.data),]$lag1=train.data[nrow(train.data)-1,]$y
    train.data[nrow(train.data),]$lag2=train.data[nrow(train.data)-2,]$y
    train.data[nrow(train.data),]$lag3=train.data[nrow(train.data)-3,]$y
    train.data[nrow(train.data),]$lag4=train.data[nrow(train.data)-4,]$y
    train.data[nrow(train.data),]$lag5=train.data[nrow(train.data)-5,]$y
    train.data[nrow(train.data),]$lag6=train.data[nrow(train.data)-6,]$y
    train.data[nrow(train.data),]$lag7=train.data[nrow(train.data)-7,]$y
    train.data[nrow(train.data),]$lag8=train.data[nrow(train.data)-8,]$y
    train.data[nrow(train.data),]$lag9=train.data[nrow(train.data)-9,]$y
    train.data[nrow(train.data),]$lag10=train.data[nrow(train.data)-10,]$y
    train.data[nrow(train.data),]$lag14=train.data[nrow(train.data)-14,]$y
    train.data[nrow(train.data),]$y=predict(rm, newdata = train.data[nrow(train.data),], interval = "prediction")[1,'fit']
  }
  test.data=train.data[(ceiling(nrow(df())*input$train.size/100)+1):nrow(train.data),]
  # For test data
  # Recursive out-of-sample forecast for `horizon` days beyond the last
  # observation (still using the model fit on the training split)
  for (i in 1:input$horizon){
    newrow=data[nrow(data),]
    newrow$trend=newrow$trend+1
    newrow$ds=newrow$ds+1
    newrow$quarter=quarters(newrow$ds)
    newrow$weekday=weekdays(newrow$ds)
    newrow$q1=0
    newrow$q1[newrow$quarter=='Q1']=1
    newrow$q2=0
    newrow$q2[newrow$quarter=='Q2']=1
    newrow$q3=0
    newrow$q3[newrow$quarter=='Q3']=1
    newrow$mon=0
    newrow$mon[newrow$weekday=='Monday']=1
    newrow$tue=0
    newrow$tue[newrow$weekday=='Tuesday']=1
    newrow$wed=0
    newrow$wed[newrow$weekday=='Wednesday']=1
    newrow$thu=0
    newrow$thu[newrow$weekday=='Thursday']=1
    newrow$fri=0
    newrow$fri[newrow$weekday=='Friday']=1
    newrow$sat=0
    newrow$sat[newrow$weekday=='Saturday']=1
    data=rbind(data,newrow)
    rownames(data) = 1:nrow(data)
    data[nrow(data),]$lag1=data[nrow(data)-1,]$y
    data[nrow(data),]$lag2=data[nrow(data)-2,]$y
    data[nrow(data),]$lag3=data[nrow(data)-3,]$y
    data[nrow(data),]$lag4=data[nrow(data)-4,]$y
    data[nrow(data),]$lag5=data[nrow(data)-5,]$y
    data[nrow(data),]$lag6=data[nrow(data)-6,]$y
    data[nrow(data),]$lag7=data[nrow(data)-7,]$y
    data[nrow(data),]$lag8=data[nrow(data)-8,]$y
    data[nrow(data),]$lag9=data[nrow(data)-9,]$y
    data[nrow(data),]$lag10=data[nrow(data)-10,]$y
    data[nrow(data),]$lag14=data[nrow(data)-14,]$y
    data[nrow(data),]$y=predict(rm, newdata = data[nrow(data),],
                                interval = "prediction")[1,'fit']
  }
  forecast <- data[(nrow(df())+1):(nrow(df())+input$horizon),]
  # Overlay: true series (zoomed), test-window predictions, future forecasts
  ggplot(data = forecast, aes(x=ds, y=y, color = 'Forecasting \n values'))+
    geom_line()+
    geom_line(data = test.data, aes(x=ds, y=y, color = 'Testing \n set'))+
    geom_line(data = dataall[round(input$zoomin[1]*nrow(dataall)/100):round(input$zoomin[2]*nrow(dataall)/100),]
              , aes(x=ds, y=y, color = 'True \n values'))+
    scale_x_date(date_labels = "%b %y")+
    scale_color_manual(name = "", values = c("True \n values" = "black", 'Testing \n set' = 'deepskyblue2',
                                             'Forecasting \n values' = 'firebrick2'))+
    ylab('Value')
})
# Forecast table for the regression tab: same feature engineering and
# recursive forecasting as regressionplot, additionally collecting the
# per-step prediction intervals (lwr/upr) for display.
output$regressionforecast <- renderTable({
  data <- df()[,c(input$datevar,input$tsvar)]
  names(data)=c("ds","y")
  data$ds=as.Date(data$ds)
  data$trend = c(1:nrow(df()))
  # Calendar features
  for (i in 1:length(data$ds)){
    data$quarter[i]=(quarters(data$ds[i]))
  }
  for (i in 1:length(data$ds)){
    data$weekday[i]=(weekdays(data$ds[i]))
  }
  # One-hot dummies for quarter and weekday
  data$q1=0
  data$q1[data$quarter=='Q1']=1
  data$q2=0
  data$q2[data$quarter=='Q2']=1
  data$q3=0
  data$q3[data$quarter=='Q3']=1
  data$mon=0
  data$mon[data$weekday=='Monday']=1
  data$tue=0
  data$tue[data$weekday=='Tuesday']=1
  data$wed=0
  data$wed[data$weekday=='Wednesday']=1
  data$thu=0
  data$thu[data$weekday=='Thursday']=1
  data$fri=0
  data$fri[data$weekday=='Friday']=1
  data$sat=0
  data$sat[data$weekday=='Saturday']=1
  # Lagged target features (dplyr::lag)
  data$lag1=lag(data$y,1)
  data$lag2=lag(data$y,2)
  data$lag3=lag(data$y,3)
  data$lag4=lag(data$y,4)
  data$lag5=lag(data$y,5)
  data$lag6=lag(data$y,6)
  data$lag7=lag(data$y,7)
  data$lag8=lag(data$y,8)
  data$lag9=lag(data$y,9)
  data$lag10=lag(data$y,10)
  data$lag14=lag(data$y,14) ## For all data
  train.data=data[1:ceiling(nrow(df())*input$train.size/100),]
  # NOTE(review): `rm` shadows base::rm within this reactive scope
  rm=lm(y~trend+q1+q2+q3+mon+tue+wed+thu+fri+sat+lag1+lag2+lag6+lag7+lag8+lag14, data = train.data)
  # Prediction-interval bounds accumulated per forecast step
  # (NOTE(review): grown with c() in the loop — fine for small horizons)
  lwr = c()
  upr = c()
  for (i in 1:input$horizon){
    newrow=data[nrow(data),]
    newrow$trend=newrow$trend+1
    newrow$ds=newrow$ds+1
    newrow$quarter=quarters(newrow$ds)
    newrow$weekday=weekdays(newrow$ds)
    newrow$q1=0
    newrow$q1[newrow$quarter=='Q1']=1
    newrow$q2=0
    newrow$q2[newrow$quarter=='Q2']=1
    newrow$q3=0
    newrow$q3[newrow$quarter=='Q3']=1
    newrow$mon=0
    newrow$mon[newrow$weekday=='Monday']=1
    newrow$tue=0
    newrow$tue[newrow$weekday=='Tuesday']=1
    newrow$wed=0
    newrow$wed[newrow$weekday=='Wednesday']=1
    newrow$thu=0
    newrow$thu[newrow$weekday=='Thursday']=1
    newrow$fri=0
    newrow$fri[newrow$weekday=='Friday']=1
    newrow$sat=0
    newrow$sat[newrow$weekday=='Saturday']=1
    data=rbind(data,newrow)
    rownames(data) = 1:nrow(data)
    data[nrow(data),]$lag1=data[nrow(data)-1,]$y
    data[nrow(data),]$lag2=data[nrow(data)-2,]$y
    data[nrow(data),]$lag3=data[nrow(data)-3,]$y
    data[nrow(data),]$lag4=data[nrow(data)-4,]$y
    data[nrow(data),]$lag5=data[nrow(data)-5,]$y
    data[nrow(data),]$lag6=data[nrow(data)-6,]$y
    data[nrow(data),]$lag7=data[nrow(data)-7,]$y
    data[nrow(data),]$lag8=data[nrow(data)-8,]$y
    data[nrow(data),]$lag9=data[nrow(data)-9,]$y
    data[nrow(data),]$lag10=data[nrow(data)-10,]$y
    data[nrow(data),]$lag14=data[nrow(data)-14,]$y
    data[nrow(data),]$y=predict(rm, newdata = data[nrow(data),],
                                interval = "prediction")[1,'fit']
    lwr=c(lwr, predict(rm, newdata = data[nrow(data),],
                       interval = "prediction")[1,'lwr'])
    upr=c(upr, predict(rm, newdata = data[nrow(data),],
                       interval = "prediction")[1,'upr'])
  }
  # Assemble ds / yhat / yhat_lower / yhat_upper table for rendering
  forecast <- data[(nrow(df())+1):(nrow(df())+input$horizon),'y']
  forecastdates <- seq(from=(as.Date(data$ds[nrow(df())])+1), by=1, length.out=input$horizon)
  forecastdates <- as.character(forecastdates)
  forecastvalue <- data.frame('ds'=forecastdates,
                              'yhat'=forecast,
                              'yhat_lower'=lwr,
                              'yhat_upper'=upr)
})
# For classical decomposition method: multiple-seasonal decomposition (mstl)
# on the training split with weekly/bi-weekly/monthly periods, forecast over
# the test window, and report test-set accuracy.
output$decompositionresult <- renderTable({
  train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
  test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
  names(train.data)=c("ds","y")
  names(test.data)=c("ds","y")
  train.data$ds=format(as.Date(train.data$ds))
  test.data$ds=format(as.Date(test.data$ds))
  # Multi-seasonal time series: weekly (7), bi-weekly (14), ~monthly (30.5)
  ms=msts(train.data$y,seasonal.periods=c(7,2*7,30.5),ts.frequency=30.5)
  model=mstl(ms) # lambda = 0 do the multiplicative decomposition
  # level = 0: point forecasts only, no interval needed for the metrics
  fc=forecast(model,h = nrow(test.data), level = 0,allow.multiplicative.trend = T)
  trainerror=data.frame(accuracy(fc$x,fc$fitted))
  testerror=data.frame(accuracy(fc$mean,test.data$y))
  results=data.frame("RMSE"=testerror$RMSE,"MAE"=testerror$MAE,
                     "MAPE"=paste0(round(testerror$MAPE,2), "%"),
                     "Total Records"=nrow(df()),
                     "Training Percentage"=paste0(input$train.size,"%"),
                     "Count Train"=nrow(train.data), "Count Test"=nrow(test.data),
                     "Count Forecast"=input$horizon)
})
# Plot for the decomposition tab: mstl fit on the full series for the future
# forecast, plus an mstl fit on the training split for the test-window
# predictions; both overlaid on the (zoomed) true series.
output$decompositionplot <- renderPlot({
  data <- df()[,c(input$datevar,input$tsvar)]
  names(data)=c("ds","y")
  data$ds=as.Date(data$ds)
  train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
  test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
  names(train.data)=c("ds","y")
  names(test.data)=c("ds","y")
  train.data$ds=format(as.Date(train.data$ds))
  test.data$ds=format(as.Date(test.data$ds))
  # Full-series model -> out-of-sample forecast for the chosen horizon
  msall=msts(data$y,seasonal.periods=c(7,2*7,30.5),ts.frequency=30.5)
  model2=mstl(msall)
  forecast=forecast(model2,h = input$horizon, level = 0,allow.multiplicative.trend = T)
  # Training-split model -> predictions over the test window
  ms=msts(train.data$y,seasonal.periods=c(7,2*7,30.5),ts.frequency=30.5)
  model=mstl(ms) # lambda = 0 do the multiplicative decomposition
  testresult=forecast(model,h = nrow(test.data), level = 0,allow.multiplicative.trend = T)
  # Zoom window expressed as observation positions within cycle 1
  data2 <- window(msall,start=c(1,round(input$zoomin[1]*nrow(data)/100)),
                  end=c(1,round(input$zoomin[2]*nrow(data)/100)))
  autoplot(data2, series = "True \n values", col = "black")+
    autolayer(testresult$mean,series = "Testing \n set")+
    autolayer(forecast$mean,series="Forecasting \n values")+
    ylab('Value')+theme_bw()
})
# Forecast table for the decomposition tab: mstl on the full series, then a
# horizon-length forecast with 95% intervals rendered as ds/yhat/bounds.
output$decompositionforecast <- renderTable({
  data=df()[,c(input$datevar,input$tsvar)]
  names(data)=c("ds","y")
  data$ds=format(as.Date(data$ds))
  # train/test splits computed but not used below — kept for parity with the
  # sibling handlers (NOTE(review): candidates for removal)
  train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
  test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
  names(train.data)=c("ds","y")
  names(test.data)=c("ds","y")
  msall=msts(data$y,seasonal.periods=c(7,2*7,30.5),ts.frequency=30.5)
  model2=mstl(msall)
  forecastall=forecast(model2,h = input$horizon, level = 95,allow.multiplicative.trend = T)
  # Forecast dates continue daily from the last observed date
  forecastdates <- seq(from=(as.Date(data$ds[nrow(data)])+1), by=1, length.out=input$horizon)
  forecastdates <- as.character(forecastdates)
  forecastvalue <- data.frame('ds'=forecastdates,
                              'yhat'=forecastall[['mean']][1:input$horizon],
                              'yhat_lower'=forecastall[['lower']][1:input$horizon],
                              'yhat_upper'=forecastall[['upper']][1:input$horizon])
})
# For smoothing method: exponential smoothing state-space model (ets) fit on
# the training split, forecast over the test window, test-set accuracy table.
output$smoothresult <- renderTable({
  train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
  test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
  names(train.data)=c("ds","y")
  names(test.data)=c("ds","y")
  # Weekly-frequency ts objects.
  # NOTE(review): start=c(1, n+2) for the test series looks like an
  # off-by-one (n+1 expected for the observation after training) — verify
  # the alignment used by accuracy().
  train.data <- ts(train.data$y, start=c(1,1), frequency = 7)
  test.data <- ts(test.data$y, start=c(1,ceiling(nrow(df())*input$train.size/100)+2), frequency = 7)
  m_smooth <- ets(train.data, allow.multiplicative.trend = T)
  forecast <- forecast(m_smooth,h=nrow(df())-ceiling(nrow(df())*input$train.size/100),level=95)
  error=accuracy(forecast, test.data)
  results=data.frame("RMSE"=error['Test set','RMSE'],"MAE"=error['Test set','MAE'],
                     "MAPE"=paste0(round(error['Test set','MAPE'],2), "%"),
                     "Total Records"=nrow(df()),
                     "Training Percentage"=paste0(input$train.size,"%"),
                     "Count Train"=length(train.data), "Count Test"=length(test.data),
                     "Count Forecast"=input$horizon
  )
})
# Plot for the smoothing tab: ets on the full series for the future forecast,
# ets on the training split for test-window predictions, overlaid on the
# (zoomed) true series.
output$smoothplot <- renderPlot({
  data <- df()[,c(input$datevar,input$tsvar)]
  names(data)=c("ds","y")
  data1 <- ts(data$y, start=c(1,1), frequency = 7)
  m_smooth1 <- ets(data1, allow.multiplicative.trend = T)
  forecast <- forecast(m_smooth1, h=input$horizon,level=95)
  train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
  test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
  names(train.data)=c("ds","y")
  names(test.data)=c("ds","y")
  train.data <- ts(train.data$y, start=c(1,1), frequency = 7)
  test.data <- ts(test.data$y, start=c(1,ceiling(nrow(data)*input$train.size/100)+2), frequency = 7)
  m_smooth2 <- ets(train.data, allow.multiplicative.trend = T)
  testresult <- forecast(m_smooth2,h=nrow(data)-ceiling(nrow(data)*input$train.size/100),level=95)
  # Zoom window expressed as observation positions within cycle 1
  data2 <- window(data1,start=c(1,round(input$zoomin[1]*nrow(data)/100)),
                  end=c(1,round(input$zoomin[2]*nrow(data)/100)))
  autoplot(data2, series = "True \n values", col = "black")+
    autolayer(testresult$mean,series = "Testing \n set")+
    autolayer(forecast$mean,series="Forecasting \n values")+
    ylab('Value')+theme_bw()
})
# Forecast table for the smoothing tab: ets on the full series, horizon-length
# forecast with 95% intervals rendered as ds / yhat / yhat_lower / yhat_upper.
# Fixed: the previous single-bracket extractions (forecast['mean'] etc.)
# returned one-element lists, so data.frame() produced compound column names
# (e.g. "yhat.mean") and matrix columns; values are now extracted with `$`
# and coerced with as.numeric(), matching the arimaforecast handler.
output$smoothforecast <- renderTable({
  data <- df()[,c(input$datevar,input$tsvar)]
  names(data)=c("ds","y")
  data1 <- ts(data$y, start=c(1,1), frequency = 7)
  m_smooth1 <- ets(data1, allow.multiplicative.trend = T)
  forecast <- forecast(m_smooth1, h=input$horizon,level=95)
  forecastdates <- seq(from=(as.Date(data$ds[nrow(data)])+1), by=1, length.out=input$horizon)
  forecastdates <- as.character(forecastdates)
  forecastvalue <- data.frame('ds'=forecastdates,
                              'yhat'=as.numeric(forecast$mean),
                              'yhat_lower'=as.numeric(forecast$lower),
                              'yhat_upper'=as.numeric(forecast$upper))
})
# For Arima method: fixed-order seasonal ARIMA (3,1,1)(0,1,1)[7] on the
# log-transformed training split; forecasts are back-transformed with exp()
# before computing test-set accuracy.
# NOTE(review): log() requires a strictly positive series — zeros or
# negatives in the data will break this tab; confirm input assumptions.
output$arimaresult <- renderTable({
  train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
  test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
  names(train.data)=c("ds","y")
  names(test.data)=c("ds","y")
  train.data <- ts(train.data$y, start=c(1,1), frequency = 7)
  test.data <- ts(test.data$y, start=c(1,ceiling(nrow(df())*input$train.size/100)+2), frequency = 7)
  m_arima <- Arima(log(train.data), order=c(3,1,1),seasonal=c(0,1,1))
  forecast <- forecast(m_arima,h=nrow(df())-ceiling(nrow(df())*input$train.size/100),level=95)
  error=accuracy(exp(forecast$mean), test.data)
  data.frame("RMSE"=error['Test set','RMSE'],"MAE"=error['Test set','MAE'],
             "MAPE"=paste0(round(error['Test set','MAPE'],2), "%"),
             "Total Records"=nrow(df()),
             "Training Percentage"=paste0(input$train.size,"%"),
             "Count Train"=length(train.data), "Count Test"=length(test.data),
             "Count Forecast"=input$horizon
  )
})
# Plot for the Arima tab: seasonal ARIMA on the log series (full data for the
# future forecast, training split for test-window predictions); both
# back-transformed with exp() and overlaid on the (zoomed) true series.
output$arimaplot <- renderPlot({
  data <- df()[,c(input$datevar,input$tsvar)]
  names(data)=c("ds","y")
  data1 <- ts(data$y, start=c(1,1), frequency = 7)
  m_arima1 <- Arima(log(data1), order=c(3,1,1),seasonal=c(0,1,1))
  forecast <- forecast(m_arima1, h=input$horizon,level=95)
  train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
  test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
  names(train.data)=c("ds","y")
  names(test.data)=c("ds","y")
  train.data <- ts(train.data$y, start=c(1,1), frequency = 7)
  test.data <- ts(test.data$y, start=c(1,ceiling(nrow(data)*input$train.size/100)+2), frequency = 7)
  m_arima2 <- Arima(log(train.data), order=c(3,1,1),seasonal=c(0,1,1))
  testresult <- forecast(m_arima2,h=nrow(data)-ceiling(nrow(data)*input$train.size/100),level=95)
  # Zoom window expressed as observation positions within cycle 1
  data2 <- window(data1,start=c(1,round(input$zoomin[1]*nrow(data)/100)),
                  end=c(1,round(input$zoomin[2]*nrow(data)/100)))
  autoplot(data2, series = "True \n values", col = "black")+
    autolayer(exp(testresult$mean),series = "Testing \n set")+
    autolayer(exp(forecast$mean),series="Forecasting \n values")+
    ylab('Value')+theme_bw()
})
# Forecast table for the Arima tab: seasonal ARIMA on the full log series,
# horizon-length forecast back-transformed with exp(), rendered with 95%
# interval bounds.
output$arimaforecast <- renderTable({
  data <- df()[,c(input$datevar,input$tsvar)]
  names(data)=c("ds","y")
  data1 <- ts(data$y, start=c(1,1), frequency = 7)
  m_arima1 <- Arima(log(data1), order=c(3,1,1),seasonal=c(0,1,1))
  forecast <- forecast(m_arima1, h=input$horizon,level=95)
  forecastdates <- seq(from=(as.Date(data$ds[nrow(data)])+1), by=1, length.out=input$horizon)
  forecastdates <- as.character(forecastdates)
  # lower/upper are one-column matrices at level 95; linear indexing flattens them
  forecastvalue <- data.frame('ds'=forecastdates,
                              'yhat'=exp(forecast$mean),
                              'yhat_lower'=exp(forecast$lower[1:input$horizon]),
                              'yhat_upper'=exp(forecast$upper[1:input$horizon]))
})
# For prophet: fit Prophet on the training split, predict through the test
# window, and report hand-computed RMSE/MAE/MAPE on the test set.
output$prophetresult <- renderTable({
  train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
  test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
  names(train.data)=c("ds","y")
  names(test.data)=c("ds","y")
  train.data$ds=format(as.Date(train.data$ds))
  test.data$ds=format(as.Date(test.data$ds))
  # Hand-tuned prior scales — presumably chosen for a specific dataset;
  # confirm before reusing on other series.
  m_prophet <- prophet(train.data,
                       growth = 'linear',
                       seasonality.mode = 'multiplicative',
                       changepoint.prior.scale = 30,
                       seasonality.prior.scale = 35
  )
  future <- make_future_dataframe(m_prophet, periods = nrow(df())-ceiling(nrow(df())*input$train.size/100))
  forecast <- predict(m_prophet, future)
  forecast$ds=format(forecast$ds)
  # Function that returns Root Mean Squared Error
  rmse <- function(error)
  {
    sqrt(mean(error^2))
  }
  # Function that returns Mean Absolute Error
  mae <- function(error)
  {
    mean(abs(error))
  }
  # Function that returns Mean Absolute Percentage Error (as a fraction;
  # multiplied by 100 below)
  mape <- function(error, truevalue)
  {
    mean(abs(error/truevalue))
  }
  # Test-window errors: predicted minus actual
  error=forecast[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),'yhat']-test.data$y
  results=data.frame("RMSE"=rmse(error),"MAE"=mae(error),
                     "MAPE"=paste0(round(mape(error, test.data$y)*100,2), "%"),
                     "Total Records"=nrow(df()),
                     "Training Percentage"=paste0(input$train.size,"%"),
                     "Count Train"=nrow(train.data), "Count Test"=nrow(test.data),
                     "Count Forecast"=input$horizon)
})
# Prophet tab, plot: true values (zoomed), test-period predictions, and
# out-of-sample forecast drawn as three colored lines.
output$prophetplot <- renderPlot({
data <- df()[,c(input$datevar,input$tsvar)]
names(data)=c("ds","y")
data$ds=as.Date(data$ds)
# NOTE(review): unlike prophetresult, the model here is fit on ALL the data,
# so the "Testing set" line is in-sample prediction — confirm this is intended.
m_prophet <- prophet(data,
growth = 'linear',
seasonality.mode = 'multiplicative',
changepoint.prior.scale = 30,
seasonality.prior.scale = 35
)
# Future-only frame: forecast holds just the input$horizon days past the data
future <- make_future_dataframe(m_prophet, periods = input$horizon, include_history = FALSE)
forecast <- predict(m_prophet, future)
forecast$ds=as.Date(forecast$ds)
# Dates of the test portion (rows after the training cut; note round() here
# vs ceiling() elsewhere — off-by-one possible at some train sizes)
testdata <- data.frame(data[-(1:round(nrow(data)*input$train.size/100)),]$ds)
names(testdata)=c('ds')
testresult <- predict(m_prophet, testdata)
testresult$ds=as.Date(testresult$ds)
ggplot(data = forecast, aes(x=ds, y=yhat, color = 'Forecasting \n values'))+
geom_line()+
geom_line(data = testresult, aes(x=ds, y=yhat, color = 'Testing \n set'))+
geom_line(data = data[round(input$zoomin[1]*nrow(data)/100):round(input$zoomin[2]*nrow(data)/100),]
, aes(x=ds, y=y, color = 'True \n values'))+
scale_x_date(date_labels = "%b %y")+
scale_color_manual(name = "", values = c("True \n values" = "black", 'Testing \n set' = 'deepskyblue2',
'Forecasting \n values' = 'firebrick2'))+
ylab('Value')
})
# Table of the Prophet forecast for the requested horizon (model fit on all data).
output$prophetforecast <- renderTable({
  history <- df()[, c(input$datevar, input$tsvar)]
  names(history) <- c("ds", "y")
  history$ds <- format(as.Date(history$ds))
  model <- prophet(history,
                   growth = 'linear',
                   seasonality.mode = 'multiplicative',
                   changepoint.prior.scale = 30,
                   seasonality.prior.scale = 35)
  horizon_frame <- make_future_dataframe(model, periods = input$horizon)
  predicted <- predict(model, horizon_frame)
  predicted$ds <- format(predicted$ds)
  # predict() returns history + future rows; keep only the future ones.
  tail(predicted[c('ds', 'yhat', 'yhat_lower', 'yhat_upper')], input$horizon)
})
# For summary
# Summary tab: fit all five models on the training split, score each on the
# held-out test split, and show RMSE / MAE / MAPE for every model in one table.
output$summary <- renderTable({
# --- Model 1: regression with trend, quarter/weekday dummies and lag terms ---
data <- df()[,c(input$datevar,input$tsvar)]
names(data)=c("ds","y")
data$ds=as.Date(data$ds)
data$trend = c(1:nrow(df()))
train.data=data[1:ceiling(nrow(df())*input$train.size/100),]
# Quarter / weekday label per row (used to build seasonal dummy variables)
for (i in 1:length(train.data$ds)){
train.data$quarter[i]=(quarters(train.data$ds[i]))
}
for (i in 1:length(train.data$ds)){
train.data$weekday[i]=(weekdays(train.data$ds[i]))
}
# One-hot seasonal dummies; Q4 and Sunday act as the reference levels
train.data$q1=0
train.data$q1[train.data$quarter=='Q1']=1
train.data$q2=0
train.data$q2[train.data$quarter=='Q2']=1
train.data$q3=0
train.data$q3[train.data$quarter=='Q3']=1
train.data$mon=0
train.data$mon[train.data$weekday=='Monday']=1
train.data$tue=0
train.data$tue[train.data$weekday=='Tuesday']=1
train.data$wed=0
train.data$wed[train.data$weekday=='Wednesday']=1
train.data$thu=0
train.data$thu[train.data$weekday=='Thursday']=1
train.data$fri=0
train.data$fri[train.data$weekday=='Friday']=1
train.data$sat=0
train.data$sat[train.data$weekday=='Saturday']=1
# Lagged copies of y used as autoregressive predictors
train.data$lag1=lag(train.data$y,1)
train.data$lag2=lag(train.data$y,2)
train.data$lag3=lag(train.data$y,3)
train.data$lag4=lag(train.data$y,4)
train.data$lag5=lag(train.data$y,5)
train.data$lag6=lag(train.data$y,6)
train.data$lag7=lag(train.data$y,7)
train.data$lag8=lag(train.data$y,8)
train.data$lag9=lag(train.data$y,9)
train.data$lag10=lag(train.data$y,10)
train.data$lag14=lag(train.data$y,14)
# NOTE(review): `rm` shadows base::rm inside this reactive's scope
rm=lm(y~trend+q1+q2+q3+mon+tue+wed+thu+fri+sat+lag1+lag2+lag6+lag7+lag8+lag14, data = train.data)
# Iterative one-step-ahead forecasting over the test period: each appended row
# is built from the previous rows' (partly predicted) y values
for (i in 1:(nrow(df())-ceiling(nrow(df())*input$train.size/100))){
newrow=train.data[nrow(train.data),]
newrow$trend=newrow$trend+1
newrow$ds=newrow$ds+1
newrow$quarter=quarters(newrow$ds)
newrow$weekday=weekdays(newrow$ds)
newrow$q1=0
newrow$q1[newrow$quarter=='Q1']=1
newrow$q2=0
newrow$q2[newrow$quarter=='Q2']=1
newrow$q3=0
newrow$q3[newrow$quarter=='Q3']=1
newrow$mon=0
newrow$mon[newrow$weekday=='Monday']=1
newrow$tue=0
newrow$tue[newrow$weekday=='Tuesday']=1
newrow$wed=0
newrow$wed[newrow$weekday=='Wednesday']=1
newrow$thu=0
newrow$thu[newrow$weekday=='Thursday']=1
newrow$fri=0
newrow$fri[newrow$weekday=='Friday']=1
newrow$sat=0
newrow$sat[newrow$weekday=='Saturday']=1
train.data=rbind(train.data,newrow)
rownames(train.data) = 1:nrow(train.data)
# Refresh the lag columns of the new row from the rows already in the frame
train.data[nrow(train.data),]$lag1=train.data[nrow(train.data)-1,]$y
train.data[nrow(train.data),]$lag2=train.data[nrow(train.data)-2,]$y
train.data[nrow(train.data),]$lag3=train.data[nrow(train.data)-3,]$y
train.data[nrow(train.data),]$lag4=train.data[nrow(train.data)-4,]$y
train.data[nrow(train.data),]$lag5=train.data[nrow(train.data)-5,]$y
train.data[nrow(train.data),]$lag6=train.data[nrow(train.data)-6,]$y
train.data[nrow(train.data),]$lag7=train.data[nrow(train.data)-7,]$y
train.data[nrow(train.data),]$lag8=train.data[nrow(train.data)-8,]$y
train.data[nrow(train.data),]$lag9=train.data[nrow(train.data)-9,]$y
train.data[nrow(train.data),]$lag10=train.data[nrow(train.data)-10,]$y
train.data[nrow(train.data),]$lag14=train.data[nrow(train.data)-14,]$y
train.data[nrow(train.data),]$y=predict(rm, newdata = train.data[nrow(train.data),], interval = "prediction")[1,'fit']
}
# restore: split the extended frame into predicted test rows and original train rows
test.data=train.data[(ceiling(nrow(df())*input$train.size/100)+1):nrow(train.data),]
train.data=data[1:ceiling(nrow(df())*input$train.size/100),]
# NOTE(review): accuracy() expects forecasts first and actuals second; here the
# actual series is passed first — confirm MAPE's denominator is the intended one.
error=accuracy(data[ceiling(nrow(df())*input$train.size/100+1):nrow(df()), 'y'], test.data$y)
model1 = 'Regression'
rmse1 = error['Test set','RMSE']
mae1 = error['Test set','MAE']
mape1 = paste0(round(error['Test set','MAPE'],2), "%")
# --- Model 2: classical (MSTL) decomposition with multi-seasonal periods ---
train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
names(train.data)=c("ds","y")
names(test.data)=c("ds","y")
train.data$ds=format(as.Date(train.data$ds))
test.data$ds=format(as.Date(test.data$ds))
ms=msts(train.data$y,seasonal.periods=c(7,2*7,30.5),ts.frequency=30.5)
model=mstl(ms) # lambda = 0 do the multiplicative decomposition
fc=forecast(model,h = nrow(test.data), level = 0,allow.multiplicative.trend = T)
trainerror=data.frame(accuracy(fc$x,fc$fitted))
testerror=data.frame(accuracy(fc$mean,test.data$y))
model2 = 'Classical Decomposition'
rmse2 = testerror$RMSE
mae2 = testerror$MAE
mape2 = paste0(round(testerror$MAPE,2), "%")
# --- Model 3: exponential smoothing (ETS) on a weekly-frequency series ---
train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
names(train.data)=c("ds","y")
names(test.data)=c("ds","y")
train.data <- ts(train.data$y, start=c(1,1), frequency = 7)
test.data <- ts(test.data$y, start=c(1,ceiling(nrow(df())*input$train.size/100)+2), frequency = 7)
m_smooth <- ets(train.data, allow.multiplicative.trend = T)
forecast <- forecast(m_smooth,h=nrow(df())-ceiling(nrow(df())*input$train.size/100),level=95)
error=accuracy(forecast, test.data)
model3 = 'Smoothing Method'
rmse3 = error['Test set','RMSE']
mae3 = error['Test set','MAE']
mape3 = paste0(round(error['Test set','MAPE'],2), "%")
# --- Model 4: seasonal ARIMA(3,1,1)(0,1,1)[7] fitted on the log scale ---
train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
names(train.data)=c("ds","y")
names(test.data)=c("ds","y")
train.data <- ts(train.data$y, start=c(1,1), frequency = 7)
test.data <- ts(test.data$y, start=c(1,ceiling(nrow(df())*input$train.size/100)+2), frequency = 7)
m_arima <- Arima(log(train.data), order=c(3,1,1),seasonal=c(0,1,1))
forecast <- forecast(m_arima,h=nrow(df())-ceiling(nrow(df())*input$train.size/100),level=95)
# exp() back-transforms the log-scale forecasts before scoring
error=accuracy(exp(forecast$mean), test.data)
model4 = 'Arima'
rmse4 = error['Test set','RMSE']
mae4 = error['Test set','MAE']
mape4 = paste0(round(error['Test set','MAPE'],2), "%")
# --- Model 5: Prophet with multiplicative seasonality ---
train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
names(train.data)=c("ds","y")
names(test.data)=c("ds","y")
train.data$ds=format(as.Date(train.data$ds))
test.data$ds=format(as.Date(test.data$ds))
m_prophet <- prophet(train.data,
growth = 'linear',
seasonality.mode = 'multiplicative',
changepoint.prior.scale = 30,
seasonality.prior.scale = 35
)
future <- make_future_dataframe(m_prophet, periods = nrow(df())-ceiling(nrow(df())*input$train.size/100))
forecast <- predict(m_prophet, future)
forecast$ds=format(forecast$ds)
# Function that returns Root Mean Squared Error
rmse <- function(error)
{
sqrt(mean(error^2))
}
# Function that returns Mean Absolute Error
mae <- function(error)
{
mean(abs(error))
}
# Function that returns Mean Absolute Percentage Error
mape <- function(error, truevalue)
{
mean(abs(error/truevalue))
}
error=forecast[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),'yhat']-test.data$y
model5 = 'Prophet'
rmse5 = rmse(error)
mae5 = mae(error)
mape5 = paste0(round(mape(error, test.data$y)*100,2), "%")
# Assemble the comparison table shown in the Summary tab
results=data.frame("Models"=c(model1, model2, model3, model4, model5),
"RMSE"=c(rmse1,rmse2,rmse3,rmse4,rmse5),
"MAE"=c(mae1,mae2,mae3,mae4,mae5),
"MAPE"=c(mape1,mape2,mape3,mape4,mape5),
"Total Records"=nrow(df()),
"Training Percentage"=paste0(input$train.size,"%"),
"Count Train"=nrow(train.data), "Count Test"=nrow(test.data),
"Count Forecast"=input$horizon)
})
# Reactive objects
# Uploaded CSV as a data frame; waits until a file has been chosen.
df <- reactive({
  file_info <- input$file1
  req(file_info)
  read.csv(file_info$datapath, header = input$header)
})
# Refresh the variable pickers whenever a new file is loaded.
observeEvent(df(), {
  cols <- colnames(df())
  updateSelectInput(session, "datevar", choices = cols)
  updateSelectInput(session, "tsvar", choices = cols, selected = cols[2])
})
}
# Create and launch the Shiny app object from the ui and server defined above
shinyApp(ui = ui, server = server)
| /Models_Shiny.R | no_license | xiaoyan-lin/TimeSeriesDashboard-RShiny | R | false | false | 43,062 | r | library(shiny)
library(shinythemes)
library(readr)
library(ggplot2)
library(stringr)
library(dplyr)
library(DT)
library(tools)
library(prophet)
library(forecast)
# Define UI for application that plots features of movies
# UI layout: a sidebar with file upload, variable pickers and model parameters,
# and a main panel with one tab per forecasting model plus a summary tab.
ui <- fluidPage(
titlePanel("Forecasting", windowTitle = "Forecasting"),
# Sidebar layout with input and output definitions
sidebarLayout(
# Inputs
sidebarPanel(
fileInput("file1", "Load File (Only csv files and only time series per day)",
accept = c(
"text/csv",
"text/comma-separated-values,text/plain",
".csv")),
checkboxInput("header", "Header", TRUE),
h4("Select Variables"), # Section header for variable selection
# Select variable for date
# NOTE(review): at build time `df` here is not the reactive (it resolves to
# stats::df), so choices start empty; the server's updateSelectInput fills them.
selectInput(inputId = "datevar",
label = "Date (format:YYYY-MM-DD):",
choices = names(df)),
# Select variable for Time Series
selectInput(inputId = "tsvar",
label = "Time Series:",
choices = names(df)),
# Set forecasting horizon
sliderInput(inputId = "horizon",
label = "Days ahead for forecasting:",
min = 0, max = 100,
value = 30),
#Parameters
h4("Select Parameters"),
sliderInput(inputId = "train.size",
label = "Training sample size(%):",
min = 0, max = 100,
value = 80),
# Set Zoom in
sliderInput(inputId = "zoomin",
label = "Choose time period to zoom in on all the plots(%)",
min = 0, max = 100,
value = c(0,100))
),
# Output: one tab per model, each with a result table, a plot and a forecast table
mainPanel(
tabsetPanel(id = "tabspanel", type = "tabs",
tabPanel(title = "File Content",
tableOutput("contents")),
tabPanel(title = "Data Visualization",
plotOutput(outputId = "tseriesplot")),
tabPanel(title = "Regression",
fluidRow(h4('The result of the model'),
tableOutput("regressionresult")),
fluidRow(h4('Plot of testing data set (red & black) and forecast values (blue)'),
plotOutput(outputId = 'regressionplot')),
fluidRow(h4('The forecast values'),
tableOutput("regressionforecast"))),
tabPanel(title = "Classical Decomposition",
fluidRow(h4('The result of the model'),
tableOutput("decompositionresult")),
fluidRow(h4('Plot of testing data set (red & black) and forecast values (blue)'),
plotOutput(outputId = 'decompositionplot')),
fluidRow(h4('The forecast values'),
tableOutput("decompositionforecast"))),
tabPanel(title = "Smoothing Method",
fluidRow(h4('The result of the model'),
tableOutput("smoothresult")),
fluidRow(h4('Plot of testing data set (red & black) and forecast values (blue)'),
plotOutput(outputId = 'smoothplot')),
fluidRow(h4('The forecast values'),
tableOutput("smoothforecast"))
),
tabPanel(title = "Arima",
fluidRow(h4('The result of the model'),
tableOutput("arimaresult")),
fluidRow(h4('Plot of testing data set (red & black) and forecast values (blue)'),
plotOutput(outputId = 'arimaplot')),
fluidRow(h4('The forecast values'),
tableOutput("arimaforecast"))
),
tabPanel(title = "Prophet",
fluidRow(h4('The result of the model'),
tableOutput("prophetresult")),
fluidRow(h4('Plot of testing data set (red & black) and forecast values (blue)'),
plotOutput(outputId = 'prophetplot')),
fluidRow(h4('The forecast values'),
tableOutput("prophetforecast"))
),
tabPanel(title = "Summary",
h4('The results of the models'),
tableOutput("summary"))
)
)
)
)
# Define server function required to create the scatterplot
server <- function(input, output, session) {
# Create plot object the plotOutput function is expecting:
# raw series over the zoom window selected by the user.
output$tseriesplot <- renderPlot({
  data = df()
  data[, input$datevar] = as.Date(data[, input$datevar])
  # Zoom slider gives percentages of the row count; slice the frame accordingly.
  # Fix: the .data pronoun replaces the deprecated aes_string() for mapping
  # columns whose names arrive as strings from the UI inputs.
  ggplot(data = data[round(input$zoomin[1]*nrow(data)/100):round(input$zoomin[2]*nrow(data)/100), ],
         aes(x = .data[[input$datevar]], y = .data[[input$tsvar]])) +
    geom_line() + scale_x_date(date_labels = "%b %y")
})
# File Content tab: render the uploaded file's full contents as a table.
output$contents <- renderTable({
  # df() is NULL until a file has been uploaded; show nothing in that case.
  uploaded <- df()
  if (is.null(uploaded)) {
    return(NULL)
  }
  uploaded
})
# For regression method
# Regression tab, accuracy table: fit a linear model with trend, seasonal
# dummies and lags on the training split, roll it forward over the test period
# one step at a time, and report RMSE/MAE/MAPE against the actual test values.
output$regressionresult <- renderTable({
data <- df()[,c(input$datevar,input$tsvar)]
names(data)=c("ds","y")
data$ds=as.Date(data$ds)
data$trend = c(1:nrow(df()))
train.data=data[1:ceiling(nrow(df())*input$train.size/100),]
# Quarter / weekday label per row (used to build seasonal dummy variables)
for (i in 1:length(train.data$ds)){
train.data$quarter[i]=(quarters(train.data$ds[i]))
}
for (i in 1:length(train.data$ds)){
train.data$weekday[i]=(weekdays(train.data$ds[i]))
}
# One-hot seasonal dummies; Q4 and Sunday act as the reference levels
train.data$q1=0
train.data$q1[train.data$quarter=='Q1']=1
train.data$q2=0
train.data$q2[train.data$quarter=='Q2']=1
train.data$q3=0
train.data$q3[train.data$quarter=='Q3']=1
train.data$mon=0
train.data$mon[train.data$weekday=='Monday']=1
train.data$tue=0
train.data$tue[train.data$weekday=='Tuesday']=1
train.data$wed=0
train.data$wed[train.data$weekday=='Wednesday']=1
train.data$thu=0
train.data$thu[train.data$weekday=='Thursday']=1
train.data$fri=0
train.data$fri[train.data$weekday=='Friday']=1
train.data$sat=0
train.data$sat[train.data$weekday=='Saturday']=1
# Lagged copies of y used as autoregressive predictors
train.data$lag1=lag(train.data$y,1)
train.data$lag2=lag(train.data$y,2)
train.data$lag3=lag(train.data$y,3)
train.data$lag4=lag(train.data$y,4)
train.data$lag5=lag(train.data$y,5)
train.data$lag6=lag(train.data$y,6)
train.data$lag7=lag(train.data$y,7)
train.data$lag8=lag(train.data$y,8)
train.data$lag9=lag(train.data$y,9)
train.data$lag10=lag(train.data$y,10)
train.data$lag14=lag(train.data$y,14)
# NOTE(review): `rm` shadows base::rm inside this reactive's scope
rm=lm(y~trend+q1+q2+q3+mon+tue+wed+thu+fri+sat+lag1+lag2+lag6+lag7+lag8+lag14, data = train.data)
# Iterative one-step-ahead forecasting: each appended row's lags are filled
# from the previous (partly predicted) rows, then y is predicted from them
for (i in 1:(nrow(df())-ceiling(nrow(df())*input$train.size/100))){
newrow=train.data[nrow(train.data),]
newrow$trend=newrow$trend+1
newrow$ds=newrow$ds+1
newrow$quarter=quarters(newrow$ds)
newrow$weekday=weekdays(newrow$ds)
newrow$q1=0
newrow$q1[newrow$quarter=='Q1']=1
newrow$q2=0
newrow$q2[newrow$quarter=='Q2']=1
newrow$q3=0
newrow$q3[newrow$quarter=='Q3']=1
newrow$mon=0
newrow$mon[newrow$weekday=='Monday']=1
newrow$tue=0
newrow$tue[newrow$weekday=='Tuesday']=1
newrow$wed=0
newrow$wed[newrow$weekday=='Wednesday']=1
newrow$thu=0
newrow$thu[newrow$weekday=='Thursday']=1
newrow$fri=0
newrow$fri[newrow$weekday=='Friday']=1
newrow$sat=0
newrow$sat[newrow$weekday=='Saturday']=1
train.data=rbind(train.data,newrow)
rownames(train.data) = 1:nrow(train.data)
train.data[nrow(train.data),]$lag1=train.data[nrow(train.data)-1,]$y
train.data[nrow(train.data),]$lag2=train.data[nrow(train.data)-2,]$y
train.data[nrow(train.data),]$lag3=train.data[nrow(train.data)-3,]$y
train.data[nrow(train.data),]$lag4=train.data[nrow(train.data)-4,]$y
train.data[nrow(train.data),]$lag5=train.data[nrow(train.data)-5,]$y
train.data[nrow(train.data),]$lag6=train.data[nrow(train.data)-6,]$y
train.data[nrow(train.data),]$lag7=train.data[nrow(train.data)-7,]$y
train.data[nrow(train.data),]$lag8=train.data[nrow(train.data)-8,]$y
train.data[nrow(train.data),]$lag9=train.data[nrow(train.data)-9,]$y
train.data[nrow(train.data),]$lag10=train.data[nrow(train.data)-10,]$y
train.data[nrow(train.data),]$lag14=train.data[nrow(train.data)-14,]$y
train.data[nrow(train.data),]$y=predict(rm, newdata = train.data[nrow(train.data),], interval = "prediction")[1,'fit']
}
# restore: split the extended frame into predicted test rows and original train rows
test.data=train.data[(ceiling(nrow(df())*input$train.size/100)+1):nrow(train.data),]
train.data=data[1:ceiling(nrow(df())*input$train.size/100),]
# NOTE(review): accuracy() expects forecasts first and actuals second; here the
# actual series is passed first — confirm MAPE's denominator is the intended one.
error=accuracy(data[ceiling(nrow(df())*input$train.size/100+1):nrow(df()), 'y'], test.data$y)
results=data.frame("RMSE"=error['Test set','RMSE'],"MAE"=error['Test set','MAE'],
"MAPE"=paste0(round(error['Test set','MAPE'],2), "%"),
"Total Records"=nrow(df()),
"Training Percentage"=paste0(input$train.size,"%"),
"Count Train"=nrow(train.data), "Count Test"=nrow(test.data),
"Count Forecast"=input$horizon)
})
# Regression tab, plot: true values (zoomed), test-period predictions from a
# model trained on the train split, and a horizon forecast rolled forward
# from a frame holding all observed data.
output$regressionplot <- renderPlot({
data <- df()[,c(input$datevar,input$tsvar)]
names(data)=c("ds","y")
data$ds=as.Date(data$ds)
data$trend = c(1:nrow(df()))
# Keep an untouched copy of the raw series for the "True values" line
dataall = data
# Quarter / weekday labels and seasonal dummies for every row
for (i in 1:length(data$ds)){
data$quarter[i]=(quarters(data$ds[i]))
}
for (i in 1:length(data$ds)){
data$weekday[i]=(weekdays(data$ds[i]))
}
data$q1=0
data$q1[data$quarter=='Q1']=1
data$q2=0
data$q2[data$quarter=='Q2']=1
data$q3=0
data$q3[data$quarter=='Q3']=1
data$mon=0
data$mon[data$weekday=='Monday']=1
data$tue=0
data$tue[data$weekday=='Tuesday']=1
data$wed=0
data$wed[data$weekday=='Wednesday']=1
data$thu=0
data$thu[data$weekday=='Thursday']=1
data$fri=0
data$fri[data$weekday=='Friday']=1
data$sat=0
data$sat[data$weekday=='Saturday']=1
# Lagged copies of y used as autoregressive predictors
data$lag1=lag(data$y,1)
data$lag2=lag(data$y,2)
data$lag3=lag(data$y,3)
data$lag4=lag(data$y,4)
data$lag5=lag(data$y,5)
data$lag6=lag(data$y,6)
data$lag7=lag(data$y,7)
data$lag8=lag(data$y,8)
data$lag9=lag(data$y,9)
data$lag10=lag(data$y,10)
data$lag14=lag(data$y,14) ## For all data
train.data=data[1:ceiling(nrow(df())*input$train.size/100),]
# NOTE(review): `rm` shadows base::rm inside this reactive's scope
rm=lm(y~trend+q1+q2+q3+mon+tue+wed+thu+fri+sat+lag1+lag2+lag6+lag7+lag8+lag14, data = train.data)
# Roll the model forward over the test period, one predicted day at a time
for (i in 1:(nrow(df())-ceiling(nrow(df())*input$train.size/100))){
newrow=train.data[nrow(train.data),]
newrow$trend=newrow$trend+1
newrow$ds=newrow$ds+1
newrow$quarter=quarters(newrow$ds)
newrow$weekday=weekdays(newrow$ds)
newrow$q1=0
newrow$q1[newrow$quarter=='Q1']=1
newrow$q2=0
newrow$q2[newrow$quarter=='Q2']=1
newrow$q3=0
newrow$q3[newrow$quarter=='Q3']=1
newrow$mon=0
newrow$mon[newrow$weekday=='Monday']=1
newrow$tue=0
newrow$tue[newrow$weekday=='Tuesday']=1
newrow$wed=0
newrow$wed[newrow$weekday=='Wednesday']=1
newrow$thu=0
newrow$thu[newrow$weekday=='Thursday']=1
newrow$fri=0
newrow$fri[newrow$weekday=='Friday']=1
newrow$sat=0
newrow$sat[newrow$weekday=='Saturday']=1
train.data=rbind(train.data,newrow)
rownames(train.data) = 1:nrow(train.data)
train.data[nrow(train.data),]$lag1=train.data[nrow(train.data)-1,]$y
train.data[nrow(train.data),]$lag2=train.data[nrow(train.data)-2,]$y
train.data[nrow(train.data),]$lag3=train.data[nrow(train.data)-3,]$y
train.data[nrow(train.data),]$lag4=train.data[nrow(train.data)-4,]$y
train.data[nrow(train.data),]$lag5=train.data[nrow(train.data)-5,]$y
train.data[nrow(train.data),]$lag6=train.data[nrow(train.data)-6,]$y
train.data[nrow(train.data),]$lag7=train.data[nrow(train.data)-7,]$y
train.data[nrow(train.data),]$lag8=train.data[nrow(train.data)-8,]$y
train.data[nrow(train.data),]$lag9=train.data[nrow(train.data)-9,]$y
train.data[nrow(train.data),]$lag10=train.data[nrow(train.data)-10,]$y
train.data[nrow(train.data),]$lag14=train.data[nrow(train.data)-14,]$y
train.data[nrow(train.data),]$y=predict(rm, newdata = train.data[nrow(train.data),], interval = "prediction")[1,'fit']
}
test.data=train.data[(ceiling(nrow(df())*input$train.size/100)+1):nrow(train.data),]
# Forecast input$horizon days beyond the last observation by appending
# predicted rows to the frame that holds all observed data
for (i in 1:input$horizon){
newrow=data[nrow(data),]
newrow$trend=newrow$trend+1
newrow$ds=newrow$ds+1
newrow$quarter=quarters(newrow$ds)
newrow$weekday=weekdays(newrow$ds)
newrow$q1=0
newrow$q1[newrow$quarter=='Q1']=1
newrow$q2=0
newrow$q2[newrow$quarter=='Q2']=1
newrow$q3=0
newrow$q3[newrow$quarter=='Q3']=1
newrow$mon=0
newrow$mon[newrow$weekday=='Monday']=1
newrow$tue=0
newrow$tue[newrow$weekday=='Tuesday']=1
newrow$wed=0
newrow$wed[newrow$weekday=='Wednesday']=1
newrow$thu=0
newrow$thu[newrow$weekday=='Thursday']=1
newrow$fri=0
newrow$fri[newrow$weekday=='Friday']=1
newrow$sat=0
newrow$sat[newrow$weekday=='Saturday']=1
data=rbind(data,newrow)
rownames(data) = 1:nrow(data)
data[nrow(data),]$lag1=data[nrow(data)-1,]$y
data[nrow(data),]$lag2=data[nrow(data)-2,]$y
data[nrow(data),]$lag3=data[nrow(data)-3,]$y
data[nrow(data),]$lag4=data[nrow(data)-4,]$y
data[nrow(data),]$lag5=data[nrow(data)-5,]$y
data[nrow(data),]$lag6=data[nrow(data)-6,]$y
data[nrow(data),]$lag7=data[nrow(data)-7,]$y
data[nrow(data),]$lag8=data[nrow(data)-8,]$y
data[nrow(data),]$lag9=data[nrow(data)-9,]$y
data[nrow(data),]$lag10=data[nrow(data)-10,]$y
data[nrow(data),]$lag14=data[nrow(data)-14,]$y
data[nrow(data),]$y=predict(rm, newdata = data[nrow(data),],
interval = "prediction")[1,'fit']
}
forecast <- data[(nrow(df())+1):(nrow(df())+input$horizon),]
ggplot(data = forecast, aes(x=ds, y=y, color = 'Forecasting \n values'))+
geom_line()+
geom_line(data = test.data, aes(x=ds, y=y, color = 'Testing \n set'))+
geom_line(data = dataall[round(input$zoomin[1]*nrow(dataall)/100):round(input$zoomin[2]*nrow(dataall)/100),]
, aes(x=ds, y=y, color = 'True \n values'))+
scale_x_date(date_labels = "%b %y")+
scale_color_manual(name = "", values = c("True \n values" = "black", 'Testing \n set' = 'deepskyblue2',
'Forecasting \n values' = 'firebrick2'))+
ylab('Value')
})
# Regression tab, forecast table: roll the fitted regression forward
# input$horizon days past the last observation, collecting the prediction
# interval bounds (lwr/upr) alongside the point forecasts.
output$regressionforecast <- renderTable({
data <- df()[,c(input$datevar,input$tsvar)]
names(data)=c("ds","y")
data$ds=as.Date(data$ds)
data$trend = c(1:nrow(df()))
# Quarter / weekday labels and seasonal dummies for every row
for (i in 1:length(data$ds)){
data$quarter[i]=(quarters(data$ds[i]))
}
for (i in 1:length(data$ds)){
data$weekday[i]=(weekdays(data$ds[i]))
}
data$q1=0
data$q1[data$quarter=='Q1']=1
data$q2=0
data$q2[data$quarter=='Q2']=1
data$q3=0
data$q3[data$quarter=='Q3']=1
data$mon=0
data$mon[data$weekday=='Monday']=1
data$tue=0
data$tue[data$weekday=='Tuesday']=1
data$wed=0
data$wed[data$weekday=='Wednesday']=1
data$thu=0
data$thu[data$weekday=='Thursday']=1
data$fri=0
data$fri[data$weekday=='Friday']=1
data$sat=0
data$sat[data$weekday=='Saturday']=1
# Lagged copies of y used as autoregressive predictors
data$lag1=lag(data$y,1)
data$lag2=lag(data$y,2)
data$lag3=lag(data$y,3)
data$lag4=lag(data$y,4)
data$lag5=lag(data$y,5)
data$lag6=lag(data$y,6)
data$lag7=lag(data$y,7)
data$lag8=lag(data$y,8)
data$lag9=lag(data$y,9)
data$lag10=lag(data$y,10)
data$lag14=lag(data$y,14) ## For all data
train.data=data[1:ceiling(nrow(df())*input$train.size/100),]
# NOTE(review): `rm` shadows base::rm inside this reactive's scope
rm=lm(y~trend+q1+q2+q3+mon+tue+wed+thu+fri+sat+lag1+lag2+lag6+lag7+lag8+lag14, data = train.data)
# Accumulators for the prediction-interval bounds of each forecast step
lwr = c()
upr = c()
for (i in 1:input$horizon){
newrow=data[nrow(data),]
newrow$trend=newrow$trend+1
newrow$ds=newrow$ds+1
newrow$quarter=quarters(newrow$ds)
newrow$weekday=weekdays(newrow$ds)
newrow$q1=0
newrow$q1[newrow$quarter=='Q1']=1
newrow$q2=0
newrow$q2[newrow$quarter=='Q2']=1
newrow$q3=0
newrow$q3[newrow$quarter=='Q3']=1
newrow$mon=0
newrow$mon[newrow$weekday=='Monday']=1
newrow$tue=0
newrow$tue[newrow$weekday=='Tuesday']=1
newrow$wed=0
newrow$wed[newrow$weekday=='Wednesday']=1
newrow$thu=0
newrow$thu[newrow$weekday=='Thursday']=1
newrow$fri=0
newrow$fri[newrow$weekday=='Friday']=1
newrow$sat=0
newrow$sat[newrow$weekday=='Saturday']=1
data=rbind(data,newrow)
rownames(data) = 1:nrow(data)
data[nrow(data),]$lag1=data[nrow(data)-1,]$y
data[nrow(data),]$lag2=data[nrow(data)-2,]$y
data[nrow(data),]$lag3=data[nrow(data)-3,]$y
data[nrow(data),]$lag4=data[nrow(data)-4,]$y
data[nrow(data),]$lag5=data[nrow(data)-5,]$y
data[nrow(data),]$lag6=data[nrow(data)-6,]$y
data[nrow(data),]$lag7=data[nrow(data)-7,]$y
data[nrow(data),]$lag8=data[nrow(data)-8,]$y
data[nrow(data),]$lag9=data[nrow(data)-9,]$y
data[nrow(data),]$lag10=data[nrow(data)-10,]$y
data[nrow(data),]$lag14=data[nrow(data)-14,]$y
data[nrow(data),]$y=predict(rm, newdata = data[nrow(data),],
interval = "prediction")[1,'fit']
lwr=c(lwr, predict(rm, newdata = data[nrow(data),],
interval = "prediction")[1,'lwr'])
upr=c(upr, predict(rm, newdata = data[nrow(data),],
interval = "prediction")[1,'upr'])
}
forecast <- data[(nrow(df())+1):(nrow(df())+input$horizon),'y']
forecastdates <- seq(from=(as.Date(data$ds[nrow(df())])+1), by=1, length.out=input$horizon)
forecastdates <- as.character(forecastdates)
forecastvalue <- data.frame('ds'=forecastdates,
'yhat'=forecast,
'yhat_lower'=lwr,
'yhat_upper'=upr)
})
# For classical decomposition method
# Classical Decomposition tab, accuracy table: MSTL decomposition with weekly,
# biweekly and roughly-monthly seasonal periods, scored on the test split.
output$decompositionresult <- renderTable({
train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
names(train.data)=c("ds","y")
names(test.data)=c("ds","y")
train.data$ds=format(as.Date(train.data$ds))
test.data$ds=format(as.Date(test.data$ds))
ms=msts(train.data$y,seasonal.periods=c(7,2*7,30.5),ts.frequency=30.5)
model=mstl(ms) # lambda = 0 do the multiplicative decomposition
fc=forecast(model,h = nrow(test.data), level = 0,allow.multiplicative.trend = T)
# trainerror is computed but only testerror feeds the displayed table
trainerror=data.frame(accuracy(fc$x,fc$fitted))
testerror=data.frame(accuracy(fc$mean,test.data$y))
results=data.frame("RMSE"=testerror$RMSE,"MAE"=testerror$MAE,
"MAPE"=paste0(round(testerror$MAPE,2), "%"),
"Total Records"=nrow(df()),
"Training Percentage"=paste0(input$train.size,"%"),
"Count Train"=nrow(train.data), "Count Test"=nrow(test.data),
"Count Forecast"=input$horizon)
})
# Classical Decomposition tab, plot: zoomed true series plus test-period
# predictions (train-fit model) and a horizon forecast (all-data model).
output$decompositionplot <- renderPlot({
data <- df()[,c(input$datevar,input$tsvar)]
names(data)=c("ds","y")
data$ds=as.Date(data$ds)
train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
names(train.data)=c("ds","y")
names(test.data)=c("ds","y")
train.data$ds=format(as.Date(train.data$ds))
test.data$ds=format(as.Date(test.data$ds))
# All-data model drives the out-of-sample forecast line
msall=msts(data$y,seasonal.periods=c(7,2*7,30.5),ts.frequency=30.5)
model2=mstl(msall)
forecast=forecast(model2,h = input$horizon, level = 0,allow.multiplicative.trend = T)
# Train-only model drives the test-period line
ms=msts(train.data$y,seasonal.periods=c(7,2*7,30.5),ts.frequency=30.5)
model=mstl(ms) # lambda = 0 do the multiplicative decomposition
testresult=forecast(model,h = nrow(test.data), level = 0,allow.multiplicative.trend = T)
# Restrict the true-series window to the user's zoom range
data2 <- window(msall,start=c(1,round(input$zoomin[1]*nrow(data)/100)),
end=c(1,round(input$zoomin[2]*nrow(data)/100)))
autoplot(data2, series = "True \n values", col = "black")+
autolayer(testresult$mean,series = "Testing \n set")+
autolayer(forecast$mean,series="Forecasting \n values")+
ylab('Value')+theme_bw()
})
# Classical Decomposition tab, forecast table: MSTL forecast on all data with
# 95% bounds, one row per day of the requested horizon.
output$decompositionforecast <- renderTable({
data=df()[,c(input$datevar,input$tsvar)]
names(data)=c("ds","y")
data$ds=format(as.Date(data$ds))
# train/test frames built here are not used below; kept as in the original
train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
names(train.data)=c("ds","y")
names(test.data)=c("ds","y")
msall=msts(data$y,seasonal.periods=c(7,2*7,30.5),ts.frequency=30.5)
model2=mstl(msall)
forecastall=forecast(model2,h = input$horizon, level = 95,allow.multiplicative.trend = T)
# Forecast dates continue day-by-day from the last observed date
forecastdates <- seq(from=(as.Date(data$ds[nrow(data)])+1), by=1, length.out=input$horizon)
forecastdates <- as.character(forecastdates)
forecastvalue <- data.frame('ds'=forecastdates,
'yhat'=forecastall[['mean']][1:input$horizon],
'yhat_lower'=forecastall[['lower']][1:input$horizon],
'yhat_upper'=forecastall[['upper']][1:input$horizon])
})
# For smoothing method
# Smoothing Method tab, accuracy table: ETS model on a weekly-frequency ts,
# forecast over the test period and scored with accuracy().
output$smoothresult <- renderTable({
train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
names(train.data)=c("ds","y")
names(test.data)=c("ds","y")
# Both splits are converted to ts objects; the frames are discarded
train.data <- ts(train.data$y, start=c(1,1), frequency = 7)
test.data <- ts(test.data$y, start=c(1,ceiling(nrow(df())*input$train.size/100)+2), frequency = 7)
m_smooth <- ets(train.data, allow.multiplicative.trend = T)
forecast <- forecast(m_smooth,h=nrow(df())-ceiling(nrow(df())*input$train.size/100),level=95)
error=accuracy(forecast, test.data)
results=data.frame("RMSE"=error['Test set','RMSE'],"MAE"=error['Test set','MAE'],
"MAPE"=paste0(round(error['Test set','MAPE'],2), "%"),
"Total Records"=nrow(df()),
"Training Percentage"=paste0(input$train.size,"%"),
"Count Train"=length(train.data), "Count Test"=length(test.data),
"Count Forecast"=input$horizon
)
})
# Smoothing Method tab, plot: zoomed true series plus test-period predictions
# (ETS fit on train) and horizon forecast (ETS fit on all data).
output$smoothplot <- renderPlot({
data <- df()[,c(input$datevar,input$tsvar)]
names(data)=c("ds","y")
data1 <- ts(data$y, start=c(1,1), frequency = 7)
m_smooth1 <- ets(data1, allow.multiplicative.trend = T)
forecast <- forecast(m_smooth1, h=input$horizon,level=95)
train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
names(train.data)=c("ds","y")
names(test.data)=c("ds","y")
train.data <- ts(train.data$y, start=c(1,1), frequency = 7)
test.data <- ts(test.data$y, start=c(1,ceiling(nrow(data)*input$train.size/100)+2), frequency = 7)
m_smooth2 <- ets(train.data, allow.multiplicative.trend = T)
testresult <- forecast(m_smooth2,h=nrow(data)-ceiling(nrow(data)*input$train.size/100),level=95)
# Restrict the true-series window to the user's zoom range
data2 <- window(data1,start=c(1,round(input$zoomin[1]*nrow(data)/100)),
end=c(1,round(input$zoomin[2]*nrow(data)/100)))
autoplot(data2, series = "True \n values", col = "black")+
autolayer(testresult$mean,series = "Testing \n set")+
autolayer(forecast$mean,series="Forecasting \n values")+
ylab('Value')+theme_bw()
})
# Smoothing Method tab, forecast table: ETS forecasts with 95% bounds.
output$smoothforecast <- renderTable({
data <- df()[,c(input$datevar,input$tsvar)]
names(data)=c("ds","y")
data1 <- ts(data$y, start=c(1,1), frequency = 7)
m_smooth1 <- ets(data1, allow.multiplicative.trend = T)
forecast <- forecast(m_smooth1, h=input$horizon,level=95)
# Forecast dates continue day-by-day from the last observed date
forecastdates <- seq(from=(as.Date(data$ds[nrow(data)])+1), by=1, length.out=input$horizon)
forecastdates <- as.character(forecastdates)
# Fix: use $-extraction so columns are named yhat / yhat_lower / yhat_upper,
# matching the other forecast tabs. The previous list-subsetting
# (forecast['mean'] etc.) produced mangled names like "yhat.mean"/"yhat.X95.".
forecastvalue <- data.frame('ds'=forecastdates,
'yhat'=forecast$mean,
'yhat_lower'=forecast$lower[1:input$horizon],
'yhat_upper'=forecast$upper[1:input$horizon])
})
# For Arima method
# Arima tab, accuracy table: seasonal ARIMA(3,1,1)(0,1,1)[7] fitted on the log
# scale of the training split, forecast over the test period, back-transformed
# with exp() and scored with accuracy().
output$arimaresult <- renderTable({
train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
names(train.data)=c("ds","y")
names(test.data)=c("ds","y")
train.data <- ts(train.data$y, start=c(1,1), frequency = 7)
test.data <- ts(test.data$y, start=c(1,ceiling(nrow(df())*input$train.size/100)+2), frequency = 7)
m_arima <- Arima(log(train.data), order=c(3,1,1),seasonal=c(0,1,1))
forecast <- forecast(m_arima,h=nrow(df())-ceiling(nrow(df())*input$train.size/100),level=95)
# exp() undoes the log transform before computing the errors
error=accuracy(exp(forecast$mean), test.data)
data.frame("RMSE"=error['Test set','RMSE'],"MAE"=error['Test set','MAE'],
"MAPE"=paste0(round(error['Test set','MAPE'],2), "%"),
"Total Records"=nrow(df()),
"Training Percentage"=paste0(input$train.size,"%"),
"Count Train"=length(train.data), "Count Test"=length(test.data),
"Count Forecast"=input$horizon
)
})
# Arima tab, plot: zoomed true series plus back-transformed test-period
# predictions (train-fit model) and horizon forecast (all-data model).
output$arimaplot <- renderPlot({
data <- df()[,c(input$datevar,input$tsvar)]
names(data)=c("ds","y")
data1 <- ts(data$y, start=c(1,1), frequency = 7)
m_arima1 <- Arima(log(data1), order=c(3,1,1),seasonal=c(0,1,1))
forecast <- forecast(m_arima1, h=input$horizon,level=95)
train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
names(train.data)=c("ds","y")
names(test.data)=c("ds","y")
train.data <- ts(train.data$y, start=c(1,1), frequency = 7)
test.data <- ts(test.data$y, start=c(1,ceiling(nrow(data)*input$train.size/100)+2), frequency = 7)
m_arima2 <- Arima(log(train.data), order=c(3,1,1),seasonal=c(0,1,1))
testresult <- forecast(m_arima2,h=nrow(data)-ceiling(nrow(data)*input$train.size/100),level=95)
# Restrict the true-series window to the user's zoom range
data2 <- window(data1,start=c(1,round(input$zoomin[1]*nrow(data)/100)),
end=c(1,round(input$zoomin[2]*nrow(data)/100)))
# exp() back-transforms the log-scale forecasts before plotting
autoplot(data2, series = "True \n values", col = "black")+
autolayer(exp(testresult$mean),series = "Testing \n set")+
autolayer(exp(forecast$mean),series="Forecasting \n values")+
ylab('Value')+theme_bw()
})
# Forecast table for the ARIMA model: next `input$horizon` days with point
# forecast and 95% interval, back-transformed from the log scale.
output$arimaforecast <- renderTable({
data <- df()[,c(input$datevar,input$tsvar)]
names(data)=c("ds","y")
data1 <- ts(data$y, start=c(1,1), frequency = 7)
m_arima1 <- Arima(log(data1), order=c(3,1,1),seasonal=c(0,1,1))
forecast <- forecast(m_arima1, h=input$horizon,level=95)
# Forecast dates continue day-by-day from the last observed date.
forecastdates <- seq(from=(as.Date(data$ds[nrow(data)])+1), by=1, length.out=input$horizon)
forecastdates <- as.character(forecastdates)
# lower/upper are h x 1 matrices (single 95% level); linear indexing
# flattens them to numeric vectors for the table.
forecastvalue <- data.frame('ds'=forecastdates,
'yhat'=exp(forecast$mean),
'yhat_lower'=exp(forecast$lower[1:input$horizon]),
'yhat_upper'=exp(forecast$upper[1:input$horizon]))
})
# For prophet
# Hold-out accuracy table for the Prophet model: fits on the training slice,
# predicts over a future frame covering the test period, and reports
# RMSE/MAE/MAPE computed by local helper functions.
output$prophetresult <- renderTable({
train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
names(train.data)=c("ds","y")
names(test.data)=c("ds","y")
# prophet() expects a 'ds' date column; format() normalises to "YYYY-MM-DD".
train.data$ds=format(as.Date(train.data$ds))
test.data$ds=format(as.Date(test.data$ds))
# Same prophet configuration as the other prophet outputs in this app.
m_prophet <- prophet(train.data,
growth = 'linear',
seasonality.mode = 'multiplicative',
changepoint.prior.scale = 30,
seasonality.prior.scale = 35
)
# Extend the frame by exactly the number of test rows.
future <- make_future_dataframe(m_prophet, periods = nrow(df())-ceiling(nrow(df())*input$train.size/100))
forecast <- predict(m_prophet, future)
forecast$ds=format(forecast$ds)
# Function that returns Root Mean Squared Error
rmse <- function(error)
{
sqrt(mean(error^2))
}
# Function that returns Mean Absolute Error
mae <- function(error)
{
mean(abs(error))
}
# Function that returns Mean Absolute Percentage Error (as a fraction)
mape <- function(error, truevalue)
{
mean(abs(error/truevalue))
}
# NOTE(review): indexing forecast rows by df() row positions assumes predict()
# returns one row per date of `future`, in order -- TODO confirm for gapped data.
error=forecast[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),'yhat']-test.data$y
results=data.frame("RMSE"=rmse(error),"MAE"=mae(error),
"MAPE"=paste0(round(mape(error, test.data$y)*100,2), "%"),
"Total Records"=nrow(df()),
"Training Percentage"=paste0(input$train.size,"%"),
"Count Train"=nrow(train.data), "Count Test"=nrow(test.data),
"Count Forecast"=input$horizon)
})
# Plot for the Prophet model: true values within the zoom window, plus
# in-sample predictions over the test dates and out-of-sample forecasts.
# Unlike the other plot outputs this one is built directly with ggplot2
# because prophet predictions are data frames, not ts objects.
output$prophetplot <- renderPlot({
data <- df()[,c(input$datevar,input$tsvar)]
names(data)=c("ds","y")
data$ds=as.Date(data$ds)
m_prophet <- prophet(data,
growth = 'linear',
seasonality.mode = 'multiplicative',
changepoint.prior.scale = 30,
seasonality.prior.scale = 35
)
# include_history = FALSE keeps only the input$horizon future dates.
future <- make_future_dataframe(m_prophet, periods = input$horizon, include_history = FALSE)
forecast <- predict(m_prophet, future)
forecast$ds=as.Date(forecast$ds)
# Predict again over just the test-period dates for the "Testing set" layer.
# NOTE(review): uses round() for the split point while every other output
# uses ceiling() -- off by one row for some sizes; confirm intended.
testdata <- data.frame(data[-(1:round(nrow(data)*input$train.size/100)),]$ds)
names(testdata)=c('ds')
testresult <- predict(m_prophet, testdata)
testresult$ds=as.Date(testresult$ds)
ggplot(data = forecast, aes(x=ds, y=yhat, color = 'Forecasting \n values'))+
geom_line()+
geom_line(data = testresult, aes(x=ds, y=yhat, color = 'Testing \n set'))+
geom_line(data = data[round(input$zoomin[1]*nrow(data)/100):round(input$zoomin[2]*nrow(data)/100),]
, aes(x=ds, y=y, color = 'True \n values'))+
scale_x_date(date_labels = "%b %y")+
scale_color_manual(name = "", values = c("True \n values" = "black", 'Testing \n set' = 'deepskyblue2',
'Forecasting \n values' = 'firebrick2'))+
ylab('Value')
})
# Forecast table for the Prophet model: fits on the full history and shows
# only the final `input$horizon` rows of the prediction frame, i.e. the
# future dates beyond the observed series.
output$prophetforecast <- renderTable({
  history <- df()[, c(input$datevar, input$tsvar)]
  names(history) <- c("ds", "y")
  # prophet() expects a 'ds' date column; format() normalises to "YYYY-MM-DD".
  history$ds <- format(as.Date(history$ds))
  # Same prophet configuration as the other prophet outputs in this app.
  fit <- prophet(history,
                 growth = 'linear',
                 seasonality.mode = 'multiplicative',
                 changepoint.prior.scale = 30,
                 seasonality.prior.scale = 35
                 )
  horizon_frame <- make_future_dataframe(fit, periods = input$horizon)
  prediction <- predict(fit, horizon_frame)
  prediction$ds <- format(prediction$ds)
  # The frame includes history followed by the future dates, so the last
  # input$horizon rows are exactly the forecast period.
  tail(prediction[c('ds', 'yhat', 'yhat_lower', 'yhat_upper')], input$horizon)
})
# For summary
# Model comparison table: fits five models on the same train/test split
# (dummy-variable regression with lags, classical multi-seasonal
# decomposition, exponential smoothing, ARIMA, Prophet) and tabulates
# RMSE / MAE / MAPE for each.
output$summary <- renderTable({
data <- df()[,c(input$datevar,input$tsvar)]
names(data)=c("ds","y")
data$ds=as.Date(data$ds)
# Linear trend index 1..n used as a regressor.
data$trend = c(1:nrow(df()))
train.data=data[1:ceiling(nrow(df())*input$train.size/100),]
# Calendar features: quarter and weekday labels for each training date.
# NOTE(review): weekdays()/comparison against English names is
# locale-dependent -- confirm the deployment locale.
for (i in 1:length(train.data$ds)){
train.data$quarter[i]=(quarters(train.data$ds[i]))
}
for (i in 1:length(train.data$ds)){
train.data$weekday[i]=(weekdays(train.data$ds[i]))
}
# One-hot quarter dummies (Q4 is the omitted baseline).
train.data$q1=0
train.data$q1[train.data$quarter=='Q1']=1
train.data$q2=0
train.data$q2[train.data$quarter=='Q2']=1
train.data$q3=0
train.data$q3[train.data$quarter=='Q3']=1
# One-hot weekday dummies (Sunday is the omitted baseline).
train.data$mon=0
train.data$mon[train.data$weekday=='Monday']=1
train.data$tue=0
train.data$tue[train.data$weekday=='Tuesday']=1
train.data$wed=0
train.data$wed[train.data$weekday=='Wednesday']=1
train.data$thu=0
train.data$thu[train.data$weekday=='Thursday']=1
train.data$fri=0
train.data$fri[train.data$weekday=='Friday']=1
train.data$sat=0
train.data$sat[train.data$weekday=='Saturday']=1
# Lagged values of the target; only a subset (1,2,6,7,8,14) enters the model.
train.data$lag1=lag(train.data$y,1)
train.data$lag2=lag(train.data$y,2)
train.data$lag3=lag(train.data$y,3)
train.data$lag4=lag(train.data$y,4)
train.data$lag5=lag(train.data$y,5)
train.data$lag6=lag(train.data$y,6)
train.data$lag7=lag(train.data$y,7)
train.data$lag8=lag(train.data$y,8)
train.data$lag9=lag(train.data$y,9)
train.data$lag10=lag(train.data$y,10)
train.data$lag14=lag(train.data$y,14)
# NOTE(review): `rm` shadows base::rm here -- works, but rename if touched.
rm=lm(y~trend+q1+q2+q3+mon+tue+wed+thu+fri+sat+lag1+lag2+lag6+lag7+lag8+lag14, data = train.data)
# Recursive one-step-ahead forecasting: append one synthetic row per test
# day, rebuild its calendar/lag features from already-predicted values,
# then predict its y with the fitted regression.
for (i in 1:(nrow(df())-ceiling(nrow(df())*input$train.size/100))){
newrow=train.data[nrow(train.data),]
newrow$trend=newrow$trend+1
newrow$ds=newrow$ds+1
newrow$quarter=quarters(newrow$ds)
newrow$weekday=weekdays(newrow$ds)
newrow$q1=0
newrow$q1[newrow$quarter=='Q1']=1
newrow$q2=0
newrow$q2[newrow$quarter=='Q2']=1
newrow$q3=0
newrow$q3[newrow$quarter=='Q3']=1
newrow$mon=0
newrow$mon[newrow$weekday=='Monday']=1
newrow$tue=0
newrow$tue[newrow$weekday=='Tuesday']=1
newrow$wed=0
newrow$wed[newrow$weekday=='Wednesday']=1
newrow$thu=0
newrow$thu[newrow$weekday=='Thursday']=1
newrow$fri=0
newrow$fri[newrow$weekday=='Friday']=1
newrow$sat=0
newrow$sat[newrow$weekday=='Saturday']=1
train.data=rbind(train.data,newrow)
rownames(train.data) = 1:nrow(train.data)
# Refresh every lag column of the new row from the (partly predicted) tail.
train.data[nrow(train.data),]$lag1=train.data[nrow(train.data)-1,]$y
train.data[nrow(train.data),]$lag2=train.data[nrow(train.data)-2,]$y
train.data[nrow(train.data),]$lag3=train.data[nrow(train.data)-3,]$y
train.data[nrow(train.data),]$lag4=train.data[nrow(train.data)-4,]$y
train.data[nrow(train.data),]$lag5=train.data[nrow(train.data)-5,]$y
train.data[nrow(train.data),]$lag6=train.data[nrow(train.data)-6,]$y
train.data[nrow(train.data),]$lag7=train.data[nrow(train.data)-7,]$y
train.data[nrow(train.data),]$lag8=train.data[nrow(train.data)-8,]$y
train.data[nrow(train.data),]$lag9=train.data[nrow(train.data)-9,]$y
train.data[nrow(train.data),]$lag10=train.data[nrow(train.data)-10,]$y
train.data[nrow(train.data),]$lag14=train.data[nrow(train.data)-14,]$y
train.data[nrow(train.data),]$y=predict(rm, newdata = train.data[nrow(train.data),], interval = "prediction")[1,'fit']
}
#restore
# Split the augmented frame back into predicted test rows and the original
# training rows.
test.data=train.data[(ceiling(nrow(df())*input$train.size/100)+1):nrow(train.data),]
train.data=data[1:ceiling(nrow(df())*input$train.size/100),]
# NOTE(review): here ceiling(n*p/100+1) is used where other outputs use
# ceiling(n*p/100)+1 -- the two can differ by one row; confirm intended.
error=accuracy(data[ceiling(nrow(df())*input$train.size/100+1):nrow(df()), 'y'], test.data$y)
model1 = 'Regression'
rmse1 = error['Test set','RMSE']
mae1 = error['Test set','MAE']
mape1 = paste0(round(error['Test set','MAPE'],2), "%")
# --- Model 2: classical decomposition over multiple seasonal periods ----
train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
names(train.data)=c("ds","y")
names(test.data)=c("ds","y")
train.data$ds=format(as.Date(train.data$ds))
test.data$ds=format(as.Date(test.data$ds))
# Weekly, fortnightly and ~monthly seasonal periods.
ms=msts(train.data$y,seasonal.periods=c(7,2*7,30.5),ts.frequency=30.5)
model=mstl(ms) # lambda = 0 do the multiplicative decomposition
fc=forecast(model,h = nrow(test.data), level = 0,allow.multiplicative.trend = T)
trainerror=data.frame(accuracy(fc$x,fc$fitted))
testerror=data.frame(accuracy(fc$mean,test.data$y))
model2 = 'Classical Decomposition'
rmse2 = testerror$RMSE
mae2 = testerror$MAE
mape2 = paste0(round(testerror$MAPE,2), "%")
# --- Model 3: exponential smoothing (same setup as output$smoothresult) ---
train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
names(train.data)=c("ds","y")
names(test.data)=c("ds","y")
train.data <- ts(train.data$y, start=c(1,1), frequency = 7)
test.data <- ts(test.data$y, start=c(1,ceiling(nrow(df())*input$train.size/100)+2), frequency = 7)
m_smooth <- ets(train.data, allow.multiplicative.trend = T)
forecast <- forecast(m_smooth,h=nrow(df())-ceiling(nrow(df())*input$train.size/100),level=95)
error=accuracy(forecast, test.data)
model3 = 'Smoothing Method'
rmse3 = error['Test set','RMSE']
mae3 = error['Test set','MAE']
mape3 = paste0(round(error['Test set','MAPE'],2), "%")
# --- Model 4: ARIMA on the log-series (same setup as output$arimaresult) ---
train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
names(train.data)=c("ds","y")
names(test.data)=c("ds","y")
train.data <- ts(train.data$y, start=c(1,1), frequency = 7)
test.data <- ts(test.data$y, start=c(1,ceiling(nrow(df())*input$train.size/100)+2), frequency = 7)
m_arima <- Arima(log(train.data), order=c(3,1,1),seasonal=c(0,1,1))
forecast <- forecast(m_arima,h=nrow(df())-ceiling(nrow(df())*input$train.size/100),level=95)
error=accuracy(exp(forecast$mean), test.data)
model4 = 'Arima'
rmse4 = error['Test set','RMSE']
mae4 = error['Test set','MAE']
mape4 = paste0(round(error['Test set','MAPE'],2), "%")
# --- Model 5: Prophet (same setup as output$prophetresult) ---
train.data=df()[1:ceiling(nrow(df())*input$train.size/100),c(input$datevar,input$tsvar)]
test.data=df()[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),c(input$datevar,input$tsvar)]
names(train.data)=c("ds","y")
names(test.data)=c("ds","y")
train.data$ds=format(as.Date(train.data$ds))
test.data$ds=format(as.Date(test.data$ds))
m_prophet <- prophet(train.data,
growth = 'linear',
seasonality.mode = 'multiplicative',
changepoint.prior.scale = 30,
seasonality.prior.scale = 35
)
future <- make_future_dataframe(m_prophet, periods = nrow(df())-ceiling(nrow(df())*input$train.size/100))
forecast <- predict(m_prophet, future)
forecast$ds=format(forecast$ds)
# Function that returns Root Mean Squared Error
rmse <- function(error)
{
sqrt(mean(error^2))
}
# Function that returns Mean Absolute Error
mae <- function(error)
{
mean(abs(error))
}
# Function that returns Mean Absolute Percentage Error (as a fraction)
mape <- function(error, truevalue)
{
mean(abs(error/truevalue))
}
error=forecast[(ceiling(nrow(df())*input$train.size/100)+1):nrow(df()),'yhat']-test.data$y
model5 = 'Prophet'
rmse5 = rmse(error)
mae5 = mae(error)
mape5 = paste0(round(mape(error, test.data$y)*100,2), "%")
# Assemble the comparison table; this is the renderTable return value.
results=data.frame("Models"=c(model1, model2, model3, model4, model5),
"RMSE"=c(rmse1,rmse2,rmse3,rmse4,rmse5),
"MAE"=c(mae1,mae2,mae3,mae4,mae5),
"MAPE"=c(mape1,mape2,mape3,mape4,mape5),
"Total Records"=nrow(df()),
"Training Percentage"=paste0(input$train.size,"%"),
"Count Train"=nrow(train.data), "Count Test"=nrow(test.data),
"Count Forecast"=input$horizon)
})
# Reactive objects
# Reactive data source: re-reads the uploaded CSV whenever the file input
# (or the header checkbox) changes. Every output above derives from df().
df <- reactive({
# req() suspends all dependents until a file has been uploaded.
req(input$file1)
read.csv(input$file1$datapath, header = input$header)
})
# When a new file is loaded, repopulate the variable pickers: the date
# variable defaults to the first column, the series variable to the second.
observeEvent(df(), {
updateSelectInput(session, "datevar", choices=colnames(df()))
updateSelectInput(session, "tsvar", choices=colnames(df()), selected = colnames(df())[2])
})
}
# Create Shiny app object
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scaled.R
\name{scaled}
\alias{scaled}
\title{Power-law scaling for doses}
\usage{
scaled(dose, a = 4)
}
\arguments{
\item{dose}{A numeric vector of doses}
\item{a}{A numeric exponent for power-law rescaling}
}
\value{
A rescaled vector of doses
}
\description{
Implement an inverse power-law scaling for drug dose.
}
\author{
David C. Norris
}
| /man/scaled.Rd | no_license | cran/DTAT | R | false | true | 423 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scaled.R
\name{scaled}
\alias{scaled}
\title{Power-law scaling for doses}
\usage{
scaled(dose, a = 4)
}
\arguments{
\item{dose}{A numeric vector of doses}
\item{a}{A numeric exponent for power-law rescaling}
}
\value{
A rescaled vector of doses
}
\description{
Implement an inverse power-law scaling for drug dose.
}
\author{
David C. Norris
}
|
context("Testing download_realtime functions")
# Requires web-service credentials supplied via the WS_USRNM / WS_PWD
# environment variables; skipped on CRAN and Travis where these are absent.
test_that("download_realtime_ws returns the correct data header", {
skip_on_cran()
skip_on_travis()
token_out <- get_ws_token(username = Sys.getenv("WS_USRNM"), password = Sys.getenv("WS_PWD"))
ws_test <- download_realtime_ws(STATION_NUMBER = "08MF005",
parameters = c(46), ## Water level and temperature
start_date = Sys.Date(),
end_date = Sys.Date(),
token = token_out)
# Pin the exact column set so upstream schema changes are caught.
expect_identical(colnames(ws_test),
c("STATION_NUMBER", "Date", "Name_En", "Value", "Unit", "Grade",
"Symbol", "Approval", "Parameter", "Code"))
})
# Pin the exact column set of the datamart download for one known station.
test_that("download_realtime_dd returns the correct data header", {
expect_identical(
colnames(download_realtime_dd(STATION_NUMBER = "08MF005", PROV_TERR_STATE_LOC = "BC")),
c("STATION_NUMBER", "PROV_TERR_STATE_LOC", "Date", "Parameter", "Value", "Grade", "Symbol", "Code")
)
})
# A multi-province request should complete without error. The original body
# was a bare call with no expectation, which testthat reports as a test with
# no expectations; expect_error(..., regexp = NA) asserts that no error is
# raised, matching the STATION_NUMBER variant of this test below.
test_that("download_realtime_dd can download stations from multiple provinces using PROV_TERR_STATE_LOC", {
  expect_error(download_realtime_dd(PROV_TERR_STATE_LOC = c("QC", "PE")), regexp = NA)
})
# regexp = NA asserts that the call completes without raising an error.
test_that("download_realtime_dd can download stations from multiple provinces using STATION_NUMBER", {
expect_error(download_realtime_dd(STATION_NUMBER = c("01CD005", "08MF005")), regexp = NA)
})
# "ALL" is rejected as a station identifier and must raise an error.
test_that("When STATION_NUMBER is ALL there is an error", {
expect_error(download_realtime_dd(STATION_NUMBER = "ALL"))
})
| /tests/testthat/test_download_realtime.R | permissive | stephhazlitt/tidyhydat | R | false | false | 1,536 | r | context("Testing download_realtime functions")
test_that("download_realtime_ws returns the correct data header", {
skip_on_cran()
skip_on_travis()
token_out <- get_ws_token(username = Sys.getenv("WS_USRNM"), password = Sys.getenv("WS_PWD"))
ws_test <- download_realtime_ws(STATION_NUMBER = "08MF005",
parameters = c(46), ## Water level and temperature
start_date = Sys.Date(),
end_date = Sys.Date(),
token = token_out)
expect_identical(colnames(ws_test),
c("STATION_NUMBER", "Date", "Name_En", "Value", "Unit", "Grade",
"Symbol", "Approval", "Parameter", "Code"))
})
test_that("download_realtime_dd returns the correct data header", {
expect_identical(
colnames(download_realtime_dd(STATION_NUMBER = "08MF005", PROV_TERR_STATE_LOC = "BC")),
c("STATION_NUMBER", "PROV_TERR_STATE_LOC", "Date", "Parameter", "Value", "Grade", "Symbol", "Code")
)
})
# A multi-province request should complete without error. The original body
# was a bare call with no expectation, which testthat reports as a test with
# no expectations; expect_error(..., regexp = NA) asserts that no error is
# raised, matching the STATION_NUMBER variant of this test below.
test_that("download_realtime_dd can download stations from multiple provinces using PROV_TERR_STATE_LOC", {
  expect_error(download_realtime_dd(PROV_TERR_STATE_LOC = c("QC", "PE")), regexp = NA)
})
test_that("download_realtime_dd can download stations from multiple provinces using STATION_NUMBER", {
expect_error(download_realtime_dd(STATION_NUMBER = c("01CD005", "08MF005")), regexp = NA)
})
test_that("When STATION_NUMBER is ALL there is an error", {
expect_error(download_realtime_dd(STATION_NUMBER = "ALL"))
})
|
\name{lpm}
\alias{lpm}
\title{calculate a lower partial moment for a time series}
\usage{
lpm(R, n = 2, threshold = 0, about_mean = FALSE)
}
\arguments{
\item{R}{xts data}
\item{n}{the n-th moment to return}
\item{threshold}{threshold can be the mean or any point
as desired}
\item{about_mean}{TRUE/FALSE calculate LPM about the mean
under the threshold or use the threshold to calculate the
LPM around (if FALSE)}
}
\description{
Calculate a Lower Partial Moment around the mean or a
specified threshold.
}
\details{
Lower partial moments capture negative deviation from a
reference point. That reference point may be the mean, or
some specified threshold that has other meaning for the
investor.
}
\author{
Kyle Balkissoon \email{kylebalkisoon@gmail.com}
}
\references{
Huffman S.P. & Moll C.R., "The impact of Asymmetry on
Expected Stock Returns: An Investigation of Alternative
Risk Measures", Algorithmic Finance 1, 2011 p. 79-93
}
| /man/lpm.Rd | no_license | guillermozbta/portafolio-master | R | false | false | 955 | rd | \name{lpm}
\alias{lpm}
\title{calculate a lower partial moment for a time series}
\usage{
lpm(R, n = 2, threshold = 0, about_mean = FALSE)
}
\arguments{
\item{R}{xts data}
\item{n}{the n-th moment to return}
\item{threshold}{threshold can be the mean or any point
as desired}
\item{about_mean}{TRUE/FALSE calculate LPM about the mean
under the threshold or use the threshold to calculate the
LPM around (if FALSE)}
}
\description{
Calculate a Lower Partial Moment around the mean or a
specified threshold.
}
\details{
Lower partial moments capture negative deviation from a
reference point. That reference point may be the mean, or
some specified threshold that has other meaning for the
investor.
}
\author{
Kyle Balkissoon \email{kylebalkisoon@gmail.com}
}
\references{
Huffman S.P. & Moll C.R., "The impact of Asymmetry on
Expected Stock Returns: An Investigation of Alternative
Risk Measures", Algorithmic Finance 1, 2011 p. 79-93
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/open.tunnel.R
\name{open_tunnel}
\alias{open_tunnel}
\title{Title}
\usage{
open_tunnel(remote_host, user = NULL, password = NULL,
tunnel_dir = "~/.pecan/tunnel/", wait.time = 15)
}
\arguments{
\item{remote_host}{name of remote server to connect to (e.g. geo.bu.edu)}
\item{user}{username on remote_host}
\item{password}{password on remote_host}
\item{tunnel_dir}{directory to store tunnel file in, typically from settings$host}
\item{wait.time}{how long to give system to connect before deleting password (seconds)}
}
\description{
Open a tunnel connection to a remote host.
}
| /utils/man/open_tunnel.Rd | permissive | Kah5/pecan | R | false | true | 624 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/open.tunnel.R
\name{open_tunnel}
\alias{open_tunnel}
\title{Title}
\usage{
open_tunnel(remote_host, user = NULL, password = NULL,
tunnel_dir = "~/.pecan/tunnel/", wait.time = 15)
}
\arguments{
\item{remote_host}{name of remote server to connect to (e.g. geo.bu.edu)}
\item{user}{username on remote_host}
\item{password}{password on remote_host}
\item{tunnel_dir}{directory to store tunnel file in, typically from settings$host}
\item{wait.time}{how long to give system to connect before deleting password (seconds)}
}
\description{
Title
}
|
# Theft share of Austin crime reports, 2012-2018.
# The original repeated an identical read-CSV / count-THEFT / totals stanza
# once per year; the loop below performs the same computation for every year.
library(readr)
library(plyr)
library(ggplot2)
library(ggthemes)
library(RColorBrewer)

years <- 2012:2018
total_crime <- integer(length(years))
total_theft <- integer(length(years))

for (i in seq_along(years)) {
  crime <- read_csv(sprintf("Desktop/texas/Crime_Reports_%d.csv", years[i]))
  # Frequency table of every offense description containing "THEFT"
  # (e.g. AUTO THEFT, THEFT BY SHOPLIFTING).
  theft_freq <- count(grep('THEFT', crime$`Highest Offense Description`, value = TRUE))
  total_theft[i] <- sum(theft_freq$freq)
  total_crime[i] <- nrow(crime)
}
theft_percentage <- total_theft / total_crime

# NOTE(review): the original script called dev.off() here, which errors when
# no graphics device is open; it has been removed.

# Styling shared by all three bar charts (ggplot2 accepts a list of
# components added with `+`).
chart_theme <- list(
  theme_economist(base_size = 10),
  scale_fill_economist(),
  theme(axis.ticks.length = unit(0.7, 'cm')),
  theme(plot.title = element_text(hjust = 0.45, size = 20, face = "bold")),
  theme(axis.text = element_text(size = 12, vjust = 1),
        axis.title = element_text(size = 14, face = "bold", vjust = 0))
)
bar_fill <- brewer.pal(7, "Set1")[2]

# Build one styled bar chart of `yvar` against Year.
theft_bar <- function(df, yvar, title) {
  ggplot(df, aes(x = Year, y = .data[[yvar]])) +
    geom_bar(stat = "identity", position = "dodge", fill = bar_fill, width = 0.7) +
    ggtitle(title) +
    chart_theme
}

data <- data.frame(Year = as.character(years), Theft_Percentage = theft_percentage)
theft_bar(data, "Theft_Percentage", "Theft Crime Percentage")

data2 <- data.frame(Year = as.character(years), Total_Crime = total_crime)
theft_bar(data2, "Total_Crime", "Total Crime")

data3 <- data.frame(Year = as.character(years), Total_Theft = total_theft)
theft_bar(data3, "Total_Theft", "Total Theft")
| /Texas_crime/texas 2015.R | no_license | yunfei-xu/Decriminalization_Final_Project | R | false | false | 4,262 | r | # Count Theft Category
library(readr)
library(plyr)
# 2012 Crime data
Crime_2012 <- read_csv("Desktop/texas/Crime_Reports_2012.csv")
category2012 <- count(grep('THEFT', Crime_2012$`Highest Offense Description`, value=TRUE))
theft_count2012 <- sum(category2012$freq)
rowcount2012 <- nrow(Crime_2012)
percent2012 <- theft_count2012/rowcount2012
# 2013 Crime data
Crime_2013 <- read_csv("Desktop/texas/Crime_Reports_2013.csv")
category2013 <- count(grep('THEFT', Crime_2013$`Highest Offense Description`, value=TRUE))
theft_count2013 <- sum(category2013$freq)
rowcount2013 <- nrow(Crime_2013)
percent2013 <- theft_count2013/rowcount2013
# 2014 Crime data
Crime_2014 <- read_csv("Desktop/texas/Crime_Reports_2014.csv")
category2014 <- count(grep('THEFT', Crime_2014$`Highest Offense Description`, value=TRUE))
theft_count2014 <- sum(category2014$freq)
rowcount2014 <- nrow(Crime_2014)
percent2014 <- theft_count2014/rowcount2014
# 2015 Crime data
Crime_2015 <- read_csv("Desktop/texas/Crime_Reports_2015.csv")
category2015 <- count(grep('THEFT', Crime_2015$`Highest Offense Description`, value=TRUE))
theft_count2015 <- sum(category2015$freq)
rowcount2015 <- nrow(Crime_2015)
percent2015 <- theft_count2015/rowcount2015
# 2016 Crime data
Crime_2016 <- read_csv("Desktop/texas/Crime_Reports_2016.csv")
category2016 <- count(grep('THEFT', Crime_2016$`Highest Offense Description`, value=TRUE))
theft_count2016 <- sum(category2016$freq)
rowcount2016 <- nrow(Crime_2016)
percent2016 <- theft_count2016/rowcount2016
# 2017 Crime data
Crime_2017 <- read_csv("Desktop/texas/Crime_Reports_2017.csv")
category2017 <- count(grep('THEFT', Crime_2017$`Highest Offense Description`, value=TRUE))
theft_count2017 <- sum(category2017$freq)
rowcount2017 <- nrow(Crime_2017)
percent2017 <- theft_count2017/rowcount2017
# 2018 Crime data
Crime_2018 <- read_csv("Desktop/texas/Crime_Reports_2018.csv")
category2018 <- count(grep('THEFT', Crime_2018$`Highest Offense Description`, value=TRUE))
theft_count2018 <- sum(category2018$freq)
rowcount2018 <- nrow(Crime_2018)
percent2018 <- theft_count2018/rowcount2018
# Use ggplot to create barchart
library(ggplot2)
library(ggthemes)
library(RColorBrewer)
dev.off()
data<-data.frame(Year = c("2012", "2013", "2014", "2015", "2016", "2017", "2018"),
Theft_Percentage= c(percent2012, percent2013, percent2014, percent2015, percent2016, percent2017, percent2018))
ggplot(data, aes(x=Year, y=Theft_Percentage))+
geom_bar(stat="identity", position="dodge", fill = brewer.pal(7, "Set1")[2], width = 0.7)+
ggtitle("Theft Crime Percentage")+
theme_economist(base_size=10)+
scale_fill_economist()+
theme(axis.ticks.length=unit(0.7,'cm'))+
theme(plot.title = element_text(hjust = 0.45, size = 20, face = "bold"))+
theme(axis.text=element_text(size=12, vjust = 1),
axis.title=element_text(size=14,face = "bold", vjust = 0))
data2<-data.frame(Year = c("2012", "2013", "2014", "2015", "2016", "2017", "2018"),
Total_Crime= c(rowcount2012, rowcount2013, rowcount2014, rowcount2015, rowcount2016, rowcount2017, rowcount2018))
ggplot(data2, aes(x=Year, y=Total_Crime))+
geom_bar(stat="identity", position="dodge", fill = brewer.pal(7, "Set1")[2], width = 0.7)+
ggtitle("Total Crime")+
theme_economist(base_size=10)+
scale_fill_economist()+
theme(axis.ticks.length=unit(0.7,'cm'))+
theme(plot.title = element_text(hjust = 0.45, size = 20, face = "bold"))+
theme(axis.text=element_text(size=12, vjust = 1),
axis.title=element_text(size=14,face = "bold", vjust = 0))
data3<-data.frame(Year = c("2012", "2013", "2014", "2015", "2016", "2017", "2018"),
Total_Theft= c(theft_count2012, theft_count2013, theft_count2014, theft_count2015, theft_count2016, theft_count2017, theft_count2018))
ggplot(data3, aes(x=Year, y=Total_Theft))+
geom_bar(stat="identity", position="dodge", fill = brewer.pal(7, "Set1")[2], width = 0.7)+
ggtitle("Total Theft")+
theme_economist(base_size=10)+
scale_fill_economist()+
theme(axis.ticks.length=unit(0.7,'cm'))+
theme(plot.title = element_text(hjust = 0.45, size = 20, face = "bold"))+
theme(axis.text=element_text(size=12, vjust = 1),
axis.title=element_text(size=14,face = "bold", vjust = 0))
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/neuronlist.R
\name{find.soma}
\alias{find.soma}
\title{Find neurons with soma inside 3d selection box (usually drawn in rgl window)}
\usage{
find.soma(sel3dfun = select3d(), indices = names(db),
db = getOption("nat.default.neuronlist"), invert = FALSE)
}
\arguments{
\item{sel3dfun}{A \code{\link{select3d}} style function
to indicate if points are within region}
\item{indices}{Names of neurons to search (defaults to
all neurons in list)}
\item{db}{\code{neuronlist} to search. Can also be a
character vector naming the neuronlist. Defaults to
\code{options('nat.default.neuronlist')}.}
\item{invert}{Whether to return neurons outside the
selection box (default \code{FALSE})}
}
\value{
Character vector of names of selected neurons
}
\description{
Find neurons with soma inside 3d selection box (usually
drawn in rgl window)
}
\details{
Can work on \code{neuronlist}s containing \code{neuron}
objects \emph{or} \code{neuronlist}s whose attached
data.frame contains soma positions specified in columns
called X,Y,Z .
}
\seealso{
\code{\link{select3d}, \link{subset.neuronlist},
\link{find.neuron}}
}
| /man/find.soma.Rd | no_license | michaeljohndolan/nat | R | false | false | 1,212 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/neuronlist.R
\name{find.soma}
\alias{find.soma}
\title{Find neurons with soma inside 3d selection box (usually drawn in rgl window)}
\usage{
find.soma(sel3dfun = select3d(), indices = names(db),
db = getOption("nat.default.neuronlist"), invert = FALSE)
}
\arguments{
\item{sel3dfun}{A \code{\link{select3d}} style function
to indicate if points are within region}
\item{indices}{Names of neurons to search (defaults to
all neurons in list)}
\item{db}{\code{neuronlist} to search. Can also be a
character vector naming the neuronlist. Defaults to
\code{options('nat.default.neuronlist')}.}
\item{invert}{Whether to return neurons outside the
selection box (default \code{FALSE})}
}
\value{
Character vector of names of selected neurons
}
\description{
Find neurons with soma inside 3d selection box (usually
drawn in rgl window)
}
\details{
Can work on \code{neuronlist}s containing \code{neuron}
objects \emph{or} \code{neuronlist}s whose attached
data.frame contains soma positions specified in columns
called X,Y,Z .
}
\seealso{
\code{\link{select3d}, \link{subset.neuronlist},
\link{find.neuron}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/union_AmCharts.R
\docType{methods}
\name{plot,AmCharts-method}
\alias{plot,AmCharts-method}
\title{PLOTTING METHOD}
\usage{
\S4method{plot}{AmCharts}(x, y, width = "100\%", height = NULL,
background = "#ffffff", ...)
}
\arguments{
\item{x}{\linkS4class{AmChart}}
\item{y}{unused.}
\item{width}{\code{character}.}
\item{height}{\code{character}.B}
\item{background}{\code{character}.}
\item{...}{Other properties.}
}
\description{
Basic method to plot an AmChart
}
\details{
Plots an object of class \code{\linkS4class{AmChart}}
}
| /man/plot.AmChart.Rd | no_license | msabr027/rAmCharts | R | false | true | 616 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/union_AmCharts.R
\docType{methods}
\name{plot,AmCharts-method}
\alias{plot,AmCharts-method}
\title{PLOTTING METHOD}
\usage{
\S4method{plot}{AmCharts}(x, y, width = "100\%", height = NULL,
background = "#ffffff", ...)
}
\arguments{
\item{x}{\linkS4class{AmChart}}
\item{y}{unused.}
\item{width}{\code{character}.}
\item{height}{\code{character}.B}
\item{background}{\code{character}.}
\item{...}{Other properties.}
}
\description{
Basic method to plot an AmChart
}
\details{
Plots an object of class \code{\linkS4class{AmChart}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gs_webapp.R
\name{gs_webapp_auth_url}
\alias{gs_webapp_auth_url}
\title{Build URL for authentication}
\usage{
gs_webapp_auth_url(client_id = getOption("googlesheets.webapp.client_id"),
redirect_uri = getOption("googlesheets.webapp.redirect_uri"),
access_type = "online", approval_prompt = "auto")
}
\arguments{
\item{client_id}{client id obtained from Google Developers Console}
\item{redirect_uri}{where the response is sent, should be one of the
redirect_uri values listed for the project in Google's Developer Console,
must match exactly as listed including any trailing '/'}
\item{access_type}{either "online" (no refresh token) or "offline" (refresh
token), determines whether a refresh token is returned in the response}
\item{approval_prompt}{either "force" or "auto", determines whether the user
is reprompted for consent. If set to "auto", then the user only has to see
the consent page once, the first time through the authorization
sequence. If set to "force", then the user will have to grant consent every
time, even if they have previously done so.}
}
\description{
Build the Google URL that \code{googlesheets} needs to direct users to in
order to authenticate in a Web Server Application. This function is designed
for use in Shiny apps. In contrast, the default authorization sequence in
\code{googlesheets} is appropriate for a user working directly with R on a
local computer, where the default handshakes between the local computer and
Google work just fine. The first step in the Shiny-based workflow is to form
the Google URL where the user can authenticate him or herself with Google.
After success, the response, in the form of an authorization code, is sent to
the \code{redirect_uri} (see below) which \code{\link{gs_webapp_get_token}}
uses to exchange for an access token. This token is then stored in the usual
manner for this package and used for subsequent API requests.
}
\details{
That was the good news. The bad news is you'll need to use the
\href{https://console.developers.google.com}{Google Developers Console} to
\strong{obtain your own client ID and secret and declare the
\code{redirect_uri} specific to your project}. Inform \code{googlesheets} of
this information by providing as function arguments or by defining these
options. For example, you can put lines like this into a Project-specific
\code{.Rprofile} file:
options("googlesheets.webapp.client_id" = MY_CLIENT_ID)
options("googlesheets.webapp.client_secret" = MY_CLIENT_SECRET)
options("googlesheets.webapp.redirect_uri" = MY_REDIRECT_URI)
Based on Google Developers' guide to
\href{https://developers.google.com/identity/protocols/OAuth2WebServer}{Using
OAuth2.0 for Web Server Applications}.
}
\seealso{
\code{\link{gs_webapp_get_token}}
}
| /man/gs_webapp_auth_url.Rd | no_license | colinloftin-awhere/googlesheets | R | false | true | 2,827 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gs_webapp.R
\name{gs_webapp_auth_url}
\alias{gs_webapp_auth_url}
\title{Build URL for authentication}
\usage{
gs_webapp_auth_url(client_id = getOption("googlesheets.webapp.client_id"),
redirect_uri = getOption("googlesheets.webapp.redirect_uri"),
access_type = "online", approval_prompt = "auto")
}
\arguments{
\item{client_id}{client id obtained from Google Developers Console}
\item{redirect_uri}{where the response is sent, should be one of the
redirect_uri values listed for the project in Google's Developer Console,
must match exactly as listed including any trailing '/'}
\item{access_type}{either "online" (no refresh token) or "offline" (refresh
token), determines whether a refresh token is returned in the response}
\item{approval_prompt}{either "force" or "auto", determines whether the user
is reprompted for consent. If set to "auto", then the user only has to see
the consent page once, the first time through the authorization
sequence. If set to "force", then the user will have to grant consent every
time, even if they have previously done so.}
}
\description{
Build the Google URL that \code{googlesheets} needs to direct users to in
order to authenticate in a Web Server Application. This function is designed
for use in Shiny apps. In contrast, the default authorization sequence in
\code{googlesheets} is appropriate for a user working directly with R on a
local computer, where the default handshakes between the local computer and
Google work just fine. The first step in the Shiny-based workflow is to form
the Google URL where the user can authenticate him or herself with Google.
After success, the response, in the form of an authorization code, is sent to
the \code{redirect_uri} (see below) which \code{\link{gs_webapp_get_token}}
uses to exchange for an access token. This token is then stored in the usual
manner for this package and used for subsequent API requests.
}
\details{
That was the good news. The bad news is you'll need to use the
\href{https://console.developers.google.com}{Google Developers Console} to
\strong{obtain your own client ID and secret and declare the
\code{redirect_uri} specific to your project}. Inform \code{googlesheets} of
this information by providing as function arguments or by defining these
options. For example, you can put lines like this into a Project-specific
\code{.Rprofile} file:
options("googlesheets.webapp.client_id" = MY_CLIENT_ID)
options("googlesheets.webapp.client_secret" = MY_CLIENT_SECRET)
options("googlesheets.webapp.redirect_uri" = MY_REDIRECT_URI)
Based on Google Developers' guide to
\href{https://developers.google.com/identity/protocols/OAuth2WebServer}{Using
OAuth2.0 for Web Server Applications}.
}
\seealso{
\code{\link{gs_webapp_get_token}}
}
|
# Shiny server: upload a CSV of coordinates and plot each row as a circle
# marker on a leaflet map. The uploaded file must contain `longitude` and
# `latitude` columns.
library('shiny')
library('leaflet')  # leaflet()/leafletProxy() are used below but were never loaded
# Allow very large uploads (20000 * 1024^2 bytes, i.e. ~20 GB)
options(shiny.maxRequestSize = 20000*1024^2)
shinyServer(function(input, output, session) {
  # Render the base map with one circle marker per row of the uploaded CSV.
  output$mymap <- renderLeaflet({
    req(input$file1)  # wait until a file has actually been uploaded
    df <- read.csv(input$file1$datapath,
                   header = input$header,
                   sep = input$sep,
                   quote = input$quote)
    leaflet(df) %>%
      addProviderTiles("OpenStreetMap.Mapnik") %>%
      addCircleMarkers(
        lng = ~longitude,
        lat = ~latitude,
        radius = 3,
        weight = 2
      )
  })
  # Remove a marker from the map when it is clicked.
  observeEvent(input$mymap_marker_click, {
    # BUG FIX: the click event for a map with outputId "mymap" is
    # `input$mymap_marker_click`, not `input$map1_marker_click`; the old
    # code always passed NULL to removeMarker().
    # NOTE(review): removeMarker() matches on layerId, and the markers above
    # are added without one -- confirm a `layerId =` should be set in
    # addCircleMarkers() for removal to take effect.
    leafletProxy("mymap", session) %>%
      removeMarker(input$mymap_marker_click$id)
  })
})
| /server.R | no_license | miniroses/minioses | R | false | false | 698 | r | library('shiny')
library('leaflet')  # leaflet()/leafletProxy() are used below but were never loaded
# Allow very large uploads (20000 * 1024^2 bytes, i.e. ~20 GB)
options(shiny.maxRequestSize = 20000*1024^2)
# Shiny server: upload a CSV of coordinates and plot each row as a circle
# marker on a leaflet map. The uploaded file must contain `longitude` and
# `latitude` columns.
shinyServer(function(input, output, session) {
  # Render the base map with one circle marker per row of the uploaded CSV.
  output$mymap <- renderLeaflet({
    req(input$file1)  # wait until a file has actually been uploaded
    df <- read.csv(input$file1$datapath,
                   header = input$header,
                   sep = input$sep,
                   quote = input$quote)
    leaflet(df) %>%
      addProviderTiles("OpenStreetMap.Mapnik") %>%
      addCircleMarkers(
        lng = ~longitude,
        lat = ~latitude,
        radius = 3,
        weight = 2
      )
  })
  # Remove a marker from the map when it is clicked.
  observeEvent(input$mymap_marker_click, {
    # BUG FIX: the click event for a map with outputId "mymap" is
    # `input$mymap_marker_click`, not `input$map1_marker_click`; the old
    # code always passed NULL to removeMarker().
    # NOTE(review): removeMarker() matches on layerId, and the markers above
    # are added without one -- confirm a `layerId =` should be set in
    # addCircleMarkers() for removal to take effect.
    leafletProxy("mymap", session) %>%
      removeMarker(input$mymap_marker_click$id)
  })
})
|
#####################################################################
# Load, Process and Save Air Temperature Data #
# REIPAA #
# #
# Homogenise time interval and Extract daily values when #
# observation interval is higher. Also deal with NA values #
# #
# Author: PiM Lefeuvre Date: 2014-09-03 #
# Raw Data are resampled to provide regular time series #
#####################################################################
###########################################
# Clean up Workspace
# rm(list = ls(all = TRUE))
###########################################
# Set Path
# NOTE(review): assumes `path.wd` is defined by a calling script before this
# file is sourced -- confirm.
setwd(path.wd)
Sys.setenv(TZ="UTC")
# Load libraries
library(chron)
library(hydroTSM) # cmd: izoo2rzoo
library(zoo)
# Load User functions
source("../../UserFunction/subsample.R")
source("../../UserFunction/axPOSIX.R")
source("../../UserFunction/remove_col.R")
############################################
# Load data
# NOTE: `skip=` differs per file because the length of the text header
# written by eKlima varies between exports.
Raw1995 <- read.csv("Raw/Reipaa_TP_1995-1996.txt",sep=';',
as.is=T,skip=30,header=T,blank.lines.skip=T)
Raw1996 <- read.csv("Raw/Reipaa_TP_1996-1998.txt",sep = ';',
as.is=T,skip=26,header=T,blank.lines.skip=T)
Raw1998 <- read.csv("Raw/Reipaa_TP_1998-2000.txt",sep = ';',
as.is=T,skip=26,header=T,blank.lines.skip=T)
Raw2000 <- read.csv("Raw/Reipaa_TP_2000-2002.txt",sep = ';',
as.is=T,skip=26,header=T,blank.lines.skip=T)
Raw2002 <- read.csv("Raw/Reipaa_TP_2002-2004.txt",sep = ';',
as.is=T,skip=26,header=T,blank.lines.skip=T)
Raw2004 <- read.csv("Raw/Reipaa_TP_2004-2006.txt",sep = ';',
as.is=T,skip=26,header=T,blank.lines.skip=T)
Raw2006 <- read.csv("Raw/Reipaa_TP_2006-2008.txt",sep = ';',
as.is=T,skip=26,header=T,blank.lines.skip=T)
Raw2008 <- read.csv("Raw/Reipaa_TP_2008-2010.txt",sep = ';',
as.is=T,skip=33,header=T,blank.lines.skip=T)
Raw2010 <- read.csv("Raw/Reipaa_TP_2010-2012.txt",sep = ';',
as.is=T,skip=26,header=T,blank.lines.skip=T)
Raw2012 <- read.csv("Raw/Reipaa_TP_2012-2014.txt",sep = ';',
as.is=T,skip=28,header=T,blank.lines.skip=T)
# Remove tail, which is some text generated by eKlima (2 lines)
# NOTE: sometimes more to avoid overlap between two years
Raw1995 <- Raw1995[1:(nrow(Raw1995)-5),]
Raw1996 <- Raw1996[1:(nrow(Raw1996)-2),]
Raw1998 <- Raw1998[1:(nrow(Raw1998)-2),]
Raw2000 <- Raw2000[1:(nrow(Raw2000)-2),]
Raw2002 <- Raw2002[1:(nrow(Raw2002)-2),]
Raw2004 <- Raw2004[1:(nrow(Raw2004)-2),]
Raw2006 <- Raw2006[1:(nrow(Raw2006)-2),]
Raw2008 <- Raw2008[1:(nrow(Raw2008)-8),]
Raw2010 <- Raw2010[1:(nrow(Raw2010)-2),]
Raw2012 <- Raw2012[1:(nrow(Raw2012)-5),] #Ends 2013/02/01 24:00
# Combine all Data
Raw <- rbind(Raw1995,Raw1996,Raw1998,Raw2000,Raw2002,Raw2004,
Raw2006,Raw2008,Raw2010,Raw2012)
# (debug) autoprints the class of the combined table when run interactively
class(Raw)
# Clear memory
# rm(Raw1995,Raw1996,Raw1998,Raw2000,Raw2002,Raw2004,Raw2006,Raw2008,Raw2010,Raw2012)
# Replace "x" (Very uncertain data) by NA.
Raw$TA[Raw$TA=="x"] <- NA
# Convert character into numeric (the "x" flags forced the column to be
# read as character)
Raw$TA <-as.numeric(Raw$TA)
############################################
############################################
# PROCESSING NA and Zero VALUES
# Remove NA values (-9999 is treated as a missing-data sentinel)
is.na(Raw) <- (Raw == -9999)
# Remove empty data columns (containing only NA)
Output <- remove_col(Raw)
list2env(Output,env=environment())
rm(Output)
############################################
############################################
## DATES
# Format Dates in POSIXlt - POSIXct
Dates <- as.POSIXct(strptime(paste(Raw$Year,Raw$Mnth,
Raw$Date, Raw$Time.NMT.),
"%Y %m %d %H"))
# (debug) autoprints the class of the date vector when run interactively
class(Dates)
############################################
############################################
# Transform in zoo object. Remove the column "Dates" because replaced by order.by and the column Water_level2 because the data do not show anything interesting (only 2009-2010)
# NOTE(review): `lcol` appears to be unused below -- confirm before removing.
lcol <- ncol(Raw)
ts.Raw <- zoo(x=Raw$TA, order.by=Dates)
############
# SUBSAMPLING
# Window boundaries in "%Y-%j" (year-dayofyear) form, as used by subsample()
sub.start <- "2009-001"
sub.end <- "2014-001"
ts.sub <- subsample(ts.Raw,sub.start,sub.end,F)
############
# Regularly spaced zoo
ts.reg <- izoo2rzoo(ts.sub,date.fmt="%Y-%m-%d %H:%M:%S", tstep="hour")
# # Approximate only when the gap (No Values) is one hour only.
# ts.reg <- na.approx(ts.reg,maxgap=1)
############################################
# Plot
plot(ts.reg, type="p", col="orange3",pch=".",xaxt="n",
main="Air Temperature from Reipaa",
xlab="Time [year]",ylab="Temperature [C]")
abline(h=0,lty=3)
# Time axis
axPOSIX(ts.sub,"years","%Y")
############################################
############################################
# SAVE
# Save data in zoo format
path <- "../../Processing/Data/MetData/AirTemp"
write.zoo(ts.reg,file=sprintf("%s/Reipaa_AT_1hr_full.csv",path),sep=",")
### ARCHIVE
### ARCHIVE
| /MetData/Engabreen_Temperature_Precipitation/LoadAT_Reipaa_save.R | no_license | pmlefeuvre/NVE_work | R | false | false | 5,567 | r |
#####################################################################
# Load, Process and Save Air Temperature Data #
# REIPAA #
# #
# Homogenise time interval and Extract daily values when #
# observation interval is higher. Also deal with NA values #
# #
# Author: PiM Lefeuvre Date: 2014-09-03 #
# Raw Data are resampled to provide regular time series #
#####################################################################
###########################################
# Clean up Workspace
# rm(list = ls(all = TRUE))
###########################################
# Set Path
# NOTE(review): assumes `path.wd` is defined by a calling script before this
# file is sourced -- confirm.
setwd(path.wd)
Sys.setenv(TZ="UTC")
# Load libraries
library(chron)
library(hydroTSM) # cmd: izoo2rzoo
library(zoo)
# Load User functions
source("../../UserFunction/subsample.R")
source("../../UserFunction/axPOSIX.R")
source("../../UserFunction/remove_col.R")
############################################
# Load data
# NOTE: `skip=` differs per file because the length of the text header
# written by eKlima varies between exports.
Raw1995 <- read.csv("Raw/Reipaa_TP_1995-1996.txt",sep=';',
as.is=T,skip=30,header=T,blank.lines.skip=T)
Raw1996 <- read.csv("Raw/Reipaa_TP_1996-1998.txt",sep = ';',
as.is=T,skip=26,header=T,blank.lines.skip=T)
Raw1998 <- read.csv("Raw/Reipaa_TP_1998-2000.txt",sep = ';',
as.is=T,skip=26,header=T,blank.lines.skip=T)
Raw2000 <- read.csv("Raw/Reipaa_TP_2000-2002.txt",sep = ';',
as.is=T,skip=26,header=T,blank.lines.skip=T)
Raw2002 <- read.csv("Raw/Reipaa_TP_2002-2004.txt",sep = ';',
as.is=T,skip=26,header=T,blank.lines.skip=T)
Raw2004 <- read.csv("Raw/Reipaa_TP_2004-2006.txt",sep = ';',
as.is=T,skip=26,header=T,blank.lines.skip=T)
Raw2006 <- read.csv("Raw/Reipaa_TP_2006-2008.txt",sep = ';',
as.is=T,skip=26,header=T,blank.lines.skip=T)
Raw2008 <- read.csv("Raw/Reipaa_TP_2008-2010.txt",sep = ';',
as.is=T,skip=33,header=T,blank.lines.skip=T)
Raw2010 <- read.csv("Raw/Reipaa_TP_2010-2012.txt",sep = ';',
as.is=T,skip=26,header=T,blank.lines.skip=T)
Raw2012 <- read.csv("Raw/Reipaa_TP_2012-2014.txt",sep = ';',
as.is=T,skip=28,header=T,blank.lines.skip=T)
# Remove tail, which is some text generated by eKlima (2 lines)
# NOTE: sometimes more to avoid overlap between two years
Raw1995 <- Raw1995[1:(nrow(Raw1995)-5),]
Raw1996 <- Raw1996[1:(nrow(Raw1996)-2),]
Raw1998 <- Raw1998[1:(nrow(Raw1998)-2),]
Raw2000 <- Raw2000[1:(nrow(Raw2000)-2),]
Raw2002 <- Raw2002[1:(nrow(Raw2002)-2),]
Raw2004 <- Raw2004[1:(nrow(Raw2004)-2),]
Raw2006 <- Raw2006[1:(nrow(Raw2006)-2),]
Raw2008 <- Raw2008[1:(nrow(Raw2008)-8),]
Raw2010 <- Raw2010[1:(nrow(Raw2010)-2),]
Raw2012 <- Raw2012[1:(nrow(Raw2012)-5),] #Ends 2013/02/01 24:00
# Combine all Data
Raw <- rbind(Raw1995,Raw1996,Raw1998,Raw2000,Raw2002,Raw2004,
Raw2006,Raw2008,Raw2010,Raw2012)
# (debug) autoprints the class of the combined table when run interactively
class(Raw)
# Clear memory
# rm(Raw1995,Raw1996,Raw1998,Raw2000,Raw2002,Raw2004,Raw2006,Raw2008,Raw2010,Raw2012)
# Replace "x" (Very uncertain data) by NA.
Raw$TA[Raw$TA=="x"] <- NA
# Convert character into numeric (the "x" flags forced the column to be
# read as character)
Raw$TA <-as.numeric(Raw$TA)
############################################
############################################
# PROCESSING NA and Zero VALUES
# Remove NA values (-9999 is treated as a missing-data sentinel)
is.na(Raw) <- (Raw == -9999)
# Remove empty data columns (containing only NA)
Output <- remove_col(Raw)
list2env(Output,env=environment())
rm(Output)
############################################
############################################
## DATES
# Format Dates in POSIXlt - POSIXct
Dates <- as.POSIXct(strptime(paste(Raw$Year,Raw$Mnth,
Raw$Date, Raw$Time.NMT.),
"%Y %m %d %H"))
# (debug) autoprints the class of the date vector when run interactively
class(Dates)
############################################
############################################
# Transform in zoo object. Remove the column "Dates" because replaced by order.by and the column Water_level2 because the data do not show anything interesting (only 2009-2010)
# NOTE(review): `lcol` appears to be unused below -- confirm before removing.
lcol <- ncol(Raw)
ts.Raw <- zoo(x=Raw$TA, order.by=Dates)
############
# SUBSAMPLING
# Window boundaries in "%Y-%j" (year-dayofyear) form, as used by subsample()
sub.start <- "2009-001"
sub.end <- "2014-001"
ts.sub <- subsample(ts.Raw,sub.start,sub.end,F)
############
# Regularly spaced zoo
ts.reg <- izoo2rzoo(ts.sub,date.fmt="%Y-%m-%d %H:%M:%S", tstep="hour")
# # Approximate only when the gap (No Values) is one hour only.
# ts.reg <- na.approx(ts.reg,maxgap=1)
############################################
# Plot
plot(ts.reg, type="p", col="orange3",pch=".",xaxt="n",
main="Air Temperature from Reipaa",
xlab="Time [year]",ylab="Temperature [C]")
abline(h=0,lty=3)
# Time axis
axPOSIX(ts.sub,"years","%Y")
############################################
############################################
# SAVE
# Save data in zoo format
path <- "../../Processing/Data/MetData/AirTemp"
write.zoo(ts.reg,file=sprintf("%s/Reipaa_AT_1hr_full.csv",path),sep=",")
### ARCHIVE
|
# Demo script: summing a vector with explicit loops and the vectorised
# equivalents (sum / logical subsetting).
v <- c(19, 5, 2, 19, 29, 42, 32, 35, 25, 18, 6, 22, 7, 28, 11, 8, 9, 37, 45, 23)
# Sum by index loop. seq_along(v) is the safe idiom: 1:length(v) yields
# c(1, 0) for an empty vector and would iterate twice instead of zero times.
suma <- 0
for (i in seq_along(v)) {
  suma <- suma + v[i]
}
suma
# Sum by looping over the values directly.
suma <- 0
for (i in v) {
  suma <- suma + i
}
suma
# Vectorised equivalent of both loops above (sum(suma) summed a scalar and
# was a no-op; the intended demonstration is sum over the vector itself).
sum(v)
# Sum and count of the elements strictly greater than the threshold `riba`.
riba <- 30
suma <- 0
kiek <- 0
for (i in seq_along(v)) {
  if (v[i] > riba) {
    suma <- suma + v[i]
    kiek <- kiek + 1
  }
}
suma
kiek
# Vectorised equivalents: subset-then-sum, and counting TRUEs.
sum(v[v > riba])
sum(v > riba)
# Print every odd-indexed element, then extract them by logical position.
nelyginiai <- seq(1, length(v), by = 2)
for (i in nelyginiai) {
  cat("elemento", i, "reiksme:", v[i], "\n")
}
v[nelyginiai]
zodis <- c("L", "I", "E", "T", "U", "V", "A")
| /R/3_3/main.R | no_license | mnorkin/magistras-3 | R | false | false | 582 | r | v <- c(19, 5, 2, 19, 29, 42, 32, 35, 25, 18, 6, 22, 7, 28, 11, 8, 9, 37, 45, 23)
# Sum by index loop. seq_along(v) is the safe idiom: 1:length(v) yields
# c(1, 0) for an empty vector and would iterate twice instead of zero times.
suma <- 0
for (i in seq_along(v)) {
  suma <- suma + v[i]
}
suma
# Sum by looping over the values directly.
suma <- 0
for (i in v) {
  suma <- suma + i
}
suma
# Vectorised equivalent of both loops above (sum(suma) summed a scalar and
# was a no-op; the intended demonstration is sum over the vector itself).
sum(v)
# Sum and count of the elements strictly greater than the threshold `riba`.
riba <- 30
suma <- 0
kiek <- 0
for (i in seq_along(v)) {
  if (v[i] > riba) {
    suma <- suma + v[i]
    kiek <- kiek + 1
  }
}
suma
kiek
# Vectorised equivalents: subset-then-sum, and counting TRUEs.
sum(v[v > riba])
sum(v > riba)
# Print every odd-indexed element, then extract them by logical position.
nelyginiai <- seq(1, length(v), by = 2)
for (i in nelyginiai) {
  cat("elemento", i, "reiksme:", v[i], "\n")
}
v[nelyginiai]
zodis <- c("L", "I", "E", "T", "U", "V", "A")
|
#' Dataset of agricultural plant output and area in chapter 11
#'
#' A dataset containing the two variables plant_area and output_value for
#' 29 observations.
#'
#' @format A data frame with 29 rows and 2 variables:
#' \describe{
#'   \item{plant_area}{area used for planting}
#'   \item{output_value}{output value}
#' }
"plantarea_outputvalue"
| /R/data_plantarea_outputvalue.R | no_license | zhaoxue-xmu/RDA | R | false | false | 301 | r | #'Dataset of agritural plant output and area in chapter11
#'
#' A dataset containing the two variables plant_area and output_value for
#' 29 observations.
#'
#' @format A data frame with 29 rows and 2 variables:
#' \describe{
#'   \item{plant_area}{area used for planting}
#'   \item{output_value}{output value}
#' }
"plantarea_outputvalue"
|
structure(list(
url = "https://api.sleeper.app/v1/league/522458773317046272/matchups/5/",
status_code = 200L, headers = structure(list(
date = "Thu, 08 Oct 2020 12:17:05 GMT",
`content-type` = "application/json; charset=utf-8", `transfer-encoding` = "chunked",
connection = "keep-alive", vary = "Accept-Encoding",
`cache-control` = "max-age=0, private, must-revalidate",
`x-request-id` = "f903feaec902f6042709e8efa576a5b7",
`access-control-allow-origin` = "*", `access-control-expose-headers` = "etag",
`access-control-allow-credentials` = "true", `strict-transport-security` = "max-age=15724800; includeSubDomains",
`content-encoding` = "gzip", `cf-cache-status` = "MISS",
`cf-request-id` = "05a9bdd07b0000ecaf7b010200000001",
`expect-ct` = "max-age=604800, report-uri=\"https://report-uri.cloudflare.com/cdn-cgi/beacon/expect-ct\"",
server = "cloudflare", `cf-ray` = "5defcbfa5c66ecaf-DFW"
), class = c(
"insensitive",
"list"
)), all_headers = list(list(
status = 200L, version = "HTTP/1.1",
headers = structure(list(
date = "Thu, 08 Oct 2020 12:17:05 GMT",
`content-type` = "application/json; charset=utf-8",
`transfer-encoding` = "chunked", connection = "keep-alive",
vary = "Accept-Encoding", `cache-control` = "max-age=0, private, must-revalidate",
`x-request-id` = "f903feaec902f6042709e8efa576a5b7",
`access-control-allow-origin` = "*", `access-control-expose-headers` = "etag",
`access-control-allow-credentials` = "true", `strict-transport-security` = "max-age=15724800; includeSubDomains",
`content-encoding` = "gzip", `cf-cache-status` = "MISS",
`cf-request-id` = "05a9bdd07b0000ecaf7b010200000001",
`expect-ct` = "max-age=604800, report-uri=\"https://report-uri.cloudflare.com/cdn-cgi/beacon/expect-ct\"",
server = "cloudflare", `cf-ray` = "5defcbfa5c66ecaf-DFW"
), class = c(
"insensitive",
"list"
))
)), cookies = structure(list(
domain = "#HttpOnly_.sleeper.app",
flag = TRUE, path = "/", secure = TRUE, expiration = structure(1604620002, class = c(
"POSIXct",
"POSIXt"
)), name = "__cfduid", value = "REDACTED"
), row.names = c(
NA,
-1L
), class = "data.frame"), content = charToRaw("[{\"starters\":[\"289\",\"4098\",\"4137\",\"1426\",\"6149\",\"4144\",\"1825\",\"344\"],\"roster_id\":1,\"points\":0.0,\"players\":[\"2025\",\"4089\",\"6068\",\"1339\",\"5068\",\"5965\",\"289\",\"4199\",\"4037\",\"421\",\"6804\",\"4080\",\"7082\",\"6001\",\"1706\",\"4149\",\"4144\",\"1426\",\"6826\",\"344\",\"1825\",\"2822\",\"4150\",\"5022\",\"6149\",\"1110\",\"4992\",\"4098\",\"7407\",\"2197\",\"4866\",\"4137\",\"4252\"],\"matchup_id\":2,\"custom_points\":null},{\"starters\":[\"333\",\"6790\",\"3594\",\"2319\",\"6783\",\"5010\",\"6786\",\"0\"],\"roster_id\":2,\"points\":0.0,\"players\":[\"6919\",\"6849\",\"6823\",\"6801\",\"6790\",\"6786\",\"6783\",\"6032\",\"6011\",\"5884\",\"5854\",\"5845\",\"5130\",\"5010\",\"4985\",\"4036\",\"3594\",\"333\",\"2319\",\"1592\"],\"matchup_id\":1,\"custom_points\":null},{\"starters\":[\"4046\",\"1848\",\"2588\",\"2309\",\"1479\",\"4054\",\"947\",\"6148\"],\"roster_id\":3,\"points\":0.0,\"players\":[\"947\",\"788\",\"695\",\"6904\",\"6895\",\"6886\",\"6694\",\"6148\",\"5906\",\"58\",\"5374\",\"5131\",\"5113\",\"5107\",\"5013\",\"4988\",\"4892\",\"4663\",\"4066\",\"4054\",\"4046\",\"4034\",\"2749\",\"2673\",\"2588\",\"2309\",\"223\",\"1848\",\"167\",\"1479\",\"1346\",\"1067\"],\"matchup_id\":5,\"custom_points\":null},{\"starters\":[\"3294\",\"2431\",\"2315\",\"3321\",\"1992\",\"4973\",\"4981\",\"2216\"],\"roster_id\":4,\"points\":0.0,\"players\":[\"7227\",\"6960\",\"6951\",\"6885\",\"6853\",\"6820\",\"6271\",\"5937\",\"5916\",\"5857\",\"5848\",\"4995\",\"4981\",\"4973\",\"4157\",\"4038\",\"3423\",\"3321\",\"3294\",\"3242\",\"3163\",\"2431\",\"2320\",\"2315\",\"2257\",\"2216\",\"2161\",\"1992\",\"138\"],\"matchup_id\":3,\"custom_points\":null},{\"starters\":[\"4984\",\"3198\",\"6813\",\"4039\",\"4082\",\"2505\",\"5890\",\"7045\"],\"roster_id\":5,\"points\":0.0,\"players\":[\"928\",\"7045\",\"6908\",\"6847\",\"6843\",\"6819\",\"6813\",\"6744\",\"6402\",\"6074\",\"5985\",\"5915\",\"5890\",\
"5889\",\"5863\",\"5248\",\"5100\",\"5009\",\"5004\",\"5000\",\"4984\",\"4464\",\"4171\",\"4152\",\"4082\",\"4039\",\"4017\",\"3200\",\"3198\",\"2505\",\"2251\",\"1244\"],\"matchup_id\":6,\"custom_points\":null},{\"starters\":[\"6770\",\"5892\",\"1387\",\"4983\",\"1352\",\"4217\",\"5038\",\"3157\"],\"roster_id\":6,\"points\":0.0,\"players\":[\"954\",\"6845\",\"1352\",\"6139\",\"6770\",\"1502\",\"1817\",\"6850\",\"1387\",\"6878\",\"6798\",\"5162\",\"3868\",\"943\",\"5038\",\"1029\",\"6126\",\"1388\",\"616\",\"4217\",\"4951\",\"4319\",\"4983\",\"4033\",\"3157\",\"6805\",\"5973\",\"2346\",\"5892\",\"6943\",\"6797\",\"5878\",\"6290\"],\"matchup_id\":3,\"custom_points\":null},{\"starters\":[\"1234\",\"6955\",\"4273\",\"2449\",\"4040\",\"5001\",\"2391\",\"538\"],\"roster_id\":7,\"points\":0.0,\"players\":[\"956\",\"7107\",\"7090\",\"7086\",\"6955\",\"6913\",\"6909\",\"6519\",\"6156\",\"6012\",\"6007\",\"538\",\"536\",\"5121\",\"5046\",\"5001\",\"4274\",\"4273\",\"4187\",\"4068\",\"4040\",\"3969\",\"3286\",\"2449\",\"24\",\"2391\",\"2381\",\"2325\",\"2118\",\"1329\",\"1234\",\"1149\"],\"matchup_id\":1,\"custom_points\":null},{\"starters\":[\"1049\",\"4035\",\"6151\",\"5872\",\"6794\",\"5032\",\"0\",\"5980\"],\"roster_id\":8,\"points\":0.0,\"players\":[\"6938\",\"6906\",\"6824\",\"6794\",\"6557\",\"6151\",\"5987\",\"5980\",\"5886\",\"5872\",\"5870\",\"5844\",\"5170\",\"515\",\"5045\",\"5032\",\"4949\",\"4622\",\"4602\",\"4351\",\"4218\",\"4035\",\"3328\",\"1166\",\"1049\"],\"matchup_id\":2,\"custom_points\":null},{\"starters\":[\"4881\",\"6130\",\"4018\",\"3225\",\"5927\",\"3214\",\"6945\",\"5052\"],\"roster_id\":9,\"points\":0.0,\"players\":[\"7085\",\"6957\",\"6945\",\"6927\",\"6920\",\"6866\",\"6789\",\"6421\",\"6239\",\"6130\",\"5995\",\"5927\",\"5880\",\"5185\",\"5052\",\"5024\",\"4950\",\"4881\",\"4571\",\"4455\",\"4454\",\"4055\",\"4018\",\"3225\",\"3214\",\"3202\",\"2331\",\"2168\",\"1837\",\"1408\",\"1386\"],\"matchup_id\":6,\"custom_points\":null},{\"starters\":[\
"3161\",\"3306\",\"5122\",\"5846\",\"4131\",\"4993\",\"2078\",\"7135\"],\"roster_id\":10,\"points\":0.0,\"players\":[\"829\",\"7135\",\"6996\",\"5846\",\"5549\",\"5122\",\"5026\",\"4993\",\"4718\",\"4696\",\"4131\",\"367\",\"331\",\"3306\",\"3161\",\"240\",\"2399\",\"2214\",\"2078\",\"1984\",\"1833\",\"1535\",\"1379\",\"1071\"],\"matchup_id\":4,\"custom_points\":null},{\"starters\":[\"5849\",\"5850\",\"6806\",\"3199\",\"1689\",\"1466\",\"2410\",\"6803\"],\"roster_id\":11,\"points\":0.0,\"players\":[\"7064\",\"6956\",\"6931\",\"6857\",\"6806\",\"6803\",\"6699\",\"5955\",\"5917\",\"5859\",\"5850\",\"5849\",\"5347\",\"5110\",\"4962\",\"4943\",\"4741\",\"4381\",\"3668\",\"3664\",\"3199\",\"2410\",\"2382\",\"232\",\"2028\",\"1911\",\"184\",\"1689\",\"1476\",\"1466\"],\"matchup_id\":5,\"custom_points\":null},{\"starters\":[\"2152\",\"3164\",\"4029\",\"830\",\"6814\",\"5012\",\"147\",\"2374\"],\"roster_id\":12,\"points\":0.0,\"players\":[\"96\",\"830\",\"6870\",\"6869\",\"6828\",\"6814\",\"6768\",\"642\",\"6136\",\"5967\",\"5911\",\"5323\",\"5284\",\"5086\",\"5012\",\"4146\",\"4029\",\"3271\",\"3164\",\"2378\",\"2374\",\"2306\",\"2238\",\"2152\",\"2133\",\"1793\",\"1555\",\"1500\",\"147\",\"1144\"],\"matchup_id\":4,\"custom_points\":null}]"),
date = structure(1602159425, class = c("POSIXct", "POSIXt"), tzone = "GMT"), times = c(
redirect = 0, namelookup = 6.6e-05,
connect = 6.8e-05, pretransfer = 0.000179, starttransfer = 0.179371,
total = 0.266308
)
), class = "response")
| /tests/testthat/api.sleeper.app/v1/league/522458773317046272/matchups/5.R | permissive | tonyelhabr/ffscrapr | R | false | false | 7,753 | r | structure(list(
url = "https://api.sleeper.app/v1/league/522458773317046272/matchups/5/",
status_code = 200L, headers = structure(list(
date = "Thu, 08 Oct 2020 12:17:05 GMT",
`content-type` = "application/json; charset=utf-8", `transfer-encoding` = "chunked",
connection = "keep-alive", vary = "Accept-Encoding",
`cache-control` = "max-age=0, private, must-revalidate",
`x-request-id` = "f903feaec902f6042709e8efa576a5b7",
`access-control-allow-origin` = "*", `access-control-expose-headers` = "etag",
`access-control-allow-credentials` = "true", `strict-transport-security` = "max-age=15724800; includeSubDomains",
`content-encoding` = "gzip", `cf-cache-status` = "MISS",
`cf-request-id` = "05a9bdd07b0000ecaf7b010200000001",
`expect-ct` = "max-age=604800, report-uri=\"https://report-uri.cloudflare.com/cdn-cgi/beacon/expect-ct\"",
server = "cloudflare", `cf-ray` = "5defcbfa5c66ecaf-DFW"
), class = c(
"insensitive",
"list"
)), all_headers = list(list(
status = 200L, version = "HTTP/1.1",
headers = structure(list(
date = "Thu, 08 Oct 2020 12:17:05 GMT",
`content-type` = "application/json; charset=utf-8",
`transfer-encoding` = "chunked", connection = "keep-alive",
vary = "Accept-Encoding", `cache-control` = "max-age=0, private, must-revalidate",
`x-request-id` = "f903feaec902f6042709e8efa576a5b7",
`access-control-allow-origin` = "*", `access-control-expose-headers` = "etag",
`access-control-allow-credentials` = "true", `strict-transport-security` = "max-age=15724800; includeSubDomains",
`content-encoding` = "gzip", `cf-cache-status` = "MISS",
`cf-request-id` = "05a9bdd07b0000ecaf7b010200000001",
`expect-ct` = "max-age=604800, report-uri=\"https://report-uri.cloudflare.com/cdn-cgi/beacon/expect-ct\"",
server = "cloudflare", `cf-ray` = "5defcbfa5c66ecaf-DFW"
), class = c(
"insensitive",
"list"
))
)), cookies = structure(list(
domain = "#HttpOnly_.sleeper.app",
flag = TRUE, path = "/", secure = TRUE, expiration = structure(1604620002, class = c(
"POSIXct",
"POSIXt"
)), name = "__cfduid", value = "REDACTED"
), row.names = c(
NA,
-1L
), class = "data.frame"), content = charToRaw("[{\"starters\":[\"289\",\"4098\",\"4137\",\"1426\",\"6149\",\"4144\",\"1825\",\"344\"],\"roster_id\":1,\"points\":0.0,\"players\":[\"2025\",\"4089\",\"6068\",\"1339\",\"5068\",\"5965\",\"289\",\"4199\",\"4037\",\"421\",\"6804\",\"4080\",\"7082\",\"6001\",\"1706\",\"4149\",\"4144\",\"1426\",\"6826\",\"344\",\"1825\",\"2822\",\"4150\",\"5022\",\"6149\",\"1110\",\"4992\",\"4098\",\"7407\",\"2197\",\"4866\",\"4137\",\"4252\"],\"matchup_id\":2,\"custom_points\":null},{\"starters\":[\"333\",\"6790\",\"3594\",\"2319\",\"6783\",\"5010\",\"6786\",\"0\"],\"roster_id\":2,\"points\":0.0,\"players\":[\"6919\",\"6849\",\"6823\",\"6801\",\"6790\",\"6786\",\"6783\",\"6032\",\"6011\",\"5884\",\"5854\",\"5845\",\"5130\",\"5010\",\"4985\",\"4036\",\"3594\",\"333\",\"2319\",\"1592\"],\"matchup_id\":1,\"custom_points\":null},{\"starters\":[\"4046\",\"1848\",\"2588\",\"2309\",\"1479\",\"4054\",\"947\",\"6148\"],\"roster_id\":3,\"points\":0.0,\"players\":[\"947\",\"788\",\"695\",\"6904\",\"6895\",\"6886\",\"6694\",\"6148\",\"5906\",\"58\",\"5374\",\"5131\",\"5113\",\"5107\",\"5013\",\"4988\",\"4892\",\"4663\",\"4066\",\"4054\",\"4046\",\"4034\",\"2749\",\"2673\",\"2588\",\"2309\",\"223\",\"1848\",\"167\",\"1479\",\"1346\",\"1067\"],\"matchup_id\":5,\"custom_points\":null},{\"starters\":[\"3294\",\"2431\",\"2315\",\"3321\",\"1992\",\"4973\",\"4981\",\"2216\"],\"roster_id\":4,\"points\":0.0,\"players\":[\"7227\",\"6960\",\"6951\",\"6885\",\"6853\",\"6820\",\"6271\",\"5937\",\"5916\",\"5857\",\"5848\",\"4995\",\"4981\",\"4973\",\"4157\",\"4038\",\"3423\",\"3321\",\"3294\",\"3242\",\"3163\",\"2431\",\"2320\",\"2315\",\"2257\",\"2216\",\"2161\",\"1992\",\"138\"],\"matchup_id\":3,\"custom_points\":null},{\"starters\":[\"4984\",\"3198\",\"6813\",\"4039\",\"4082\",\"2505\",\"5890\",\"7045\"],\"roster_id\":5,\"points\":0.0,\"players\":[\"928\",\"7045\",\"6908\",\"6847\",\"6843\",\"6819\",\"6813\",\"6744\",\"6402\",\"6074\",\"5985\",\"5915\",\"5890\",\
"5889\",\"5863\",\"5248\",\"5100\",\"5009\",\"5004\",\"5000\",\"4984\",\"4464\",\"4171\",\"4152\",\"4082\",\"4039\",\"4017\",\"3200\",\"3198\",\"2505\",\"2251\",\"1244\"],\"matchup_id\":6,\"custom_points\":null},{\"starters\":[\"6770\",\"5892\",\"1387\",\"4983\",\"1352\",\"4217\",\"5038\",\"3157\"],\"roster_id\":6,\"points\":0.0,\"players\":[\"954\",\"6845\",\"1352\",\"6139\",\"6770\",\"1502\",\"1817\",\"6850\",\"1387\",\"6878\",\"6798\",\"5162\",\"3868\",\"943\",\"5038\",\"1029\",\"6126\",\"1388\",\"616\",\"4217\",\"4951\",\"4319\",\"4983\",\"4033\",\"3157\",\"6805\",\"5973\",\"2346\",\"5892\",\"6943\",\"6797\",\"5878\",\"6290\"],\"matchup_id\":3,\"custom_points\":null},{\"starters\":[\"1234\",\"6955\",\"4273\",\"2449\",\"4040\",\"5001\",\"2391\",\"538\"],\"roster_id\":7,\"points\":0.0,\"players\":[\"956\",\"7107\",\"7090\",\"7086\",\"6955\",\"6913\",\"6909\",\"6519\",\"6156\",\"6012\",\"6007\",\"538\",\"536\",\"5121\",\"5046\",\"5001\",\"4274\",\"4273\",\"4187\",\"4068\",\"4040\",\"3969\",\"3286\",\"2449\",\"24\",\"2391\",\"2381\",\"2325\",\"2118\",\"1329\",\"1234\",\"1149\"],\"matchup_id\":1,\"custom_points\":null},{\"starters\":[\"1049\",\"4035\",\"6151\",\"5872\",\"6794\",\"5032\",\"0\",\"5980\"],\"roster_id\":8,\"points\":0.0,\"players\":[\"6938\",\"6906\",\"6824\",\"6794\",\"6557\",\"6151\",\"5987\",\"5980\",\"5886\",\"5872\",\"5870\",\"5844\",\"5170\",\"515\",\"5045\",\"5032\",\"4949\",\"4622\",\"4602\",\"4351\",\"4218\",\"4035\",\"3328\",\"1166\",\"1049\"],\"matchup_id\":2,\"custom_points\":null},{\"starters\":[\"4881\",\"6130\",\"4018\",\"3225\",\"5927\",\"3214\",\"6945\",\"5052\"],\"roster_id\":9,\"points\":0.0,\"players\":[\"7085\",\"6957\",\"6945\",\"6927\",\"6920\",\"6866\",\"6789\",\"6421\",\"6239\",\"6130\",\"5995\",\"5927\",\"5880\",\"5185\",\"5052\",\"5024\",\"4950\",\"4881\",\"4571\",\"4455\",\"4454\",\"4055\",\"4018\",\"3225\",\"3214\",\"3202\",\"2331\",\"2168\",\"1837\",\"1408\",\"1386\"],\"matchup_id\":6,\"custom_points\":null},{\"starters\":[\
"3161\",\"3306\",\"5122\",\"5846\",\"4131\",\"4993\",\"2078\",\"7135\"],\"roster_id\":10,\"points\":0.0,\"players\":[\"829\",\"7135\",\"6996\",\"5846\",\"5549\",\"5122\",\"5026\",\"4993\",\"4718\",\"4696\",\"4131\",\"367\",\"331\",\"3306\",\"3161\",\"240\",\"2399\",\"2214\",\"2078\",\"1984\",\"1833\",\"1535\",\"1379\",\"1071\"],\"matchup_id\":4,\"custom_points\":null},{\"starters\":[\"5849\",\"5850\",\"6806\",\"3199\",\"1689\",\"1466\",\"2410\",\"6803\"],\"roster_id\":11,\"points\":0.0,\"players\":[\"7064\",\"6956\",\"6931\",\"6857\",\"6806\",\"6803\",\"6699\",\"5955\",\"5917\",\"5859\",\"5850\",\"5849\",\"5347\",\"5110\",\"4962\",\"4943\",\"4741\",\"4381\",\"3668\",\"3664\",\"3199\",\"2410\",\"2382\",\"232\",\"2028\",\"1911\",\"184\",\"1689\",\"1476\",\"1466\"],\"matchup_id\":5,\"custom_points\":null},{\"starters\":[\"2152\",\"3164\",\"4029\",\"830\",\"6814\",\"5012\",\"147\",\"2374\"],\"roster_id\":12,\"points\":0.0,\"players\":[\"96\",\"830\",\"6870\",\"6869\",\"6828\",\"6814\",\"6768\",\"642\",\"6136\",\"5967\",\"5911\",\"5323\",\"5284\",\"5086\",\"5012\",\"4146\",\"4029\",\"3271\",\"3164\",\"2378\",\"2374\",\"2306\",\"2238\",\"2152\",\"2133\",\"1793\",\"1555\",\"1500\",\"147\",\"1144\"],\"matchup_id\":4,\"custom_points\":null}]"),
date = structure(1602159425, class = c("POSIXct", "POSIXt"), tzone = "GMT"), times = c(
redirect = 0, namelookup = 6.6e-05,
connect = 6.8e-05, pretransfer = 0.000179, starttransfer = 0.179371,
total = 0.266308
)
), class = "response")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_manipulation.R
\name{data.trim}
\alias{data.trim}
\alias{data.trim.d}
\title{Trim the data}
\usage{
data.trim(t, data, trimmed = F)
data.trim.d(d, data, trimmed = F)
}
\arguments{
\item{t}{Time of interest to pause/stop the study, which could be an interim stage or the final stage.}
\item{data}{There are two possible structures allowed for this input data. The first type needs to have \code{trimmed=F} and include variables: a \code{treatment} variable with "experimental" denoting treatment group, \code{cnsr} variable with value 1 denoting censoring, \code{ct} variable denoting event time from the origin of the study, which equals the sum of entering time \code{enterT} and the survival time (time to event or censoring). A dataset simulated from the R package \href{https://github.com/keaven/nphsim}{nphsim} should fit the first type well enough (see the example1). The second type can be any data.frame or data.table output from a \code{data.trim} function, including variables: \code{ct} denoting event time from the origin of the study or the sum of entering time and the survival time, \code{survival} denoting the survival time or time to event/censoring, \code{delta} as an event indicator, \code{enterT} as entering time (example 2). For the second type, we set \code{trimmed=T} to avoid extra computations, but should be fine if \code{trimmed=F}.}
\item{trimmed}{Whether this data has been trimmed by \code{data.trim} or \code{data.trim.d} before.}
\item{d}{Event counts to pause/stop the study.}
}
\value{
Note that \code{data.trim} only outputs a data.table ordered by \code{ct}, the event/censoring time since the start of the study (calendar scale), including variables in the input data.table/frame \code{data}, and additional/updated variables of event indicator \code{delta}, \code{ct}, follow-up time \code{survival} since the enrollment.
\code{data.trim.d} outputs a list of two components. The first component is the data censored when \code{d} events have been observed, ordered by \code{ct}, the event/censoring time since the start of the study (calendar scale). The second component is the time of the stopping point when \code{d} events have been observed.
}
\description{
Trim the data according to event number or time
}
\details{
\code{data.trim} is to trim the data up to \code{t}, \code{data.trim.d} is to trim the data up to the count \code{d}.
}
\examples{
}
\seealso{
\code{\link{FH.frac.cal}}
}
\author{
Lili Wang
}
| /man/data.trim.Rd | no_license | lilywang1988/IAfrac | R | false | true | 2,542 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_manipulation.R
\name{data.trim}
\alias{data.trim}
\alias{data.trim.d}
\title{Trim the data}
\usage{
data.trim(t, data, trimmed = F)
data.trim.d(d, data, trimmed = F)
}
\arguments{
\item{t}{Time of interest to pause/stop the study, which could be an interim stage or the final stage.}
\item{data}{There are two possible structures allowed for this input data. The first type needs to have \code{trimmed=F} and include variables: a \code{treatment} variable with "experimental" denoting treatment group, \code{cnsr} variable with value 1 denoting censoring, \code{ct} variable denoting event time from the origin of the study, which equals the sum of entering time \code{enterT} and the survival time (time to event or censoring). A dataset simulated from from R package \href{https://github.com/keaven/nphsim}{nphsim} should fit the first type well enough (see the example1). The second type can be any data.frame or data.table output from a \code{data.trim} function, including variables: \code{ct} denoting event time from the origin of the study or the sum of entering time and the survival time, \code{survival} denoting the survival time or time to event/censoring, \code{delta} as an event indicator, \code{enterT} as entering time (example 2). For the second type, we set \code{trimmed=T} to avoid extra computations, but should be fine if \code{trimmed=F}.}
\item{trimmed}{Whether this data has been trimmed by \code{data.trim} or \code{data.trim.d} before.}
\item{d}{Event counts to pause/stop the study.}
}
\value{
Note that \code{data.trim} only outputs a data.table ordered by \code{ct}, the event/censoring time since the start of the study (calendar scale), including variables in the input data.table/frame \code{data}, and additional/updated variables of event indicator \code{delta}, \code{ct}, follow-up time \code{survival} since the enrollment.
\code{data.trim.d} outputs a list of two components. The first component is the data censored when \code{d} events have been observed, ordered by \code{ct}, the event/censoring time since the start of the study (calendar scale). The second component is the time of the stopping point when \code{d} events have been observed.
}
\description{
Trim the data according to event number or time
}
\details{
\code{data.trim} is to trim the data up to \code{t}, \code{data.trim.d} is to trim the data up to the count \code{d}.
}
\examples{
}
\seealso{
\code{\link{FH.frac.cal}}
}
\author{
Lili Wang
}
|
# New Leads
# Run the allocation routine for newly created leads.
# (Path is defined by the caller; paste0 builds Path + "/R_Code/...".)
source(paste0(Path, "/R_Code/Allocation_Manual_New.R"))
# Recycled Leads
# Run the allocation routine for recycled leads.
source(paste0(Path, "/R_Code/Allocation_Manual_Recycled.R"))
| /R_Code/Manual_Allocation.R | no_license | CharlFM/Lead_Matching | R | false | false | 165 | r | # New Leads
source(paste(Path, "/R_Code/Allocation_Manual_New.R", sep = ""))
# Recycled Leads
source(paste(Path, "/R_Code/Allocation_Manual_Recycled.R", sep = ""))
|
##1 -- build the dataset: 4 replicate weight-loss measurements per soap type
soap = c(rep("Reg", 4), rep("Deo", 4), rep("Moist", 4))
soap
weight = c(-0.30, -0.10, -0.14, 0.40, 2.63, 2.61, 2.41, 3.15, 1.86, 2.03, 2.26, 1.82)
weight
weight.lost = data.frame(soap, weight)
weight.lost
##2 -- rescale the measurements (divide by 1000) and rebuild the data frame
weight = c(-0.30, -0.10, -0.14, 0.40, 2.63, 2.61, 2.41, 3.15, 1.86, 2.03, 2.26, 1.82) * 0.001
weight
weight.lost = data.frame(soap, weight)
weight.lost
##3 -- overall and per-group summary statistics
mean(weight)
sd(weight)
# tapply applies the statistic once per soap group, replacing six
# copy-pasted subsetting calls (one mean and one sd per group).
tapply(weight, soap, mean)
tapply(weight, soap, sd)
##4 -- distribution of all weight-loss values
hist(weight, main = "Histogram of Weight Loss (in kilograms)")
##5 -- compare the three soaps side by side
boxplot(weight ~ soap, data = weight.lost,
        main = "Boxplot of Weight Loss With Different Soaps",
        xlab = "Soap", ylab = "Weight Lost (kg)")
| /HW1.r | no_license | LucasHumphrey/R-461 | R | false | false | 752 | r | ##1
soap=c(rep("Reg",4),rep("Deo",4),rep("Moist",4))
soap
weight=c(-0.30,-0.10,-0.14,0.40,2.63,2.61,2.41,3.15,1.86,2.03,2.26,1.82)
weight
weight.lost=data.frame(soap,weight)
weight.lost
##2
weight=c(-0.30,-0.10,-0.14,0.40,2.63,2.61,2.41,3.15,1.86,2.03,2.26,1.82)*0.001
weight
weight.lost=data.frame(soap,weight)
weight.lost
##3
mean(weight)
sd(weight)
mean(weight[soap=="Reg"])
sd(weight[soap=="Reg"])
mean(weight[soap=="Deo"])
sd(weight[soap=="Deo"])
mean(weight[soap=="Moist"])
sd(weight[soap=="Moist"])
##4
hist(weight,main="Histogram of Weight Loss (in kilograms)")
##5
boxplot(weight~soap,data=weight.lost,
main="Boxplot of Weight Loss With Different Soaps",
xlab="Soap",ylab="Weight Lost (kg)")
|
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
# Dependencies(When hosting in Github,copy below from Dependencies.R)
#ShinyApp_R
# Define server logic
# Shiny server for a UDPipe-based text annotation app: the user uploads a
# text file (fileinput1) and a UDPipe model file (fileinput2); the app shows
# a live clock, the annotated token table, a lemma co-occurrence network for
# the POS tags selected in checkgroup1, and one wordcloud per POS tag.
shinyServer(function(input, output, session) {
  # Live clock shown in the UI, refreshed once per second via invalidateLater().
  output$currentTime <- renderText({invalidateLater(1000, session)
    paste("Current time: ",Sys.time())})
  # Raise the upload size limit to 30 MB.
  options(shiny.maxRequestSize=30*1024^2)
  ####Converting uploaded data into a dataframe
  # Reactive: raw lines of the uploaded text file, or a placeholder string
  # while no file has been uploaded yet.
  uploadedtext <- reactive({
    if (is.null(input$fileinput1)) { # Finds 'fileinput1' from ui.R
      return('No Text exists') } else{
        Data1 <- readLines(input$fileinput1$datapath,encoding = "UTF-8")
        return(Data1)
      }
  })
  # Reactive: annotate the uploaded text with the uploaded UDPipe model and
  # return the result as a data.frame (NULL until a model file is uploaded).
  annotate <- reactive({
    if (is.null(input$fileinput2)) { # locate 'udpipfl' from ui.R
      return(NULL) } else{
        udpipe_model <- udpipe_load_model(file = input$fileinput2$datapath)
        ud_text <- udpipe_annotate(udpipe_model, uploadedtext())
        ud_text <- as.data.frame(ud_text)
        return(ud_text)
      }
  })
  ##########Annotated Document Code
  # Full annotation table (tokens, lemmas, POS tags, ...).
  output$Annotate <- renderDataTable(
    {
      out <- annotate()
      return(out)
    }
  )
  ##########cooccurance graph code for Xpos
  # Lemma co-occurrence network restricted to the selected xpos tags.
  # NOTE(review): this re-loads the model and re-annotates the text instead
  # of reusing the annotate() reactive above -- duplicated, slow work.
  output$Cooccurance <- renderPlot(
    {
      model = udpipe_load_model(file = input$fileinput2$datapath)
      txt <- udpipe_annotate(model, uploadedtext())
      txt <- as.data.frame(txt)
      data_cooc <- udpipe::cooccurrence(x = subset(txt, xpos %in% input$checkgroup1), term = "lemma",
                                        group = c("doc_id", "paragraph_id", "sentence_id")
      )
      wordnetwork <- data_cooc
      wordnetwork <- igraph::graph_from_data_frame(wordnetwork)
      ggraph(wordnetwork, layout = "fr") + geom_edge_link(aes(width = cooc, edge_alpha = cooc), edge_colour = "orange") +
        geom_node_text(aes(label = name), col = "darkgreen", size = 4) +
        theme_graph(base_family = "Arial Narrow") +
        theme(legend.position = "none")
    }
  )
  ##########Wordcloud
  # Wordcloud of noun lemmas (xpos "NN"); drawn only when NN is selected.
  output$plotNN = renderPlot({
    if('NN' %in% input$checkgroup1)
    {
      all_nouns = annotate() %>% subset(., xpos %in% "NN")
      top_nouns = txt_freq(all_nouns$lemma)
      wordcloud(words = top_nouns$key,
                freq = top_nouns$freq,
                min.freq = input$min_freq,
                max.words = input$max_freq,
                random.order = FALSE,
                colors = brewer.pal(6, "Dark2"))
    }
    else
    {return(NULL)}
  })
  # Wordcloud of verb lemmas (xpos "VB").
  output$plotVB = renderPlot({
    if('VB' %in% input$checkgroup1)
    {
      all_verbs = annotate() %>% subset(., xpos %in% "VB")
      top_verbs = txt_freq(all_verbs$lemma)
      wordcloud(words = top_verbs$key,
                freq = top_verbs$freq,
                min.freq = input$min_freq,
                max.words = input$max_freq,
                random.order = FALSE,
                colors = brewer.pal(6, "Dark2"))
    }
    else
    {return(NULL)}
  })
  # Wordcloud of adverb lemmas (xpos "RB").
  output$plotRB = renderPlot({
    if('RB' %in% input$checkgroup1)
    {
      all_adverbs = annotate() %>% subset(., xpos %in% "RB")
      top_adverbs = txt_freq(all_adverbs$lemma)
      wordcloud(words = top_adverbs$key,
                freq = top_adverbs$freq,
                min.freq = input$min_freq,
                max.words = input$max_freq,
                random.order = FALSE,
                colors = brewer.pal(6, "Dark2"))
    }
    else
    {return(NULL)}
  })
  # Wordcloud of adjective lemmas (xpos "JJ").
  output$plotJJ = renderPlot({
    if('JJ' %in% input$checkgroup1)
    {
      all_adjec = annotate() %>% subset(., xpos %in% "JJ")
      top_adjec = txt_freq(all_adjec$lemma)
      wordcloud(words = top_adjec$key,
                freq = top_adjec$freq,
                min.freq = input$min_freq,
                max.words = input$max_freq,
                random.order = FALSE,
                colors = brewer.pal(6, "Dark2"))
    }
    else
    {return(NULL)}
  })
  })
| /server.R | no_license | VVSKushwanthReddy/ShinyApp_R | R | false | false | 4,709 | r | # This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
# Dependencies(When hosting in Github,copy below from Dependencies.R)
#ShinyApp_R
# Define server logic
shinyServer(function(input, output, session) {
output$currentTime <- renderText({invalidateLater(1000, session)
paste("Current time: ",Sys.time())})
options(shiny.maxRequestSize=30*1024^2)
####Coverting uploaded data in to dataframe
uploadedtext <- reactive({
if (is.null(input$fileinput1)) { # Finds 'fileinput1' from ui.R
return('No Text exists') } else{
Data1 <- readLines(input$fileinput1$datapath,encoding = "UTF-8")
return(Data1)
}
})
annotate <- reactive({
if (is.null(input$fileinput2)) { # locate 'udpipfl' from ui.R
return(NULL) } else{
udpipe_model <- udpipe_load_model(file = input$fileinput2$datapath)
ud_text <- udpipe_annotate(udpipe_model, uploadedtext())
ud_text <- as.data.frame(ud_text)
return(ud_text)
}
})
##########Annotated Document Code
output$Annotate <- renderDataTable(
{
out <- annotate()
return(out)
}
)
##########cooccurance graph code for Xpos
output$Cooccurance <- renderPlot(
{
model = udpipe_load_model(file = input$fileinput2$datapath)
txt <- udpipe_annotate(model, uploadedtext())
txt <- as.data.frame(txt)
data_cooc <- udpipe::cooccurrence(x = subset(txt, xpos %in% input$checkgroup1), term = "lemma",
group = c("doc_id", "paragraph_id", "sentence_id")
)
wordnetwork <- data_cooc
wordnetwork <- igraph::graph_from_data_frame(wordnetwork)
ggraph(wordnetwork, layout = "fr") + geom_edge_link(aes(width = cooc, edge_alpha = cooc), edge_colour = "orange") +
geom_node_text(aes(label = name), col = "darkgreen", size = 4) +
theme_graph(base_family = "Arial Narrow") +
theme(legend.position = "none")
}
)
##########Wordcloud
output$plotNN = renderPlot({
if('NN' %in% input$checkgroup1)
{
all_nouns = annotate() %>% subset(., xpos %in% "NN")
top_nouns = txt_freq(all_nouns$lemma)
wordcloud(words = top_nouns$key,
freq = top_nouns$freq,
min.freq = input$min_freq,
max.words = input$max_freq,
random.order = FALSE,
colors = brewer.pal(6, "Dark2"))
}
else
{return(NULL)}
})
output$plotVB = renderPlot({
if('VB' %in% input$checkgroup1)
{
all_verbs = annotate() %>% subset(., xpos %in% "VB")
top_verbs = txt_freq(all_verbs$lemma)
wordcloud(words = top_verbs$key,
freq = top_verbs$freq,
min.freq = input$min_freq,
max.words = input$max_freq,
random.order = FALSE,
colors = brewer.pal(6, "Dark2"))
}
else
{return(NULL)}
})
output$plotRB = renderPlot({
if('RB' %in% input$checkgroup1)
{
all_adverbs = annotate() %>% subset(., xpos %in% "RB")
top_adverbs = txt_freq(all_adverbs$lemma)
wordcloud(words = top_adverbs$key,
freq = top_adverbs$freq,
min.freq = input$min_freq,
max.words = input$max_freq,
random.order = FALSE,
colors = brewer.pal(6, "Dark2"))
}
else
{return(NULL)}
})
output$plotJJ = renderPlot({
if('JJ' %in% input$checkgroup1)
{
all_adjec = annotate() %>% subset(., xpos %in% "JJ")
top_adjec = txt_freq(all_adjec$lemma)
wordcloud(words = top_adjec$key,
freq = top_adjec$freq,
min.freq = input$min_freq,
max.words = input$max_freq,
random.order = FALSE,
colors = brewer.pal(6, "Dark2"))
}
else
{return(NULL)}
})
})
|
# NOTE(review): rm(list = ls()) wipes the user's workspace as a side effect;
# scripts should rely on a fresh R session instead.
rm(list = ls())
library("rvest")
library("RSelenium")
library("wdman")
library(stringr)
# Accumulator for scraped course records (filled further below).
results <- list()
# Section headers of a course-detail page on the campus portal; ms_text()
# uses these to locate where each field starts and ends in the page text.
# ("?" characters look like mis-encoded umlauts -- TODO confirm encoding.)
categories <-
  c("Lehrende:",
    "Veranstaltungsart:",
    "Orga-Einheit:",
    "Anzeige im Stundenplan:",
    "Semesterwochenstunden:",
    "Credits",
    "Standort",
    "Unterrichtssprache:",
    "Min. | Max. Teilnehmerzahl:",
    "Inhalte:",
    "Zugelassene Hilfsmittel f?r die Pr?fung:",
    "Literatur:",
    "Lernziele:",
    "Weitere Informationen zu den Pr?fungsleistungen:" )
# Extract the text of one section of a course page: the substring starting
# right after the `category` header and ending just before the next header
# from the global `categories` vector that occurs in the text.
ms_text <- function(category = "Lehrende:",
                    tt = tt1){
  # Position right after the matched header (end of match + 2).
  sec_start <- str_locate(tt, category)[, 2] + 2
  # Match positions of every *other* known header in the text.
  other_hits <- unlist(str_locate_all(string = tt, pattern = categories[!categories %in% c(category)]))
  # The nearest following header marks the end of this section.
  sec_end <- min(other_hits[other_hits > sec_start])
  str_sub(tt, sec_start, sec_end - 1)
}
# Sleep for a random, human-like delay drawn uniformly from the grid
# seq(t, tt, by = 0.001) seconds; used to pace Selenium interactions.
human_wait = function(t = 2, tt = 4){
  pause <- sample(seq(t, tt, by = 0.001), size = 1)
  Sys.sleep(pause)
}
########
# Campus portal (CampusNet) start page.
url = "https://zuhause.zeppelin-university.net/scripts/mgrqispi.dll?APPNAME=CampusNet&PRGNAME=STARTPAGE_DISPATCH&ARGUMENTS=-N000000000000001"
#ws <- remdr$getWindowSize()
# print(paste0("PLZ: ",plz," ",Sys.time()))
# Start a local Selenium server and grab the browser client handle
# (used as a global by all scraping functions below).
rd = rsDriver()
remdr = rd[["client"]]
# Reset window size to standard
#remdr$setWindowSize(ws$width, ws$height)
remdr$navigate(url)
human_wait()
# SECURITY NOTE(review): login credentials are hard-coded below; they should
# be read from environment variables or a credential store, and rotated.
# Enter username
remdr$findElement(using = 'xpath', value = '//*[@id="username"]')$sendKeysToElement(list("14201770"))
# Enter password
remdr$findElement(using = 'xpath', value = '//*[@id="password"]')$sendKeysToElement(list("Rojo0175"))
# Click the login button
remdr$findElement(using = 'xpath', value = '//*[@id="contentSpacer_IE"]/h2/form/fieldset/div[3]/input')$clickElement()
# All Semesters
# Click through every semester link (anchor text starting "Veranstaltungen\n")
# and evaluate `inner` once per semester. `inner` is an unevaluated promise,
# so the nested scraping code re-runs on each loop iteration.
# Side effects: writes the globals `sem_index` and `l1` via `<<-`, which the
# rest of the script reads.
semesters <- function(inner){
  links <- remdr$findElements(using = 'css selector', "a")
  linktext <- unlist(lapply(links, function(x){x$getElementText()}))
  sem_index <<- which(sapply(linktext,function(x)stringr::str_detect(x,"Veranstaltungen\n")))
  semesters <- links[sem_index]
  for(level1 in 1:length(semesters)){
    l1 <<- level1
    # Re-fetch the anchors: the DOM changes after each click/goBack.
    semlinks <- remdr$findElements(using = 'css selector', "a")
    semlinktext <- unlist(lapply(semlinks, function(x){x$getElementText()}))
    sem_index <- which(sapply(semlinktext,function(x)stringr::str_detect(x,"Veranstaltungen\n")))
    # NOTE(review): this indexes the ORIGINAL `links` handles instead of the
    # freshly fetched `semlinks` -- likely stale-element bug; confirm.
    semesters <- links[sem_index]
    semesters[[level1]]$clickElement()
    inner
  }
}
# Category labels intended to be excluded from scraping (passed as `avoid`,
# which `nextlevel` currently accepts but never uses -- see note below).
av <- c("Zusatzkurse und StudentStudies","Workshop |ZU|plus","PhD Courses","ZU Courses Taught in English")
# Descend one level of the course-catalogue tree: click every entry in the
# registration list, optionally evaluating the lazy `inner` promise on each
# subpage, then navigate back.
# Side effects: writes the globals `linktext<whichlevel>` and `l<whichlevel>`
# via eval(parse(...)) + `<<-`.
# NOTE(review): eval(parse(text = ...)) is fragile; assigning into a named
# list or environment would be safer. `avoid` and `innereval` are unused.
nextlevel <- function(xpath = '//*[@id="auditRegistration_list"]//*',
                      whichlevel,
                      avoid = NULL,
                      inner = NULL,
                      innereval = NULL){
  links <- remdr$findElements(using = 'xpath', xpath)
  # Keep every second element -- the list interleaves two node types.
  links <- links[seq(2,length(links),by = 2)]
  linktext <- unlist(lapply(links, function(x){x$getElementText()}))
  eval(parse(text=paste0("linktext",whichlevel,"<<- linktext")))
  #types <- which(!linktext %in% c(avoid))
  for(level in c(1:length(links))){
    eval(parse(text=paste0("l",whichlevel," <<- level")))
    # Re-query the DOM each iteration to avoid stale element handles.
    links <- remdr$findElements(using = 'xpath', '//*[@id="auditRegistration_list"]//*')
    links <- links[seq(2,length(links),by = 2)]
    linktext <- unlist(lapply(links, function(x){x$getElementText()}))
    links[[level]]$clickElement()
    if(!is.null(inner)) {inner}
  }
  remdr$goBack()
}
# Scrape the course list on the currently displayed page
# (02_source_functions.R is expected to append to the global `results`)
# and then navigate back one page.
laststep <- function(){
  source("02_source_functions.R")
  remdr$goBack()
}
laststep()
############# ACTUAL
# Click on Vorlesungsverzeichnis (course catalogue)
remdr$findElement(using = 'xpath', value = '//*[@id="link000621"]/a')$clickElement()
results <- data.frame()
# Snapshot the accumulated results under a per-semester name, then combine
# all semester snapshots into one table with a semester label column.
speSS18 <- results
spe <- dplyr::bind_rows(speFS14 %>% data.frame(sem = "FS14"),
                        speSS15 %>% data.frame(sem = "SS15"),
                        speFS15 %>% data.frame(sem = "FS15"),
                        speSS16 %>% data.frame(sem = "SS16"),
                        speFS16 %>% data.frame(sem = "FS16"),
                        speSS17 %>% data.frame(sem = "SS17"),
                        speFS17 %>% data.frame(sem = "FS17"),
                        speSS18 %>% data.frame(sem = "SS18"))
write.csv2(spe,"spe.csv")
laststep()
# Fully automated traversal: semester -> level 2 -> level 3 -> level 4,
# scraping each leaf page via laststep(). The `inner` arguments are lazy
# promises re-evaluated on every click (see semesters()/nextlevel()).
semesters(inner =
            nextlevel(whichlevel = "2",
                      avoid = av,
                      inner = nextlevel(whichlevel = "3",
                                        avoid = "Zeppelin Jahr",
                                        inner = nextlevel(whichlevel = "4",
                                                          avoid = "xxx",
                                                          inner = laststep())))
)
# Manual traversal variant of the same walk (appears to be interactive
# scratch code run line-by-line, not meant to be source()d -- see note below).
# level studiengang
links3 <- remdr$findElements(using = 'xpath', '//*[@id="auditRegistration_list"]//*')
links3 <- links3[seq(2,length(links3),by = 2)]
linktext3 <- unlist(lapply(links3, function(x){x$getElementText()}))
without_old_zeppelin <-
  which(! str_detect(linktext3,"Zeppelin Jahr"))
for(level3 in c(1:length(links3))[without_old_zeppelin]){
  links3 <- remdr$findElements(using = 'xpath', '//*[@id="auditRegistration_list"]//*')
  links3 <- links3[seq(2,length(links3),by = 2)]
  linktext3 <- unlist(lapply(links3, function(x){x$getElementText()}))
  links3[[level3]]$clickElement()
  # level studienphase
  links4 <- remdr$findElements(using = 'xpath', '//*[@id="auditRegistration_list"]//*')
  links4 <- links4[seq(2,length(links4),by = 2)]
  linktext4 <- unlist(lapply(links4, function(x){x$getElementText()}))
  humboldt <- which(!linktext4 %in% c("Humboldtjahr","Bachelor-Phase"))
  for(level4 in c(1:length(links4))[humboldt]){
    links4 <- remdr$findElements(using = 'xpath', '//*[@id="auditRegistration_list"]//*')
    links4 <- links4[seq(2,length(links4),by = 2)]
    linktext4 <- unlist(lapply(links4, function(x){x$getElementText()}))
    links4[[level4]]$clickElement()
    source("02_source_functions.R")
    remdr$goBack()
    ####
  } # level5 over
  remdr$goBack()
} #level4 over
remdr$goBack()
# NOTE(review): the two closing braces below have no matching opening
# brace in this file -- leftovers from removed outer loops. Sourcing this
# file as-is raises "unexpected '}'"; it only works when run selectively.
} #level 3 over
remdr$goBack()
} #level 2 over
remdr$goBack()
# Backfill the lecturer column from the fallback field, then export.
results$X1[is.na(results$X1)] <- results$lehrende_all[is.na(results$X1)]
write.csv2(results,"zuhause_abfall16.csv")
write.csv2(results,"zuhause_spring15bisfall16.csv")
write.csv2(results,"zuhause_fall14.csv")
# Close Window
remdr$close()
rd$server$stop()
| /01_scaping_zuhause_NEU.R | no_license | schliebs/reverse_predictive_policing | R | false | false | 6,438 | r | rm(list = ls())
library("rvest")
library("RSelenium")
library("wdman")
library(stringr)
results <- list()
categories <-
c("Lehrende:",
"Veranstaltungsart:",
"Orga-Einheit:",
"Anzeige im Stundenplan:",
"Semesterwochenstunden:",
"Credits",
"Standort",
"Unterrichtssprache:",
"Min. | Max. Teilnehmerzahl:",
"Inhalte:",
"Zugelassene Hilfsmittel f?r die Pr?fung:",
"Literatur:",
"Lernziele:",
"Weitere Informationen zu den Pr?fungsleistungen:" )
ms_text <- function(category = "Lehrende:",
tt = tt1){
start <- str_locate(tt,category)[,2]+2
endvector <-
str_locate_all(string = tt,pattern = categories [!categories %in% c(category)]) %>%
unlist()
end <- endvector[endvector > start ] %>% min()
text <- str_sub(tt,
start,
end-1)
return(text)
}
human_wait = function(t = 2, tt = 4){
Sys.sleep(sample(seq(t, tt, by=0.001), 1))
}
########
url = "https://zuhause.zeppelin-university.net/scripts/mgrqispi.dll?APPNAME=CampusNet&PRGNAME=STARTPAGE_DISPATCH&ARGUMENTS=-N000000000000001"
#ws <- remdr$getWindowSize()
# print(paste0("PLZ: ",plz," ",Sys.time()))
rd = rsDriver()
remdr = rd[["client"]]
# Reset window size to standard
#remdr$setWindowSize(ws$width, ws$height)
remdr$navigate(url)
human_wait()
# Enter Username
remdr$findElement(using = 'xpath', value = '//*[@id="username"]')$sendKeysToElement(list("14201770"))
# Enter Username
remdr$findElement(using = 'xpath', value = '//*[@id="password"]')$sendKeysToElement(list("Rojo0175"))
# Login Button clicken
remdr$findElement(using = 'xpath', value = '//*[@id="contentSpacer_IE"]/h2/form/fieldset/div[3]/input')$clickElement()
# All Semesters
semesters <- function(inner){
links <- remdr$findElements(using = 'css selector', "a")
linktext <- unlist(lapply(links, function(x){x$getElementText()}))
sem_index <<- which(sapply(linktext,function(x)stringr::str_detect(x,"Veranstaltungen\n")))
semesters <- links[sem_index]
for(level1 in 1:length(semesters)){
l1 <<- level1
semlinks <- remdr$findElements(using = 'css selector', "a")
semlinktext <- unlist(lapply(semlinks, function(x){x$getElementText()}))
sem_index <- which(sapply(semlinktext,function(x)stringr::str_detect(x,"Veranstaltungen\n")))
semesters <- links[sem_index]
semesters[[level1]]$clickElement()
inner
}
}
av <- c("Zusatzkurse und StudentStudies","Workshop |ZU|plus","PhD Courses","ZU Courses Taught in English")
nextlevel <- function(xpath = '//*[@id="auditRegistration_list"]//*',
whichlevel,
avoid = NULL,
inner = NULL,
innereval = NULL){
links <- remdr$findElements(using = 'xpath', xpath)
links <- links[seq(2,length(links),by = 2)]
linktext <- unlist(lapply(links, function(x){x$getElementText()}))
eval(parse(text=paste0("linktext",whichlevel,"<<- linktext")))
#types <- which(!linktext %in% c(avoid))
for(level in c(1:length(links))){
eval(parse(text=paste0("l",whichlevel," <<- level")))
links <- remdr$findElements(using = 'xpath', '//*[@id="auditRegistration_list"]//*')
links <- links[seq(2,length(links),by = 2)]
linktext <- unlist(lapply(links, function(x){x$getElementText()}))
links[[level]]$clickElement()
if(!is.null(inner)) {inner}
}
remdr$goBack()
}
laststep <- function(){
source("02_source_functions.R")
remdr$goBack()
}
laststep()
############# ACTUAL
# Click on Vorlesungsverzeichnis
remdr$findElement(using = 'xpath', value = '//*[@id="link000621"]/a')$clickElement()
results <- data.frame()
speSS18 <- results
spe <- dplyr::bind_rows(speFS14 %>% data.frame(sem = "FS14"),
speSS15 %>% data.frame(sem = "SS15"),
speFS15 %>% data.frame(sem = "FS15"),
speSS16 %>% data.frame(sem = "SS16"),
speFS16 %>% data.frame(sem = "FS16"),
speSS17 %>% data.frame(sem = "SS17"),
speFS17 %>% data.frame(sem = "FS17"),
speSS18 %>% data.frame(sem = "SS18"))
write.csv2(spe,"spe.csv")
laststep()
semesters(inner =
nextlevel(whichlevel = "2",
avoid = av,
inner = nextlevel(whichlevel = "3",
avoid = "Zeppelin Jahr",
inner = nextlevel(whichlevel = "4",
avoid = "xxx",
inner = laststep())))
)
# level studiengang
links3 <- remdr$findElements(using = 'xpath', '//*[@id="auditRegistration_list"]//*')
links3 <- links3[seq(2,length(links3),by = 2)]
linktext3 <- unlist(lapply(links3, function(x){x$getElementText()}))
without_old_zeppelin <-
which(! str_detect(linktext3,"Zeppelin Jahr"))
for(level3 in c(1:length(links3))[without_old_zeppelin]){
links3 <- remdr$findElements(using = 'xpath', '//*[@id="auditRegistration_list"]//*')
links3 <- links3[seq(2,length(links3),by = 2)]
linktext3 <- unlist(lapply(links3, function(x){x$getElementText()}))
links3[[level3]]$clickElement()
# level studienphase
links4 <- remdr$findElements(using = 'xpath', '//*[@id="auditRegistration_list"]//*')
links4 <- links4[seq(2,length(links4),by = 2)]
linktext4 <- unlist(lapply(links4, function(x){x$getElementText()}))
humboldt <- which(!linktext4 %in% c("Humboldtjahr","Bachelor-Phase"))
for(level4 in c(1:length(links4))[humboldt]){
links4 <- remdr$findElements(using = 'xpath', '//*[@id="auditRegistration_list"]//*')
links4 <- links4[seq(2,length(links4),by = 2)]
linktext4 <- unlist(lapply(links4, function(x){x$getElementText()}))
links4[[level4]]$clickElement()
source("02_source_functions.R")
remdr$goBack()
####
} # level5 over
remdr$goBack()
} #level4 over
remdr$goBack()
} #level 3 over
remdr$goBack()
} #level 2 over
remdr$goBack()
results$X1[is.na(results$X1)] <- results$lehrende_all[is.na(results$X1)]
write.csv2(results,"zuhause_abfall16.csv")
write.csv2(results,"zuhause_spring15bisfall16.csv")
write.csv2(results,"zuhause_fall14.csv")
# Close Window
remdr$close()
rd$server$stop()
|
library("glmnet")
library("randomForest")
library("pROC")
library(pheatmap)
library(ggplot2)
library(RColorBrewer)
library("caret") #for confusion matrix
library(e1071)
library("verification") # for roc p value
library(scatterplot3d)
library("ggsci")
library("Rtsne")
# cfDNA methylation matrix: rows appear to be regions keyed by
# "chrom_start_end" (a "chr" prefix is added below) and the first two
# columns hold training-set pvalue/meandiff -- TODO confirm file schema.
dmr_mat_total <- read.table("./met_mat/cfDNA_mat",sep = "\t",stringsAsFactors = F,header = T,row.names = 1)
dmr_mat_total <- dmr_mat_total[!grepl("^M",rownames(dmr_mat_total)),] #rm the rows of chrom MT
dmr_mat_total <- dmr_mat_total[,!grepl("Z17",colnames(dmr_mat_total))] # remove the Z17 sample from the XH
#add chr to be a name
rownames(dmr_mat_total) = paste0("chr",rownames(dmr_mat_total))
{####filter by tissue
  # Tissue per-CpG methylation matrix; "-" in the file encodes missing values.
  # Column names containing "N"/"T" are treated as the two sample groups
  # (presumably normal vs tumor -- confirm against the data dictionary).
  cpg.mat = read.table("./met_mat/tissue_mat",stringsAsFactors = F,sep="\t",header=T,na.strings = "-")
  #paste chr start end into a single region key in column 1
  cpg.mat[,2] = as.character(cpg.mat[,2])
  cpg.mat[,3] = as.character(cpg.mat[,3])
  cpg.mat[,1] = apply(as.matrix(cpg.mat[,1:3]),1,function(x) paste(x,collapse = "_"))
  #rm these col
  cpg.mat = cpg.mat[,-(4:6)]
  #rm the last col
  cpg.mat = cpg.mat[,-dim(cpg.mat)[2]]
  #fill the na value: impute each missing entry with the median of its
  #own group ("N" columns vs "T" columns), row by row
  nm.idx = grepl("N",colnames(cpg.mat))
  early.idx = grepl("T",colnames(cpg.mat))
  for(i in 1:dim(cpg.mat)[1])
  {
    na.idx = is.na(cpg.mat[i,])
    cpg.mat[i,nm.idx&na.idx] = median(unlist(cpg.mat[i,nm.idx]),na.rm = T)
    cpg.mat[i,early.idx&na.idx] = median(unlist(cpg.mat[i,early.idx]),na.rm = T)
  }
  #calculate the mean methylation for each region
  # NOTE(review): growing region.mat with rbind inside the loop is O(n^2);
  # do.call(rbind, lapply(region, ...)) would avoid repeated copying.
  region = unique(cpg.mat[,1])
  region.mat = c()
  for(i in 1:length(region)) #collapse the positions into region
  {
    tmp = colMeans(cpg.mat[cpg.mat[,1]==region[i],4:dim(cpg.mat)[2]],na.rm = T)
    region.mat = rbind(region.mat,tmp)
    rownames(region.mat)[i] = region[i]
  }
  #Calculate the p value and mean differ: per-region t-test between the two
  #groups plus group means and their difference, appended as new columns
  nm.idx = which(grepl("N",colnames(region.mat))==T)
  early.idx = which(grepl("T",colnames(region.mat))==T)
  region.mat = cbind(region.mat,tissue_pvalue = 0,tissue_early_meth_mean = 0, tissue_normal_meth_mean= 0, tissue_meandiff = 0)
  for(i in 1:dim(region.mat)[1])
  {
    region.mat[i,"tissue_pvalue"] = t.test(x=region.mat[i,nm.idx],y = region.mat[i,early.idx])$p.value
    region.mat[i,"tissue_early_meth_mean"] = mean(region.mat[i,early.idx])
    region.mat[i,"tissue_normal_meth_mean"] = mean(region.mat[i,nm.idx])
    region.mat[i,"tissue_meandiff"] = mean(region.mat[i,early.idx])-mean(region.mat[i,nm.idx])
  }
  #read the training set's p and meandiffer (first two columns of cfDNA mat)
  train_dmr_differ_info = dmr_mat_total[,1:2]
  # Keep regions with |tissue mean difference| > 0.2 whose direction agrees
  # with the cfDNA training-set difference, then subset the cfDNA matrix.
  filtered_dmr_name = rownames(region.mat)[(abs(region.mat[,"tissue_meandiff"])>0.2)&
                                             (train_dmr_differ_info$meandiff*region.mat[,"tissue_meandiff"]>0)]
  filtered_dmr_name = paste0("chr",filtered_dmr_name)
  dmr_mat_total = dmr_mat_total[filtered_dmr_name,]
  ##output the 68 regions' detail info
  # Per-region means over the cfDNA training samples (Normal vs Early cols).
  out.tmp <- t(apply(dmr_mat_total,1,function(x)
  {
    mean_normal_in_cfdna = mean(x[grepl("Training_Normal",colnames(dmr_mat_total))])
    mean_early_in_cfdna = mean(x[grepl("Training_Early",colnames(dmr_mat_total))])
    return(c(mean_normal_in_cfdna,mean_early_in_cfdna))
  }))
  out.tmp <-cbind(out.tmp, dmr_mat_total[,1:2])
  colnames(out.tmp)[1:2] <-c("cfdna_normal_meth_mean","cfdna_early_meth_mean")
  rownames(region.mat) <-paste0("chr",rownames(region.mat))
  out.tmp <-cbind(out.tmp,region.mat[filtered_dmr_name,c(10,11,9,12)])
  #write.table(out.tmp,"model/DMR68_info.txt",row.names = T,col.names = T,quote = F,sep="\t")
}
| /3. Marker identification/filtration_by_tissue.R | permissive | lizhengbio/cfWGBS-pipline | R | false | false | 3,605 | r | library("glmnet")
library("randomForest")
library("pROC")
library(pheatmap)
library(ggplot2)
library(RColorBrewer)
library("caret") #for confusion matrix
library(e1071)
library("verification") # for roc p value
library(scatterplot3d)
library("ggsci")
library("Rtsne")
dmr_mat_total <- read.table("./met_mat/cfDNA_mat",sep = "\t",stringsAsFactors = F,header = T,row.names = 1)
dmr_mat_total <- dmr_mat_total[!grepl("^M",rownames(dmr_mat_total)),] #rm the rows of chrom MT
dmr_mat_total <- dmr_mat_total[,!grepl("Z17",colnames(dmr_mat_total))] # remove the Z17 sample from the XH
#add chr to be a name
rownames(dmr_mat_total) = paste0("chr",rownames(dmr_mat_total))
{####filter by tissue
cpg.mat = read.table("./met_mat/tissue_mat",stringsAsFactors = F,sep="\t",header=T,na.strings = "-")
#paste chr start end
cpg.mat[,2] = as.character(cpg.mat[,2])
cpg.mat[,3] = as.character(cpg.mat[,3])
cpg.mat[,1] = apply(as.matrix(cpg.mat[,1:3]),1,function(x) paste(x,collapse = "_"))
#rm these col
cpg.mat = cpg.mat[,-(4:6)]
#rm the last col
cpg.mat = cpg.mat[,-dim(cpg.mat)[2]]
#fill the na value
nm.idx = grepl("N",colnames(cpg.mat))
early.idx = grepl("T",colnames(cpg.mat))
for(i in 1:dim(cpg.mat)[1])
{
na.idx = is.na(cpg.mat[i,])
cpg.mat[i,nm.idx&na.idx] = median(unlist(cpg.mat[i,nm.idx]),na.rm = T)
cpg.mat[i,early.idx&na.idx] = median(unlist(cpg.mat[i,early.idx]),na.rm = T)
}
#calculate the mean methylation for each region
region = unique(cpg.mat[,1])
region.mat = c()
for(i in 1:length(region)) #collapse the positions into region
{
tmp = colMeans(cpg.mat[cpg.mat[,1]==region[i],4:dim(cpg.mat)[2]],na.rm = T)
region.mat = rbind(region.mat,tmp)
rownames(region.mat)[i] = region[i]
}
#Calculate the p value and mean differ
nm.idx = which(grepl("N",colnames(region.mat))==T)
early.idx = which(grepl("T",colnames(region.mat))==T)
region.mat = cbind(region.mat,tissue_pvalue = 0,tissue_early_meth_mean = 0, tissue_normal_meth_mean= 0, tissue_meandiff = 0)
for(i in 1:dim(region.mat)[1])
{
region.mat[i,"tissue_pvalue"] = t.test(x=region.mat[i,nm.idx],y = region.mat[i,early.idx])$p.value
region.mat[i,"tissue_early_meth_mean"] = mean(region.mat[i,early.idx])
region.mat[i,"tissue_normal_meth_mean"] = mean(region.mat[i,nm.idx])
region.mat[i,"tissue_meandiff"] = mean(region.mat[i,early.idx])-mean(region.mat[i,nm.idx])
}
#read the training set's p and meandiffer
train_dmr_differ_info = dmr_mat_total[,1:2]
filtered_dmr_name = rownames(region.mat)[(abs(region.mat[,"tissue_meandiff"])>0.2)&
(train_dmr_differ_info$meandiff*region.mat[,"tissue_meandiff"]>0)]
filtered_dmr_name = paste0("chr",filtered_dmr_name)
dmr_mat_total = dmr_mat_total[filtered_dmr_name,]
##output the 68 regions' detail info
out.tmp <- t(apply(dmr_mat_total,1,function(x)
{
mean_normal_in_cfdna = mean(x[grepl("Training_Normal",colnames(dmr_mat_total))])
mean_early_in_cfdna = mean(x[grepl("Training_Early",colnames(dmr_mat_total))])
return(c(mean_normal_in_cfdna,mean_early_in_cfdna))
}))
out.tmp <-cbind(out.tmp, dmr_mat_total[,1:2])
colnames(out.tmp)[1:2] <-c("cfdna_normal_meth_mean","cfdna_early_meth_mean")
rownames(region.mat) <-paste0("chr",rownames(region.mat))
out.tmp <-cbind(out.tmp,region.mat[filtered_dmr_name,c(10,11,9,12)])
#write.table(out.tmp,"model/DMR68_info.txt",row.names = T,col.names = T,quote = F,sep="\t")
}
|
library(tidyr)
library(dplyr)
library(ggplot2)
# Plots CIs for the quantile
fullData_ConfidenceLowerBound = read.csv("../../results/tradeoff/listener-curve-binomial-confidence-bound-quantile.tsv", sep="\t")
fullData_ConfidenceLowerBound_05 = read.csv("../../results/tradeoff/listener-curve-binomial-confidence-bound-quantile-05.tsv", sep="\t")
fullData_BinomialTest = read.csv("../../results/tradeoff/listener-curve-binomial-test.tsv", sep="\t")
# Plot, for one language, the binomial lower confidence bound (dotted), the
# 0.05 bound (dashed) and the empirical quantile curve (solid) of listener
# surprisal as a function of memory, annotated with binomial-test p-values at
# every 9th position; saves the figure as a PDF and returns the ggplot object.
memListenerSurpPlot_onlyWordForms_boundedVocab = function(language) {
  bound_data <- fullData_ConfidenceLowerBound %>% filter(Language == language)
  test_data <- fullData_BinomialTest %>% filter(Language == language)
  bound05_data <- fullData_ConfidenceLowerBound_05 %>% filter(Language == language)
  p <- ggplot(bound_data, aes(x = Memory, y = LowerConfidenceBound, fill = Type, color = Type)) +
    geom_line(size = 1, linetype = "dotted") +
    geom_line(data = bound05_data, size = 2, linetype = "dashed") +
    geom_line(data = test_data, aes(x = Memory, y = BetterEmpirical), size = 2)
  # Human-readable p-value labels; values that round to zero get a floor label.
  test_data <- test_data %>% mutate(pValue_print = ifelse(round(pValue, 5) == 0, "p<0.00001", paste("p=", round(pValue, 5), sep = "")))
  # Annotate every 9th position, vertically offset so the two types do not overlap.
  p <- p +
    geom_text(data = test_data %>% filter(Position %% 9 == 0, Type == "REAL_REAL"), aes(x = Memory, y = BetterEmpirical + 0.1, label = pValue_print), size = 3) +
    geom_text(data = test_data %>% filter(Position %% 9 == 0, Type == "GROUND"), aes(x = Memory, y = BetterEmpirical + 0.05, label = pValue_print), size = 3) +
    theme_classic() +
    theme(legend.position = "none") +
    ylim(0, 1.1) +
    ylab("Quantile") +
    xlab("Memory") +
    theme(text = element_text(size = 20))
  ggsave(p, file = paste("figures/", language, "-listener-surprisal-memory-QUANTILES_onlyWordForms_boundedVocab.pdf", sep = ""), height = 3.5, width = 4.5)
  return(p)
}
# Lookup table of languages to plot.
languages = read.csv("languages.tsv", sep="\t")
# NOTE(review): iterating over a data.frame iterates over its *columns*, so
# `language` is bound to a whole column vector rather than to each language
# in turn; presumably this should be `languages[[1]]` (or the relevant
# column) -- TODO confirm against the structure of languages.tsv.
for(language in languages) {
memListenerSurpPlot_onlyWordForms_boundedVocab(language)
}
| /code/analysis/visualize_neural/ARCHIVE/plot_wordsOnly_quantile.R | no_license | m-hahn/memory-surprisal | R | false | false | 2,018 | r | library(tidyr)
library(dplyr)
library(ggplot2)
# Plots CIs for the quantile
fullData_ConfidenceLowerBound = read.csv("../../results/tradeoff/listener-curve-binomial-confidence-bound-quantile.tsv", sep="\t")
fullData_ConfidenceLowerBound_05 = read.csv("../../results/tradeoff/listener-curve-binomial-confidence-bound-quantile-05.tsv", sep="\t")
fullData_BinomialTest = read.csv("../../results/tradeoff/listener-curve-binomial-test.tsv", sep="\t")
# Plots, for one language, the binomial lower confidence bound (dotted), the
# 0.05 bound (dashed) and the empirical quantile curve (solid) of listener
# surprisal as a function of memory, annotated with binomial-test p-values;
# saves the figure to figures/<language>-...pdf and returns the ggplot object.
memListenerSurpPlot_onlyWordForms_boundedVocab = function(language) {
data = fullData_ConfidenceLowerBound %>% filter(Language == language)
data2 = fullData_BinomialTest %>% filter(Language == language)
data3 = fullData_ConfidenceLowerBound_05 %>% filter(Language == language)
plot = ggplot(data, aes(x=Memory, y=LowerConfidenceBound, fill=Type, color=Type))
plot = plot + geom_line(size=1, linetype="dotted")
plot = plot + geom_line(data=data3, size=2, linetype="dashed")
plot = plot + geom_line(data=data2, aes(x=Memory, y=BetterEmpirical), size=2)
# Human-readable p-value labels; values that round to zero get a floor label
data2 = data2 %>% mutate(pValue_print = ifelse(round(pValue,5) == 0, "p<0.00001", paste("p=", round(pValue,5), sep="")))
# Annotate every 9th position, offset vertically so the two types don't overlap
plot = plot + geom_text(data=data2 %>% filter(Position %% 9 == 0, Type == "REAL_REAL"), aes(x=Memory, y=BetterEmpirical+0.1, label=pValue_print), size=3)
plot = plot + geom_text(data=data2 %>% filter(Position %% 9 == 0, Type == "GROUND"), aes(x=Memory, y=BetterEmpirical+0.05, label=pValue_print), size=3)
plot = plot + theme_classic()
plot = plot + theme(legend.position="none")
plot = plot + ylim(0,1.1)
plot = plot + ylab("Quantile")
plot = plot + xlab("Memory")
plot = plot + theme(text = element_text(size=20))
ggsave(plot, file=paste("figures/",language,"-listener-surprisal-memory-QUANTILES_onlyWordForms_boundedVocab.pdf", sep=""), height=3.5, width=4.5)
return(plot)
}
languages = read.csv("languages.tsv", sep="\t")
for(language in languages) {
memListenerSurpPlot_onlyWordForms_boundedVocab(language)
}
|
library(tidyverse)
source('scripts/read_files.R')
source('scripts/animate_play.R')
source('scripts/pick_play.R')
games <- read_games()
plays <- read_plays()
positions <- read_positions()
tracking <- read_tracking(max_week = 1L, positions = positions, drop_cols = TRUE, cols = c('time', 'display_name'))
tracking
tracking %>% arrange(desc(a))
one_play <-
tracking %>%
filter(game_id == 2018090901, play_id == 5369, nfl_id == 2555540)
one_play
events_pass_outcome <- sprintf('pass_outcome_%s', c('caught', 'incomplete', 'interception', 'touchdown'))
events_throw <- c('pass_forward', 'pass_shovel')
tracking_clipped_at_pass_outcome <- tracking %>% clip_tracking_at_events(events = events_pass_outcome)
tracking_clipped_at_pass_outcome
# Frame-to-frame changes in orientation (o) and direction (dir), with the
# 0/360-degree wraparound unwrapped so that e.g. 359 -> 1 counts as +2
# degrees rather than -358 (and 1 -> 359 as -2 rather than +358):
#   1 - 359 -> 2    # o - o_lag1 + 360
#   359 - 1 -> -2   # o - o_lag1 - 360
o_changes <-
tracking_clipped_at_pass_outcome %>%
# Restrict to the first game only (exploratory subset)
filter(game_id == dplyr::first(game_id)) %>%
# filter(play_id == dplyr::first(play_id)) %>%
arrange(game_id, play_id, nfl_id, frame_id) %>%
# Lag within each player-play trajectory so diffs never cross trajectories
group_by(game_id, play_id, nfl_id) %>%
mutate(o_lag1 = dplyr::lag(o, n = 1L), dir_lag1 = dplyr::lag(dir, n = 1L)) %>%
ungroup() %>%
mutate(
# o_diff1 = o - o_lag1,
# The wraparound correction only fires when the two angles sit on opposite
# sides of the 0/360 seam (one <= 90, the other >= 270)
o_diff1 = case_when(
o <= 90 & o_lag1 >= 270 ~ (o - o_lag1 + 360),
o >= 270 & o_lag1 <= 90 ~ (o - o_lag1 - 360),
TRUE ~ o - o_lag1
),
dir_diff1 = case_when(
dir <= 90 & dir_lag1 >= 270 ~ (dir - dir_lag1 + 360),
dir >= 270 & dir_lag1 <= 90 ~ (dir - dir_lag1 - 360),
TRUE ~ dir - dir_lag1
)
) %>%
# Largest orientation changes first; angle columns moved to the front
arrange(desc(abs(o_diff1))) %>%
relocate(o, o_lag1, o_diff1, dir, dir_lag1, dir_diff1)
o_changes
o_changes %>%
filter(!is.na(o_lag1)) %>%
select(o_diff1, dir_diff1) %>%
head(1000) %>%
# sample_frac(0.1) %>%
ggplot() +
aes(x = o_diff1, y = dir_diff1) +
geom_point()
o_changes %>%
filter(!is.na(o_lag1)) %>%
select(o_diff1, dir_diff1) %>%
corrr::correlate()
o_changes %>% select(dir) %>% skimr::skim()
o_changes %>% drop_na() %>% ggplot() + aes(x = o_diff1) + geom_histogram(binwidth = 30)
| /scripts/eda_db_angles.R | no_license | SUNNY11286/oh_snap | R | false | false | 2,066 | r |
library(tidyverse)
source('scripts/read_files.R')
source('scripts/animate_play.R')
source('scripts/pick_play.R')
games <- read_games()
plays <- read_plays()
positions <- read_positions()
tracking <- read_tracking(max_week = 1L, positions = positions, drop_cols = TRUE, cols = c('time', 'display_name'))
tracking
tracking %>% arrange(desc(a))
one_play <-
tracking %>%
filter(game_id == 2018090901, play_id == 5369, nfl_id == 2555540)
one_play
events_pass_outcome <- sprintf('pass_outcome_%s', c('caught', 'incomplete', 'interception', 'touchdown'))
events_throw <- c('pass_forward', 'pass_shovel')
tracking_clipped_at_pass_outcome <- tracking %>% clip_tracking_at_events(events = events_pass_outcome)
tracking_clipped_at_pass_outcome
# 1 - 359 -> 2 # o - o_lag5 + 360
# 359 - 1 -> -2 # o - o_lag5 - 360
o_changes <-
tracking_clipped_at_pass_outcome %>%
filter(game_id == dplyr::first(game_id)) %>%
# filter(play_id == dplyr::first(play_id)) %>%
arrange(game_id, play_id, nfl_id, frame_id) %>%
group_by(game_id, play_id, nfl_id) %>%
mutate(o_lag1 = dplyr::lag(o, n = 1L), dir_lag1 = dplyr::lag(dir, n = 1L)) %>%
ungroup() %>%
mutate(
# o_diff1 = o - o_lag1,
o_diff1 = case_when(
o <= 90 & o_lag1 >= 270 ~ (o - o_lag1 + 360),
o >= 270 & o_lag1 <= 90 ~ (o - o_lag1 - 360),
TRUE ~ o - o_lag1
),
dir_diff1 = case_when(
dir <= 90 & dir_lag1 >= 270 ~ (dir - dir_lag1 + 360),
dir >= 270 & dir_lag1 <= 90 ~ (dir - dir_lag1 - 360),
TRUE ~ dir - dir_lag1
)
) %>%
arrange(desc(abs(o_diff1))) %>%
relocate(o, o_lag1, o_diff1, dir, dir_lag1, dir_diff1)
o_changes
o_changes %>%
filter(!is.na(o_lag1)) %>%
select(o_diff1, dir_diff1) %>%
head(1000) %>%
# sample_frac(0.1) %>%
ggplot() +
aes(x = o_diff1, y = dir_diff1) +
geom_point()
o_changes %>%
filter(!is.na(o_lag1)) %>%
select(o_diff1, dir_diff1) %>%
corrr::correlate()
o_changes %>% select(dir) %>% skimr::skim()
o_changes %>% drop_na() %>% ggplot() + aes(x = o_diff1) + geom_histogram(binwidth = 30)
|
# Call the adapted selfTrain function: model formula is "<classe> ~ ." with an
# rpartXse learner (se = 0.5); the remaining positional arguments
# ('f', 0.9, 10, 1, TRUE) configure the self-training run -- see
# funcSelfTrain's definition for their meaning (TODO confirm).
ST <- funcSelfTrain(as.formula(paste(classe,'~', '.')), base_treino_self_training,learner('rpartXse',list(se=0.5)),'f',0.9,10,1,TRUE)
# Confusion matrix: predicted classes (rows) vs. true classes (columns)
matriz_confusao1 = table(predict(ST,base_teste,type='class'),base_teste$class)
n <- length(base_teste$class)
# Per-class accuracy (%): diagonal over column totals
cat("\n Acerto (%) = \n", levels(base_original[, classe]), "\n", diag(matriz_confusao1) / colSums(matriz_confusao1) * 100)
# Global accuracy (%)
acc <- ((sum(diag(matriz_confusao1)) / n) * 100)
# Accumulate run results in the (pre-existing) global vectors:
# accuracy, dataset name and labelling rate
acc_g <- c(acc_g, acc)
bd <- c(bd, bd_nome)
tx <- c(tx, taxa)
cat("\n Acerto global (%) =", acc)
cat('FIM') #, '\t base de dados ', i, '\n', 'total rotulados: ', total_rotulados, '\n')
| /testes_experimentos/treinamento.R | no_license | karlianev/projeto_karliane | R | false | false | 641 | r | #chamando a funcao selfTrain adaptada
ST <- funcSelfTrain(as.formula(paste(classe,'~', '.')), base_treino_self_training,learner('rpartXse',list(se=0.5)),'f',0.9,10,1,TRUE)
matriz_confusao1 = table(predict(ST,base_teste,type='class'),base_teste$class)
n <- length(base_teste$class)
cat("\n Acerto (%) = \n", levels(base_original[, classe]), "\n", diag(matriz_confusao1) / colSums(matriz_confusao1) * 100)
acc <- ((sum(diag(matriz_confusao1)) / n) * 100)
acc_g <- c(acc_g, acc)
bd <- c(bd, bd_nome)
tx <- c(tx, taxa)
cat("\n Acerto global (%) =", acc)
cat('FIM') #, '\t base de dados ', i, '\n', 'total rotulados: ', total_rotulados, '\n')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mask_raster.R
\name{mask_raster}
\alias{mask_raster}
\title{mask raster}
\usage{
mask_raster(spatialpoint, name, name.supplement = "", ahn_raster,
AHN3 = TRUE, azimuth, radius)
}
\arguments{
\item{spatialpoint}{single sf point in RD new coordinates}
\item{name}{of AWS or location}
\item{ahn_raster}{AHN raster}
\item{AHN3}{Default TRUE. Set to FALSE if AHN2 needs to be used.}
\item{radius}{distance radius of raster in metres.}
\item{azimuth}{solar azimuth angle in degrees}
}
\value{
ahn mask
}
\description{
section mask of raster
}
\examples{
}
\author{
Jelle Stuurman
}
| /man/mask_raster.Rd | no_license | Jellest/temperatureSC | R | false | true | 664 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mask_raster.R
\name{mask_raster}
\alias{mask_raster}
\title{mask raster}
\usage{
mask_raster(spatialpoint, name, name.supplement = "", ahn_raster,
AHN3 = TRUE, azimuth, radius)
}
\arguments{
\item{spatialpoint}{single sf point in RD new coordinates}
\item{name}{of AWS or location}
\item{ahn_raster}{AHN raster}
\item{AHN3}{Default TRUE. Set to FALSE if AHN2 needs to be used.}
\item{radius}{distance radius of raster in metres.}
\item{azimuith}{solar azimuth angle in degrees}
}
\value{
ahn mask
}
\description{
section mask of raster
}
\examples{
}
\author{
Jelle Stuurman
}
|
library(deSolve)
# Right-hand side of the SEIR compartmental model, in the signature that
# deSolve::ode() expects.
#
# t   : time (unused; dynamics are autonomous)
# y   : state vector (S, E, I, R)
# par : parameters (R0, a, gamma, N) -- basic reproduction number,
#       rate of leaving the exposed compartment, recovery rate, population.
#       The transmission rate is recovered as beta = gamma * R0.
#
# Returns a list whose first element is the derivative vector
# (dS, dE, dI, dR), as required by deSolve.
seir_ode <- function(t, y, par) {
  susceptible <- y[1]
  exposed <- y[2]
  infectious <- y[3]
  basic_r <- par[1]
  incubation_rate <- par[2]
  recovery_rate <- par[3]
  population <- par[4]
  transmission_rate <- recovery_rate * basic_r
  new_infections <- transmission_rate * susceptible * infectious / population
  symptom_onset <- incubation_rate * exposed
  recoveries <- recovery_rate * infectious
  list(c(-new_infections,
         new_infections - symptom_onset,
         symptom_onset - recoveries,
         recoveries))
}
# Population and initial compartment sizes
N = 83e6
E0 = 40e3
I0 = 10e3
# Daily time grid over 150 days
times <- seq(0,150,by=1)
# Initial state (S, E, I, R): susceptibles are everyone not already exposed
# or infectious.  (Fixed: was N-(E0-I0), which *added* I0 instead of
# subtracting it, overcounting S0 by 2*I0.)
Y0 <- c(N-(E0+I0),E0,I0,0)
# Integrate with R0 = 2, incubation period 5.5 days, infectious period 9 days
out <- ode(y=Y0,times=times,func=seir_ode,par=c(2.0,1/5.5,1/9,N))
| /deterministic_models/seir.R | no_license | joshuakuepper/Challenge-1757 | R | false | false | 452 | r | library(deSolve)
seir_ode <- function(t,y,par){
S <- y[1]
E <- y[2]
I <- y[3]
R <- y[4]
R0 <- par[1]
a <- par[2]
gamma <- par[3]
N <- par[4]
beta <- gamma*R0
dS = -beta * S * I / N
dE = beta * S * I / N - a * E
dI = a * E - gamma * I
dR = gamma * I
list(c(dS,dE,dI,dR))
}
N = 83e6
E0 = 40e3
I0 = 10e3
times <- seq(0,150,by=1)
Y0 <- c(N-(E0-I0),E0,I0,0)
out <- ode(y=Y0,times=times,func=seir_ode,par=c(2.0,1/5.5,1/9,N))
|
library(tidyverse)
library(lubridate)
library(extrafont)
loadfonts(device = "win")
colores_1 <- c('#008080','#002456')
colores_2 <- c('#31f48e','#07aa7b','#0bbeff','#0086ff','#01437d','#ff0040')
# Simulate two daily series (cat_1, cat_2) over 2014-2019 as multiplicative
# random walks: each day multiplies the previous value by a factor uniform
# in [0.9975, 1.0035] (slight upward drift).
dat <- tibble(date = c(seq(date('2014-01-01'),date('2019-12-31'),1))) %>%
mutate(cat_1 = 10,
cat_2 = 12)
for (i in 2:nrow(dat)) {
dat$cat_1[i] <- (1 + runif(1,-0.0025,0.0035))*dat$cat_1[i - 1]
dat$cat_2[i] <- (1 + runif(1,-0.0025,0.0035))*dat$cat_2[i - 1]
}
# Monthly means per series, drawn as one coloured line per series and
# faceted by year with free scales; the x axis shows 3-letter Spanish
# month abbreviations.
dat %>%
gather('color','y',-date) %>%
group_by(month(date),year(date),color) %>%
summarise(y = mean(y)) %>%
ggplot(aes(x = `month(date)`,y = y,color = color)) +
geom_line() + geom_point() +
scale_x_continuous(breaks = seq(1,12,1),
labels = str_sub(c('Enero','Febrero','Marzo','Abril','Mayo','Junio',
'Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre'),1,3)) +
scale_color_manual(values = colores_1) +
# Minimal look: no panel background or grid, bold axis text, legend below
theme(panel.background = element_blank(),
panel.grid = element_blank(),
axis.text.x = element_text(angle = 90,face = 'bold'),
axis.text.y = element_text(face = 'bold'),
axis.title.y = element_text(face = 'bold'),
axis.title.x = element_blank(),
axis.ticks = element_blank(),
text = element_text(family = 'Arial'),
plot.title = element_text(hjust = 0.5,face = 'bold'),
legend.title = element_blank(),
legend.text = element_text(face = 'bold'),
legend.position = 'bottom') +
ylab('Eje y\n') +
ggtitle('Título') +
facet_wrap(~`year(date)`,scales = 'free')
| /Facet wrap de gráficas de línea por categorías.R | no_license | alejandroxag/R-ggplots | R | false | false | 1,671 | r |
library(tidyverse)
library(lubridate)
library(extrafont)
loadfonts(device = "win")
colores_1 <- c('#008080','#002456')
colores_2 <- c('#31f48e','#07aa7b','#0bbeff','#0086ff','#01437d','#ff0040')
dat <- tibble(date = c(seq(date('2014-01-01'),date('2019-12-31'),1))) %>%
mutate(cat_1 = 10,
cat_2 = 12)
for (i in 2:nrow(dat)) {
dat$cat_1[i] <- (1 + runif(1,-0.0025,0.0035))*dat$cat_1[i - 1]
dat$cat_2[i] <- (1 + runif(1,-0.0025,0.0035))*dat$cat_2[i - 1]
}
dat %>%
gather('color','y',-date) %>%
group_by(month(date),year(date),color) %>%
summarise(y = mean(y)) %>%
ggplot(aes(x = `month(date)`,y = y,color = color)) +
geom_line() + geom_point() +
scale_x_continuous(breaks = seq(1,12,1),
labels = str_sub(c('Enero','Febrero','Marzo','Abril','Mayo','Junio',
'Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre'),1,3)) +
scale_color_manual(values = colores_1) +
theme(panel.background = element_blank(),
panel.grid = element_blank(),
axis.text.x = element_text(angle = 90,face = 'bold'),
axis.text.y = element_text(face = 'bold'),
axis.title.y = element_text(face = 'bold'),
axis.title.x = element_blank(),
axis.ticks = element_blank(),
text = element_text(family = 'Arial'),
plot.title = element_text(hjust = 0.5,face = 'bold'),
legend.title = element_blank(),
legend.text = element_text(face = 'bold'),
legend.position = 'bottom') +
ylab('Eje y\n') +
ggtitle('Título') +
facet_wrap(~`year(date)`,scales = 'free')
|
# Intermediate R Webinar: Data Manipulation with dplyr
# Install if necessary and load dplyr
#install.packages("dplyr)
library(dplyr)
# Set working directory
setwd("~/Documents/R_Class_Meetup/Class6")
# Read in data
census <- read.csv("~/Documents/R_Class_Meetup/Class6/Data/US_census_data.csv", stringsAsFactors = F)
medicaid.exp <- read.csv("~/Documents/R_Class_Meetup/Class6/Data/medicaid_expansion_status.csv", stringsAsFactors = F)
rural.codes <- read.csv("~/Documents/Opioid_Research/Data/Other_Data/ruralurbancodes2013.csv", header = T, stringsAsFactors = F)
# Print column names of census
ncol(census)
# Subset large data set to just county, pct_lt_poverty, unemployment_pct, pct_gt_high_school
census.sm <- census %>%
select(county, pct_lt_poverty, unemployment_pct, pct_gt_high_school)
# Checking result
colnames(census.sm)
# Drop unemployment_pct from census.sm
census.demo <- census.sm %>%
select(-unemployment_pct)
# Checking result
colnames(census.demo)
# Select county, state, FIPS, all variables containing "total"
# and all variables containing gt_65
retire.df <- census %>%
select(county, state, FIPS, contains("total"), contains("gt_65"))
# Check by printing column names
colnames(retire.df)
# Finding the 75th percentile of pct_lt_poverty
pov.75 <- quantile(census$pct_lt_poverty, probs = 0.75)
# Subset to only those states with pct_lt_poverty greater than the 75th percentile
high.poverty <- census %>% filter(pct_lt_poverty > pov.75)
summary(high.poverty$pct_lt_poverty)
# Sort census.sm by
sorted.df <- census.sm %>%
arrange(pct_lt_poverty, -unemployment_pct, pct_gt_high_school)
# Show first 6 rows to check data
head(sorted.df)
# Chained dplyr functions example
# First select these variables: county, state, FIPS, unemployment_pct, pct_govt_cash_asst, pct_food_stamps, pct_public_ins_lt_65
# Second filter to those counties with unemployment above the national average of 4.1
high.unemploy <- census %>%
select(county, state, FIPS, unemployment_pct, pct_govt_cash_asst,
pct_food_stamps, pct_public_ins_lt_65) %>%
filter(unemployment_pct > 4.1)
# Check new data frame for accuracy
colnames(high.unemploy)
min(high.unemploy$unemployment_pct)
# Subset retire.df from above to only those variables including 2017, county, and state
retire.sm <- retire.df %>%
select(county, state, ends_with("2017")) %>%
mutate(perc.gt.65 = pop_gt_65_2017/pop_total_2017*100)
head(retire.sm, n = 3L)
# Pull median pct_food_stamps, dropping NAs so a single missing value does
# not turn the median -- and every derived High/Low label -- into NA; this
# also matches the na.rm behaviour of the quantile call below
median.food.stmp <- median(census$pct_food_stamps, na.rm = TRUE)
# Pull 33rd and 66th percentiles for unemployment_pct, ignoring missing values
perc.unemp <- quantile(census$unemployment_pct, probs = c(0.333, 0.666), na.rm = TRUE)
# Derive categorical versions of the food-stamp and unemployment measures
unemploy.foodst <- census %>%
  # Keep identifying columns plus the welfare / public-insurance variables
  select(county, state, FIPS, pct_food_stamps, pct_public_ins_19_to_64,
         pct_public_ins_lt_19, unemployment_pct) %>%
  # High/low split of pct_food_stamps at its median
  mutate(food_stamps_hl = if_else(pct_food_stamps > median.food.stmp, "High", "Low"),
         # Low/Medium/High split of unemployment_pct at its 33rd/66th percentiles;
         # NA_character_ keeps case_when() type-stable for unmatched rows
         unemp.hml = case_when(unemployment_pct < perc.unemp[1] ~ "Low",
                               between(unemployment_pct, perc.unemp[1], perc.unemp[2]) ~ "Medium",
                               unemployment_pct > perc.unemp[2] ~ "High",
                               TRUE ~ NA_character_))
# Check group sizes
table(unemploy.foodst$unemp.hml)
table(unemploy.foodst$food_stamps_hl)
# Create summary statistics of unemploy.foodst grouped by unemp.hml
pub.health.ins.summ <- unemploy.foodst %>%
group_by(unemp.hml) %>%
# Calculate mean adult and child public health insurance percents
summarize(adult.pub.ins.pct = mean(pct_public_ins_19_to_64),
child.pub.ins.pct = mean(pct_public_ins_lt_19))
# To use group_by() and retain the original structure of the data frame use mutate() instead of summarize()
pub.health.ins <- unemploy.foodst %>%
group_by(unemp.hml) %>%
# Calculate mean adult and child public health insurance percents
mutate(adult.pub.ins.pct = mean(pct_public_ins_19_to_64),
child.pub.ins.pct = mean(pct_public_ins_lt_19))
# Use if/else to make anything between 1-3 Metropolitan
# Everything else should be Rural
rural.codes.grp <- rural.codes %>%
mutate(rural.metro = if_else(between(RUCC_2013,1,3), "Metropolitan", "Rural")) %>%
# Removing RUCC_2013 rural code variable
select(-RUCC_2013)
# Join in Medicaid expansion status with an inner join (keeping only matches)
# Chain another join in with left join to maintain the number of rows in the left data frame
pub.health.ins.exp <- inner_join(unemploy.foodst, medicaid.exp) %>%
left_join(., rural.codes.grp)
# See that the above left join brings in 2 NA values for the 2 states where FIPS has been changed
# The joins above are equivalent to the code below
pub.health.ins.exp <- inner_join(unemploy.foodst, medicaid.exp, by = "state") %>%
left_join(., rural.codes.grp, by = "FIPS")
# Summarize public health insurance by rural.metro and medcaid expansion status
med.exp.rur.summ <- pub.health.ins.exp %>%
group_by(rural.metro, medicaid.expand) %>%
summarize(adult.pub.ins.pct = mean(pct_public_ins_19_to_64),
child.pub.ins.pct = mean(pct_public_ins_lt_19))
# Build a small demo data frame whose fourth row repeats the first
duplicate.df <- data.frame(var1 = c(1, 2, 3, 1),
                           var2 = c(4, 5, 6, 4),
                           var3 = c(7, 8, 9, 7))
# dplyr's distinct() drops the repeated row
no.dup <- distinct(duplicate.df)
no.dup
| /IntermediateR/Class1/RProgramming_dplyr_Package_InClass_Code.R | no_license | kelsey-huntzberry/DataAnalysisLab | R | false | false | 5,753 | r | # Intermediate R Webinar: Data Manipulation with dplyr
# Install if necessary and load dplyr
#install.packages("dplyr)
library(dplyr)
# Set working directory
setwd("~/Documents/R_Class_Meetup/Class6")
# Read in data
census <- read.csv("~/Documents/R_Class_Meetup/Class6/Data/US_census_data.csv", stringsAsFactors = F)
medicaid.exp <- read.csv("~/Documents/R_Class_Meetup/Class6/Data/medicaid_expansion_status.csv", stringsAsFactors = F)
rural.codes <- read.csv("~/Documents/Opioid_Research/Data/Other_Data/ruralurbancodes2013.csv", header = T, stringsAsFactors = F)
# Print column names of census
ncol(census)
# Subset large data set to just county, pct_lt_poverty, unemployment_pct, pct_gt_high_school
census.sm <- census %>%
select(county, pct_lt_poverty, unemployment_pct, pct_gt_high_school)
# Checking result
colnames(census.sm)
# Drop unemployment_pct from census.sm
census.demo <- census.sm %>%
select(-unemployment_pct)
# Checking result
colnames(census.demo)
# Select county, state, FIPS, all variables containing "total"
# and all variables containing gt_65
retire.df <- census %>%
select(county, state, FIPS, contains("total"), contains("gt_65"))
# Check by printing column names
colnames(retire.df)
# Finding the 75th percentile of pct_lt_poverty
pov.75 <- quantile(census$pct_lt_poverty, probs = 0.75)
# Subset to only those states with pct_lt_poverty greater than the 75th percentile
high.poverty <- census %>% filter(pct_lt_poverty > pov.75)
summary(high.poverty$pct_lt_poverty)
# Sort census.sm by
sorted.df <- census.sm %>%
arrange(pct_lt_poverty, -unemployment_pct, pct_gt_high_school)
# Show first 6 rows to check data
head(sorted.df)
# Chained dplyr functions example
# First select these variables: county, state, FIPS, unemployment_pct, pct_govt_cash_asst, pct_food_stamps, pct_public_ins_lt_65
# Second filter to those counties with unemployment above the national average of 4.1
high.unemploy <- census %>%
select(county, state, FIPS, unemployment_pct, pct_govt_cash_asst,
pct_food_stamps, pct_public_ins_lt_65) %>%
filter(unemployment_pct > 4.1)
# Check new data frame for accuracy
colnames(high.unemploy)
min(high.unemploy$unemployment_pct)
# Subset retire.df from above to only those variables including 2017, county, and state
retire.sm <- retire.df %>%
select(county, state, ends_with("2017")) %>%
mutate(perc.gt.65 = pop_gt_65_2017/pop_total_2017*100)
head(retire.sm, n = 3L)
# Pull median pct_food_stamps, dropping NAs so a single missing value does
# not turn the median -- and every derived High/Low label -- into NA; this
# also matches the na.rm behaviour of the quantile call below
median.food.stmp <- median(census$pct_food_stamps, na.rm = TRUE)
# Pull 33rd and 66th percentiles for unemployment_pct, ignoring missing values
perc.unemp <- quantile(census$unemployment_pct, probs = c(0.333, 0.666), na.rm = TRUE)
unemploy.foodst <- census %>%
# Select to county, state, FIPS, pct_food_stamps, pct_public_ins_19_to_64, pct_public_ins_lt_19, and unemployment_pct
select(county, state, FIPS, pct_food_stamps, pct_public_ins_19_to_64,
pct_public_ins_lt_19, unemployment_pct) %>%
# Create high/low group for pct_food_stamps with median value
mutate(food_stamps_hl = if_else(pct_food_stamps > median.food.stmp, "High", "Low"),
# Create high, medium, low groups with 33rd and 66th percentiles on census unemployment_pct with case_when=
unemp.hml = case_when(unemployment_pct < perc.unemp[1] ~ "Low",
between(unemployment_pct, perc.unemp[1], perc.unemp[2]) ~ "Medium",
unemployment_pct > perc.unemp[2] ~ "High",
TRUE ~ as.character(NA)))
# Check results
table(unemploy.foodst$unemp.hml)
table(unemploy.foodst$food_stamps_hl)
# Create summary statistics of unemploy.foodst grouped by unemp.hml
pub.health.ins.summ <- unemploy.foodst %>%
group_by(unemp.hml) %>%
# Calculate mean adult and child public health insurance percents
summarize(adult.pub.ins.pct = mean(pct_public_ins_19_to_64),
child.pub.ins.pct = mean(pct_public_ins_lt_19))
# To use group_by() and retain the original structure of the data frame use mutate() instead of summarize()
pub.health.ins <- unemploy.foodst %>%
group_by(unemp.hml) %>%
# Calculate mean adult and child public health insurance percents
mutate(adult.pub.ins.pct = mean(pct_public_ins_19_to_64),
child.pub.ins.pct = mean(pct_public_ins_lt_19))
# Use if/else to make anything between 1-3 Metropolitan
# Everything else should be Rural
rural.codes.grp <- rural.codes %>%
mutate(rural.metro = if_else(between(RUCC_2013,1,3), "Metropolitan", "Rural")) %>%
# Removing RUCC_2013 rural code variable
select(-RUCC_2013)
# Join in Medicaid expansion status with an inner join (keeping only matches)
# Chain another join in with left join to maintain the number of rows in the left data frame
pub.health.ins.exp <- inner_join(unemploy.foodst, medicaid.exp) %>%
left_join(., rural.codes.grp)
# See that the above left join brings in 2 NA values for the 2 states where FIPS has been changed
# The joins above are equivalent to the code below
pub.health.ins.exp <- inner_join(unemploy.foodst, medicaid.exp, by = "state") %>%
left_join(., rural.codes.grp, by = "FIPS")
# Summarize public health insurance by rural.metro and medcaid expansion status
med.exp.rur.summ <- pub.health.ins.exp %>%
group_by(rural.metro, medicaid.expand) %>%
summarize(adult.pub.ins.pct = mean(pct_public_ins_19_to_64),
child.pub.ins.pct = mean(pct_public_ins_lt_19))
# Create a small data frame with duplicates
duplicate.df <- data.frame(cbind(c(1, 2, 3, 1),
c(4, 5, 6, 4),
c(7, 8, 9, 7)))
colnames(duplicate.df) <- c("var1", "var2", "var3")
# Show how dplyr can remove duplicates
no.dup <- duplicate.df %>%
distinct(.)
no.dup
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/tools.R
\name{show_config_file}
\alias{show_config_file}
\title{Read and print Plotly config file, wrapping get_credentials_file()}
\usage{
show_config_file(args = c())
}
\arguments{
\item{args}{Character vector of keys you are looking up}
}
\value{
List of keyword-value pairs (credentials)
}
\description{
Read and print Plotly config file, wrapping get_credentials_file()
}
| /expend_analyses/EndSp_budget/plotly/man/show_config_file.Rd | no_license | jacob-ogre/esa_expenditures_old | R | false | false | 464 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/tools.R
\name{show_config_file}
\alias{show_config_file}
\title{Read and print Plotly config file, wrapping get_credentials_file()}
\usage{
show_config_file(args = c())
}
\arguments{
\item{args}{Character vector of keys you are looking up}
}
\value{
List of keyword-value pairs (credentials)
}
\description{
Read and print Plotly config file, wrapping get_credentials_file()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/timePretty.R
\name{timePretty}
\alias{timePretty}
\title{Pretty Axis}
\usage{
timePretty(x, labels = "Auto")
}
\arguments{
\item{x}{time difference data}
\item{labels}{either "Auto," which lets the function decide how many labels,
the approximate number of labels, or the actual labels to use.}
}
\value{
Information about the axis labels.
}
\description{
Constructs information for making a nicely formatted date/time axis.
}
\seealso{
\code{\link{timePlot}}
}
\keyword{dplot}
| /man/timePretty.Rd | permissive | ldecicco-USGS/smwrGraphs | R | false | true | 557 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/timePretty.R
\name{timePretty}
\alias{timePretty}
\title{Pretty Axis}
\usage{
timePretty(x, labels = "Auto")
}
\arguments{
\item{x}{time difference data}
\item{labels}{either "Auto," which lets the function decide how many labels,
the approximate number of labels, or the actual labels to use.}
}
\value{
Information about the axis labels.
}
\description{
Constructs information for making a nicely formatted date/time axis.
}
\seealso{
\code{\link{timePlot}}
}
\keyword{dplot}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model2netcdf.dvmdostem.R
\name{model2netcdf.dvmdostem}
\alias{model2netcdf.dvmdostem}
\title{Code to convert dvmdostem netcdf output into into CF standard}
\arguments{
\item{outdir}{Location of dvmdostem model output}
\item{runstart}{??}
\item{runend}{??}
}
\description{
Code to convert dvmdostem netcdf output into into CF standard
}
\examples{
\dontrun{
# example code here?
}
}
\author{
Tobey Carman, Shawn Serbin
}
| /models/dvmdostem/man/model2netcdf.dvmdostem.Rd | permissive | yan130/pecan | R | false | true | 503 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model2netcdf.dvmdostem.R
\name{model2netcdf.dvmdostem}
\alias{model2netcdf.dvmdostem}
\title{Code to convert dvmdostem netcdf output into into CF standard}
\arguments{
\item{outdir}{Location of dvmdostem model output}
\item{runstart}{??}
\item{runend}{??}
}
\description{
Code to convert dvmdostem netcdf output into into CF standard
}
\examples{
\dontrun{
# example code here?
}
}
\author{
Tobey Carman, Shawn Serbin
}
|
# Load packages and functions ---------------------------------------------
library(fda.usc)
library(roahd)
library(energy)
library(entropy)
library(partykit)
library(cluster)
library(igraph)
library(NetworkDistance)
source("functions_v2.R")
source("node_v2.R")
source("split_v2.R")
source("party_v2.R")
source("plot_v2.R")
source("NKI_data_import.R")
# Import data -------------------------------------------------------------
nki <- generate_dataset(data_folder = 'NKI_Rockland/',
y_filename = 'NKI_clinical_information.txt',
y_column = 'WASI_FULL_4',
output_filename = 'NKIdata.RData',
output_folder = ".",
ext_save = FALSE)
# Dataset construction ----------------------------------------------------
# Response
resp <- nki$y
# Covariates list
cov.list <- list(lapply(nki$structural, function(g) igraph::graph_from_adjacency_matrix(g, weighted = T)),
lapply(nki$functional, function(g) igraph::graph_from_adjacency_matrix(g, weighted = T)))
# Energy Tree fit ---------------------------------------------------------
# Fit
set.seed(2948)
etree_fit <- etree(response = resp,
covariates = cov.list,
case.weights = NULL,
minbucket = 5,
alpha = 0.5,
R = 1000,
split.type = 'cluster',
coef.split.type = 'test')
# Plot
plot(etree_fit)
# In-sample fitted values
y_fitted <- predict(etree_fit)
# Variance-normalised mean squared error ("Mean Error Prediction").
# NOTE(review): all three metrics below overwrite the same MEP_etree
# variable; only the last survives.  The wrapping parentheses make each
# assignment print its value.
(MEP_etree <- (sum((resp-y_fitted)^2)/length(resp))/(var(resp)))
# Root Mean Square Error
(MEP_etree <- sqrt(sum((resp-y_fitted)^2)/length(resp)))
# Mean Square Percentage Error (relative squared errors)
(MEP_etree <- sum(((resp-y_fitted)/resp)^2)/length(resp))
# Prediction --------------------------------------------------------------
# Predicted values
y_pred <- predict(etree_fit, newdata = cov.list)
| /NKI_fit.R | no_license | tulliapadellini/energytree | R | false | false | 1,975 | r |
# Load packages and functions ---------------------------------------------
library(fda.usc)
library(roahd)
library(energy)
library(entropy)
library(partykit)
library(cluster)
library(igraph)
library(NetworkDistance)
source("functions_v2.R")
source("node_v2.R")
source("split_v2.R")
source("party_v2.R")
source("plot_v2.R")
source("NKI_data_import.R")
# Import data -------------------------------------------------------------
nki <- generate_dataset(data_folder = 'NKI_Rockland/',
y_filename = 'NKI_clinical_information.txt',
y_column = 'WASI_FULL_4',
output_filename = 'NKIdata.RData',
output_folder = ".",
ext_save = FALSE)
# Dataset construction ----------------------------------------------------
# Response
resp <- nki$y
# Covariates list
cov.list <- list(lapply(nki$structural, function(g) igraph::graph_from_adjacency_matrix(g, weighted = T)),
lapply(nki$functional, function(g) igraph::graph_from_adjacency_matrix(g, weighted = T)))
# Energy Tree fit ---------------------------------------------------------
# Fit
set.seed(2948)
etree_fit <- etree(response = resp,
covariates = cov.list,
case.weights = NULL,
minbucket = 5,
alpha = 0.5,
R = 1000,
split.type = 'cluster',
coef.split.type = 'test')
# Plot
plot(etree_fit)
# Fitted values
y_fitted <- predict(etree_fit)
# Mean Error Prediction
(MEP_etree <- (sum((resp-y_fitted)^2)/length(resp))/(var(resp)))
# Root Mean Square Error
(MEP_etree <- sqrt(sum((resp-y_fitted)^2)/length(resp)))
# Mean Square Percentage Error
(MEP_etree <- sum(((resp-y_fitted)/resp)^2)/length(resp))
# Prediction --------------------------------------------------------------
# Predicted values
y_pred <- predict(etree_fit, newdata = cov.list)
|
# Connection for the Circos karyotype definition file
f <- file("./output/karyotype.txt")
# Build the intermediate edge table first if it does not exist yet
if(!file.exists("./temp/figure7.xlsx"))
{
source("./figure7p1.R")
}
edges <- read.xlsx("./temp/figure7.xlsx")
#edges$V2 <- gsub("_", " ", edges$V2)
# Sequential ColorBrewer palette names; each provides shades 1-6, cycled below
colors <- c("blues-6-seq", "bugn-6-seq", "bupu-6-seq", "gnbu-6-seq", "greens-6-seq", "oranges-6-seq", "orrd-6-seq",
"pubu-6-seq",
"pubugn-6-seq",
"purd-6-seq",
"purples-6-seq",
"rdpu-6-seq",
"reds-6-seq",
"ylgn-6-seq",
"ylgnbu-6-seq",
"ylorbr-6-seq",
"ylorrd-6-seq")
# c1 indexes the palette, c2 the shade within the palette (1-6)
c1 <- 1
c2 <- 1
# Emit one Circos "chromosome" (span 0-100) per unique node, each with the
# next colour in the palette/shade cycle
boxes <- unique(c(edges$V1, edges$V2))
output <- c()
for(b in boxes)
{
# After shade 6, advance to the next palette; wrap to the first palette
# when they are exhausted
if(c2 > 6)
{
c2 <- 1
c1 <- c1 + 1
}
if(c1 > length(colors)){c1 <- 1}
cout <- paste(colors[c1], c2, sep = "-")
output <- c(output, paste("chr -", b, b, 0, 100, cout, sep = " "))
c2 <- c2 + 1
}
writeLines(output, f, useBytes = TRUE)
close(f)
output <- c()
f <- file("./output/edges.txt")
for(e in 1:nrow(edges))
{
if(edges[e, "V3"] == 1)
{
n <- paste(edges[e, "V1"], 1, 25, edges[e, "V2"], 1, 25, "color=green_a3", sep = " ")
}
if(edges[e, "V3"] == 2)
{
n <- paste(edges[e, "V1"], 26, 50, edges[e, "V2"], 26, 50, "color=red_a3", sep = " ")
}
if(edges[e, "V3"] == 3)
{
n <- paste(edges[e, "V1"], 51, 75, edges[e, "V2"], 51, 75, "color=vdblue,z=80", sep = " ")
}
if(edges[e, "V3"] == 4)
{
n <- paste(edges[e, "V1"], 76, 100, edges[e, "V2"], 76, 100, "color=vlred_a3", sep = " ")
}
output <- c(n, output)
}
writeLines(output, f)
close(f) | /Archive/Figure7p2.R | no_license | bhklab/DrugTissue | R | false | false | 1,569 | r | f <- file("./output/karyotype.txt")
if(!file.exists("./temp/figure7.xlsx"))
{
source("./figure7p1.R")
}
edges <- read.xlsx("./temp/figure7.xlsx")
#edges$V2 <- gsub("_", " ", edges$V2)
colors <- c("blues-6-seq", "bugn-6-seq", "bupu-6-seq", "gnbu-6-seq", "greens-6-seq", "oranges-6-seq", "orrd-6-seq",
"pubu-6-seq",
"pubugn-6-seq",
"purd-6-seq",
"purples-6-seq",
"rdpu-6-seq",
"reds-6-seq",
"ylgn-6-seq",
"ylgnbu-6-seq",
"ylorbr-6-seq",
"ylorrd-6-seq")
c1 <- 1
c2 <- 1
boxes <- unique(c(edges$V1, edges$V2))
output <- c()
for(b in boxes)
{
if(c2 > 6)
{
c2 <- 1
c1 <- c1 + 1
}
if(c1 > length(colors)){c1 <- 1}
cout <- paste(colors[c1], c2, sep = "-")
output <- c(output, paste("chr -", b, b, 0, 100, cout, sep = " "))
c2 <- c2 + 1
}
writeLines(output, f, useBytes = TRUE)
close(f)
output <- c()
f <- file("./output/edges.txt")
for(e in 1:nrow(edges))
{
if(edges[e, "V3"] == 1)
{
n <- paste(edges[e, "V1"], 1, 25, edges[e, "V2"], 1, 25, "color=green_a3", sep = " ")
}
if(edges[e, "V3"] == 2)
{
n <- paste(edges[e, "V1"], 26, 50, edges[e, "V2"], 26, 50, "color=red_a3", sep = " ")
}
if(edges[e, "V3"] == 3)
{
n <- paste(edges[e, "V1"], 51, 75, edges[e, "V2"], 51, 75, "color=vdblue,z=80", sep = " ")
}
if(edges[e, "V3"] == 4)
{
n <- paste(edges[e, "V1"], 76, 100, edges[e, "V2"], 76, 100, "color=vlred_a3", sep = " ")
}
output <- c(n, output)
}
writeLines(output, f)
close(f) |
## exanding group example
library(tcltk)
w <- tktoplevel()
tkwm.title(w, "Expanding frame example")
f <- ttkframe(w)
tkpack(f, expand=TRUE, fill="both")
f1 <- ttkframe(f); tkpack(f1)
b <- ttkbutton(f1, text=">"); tkpack(b, side="left")
l <- ttklabel(f1, text=" click to expand"); tkpack(l)
f2 <- ttkframe(f); tkpack(f2, expand=TRUE, fill="both")
## exp is frame to pack things into
exp <- ttkframe(f2); exp$env$state <- FALSE
## as a sample we pack in l a simple label.
l <- ttklabel(exp, text="this is some text for the frame")
tkpack(l)
## Resize the toplevel window containing widget `f` so it matches its
## requested (natural) size. Called after packing/forgetting the expandable
## frame so the window grows or shrinks to fit its current contents.
resizeFrame <- function(f) {
  tcl("update", "idletasks")   # flush pending geometry computations first
  toplevel <- tkwinfo("toplevel", f)
  reqHeight <- as.numeric(tkwinfo("reqheight", toplevel))
  curHeight <- as.numeric(tkwinfo("height", toplevel))
  if (reqHeight == curHeight)
    return()                   # already the requested height; nothing to do
  reqWidth <- as.numeric(tkwinfo("reqwidth", toplevel))
  ## Withdraw while resizing to avoid visible flicker, then re-show.
  tkwm.withdraw(toplevel)
  ## BUG FIX: Tk geometry specs are "WIDTHxHEIGHT"; the original code passed
  ## height first, which swapped the window's dimensions.
  tkwm.geometry(toplevel, paste(reqWidth, reqHeight, sep = "x"))
  tkwm.deiconify(toplevel)
}
# Toggle callback for the expander button `b` (bound to <Button-1> below).
# Reads and flips the open/closed flag stored in exp$env$state, and relies
# on the script-level widgets `exp`, `b` and `f2` being defined above.
cb <- function() {
  if(exp$env$state) {
    ## close: unpack the expandable frame, reset the button glyph, and
    ## shrink the toplevel window back down to fit
    tkpack("forget", exp)
    tkconfigure(b,text=">")
    resizeFrame(f2)
    exp$env$state <- FALSE
  } else {
    ## open: show the expandable frame and flip the button glyph
    ## NOTE(review): this branch does not call resizeFrame(); the window
    ## appears to grow via pack propagation -- confirm that is intended.
    tkpack(exp, expand=TRUE, fill="both")
    tkconfigure(b,text="V")
    exp$env$state <- TRUE
  }
}
tkbind(b,"<Button-1>", cb)
| /books/rgui/ProgGUIInR/inst/Examples/ch-tcltk/ex-tcltk-expandgroup.R | no_license | lawremi/RGtk2 | R | false | false | 1,279 | r | ## exanding group example
library(tcltk)
w <- tktoplevel()
tkwm.title(w, "Expanding frame example")
f <- ttkframe(w)
tkpack(f, expand=TRUE, fill="both")
f1 <- ttkframe(f); tkpack(f1)
b <- ttkbutton(f1, text=">"); tkpack(b, side="left")
l <- ttklabel(f1, text=" click to expand"); tkpack(l)
f2 <- ttkframe(f); tkpack(f2, expand=TRUE, fill="both")
## exp is frame to pack things into
exp <- ttkframe(f2); exp$env$state <- FALSE
## as a sample we pack in l a simple label.
l <- ttklabel(exp, text="this is some text for the frame")
tkpack(l)
## Resize the toplevel window containing widget `f` so it matches its
## requested (natural) size. Called after packing/forgetting the expandable
## frame so the window grows or shrinks to fit its current contents.
resizeFrame <- function(f) {
  tcl("update", "idletasks")   # flush pending geometry computations first
  toplevel <- tkwinfo("toplevel", f)
  reqHeight <- as.numeric(tkwinfo("reqheight", toplevel))
  curHeight <- as.numeric(tkwinfo("height", toplevel))
  if (reqHeight == curHeight)
    return()                   # already the requested height; nothing to do
  reqWidth <- as.numeric(tkwinfo("reqwidth", toplevel))
  ## Withdraw while resizing to avoid visible flicker, then re-show.
  tkwm.withdraw(toplevel)
  ## BUG FIX: Tk geometry specs are "WIDTHxHEIGHT"; the original code passed
  ## height first, which swapped the window's dimensions.
  tkwm.geometry(toplevel, paste(reqWidth, reqHeight, sep = "x"))
  tkwm.deiconify(toplevel)
}
cb <- function() {
if(exp$env$state) {
## close
tkpack("forget", exp)
tkconfigure(b,text=">")
resizeFrame(f2)
exp$env$state <- FALSE
} else {
## open
tkpack(exp, expand=TRUE, fill="both")
tkconfigure(b,text="V")
exp$env$state <- TRUE
}
}
tkbind(b,"<Button-1>", cb)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/js4shiny-package.R
\docType{package}
\name{js4shiny-package}
\alias{js4shiny}
\alias{js4shiny-package}
\title{js4shiny: Companion Package for JavaScript for Shiny Users}
\description{
\if{html}{\figure{logo.png}{options: align='right' alt='logo' width='120'}}
Companion Package for JavaScript for Shiny Users.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/gadenbuie/js4shiny}
\item Report bugs at \url{https://github.com/gadenbuie/js4shiny/issues}
}
}
\author{
\strong{Maintainer}: Garrick Aden-Buie \email{garrick@adenbuie.com} (\href{https://orcid.org/0000-0002-7111-0077}{ORCID})
}
\keyword{internal}
| /man/js4shiny-package.Rd | permissive | gadenbuie/js4shiny | R | false | true | 709 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/js4shiny-package.R
\docType{package}
\name{js4shiny-package}
\alias{js4shiny}
\alias{js4shiny-package}
\title{js4shiny: Companion Package for JavaScript for Shiny Users}
\description{
\if{html}{\figure{logo.png}{options: align='right' alt='logo' width='120'}}
Companion Package for JavaScript for Shiny Users.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/gadenbuie/js4shiny}
\item Report bugs at \url{https://github.com/gadenbuie/js4shiny/issues}
}
}
\author{
\strong{Maintainer}: Garrick Aden-Buie \email{garrick@adenbuie.com} (\href{https://orcid.org/0000-0002-7111-0077}{ORCID})
}
\keyword{internal}
|
#' Shifted Sum
#'
#' Adds the constant 4 (i.e. 1 + 3) to the element-wise sum of its two
#' inputs, recycling by R's usual vector arithmetic rules. The previous
#' documentation claimed the function squared its input, which the code
#' never did; `y` now defaults to 0 so the documented example `myf(3)`
#' actually runs.
#'
#' @param x A numeric vector.
#' @param y A numeric vector added element-wise to `x` (default 0).
#'
#' @return A numeric vector equal to `4 + x + y`.
#' @export
#'
#' @examples myf(3)
#' x = 1:30 ; myf(x)
myf = function(x, y = 0){
  # 1 + 3 collapses to the constant 4; written as a single constant here.
  4 + x + y
}
| /R/myf.R | no_license | lmtrevisi/MATH4753ouTrev0002 | R | false | false | 257 | r | #' Square Function
#'
#'
#'takes vector and returns a vector of squared components
#' @param x
#'
#' @return a vector of square components in the form of a vector object
#' @export
#'
#' @examples myf(3)
#' x = 1:30 ; myf(x)
# Adds the constant 4 (i.e. 1 + 3) to the element-wise sum of x and y.
# `y` defaults to 0 so the single-argument example in the roxygen block
# above (`myf(3)`) works instead of erroring on a missing argument.
myf = function(x, y = 0){
  4 + x + y
}
|
#' R coefficient of linear models between numerical and categorical columns
#'
#' Pairs every numeric column of \code{.data} with every non-numeric,
#' non-list column and computes the r value for each pair via the internal
#' helper \code{.get_r}.
#'
#' @param .data a data frame
#'
#' @return a data frame with paired names of categorical variables (\code{Cat}
#'   and numeric ones \code{Num} and \code{r} value)
#' @export
#'
#' @importFrom dplyr select_if mutate
#' @importFrom purrr map2_dbl
#' @importFrom tidyr crossing
#'
#' @examples
#' library(dplyr)
#' scan_r(starwars)
scan_r <- function(.data) {
  df <- .data
  # Column-name partition: numeric columns vs. categorical (anything that is
  # neither numeric nor a list-column).
  is_cat <- function(col) !is.numeric(col) && !is.list(col)
  num_cols <- names(df)[vapply(df, is.numeric, logical(1))]
  cat_cols <- names(df)[vapply(df, is_cat, logical(1))]
  # One row per (numeric, categorical) combination, with its r coefficient.
  crossing(Num = num_cols, Cat = cat_cols) %>%
    mutate(r = map2_dbl(Cat, Num, .get_r, .data = df))
}
| /R/scan_r.R | permissive | BenjaminLouis/datascan | R | false | false | 728 | r | #' R coefficient of linear models between numerical and catagorical columns in a dataframe
#'
#' @param .data a data frame
#'
#' @return a data frame with paired names of categorical variables (\code{Cat}
#' and mnumeric ones \code{Num} and \code{r} value)
#' @export
#'
#' @importFrom dplyr select_if mutate
#' @importFrom purrr map2_dbl
#' @importFrom tidyr crossing
#'
#' @examples
#' library(dplyr)
#' scan_r(starwars)
scan_r <- function(.data) {
quanti <- .data %>%
select_if(is.numeric) %>%
colnames()
quali <- .data %>%
select_if(function(x) !is.numeric(x) & !is.list(x)) %>%
colnames()
df <- .data
crossing(Num = quanti, Cat = quali) %>%
mutate(r = map2_dbl(Cat, Num, .get_r, .data = df))
}
|
#문제1-1
grade = sample(1:6,1)
#문제1-2
if(grade>3){
cat(grade,"학년은 고학년입니다.","\n")
} else{
cat(grade,"학년은 저학년입니다.\n")
}
#문제2-1
choice = sample(1:5,1)
#문제2-2,4
if(choice==1){
cat("결과값 :",300+50,"\n")
}else if(choice==2){
cat("결과값 :",300-50,"\n")
}else if(choice==3){
cat("결과값 :",300*50,"\n")
}else if(choice==4){
cat("결과값 :",300/50,"\n")
}else{
cat("결과값 :",300%%50,"\n")
}
#문제3-1
count = sample(3:10,1)
#문제3-2
deco = sample(1:3,1)
#문제3-3
if(deco==1){
rep("*",count)
}else if(deco==2){
rep("$",count)
}else{
rep("#",count)
}
#문제4
score = sample(0:100,1)
score2 = score%/%10
score2 = as.character(score2)
level = switch(EXPR = score2,"10"=, "9" = "A",
"8" ="B",
"7" ="C",
"6" ="D",
"F")
cat(score,"점은",level,"등급입니다.")
#문제4 ---연습
#score = sample(0:100,1)
#score2 = score%/%10
#score2 = as.character(score2)
#level = switch(EXPR = score2,"score2=9" = "A",
#"score2=8" ="B",
#"score2=7" ="C",
#"score2=6"="D",
#"F") ## "8"이랑 "score2=8"이랑 다르다!!
#cat(score,"점은",level,"등급입니다.")
#문제5
LETTERS
letters
alpha = paste(LETTERS,letters,sep = "")
alpha
| /R_training/실습제출/김도연/1주차(10.21~25)/2019.10.23/lab_05.R | no_license | BaeYS-marketing/R | R | false | false | 1,337 | r | #문제1-1
grade = sample(1:6,1)
#문제1-2
if(grade>3){
cat(grade,"학년은 고학년입니다.","\n")
} else{
cat(grade,"학년은 저학년입니다.\n")
}
#문제2-1
choice = sample(1:5,1)
#문제2-2,4
if(choice==1){
cat("결과값 :",300+50,"\n")
}else if(choice==2){
cat("결과값 :",300-50,"\n")
}else if(choice==3){
cat("결과값 :",300*50,"\n")
}else if(choice==4){
cat("결과값 :",300/50,"\n")
}else{
cat("결과값 :",300%%50,"\n")
}
#문제3-1
count = sample(3:10,1)
#문제3-2
deco = sample(1:3,1)
#문제3-3
if(deco==1){
rep("*",count)
}else if(deco==2){
rep("$",count)
}else{
rep("#",count)
}
#문제4
score = sample(0:100,1)
score2 = score%/%10
score2 = as.character(score2)
level = switch(EXPR = score2,"10"=, "9" = "A",
"8" ="B",
"7" ="C",
"6" ="D",
"F")
cat(score,"점은",level,"등급입니다.")
#문제4 ---연습
#score = sample(0:100,1)
#score2 = score%/%10
#score2 = as.character(score2)
#level = switch(EXPR = score2,"score2=9" = "A",
#"score2=8" ="B",
#"score2=7" ="C",
#"score2=6"="D",
#"F") ## "8"이랑 "score2=8"이랑 다르다!!
#cat(score,"점은",level,"등급입니다.")
#문제5
LETTERS
letters
alpha = paste(LETTERS,letters,sep = "")
alpha
|
# Full-width container panel: wraps all children in a Bootstrap "span12"
# div, like shiny's mainPanel() but spanning the entire row.
mainPanel_customize <- function(...) {
  div(class = "span12", ...)
}
#dataRangeInput with input and label display in horizontal
dateRangeInput_h <- function (inputId, label, start = NULL, end = NULL, min = NULL,
max = NULL, format = "yyyy-mm-dd", startview = "month", weekstart = 0,
language = "en", separator = " to ")
{
if (inherits(start, "Date"))
start <- format(start, "%Y-%m-%d")
if (inherits(end, "Date"))
end <- format(end, "%Y-%m-%d")
if (inherits(min, "Date"))
min <- format(min, "%Y-%m-%d")
if (inherits(max, "Date"))
max <- format(max, "%Y-%m-%d")
tagList(singleton(tags$head(tags$script(src = "shared/datepicker/js/bootstrap-datepicker.min.js"),
tags$link(rel = "stylesheet", type = "text/css", href = "shared/datepicker/css/datepicker.css"))),
tags$div(id = inputId, class = "shiny-date-range-input input-daterange",
tags$div(class='row-fluid',
tags$div(style="float:left; margin:0px 10px 0px 10px;",
tags$label(id = inputId, label)),
tags$div(style="float:left;",
tags$input(class = "input-small",
type = "text", `data-date-language` = language,
`data-date-weekstart` = weekstart,
`data-date-format` = format,
`data-date-start-view` = startview,
`data-min-date` = min,
`data-max-date` = max, `data-initial-date` = start)),
tags$div(style="float:left; margin:0px 10px 0px 10px;", HTML(separator)),
tags$div(style="float:left;",tags$input(class = "input-small",
type = "text", `data-date-language` = language,
`data-date-weekstart` = weekstart, `data-date-format` = format,
`data-date-start-view` = startview, `data-min-date` = min,
`data-max-date` = max, `data-initial-date` = end)))))
}
# Define UI for dataset viewer application
shinyUI(bootstrapPage(
# Show the caption, a summary of the dataset and an HTML table with
# the requested number of observations
#tags$link(rel="stylesheet", type="text/css", href="style.css"),
tags$link(rel="stylesheet", type="text/css", href="css/font-awesome.css"),
tags$link(rel="stylesheet", href="css/bootstrap.icon-large.min.css"),
tags$link(rel="stylesheet", type="text/css", href="css/big.css"),
tags$link(rel="stylesheet", type="text/css", href="css/css.css"),
mainPanel_customize(
tabsetPanel(
tabPanel("Portfolio",
actionButton("refresh_portf", "Refresh"),
div(class="widget",
div(class="widget-header",
tags$i(class="icon-large icon-table"),
h3("Watch list table")
),
div(class="widget-content",
tableOutput("pfTable")
)
),
div(class="widget",
div(class="widget-header",
tags$i(class="icon-large icon-th"),
h3("Watch list real time chart")
),
div(class="widget-content",
uiOutput("portf_plots")
)
)
),
tabPanel("Analysis",
wellPanel(style="height:92px;",
div(class="row-fluid",
div(style="float:left; margin-right:20px;",
selectInput(inputId ="pairs_dataset", label = "Choose a dataset:",
choices = c("SP500, ETF100 pairs"=1, "SP500, SP500 pairs"=2,
"SP400, SP400 pairs"=3, "SP600, SP600 pairs"=4,
"Same Sector pairs"=5,
"Same Industry and Sector pairs"=6,
"All stocks"=7))),
div(style="float:left; margin-right:20px;",
textInput("sym1", "Symbol 1:", "")),
div(style="float:left; margin-right:20px;",
textInput("sym2", "Symbol 2:", "")))),
tabsetPanel(
tabPanel("Potential Pairs",
tags$div(class="span11", style="float:left; margin:10px 10px 0px 10px;",
div(class="widget",
div(class="widget-header",
tags$i(class="icon-large icon-table"),
h3("Pairs Stats.")
),
div(class="widget-content",
htmlOutput("gvis_pot_pairs_ret")
)
)
)
) ,
tabPanel("historical Performance",
wellPanel(style="height:90px;",
div(class="row-fluid",
div(style="float:left;",
dateRangeInput("stock_date_rg", "Zoom into date range :",
start = "2012-01-03", end = "2014-01-03",
min = "2012-01-03", max = "2014-01-03")),
div(style="float:left; margin:20px 10px 0px 10px;",
actionButton("show_pairs_action", "Show Pairs")),
div(style="float:left; margin:20px 10px 0px 0px;",
actionButton("add_portf", "Add to Portfolio"))
)),
div(class="widget",
div(class="widget-header",
tags$i(class="icon-large icon-stats "),
#tags$i(class="fa fa-bar-chart-o fa-lg"),
h3("Historical Performance")
),
div(class="widget-content",
plotOutput("perfPlot", height="100%")
)
),
div(class="widget widget-table",
div(class="widget-header",
tags$i(class="icon-large icon-th"),
h3("Pairs rolling windows stat")
),
div(class="widget-content",
tableOutput("rollingWindowTable")
)
)
),
tabPanel("Pair Components",
div(class="widget",
div(class="widget-header",
tags$i(class="icon-large icon-stats"),
h3("Indv. stock charts")
),
div(class="widget-content",
plotOutput("stock1Plot"),
plotOutput("stock2Plot")
)
),
div(class="widget",
div(class="widget-header",
tags$i(class="icon-large icon-table"),
h3("Fundamental Data")
),
div(class="widget-content",
htmlOutput("fundamentalTable")
)
)
),
tabPanel("Trade Signal",
div(class="widget",
div(class="widget-header",
tags$i(class="icon-large icon-wifi-alt"),
h3("Trade Signal")
),
div(class="widget-content",
htmlOutput("tradeSignalTbl3")
)
)
)
)
)
)
)
))
| /ui.R | permissive | kb2025/R-pairstrading-shiny | R | false | false | 8,867 | r |
# Full-width container panel: wraps all children in a Bootstrap "span12"
# div, like shiny's mainPanel() but spanning the entire row.
mainPanel_customize <- function(...) {
  div(class = "span12", ...)
}
#dataRangeInput with input and label display in horizontal
dateRangeInput_h <- function (inputId, label, start = NULL, end = NULL, min = NULL,
max = NULL, format = "yyyy-mm-dd", startview = "month", weekstart = 0,
language = "en", separator = " to ")
{
if (inherits(start, "Date"))
start <- format(start, "%Y-%m-%d")
if (inherits(end, "Date"))
end <- format(end, "%Y-%m-%d")
if (inherits(min, "Date"))
min <- format(min, "%Y-%m-%d")
if (inherits(max, "Date"))
max <- format(max, "%Y-%m-%d")
tagList(singleton(tags$head(tags$script(src = "shared/datepicker/js/bootstrap-datepicker.min.js"),
tags$link(rel = "stylesheet", type = "text/css", href = "shared/datepicker/css/datepicker.css"))),
tags$div(id = inputId, class = "shiny-date-range-input input-daterange",
tags$div(class='row-fluid',
tags$div(style="float:left; margin:0px 10px 0px 10px;",
tags$label(id = inputId, label)),
tags$div(style="float:left;",
tags$input(class = "input-small",
type = "text", `data-date-language` = language,
`data-date-weekstart` = weekstart,
`data-date-format` = format,
`data-date-start-view` = startview,
`data-min-date` = min,
`data-max-date` = max, `data-initial-date` = start)),
tags$div(style="float:left; margin:0px 10px 0px 10px;", HTML(separator)),
tags$div(style="float:left;",tags$input(class = "input-small",
type = "text", `data-date-language` = language,
`data-date-weekstart` = weekstart, `data-date-format` = format,
`data-date-start-view` = startview, `data-min-date` = min,
`data-max-date` = max, `data-initial-date` = end)))))
}
# Define UI for dataset viewer application
shinyUI(bootstrapPage(
# Show the caption, a summary of the dataset and an HTML table with
# the requested number of observations
#tags$link(rel="stylesheet", type="text/css", href="style.css"),
tags$link(rel="stylesheet", type="text/css", href="css/font-awesome.css"),
tags$link(rel="stylesheet", href="css/bootstrap.icon-large.min.css"),
tags$link(rel="stylesheet", type="text/css", href="css/big.css"),
tags$link(rel="stylesheet", type="text/css", href="css/css.css"),
mainPanel_customize(
tabsetPanel(
tabPanel("Portfolio",
actionButton("refresh_portf", "Refresh"),
div(class="widget",
div(class="widget-header",
tags$i(class="icon-large icon-table"),
h3("Watch list table")
),
div(class="widget-content",
tableOutput("pfTable")
)
),
div(class="widget",
div(class="widget-header",
tags$i(class="icon-large icon-th"),
h3("Watch list real time chart")
),
div(class="widget-content",
uiOutput("portf_plots")
)
)
),
tabPanel("Analysis",
wellPanel(style="height:92px;",
div(class="row-fluid",
div(style="float:left; margin-right:20px;",
selectInput(inputId ="pairs_dataset", label = "Choose a dataset:",
choices = c("SP500, ETF100 pairs"=1, "SP500, SP500 pairs"=2,
"SP400, SP400 pairs"=3, "SP600, SP600 pairs"=4,
"Same Sector pairs"=5,
"Same Industry and Sector pairs"=6,
"All stocks"=7))),
div(style="float:left; margin-right:20px;",
textInput("sym1", "Symbol 1:", "")),
div(style="float:left; margin-right:20px;",
textInput("sym2", "Symbol 2:", "")))),
tabsetPanel(
tabPanel("Potential Pairs",
tags$div(class="span11", style="float:left; margin:10px 10px 0px 10px;",
div(class="widget",
div(class="widget-header",
tags$i(class="icon-large icon-table"),
h3("Pairs Stats.")
),
div(class="widget-content",
htmlOutput("gvis_pot_pairs_ret")
)
)
)
) ,
tabPanel("historical Performance",
wellPanel(style="height:90px;",
div(class="row-fluid",
div(style="float:left;",
dateRangeInput("stock_date_rg", "Zoom into date range :",
start = "2012-01-03", end = "2014-01-03",
min = "2012-01-03", max = "2014-01-03")),
div(style="float:left; margin:20px 10px 0px 10px;",
actionButton("show_pairs_action", "Show Pairs")),
div(style="float:left; margin:20px 10px 0px 0px;",
actionButton("add_portf", "Add to Portfolio"))
)),
div(class="widget",
div(class="widget-header",
tags$i(class="icon-large icon-stats "),
#tags$i(class="fa fa-bar-chart-o fa-lg"),
h3("Historical Performance")
),
div(class="widget-content",
plotOutput("perfPlot", height="100%")
)
),
div(class="widget widget-table",
div(class="widget-header",
tags$i(class="icon-large icon-th"),
h3("Pairs rolling windows stat")
),
div(class="widget-content",
tableOutput("rollingWindowTable")
)
)
),
tabPanel("Pair Components",
div(class="widget",
div(class="widget-header",
tags$i(class="icon-large icon-stats"),
h3("Indv. stock charts")
),
div(class="widget-content",
plotOutput("stock1Plot"),
plotOutput("stock2Plot")
)
),
div(class="widget",
div(class="widget-header",
tags$i(class="icon-large icon-table"),
h3("Fundamental Data")
),
div(class="widget-content",
htmlOutput("fundamentalTable")
)
)
),
tabPanel("Trade Signal",
div(class="widget",
div(class="widget-header",
tags$i(class="icon-large icon-wifi-alt"),
h3("Trade Signal")
),
div(class="widget-content",
htmlOutput("tradeSignalTbl3")
)
)
)
)
)
)
)
))
|
# Returns 1 + d when d equals 10 or 20; for any other input returns 1
# (the initial value of `result`).
func1 <- function(d){
result<-1;
if(d==10){
result <- result+d;
}else if(d==20){
result <- result+d;
};
return (result);
} | /day01/r2.R | no_license | AnabolicAction/R- | R | false | false | 140 | r | func1 <- function(d){
result<-1;
if(d==10){
result <- result+d;
}else if(d==20){
result <- result+d;
};
return (result);
} |
# SET-UP ------------------------------------------------------------------
# rm(list = ls())
# Set-up directory for analysis and load required packages and functions.
source("scripts/_RNASeq_functions.R")
set.seed(16341)
# Prepare a list to collect our results.
myres_list <- list()
# PREPARATION -------------------------------------------------------------
# Set-up databases for pathway mapping
HALLMARK.DB <- MSigDB::MSigDB[["HALLMARK"]]
Hs.c2 <- MSigDB[["C2_CURATED"]]
KEGG.DB <- Hs.c2[grep("KEGG_", names(Hs.c2))]
REACTOME.DB <- Hs.c2[grep("REACTOME_", names(Hs.c2))]
# Hs.c3 <- MSigDB[["C3_MOTIF"]]
# Hs.c5 <- MSigDB[["C5_GENE_ONTOLOGY"]]
# load the results of the expression analysis
Normalised_counts_matrix <- readRDS(file="data/RDS/airways_normalized_counts.rds", refhook = NULL)
cova <- readRDS(file="data/RDS/airways_covariates.rds", refhook = NULL)
result <- readRDS(file="data/RDS/airways_result.rds", refhook = NULL)
# ssGSEA HALLMARK ---------------------------------------------------------
## IQR based filtering - distribution in this data does not warrant filtering.
# iqr <- apply(Normalised_counts_matrix,1,IQR)
# plot(ecdf(iqr))
# keep <- iqr >= quantile(iqr, 0.25)
# Normalised_counts_matrix <- Normalised_counts_matrix[keep,]
# perform actual enrichment
ssgsea.HALLMARK <- gsva(
as.matrix(Normalised_counts_matrix), HALLMARK.DB, min.sz=10, max.sz=500,
method="ssgsea", ssgsea.norm=TRUE,
verbose=TRUE)
# have a look at the output
head( ssgsea.HALLMARK[order(ssgsea.HALLMARK[,1], decreasing=T),], 10 )
# We rename our samples - if we want to.
colnames(ssgsea.HALLMARK) <- c("early_1","early_2","early_3","late_1","late_2","late_3")
# construct our design to facilitate comparisons
design <- model.matrix(~ 0+cova$condition)
colnames(design) <- c("early","late")
# here we specify which comparison we want to make
# In this case we take 'early' as reference level, because that's the direction in which
# we calculated the differential expression. To that end we use the column names of out design
# and say that it is 'late-stage' minus 'early-stage'
contrast.matrix <- limma::makeContrasts(late-early, levels=design)
# fit the linear model
fit <- limma::lmFit(as.matrix(ssgsea.HALLMARK), design)
# and get results for our chosen contrast
fit2 <- limma::contrasts.fit(fit, contrast.matrix)
# calculate Empirical Bayes Statistics for Differential Expression
fit2 <- limma::eBayes(fit2)
# extract the top ranked pathways and perform multiple testing correction
res.HALLMARK <- limma::topTableF(fit2, adjust="BH",number=Inf,sort.by="F", p.value=1)
# adjust colnames for later use
colnames(res.HALLMARK) <- c("logFoldChange","AveExpr","F","P.Value","adj.P.Val")
# take a look at the activity scores for significant pathways
pheatmap::pheatmap(ssgsea.HALLMARK[rownames(res.HALLMARK[which(res.HALLMARK$adj.P.Val <= 0.1),]),],fontsize=8,cellwidth=10,cellheight=10)
# Summary of UP/DOWN regulated pathways
res_HB <- limma::decideTests(fit2, p.value=0.1)
summary(res_HB)
## And plot the network
plotGSVAGraph(res.HALLMARK, HALLMARK.DB, setOverlap=0.2, fieldPvalue="P.Value",cutPvalue=0.1,
clusterSize=1, p.title="data/output/HALLMARK.ssGSVA.airways", cutString="HALLMARK_" )
| /RNASeq/scripts/Enrichment_pipeline.R | no_license | ahmedelhosseiny/NGS_scripts | R | false | false | 3,240 | r | # SET-UP ------------------------------------------------------------------
# rm(list = ls())
# Set-up directory for analysis and load required packages and functions.
source("scripts/_RNASeq_functions.R")
set.seed(16341)
# Prepare a list to collect our results.
myres_list <- list()
# PREPARATION -------------------------------------------------------------
# Set-up databases for pathway mapping
HALLMARK.DB <- MSigDB::MSigDB[["HALLMARK"]]
Hs.c2 <- MSigDB[["C2_CURATED"]]
KEGG.DB <- Hs.c2[grep("KEGG_", names(Hs.c2))]
REACTOME.DB <- Hs.c2[grep("REACTOME_", names(Hs.c2))]
# Hs.c3 <- MSigDB[["C3_MOTIF"]]
# Hs.c5 <- MSigDB[["C5_GENE_ONTOLOGY"]]
# load the results of the expression analysis
Normalised_counts_matrix <- readRDS(file="data/RDS/airways_normalized_counts.rds", refhook = NULL)
cova <- readRDS(file="data/RDS/airways_covariates.rds", refhook = NULL)
result <- readRDS(file="data/RDS/airways_result.rds", refhook = NULL)
# ssGSEA HALLMARK ---------------------------------------------------------
## IQR based filtering - distribution in this data does not warrant filtering.
# iqr <- apply(Normalised_counts_matrix,1,IQR)
# plot(ecdf(iqr))
# keep <- iqr >= quantile(iqr, 0.25)
# Normalised_counts_matrix <- Normalised_counts_matrix[keep,]
# perform actual enrichment
ssgsea.HALLMARK <- gsva(
as.matrix(Normalised_counts_matrix), HALLMARK.DB, min.sz=10, max.sz=500,
method="ssgsea", ssgsea.norm=TRUE,
verbose=TRUE)
# have a look at the output
head( ssgsea.HALLMARK[order(ssgsea.HALLMARK[,1], decreasing=T),], 10 )
# We rename our samples - if we want to.
colnames(ssgsea.HALLMARK) <- c("early_1","early_2","early_3","late_1","late_2","late_3")
# construct our design to facilitate comparisons
design <- model.matrix(~ 0+cova$condition)
colnames(design) <- c("early","late")
# here we specify which comparison we want to make
# In this case we take 'early' as reference level, because that's the direction in which
# we calculated the differential expression. To that end we use the column names of out design
# and say that it is 'late-stage' minus 'early-stage'
contrast.matrix <- limma::makeContrasts(late-early, levels=design)
# fit the linear model
fit <- limma::lmFit(as.matrix(ssgsea.HALLMARK), design)
# and get results for our chosen contrast
fit2 <- limma::contrasts.fit(fit, contrast.matrix)
# calculate Empirical Bayes Statistics for Differential Expression
fit2 <- limma::eBayes(fit2)
# extract the top ranked pathways and perform multiple testing correction
res.HALLMARK <- limma::topTableF(fit2, adjust="BH",number=Inf,sort.by="F", p.value=1)
# adjust colnames for later use
colnames(res.HALLMARK) <- c("logFoldChange","AveExpr","F","P.Value","adj.P.Val")
# take a look at the activity scores for significant pathways
pheatmap::pheatmap(ssgsea.HALLMARK[rownames(res.HALLMARK[which(res.HALLMARK$adj.P.Val <= 0.1),]),],fontsize=8,cellwidth=10,cellheight=10)
# Summary of UP/DOWN regulated pathways
res_HB <- limma::decideTests(fit2, p.value=0.1)
summary(res_HB)
## And plot the network
plotGSVAGraph(res.HALLMARK, HALLMARK.DB, setOverlap=0.2, fieldPvalue="P.Value",cutPvalue=0.1,
clusterSize=1, p.title="data/output/HALLMARK.ssGSVA.airways", cutString="HALLMARK_" )
|
#selac_wrap_methods
# Objective function for branch-length optimization: installs the candidate
# edge-length vector `x` (supplied in log space when logspace=TRUE) on the
# tree `phy`, then sums the per-partition likelihoods computed from each
# partition's parameter row in `par.mat`. Partitions are evaluated in
# parallel over `n.cores.by.gene` cores, longest partitions first.
# Returns the summed likelihood (negative log-likelihood when neglnl=TRUE,
# as propagated to GetLikelihoodSAC_CodonForManyCharGivenAllParams).
OptimizeEdgeLengths <- function(x, par.mat, codon.site.data, codon.site.counts, data.type, codon.model, n.partitions, nsites.vector, index.matrix, phy, aa.optim_array=NULL, root.p_array=NULL, codon.freq.by.aa=NULL, codon.freq.by.gene=NULL, numcode=1, diploid=TRUE, aa.properties=NULL, volume.fixed.value=0.0003990333, nuc.model, codon.index.matrix=NULL, edge.length="optimize", include.gamma=FALSE, gamma.type, ncats, k.levels, logspace=FALSE, verbose=TRUE, n.cores.by.gene, n.cores.by.gene.by.site=1, estimate.importance=FALSE, neglnl=FALSE, HMM=FALSE) {
    # Optimizer works in log space; back-transform to raw branch lengths.
    if(logspace) {
        x <- exp(x)
    }
    phy$edge.length = x
    # Only the aa.optim_array code path is implemented here.
    if(is.null(aa.optim_array)){ stop("skipping this implementation")
    } else {
        # max.par = number of columns of par.mat consumed per partition;
        # it depends on the nucleotide model plus the optional gamma and
        # k-level parameters added below.
        if(nuc.model == "JC"){
            max.par = 6
        }
        if(nuc.model == "GTR"){
            max.par = 6 + 5
        }
        if(nuc.model == "UNREST"){
            max.par = 3 + 11
        }
        if(include.gamma == TRUE){
            max.par = max.par + 1
        }
        if(k.levels > 0){
            max.par = max.par + 2
        }
        # Likelihood of one partition, using that partition's parameter row
        # (re-logged, since the downstream function expects log-scale input)
        # and its own site patterns, optimal amino acids and codon
        # frequencies.
        MultiCoreLikelihood <- function(partition.index){
            codon.data = NULL
            codon.data$unique.site.patterns = codon.site.data[[partition.index]]
            codon.data$site.pattern.counts = codon.site.counts[[partition.index]]
            likelihood.tmp = GetLikelihoodSAC_CodonForManyCharGivenAllParams(x=log(par.mat[partition.index,1:max.par]), codon.data=codon.data, phy=phy, aa.optim_array=aa.optim_array[[partition.index]], codon.freq.by.aa=codon.freq.by.aa[[partition.index]], codon.freq.by.gene=codon.freq.by.gene[[partition.index]], numcode=numcode, diploid=diploid, aa.properties=aa.properties, volume.fixed.value=volume.fixed.value, nuc.model=nuc.model, codon.index.matrix=codon.index.matrix, include.gamma=include.gamma, gamma.type=gamma.type, ncats=ncats, k.levels=k.levels, logspace=logspace, verbose=verbose, neglnl=neglnl, n.cores.by.gene.by.site=n.cores.by.gene.by.site)
            return(likelihood.tmp)
        }
        #This orders the nsites per partition in decreasing order (to increase efficiency):
        partition.order <- 1:n.partitions
        likelihood <- sum(unlist(mclapply(partition.order[order(nsites.vector, decreasing=TRUE)], MultiCoreLikelihood, mc.cores=n.cores.by.gene)))
    }
    return(likelihood)
}
# Launches the nloptr branch-length optimization, minimizing the negative
# log-likelihood (neglnl=TRUE) returned by OptimizeEdgeLengths, starting
# from the current log-scale edge lengths.
# NOTE(review): the `...` argument is never used; every value here (phy,
# mle.pars.mat, opts.edge, upper.edge, lower.edge, the site-pattern lists,
# etc.) is resolved from the enclosing/calling environment -- confirm this
# reliance on surrounding scope is intentional at the call site.
results.edge.final <- function(...) {
    nloptr(x0=log(phy$edge.length),
           eval_f = OptimizeEdgeLengths,
           ub=upper.edge, lb=lower.edge, opts=opts.edge,
           par.mat=mle.pars.mat,
           codon.site.data=site.pattern.data.list,
           codon.site.counts=site.pattern.count.list,
           data.type=data.type, codon.model=codon.model, n.partitions=n.partitions,
           nsites.vector=nsites.vector,
           index.matrix=index.matrix, phy=phy, aa.optim_array=aa.optim.list, root.p_array=NULL,
           codon.freq.by.aa=codon.freq.by.aa.list, codon.freq.by.gene=codon.freq.by.gene.list,
           numcode=numcode, diploid=diploid, aa.properties=aa.properties, volume.fixed.value=cpv.starting.parameters[3],
           nuc.model=nuc.model, codon.index.matrix=codon.index.matrix, edge.length=edge.length,
           include.gamma=include.gamma, gamma.type=gamma.type, ncats=ncats, k.levels=k.levels,
           logspace=TRUE, verbose=verbose, n.cores.by.gene=n.cores.by.gene, n.cores.by.gene.by.site=n.cores.by.gene.by.site,
           estimate.importance=FALSE, neglnl=TRUE, HMM=FALSE)
}
| /selac_wrap_methods.R | no_license | GrahamDB/testing_selac | R | false | false | 3,336 | r | #selac_wrap_methods
# Objective function for branch-length optimization.
#
# `x` holds candidate edge lengths for `phy` (log-transformed when
# logspace=TRUE, as when called from nloptr); they are assigned to the tree
# and the likelihood is summed over all gene partitions.  With neglnl=TRUE the
# per-partition function returns negative log-likelihoods, so the sum is
# directly minimizable.
#
# par.mat: one row of substitution-model parameters per partition (natural
#   scale; re-logged before the likelihood call below).
# codon.site.data / codon.site.counts: per-partition unique site patterns and
#   their multiplicities.
# NOTE(review): several arguments (data.type, codon.model, index.matrix,
# edge.length, root.p_array, estimate.importance, HMM) are accepted but not
# used on this code path -- presumably consumed by the alternative
# implementation stubbed out below; confirm before removing them.
OptimizeEdgeLengths <- function(x, par.mat, codon.site.data, codon.site.counts, data.type, codon.model, n.partitions, nsites.vector, index.matrix, phy, aa.optim_array=NULL, root.p_array=NULL, codon.freq.by.aa=NULL, codon.freq.by.gene=NULL, numcode=1, diploid=TRUE, aa.properties=NULL, volume.fixed.value=0.0003990333, nuc.model, codon.index.matrix=NULL, edge.length="optimize", include.gamma=FALSE, gamma.type, ncats, k.levels, logspace=FALSE, verbose=TRUE, n.cores.by.gene, n.cores.by.gene.by.site=1, estimate.importance=FALSE, neglnl=FALSE, HMM=FALSE) {
    # Candidate edge lengths arrive log-transformed from the optimizer.
    if(logspace) {
        x <- exp(x)
    }
    phy$edge.length = x
    # The aa.optim_array == NULL branch (joint inference of optimal amino
    # acids) was deliberately stubbed out; only the "known optimal aa" path
    # remains below.
    if(is.null(aa.optim_array)){ stop("skipping this implementation")
    } else {
        # max.par = number of model parameters read from each par.mat row;
        # columns beyond max.par are ignored by the likelihood call.
        if(nuc.model == "JC"){
            max.par = 6
        }
        if(nuc.model == "GTR"){
            max.par = 6 + 5
        }
        if(nuc.model == "UNREST"){
            max.par = 3 + 11
        }
        if(include.gamma == TRUE){
            max.par = max.par + 1
        }
        if(k.levels > 0){
            max.par = max.par + 2
        }
        # Per-partition likelihood, dispatched in parallel across partitions.
        MultiCoreLikelihood <- function(partition.index){
            codon.data = NULL
            codon.data$unique.site.patterns = codon.site.data[[partition.index]]
            codon.data$site.pattern.counts = codon.site.counts[[partition.index]]
            likelihood.tmp = GetLikelihoodSAC_CodonForManyCharGivenAllParams(x=log(par.mat[partition.index,1:max.par]), codon.data=codon.data, phy=phy, aa.optim_array=aa.optim_array[[partition.index]], codon.freq.by.aa=codon.freq.by.aa[[partition.index]], codon.freq.by.gene=codon.freq.by.gene[[partition.index]], numcode=numcode, diploid=diploid, aa.properties=aa.properties, volume.fixed.value=volume.fixed.value, nuc.model=nuc.model, codon.index.matrix=codon.index.matrix, include.gamma=include.gamma, gamma.type=gamma.type, ncats=ncats, k.levels=k.levels, logspace=logspace, verbose=verbose, neglnl=neglnl, n.cores.by.gene.by.site=n.cores.by.gene.by.site)
            return(likelihood.tmp)
        }
        #This orders the nsites per partition in decreasing order (to increase efficiency):
        # (largest partitions start first so cores stay busy at the end)
        partition.order <- 1:n.partitions
        likelihood <- sum(unlist(mclapply(partition.order[order(nsites.vector, decreasing=TRUE)], MultiCoreLikelihood, mc.cores=n.cores.by.gene)))
    }
    return(likelihood)
}
# Launch the nloptr edge-length optimization.
#
# NOTE(review): the `...` arguments are never used -- every object referenced
# here (phy, upper.edge, lower.edge, opts.edge, mle.pars.mat, the site-pattern
# lists, etc.) is resolved from the enclosing environment at call time, so
# this function is only meaningful inside the scope that defines them.
# Edge lengths are optimized on the log scale (x0 = log(edge.length) together
# with logspace=TRUE), and neglnl=TRUE makes nloptr minimize the negative
# log-likelihood returned by OptimizeEdgeLengths.
results.edge.final <- function(...) {
    nloptr(x0=log(phy$edge.length),
           eval_f = OptimizeEdgeLengths,
           ub=upper.edge, lb=lower.edge, opts=opts.edge,
           par.mat=mle.pars.mat,
           codon.site.data=site.pattern.data.list,
           codon.site.counts=site.pattern.count.list,
           data.type=data.type, codon.model=codon.model, n.partitions=n.partitions,
           nsites.vector=nsites.vector,
           index.matrix=index.matrix, phy=phy, aa.optim_array=aa.optim.list, root.p_array=NULL,
           codon.freq.by.aa=codon.freq.by.aa.list, codon.freq.by.gene=codon.freq.by.gene.list,
           numcode=numcode, diploid=diploid, aa.properties=aa.properties, volume.fixed.value=cpv.starting.parameters[3],
           nuc.model=nuc.model, codon.index.matrix=codon.index.matrix, edge.length=edge.length,
           include.gamma=include.gamma, gamma.type=gamma.type, ncats=ncats, k.levels=k.levels,
           logspace=TRUE, verbose=verbose, n.cores.by.gene=n.cores.by.gene, n.cores.by.gene.by.site=n.cores.by.gene.by.site,
           estimate.importance=FALSE, neglnl=TRUE, HMM=FALSE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eloratingfixed.R
\name{eloratingfixed}
\alias{eloratingfixed}
\title{Create daily elo ranks and multiple derivatives with user-defined parameter values}
\usage{
eloratingfixed(agon_data, pres_data, k = 100, init_elo = 1000, outputfile = NULL,
returnR = TRUE, p_function = "sigmoid")
}
\arguments{
\item{agon_data}{Input data frame with dominance interactions, should only contain Date,
Winner, Loser. Date should be formatted as MONTH/DAY/YEAR, or already as Date class.}
\item{pres_data}{Input data frame with columns "id", "start_date" and "end_date". Date
columns should be formatted as MONTH/DAY/YEAR, or already as Date class. If all IDs
are present the whole time, you can ignore this and a pres_data table will be automatically
generated.}
\item{k}{Specified value of the k parameter, default is 100}
\item{init_elo}{The starting Elo value for all individuals, default is 1000}
\item{outputfile}{Name of csv file to save ranks to. Default is NULL, in which case
the function will only return a table in R. If you supply an output file name
the function will save the results as a csv file in your working directory.}
\item{returnR}{whether to return an R object from the function call. Default is TRUE}
\item{p_function}{function defining probability of winning. Default "sigmoid" is
equation (1) from Foerster, Franz et al 2016. Use "pnorm" to use the
\code{\link[stats:Normal]{pnorm}}-based method implemented in the EloRating package.}
}
\value{
Returns a list with six elements:
\itemize{
\item{\strong{elo}}{ Data frame with all IDs and dates they were present, with the following columns:}
\itemize{
\item{Date}{: Dates of study period}
\item{Individual}{: the names of each ranked individual, for each date they were present}
\item{Elo}{: fitted Elo scores for each individual on each day}
\item{EloOrdinal}{: Daily ordinal rank based on Elo scores}
\item{EloScaled}{: Daily Elo scores rescaled between 0 and 1 according to
\deqn{([individual Elo] - min([daily Elo scores]))/(max([daily Elo scores]) - min([daily Elo scores]))}}
\item{ExpNumBeaten}{: expected number of individuals in the group beaten, which is the sum of
winning probabilities based on relative Elo scores of an individual and all others, following
equation (4) in Foerster, Franz et al. 2016}
\item{EloCardinal}{: ExpNumBeaten values rescaled as a percentage of the total number of ranked
individuals present in the group on the day of ranking. We encourage the use of this measure.}
\item{JenksEloCardinal}{: Categorical rank (high, mid, or low) using the Jenks natural breaks
classification method implemented in the R package BAMMtools.
See \code{\link[BAMMtools]{getJenksBreaks}}}
}
\item{\strong{k}}{ User-defined value of the k parameter}
\item{\strong{init_elo}}{ User-defined initial Elo score when individuals enter the hierarchy}
\item{\strong{pred_accuracy}}{ Proportion of correctly predicted interactions}
\item{\strong{logL}}{ The overall log-likelihood of the observed data given the user-supplied parameter
values based on winning probabilities (as calculated in equation (1) of Foerster, Franz et al 2016)
for all interactions}
}
}
\description{
Conducts traditional elo rating analyses using specified K value
and outputs raw, normalized, cardinal, and categorical ranks as a list object in
R or in an output file. For optimized Elo parameters, use \code{\link{eloratingopt}}.
}
\details{
This function accepts a data frame of date-stamped dominance interactions and
(optionally) a data frame of start and end dates for each individual to be ranked,
and outputs daily Elo scores with parameters specified by the user. The default function
used to determine probability of winning is equation (1) from Foerster, Franz et al. 2016,
but for ease of comparison with the EloRating package, we also added the option to use
the \code{\link[stats:Normal]{pnorm}}-based method implemented in the EloRating package, and future
development will add the option to use the original function from Elo 1978 (as implemented in
the elo package). This function does not require large presence matrices, and efficiently
calculates a series of additional indices (described below).
As opposed to the \code{\link{eloratingopt}} function, this procedure only requires that
included individuals have at least one win \emph{or} one loss.
A detailed description of the function output is given in the \strong{Value} section of
this help file:
}
\examples{
nbadata = EloOptimized::nba #nba wins and losses from the 1995-96 season
nbaelo = eloratingfixed(agon_data = nbadata)
# generates traditional Elo scores (with init_elo = 1000 & k = 100) and saves
# them as "nbaelo"
}
| /man/eloratingfixed.Rd | no_license | cran/EloOptimized | R | false | true | 5,002 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eloratingfixed.R
\name{eloratingfixed}
\alias{eloratingfixed}
\title{Create daily elo ranks and multiple derivatives with user-defined parameter values}
\usage{
eloratingfixed(agon_data, pres_data, k = 100, init_elo = 1000, outputfile = NULL,
returnR = TRUE, p_function = "sigmoid")
}
\arguments{
\item{agon_data}{Input data frame with dominance interactions, should only contain Date,
Winner, Loser. Date should be formatted as MONTH/DAY/YEAR, or already as Date class.}
\item{pres_data}{Input data frame with columns "id", "start_date" and "end_date". Date
columns should be formatted as MONTH/DAY/YEAR, or already as Date class. If all IDs
are present the whole time, you can ignore this and a pres_data table will be automatically
generated.}
\item{k}{Specified value of the k parameter, default is 100}
\item{init_elo}{The starting Elo value for all individuals, default is 1000}
\item{outputfile}{Name of csv file to save ranks to. Default is NULL, in which case
the function will only return a table in R. If you supply an output file name
the function will save the results as a csv file in your working directory.}
\item{returnR}{whether to return an R object from the function call. Default is TRUE}
\item{p_function}{function defining probability of winning. Default "sigmoid" is
equation (1) from Foerster, Franz et al 2016. Use "pnorm" to use the
\code{\link[stats:Normal]{pnorm}}-based method implemented in the EloRating package.}
}
\value{
Returns a list with six elements:
\itemize{
\item{\strong{elo}}{ Data frame with all IDs and dates they were present, with the following columns:}
\itemize{
\item{Date}{: Dates of study period}
\item{Individual}{: the names of each ranked individual, for each date they were present}
\item{Elo}{: fitted Elo scores for each individual on each day}
\item{EloOrdinal}{: Daily ordinal rank based on Elo scores}
\item{EloScaled}{: Daily Elo scores rescaled between 0 and 1 according to
\deqn{([individual Elo] - min([daily Elo scores]))/(max([daily Elo scores]) - min([daily Elo scores]))}}
\item{ExpNumBeaten}{: expected number of individuals in the group beaten, which is the sum of
winning probabilities based on relative Elo scores of an individual and all others, following
equation (4) in Foerster, Franz et al. 2016}
\item{EloCardinal}{: ExpNumBeaten values rescaled as a percentage of the total number of ranked
individuals present in the group on the day of ranking. We encourage the use of this measure.}
\item{JenksEloCardinal}{: Categorical rank (high, mid, or low) using the Jenks natural breaks
classification method implemented in the R package BAMMtools.
See \code{\link[BAMMtools]{getJenksBreaks}}}
}
\item{\strong{k}}{ User-defined value of the k parameter}
\item{\strong{init_elo}}{ User-defined initial Elo score when individuals enter the hierarchy}
\item{\strong{pred_accuracy}}{ Proportion of correctly predicted interactions}
\item{\strong{logL}}{ The overall log-likelihood of the observed data given the user-supplied parameter
values based on winning probabilities (as calculated in equation (1) of Foerster, Franz et al 2016)
for all interactions}
}
}
\description{
Conducts traditional elo rating analyses using specified K value
and outputs raw, normalized, cardinal, and categorical ranks as a list object in
R or in an output file. For optimized Elo parameters, use \code{\link{eloratingopt}}.
}
\details{
This function accepts a data frame of date-stamped dominance interactions and
(optionally) a data frame of start and end dates for each individual to be ranked,
and outputs daily Elo scores with parameters specified by the user. The default function
used to determine probability of winning is equation (1) from Foerster, Franz et al. 2016,
but for ease of comparison with the EloRating package, we also added the option to use
the \code{\link[stats:Normal]{pnorm}}-based method implemented in the EloRating package, and future
development will add the option to use the original function from Elo 1978 (as implemented in
the elo package). This function does not require large presence matrices, and efficiently
calculates a series of additional indices (described below).
As opposed to the \code{\link{eloratingopt}} function, this procedure only requires that
included individuals have at least one win \emph{or} one loss.
A detailed description of the function output is given in the \strong{Value} section of
this help file:
}
\examples{
nbadata = EloOptimized::nba #nba wins and losses from the 1995-96 season
nbaelo = eloratingfixed(agon_data = nbadata)
# generates traditional Elo scores (with init_elo = 1000 & k = 100) and saves
# them as "nbaelo"
}
|
.onAttach <- function(libname, pkgname) {
  # Attribution and deprecation notices shown when the package is attached.
  startup_notes <- c(
    "Data: (c) OpenStreetMap contributors, ODbL 1.0 - http://www.openstreetmap.org/copyright",
    "Routing: OSRM - http://project-osrm.org/",
    paste0("sp support will be dropped in the next major release, ",
           "please use sf objects instead.")
  )
  for (note in startup_notes) {
    packageStartupMessage(note)
  }
}
.onLoad <- function(libname, pkgname) {
options(osrm.server = "https://routing.openstreetmap.de/", osrm.profile = "car")
# options(osrm.server = "http://0.0.0.0:5000/", osrm.profile = "driving")
} | /R/zzz.R | no_license | ashleyasmus/osrm | R | false | false | 662 | r | .onAttach <- function(libname, pkgname) {
packageStartupMessage("Data: (c) OpenStreetMap contributors, ODbL 1.0 - http://www.openstreetmap.org/copyright")
packageStartupMessage("Routing: OSRM - http://project-osrm.org/")
packageStartupMessage(paste0("sp support will be dropped in the next major release, ",
"please use sf objects instead."))
# options(osrm.server = "http://0.0.0.0:5000/", osrm.profile = "driving")
}
.onLoad <- function(libname, pkgname) {
  # Register package defaults: the public OSRM demo server and its "car"
  # routing profile.  (A locally hosted server would instead use e.g.
  # osrm.server = "http://0.0.0.0:5000/", osrm.profile = "driving".)
  osrm_defaults <- list(osrm.server = "https://routing.openstreetmap.de/",
                        osrm.profile = "car")
  do.call(options, osrm_defaults)
}
# 1 reads in the tsetse count and temperature data
# 2 formats the time columns and summarises the temperature data
# 3 binds the count data and temperature data together
library("zoo") # required packages: zoo supplies yearmon date handling
library("plyr") # plyr supplies ddply for the monthly summary
#***********read in data**********************************
b.counts <- read.csv("data_bioassay_counts.csv",header=T) # bioassay count data
b.temps <- read.csv("data_bioassay_temps.csv",header=T) # temperature data
#***********change time column to year-month format*******
b.temps$Date <- as.Date(b.temps$Date,"%d/%m/%Y") # format to date
b.counts$Mon.yr <- as.yearmon(b.counts$Mon.yr,"%d/%m/%Y") # format count data date column to year month
b.temps$yearmon <- as.yearmon(b.temps$Date,"%Y/%m/%d") # format temp data date column to year month
#**********summarise to monthly means for modelling**************************
b.temp <- ddply(b.temps,.(yearmon),summarise,mean.temp=mean(MeanC,na.rm=T)) # for each level in year-month column calculate the mean temperature
names(b.temp) <- c("time","temp") # rename the columns in the table
#**********bind data together into a single table*************************************
# Counts are lagged one step relative to temperature (count at row t is paired
# with the preceding month's temperature via the [1:(n-1)] shift).
# NOTE(review): the leading block of 369 NA values pads the count series to the
# start of the temperature record; 369 is hard-coded and must equal the length
# difference between the two series -- confirm if either input file changes.
temps.count <- cbind.data.frame(time=c(b.temp$time)
                                ,temp=c(b.temp$temp)
                                ,count=c(rep(NA,369),b.counts$Mean[1:(length(b.counts$Mean)-1)])
                                ,count.time=as.yearmon(c(rep(NA,369),b.counts$Mon.yr[1:(length(b.counts$Mean)-1)]))
)
#extend beginning by 5 years to allow population to stabilise - use a repeat of the first year
# (rows 13:24 -- presumably the first complete calendar year of data -- are
# prepended five times as burn-in; TODO confirm rows 13:24 are the intended year)
temps.count <- rbind.data.frame (temps.count[13:24,]
                                 ,temps.count[13:24,]
                                 ,temps.count[13:24,]
                                 ,temps.count[13:24,]
                                 ,temps.count[13:24,]
                                 ,temps.count
)
#************************************************************************************
| /r_1_data_bioassay.R | permissive | jenniesuz/tsetse_climate_change | R | false | false | 2,027 | r | # 1 reads in the tsetse count and temperature data
# 2 formats the time columns and summarises the temperature data
# 3 binds the count data and temperature data together
library("zoo") # required packages
library("plyr")
#***********read in data**********************************
b.counts <- read.csv("data_bioassay_counts.csv",header=T) # bioassay count data
b.temps <- read.csv("data_bioassay_temps.csv",header=T) # temperature data
#***********change time column to year-month format*******
b.temps$Date <- as.Date(b.temps$Date,"%d/%m/%Y") # format to date
b.counts$Mon.yr <- as.yearmon(b.counts$Mon.yr,"%d/%m/%Y") # format count data date column to year month
b.temps$yearmon <- as.yearmon(b.temps$Date,"%Y/%m/%d") # format temp data date column to year month
#**********summarise to monthly means for modelling**************************
b.temp <- ddply(b.temps,.(yearmon),summarise,mean.temp=mean(MeanC,na.rm=T)) # for each level in year-month column calculate the mean temperature
names(b.temp) <- c("time","temp") # rename the columns in the table
#**********bind data together into a single table*************************************
temps.count <- cbind.data.frame(time=c(b.temp$time)
,temp=c(b.temp$temp)
,count=c(rep(NA,369),b.counts$Mean[1:(length(b.counts$Mean)-1)])
,count.time=as.yearmon(c(rep(NA,369),b.counts$Mon.yr[1:(length(b.counts$Mean)-1)]))
)
#extend beginning by 5 years to allow population to stabilise - use a repeat of the first year
temps.count <- rbind.data.frame (temps.count[13:24,]
,temps.count[13:24,]
,temps.count[13:24,]
,temps.count[13:24,]
,temps.count[13:24,]
,temps.count
)
#************************************************************************************
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/baseline.plot.R
\name{baseline.plot}
\alias{baseline.plot}
\title{Plot the simulated baseline functions}
\usage{
baseline.plot(baseline)
}
\arguments{
\item{baseline}{A data frame containing five variables: time, and the values of the baseline failure PDF,
baseline failure CDF, baseline survivor function, and baseline hazard function at each time point.
Generally, this data frame is taken from the \code{baseline} attribute of the \code{\link[coxed]{sim.survdata}}
function}
}
\value{
A figure of class \code{"gg"} and \code{"ggplot"}
}
\description{
This function is called by \code{\link[coxed]{survsim.plot}} and is not intended to be used by itself.
}
\details{
This function reshapes the data for easy faceting with \code{\link[ggplot2]{facet_wrap}} within
a call to \code{\link[ggplot2]{ggplot}}. Each function is plotted on the y-axis and time is plotted on
the x-axis using \code{\link[ggplot2]{geom_line}}
}
\examples{
simdata <- sim.survdata(N=1000, T=100, num.data.frames=1)
baseline.plot(simdata$baseline)
}
\seealso{
\code{\link[coxed]{survsim.plot}}, \code{\link[coxed]{sim.survdata}}
}
\author{
Jonathan Kropko <jkropko@virginia.edu> and Jeffrey J. Harden <jharden2@nd.edu>
}
| /man/baseline.plot.Rd | no_license | panando/coxed | R | false | true | 1,272 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/baseline.plot.R
\name{baseline.plot}
\alias{baseline.plot}
\title{Plot the simulated baseline functions}
\usage{
baseline.plot(baseline)
}
\arguments{
\item{baseline}{A data frame containing five variables: time, and the values of the baseline failure PDF,
baseline failure CDF, baseline survivor function, and baseline hazard function at each time point.
Generally, this data frame is taken from the \code{baseline} attribute of the \code{\link[coxed]{sim.survdata}}
function}
}
\value{
A figure of class \code{"gg"} and \code{"ggplot"}
}
\description{
This function is called by \code{\link[coxed]{survsim.plot}} and is not intended to be used by itself.
}
\details{
This function reshapes the data for easy faceting with \code{\link[ggplot2]{facet_wrap}} within
a call to \code{\link[ggplot2]{ggplot}}. Each function is plotted on the y-axis and time is plotted on
the x-axis using \code{\link[ggplot2]{geom_line}}
}
\examples{
simdata <- sim.survdata(N=1000, T=100, num.data.frames=1)
baseline.plot(simdata$baseline)
}
\seealso{
\code{\link[coxed]{survsim.plot}}, \code{\link[coxed]{sim.survdata}}
}
\author{
Jonathan Kropko <jkropko@virginia.edu> and Jeffrey J. Harden <jharden2@nd.edu>
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfareporting_functions.R
\name{advertisers.get}
\alias{advertisers.get}
\title{Gets one advertiser by ID.}
\usage{
advertisers.get(profileId, id)
}
\arguments{
\item{profileId}{User profile ID associated with this request}
\item{id}{Advertiser ID}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/dfatrafficking
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/dfatrafficking"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/doubleclick-advertisers/}{Google Documentation}
}
| /googledfareportingv26.auto/man/advertisers.get.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 849 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfareporting_functions.R
\name{advertisers.get}
\alias{advertisers.get}
\title{Gets one advertiser by ID.}
\usage{
advertisers.get(profileId, id)
}
\arguments{
\item{profileId}{User profile ID associated with this request}
\item{id}{Advertiser ID}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/dfatrafficking
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/dfatrafficking)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/doubleclick-advertisers/}{Google Documentation}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doc_test_files.R
\name{doc_test_files}
\alias{doc_test_files}
\title{Run all tests listed in the files}
\usage{
doc_test_files(paths, env = test_env(), stop_on_failure = FALSE,
stop_on_warning = FALSE, wrap = TRUE)
}
\arguments{
\item{paths}{paths to each test file}
\item{env}{The environment in which to evaluate the tests.}
\item{stop_on_failure}{If `TRUE`, throw an error if any tests fail.}
\item{stop_on_warning}{If `TRUE`, throw an error if any tests generate
warnings.}
\item{wrap}{Automatically wrap all code within [test_that()]? This ensures
that all expectations are reported, even if outside a test block.}
}
\description{
Evaluates each of the tests in each file passed to paths
}
\examples{
\dontrun{doc_test_files(list.files("tests/testthat", full.names = TRUE))}
}
| /man/doc_test_files.Rd | no_license | thebioengineer/testDoctor | R | false | true | 797 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doc_test_files.R
\name{doc_test_files}
\alias{doc_test_files}
\title{Run all tests listed in the files}
\usage{
doc_test_files(paths, env = test_env(), stop_on_failure = FALSE,
stop_on_warning = FALSE, wrap = TRUE)
}
\arguments{
\item{paths}{paths to each test file}
\item{env}{The environment in which to evaluate the tests.}
\item{stop_on_failure}{If `TRUE`, throw an error if any tests fail.}
\item{stop_on_warning}{If `TRUE`, throw an error if any tests generate
warnings.}
\item{wrap}{Automatically wrap all code within [test_that()]? This ensures
that all expectations are reported, even if outside a test block.}
}
\description{
Evaluates each of the tests in each file passed to paths
}
\examples{
\dontrun{doc_test_files(list.files("tests/testthat", full.names = TRUE))}
}
|
### topGO
# install
# source("http://bioconductor.org/biocLite.R")
# biocLite()
# source("http://bioconductor.org/biocLite.R")
# biocLite("topGO")
# biocLite("ALL")
# biocLite("affyLib")
library(topGO)
library(ALL)
library("VennDiagram")
library(gridExtra)
library(grid)
library(ggplot2)
library("SuperExactTest")
library(cowplot)
require(dplyr)
print (sessionInfo())
# R version 3.5.1 (2018-07-02)
# Platform: x86_64-apple-darwin15.6.0 (64-bit)
# Running under: macOS 10.15.7
# Matrix products: default
# BLAS: /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libRblas.0.dylib
# LAPACK: /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libRlapack.dylib
# locale:
# [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
# attached base packages:
# [1] grid stats4 parallel stats graphics grDevices utils datasets methods base
# other attached packages:
# [1] dplyr_1.0.2 cowplot_1.0.0 SuperExactTest_1.0.7 ggplot2_3.3.2 gridExtra_2.3 VennDiagram_1.6.20 futile.logger_1.4.3 ALL_1.24.0 topGO_2.34.0 SparseM_1.77 GO.db_3.7.0
# [12] AnnotationDbi_1.44.0 IRanges_2.16.0 S4Vectors_0.20.1 Biobase_2.42.0 graph_1.60.0 BiocGenerics_0.28.0
# loaded via a namespace (and not attached):
# [1] Rcpp_1.0.2 compiler_3.5.1 pillar_1.4.2 formatR_1.7 futile.options_1.0.1 digest_0.6.20 bit_1.1-14 lifecycle_0.2.0 RSQLite_2.1.2 memoise_1.1.0 tibble_2.1.3
# [12] gtable_0.3.0 lattice_0.20-38 pkgconfig_2.0.2 rlang_0.4.8 DBI_1.0.0 withr_2.1.2 generics_0.0.2 vctrs_0.3.4 tidyselect_1.1.0 bit64_0.9-7 glue_1.4.2
# [23] R6_2.4.0 purrr_0.3.2 magrittr_1.5 lambda.r_1.2.3 blob_1.2.0 scales_1.0.0 matrixStats_0.54.0 colorspace_1.4-1 munsell_0.5.0 crayon_1.3.4
Intersect <- function (x) {
  # Intersection of an arbitrary number of sets.
  # x is a list of vectors; a one-element list is simply unlisted, otherwise
  # the sets are folded pairwise through base intersect().
  if (length(x) == 1) {
    unlist(x)
  } else {
    Reduce(intersect, x)
  }
}
Union <- function (x) {
  # Union of an arbitrary number of sets.
  # x is a list of vectors; a one-element list is simply unlisted, otherwise
  # the sets are folded pairwise through base union().
  if (length(x) == 1) {
    unlist(x)
  } else {
    Reduce(union, x)
  }
}
Setdiff <- function (x, y) {
  # Elements common to every set in x that appear in no set of y.
  # x and y are lists of character vectors.
  shared_x <- Intersect(x)
  any_y <- Union(y)
  setdiff(shared_x, any_y)
}
#### load annotation
# NOTE(review): setwd() inside a script is non-portable and leaves the working
# directory changed for the rest of the session -- prefer absolute/project
# paths.
setwd("pos_sel_data/GO_terms")
# ## Arth annotated ## not used
# geneID2GO_Tbi_Arth <- readMappings(file = "543sp_Arth_Tbi_forTopGO.txt")
# geneID2GO_Tte_Arth <- readMappings(file = "543sp_Arth_Tte_forTopGO.txt")
# geneID2GO_Tce_Arth <- readMappings(file = "543sp_Arth_Tce_forTopGO.txt")
# geneID2GO_Tms_Arth <- readMappings(file = "543sp_Arth_Tms_forTopGO.txt")
# geneID2GO_Tcm_Arth <- readMappings(file = "543sp_Arth_Tcm_forTopGO.txt")
# geneID2GO_Tsi_Arth <- readMappings(file = "543sp_Arth_Tsi_forTopGO.txt")
# geneID2GO_Tpa_Arth <- readMappings(file = "543sp_Arth_Tpa_forTopGO.txt")
# geneID2GO_Tge_Arth <- readMappings(file = "543sp_Arth_Tge_forTopGO.txt")
# geneID2GO_Tps_Arth <- readMappings(file = "543sp_Arth_Tps_forTopGO.txt")
# geneID2GO_Tdi_Arth <- readMappings(file = "543sp_Arth_Tdi_forTopGO.txt")
## Droso annotated
# Gene -> GO-term mappings, one per Timema species, from the Drosophila-based
# annotation; readMappings() is topGO's reader for "gene<TAB>GO,GO,..." files.
geneID2GO_Tbi_Droso <- readMappings(file = "543sp_Droso_Tbi_forTopGO.txt")
geneID2GO_Tte_Droso <- readMappings(file = "543sp_Droso_Tte_forTopGO.txt")
geneID2GO_Tce_Droso <- readMappings(file = "543sp_Droso_Tce_forTopGO.txt")
geneID2GO_Tms_Droso <- readMappings(file = "543sp_Droso_Tms_forTopGO.txt")
geneID2GO_Tcm_Droso <- readMappings(file = "543sp_Droso_Tcm_forTopGO.txt")
geneID2GO_Tsi_Droso <- readMappings(file = "543sp_Droso_Tsi_forTopGO.txt")
geneID2GO_Tpa_Droso <- readMappings(file = "543sp_Droso_Tpa_forTopGO.txt")
geneID2GO_Tge_Droso <- readMappings(file = "543sp_Droso_Tge_forTopGO.txt")
geneID2GO_Tps_Droso <- readMappings(file = "543sp_Droso_Tps_forTopGO.txt")
geneID2GO_Tdi_Droso <- readMappings(file = "543sp_Droso_Tdi_forTopGO.txt")
###############################################################################################################################################
#### read in tables with genename and qval
make_named_numeric_vector <- function(list_file_name){
  # Read a two-column whitespace-delimited file (gene name, q-value) and
  # return column 2 as a numeric vector named by column 1.
  qval_table <- read.table(list_file_name)
  setNames(qval_table[[2]], qval_table[[1]])
}
# Named numeric vectors of per-gene q-values (gene name -> q-value), one per
# species.  NOTE(review): these files carry the "Arth" prefix while the GO
# annotations above use the "Droso" sets -- q-values are presumably
# annotation-independent, but confirm the pairing is intentional.
Tbi_qval_GL <- make_named_numeric_vector("543sp_Arth_Tbi_qval.txt")
Tte_qval_GL <- make_named_numeric_vector("543sp_Arth_Tte_qval.txt")
Tce_qval_GL <- make_named_numeric_vector("543sp_Arth_Tce_qval.txt")
Tms_qval_GL <- make_named_numeric_vector("543sp_Arth_Tms_qval.txt")
Tcm_qval_GL <- make_named_numeric_vector("543sp_Arth_Tcm_qval.txt")
Tsi_qval_GL <- make_named_numeric_vector("543sp_Arth_Tsi_qval.txt")
Tpa_qval_GL <- make_named_numeric_vector("543sp_Arth_Tpa_qval.txt")
Tge_qval_GL <- make_named_numeric_vector("543sp_Arth_Tge_qval.txt")
Tps_qval_GL <- make_named_numeric_vector("543sp_Arth_Tps_qval.txt")
Tdi_qval_GL <- make_named_numeric_vector("543sp_Arth_Tdi_qval.txt")
run_enrichment <- function(genelist, ref, sig_for_GO){
  # GO enrichment (Biological Process) via topGO's weight01 Fisher test.
  #
  # genelist:   named numeric vector of per-gene scores (q-values).
  # ref:        gene -> GO list, as returned by topGO::readMappings().
  # sig_for_GO: threshold used both to flag significant genes and to filter
  #             significant GO terms from the result table.
  # A gene counts as "significant" when its score is below sig_for_GO.
  select_sig <- function(allScore) { allScore < sig_for_GO }
  # nodeSize = 10: only GO terms with at least 10 annotated genes are kept.
  go_data <- new("topGOdata", ontology = "BP", allGenes = genelist,
                 geneSel = select_sig, annot = annFUN.gene2GO,
                 gene2GO = ref, nodeSize = 10)
  used_terms <- go_data@graph@nodes
  fisher_res <- runTest(go_data, algorithm = "weight01", statistic = "fisher")
  res_table <- GenTable(go_data, Fisher_w01 = fisher_res,
                        ranksOf = "Fisher_w01",
                        topNodes = length(used_terms), numChar = 200)
  # NOTE(review): GenTable reports p-values as character strings; the "<"
  # comparison below relies on implicit coercion -- confirm values such as
  # "< 1e-30" never occur with these data.
  sig_terms <- subset(res_table, res_table$Fisher_w01 < sig_for_GO)$GO.ID
  list("N_GO_term_use_BP" = length(used_terms),
       "GO_term_use_BP_list" = used_terms,
       "allRes1_BP" = res_table,
       "sig_fisher_BP_GO" = sig_terms,
       "GODATA_BP" = go_data)
}
#### run the enrichment stuff (0.05)
# (Arth-annotated runs retained for reference but disabled.)
# # Tbi_Arth_enrich <- run_enrichment(Tbi_qval_GL, geneID2GO_Tbi_Arth, 0.05)
# Tte_Arth_enrich <- run_enrichment(Tte_qval_GL, geneID2GO_Tte_Arth, 0.05)
# Tce_Arth_enrich <- run_enrichment(Tce_qval_GL, geneID2GO_Tce_Arth, 0.05)
# Tms_Arth_enrich <- run_enrichment(Tms_qval_GL, geneID2GO_Tms_Arth, 0.05)
# Tcm_Arth_enrich <- run_enrichment(Tcm_qval_GL, geneID2GO_Tcm_Arth, 0.05)
# Tsi_Arth_enrich <- run_enrichment(Tsi_qval_GL, geneID2GO_Tsi_Arth, 0.05)
# Tpa_Arth_enrich <- run_enrichment(Tpa_qval_GL, geneID2GO_Tpa_Arth, 0.05)
# Tge_Arth_enrich <- run_enrichment(Tge_qval_GL, geneID2GO_Tge_Arth, 0.05)
# Tps_Arth_enrich <- run_enrichment(Tps_qval_GL, geneID2GO_Tps_Arth, 0.05)
# Tdi_Arth_enrich <- run_enrichment(Tdi_qval_GL, geneID2GO_Tdi_Arth, 0.05)
#### run the enrichment stuff (0.05)
# Per-species BP enrichment (weight01 Fisher) with a 0.05 cut-off on both
# gene q-values and GO-term p-values.
Tbi_Droso_enrich <- run_enrichment(Tbi_qval_GL, geneID2GO_Tbi_Droso, 0.05)
Tte_Droso_enrich <- run_enrichment(Tte_qval_GL, geneID2GO_Tte_Droso, 0.05)
Tce_Droso_enrich <- run_enrichment(Tce_qval_GL, geneID2GO_Tce_Droso, 0.05)
Tms_Droso_enrich <- run_enrichment(Tms_qval_GL, geneID2GO_Tms_Droso, 0.05)
Tcm_Droso_enrich <- run_enrichment(Tcm_qval_GL, geneID2GO_Tcm_Droso, 0.05)
Tsi_Droso_enrich <- run_enrichment(Tsi_qval_GL, geneID2GO_Tsi_Droso, 0.05)
Tpa_Droso_enrich <- run_enrichment(Tpa_qval_GL, geneID2GO_Tpa_Droso, 0.05)
Tge_Droso_enrich <- run_enrichment(Tge_qval_GL, geneID2GO_Tge_Droso, 0.05)
Tps_Droso_enrich <- run_enrichment(Tps_qval_GL, geneID2GO_Tps_Droso, 0.05)
Tdi_Droso_enrich <- run_enrichment(Tdi_qval_GL, geneID2GO_Tdi_Droso, 0.05)
#####################################################################################################################
### Tidy
# Extract the per-species BP result tables from the enrichment lists.
Tbi_Droso_enrich_table <- Tbi_Droso_enrich$allRes1_BP
Tte_Droso_enrich_table <- Tte_Droso_enrich$allRes1_BP
Tce_Droso_enrich_table <- Tce_Droso_enrich$allRes1_BP
Tms_Droso_enrich_table <- Tms_Droso_enrich$allRes1_BP
Tcm_Droso_enrich_table <- Tcm_Droso_enrich$allRes1_BP
Tsi_Droso_enrich_table <- Tsi_Droso_enrich$allRes1_BP
Tpa_Droso_enrich_table <- Tpa_Droso_enrich$allRes1_BP
Tge_Droso_enrich_table <- Tge_Droso_enrich$allRes1_BP
Tps_Droso_enrich_table <- Tps_Droso_enrich$allRes1_BP
Tdi_Droso_enrich_table <- Tdi_Droso_enrich$allRes1_BP
### add sp
# Tag every row with its species code so the tables can be stacked.
Tbi_Droso_enrich_table$sp <- rep("Tbi", length(Tbi_Droso_enrich_table[,1]))
Tte_Droso_enrich_table$sp <- rep("Tte", length(Tte_Droso_enrich_table[,1]))
Tce_Droso_enrich_table$sp <- rep("Tce", length(Tce_Droso_enrich_table[,1]))
Tms_Droso_enrich_table$sp <- rep("Tms", length(Tms_Droso_enrich_table[,1]))
Tcm_Droso_enrich_table$sp <- rep("Tcm", length(Tcm_Droso_enrich_table[,1]))
Tsi_Droso_enrich_table$sp <- rep("Tsi", length(Tsi_Droso_enrich_table[,1]))
Tpa_Droso_enrich_table$sp <- rep("Tpa", length(Tpa_Droso_enrich_table[,1]))
Tge_Droso_enrich_table$sp <- rep("Tge", length(Tge_Droso_enrich_table[,1]))
Tps_Droso_enrich_table$sp <- rep("Tps", length(Tps_Droso_enrich_table[,1]))
Tdi_Droso_enrich_table$sp <- rep("Tdi", length(Tdi_Droso_enrich_table[,1]))
### join
# Stack all species into one long table.
All_Droso_enrich_table <- rbind(
  Tbi_Droso_enrich_table,
  Tte_Droso_enrich_table,
  Tce_Droso_enrich_table,
  Tms_Droso_enrich_table,
  Tcm_Droso_enrich_table,
  Tsi_Droso_enrich_table,
  Tpa_Droso_enrich_table,
  Tge_Droso_enrich_table,
  Tps_Droso_enrich_table,
  Tdi_Droso_enrich_table
)
###
head(All_Droso_enrich_table)
# Enriched GO terms (p < 0.05) and their per-species counts; the commented
# block below records the output observed when the script was last run
# (species absent from the list had zero significant terms).
All_Droso_enrich_table_sig <- subset(All_Droso_enrich_table, All_Droso_enrich_table$Fisher_w01 < 0.05)
All_Droso_enrich_table_sig %>% count(sp)
# sp n
# 1 Tbi 1
# 2 Tcm 4
# 3 Tdi 9
# 4 Tge 2
# 5 Tms 7
# 6 Tpa 2
# 7 Tsi 9
# 8 Tte 19
######################################################################
### N GO terms annot in sex and asex genes with sig +ve sel
get_sig_gene_vect <- function(list_file_name, sig_threshold = 0.05){
  # Read a two-column file (gene name, q-value) and return the names of the
  # genes whose q-value falls below `sig_threshold`.
  #
  # list_file_name: path to a whitespace-delimited file with gene names in
  #   column 1 (V1) and q-values in column 2 (V2), no header.
  # sig_threshold:  significance cut-off on the q-value; defaults to 0.05,
  #   matching the previously hard-coded value, so existing calls behave
  #   unchanged.
  # Returns a character vector of significant gene names (empty when none).
  df <- read.table(list_file_name)
  # which() drops NA comparisons, matching subset() semantics.
  df_sig <- df[which(df$V2 < sig_threshold), , drop = FALSE]
  return(as.character(df_sig$V1))
}
# Character vectors of genes under significant positive selection (q < 0.05),
# one per species, read from the Droso-prefixed q-value files.
Tbi_sig_genes <- get_sig_gene_vect("543sp_Droso_Tbi_qval.txt")
Tte_sig_genes <- get_sig_gene_vect("543sp_Droso_Tte_qval.txt")
Tce_sig_genes <- get_sig_gene_vect("543sp_Droso_Tce_qval.txt")
Tms_sig_genes <- get_sig_gene_vect("543sp_Droso_Tms_qval.txt")
Tcm_sig_genes <- get_sig_gene_vect("543sp_Droso_Tcm_qval.txt")
Tsi_sig_genes <- get_sig_gene_vect("543sp_Droso_Tsi_qval.txt")
Tpa_sig_genes <- get_sig_gene_vect("543sp_Droso_Tpa_qval.txt")
Tge_sig_genes <- get_sig_gene_vect("543sp_Droso_Tge_qval.txt")
Tps_sig_genes <- get_sig_gene_vect("543sp_Droso_Tps_qval.txt")
Tdi_sig_genes <- get_sig_gene_vect("543sp_Droso_Tdi_qval.txt")
## get all BPs (i.e. not filtered by node size)
run_enrichment_for_all_BP <- function(genelist, ref, sig_for_GO){
  # Enumerate the full BP GO-term universe for a gene set.
  #
  # Builds a topGOdata object with nodeSize = 1 so *every* BP term annotated
  # to at least one gene is retained; no enrichment test is run here.
  # The gene selector mirrors run_enrichment() but is not used downstream of
  # object construction (setting the threshold to 1 yields the same GSEA
  # p-values, per the original author's check).
  select_sig <- function(allScore) { allScore < sig_for_GO }
  go_data <- new("topGOdata", ontology = "BP", allGenes = genelist,
                 geneSel = select_sig, annot = annFUN.gene2GO,
                 gene2GO = ref, nodeSize = 1)
  list("GO_term_use_BP_list" = go_data@graph@nodes, "GODATA_BP" = go_data)
}
# Full (unfiltered, nodeSize = 1) BP GO-term universe per species; used below
# to distinguish "all BP terms" from the nodeSize-filtered terms actually
# tested for enrichment.
Tbi_Droso_all_BP <- run_enrichment_for_all_BP(Tbi_qval_GL, geneID2GO_Tbi_Droso, 0.05)
Tte_Droso_all_BP <- run_enrichment_for_all_BP(Tte_qval_GL, geneID2GO_Tte_Droso, 0.05)
Tce_Droso_all_BP <- run_enrichment_for_all_BP(Tce_qval_GL, geneID2GO_Tce_Droso, 0.05)
Tms_Droso_all_BP <- run_enrichment_for_all_BP(Tms_qval_GL, geneID2GO_Tms_Droso, 0.05)
Tcm_Droso_all_BP <- run_enrichment_for_all_BP(Tcm_qval_GL, geneID2GO_Tcm_Droso, 0.05)
Tsi_Droso_all_BP <- run_enrichment_for_all_BP(Tsi_qval_GL, geneID2GO_Tsi_Droso, 0.05)
Tpa_Droso_all_BP <- run_enrichment_for_all_BP(Tpa_qval_GL, geneID2GO_Tpa_Droso, 0.05)
Tge_Droso_all_BP <- run_enrichment_for_all_BP(Tge_qval_GL, geneID2GO_Tge_Droso, 0.05)
Tps_Droso_all_BP <- run_enrichment_for_all_BP(Tps_qval_GL, geneID2GO_Tps_Droso, 0.05)
Tdi_Droso_all_BP <- run_enrichment_for_all_BP(Tdi_qval_GL, geneID2GO_Tdi_Droso, 0.05)
get_N_GOs_wBPfilt <- function(want_vec_name, geneID2GO_file, BP_GOs_name, ALL_BP_GOs_name, sp, rep_m){
  # For each gene in `want_vec_name`, count its GO annotations at three levels
  # of filtering and return one row per gene.
  #
  # want_vec_name:   character vector of gene names to look up.
  # geneID2GO_file:  *name* (string) of the gene -> GO list object to use.
  #                  Resolved with get() + exact `[[` lookup, replacing the
  #                  previous eval(parse(text = ...)) construction, which was
  #                  slow, unsafe, and silently allowed partial name matches.
  # BP_GOs_name:     BP GO terms used in the enrichment (nodeSize-filtered).
  # ALL_BP_GOs_name: all BP GO terms (nodeSize = 1).
  # sp, rep_m:       species label and reproductive mode, recycled onto rows.
  #
  # Returns a data.frame with columns gene_name, N_All_GOs, N_AllBP_GOs,
  # N_usedBP_GOs, sp, rep_mode.
  annot_map <- get(geneID2GO_file)
  # Build one single-row data.frame per gene, then bind once at the end
  # (avoids the O(n^2) rbind-in-a-loop growth of the original).
  rows <- lapply(seq_along(want_vec_name), function(i) {
    gene <- want_vec_name[i]
    gene_gos <- annot_map[[gene]]   # NULL (length 0) when gene is unannotated
    data.frame(gene_name = gene,
               N_All_GOs = as.numeric(length(gene_gos)),
               N_AllBP_GOs = as.numeric(sum(gene_gos %in% ALL_BP_GOs_name)),
               N_usedBP_GOs = as.numeric(sum(gene_gos %in% BP_GOs_name)),
               stringsAsFactors = FALSE)
  })
  out_df <- do.call(rbind, rows)
  out_df$sp <- rep(sp, length(out_df[, 1]))
  out_df$rep_mode <- rep(rep_m, length(out_df[, 1]))
  return(out_df)
}
### get N GOs
## Per-species GO-annotation counts for the significant genes, then stacked
## into one data frame with species / reproductive-mode labels.
Tbi_Droso_NGOs <- get_N_GOs_wBPfilt(Tbi_sig_genes, "geneID2GO_Tbi_Droso", Tbi_Droso_enrich$GO_term_use_BP_list, Tbi_Droso_all_BP$GO_term_use_BP_list, "Tbi", "sex")
Tte_Droso_NGOs <- get_N_GOs_wBPfilt(Tte_sig_genes, "geneID2GO_Tte_Droso", Tte_Droso_enrich$GO_term_use_BP_list, Tte_Droso_all_BP$GO_term_use_BP_list, "Tte", "asex")
Tce_Droso_NGOs <- get_N_GOs_wBPfilt(Tce_sig_genes, "geneID2GO_Tce_Droso", Tce_Droso_enrich$GO_term_use_BP_list, Tce_Droso_all_BP$GO_term_use_BP_list, "Tce", "sex")
Tms_Droso_NGOs <- get_N_GOs_wBPfilt(Tms_sig_genes, "geneID2GO_Tms_Droso", Tms_Droso_enrich$GO_term_use_BP_list, Tms_Droso_all_BP$GO_term_use_BP_list, "Tms", "asex")
Tcm_Droso_NGOs <- get_N_GOs_wBPfilt(Tcm_sig_genes, "geneID2GO_Tcm_Droso", Tcm_Droso_enrich$GO_term_use_BP_list, Tcm_Droso_all_BP$GO_term_use_BP_list, "Tcm", "sex")
Tsi_Droso_NGOs <- get_N_GOs_wBPfilt(Tsi_sig_genes, "geneID2GO_Tsi_Droso", Tsi_Droso_enrich$GO_term_use_BP_list, Tsi_Droso_all_BP$GO_term_use_BP_list, "Tsi", "asex")
Tpa_Droso_NGOs <- get_N_GOs_wBPfilt(Tpa_sig_genes, "geneID2GO_Tpa_Droso", Tpa_Droso_enrich$GO_term_use_BP_list, Tpa_Droso_all_BP$GO_term_use_BP_list, "Tpa", "sex")
Tge_Droso_NGOs <- get_N_GOs_wBPfilt(Tge_sig_genes, "geneID2GO_Tge_Droso", Tge_Droso_enrich$GO_term_use_BP_list, Tge_Droso_all_BP$GO_term_use_BP_list, "Tge", "asex")
Tps_Droso_NGOs <- get_N_GOs_wBPfilt(Tps_sig_genes, "geneID2GO_Tps_Droso", Tps_Droso_enrich$GO_term_use_BP_list, Tps_Droso_all_BP$GO_term_use_BP_list, "Tps", "sex")
Tdi_Droso_NGOs <- get_N_GOs_wBPfilt(Tdi_sig_genes, "geneID2GO_Tdi_Droso", Tdi_Droso_enrich$GO_term_use_BP_list, Tdi_Droso_all_BP$GO_term_use_BP_list, "Tdi", "asex")
Allsp_Droso_NGOs <- as.data.frame(rbind(
Tbi_Droso_NGOs,
Tte_Droso_NGOs,
Tce_Droso_NGOs,
Tms_Droso_NGOs,
Tcm_Droso_NGOs,
Tsi_Droso_NGOs,
Tpa_Droso_NGOs,
Tge_Droso_NGOs,
Tps_Droso_NGOs,
Tdi_Droso_NGOs
))
# 0/1 indicators: does the gene carry at least one GO term at each level?
Allsp_Droso_NGOs$N_All_GOs_bi <- ifelse(Allsp_Droso_NGOs$N_All_GOs > 0, 1, 0)
Allsp_Droso_NGOs$N_AllBP_GOs_bi <- ifelse(Allsp_Droso_NGOs$N_AllBP_GOs > 0, 1, 0)
Allsp_Droso_NGOs$N_usedBP_GOs_bi <- ifelse(Allsp_Droso_NGOs$N_usedBP_GOs > 0, 1, 0)
# Map each species to its sexual/asexual species-pair label via a lookup
# table instead of a ten-deep nested ifelse; unknown species give NA as before.
sp_pair_map <- c(
Tbi = "Tbi-Tte", Tte = "Tbi-Tte",
Tce = "Tce-Tms", Tms = "Tce-Tms",
Tcm = "Tcm-Tsi", Tsi = "Tcm-Tsi",
Tpa = "Tpa-Tge", Tge = "Tpa-Tge",
Tps = "Tps-Tdi", Tdi = "Tps-Tdi"
)
# as.character() guards against `sp` being a factor (which would index by level).
Allsp_Droso_NGOs$sp_pair <- unname(sp_pair_map[as.character(Allsp_Droso_NGOs$sp)])
# Fixed pair order used on the plot x-axis.
Allsp_Droso_NGOs$sp_pair_ord <- ordered(Allsp_Droso_NGOs$sp_pair, levels = c("Tbi-Tte", "Tcm-Tsi", "Tce-Tms", "Tps-Tdi", "Tpa-Tge"))
head(Allsp_Droso_NGOs)
### at least one GO term annot
Allsp_Droso_NGOs_more_than_0_used <- subset(Allsp_Droso_NGOs, Allsp_Droso_NGOs$N_usedBP_GOs > 0)
### plot
# Left panel: proportion of significant genes with >= 1 used BP GO term,
# per species pair, split by reproductive mode (bars = mean of 0/1 indicator).
P1_N_usedBP_GOs_prop_GOs <- ggplot(Allsp_Droso_NGOs) +
geom_bar(aes(sp_pair_ord, N_usedBP_GOs_bi, fill = as.factor(rep_mode)), position = "dodge", stat = "summary", fun = "mean") +
theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black")) +
scale_fill_manual(values = c("#92C5DECD", "#D6604DED")) +
xlab ("Species pair") +
ylab ("Prop genes with GO terms annotated")
# Right panel: mean number of used BP GO terms per gene, genes with >= 1 only.
P1_N_usedBP_GOs_mean_GOs_more_than_0_BPused_GO <- ggplot(Allsp_Droso_NGOs_more_than_0_used ) +
geom_bar(aes(sp_pair_ord, N_usedBP_GOs, fill = as.factor(rep_mode)), position = "dodge", stat = "summary", fun = "mean") +
theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black")) +
scale_fill_manual(values = c("#92C5DECD", "#D6604DED")) +
xlab ("Species pair") +
ylab ("Mean GO terms annotated per gene")
plot_grid(P1_N_usedBP_GOs_prop_GOs, P1_N_usedBP_GOs_mean_GOs_more_than_0_BPused_GO)
| /9_Positive_selection_analysis/Top_GO.R | no_license | AsexGenomeEvol/Timema_asex_genomes | R | false | false | 17,840 | r | ### topGO
# install
# source("http://bioconductor.org/biocLite.R")
# biocLite()
# source("http://bioconductor.org/biocLite.R")
# biocLite("topGO")
# biocLite("ALL")
# biocLite("affyLib")
library(topGO)
library(ALL)
library("VennDiagram")
library(gridExtra)
library(grid)
library(ggplot2)
library("SuperExactTest")
library(cowplot)
require(dplyr)
print (sessionInfo())
# R version 3.5.1 (2018-07-02)
# Platform: x86_64-apple-darwin15.6.0 (64-bit)
# Running under: macOS 10.15.7
# Matrix products: default
# BLAS: /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libRblas.0.dylib
# LAPACK: /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libRlapack.dylib
# locale:
# [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
# attached base packages:
# [1] grid stats4 parallel stats graphics grDevices utils datasets methods base
# other attached packages:
# [1] dplyr_1.0.2 cowplot_1.0.0 SuperExactTest_1.0.7 ggplot2_3.3.2 gridExtra_2.3 VennDiagram_1.6.20 futile.logger_1.4.3 ALL_1.24.0 topGO_2.34.0 SparseM_1.77 GO.db_3.7.0
# [12] AnnotationDbi_1.44.0 IRanges_2.16.0 S4Vectors_0.20.1 Biobase_2.42.0 graph_1.60.0 BiocGenerics_0.28.0
# loaded via a namespace (and not attached):
# [1] Rcpp_1.0.2 compiler_3.5.1 pillar_1.4.2 formatR_1.7 futile.options_1.0.1 digest_0.6.20 bit_1.1-14 lifecycle_0.2.0 RSQLite_2.1.2 memoise_1.1.0 tibble_2.1.3
# [12] gtable_0.3.0 lattice_0.20-38 pkgconfig_2.0.2 rlang_0.4.8 DBI_1.0.0 withr_2.1.2 generics_0.0.2 vctrs_0.3.4 tidyselect_1.1.0 bit64_0.9-7 glue_1.4.2
# [23] R6_2.4.0 purrr_0.3.2 magrittr_1.5 lambda.r_1.2.3 blob_1.2.0 scales_1.0.0 matrixStats_0.54.0 colorspace_1.4-1 munsell_0.5.0 crayon_1.3.4
# Multiple-set intersection: returns the elements common to every set in the
# list `x`. A single-element list is unlisted; an empty list yields NULL.
Intersect <- function(x) {
  if (length(x) == 1) {
    return(unlist(x))
  }
  # Fold pairwise intersect() over the list (associative, so a left fold
  # matches the original recursive right fold).
  Reduce(intersect, x)
}
# Multiple-set union: unique elements appearing in any set of the list `x`,
# in first-appearance order. A single-element list is unlisted; empty -> NULL.
Union <- function(x) {
  if (length(x) == 1) {
    return(unlist(x))
  }
  Reduce(union, x)
}
# Elements common to every set in `x`, minus anything appearing in any set
# of `y`. Both arguments are lists of character (or comparable) vectors.
Setdiff <- function(x, y) {
  setdiff(Intersect(x), Union(y))
}
#### load annotation
# NOTE(review): setwd() inside a script makes it non-portable; consider
# building paths with file.path() from a project root — confirm before changing.
setwd("pos_sel_data/GO_terms")
# ## Arth annotated ## not used
# geneID2GO_Tbi_Arth <- readMappings(file = "543sp_Arth_Tbi_forTopGO.txt")
# geneID2GO_Tte_Arth <- readMappings(file = "543sp_Arth_Tte_forTopGO.txt")
# geneID2GO_Tce_Arth <- readMappings(file = "543sp_Arth_Tce_forTopGO.txt")
# geneID2GO_Tms_Arth <- readMappings(file = "543sp_Arth_Tms_forTopGO.txt")
# geneID2GO_Tcm_Arth <- readMappings(file = "543sp_Arth_Tcm_forTopGO.txt")
# geneID2GO_Tsi_Arth <- readMappings(file = "543sp_Arth_Tsi_forTopGO.txt")
# geneID2GO_Tpa_Arth <- readMappings(file = "543sp_Arth_Tpa_forTopGO.txt")
# geneID2GO_Tge_Arth <- readMappings(file = "543sp_Arth_Tge_forTopGO.txt")
# geneID2GO_Tps_Arth <- readMappings(file = "543sp_Arth_Tps_forTopGO.txt")
# geneID2GO_Tdi_Arth <- readMappings(file = "543sp_Arth_Tdi_forTopGO.txt")
## Droso annotated
# topGO::readMappings parses "gene<TAB>GO:..., GO:..." files into a
# gene -> GO-term list, one object per species.
geneID2GO_Tbi_Droso <- readMappings(file = "543sp_Droso_Tbi_forTopGO.txt")
geneID2GO_Tte_Droso <- readMappings(file = "543sp_Droso_Tte_forTopGO.txt")
geneID2GO_Tce_Droso <- readMappings(file = "543sp_Droso_Tce_forTopGO.txt")
geneID2GO_Tms_Droso <- readMappings(file = "543sp_Droso_Tms_forTopGO.txt")
geneID2GO_Tcm_Droso <- readMappings(file = "543sp_Droso_Tcm_forTopGO.txt")
geneID2GO_Tsi_Droso <- readMappings(file = "543sp_Droso_Tsi_forTopGO.txt")
geneID2GO_Tpa_Droso <- readMappings(file = "543sp_Droso_Tpa_forTopGO.txt")
geneID2GO_Tge_Droso <- readMappings(file = "543sp_Droso_Tge_forTopGO.txt")
geneID2GO_Tps_Droso <- readMappings(file = "543sp_Droso_Tps_forTopGO.txt")
geneID2GO_Tdi_Droso <- readMappings(file = "543sp_Droso_Tdi_forTopGO.txt")
###############################################################################################################################################
#### read in tables with genename and qval
# Read a two-column whitespace-separated file (gene name, q-value) and return
# the q-values as a numeric vector named by gene.
make_named_numeric_vector <- function(list_file_name){
  qval_df <- read.table(list_file_name)
  setNames(qval_df$V2, qval_df$V1)
}
## Named numeric vectors of per-gene q-values (names = gene IDs), one per
## species, read from the Arth-annotated q-value files.
Tbi_qval_GL <- make_named_numeric_vector("543sp_Arth_Tbi_qval.txt")
Tte_qval_GL <- make_named_numeric_vector("543sp_Arth_Tte_qval.txt")
Tce_qval_GL <- make_named_numeric_vector("543sp_Arth_Tce_qval.txt")
Tms_qval_GL <- make_named_numeric_vector("543sp_Arth_Tms_qval.txt")
Tcm_qval_GL <- make_named_numeric_vector("543sp_Arth_Tcm_qval.txt")
Tsi_qval_GL <- make_named_numeric_vector("543sp_Arth_Tsi_qval.txt")
Tpa_qval_GL <- make_named_numeric_vector("543sp_Arth_Tpa_qval.txt")
Tge_qval_GL <- make_named_numeric_vector("543sp_Arth_Tge_qval.txt")
Tps_qval_GL <- make_named_numeric_vector("543sp_Arth_Tps_qval.txt")
Tdi_qval_GL <- make_named_numeric_vector("543sp_Arth_Tdi_qval.txt")
## GO enrichment (BP ontology) for one species: builds a topGOdata object with
## nodeSize = 10 (>= 10 annotated genes per term), runs a weight01 Fisher test,
## and returns the full result table plus the terms significant at sig_for_GO.
## genelist - named numeric vector of q-values; ref - gene -> GO list.
run_enrichment <- function(genelist, ref, sig_for_GO){
### make rule for classing sig / non-sig
topDiffGenes <- function(allScore) {return(allScore < sig_for_GO)}
#### make GOdata object
#### setting node size as 10 so at least 10 genes must be annot per GO terms
#### do enrichment test
GODATA_BP = new("topGOdata", ontology = "BP", allGenes = genelist, geneSel = topDiffGenes, annot = annFUN.gene2GO, gene2GO = ref, nodeSize = 10)
### get N GOs used
GO_term_use_BP_list = GODATA_BP@graph@nodes
N_GO_term_use_BP = length(GODATA_BP@graph@nodes)
resultFisher <- runTest(GODATA_BP, algorithm = "weight01", statistic = "fisher")
### combined tables
allRes1_BP <- GenTable(GODATA_BP, Fisher_w01 = resultFisher, ranksOf = "Fisher_w01", topNodes = length(GODATA_BP@graph@nodes), numChar = 200)
# NOTE(review): GenTable returns Fisher_w01 as *character*, so "< sig_for_GO"
# compares lexicographically after coercion (values like "1e-05" or "< 1e-30"
# may misclassify) — confirm against the expected p-value formats.
sig_fisher_BP_GO = subset(allRes1_BP, allRes1_BP$Fisher_w01 < sig_for_GO)$GO.ID
## return everything!
out_list = list("N_GO_term_use_BP" = N_GO_term_use_BP,
"GO_term_use_BP_list" = GO_term_use_BP_list,
"allRes1_BP" = allRes1_BP,
"sig_fisher_BP_GO" = sig_fisher_BP_GO,
"GODATA_BP" = GODATA_BP)
return(out_list)
}
#### run the enrichment stuff (0.05)
## Arth-annotated runs kept for reference but disabled:
# # Tbi_Arth_enrich <- run_enrichment(Tbi_qval_GL, geneID2GO_Tbi_Arth, 0.05)
# Tte_Arth_enrich <- run_enrichment(Tte_qval_GL, geneID2GO_Tte_Arth, 0.05)
# Tce_Arth_enrich <- run_enrichment(Tce_qval_GL, geneID2GO_Tce_Arth, 0.05)
# Tms_Arth_enrich <- run_enrichment(Tms_qval_GL, geneID2GO_Tms_Arth, 0.05)
# Tcm_Arth_enrich <- run_enrichment(Tcm_qval_GL, geneID2GO_Tcm_Arth, 0.05)
# Tsi_Arth_enrich <- run_enrichment(Tsi_qval_GL, geneID2GO_Tsi_Arth, 0.05)
# Tpa_Arth_enrich <- run_enrichment(Tpa_qval_GL, geneID2GO_Tpa_Arth, 0.05)
# Tge_Arth_enrich <- run_enrichment(Tge_qval_GL, geneID2GO_Tge_Arth, 0.05)
# Tps_Arth_enrich <- run_enrichment(Tps_qval_GL, geneID2GO_Tps_Arth, 0.05)
# Tdi_Arth_enrich <- run_enrichment(Tdi_qval_GL, geneID2GO_Tdi_Arth, 0.05)
#### run the enrichment stuff (0.05)
## Droso-annotated GO enrichment (nodeSize = 10, Fisher weight01), q < 0.05.
Tbi_Droso_enrich <- run_enrichment(Tbi_qval_GL, geneID2GO_Tbi_Droso, 0.05)
Tte_Droso_enrich <- run_enrichment(Tte_qval_GL, geneID2GO_Tte_Droso, 0.05)
Tce_Droso_enrich <- run_enrichment(Tce_qval_GL, geneID2GO_Tce_Droso, 0.05)
Tms_Droso_enrich <- run_enrichment(Tms_qval_GL, geneID2GO_Tms_Droso, 0.05)
Tcm_Droso_enrich <- run_enrichment(Tcm_qval_GL, geneID2GO_Tcm_Droso, 0.05)
Tsi_Droso_enrich <- run_enrichment(Tsi_qval_GL, geneID2GO_Tsi_Droso, 0.05)
Tpa_Droso_enrich <- run_enrichment(Tpa_qval_GL, geneID2GO_Tpa_Droso, 0.05)
Tge_Droso_enrich <- run_enrichment(Tge_qval_GL, geneID2GO_Tge_Droso, 0.05)
Tps_Droso_enrich <- run_enrichment(Tps_qval_GL, geneID2GO_Tps_Droso, 0.05)
Tdi_Droso_enrich <- run_enrichment(Tdi_qval_GL, geneID2GO_Tdi_Droso, 0.05)
#####################################################################################################################
### Tidy
## Pull the full BP result table out of each per-species enrichment list.
Tbi_Droso_enrich_table <- Tbi_Droso_enrich$allRes1_BP
Tte_Droso_enrich_table <- Tte_Droso_enrich$allRes1_BP
Tce_Droso_enrich_table <- Tce_Droso_enrich$allRes1_BP
Tms_Droso_enrich_table <- Tms_Droso_enrich$allRes1_BP
Tcm_Droso_enrich_table <- Tcm_Droso_enrich$allRes1_BP
Tsi_Droso_enrich_table <- Tsi_Droso_enrich$allRes1_BP
Tpa_Droso_enrich_table <- Tpa_Droso_enrich$allRes1_BP
Tge_Droso_enrich_table <- Tge_Droso_enrich$allRes1_BP
Tps_Droso_enrich_table <- Tps_Droso_enrich$allRes1_BP
Tdi_Droso_enrich_table <- Tdi_Droso_enrich$allRes1_BP
### add sp
Tbi_Droso_enrich_table$sp <- rep("Tbi", length(Tbi_Droso_enrich_table[,1]))
Tte_Droso_enrich_table$sp <- rep("Tte", length(Tte_Droso_enrich_table[,1]))
Tce_Droso_enrich_table$sp <- rep("Tce", length(Tce_Droso_enrich_table[,1]))
Tms_Droso_enrich_table$sp <- rep("Tms", length(Tms_Droso_enrich_table[,1]))
Tcm_Droso_enrich_table$sp <- rep("Tcm", length(Tcm_Droso_enrich_table[,1]))
Tsi_Droso_enrich_table$sp <- rep("Tsi", length(Tsi_Droso_enrich_table[,1]))
Tpa_Droso_enrich_table$sp <- rep("Tpa", length(Tpa_Droso_enrich_table[,1]))
Tge_Droso_enrich_table$sp <- rep("Tge", length(Tge_Droso_enrich_table[,1]))
Tps_Droso_enrich_table$sp <- rep("Tps", length(Tps_Droso_enrich_table[,1]))
Tdi_Droso_enrich_table$sp <- rep("Tdi", length(Tdi_Droso_enrich_table[,1]))
### join
All_Droso_enrich_table <- rbind(
Tbi_Droso_enrich_table,
Tte_Droso_enrich_table,
Tce_Droso_enrich_table,
Tms_Droso_enrich_table,
Tcm_Droso_enrich_table,
Tsi_Droso_enrich_table,
Tpa_Droso_enrich_table,
Tge_Droso_enrich_table,
Tps_Droso_enrich_table,
Tdi_Droso_enrich_table
)
###
head(All_Droso_enrich_table)
# NOTE(review): Fisher_w01 is a character column (see run_enrichment), so this
# "< 0.05" comparison is lexicographic after coercion — confirm value formats.
All_Droso_enrich_table_sig <- subset(All_Droso_enrich_table, All_Droso_enrich_table$Fisher_w01 < 0.05)
All_Droso_enrich_table_sig %>% count(sp)
# sp n
# 1 Tbi 1
# 2 Tcm 4
# 3 Tdi 9
# 4 Tge 2
# 5 Tms 7
# 6 Tpa 2
# 7 Tsi 9
# 8 Tte 19
######################################################################
### N GO terms annot in sex and asex genes with sig +ve sel
# Read a two-column file (gene name, q-value) and return the names of the
# genes with q < 0.05, as a character vector.
get_sig_gene_vect <- function(list_file_name){
  qval_df <- read.table(list_file_name)
  sig_rows <- which(qval_df$V2 < 0.05)  # which() drops NAs, like subset()
  as.character(qval_df$V1[sig_rows])
}
## Vectors of genes with significant (q < 0.05) positive-selection signal,
## one per species, from the Droso-annotated q-value files.
Tbi_sig_genes <- get_sig_gene_vect("543sp_Droso_Tbi_qval.txt")
Tte_sig_genes <- get_sig_gene_vect("543sp_Droso_Tte_qval.txt")
Tce_sig_genes <- get_sig_gene_vect("543sp_Droso_Tce_qval.txt")
Tms_sig_genes <- get_sig_gene_vect("543sp_Droso_Tms_qval.txt")
Tcm_sig_genes <- get_sig_gene_vect("543sp_Droso_Tcm_qval.txt")
Tsi_sig_genes <- get_sig_gene_vect("543sp_Droso_Tsi_qval.txt")
Tpa_sig_genes <- get_sig_gene_vect("543sp_Droso_Tpa_qval.txt")
Tge_sig_genes <- get_sig_gene_vect("543sp_Droso_Tge_qval.txt")
Tps_sig_genes <- get_sig_gene_vect("543sp_Droso_Tps_qval.txt")
Tdi_sig_genes <- get_sig_gene_vect("543sp_Droso_Tdi_qval.txt")
## get all BPs (i.e. not filtered by node size)
# Build a topGOdata object with nodeSize = 1 so that every BP GO term is
# kept (no node-size filtering). `genelist` is a named numeric q-value
# vector, `ref` a gene -> GO list; `sig_for_GO` only feeds the selection
# rule, which the GSEA itself does not use.
run_enrichment_for_all_BP <- function(genelist, ref, sig_for_GO){
  # Significance rule handed to topGO (unused by the GSEA p-values).
  topDiffGenes <- function(allScore) {
    allScore < sig_for_GO
  }
  # nodeSize = 1 -> retain all BP GO terms.
  go_data_bp <- new("topGOdata", ontology = "BP", allGenes = genelist,
                    geneSel = topDiffGenes, annot = annFUN.gene2GO,
                    gene2GO = ref, nodeSize = 1)
  list("GO_term_use_BP_list" = go_data_bp@graph@nodes,
       "GODATA_BP" = go_data_bp)
}
## Unfiltered (nodeSize = 1) topGOdata objects for every species.
Tbi_Droso_all_BP <- run_enrichment_for_all_BP(Tbi_qval_GL, geneID2GO_Tbi_Droso, 0.05)
Tte_Droso_all_BP <- run_enrichment_for_all_BP(Tte_qval_GL, geneID2GO_Tte_Droso, 0.05)
Tce_Droso_all_BP <- run_enrichment_for_all_BP(Tce_qval_GL, geneID2GO_Tce_Droso, 0.05)
Tms_Droso_all_BP <- run_enrichment_for_all_BP(Tms_qval_GL, geneID2GO_Tms_Droso, 0.05)
Tcm_Droso_all_BP <- run_enrichment_for_all_BP(Tcm_qval_GL, geneID2GO_Tcm_Droso, 0.05)
Tsi_Droso_all_BP <- run_enrichment_for_all_BP(Tsi_qval_GL, geneID2GO_Tsi_Droso, 0.05)
Tpa_Droso_all_BP <- run_enrichment_for_all_BP(Tpa_qval_GL, geneID2GO_Tpa_Droso, 0.05)
Tge_Droso_all_BP <- run_enrichment_for_all_BP(Tge_qval_GL, geneID2GO_Tge_Droso, 0.05)
Tps_Droso_all_BP <- run_enrichment_for_all_BP(Tps_qval_GL, geneID2GO_Tps_Droso, 0.05)
Tdi_Droso_all_BP <- run_enrichment_for_all_BP(Tdi_qval_GL, geneID2GO_Tdi_Droso, 0.05)
# Per-gene GO counts at three filtering levels (all GOs, all BP GOs, used BP
# GOs). `geneID2GO_file` is the *name* (string) of a gene -> GO list object.
# NOTE(review): eval(parse(text = ...)) breaks if a gene ID contains
# characters that are not valid R syntax, and seq(1:length(x)) loops once
# (with NA) for an empty input — consider get()/[[ ]] and seq_along().
get_N_GOs_wBPfilt <- function(want_vec_name, geneID2GO_file, BP_GOs_name, ALL_BP_GOs_name, sp, rep_m){
out_df = c()
for(i in seq(1:length(want_vec_name))){
#print(want_vec_name[i])
# Look up this gene's GO terms in the annotation list by constructed name.
a1 <- eval(parse(text=paste(geneID2GO_file,'$',want_vec_name[i],sep='')))
a1_filt <- a1[a1 %in% BP_GOs_name]
a2_filt <- a1[a1 %in% ALL_BP_GOs_name]
#print(a1)
#print(a1_filt)
#print("\n")
out_df <- rbind(out_df, c(want_vec_name[i], length(a1), length(a2_filt), length(a1_filt)))
colnames(out_df) <- c("gene_name", "N_All_GOs", "N_AllBP_GOs", "N_usedBP_GOs")
}
out_df <- as.data.frame(out_df)
# Counts passed through a character matrix; convert back to numeric.
out_df$N_All_GOs <- as.numeric(as.character(out_df$N_All_GOs))
out_df$N_AllBP_GOs <- as.numeric(as.character(out_df$N_AllBP_GOs))
out_df$N_usedBP_GOs <- as.numeric(as.character(out_df$N_usedBP_GOs))
out_df$sp <- rep(sp, length(out_df[,1]))
out_df$rep_mode <- rep(rep_m, length(out_df[,1]))
return(out_df)
}
### get N GOs
## Per-species GO-annotation counts for the significant genes.
Tbi_Droso_NGOs <- get_N_GOs_wBPfilt(Tbi_sig_genes, "geneID2GO_Tbi_Droso", Tbi_Droso_enrich$GO_term_use_BP_list, Tbi_Droso_all_BP$GO_term_use_BP_list, "Tbi", "sex")
Tte_Droso_NGOs <- get_N_GOs_wBPfilt(Tte_sig_genes, "geneID2GO_Tte_Droso", Tte_Droso_enrich$GO_term_use_BP_list, Tte_Droso_all_BP$GO_term_use_BP_list, "Tte", "asex")
Tce_Droso_NGOs <- get_N_GOs_wBPfilt(Tce_sig_genes, "geneID2GO_Tce_Droso", Tce_Droso_enrich$GO_term_use_BP_list, Tce_Droso_all_BP$GO_term_use_BP_list, "Tce", "sex")
Tms_Droso_NGOs <- get_N_GOs_wBPfilt(Tms_sig_genes, "geneID2GO_Tms_Droso", Tms_Droso_enrich$GO_term_use_BP_list, Tms_Droso_all_BP$GO_term_use_BP_list, "Tms", "asex")
Tcm_Droso_NGOs <- get_N_GOs_wBPfilt(Tcm_sig_genes, "geneID2GO_Tcm_Droso", Tcm_Droso_enrich$GO_term_use_BP_list, Tcm_Droso_all_BP$GO_term_use_BP_list, "Tcm", "sex")
Tsi_Droso_NGOs <- get_N_GOs_wBPfilt(Tsi_sig_genes, "geneID2GO_Tsi_Droso", Tsi_Droso_enrich$GO_term_use_BP_list, Tsi_Droso_all_BP$GO_term_use_BP_list, "Tsi", "asex")
Tpa_Droso_NGOs <- get_N_GOs_wBPfilt(Tpa_sig_genes, "geneID2GO_Tpa_Droso", Tpa_Droso_enrich$GO_term_use_BP_list, Tpa_Droso_all_BP$GO_term_use_BP_list, "Tpa", "sex")
Tge_Droso_NGOs <- get_N_GOs_wBPfilt(Tge_sig_genes, "geneID2GO_Tge_Droso", Tge_Droso_enrich$GO_term_use_BP_list, Tge_Droso_all_BP$GO_term_use_BP_list, "Tge", "asex")
Tps_Droso_NGOs <- get_N_GOs_wBPfilt(Tps_sig_genes, "geneID2GO_Tps_Droso", Tps_Droso_enrich$GO_term_use_BP_list, Tps_Droso_all_BP$GO_term_use_BP_list, "Tps", "sex")
Tdi_Droso_NGOs <- get_N_GOs_wBPfilt(Tdi_sig_genes, "geneID2GO_Tdi_Droso", Tdi_Droso_enrich$GO_term_use_BP_list, Tdi_Droso_all_BP$GO_term_use_BP_list, "Tdi", "asex")
## Stack into one data frame and derive 0/1 "has >= 1 GO" indicators.
Allsp_Droso_NGOs <- as.data.frame(rbind(
Tbi_Droso_NGOs,
Tte_Droso_NGOs,
Tce_Droso_NGOs,
Tms_Droso_NGOs,
Tcm_Droso_NGOs,
Tsi_Droso_NGOs,
Tpa_Droso_NGOs,
Tge_Droso_NGOs,
Tps_Droso_NGOs,
Tdi_Droso_NGOs
))
Allsp_Droso_NGOs$N_All_GOs_bi <- ifelse(Allsp_Droso_NGOs$N_All_GOs > 0, 1,0)
Allsp_Droso_NGOs$N_AllBP_GOs_bi <- ifelse(Allsp_Droso_NGOs$N_AllBP_GOs > 0, 1,0)
Allsp_Droso_NGOs$N_usedBP_GOs_bi <- ifelse(Allsp_Droso_NGOs$N_usedBP_GOs > 0, 1,0)
## Species -> sexual/asexual species-pair label (NA for unknown species).
Allsp_Droso_NGOs$sp_pair <-
ifelse(Allsp_Droso_NGOs$sp == "Tbi", "Tbi-Tte",
ifelse(Allsp_Droso_NGOs$sp == "Tce", "Tce-Tms",
ifelse(Allsp_Droso_NGOs$sp == "Tcm", "Tcm-Tsi",
ifelse(Allsp_Droso_NGOs$sp == "Tpa", "Tpa-Tge",
ifelse(Allsp_Droso_NGOs$sp == "Tps", "Tps-Tdi",
ifelse(Allsp_Droso_NGOs$sp == "Tte", "Tbi-Tte",
ifelse(Allsp_Droso_NGOs$sp == "Tms", "Tce-Tms",
ifelse(Allsp_Droso_NGOs$sp == "Tsi", "Tcm-Tsi",
ifelse(Allsp_Droso_NGOs$sp == "Tge", "Tpa-Tge",
ifelse(Allsp_Droso_NGOs$sp == "Tdi", "Tps-Tdi",
NA))))))))))
# Fixed pair order used on the plot x-axis.
Allsp_Droso_NGOs$sp_pair_ord = ordered(Allsp_Droso_NGOs$sp_pair, levels = c("Tbi-Tte", "Tcm-Tsi", "Tce-Tms", "Tps-Tdi", "Tpa-Tge"))
head(Allsp_Droso_NGOs)
### at least one GO term annot
Allsp_Droso_NGOs_more_than_0_used <- subset(Allsp_Droso_NGOs, Allsp_Droso_NGOs$N_usedBP_GOs > 0)
### plot
# Left: proportion of genes with >= 1 used BP GO term; right: mean number of
# used BP GO terms per gene (genes with >= 1 only); both split by rep. mode.
P1_N_usedBP_GOs_prop_GOs <- ggplot(Allsp_Droso_NGOs) +
geom_bar(aes(sp_pair_ord, N_usedBP_GOs_bi, fill = as.factor(rep_mode)), position = "dodge", stat = "summary", fun = "mean") +
theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black")) +
scale_fill_manual(values = c("#92C5DECD", "#D6604DED")) +
xlab ("Species pair") +
ylab ("Prop genes with GO terms annotated")
P1_N_usedBP_GOs_mean_GOs_more_than_0_BPused_GO <- ggplot(Allsp_Droso_NGOs_more_than_0_used ) +
geom_bar(aes(sp_pair_ord, N_usedBP_GOs, fill = as.factor(rep_mode)), position = "dodge", stat = "summary", fun = "mean") +
theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black")) +
scale_fill_manual(values = c("#92C5DECD", "#D6604DED")) +
xlab ("Species pair") +
ylab ("Mean GO terms annotated per gene")
plot_grid(P1_N_usedBP_GOs_prop_GOs, P1_N_usedBP_GOs_mean_GOs_more_than_0_BPused_GO)
|
#' Substance Use Domain
#'
#' @name Substance Use Domain
#' @description The Substance Use data of an ImmPort study is reformated to the CDISC SDTM Substance Use (SU)
#' domain model, and is a list of 2 data frames containing 1) Substance Use data \code{\link{SU}}
#' and 2) any supplemental Substance Use data \code{\link{SUPP}}
NULL
#> NULL
# call to globalVariables to prevent from generating NOTE: no visible binding for global variable <variable name>
# this hack is to satisfy CRAN (http://stackoverflow.com/questions/9439256/how-can-i-handle-r-cmd-check-no-visible-binding-for-global-variable-notes-when)
globalVariables(c("SUSEQ"))
# Get Substance Use data of a specific study
#
# The function \code{getSubstanceUse} queries the ImmPort database for getSubstanceUse data and
# reformats it to the CDISC SDTM Substance Use (SU) domain model
#
# @param data_src A connection handle to ImmPort (MySQL or SQLite) database instance or
# a directory handle to folder where study RDS files are located
# @param study_id Identifier of a specific study
# @return a list of 2 data frames containing 1) Substance Use data \code{\link{SU}} and 2) any supplemental
# Substance Use data \code{\link{SUPP}}
# @examples
# \dontrun{
# getSubstanceUse(data_src, "SDY1")
# }
#' @importFrom DBI dbGetQuery
#' @importFrom data.table as.data.table is.data.table .N :=
# Query ImmPort for one study's Substance Use records and reshape them to the
# CDISC SDTM SU domain. `data_src` is either a live DBI connection (MySQL or
# SQLite) or a directory of serialized study RDS files; `study_id` identifies
# the study. Returns list(su_df = ..., suppsu_df = ...) or an empty list when
# the study has no Substance Use rows.
getSubstanceUse <- function(data_src, study_id) {
  cat("loading Substance Use data....")
  # SDTM SU column names, in the same order as the SELECT columns below.
  su_cols <- c("STUDYID", "DOMAIN", "USUBJID", "SUSEQ", "SUTRT", "SUCAT", "SUDOSE", "SUDOSTXT", "SUDOSU",
               "SUDOSFREQ", "SUROUTE", "SUSTDTC", "SUENDTC", "SUSTDY", "SUENDY")
  sql_stmt <- paste("SELECT distinct
sub.study_accession,
\"SU\" as domain,
sub.subject_accession,
cast(0 as UNSIGNED INTEGER) as seq,
sub.compound_name_reported,
sub.merge_name_reported,
sub.dose,
sub.dose_reported,
sub.dose_units,
sub.dose_freq_per_interval,
sub.route_of_admin_reported,
sub.start_time,
sub.end_time,
sub.start_day,
sub.end_day
FROM substance_merge sub
WHERE sub.study_accession in ('", study_id, "') AND
sub.compound_role='Substance Use'
ORDER BY sub.subject_accession", sep = "")
  # inherits() replaces class(x)[1] == "..." so subclasses of the connection
  # classes are also recognized.
  if (inherits(data_src, "MySQLConnection") ||
      inherits(data_src, "SQLiteConnection")) {
    su_df <- dbGetQuery(data_src, statement = sql_stmt)
    colnames(su_df) <- su_cols
    suppsu_df <- data.frame()
    if (nrow(su_df) > 0) {
      # SUSEQ arrives as a constant 0; renumber 1..N within each subject.
      su_df <- transform(su_df, SUSEQ = as.integer(SUSEQ))
      su_dt <- as.data.table(su_df)
      if (is.data.table(su_dt) == TRUE) {
        su_dt[, `:=`(SUSEQ, seq_len(.N)), by = "USUBJID"]
      }
      su_df <- as.data.frame(su_dt)
    }
  } else {
    # Directory source: read the pre-serialized study data instead.
    l <- loadSerializedStudyData(data_src, study_id, "Substance Use")
    su_df <- l[[1]]
    suppsu_df <- l[[2]]
  }
  cat("done", "\n")
  su_l <- list()
  if (nrow(su_df) > 0) {
    su_l <- list(su_df = su_df, suppsu_df = suppsu_df)
  }
  su_l
}
# Get count of Substance Use data of a specific study
#
# The function \code{getCountOfSubstanceUse} queries the ImmPort database for count
# of Substance Use data
#
# @param conn A connection handle to ImmPort database instance
# @param study_id Identifier of a specific study
# @return a count of Substance Use data
# @examples
# \dontrun{
# # get count of study SDY1's Substance Use data
# count <- getCountOfSubstanceUse(conn, "SDY1")
# }
# Count the Substance Use rows of study `study_id` via connection `conn`;
# returns a single number (the count).
getCountOfSubstanceUse <- function(conn, study_id) {
sql_stmt <- paste("SELECT count(*)
FROM substance_merge sub
WHERE sub.study_accession in ('", study_id, "') AND
sub.compound_role='Substance Use'", sep = "")
count <- dbGetQuery(conn, statement = sql_stmt)
# dbGetQuery returns a 1x1 data frame; extract the scalar.
count[1, 1]
}
##' Substance Use Domain Variables
##' @name SU
##' @description {
##' \tabular{ll}{
##' \strong{Variable Name } \tab \strong{Variable Label} \cr
##' STUDYID \tab Study Identifier \cr
##' DOMAIN \tab Domain Abbreviation \cr
##' USUBJID \tab Unique Subject Identifier \cr
##' SUSEQ \tab Sequence Number \cr
##' SUTRT \tab Reported Name of Substance \cr
##' SUCAT \tab Category of Substance Use \cr
##' SUDOSE \tab Substance Use Consumption \cr
##' SUDOSTXT \tab Substance Use Consumption Text \cr
##' SUDOSU \tab Consumption Units \cr
##' SUDOSFRQ \tab Use Frequency per Interval \cr
##' SUROUTE \tab Route of Administration \cr
##' SUSTDTC \tab Start Date/Time of Substance Use \cr
##' SUENDTC \tab End Date/Time of Substance Use \cr
##' SUSTDY \tab Study Day of Start of Substance Use \cr
##' SUENDY \tab Study Day of End of Substance Use
##' }
##' }
NULL
#> NULL
| /R/SubstanceUse.R | no_license | rdshankar/RImmPort | R | false | false | 5,197 | r | #' Substance Use Domain
#'
#' @name Substance Use Domain
#' @description The Substance Use data of an ImmPort study is reformated to the CDISC SDTM Substance Use (SU)
#' domain model, and is a list of 2 data frames containing 1) Substance Use data \code{\link{SU}}
#' and 2) any supplemental Substance Use data \code{\link{SUPP}}
NULL
#> NULL
# call to globalVariables to prevent from generating NOTE: no visible binding for global variable <variable name>
# this hack is to satisfy CRAN (http://stackoverflow.com/questions/9439256/how-can-i-handle-r-cmd-check-no-visible-binding-for-global-variable-notes-when)
globalVariables(c("SUSEQ"))
# Get Substance Use data of a specific study
#
# The function \code{getSubstanceUse} queries the ImmPort database for getSubstanceUse data and
# reformats it to the CDISC SDTM Substance Use (SU) domain model
#
# @param data_src A connection handle to ImmPort (MySQL or SQLite) database instance or
# a directory handle to folder where study RDS files are located
# @param study_id Identifier of a specific study
# @return a list of 2 data frames containing 1) Substance Use data \code{\link{SU}} and 2) any supplemental
# Substance Use data \code{\link{SUPP}}
# @examples
# \dontrun{
# getSubstanceUse(data_src, "SDY1")
# }
#' @importFrom DBI dbGetQuery
#' @importFrom data.table as.data.table is.data.table .N :=
# Query ImmPort for one study's Substance Use records and reshape them to the
# CDISC SDTM SU domain. `data_src` is either a live DBI connection (MySQL or
# SQLite) or a directory of serialized study RDS files. Returns
# list(su_df = ..., suppsu_df = ...) or an empty list when no rows exist.
getSubstanceUse <- function(data_src, study_id) {
cat("loading Substance Use data....")
# SDTM SU column names, in the same order as the SELECT columns below.
su_cols <- c("STUDYID", "DOMAIN", "USUBJID", "SUSEQ", "SUTRT", "SUCAT", "SUDOSE", "SUDOSTXT", "SUDOSU",
"SUDOSFREQ", "SUROUTE", "SUSTDTC", "SUENDTC", "SUSTDY", "SUENDY")
sql_stmt <- paste("SELECT distinct
sub.study_accession,
\"SU\" as domain,
sub.subject_accession,
cast(0 as UNSIGNED INTEGER) as seq,
sub.compound_name_reported,
sub.merge_name_reported,
sub.dose,
sub.dose_reported,
sub.dose_units,
sub.dose_freq_per_interval,
sub.route_of_admin_reported,
sub.start_time,
sub.end_time,
sub.start_day,
sub.end_day
FROM substance_merge sub
WHERE sub.study_accession in ('", study_id, "') AND
sub.compound_role='Substance Use'
ORDER BY sub.subject_accession", sep = "")
# NOTE(review): class(x)[1] == "..." misses subclasses; inherits() would be
# the more robust check here.
if ((class(data_src)[1] == 'MySQLConnection') ||
(class(data_src)[1] == 'SQLiteConnection')) {
su_df <- dbGetQuery(data_src, statement = sql_stmt)
colnames(su_df) <- su_cols
suppsu_df <- data.frame()
if (nrow(su_df) > 0) {
# SUSEQ arrives as a constant 0; renumber 1..N within each subject.
su_df <- transform(su_df, SUSEQ = as.integer(SUSEQ))
su_dt <- as.data.table(su_df)
if (is.data.table(su_dt) == TRUE) {
su_dt[, `:=`(SUSEQ, seq_len(.N)), by = "USUBJID"]
}
su_df <- as.data.frame(su_dt)
}
} else {
# Directory source: read the pre-serialized study data instead.
l <- loadSerializedStudyData(data_src, study_id, "Substance Use")
su_df <- l[[1]]
suppsu_df <- l[[2]]
}
cat("done", "\n")
su_l <- list()
if (nrow(su_df) > 0)
su_l <- list(su_df=su_df, suppsu_df=suppsu_df)
su_l
}
# Get count of Substance Use data of a specific study
#
# The function \code{getCountOfSubstanceUse} queries the ImmPort database for count
# of Substance Use data
#
# @param conn A connection handle to ImmPort database instance
# @param study_id Identifier of a specific study
# @return a count of Substance Use data
# @examples
# \dontrun{
# # get count of study SDY1's Substance Use data
# count <- getCountOfSubstanceUse(conn, "SDY1")
# }
# Count the Substance Use rows of study `study_id` via connection `conn`;
# dbGetQuery returns a 1x1 data frame, so the scalar is extracted directly.
getCountOfSubstanceUse <- function(conn, study_id) {
  row_count_sql <- paste("SELECT count(*)
FROM substance_merge sub
WHERE sub.study_accession in ('", study_id, "') AND
sub.compound_role='Substance Use'", sep = "")
  dbGetQuery(conn, statement = row_count_sql)[1, 1]
}
##' Substance Use Domain Variables
##' @name SU
##' @description {
##' \tabular{ll}{
##' \strong{Variable Name } \tab \strong{Variable Label} \cr
##' STUDYID \tab Study Identifier \cr
##' DOMAIN \tab Domain Abbreviation \cr
##' USUBJID \tab Unique Subject Identifier \cr
##' SUSEQ \tab Sequence Number \cr
##' SUTRT \tab Reported Name of Substance \cr
##' SUCAT \tab Category of Substance Use \cr
##' SUDOSE \tab Substance Use Consumption \cr
##' SUDOSTXT \tab Substance Use Consumption Text \cr
##' SUDOSU \tab Consumption Units \cr
##' SUDOSFRQ \tab Use Frequency per Interval \cr
##' SUROUTE \tab Route of Administration \cr
##' SUSTDTC \tab Start Date/Time of Substance Use \cr
##' SUENDTC \tab End Date/Time of Substance Use \cr
##' SUSTDY \tab Study Day of Start of Substance Use \cr
##' SUENDY \tab Study Day of End of Substance Use
##' }
##' }
NULL
#> NULL
|
library(tm)
library(plyr)
suppressPackageStartupMessages(library("argparse"))
source("spClassR/helper.R")
# Tokenize `x` by splitting on whitespace runs, swallowing adjacent
# punctuation (\W); returns a flat character vector of words.
strsplit_space_tokenizer <- function(x){
  token_pieces <- strsplit(x, "\\W*\\s+\\W*", perl = TRUE)
  unlist(token_pieces)
}
# Normalize raw email text into cleaned tokens: lower-case, split on
# whitespace/punctuation, Porter-stem (tm), then drop stop words.
# `stops` defaults to tm's English stop-word list.
prepEmailTokens <- function(emailText, stops=stopwords("english")){
# lower letters
emailLower <- tolower(emailText)
# split the document into single words
strsplit_token_email <- strsplit_space_tokenizer(emailLower)
# stemming
stem_email <- stemDocument(strsplit_token_email, language="english")
# remove words from stop word set (very frequently occuring words)
stem_removed_email <- stem_email[!stem_email %in% stops]
return(stem_removed_email) # a vector of all tokens
}
# Pull the frequencies of `tokens` out of the term-frequency vector `tf`;
# tokens absent from the document get a frequency of 0.
extractFreq <- function(tf, tokens){
  token_freqs <- replace(tf[tokens], is.na(tf[tokens]), 0)
  names(token_freqs) <- tokens
  token_freqs
}
# Term-frequency vector (tm::termFreq) for a single document string.
getTermFreqs <- function(emailText){
  doc_corpus <- Corpus(VectorSource(emailText))
  termFreq(doc_corpus[[1]])
}
# Thin wrapper: compute token features from the row's emailText field.
dtfFeatures <- function(x, tokens){
  dtfFeatures_help(x$emailText, tokens)
}
# Token-based features for one email: raw and cleaned token counts plus the
# term frequencies of the requested `tokens`, returned as a one-row matrix.
dtfFeatures_help <- function(emailText, tokens){
emailTokens <- prepEmailTokens(emailText)
# paste tokens into a document
cleanText <- paste(emailTokens, collapse=" ")
tfAll <- getTermFreqs(cleanText)
tfTokens <- extractFreq(tfAll, tokens)
# Augmented (0.5..1.0 scaled) term frequency; computed but NOT used in the
# returned features below (raw tfTokens is used) — confirm this is intended.
tfTokensAug <- (tfTokens * 0.5 / max(tfAll)) + 0.5
# print(tfTokensAug)
result <- c(
tokenCount=length(strsplit_space_tokenizer(emailText)),
cleanTokenCount=length(emailTokens),
tfTokens#tfTokensAug
)
# rbind() turns the named vector into a 1-row matrix so adply can cbind it.
result <- rbind(result)
return(result)
}
# tf-idf for one term across documents: idf = log(N / df), where N is the
# number of documents (length of `tf`) and df the number with tf > 0
# (tf = 0.5 being the augmented-tf baseline for "absent").
computeDtfIdf <- function(tf){
  idf_weight <- log(length(tf) / sum(tf > 0))
  tf * idf_weight
}
# Append a "<token>_tfidf" column to `dtf` for every token, computed from
# that token's raw term-frequency column via computeDtfIdf().
getDtfIdfAll <- function(dtf, tokens){
  augmented <- dtf[, ]
  for (tok in tokens) {
    tfidf_col <- data.frame(nf = computeDtfIdf(dtf[, tok]))
    colnames(tfidf_col) <- sprintf("%s_tfidf", tok)
    augmented <- cbind(augmented, tfidf_col)
  }
  augmented
}
# Character lengths of the sentences in `text`, split on '.', '?' or '!'.
# nchar() is already vectorized, so the per-element plyr::laply loop was
# unnecessary; result is identical (an integer vector).
sentenceLength <- function(text){
  sentences <- unlist(strsplit(text, "(\\?|\\.|\\!)", perl = TRUE))
  nchar(sentences)
}
# Count characters that are unchanged by toupper().
# NOTE(review): this counts digits, spaces and punctuation as well as A-Z,
# not only capital letters — confirm that this is the intended feature.
countUpperCase <- function(text){
  chars <- unlist(strsplit(text, ''))
  sum(chars == toupper(chars))
}
# Count first-person pronoun tokens: "i" and contractions (i'll, i've, ...),
# but not words merely containing "i" (idea, hi). Removed the dead cntMy /
# myregex computation that duplicated countMy() and was never returned.
countI <- function(rawEmail){
  emailTokens <- strsplit_space_tokenizer(tolower(rawEmail))
  iregex <- "(\\Ai|(i\\'\\w*))\\Z" # i, i'll, i've etc, but not idea, or hi
  length(grep(iregex, emailTokens, perl = TRUE, value = TRUE))
}
# Count "%" characters in the email.
# Fix: gregexpr() returns a single -1 when there is no match, so the former
# length() call wrongly reported 1 for emails without any "%".
countPercent <- function(email){
  percent_matches <- gregexpr("\\%", email, perl = TRUE)[[1]]
  if (percent_matches[1] == -1) {
    return(0L)
  }
  length(percent_matches)
}
# Number of whole-word "my" tokens in the lower-cased email.
countMy <- function(rawEmail){
  lowered_tokens <- strsplit_space_tokenizer(tolower(rawEmail))
  sum(grepl("\\Amy\\Z", lowered_tokens, perl = TRUE))
}
# Count runs of 2 or more consecutive digits in the email.
# Fix: gregexpr() returns a single -1 when nothing matches, which the former
# length() call counted as one hit.
countDigits <- function(email){
  digit_matches <- gregexpr("\\d{2,}", email, perl = TRUE)[[1]]
  if (digit_matches[1] == -1) {
    return(0L)
  }
  length(digit_matches)
}
# Assemble stylistic (non-token) features for one raw email string into a
# one-row data frame.
nonTokenFeatures <- function(emailRaw){
senlens <- sentenceLength(emailRaw)
senLength <- mean(senlens)
# cap letter not occuring at beginning of sentence
# (approximated by subtracting one presumed leading capital per sentence)
capCount <- countUpperCase(emailRaw) - length(senlens)
# NOTE(review): a '?' or '!' at the very end of the text is not counted,
# because strsplit drops the trailing empty piece — confirm acceptable.
questionCount <- length(unlist(strsplit(emailRaw,'\\?'))) - 1
exclaimCount <- length(unlist(strsplit(emailRaw,'\\!'))) - 1
iCount <- countI(emailRaw)
digitCount <- countDigits(emailRaw)
feat <- data.frame(
senLength=senLength,
capCount=capCount,
questionCount=questionCount,
exclaimCount=exclaimCount,
iCount=iCount,
myCount=countMy(emailRaw),
digitCount=digitCount,
countPercent=countPercent(emailRaw)
)
return(feat)
}
# Build the full feature table for email data frame `eDat` (one row per
# email, column emailText): per-row token + stylistic features, then the
# tf-idf columns for `tokens`.
# Fix: the row-wise builder previously forwarded the *global* tokensNew
# instead of this function's own `tokens` argument.
extraFeatures <- function(eDat, tokens){
  features <- adply(eDat, .margins = 1, .fun = function(x, tokens){
    featSet1 <- dtfFeatures(x, tokens)
    featSet2 <- nonTokenFeatures(x[1, 'emailText'])
    cbind(featSet1, featSet2)
  }, tokens = tokens)
  features <- getDtfIdfAll(features, tokens) # add DtfIdf features
  #features <- features[,!(colnames(features) %in% tokens)] # remove tf columns
  features <- features[, colnames(features) != 'emailText'] # drop raw text column
  features
}
# Stemmed marker-token sets. NOTE(review): tokensOrig is defined but never
# used below -- only tokensNew (which adds "manage") feeds extraFeatures().
tokensOrig <- stemDocument(c("price", "customer", "product", "look", "buy"),
language='english')
tokensNew <- stemDocument(c("price", "customer", "product", "look", "buy", "manage"),
language='english')
# create parser object
parser <- ArgumentParser()
# specify our desired options
# by default ArgumentParser will add an help option
parser$add_argument("-i", "--inputFilePath",
default="data/testemails.fake.Rda",
type="character",
help="Rda object of unlabeled emails")
# get command line options, if help option encountered print help and exit,
# otherwise if options not found on command line then set defaults,
args <- parser$parse_args()
print(args)
# their respective r object name is the same as their file name
labeledEmails <- readRDS("data/labeledEmails.Rda")
unlabeledEmails <- readRDS(args$inputFilePath)
# compute and persist features for the labeled training e-mails
labeledFeatures <- extraFeatures(labeledEmails, tokensNew)
write.csv(labeledFeatures,
file="results/labeledFeatures.csv",
row.names=FALSE)
# ... and for the unlabeled e-mails; output is named after the input file's
# nickname (getNickname is presumably defined in a sourced helper -- confirm)
unlabeledFeatures <- extraFeatures(unlabeledEmails, tokensNew)
outNickname <- getNickname(args$inputFilePath)
outFilePath <- paste(c("results/unlabeledFeatures",
outNickname, "csv"), collapse=".")
write.csv(unlabeledFeatures,
file=outFilePath,
row.names=FALSE)
| /spClassR/features.R | no_license | sibyl229/spClass | R | false | false | 5,993 | r | library(tm)
library(plyr)
suppressPackageStartupMessages(library("argparse"))
source("spClassR/helper.R")
strsplit_space_tokenizer <- function(x){
  # Split `x` into tokens on runs of whitespace, absorbing any non-word
  # characters that immediately surround the whitespace.
  pieces <- strsplit(x, "\\W*\\s+\\W*", perl = TRUE)
  unlist(pieces)
}
prepEmailTokens <- function(emailText, stops=stopwords("english")){
# Normalise raw e-mail text into a vector of cleaned tokens:
# lower-case, whitespace-tokenise, stem, then drop stop words.
# `stops` defaults to tm's English stop-word list.
# lower letters
emailLower <- tolower(emailText)
# split the document into single words
strsplit_token_email <- strsplit_space_tokenizer(emailLower)
# stemming (via tm's stemDocument)
stem_email <- stemDocument(strsplit_token_email, language="english")
# remove words from stop word set (very frequently occurring words)
stem_removed_email <- stem_email[!stem_email %in% stops]
return(stem_removed_email) # a vector of all tokens
}
extractFreq <- function(tf, tokens){
  # Pull the frequencies of `tokens` out of the term-frequency vector `tf`,
  # substituting 0 for tokens that never occur in the document.
  freqs <- tf[tokens]
  freqs[is.na(freqs)] <- 0
  names(freqs) <- tokens
  freqs
}
getTermFreqs <- function(emailText){
# Term-frequency table for a single document (tm package): wrap the text in
# a one-document corpus and count each term's occurrences.
myCorpus <- Corpus(VectorSource(emailText))
tf <- termFreq(myCorpus[[1]])
return(tf)
}
dtfFeatures <- function(x, tokens){
  # Thin wrapper: compute token-frequency features from the row's raw text.
  dtfFeatures_help(x$emailText, tokens)
}
dtfFeatures_help <- function(emailText, tokens){
# One-row matrix of token-frequency features for a single e-mail: the total
# token count, the cleaned token count, and the raw frequency of each marker
# token in `tokens`.
emailTokens <- prepEmailTokens(emailText)
# paste tokens into a document
cleanText <- paste(emailTokens, collapse=" ")
tfAll <- getTermFreqs(cleanText)
tfTokens <- extractFreq(tfAll, tokens)
# Augmented (0.5-1.0 scaled) term frequency. NOTE(review): computed but
# currently unused -- raw tfTokens is what feeds `result` below; swap the
# commented name inside c(...) to switch.
tfTokensAug <- (tfTokens * 0.5 / max(tfAll)) + 0.5
# print(tfTokensAug)
result <- c(
tokenCount=length(strsplit_space_tokenizer(emailText)),
cleanTokenCount=length(emailTokens),
tfTokens#tfTokensAug
)
result <- rbind(result)
return(result)
}
computeDtfIdf <- function(tf){
  # tf-idf for one token column across documents: idf is
  # log(#documents / #documents containing the token), scaled by each
  # document's term frequency.
  n_docs <- length(tf)
  n_with_term <- sum(tf > 0)
  tf * log(n_docs / n_with_term)
}
getDtfIdfAll <- function(dtf, tokens){
  # Append one "<token>_tfidf" column to `dtf` for every marker token.
  result <- dtf[,]
  for (tok in tokens){
    tfidf_col <- data.frame(nf=computeDtfIdf(dtf[,tok]))
    colnames(tfidf_col) <- c(sprintf('%s_tfidf', tok))
    result <- cbind(result, tfidf_col)
  }
  result
}
sentenceLength <- function(text){
  # Character length of each sentence in `text`, where sentences are
  # delimited by '.', '?' or '!'.
  sentences <- unlist(strsplit(text, "(\\?|\\.|\\!)", perl=TRUE))
  # nchar() is already vectorised -- no need for plyr::laply here.
  nchar(sentences)
}
countUpperCase <- function(text){
  # Count the upper-case LETTERS in `text`.
  # The previous comparison `orig == toupper(orig)` also counted every
  # character that is unchanged by toupper() -- digits, punctuation and
  # whitespace -- badly inflating the count; restrict to real capitals.
  chars <- unlist(strsplit(text, ''))
  upcount <- sum(grepl("[[:upper:]]", chars))
  return(upcount)
}
countI <- function(rawEmail){
  # Count first-person tokens: "i", "i'll", "i've", etc. (but not "idea" or "hi").
  # NOTE(review): the second alternative (i'\w*) is not anchored at the token
  # start, so e.g. "hi'll" would also match -- confirm intended.
  emailTokens <- strsplit_space_tokenizer(tolower(rawEmail))
  iregex <- "(\\Ai|(i\\'\\w*))\\Z" # i, i'll, i've etc, but not idea, or hi
  cntI <- length(grep(iregex, emailTokens, perl = TRUE))
  # (dead cntMy/myregex code removed -- that logic lives in countMy())
  return(cntI)
}
countPercent <- function(email){
  # Count '%' characters in `email`.
  # gregexpr() returns -1 when there is no match, so length() of the match
  # vector is 1 even for zero matches; count only real (positive) positions.
  matches <- gregexpr("%", email, fixed = TRUE)[[1]]
  cnt <- sum(matches > 0)
  return(cnt)
}
countMy <- function(rawEmail){
  # Number of occurrences of the standalone token "my" in the e-mail text.
  tokens <- strsplit_space_tokenizer(tolower(rawEmail))
  hits <- grep("\\Amy\\Z", tokens, perl = TRUE)
  length(hits)
}
countDigits <- function(email){
  # Count runs of 2 or more consecutive digits in `email`.
  # Guard against gregexpr()'s -1 "no match" sentinel, which previously made
  # digit-free emails report a count of 1.
  matches <- gregexpr("\\d{2,}", email, perl = TRUE)[[1]]
  cnt <- sum(matches > 0)
  return(cnt)
}
nonTokenFeatures <- function(emailRaw){
  # Stylistic (token-independent) features of one raw e-mail: average
  # sentence length, capitalisation, punctuation, first-person pronoun,
  # digit and percent-sign counts. Returns a one-row data.frame.
  senlens <- sentenceLength(emailRaw)
  senLength <- mean(senlens)
  # cap letters not occurring at the beginning of a sentence
  capCount <- countUpperCase(emailRaw) - length(senlens)
  # Count '?' and '!' directly. The previous strsplit()-based counts dropped
  # trailing separators, so e-mails ending in '?' or '!' were under-counted.
  chars <- unlist(strsplit(emailRaw, ''))
  questionCount <- sum(chars == '?')
  exclaimCount <- sum(chars == '!')
  iCount <- countI(emailRaw)
  digitCount <- countDigits(emailRaw)
  feat <- data.frame(
    senLength=senLength,
    capCount=capCount,
    questionCount=questionCount,
    exclaimCount=exclaimCount,
    iCount=iCount,
    myCount=countMy(emailRaw),
    digitCount=digitCount,
    countPercent=countPercent(emailRaw)
  )
  return(feat)
}
extraFeatures <- function(eDat, tokens){
  # Build the full feature table for a data frame of e-mails: per-row token
  # frequency features + stylistic features, then tf-idf columns.
  # BUG FIX: the per-row callback previously received `tokens=tokensNew`
  # (a global), silently ignoring this function's `tokens` argument.
  features <- adply(eDat, .margins=1, .fun=function(x, tokens){
    featSet1 <- dtfFeatures(x, tokens)                # token-frequency features
    featSet2 <- nonTokenFeatures(x[1,'emailText'])    # stylistic features
    return(cbind(featSet1, featSet2))
  }, tokens=tokens)
  features <- getDtfIdfAll(features, tokens) # add DtfIdf features
  #features <- features[,!(colnames(features) %in% tokens)] # remove tf columns
  features <- features[,colnames(features) != 'emailText'] # drop the raw e-mail text column
  return(features)
}
# Stemmed marker-token sets. NOTE(review): tokensOrig is defined but never
# used below -- only tokensNew (which adds "manage") feeds extraFeatures().
tokensOrig <- stemDocument(c("price", "customer", "product", "look", "buy"),
language='english')
tokensNew <- stemDocument(c("price", "customer", "product", "look", "buy", "manage"),
language='english')
# create parser object
parser <- ArgumentParser()
# specify our desired options
# by default ArgumentParser will add an help option
parser$add_argument("-i", "--inputFilePath",
default="data/testemails.fake.Rda",
type="character",
help="Rda object of unlabeled emails")
# get command line options, if help option encountered print help and exit,
# otherwise if options not found on command line then set defaults,
args <- parser$parse_args()
print(args)
# their respective r object name is the same as their file name
labeledEmails <- readRDS("data/labeledEmails.Rda")
unlabeledEmails <- readRDS(args$inputFilePath)
# compute and persist features for the labeled training e-mails
labeledFeatures <- extraFeatures(labeledEmails, tokensNew)
write.csv(labeledFeatures,
file="results/labeledFeatures.csv",
row.names=FALSE)
# ... and for the unlabeled e-mails; output is named after the input file's
# nickname (getNickname comes from spClassR/helper.R, sourced above)
unlabeledFeatures <- extraFeatures(unlabeledEmails, tokensNew)
outNickname <- getNickname(args$inputFilePath)
outFilePath <- paste(c("results/unlabeledFeatures",
outNickname, "csv"), collapse=".")
write.csv(unlabeledFeatures,
file=outFilePath,
row.names=FALSE)
|
library(tidyverse)
library(cifti)
library(gifti)
library(XML)
library(DescTools)
library(Cairo)
# This script will:
# 1. Map AHBA samples to Schaefer cortical parcels (average)
# 2. Similarly, map to Desikan parcels
# 3. Mid-way through the project, the Schaefer atlas was fixed to correct minor
# errors in the naming convention of individual parcels. We map the old parcel
# labels to the new ones to help with forward compatibility.
# function to plot parcel-wise metric files for viewing with HCP wb_view
plot_matlab = function(values, out_path, parcel_num, net_num){
# Render one value per Schaefer parcel onto the cortical surface by writing
# `values` to a temp file and shelling out to a MATLAB helper script
# (plotVolOnSurface) for use with HCP wb_view.
#   values     - numeric vector, one value per parcel
#   out_path   - path for the rendered output
#   parcel_num - Schaefer parcel count (e.g. '200')
#   net_num    - Schaefer network count (e.g. '17')
# NOTE(review): all paths are hard-coded to the Milgram cluster, and the
# temp file lives under a different project (sst_pvalb_nn) -- confirm.
base_dir = '/gpfs/milgram/project/holmes/kma52/mdd_gene_expr'
dscalar_template = paste0(base_dir, '/data/Schaefer/Schaefer2018_',parcel_num,'Parcels_',net_num,'Networks_order.dscalar.nii')
parcel_info_file = paste0(base_dir, '/data/Schaefer/Schaefer2018_',parcel_num,'Parcels_',net_num,'Networks_order_info.txt')
write_val_file = paste0('/gpfs/milgram/project/holmes/kma52/sst_pvalb_nn/data/ahba/tmp/tmp.txt')
write_delim(delim=' ', x=as.data.frame(values), path=write_val_file,col_names=F)
save_path = out_path
print(save_path)
matfunc = 'plotVolOnSurface'
cmd = paste0('/gpfs/milgram/apps/hpc.rhel7/software/MATLAB/2017b/bin/matlab -nosplash -nodesktop -r "cd(\'/gpfs/milgram/project/holmes/kma52/mdd_gene_expr/scripts/util\');',
matfunc, '(\'', dscalar_template, '\',\'', parcel_info_file, '\',\'',
write_val_file, '\',\'', save_path, '\'); exit;"')
system(cmd)
}
# Step 1: Read data
# -------------
base_dir = '/gpfs/milgram/project/holmes/kma52/mdd_gene_expr'
# read sample information
load(file=paste0(base_dir, '/data/ahba/donorDat_obj.Rdata'), verbose=T)
# read ctx expression data
expr_dat_in = paste0(base_dir, '/data/ahba/ahba_ctx_zWithinSubject_zWithinSample.Rdata')
load(verbose=T, expr_dat_in)
# read sample-to-vertex projection info
sample_info = read_csv(paste0(base_dir, '/data/ahba/sample_info_vertex_mapped.csv'))
# keep only samples within 4mm of the cortical surface
sample_dat = sample_info[which(abs(sample_info$mm_to_surf) < 4),]
# Step 2: Identify the closest surface vertex for each AHBA sample
# -------------
# cortical sample by normalized gene expression data frame
reg_micro_scale = as.data.frame(t(ctx_data_scale))
match_idxs = match(ctx_samp_all$well_id, sample_dat$well_id)
reg_samples = sample_dat[match_idxs[!is.na(match_idxs)],]
# sanity check: prints any mismatched well_ids (expected: integer(0))
which(reg_samples$well_id != ctx_samp_all$well_id)
# stitch the left/right vertices together to match Schaefer parcel cifti format
reg_samples$bihemi_vertex = reg_samples$vertex + 1 # cifti indices index at 0, R indexes at 1
right_ctx_idxs = intersect(grep('right', reg_samples$structure_name), which(reg_samples$top_level == 'CTX'))
# offset right-hemisphere vertices by 32492 (vertices per 32k hemisphere)
reg_samples$bihemi_vertex[right_ctx_idxs] = reg_samples$bihemi_vertex[right_ctx_idxs] + 32492
# Step 3: Summarise expression data within each Schaefer parcel
# -------------
# Read Schaefer parcel info (for multiple parcel #'s, network assignments)
donor_arr = c('9861','10021','12876','14380','15496','15697')
donor_specific_expression = NULL
for (parcels in c('200')){
for (net in c('17')){
# schaeffer parcellation by vertex
schaeffer = paste0(base_dir, '/data/Schaefer/Schaefer2018_',parcels,'Parcels_',net,'Networks_order.dscalar.nii')
schaef_cii = read_cifti(schaeffer, drop_data = TRUE, trans_data = TRUE)
# corresponding parcel labels
schaeffer_labels = read_csv(col_names=F, paste0(base_dir, '/data/Schaefer/Schaefer2018_',parcels,'Parcels_',net,'Networks_order_info.txt'))
schaef_labels = schaeffer_labels$X1[grep('Network', schaeffer_labels$X1)]
# pre-allocate gene x parcel matrix; parcels with too few samples stay NA
schaeffer_mat = matrix(NA, ncol=as.numeric(parcels), nrow=ncol(reg_micro_scale))
for (donor in donor_arr){
donor_specific_expression[[donor]] = as.data.frame(schaeffer_mat)
}
# loop over each parcel, find matching samples, calculate average expression
for (idx in 1:length(schaef_labels)){
write(idx,'')
parcel_idxs = which(schaef_cii$data == idx) # schaeffer indices
match_idxs = which(reg_samples$bihemi_vertex %in% parcel_idxs) # foci within this parcel
# require at least 2 samples in the parcel; otherwise leave the column NA
if (length(match_idxs) < 2){
next
}
# expr data for this parcel
match_samples = reg_samples[match_idxs,]
schaeffer_expr = apply(reg_micro_scale[match_idxs,], 2, mean)
# plug in values to the pre-allocated matrix
schaeffer_mat[,idx] = schaeffer_expr
# same aggregation, restricted to each donor's samples
for (donor in donor_arr){
donor_idxs = which(reg_samples$brain == donor)
donor_matches = intersect(donor_idxs, match_idxs)
if (length(donor_matches) < 2){
next
}
# expr data for this parcel
donor_match_samples = reg_samples[donor_matches,]
donor_schaeffer_expr = apply(reg_micro_scale[donor_matches,], 2, mean)
donor_specific_expression[[donor]][,idx] = donor_schaeffer_expr
}
}
# add column/row names
schaef_out = as.data.frame(schaeffer_mat)
rownames(schaef_out) = colnames(reg_micro_scale)
colnames(schaef_out) = schaef_labels
# write one gene x parcel matrix per donor ...
for (donor in donor_arr){
rownames(donor_specific_expression[[donor]]) = colnames(reg_micro_scale)
colnames(donor_specific_expression[[donor]]) = schaef_labels
donor_specific_expression[[donor]]$gene = colnames(reg_micro_scale)
out_path = paste0(base_dir, '/data/ahba_parcel/schaeffer_ahba_ctx_zWithinSubject_zWithinSample_donor',as.character(donor),'_',parcels,'_',net,'Net_expr_mat.csv')
write_csv(donor_specific_expression[[donor]], path=out_path)
}
# ... and one matrix pooled across donors
schaef_out$gene = colnames(reg_micro_scale)
out_path = paste0(base_dir, '/data/ahba_parcel/schaeffer_ahba_ctx_zWithinSubject_zWithinSample_',parcels,'_',net,'Net_expr_mat.csv')
write_csv(schaef_out, path=out_path)
}
}
# Step 4: Make sure the Schaefer parcel labels reflect the latest/"fixed" naming conventions
# -------------
# Read the ROI-summarized expression data from Step 3 and rename any parcel
# columns whose label changed between the old and corrected Schaefer release.
schaeffer_mat = read_csv(paste0(base_dir, '/data/ahba_parcel/schaeffer_ahba_ctx_zWithinSubject_zWithinSample_200_17Net_expr_mat.csv'))
# Pair each new-convention parcel label with its old-convention counterpart;
# both info files alternate network-label lines with parcel-info lines.
info_file = paste0(base_dir, '/reference_files/Schaefer2018_200Parcels_17Networks_order_info.txt')
new_mapping = read.csv(header=F, info_file)
new_map_df = data.frame(new_net=as.character(new_mapping$V1[grep('17Networks', new_mapping$V1)]),
new_info=as.character(new_mapping$V1[!grepl('17Networks', new_mapping$V1)]), stringsAsFactors=F)
old_info_file = paste0(base_dir, '/data/Schaefer/Schaefer2018_200Parcels_17Networks_order_info.txt')
old_mapping = read.csv(header=F, old_info_file)
old_map_df = data.frame(old_net=as.character(old_mapping$V1[grep('17Networks', old_mapping$V1)]),
old_info=as.character(old_mapping$V1[!grepl('17Networks', old_mapping$V1)]), stringsAsFactors=F)
both_map_df = cbind(new_map_df, old_map_df)
# keep only parcels whose label actually changed
remap_df = both_map_df[both_map_df$old_net != both_map_df$new_net,]
old_df_cols = colnames(schaeffer_mat)
new_df_cols = old_df_cols
# seq_len() instead of 1:nrow(): if no labels changed, 1:nrow(remap_df)
# would iterate over c(1, 0) and index a non-existent row; seq_len(0) skips.
for (replace_row in seq_len(nrow(remap_df))){
cur_fix_row = remap_df[replace_row,]
cur_fix_idxs = grep(cur_fix_row$old_net, old_df_cols)
cur_old_names = old_df_cols[cur_fix_idxs]
cur_new_names = gsub(cur_fix_row$old_net, cur_fix_row$new_net, cur_old_names)
new_df_cols[cur_fix_idxs] = cur_new_names
print(old_df_cols[cur_fix_idxs])
print(new_df_cols[cur_fix_idxs])
print('')
}
colnames(schaeffer_mat) = new_df_cols
# persist the renamed matrix for downstream use and as supplementary data
write_csv(schaeffer_mat, paste0(base_dir, '/data/ahba_parcel/schaeffer_ahba_ctx_zWithinSubject_zWithinSample_200_17Net_expr_mat_NEWMAP.csv'))
write_csv(schaeffer_mat, paste0(base_dir, '/supp_data/SuppData_1_schaeffer_ahba_ctx_zWithinSubject_zWithinSample_200_17Net_expr_mat_NEWMAP.csv'))
# apply the same renaming to each donor-specific matrix
for (donor in donor_arr){
donor_mat = read_csv(paste0(base_dir, '/data/ahba_parcel/schaeffer_ahba_ctx_zWithinSubject_zWithinSample_donor',as.character(donor),'_200_17Net_expr_mat.csv'))
colnames(donor_mat) = new_df_cols
write_csv(donor_mat, paste0(base_dir, '/data/ahba_parcel/schaeffer_ahba_ctx_zWithinSubject_zWithinSample_donor',as.character(donor),'_200_17Net_expr_mat_NEWMAP.csv'))
}
# Step 5: Summarise AHBA expression within each Desikan ROI
# -------------
# Desikan atlas
desikan = paste0(base_dir, '/reference_files/desikan_atlas_32k.dlabel.nii')
desikan_cii = read_cifti(desikan, drop_data = TRUE, trans_data = TRUE)
# offset right-hemisphere labels (second half of the data) by 35 so left and
# right ROIs get distinct indices
rh_idxs = ((length(desikan_cii$data)/2)+1):length(desikan_cii$data)
desikan_cii$data[rh_idxs] = desikan_cii$data[rh_idxs]+35
# corresponding parcel labels
desikan_path = paste0(base_dir, '/reference_files/desikan_atlas_32k.txt')
desikan_labels_in = read_csv(col_names=F, file=desikan_path)
desikan_labels = desikan_labels_in$X1[seq(1, length(desikan_labels_in$X1), by=2)]
desikan_labels = c(paste0('lh_',desikan_labels), paste0('rh_',desikan_labels))
# calculate the average gene expression within each parcel
desikan_mat = matrix(NA, ncol=length(desikan_labels), nrow=ncol(reg_micro_scale))
# per-donor versions of the same gene x ROI matrix
donor_desikan_expression = NULL
for (donor in donor_arr){
donor_desikan_expression[[donor]] = as.data.frame(desikan_mat)
}
# NOTE(review): unlike the Schaefer loop above, there is no
# `length(match_idxs) < 2` guard here -- ROIs with no mapped samples will
# produce NaN columns; confirm this is intended.
for (idx in 1:length(desikan_labels)){
write(idx,'')
parcel_idxs = which(desikan_cii$data == idx) # desikan ROI vertex indices
match_idxs = which(reg_samples$bihemi_vertex %in% parcel_idxs) # foci within this parcel
match_samples = reg_samples[match_idxs,] # data for this parcel
desikan_expr = colMeans(reg_micro_scale[match_idxs,]) # average expression of every gene, across samples in this parcel
desikan_mat[,idx] = desikan_expr # plug in values to the pre-allocated matrix
for (donor in donor_arr){
donor_idxs = which(reg_samples$brain == donor)
donor_matches = intersect(donor_idxs, match_idxs)
# expr data for this parcel
donor_match_samples = reg_samples[donor_matches,]
# average expression of every gene, across samples in this parcel
desikan_expr = colMeans(reg_micro_scale[donor_matches,])
donor_desikan_expression[[donor]][,idx] = desikan_expr
}
}
desikan_mat = as.data.frame(desikan_mat)
colnames(desikan_mat) = desikan_labels
desikan_mat$gene = colnames(reg_micro_scale)
# NOTE(review): the "_200_17Net_" tag in these Desikan file names is carried
# over from the Schaefer outputs and is misleading -- confirm before renaming.
write_csv(desikan_mat, path=paste0(base_dir, '/data/ahba/desikan_ahba_ctx_zWithinSubject_zWithinSample_200_17Net_expr_mat.csv'))
write_csv(desikan_mat, paste0(base_dir, '/supp_data/SuppData_2_desikan_ahba_ctx_zWithinSubject_zWithinSample_200_17Net_expr_mat.csv'))
for (donor in donor_arr){
colnames(donor_desikan_expression[[donor]]) = desikan_labels
donor_desikan_expression[[donor]]$gene = colnames(reg_micro_scale)
write_csv(donor_desikan_expression[[donor]], path=paste0(base_dir, '/data/ahba/desikan_ahba_ctx_zWithinSubject_zWithinSample_donor',as.character(donor),'_200_17Net_expr_mat.csv'))
}
| /scripts/00_ahba_preprocess/02_map_ahba_to_surface.R | no_license | JujiaoKang/2020_PNAS_Depression | R | false | false | 11,337 | r | library(tidyverse)
library(cifti)
library(gifti)
library(XML)
library(DescTools)
library(Cairo)
# This script will:
# 1. Map AHBA samples to Schaefer cortical parcels (average)
# 2. Similarly, map to Desikan parcels
# 3. Mid-way through the project, the Schaefer atlas was fixed to correct minor
# errors in the naming convention of individual parcels. We map the old parcel
# labels to the new ones to help with forward compatibility.
# function to plot parcel-wise metric files for viewing with HCP wb_view
plot_matlab = function(values, out_path, parcel_num, net_num){
base_dir = '/gpfs/milgram/project/holmes/kma52/mdd_gene_expr'
dscalar_template = paste0(base_dir, '/data/Schaefer/Schaefer2018_',parcel_num,'Parcels_',net_num,'Networks_order.dscalar.nii')
parcel_info_file = paste0(base_dir, '/data/Schaefer/Schaefer2018_',parcel_num,'Parcels_',net_num,'Networks_order_info.txt')
write_val_file = paste0('/gpfs/milgram/project/holmes/kma52/sst_pvalb_nn/data/ahba/tmp/tmp.txt')
write_delim(delim=' ', x=as.data.frame(values), path=write_val_file,col_names=F)
save_path = out_path
print(save_path)
matfunc = 'plotVolOnSurface'
cmd = paste0('/gpfs/milgram/apps/hpc.rhel7/software/MATLAB/2017b/bin/matlab -nosplash -nodesktop -r "cd(\'/gpfs/milgram/project/holmes/kma52/mdd_gene_expr/scripts/util\');',
matfunc, '(\'', dscalar_template, '\',\'', parcel_info_file, '\',\'',
write_val_file, '\',\'', save_path, '\'); exit;"')
system(cmd)
}
# Step 1: Read data
# -------------
base_dir = '/gpfs/milgram/project/holmes/kma52/mdd_gene_expr'
# read sample information
load(file=paste0(base_dir, '/data/ahba/donorDat_obj.Rdata'), verbose=T)
# read ctx expression data
expr_dat_in = paste0(base_dir, '/data/ahba/ahba_ctx_zWithinSubject_zWithinSample.Rdata')
load(verbose=T, expr_dat_in)
# read sample-to-vertex projection info
sample_info = read_csv(paste0(base_dir, '/data/ahba/sample_info_vertex_mapped.csv'))
sample_dat = sample_info[which(abs(sample_info$mm_to_surf) < 4),]
# Step 2: Identify the closest surface vertex for each AHBA sample
# -------------
# cortical sample by normalized gene expression data frame
reg_micro_scale = as.data.frame(t(ctx_data_scale))
match_idxs = match(ctx_samp_all$well_id, sample_dat$well_id)
reg_samples = sample_dat[match_idxs[!is.na(match_idxs)],]
which(reg_samples$well_id != ctx_samp_all$well_id)
# stitch the left/right verticies together to match Schaeffer parcel cifti format
reg_samples$bihemi_vertex = reg_samples$vertex + 1 # cifti indices index at 0, R indexes at 1
right_ctx_idxs = intersect(grep('right', reg_samples$structure_name), which(reg_samples$top_level == 'CTX'))
reg_samples$bihemi_vertex[right_ctx_idxs] = reg_samples$bihemi_vertex[right_ctx_idxs] + 32492
# Step 3: Summarise expressino data within each Schaefer parcel
# -------------
# Read Schaeffer parcel info (for multiple parcel #'s, network assignments)
donor_arr = c('9861','10021','12876','14380','15496','15697')
donor_specific_expression = NULL
for (parcels in c('200')){
for (net in c('17')){
# schaeffer parcellation by vertex
schaeffer = paste0(base_dir, '/data/Schaefer/Schaefer2018_',parcels,'Parcels_',net,'Networks_order.dscalar.nii')
schaef_cii = read_cifti(schaeffer, drop_data = TRUE, trans_data = TRUE)
# corresponding parcel labels
schaeffer_labels = read_csv(col_names=F, paste0(base_dir, '/data/Schaefer/Schaefer2018_',parcels,'Parcels_',net,'Networks_order_info.txt'))
schaef_labels = schaeffer_labels$X1[grep('Network', schaeffer_labels$X1)]
# calculate the average gene expression within each parcel
schaeffer_mat = matrix(NA, ncol=as.numeric(parcels), nrow=ncol(reg_micro_scale))
for (donor in donor_arr){
donor_specific_expression[[donor]] = as.data.frame(schaeffer_mat)
}
# loop over each parcel, find matching samples, calculate average expression
for (idx in 1:length(schaef_labels)){
write(idx,'')
parcel_idxs = which(schaef_cii$data == idx) # schaeffer indices
match_idxs = which(reg_samples$bihemi_vertex %in% parcel_idxs) # foci within this parcel
if (length(match_idxs) < 2){
next
}
# expr data for this parcel
match_samples = reg_samples[match_idxs,]
schaeffer_expr = apply(reg_micro_scale[match_idxs,], 2, mean)
# plug in values to the pre-allocated matrix
schaeffer_mat[,idx] = schaeffer_expr
#
for (donor in donor_arr){
donor_idxs = which(reg_samples$brain == donor)
donor_matches = intersect(donor_idxs, match_idxs)
if (length(donor_matches) < 2){
next
}
# expr data for this parcel
donor_match_samples = reg_samples[donor_matches,]
donor_schaeffer_expr = apply(reg_micro_scale[donor_matches,], 2, mean)
donor_specific_expression[[donor]][,idx] = donor_schaeffer_expr
}
}
# add column/row names
schaef_out = as.data.frame(schaeffer_mat)
rownames(schaef_out) = colnames(reg_micro_scale)
colnames(schaef_out) = schaef_labels
for (donor in donor_arr){
rownames(donor_specific_expression[[donor]]) = colnames(reg_micro_scale)
colnames(donor_specific_expression[[donor]]) = schaef_labels
donor_specific_expression[[donor]]$gene = colnames(reg_micro_scale)
out_path = paste0(base_dir, '/data/ahba_parcel/schaeffer_ahba_ctx_zWithinSubject_zWithinSample_donor',as.character(donor),'_',parcels,'_',net,'Net_expr_mat.csv')
write_csv(donor_specific_expression[[donor]], path=out_path)
}
schaef_out$gene = colnames(reg_micro_scale)
out_path = paste0(base_dir, '/data/ahba_parcel/schaeffer_ahba_ctx_zWithinSubject_zWithinSample_',parcels,'_',net,'Net_expr_mat.csv')
write_csv(schaef_out, path=out_path)
}
}
# Step 4: Make sure the Schaefer parcel labels reflect the latest/"fixed" naming conventions
# -------------
# Read the ROI-summarized expression data from Step 3 and rename any parcel
# columns whose label changed between the old and corrected Schaefer release.
schaeffer_mat = read_csv(paste0(base_dir, '/data/ahba_parcel/schaeffer_ahba_ctx_zWithinSubject_zWithinSample_200_17Net_expr_mat.csv'))
# Pair each new-convention parcel label with its old-convention counterpart;
# both info files alternate network-label lines with parcel-info lines.
info_file = paste0(base_dir, '/reference_files/Schaefer2018_200Parcels_17Networks_order_info.txt')
new_mapping = read.csv(header=F, info_file)
new_map_df = data.frame(new_net=as.character(new_mapping$V1[grep('17Networks', new_mapping$V1)]),
new_info=as.character(new_mapping$V1[!grepl('17Networks', new_mapping$V1)]), stringsAsFactors=F)
old_info_file = paste0(base_dir, '/data/Schaefer/Schaefer2018_200Parcels_17Networks_order_info.txt')
old_mapping = read.csv(header=F, old_info_file)
old_map_df = data.frame(old_net=as.character(old_mapping$V1[grep('17Networks', old_mapping$V1)]),
old_info=as.character(old_mapping$V1[!grepl('17Networks', old_mapping$V1)]), stringsAsFactors=F)
both_map_df = cbind(new_map_df, old_map_df)
# keep only parcels whose label actually changed
remap_df = both_map_df[both_map_df$old_net != both_map_df$new_net,]
old_df_cols = colnames(schaeffer_mat)
new_df_cols = old_df_cols
# seq_len() instead of 1:nrow(): if no labels changed, 1:nrow(remap_df)
# would iterate over c(1, 0) and index a non-existent row; seq_len(0) skips.
for (replace_row in seq_len(nrow(remap_df))){
cur_fix_row = remap_df[replace_row,]
cur_fix_idxs = grep(cur_fix_row$old_net, old_df_cols)
cur_old_names = old_df_cols[cur_fix_idxs]
cur_new_names = gsub(cur_fix_row$old_net, cur_fix_row$new_net, cur_old_names)
new_df_cols[cur_fix_idxs] = cur_new_names
print(old_df_cols[cur_fix_idxs])
print(new_df_cols[cur_fix_idxs])
print('')
}
colnames(schaeffer_mat) = new_df_cols
# persist the renamed matrix for downstream use and as supplementary data
write_csv(schaeffer_mat, paste0(base_dir, '/data/ahba_parcel/schaeffer_ahba_ctx_zWithinSubject_zWithinSample_200_17Net_expr_mat_NEWMAP.csv'))
write_csv(schaeffer_mat, paste0(base_dir, '/supp_data/SuppData_1_schaeffer_ahba_ctx_zWithinSubject_zWithinSample_200_17Net_expr_mat_NEWMAP.csv'))
# apply the same renaming to each donor-specific matrix
for (donor in donor_arr){
donor_mat = read_csv(paste0(base_dir, '/data/ahba_parcel/schaeffer_ahba_ctx_zWithinSubject_zWithinSample_donor',as.character(donor),'_200_17Net_expr_mat.csv'))
colnames(donor_mat) = new_df_cols
write_csv(donor_mat, paste0(base_dir, '/data/ahba_parcel/schaeffer_ahba_ctx_zWithinSubject_zWithinSample_donor',as.character(donor),'_200_17Net_expr_mat_NEWMAP.csv'))
}
# Step 5: Summarise AHBA expression within each Desikan ROI
# -------------
# Desikan atlas
desikan = paste0(base_dir, '/reference_files/desikan_atlas_32k.dlabel.nii')
desikan_cii = read_cifti(desikan, drop_data = TRUE, trans_data = TRUE)
# offset right-hemisphere labels (second half of the data) by 35 so left and
# right ROIs get distinct indices
rh_idxs = ((length(desikan_cii$data)/2)+1):length(desikan_cii$data)
desikan_cii$data[rh_idxs] = desikan_cii$data[rh_idxs]+35
# corresponding parcel labels
desikan_path = paste0(base_dir, '/reference_files/desikan_atlas_32k.txt')
desikan_labels_in = read_csv(col_names=F, file=desikan_path)
desikan_labels = desikan_labels_in$X1[seq(1, length(desikan_labels_in$X1), by=2)]
desikan_labels = c(paste0('lh_',desikan_labels), paste0('rh_',desikan_labels))
# calculate the average gene expression within each parcel
desikan_mat = matrix(NA, ncol=length(desikan_labels), nrow=ncol(reg_micro_scale))
# per-donor versions of the same gene x ROI matrix
donor_desikan_expression = NULL
for (donor in donor_arr){
donor_desikan_expression[[donor]] = as.data.frame(desikan_mat)
}
# NOTE(review): unlike the Schaefer loop above, there is no
# `length(match_idxs) < 2` guard here -- ROIs with no mapped samples will
# produce NaN columns; confirm this is intended.
for (idx in 1:length(desikan_labels)){
write(idx,'')
parcel_idxs = which(desikan_cii$data == idx) # desikan ROI vertex indices
match_idxs = which(reg_samples$bihemi_vertex %in% parcel_idxs) # foci within this parcel
match_samples = reg_samples[match_idxs,] # data for this parcel
desikan_expr = colMeans(reg_micro_scale[match_idxs,]) # average expression of every gene, across samples in this parcel
desikan_mat[,idx] = desikan_expr # plug in values to the pre-allocated matrix
for (donor in donor_arr){
donor_idxs = which(reg_samples$brain == donor)
donor_matches = intersect(donor_idxs, match_idxs)
# expr data for this parcel
donor_match_samples = reg_samples[donor_matches,]
# average expression of every gene, across samples in this parcel
desikan_expr = colMeans(reg_micro_scale[donor_matches,])
donor_desikan_expression[[donor]][,idx] = desikan_expr
}
}
desikan_mat = as.data.frame(desikan_mat)
colnames(desikan_mat) = desikan_labels
desikan_mat$gene = colnames(reg_micro_scale)
# NOTE(review): the "_200_17Net_" tag in these Desikan file names is carried
# over from the Schaefer outputs and is misleading -- confirm before renaming.
write_csv(desikan_mat, path=paste0(base_dir, '/data/ahba/desikan_ahba_ctx_zWithinSubject_zWithinSample_200_17Net_expr_mat.csv'))
write_csv(desikan_mat, paste0(base_dir, '/supp_data/SuppData_2_desikan_ahba_ctx_zWithinSubject_zWithinSample_200_17Net_expr_mat.csv'))
for (donor in donor_arr){
colnames(donor_desikan_expression[[donor]]) = desikan_labels
donor_desikan_expression[[donor]]$gene = colnames(reg_micro_scale)
write_csv(donor_desikan_expression[[donor]], path=paste0(base_dir, '/data/ahba/desikan_ahba_ctx_zWithinSubject_zWithinSample_donor',as.character(donor),'_200_17Net_expr_mat.csv'))
}
|
#######################
### Data Day 2019 ###
### Power Session ###
#######################
# Title: Interactive mapping of social vulnerability caused by climate change using R
# Authors: Richard Johansen & Mark Chalmers
# University of Cincinnati Libraries
# 4/1/2019
# Code: https://github.com/RAJohansen/DataDay2019
#REFERENCES:
# Social Vulnerability Data: http://artsandsciences.sc.edu/geog/hvri
# Geocomputation in R: https://geocompr.robinlovelace.net/
######################### PART I: Introduction to R ############################
### Step 1: R as a Calculator
1 + 3
### Step 2: Creating objects in R
# Hint: Alt+- is a shortcut for the assignment arrow <-
x <- 1+2
x
y <- x +1
y
### Step 3: Getting Help in R
help(mean)
# HINT: if you can't remember exactly what function you are looking for,
# type the first letters and press Tab for completion, e.g.:
# me<Tab>
# (the bare `me "Tab"` line here was not valid R and would stop the script)
# Not sure what it's called?
# Try a fuzzy search
apropos("mea")
### Step 4: Viewing & Examining a Data set
# Let's explore data using a data set that's contained in R
mtcars <- mtcars
# View our table
# Or click the df object under the data window
View(mtcars)
# Use the names() function to return a list of the variables
names(mtcars)
# Look at the data types or structure of the data
str(mtcars)
# This is very useful when we are analyzing or visualizing data
# Make sure your variables are in the appropriate format!!
## Quick and simple statistical summary of the data
summary(mtcars)
# Finding values from our data table
# Let's look at column 2
mtcars[,2]
# Let's look at row 5
mtcars[5,]
# What value is in row 5 column 3?
mtcars[5,3]
# What if we want to know the max mpg
max(mtcars$mpg)
##################### PART II: Plotting using Base R ############################
### Default Plot (index plot: mpg of each car in row order)
plot(mtcars$mpg)
## Dotchart ##
dotchart(mtcars$mpg, labels=row.names(mtcars))
## Histogram ##
hist(mtcars$mpg)
# Colored Histogram with Different Number of Bins
hist(mtcars$mpg, breaks=10)
## Scatterplot ## (weight vs. fuel economy)
plot(mtcars$wt,mtcars$mpg)
## Box Plots ## (one box per cylinder count, via the formula interface)
boxplot(mtcars$mpg~mtcars$cyl)
# Boxplot with labels
boxplot(mpg~cyl,
data=mtcars,
main="Car Milage Data",
xlab="Number of Cylinders",
ylab="Miles Per Gallon")
########################### PART III: Data Acquisition############################
### Step 1: Install & load required packages
#install.packages(c("tigris","tmap","tidyverse","tabulizer","dplyr","sf","leaflet"))
library(tigris)
library(tmap)
library(tidyverse)
library(tabulizer)
library(dplyr)
library(sf)
library(leaflet)
### Step 2: Extract a web PDF
# Explore file location to ensure accuracy:
website <- "http://artsandsciences.sc.edu/geog/hvri/sites/sc.edu.geog.hvri/files/attachments/SoVI_10_14_Website.pdf"
browseURL(url = website)
# Use URL location to extract the pdf as a table
# (tabulizer requires a working Java install -- presumably available here)
# When you're unfamiliar with a function you can use the ?
?extract_tables
Sovi_table <- extract_tables(website)
# Let's view what exactly is extracted through this process
View(Sovi_table)
#What a mess???
### Step 3: Converting the web-based PDF into csv
# Let's use two more functions to convert the extracted table
# into a more usable and analysis friendly format
# do.call?
# rbind?
# Note: [-length(Sovi_table)] drops the final extracted table before stacking
final <- do.call(rbind, Sovi_table[-length(Sovi_table)])
# Reformat table headers by dropping the first row
final <- as.data.frame(final[2:nrow(final), ])
# Let's label the column names so they can be merged with Census data
headers <- c('GEOID', 'State_FIP', 'County_FIP', 'County_Name', 'CNTY_SoVI',
'Percentile')
# Apply our names to the data frame
names(final) <- headers
# **NOTE** GEOID is the ID code for CENSUS data
# This is mandatory for the next section
### Step 4: Save the table as a csv
# This is helpful for eliminating redundancy and reproducibility
write.csv(final, file='Data/SoVI.csv', row.names=FALSE)
########################### PART IV: Mapping in R #############################
### Step 1: Load spatial objects into R from US Census Tigris files
# In this case we want to load counties
# The tigris package is connected to the Census's database so we can pull data directly
# We want to pull the spatial objects counties and save them as an R object
# NOTE: this might take a couple minutes due to the size of the file (network download)
# Question: How many counties are there in the USA?
# Load USA Counties from tigris package (US CENSUS)
Counties <- counties()
# Convert Large SpatialPolygonsDataFrame to Simple Feature (sf)
Counties_sf <- st_as_sf(Counties)
# Subset data to only the lower 48 (drop AK, HI and the territories by FIPS code)
Counties_Cont48 <- subset(Counties_sf,STATEFP != "69" & STATEFP != "02" & STATEFP != "60" & STATEFP != '66' & STATEFP != "15" & STATEFP != "72" & STATEFP != "78")
# Save Counties_Cont48 as a GeoPackage
st_write(Counties_Cont48, dsn = 'Data/Counties_Cont48.gpkg')
# Select only Florida counties (FIPS 12) & save them
Counties_FL <- subset(Counties_sf,STATEFP == "12")
st_write(Counties_FL, dsn = 'Data/Counties_FL.gpkg')
# Read geopackage back in
Counties_FL <- st_read('Data/Counties_FL.gpkg')
View(Counties_FL)
# geom column holds the county polygons
### Step 2: Merge SoVI csv with our county region spatial object
# Load data from package location if not currently loaded
# We can start directly from the objects in our working environment
# Or we can load the data saved in Part III: Step 4
df <- read.csv('Data/SoVI.csv')
# Create subset of SoVI data for just Florida counties
df_FL <- subset(df,State_FIP == "12")
# Notice the number of rows is exactly the same as Counties_FL
# Now that we have both objects loaded we can merge them using a common field
# This is a very common practice in GIS
# Each object must have the exact same set of unique identifiers for each row
# Using the merge function we can combine the spatial object with our data frame
FL_SoVI <- merge(Counties_FL,df_FL, by = "GEOID", all = FALSE)
### Step 3: Plot using base plot
# We want to plot the spatial object from the values of the first column
# In this case that is the unique ID for each county
plot(FL_SoVI[1])
### Step 4: Mapping with tmap
# tmap uses the same grammar of graphics as ggplot
# We build on our graphics like layers on a cake
# Plot data
tm_shape(FL_SoVI) +
tm_fill()
# Now add our county borders
tm_shape(FL_SoVI) +
tm_borders() +
tm_fill()
# Let's add our SoVI data to explore trends
tm_shape(FL_SoVI) +
tm_borders() +
tm_fill(col = "CNTY_SoVI")
# Manually define label breaks
breaks = c(-6,-3,0,3,6)
tm_shape(FL_SoVI) +
tm_borders() +
tm_fill(col = "CNTY_SoVI",breaks = breaks)
# However in this case negative values correspond to higher resilience (good)
# So lets flip the color scheme
tm_shape(FL_SoVI) +
tm_borders() +
tm_fill(col = "CNTY_SoVI",breaks = breaks, palette = "-RdYlGn")
# We can explore color palettes
tmaptools::palette_explorer()
# Lets choose our own color palette and add a continuous scale bar
tm_shape(FL_SoVI) +
tm_borders() +
tm_fill(col = "CNTY_SoVI", style = "cont", palette = "viridis")
# Add some cartographic elements
tm_shape(FL_SoVI) +
tm_borders() +
tm_fill(col = "CNTY_SoVI", style = "cont", palette = "viridis") +
tm_layout(title = "Florida SoVI Vulnerability Index by County",
legend.outside = FALSE,
frame = TRUE,
inner.margins = 0.1,
legend.title.size = 1.5,
legend.text.size = 1.1) +
tm_compass(type = "arrow", position = c("right", "top"), size = 2) +
tm_scale_bar(breaks = c(0, 100, 200),size = 0.8)
### Finally Lets save our plot
#Saving a plot
jpeg('My_Awesome_Map.jpg', width = 7, height = 7, units = "in", res =300)
tm_shape(FL_SoVI) +
tm_borders() +
tm_fill(col = "CNTY_SoVI", style = "cont", palette = "viridis") +
tm_layout(title = "Florida SoVI Vulnerability Index by County",
legend.outside = FALSE,
frame = TRUE,
inner.margins = 0.1,
legend.title.size = 1.5,
legend.text.size = 1.1) +
tm_compass(type = "arrow", position = c("right", "top"), size = 2) +
tm_scale_bar(breaks = c(0, 100, 200),size = 0.8)
dev.off()
################ PART V: Interactive mapping with tmap & leafleft ####################
# Load the leaflet package
library(leaflet)
# Lets examine the leaflet documentation
?leaflet
browseURL(url = "https://rstudio.github.io/leaflet/")
### Step 1: Convert Static tmap to Interactive Map using leaflet
# Create R object from map
map <- tm_shape(FL_SoVI) +
tm_borders() +
tm_fill(col = "CNTY_SoVI",
palette = "-RdYlGn",
id = "NAME",
popup.vars = c("NAME","CNTY_SoVI"))
#Call that object using tmap_leaflet function
tmap_leaflet(map)
### Step 2: Complex Mapping with leaflet
# Create duplicate maps so we can do a side by side comparison of SoVI and Flood Zone
FL_pop <- read.csv("Data/FL_Population.csv")
FL_pop$NAMELSAD <- FL_pop$County
# Merge population data into spatial object
FL_SoVI <- merge(FL_SoVI,FL_pop, by = "NAMELSAD", all = FALSE)
#Add Flood zone lines
FL_slr_10ft <- st_read("Data/FL_slr_10ft.gpkg")
facets <- c("CNTY_SoVI","Population")
map_facets <- tm_shape(FL_SoVI) +
tm_borders() +
tm_fill(col = facets,
palette = "-RdYlGn",
id = "NAME",
popup.vars = c("NAME","CNTY_SoVI", "Population")) +
tm_shape(FL_slr_10ft) +
tm_polygons(col = "blue", alpha = 0.75) +
tm_facets(nrow = 1, sync = TRUE, free.scales.fill =TRUE)
tmap_leaflet(map_facets)
### Step 3: Adding a basemap to a Interactive Map
map_facets_base <- tm_basemap(leaflet::providers$Esri.WorldImagery) +
tm_shape(FL_SoVI) +
tm_polygons(facets) +
tm_borders() +
tm_fill(col = facets,
id = "NAME",
palette = "-RdYlGn",
popup.vars = c("NAME","CNTY_SoVI", "Population")) +
tm_shape(FL_slr_10ft) +
tm_polygons(col = "blue", alpha = 0.75) +
tm_facets(nrow = 1, sync = TRUE)
tmap_leaflet(map_facets_base)
### Step 4: Interactive Map using leaflet only
pal <- colorNumeric(
palette = "RdYlBu",
domain = FL_SoVI$CNTY_SoVI
)
m <- leaflet(FL_SoVI) %>%
addTiles(group = "Open Street Map") %>%
addProviderTiles(leaflet::providers$Esri.WorldImagery, group = "Satellite Imagery") %>%
addPolygons(color = "#444444", weight = 1, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 0.75,
fillColor = ~colorQuantile("RdYlBu", CNTY_SoVI)(CNTY_SoVI),
highlightOptions = highlightOptions(color = "white", weight = 2,
bringToFront = TRUE), group = "SoVI") %>%
addPolygons(data = FL_slr_10ft, fillColor = "blue", fillOpacity = 0.75, group = "Sea Level Rise") %>%
addLayersControl(baseGroups = c("Satellite Imagery", "Open Street Map"),
overlayGroups = c("Sea Level Rise", "SoVI"),
options = layersControlOptions(collapsed = FALSE)) %>%
addLegend("bottomleft",
title = "Vulnerability (SoVI)",
pal = pal,
values = ~CNTY_SoVI,
opacity = 1)
m # Print the map
| /Scripts/Mapping_Social_Vulnerability.R | permissive | RAJohansen/DataDay2019 | R | false | false | 10,951 | r | #######################
### Data Day 2019 ###
### Power Session ###
#######################
# Title: Interactive mapping of social vulnerability caused by climate change using R
# Authors: Richard Johansen & Mark Chalmers
# University of Cincinnati Libraries
# 4/1/2019
# Code: https://github.com/RAJohansen/DataDay2019
#REFERENCES:
# Social Vulnerability Data: http://artsandsciences.sc.edu/geog/hvri
# Geocomputation in R: https://geocompr.robinlovelace.net/
######################### PART I :Introduction to R ############################
### Step 1: R as a Calculator
1 + 3
### Step 2: Creating objects in R
# Hint alt - is a shortcut for the < -
x <- 1+2
x
y <- x +1
y
### Step 3: Getting Help in R
help(mean)
#HINT: if you can't remember exactly what function you are looking for, Use Tab
me "Tab"
# Not sure what its called
# Try a fuzzy search
apropos("mea")
### Step 4: Viewing & Examinging a Data set
# Lets explore data using a data set thats contained in R
mtcars <- mtcars
# View our table
# Or click the df object under the data window
View(mtcars)
# Use the names() function to return a list the variables
names(mtcars)
#Look at the data types or structure of the data
str(mtcars)
# This is very useful when we analyzing or visualizing data
# Make sure your variables are in the appropiate format!!
## Quick and simple statistical summary of the data
summary(mtcars)
# Finding values from out data table
# Lets look at column 2
mtcars[,2]
# Lets look at row 5
mtcars[5,]
# What value is in row 5 column 3?
mtcars[5,3]
# What if we want to know the max mpg
max(mtcars$mpg)
##################### PART II: Plotting using Base R ############################
### Default Plot
plot(mtcars$mpg)
## Dotchart ##
dotchart(mtcars$mpg, labels=row.names(mtcars))
## Histogram ##
hist(mtcars$mpg)
# Colored Histogram with Different Number of Bins
hist(mtcars$mpg, breaks=10)
## Scatterplot ##
plot(mtcars$wt,mtcars$mpg)
## Box Plots ##
boxplot(mtcars$mpg~mtcars$cyl)
# Boxplot with labels
boxplot(mpg~cyl,
data=mtcars,
main="Car Milage Data",
xlab="Number of Cylinders",
ylab="Miles Per Gallon")
########################### PART III:Data Acquisition############################
### Step 1: Install & load required packages
#install.packages(c("tigris","tmap","tidyverse","tabulizer","dplyr","sf","leaflet"))
library(tigris)
library(tmap)
library(tidyverse)
library(tabulizer)
library(dplyr)
library(sf)
library(leaflet)
### Step 2: Extract a web PDF
# Explore file location to ensure accuracy:
website <- "http://artsandsciences.sc.edu/geog/hvri/sites/sc.edu.geog.hvri/files/attachments/SoVI_10_14_Website.pdf"
browseURL(url = website)
# Use URL location to extract pdf as a table
# When you're unfamilar with a function you can use the ?
?extract_tables
Sovi_table <- extract_tables(website)
# Lets view what exactly is extracted through this process
View(Sovi_table)
#What a mess???
### Step 3: Converting the web-based PDF into csv
# Lets use two more functions to convert the extracted table
# into a more usable and analysis friendly format
# do.call?
# rbind?
final <- do.call(rbind, Sovi_table[-length(Sovi_table)])
# Reformate table headers by dropping the first row
final <- as.data.frame(final[2:nrow(final), ])
# Lets lable the column names so they can merged with Census data
headers <- c('GEOID', 'State_FIP', 'County_FIP', 'County_Name', 'CNTY_SoVI',
'Percentile')
# Apply our names to the data frame
names(final) <- headers
# **NOTE** GEOID is the ID code for CENSUS data
# This is mandatory for the next section
### Step 4: Save the table as a csv
# This is helpful for eliminating redundancy and reproducibility
write.csv(final, file='Data/SoVI.csv', row.names=FALSE)
########################### PART IV: Mapping in R #############################
### Step 1: Load spatial objects into R from US Census Tigris files
# In this case we want to load counties
# The tigris package is connected to the Census's database so we can pull data directly
# We want to pull the spatial objects counties and save them as an R object
# NOTE: this might take a couple minutes due to the size of the file
# Question: How many counties are there in the USA?
# Load USA Counties from tigris package (US CENSUS)
Counties <- counties()
# Convert Large SpatialPolygonsDataFrame to Simple Feature (sf)
Counties_sf <- st_as_sf(Counties)
#Subset data to only lower 48
Counties_Cont48 <- subset(Counties_sf,STATEFP != "69" & STATEFP != "02" & STATEFP != "60" & STATEFP != '66' & STATEFP != "15" & STATEFP != "72" & STATEFP != "78")
# Save Counties_Cont48 as CSV & GeoPackage
st_write(Counties_Cont48, dsn = 'Data/Counties_Cont48.gpkg')
#Select only Florida Counties & Save them
Counties_FL <- subset(Counties_sf,STATEFP == "12")
st_write(Counties_FL, dsn = 'Data/Counties_FL.gpkg')
#Read geopackage
Counties_FL <- st_read('Data/Counties_FL.gpkg')
View(Counties_FL)
# geom column
### Step 2: Merge SoVI csv with our county region spatial object
# Load data from package location if not currently loaded
# We can start directly from the objects in our working environment
#Or we can load the data saved in Part 1: Step 4
df <- read.csv('Data/SoVI.csv')
# Create subset of SoVI data for just Florida Counties
df_FL <- subset(df,State_FIP == "12")
#Notice the number of rows is exactly the same as Counties_FL
# Now that we have both of objects loaded we can merged them using a common field
# This is a very common practice is GIS
# Each object must have the exact same set of unique identifiers for each row
# Using the merge fucntion we can combine the spatial object with our data frame
FL_SoVI <- merge(Counties_FL,df_FL, by = "GEOID", all = FALSE)
### Step 3: Plot using base plot
# We want to plot the spatial object from the values of the first column
# In this case that is the unique ID for each column
plot(FL_SoVI[1])
### Step 4: Mapping with tmap
# tmap uses the same grammar of graphics as ggplot
# We build on our graphics like layers on a cake
# Plot data
tm_shape(FL_SoVI) +
tm_fill()
# Now add our county borders
tm_shape(FL_SoVI) +
tm_borders() +
tm_fill()
# Lets add our SoVI data to explore trends
tm_shape(FL_SoVI) +
tm_borders() +
tm_fill(col = "CNTY_SoVI")
# Manually define lable breaks
breaks = c(-6,-3,0,3,6)
tm_shape(FL_SoVI) +
tm_borders() +
tm_fill(col = "CNTY_SoVI",breaks = breaks)
# However in this case negative values correspond to higher resilience (good)
# So lets flip the color scheme
tm_shape(FL_SoVI) +
tm_borders() +
tm_fill(col = "CNTY_SoVI",breaks = breaks, palette = "-RdYlGn")
# We can explore color palettes
tmaptools::palette_explorer()
# Lets choose our own color palette and add a continuous scale bar
tm_shape(FL_SoVI) +
tm_borders() +
tm_fill(col = "CNTY_SoVI", style = "cont", palette = "viridis")
# Add some cartographic elements
tm_shape(FL_SoVI) +
tm_borders() +
tm_fill(col = "CNTY_SoVI", style = "cont", palette = "viridis") +
tm_layout(title = "Florida SoVI Vulnerability Index by County",
legend.outside = FALSE,
frame = TRUE,
inner.margins = 0.1,
legend.title.size = 1.5,
legend.text.size = 1.1) +
tm_compass(type = "arrow", position = c("right", "top"), size = 2) +
tm_scale_bar(breaks = c(0, 100, 200),size = 0.8)
### Finally Lets save our plot
#Saving a plot
jpeg('My_Awesome_Map.jpg', width = 7, height = 7, units = "in", res =300)
tm_shape(FL_SoVI) +
tm_borders() +
tm_fill(col = "CNTY_SoVI", style = "cont", palette = "viridis") +
tm_layout(title = "Florida SoVI Vulnerability Index by County",
legend.outside = FALSE,
frame = TRUE,
inner.margins = 0.1,
legend.title.size = 1.5,
legend.text.size = 1.1) +
tm_compass(type = "arrow", position = c("right", "top"), size = 2) +
tm_scale_bar(breaks = c(0, 100, 200),size = 0.8)
dev.off()
################ PART V: Interactive mapping with tmap & leafleft ####################
# Load the leaflet package
library(leaflet)
# Lets examine the leaflet documentation
?leaflet
browseURL(url = "https://rstudio.github.io/leaflet/")
### Step 1: Convert Static tmap to Interactive Map using leaflet
# Create R object from map
map <- tm_shape(FL_SoVI) +
tm_borders() +
tm_fill(col = "CNTY_SoVI",
palette = "-RdYlGn",
id = "NAME",
popup.vars = c("NAME","CNTY_SoVI"))
#Call that object using tmap_leaflet function
tmap_leaflet(map)
### Step 2: Complex Mapping with leaflet
# Create duplicate maps so we can do a side by side comparison of SoVI and Flood Zone
FL_pop <- read.csv("Data/FL_Population.csv")
FL_pop$NAMELSAD <- FL_pop$County
# Merge population data into spatial object
FL_SoVI <- merge(FL_SoVI,FL_pop, by = "NAMELSAD", all = FALSE)
#Add Flood zone lines
FL_slr_10ft <- st_read("Data/FL_slr_10ft.gpkg")
facets <- c("CNTY_SoVI","Population")
map_facets <- tm_shape(FL_SoVI) +
tm_borders() +
tm_fill(col = facets,
palette = "-RdYlGn",
id = "NAME",
popup.vars = c("NAME","CNTY_SoVI", "Population")) +
tm_shape(FL_slr_10ft) +
tm_polygons(col = "blue", alpha = 0.75) +
tm_facets(nrow = 1, sync = TRUE, free.scales.fill =TRUE)
tmap_leaflet(map_facets)
### Step 3: Adding a basemap to a Interactive Map
map_facets_base <- tm_basemap(leaflet::providers$Esri.WorldImagery) +
tm_shape(FL_SoVI) +
tm_polygons(facets) +
tm_borders() +
tm_fill(col = facets,
id = "NAME",
palette = "-RdYlGn",
popup.vars = c("NAME","CNTY_SoVI", "Population")) +
tm_shape(FL_slr_10ft) +
tm_polygons(col = "blue", alpha = 0.75) +
tm_facets(nrow = 1, sync = TRUE)
tmap_leaflet(map_facets_base)
### Step 4: Interactive Map using leaflet only
pal <- colorNumeric(
palette = "RdYlBu",
domain = FL_SoVI$CNTY_SoVI
)
m <- leaflet(FL_SoVI) %>%
addTiles(group = "Open Street Map") %>%
addProviderTiles(leaflet::providers$Esri.WorldImagery, group = "Satellite Imagery") %>%
addPolygons(color = "#444444", weight = 1, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 0.75,
fillColor = ~colorQuantile("RdYlBu", CNTY_SoVI)(CNTY_SoVI),
highlightOptions = highlightOptions(color = "white", weight = 2,
bringToFront = TRUE), group = "SoVI") %>%
addPolygons(data = FL_slr_10ft, fillColor = "blue", fillOpacity = 0.75, group = "Sea Level Rise") %>%
addLayersControl(baseGroups = c("Satellite Imagery", "Open Street Map"),
overlayGroups = c("Sea Level Rise", "SoVI"),
options = layersControlOptions(collapsed = FALSE)) %>%
addLegend("bottomleft",
title = "Vulnerability (SoVI)",
pal = pal,
values = ~CNTY_SoVI,
opacity = 1)
m # Print the map
|
#http://nces.ed.gov/ccd/pubschuniv.asp
#ccd <- read.table("sc121a_supp.txt", sep="\t", header=T, fill=T, stringsAsFactors = F)
#md <- ccd[ccd$FIPST==24,]
#FCC geocode API website: https://www.fcc.gov/developers/census-block-conversions-api
#NCES file layout: https://nces.ed.gov/ccd/Data/txt/sc131alay.txt
library(haven)
require(dplyr)
library('RCurl')
library('XML')
library('httr')
#setwd('/Users/zoe/Dropbox/rclass')
setwd('C:/Users/rrosso/Dropbox/rclass')
url = "http://nces.ed.gov/ccd/Data/zip/sc131a_supp_sas.zip"
temp <- tempfile()
download.file(url, temp)
ccd2 <- read_sas(unzip(temp))
unlink(temp)
ccd2_pregeo <- ccd2[,c("NCESSCH","MSTREE","MCITY","MSTATE","MZIP","MZIP4","LATCOD","LONCOD")]
write.csv(ccd2_pregeo, file="us_nces_pregeo.csv")
geoTest = getURL("http://data.fcc.gov/api/block/2010/find?latitude=40.0&longitude=-85")
fcc = handle("http://data.fcc.gov/api/block/2010/find?")
fccTest <- GET("http://data.fcc.gov/api/block/2010/find", query = list(latitude = 40.0, longitude = -85))
#how to parse XML response (get JSON instead?)
#how to submit GET for each lat/long on file
#how to join response block/block group codes onto file | /code/nces_apitest.R | no_license | arkut/rnces | R | false | false | 1,154 | r | #http://nces.ed.gov/ccd/pubschuniv.asp
#ccd <- read.table("sc121a_supp.txt", sep="\t", header=T, fill=T, stringsAsFactors = F)
#md <- ccd[ccd$FIPST==24,]
#FCC geocode API website: https://www.fcc.gov/developers/census-block-conversions-api
#NCES file layout: https://nces.ed.gov/ccd/Data/txt/sc131alay.txt
library(haven)
require(dplyr)
library('RCurl')
library('XML')
library('httr')
#setwd('/Users/zoe/Dropbox/rclass')
setwd('C:/Users/rrosso/Dropbox/rclass')
url = "http://nces.ed.gov/ccd/Data/zip/sc131a_supp_sas.zip"
temp <- tempfile()
download.file(url, temp)
ccd2 <- read_sas(unzip(temp))
unlink(temp)
ccd2_pregeo <- ccd2[,c("NCESSCH","MSTREE","MCITY","MSTATE","MZIP","MZIP4","LATCOD","LONCOD")]
write.csv(ccd2_pregeo, file="us_nces_pregeo.csv")
geoTest = getURL("http://data.fcc.gov/api/block/2010/find?latitude=40.0&longitude=-85")
fcc = handle("http://data.fcc.gov/api/block/2010/find?")
fccTest <- GET("http://data.fcc.gov/api/block/2010/find", query = list(latitude = 40.0, longitude = -85))
#how to parse XML response (get JSON instead?)
#how to submit GET for each lat/long on file
#how to join response block/block group codes onto file |
multiple_rep_plot = function(file,
a_individual = TRUE, b_individual = TRUE, x_individual = FALSE,
a_average = TRUE, b_average = TRUE, x_average = FALSE){
# create main title
main_title = bquote("Mulitple Games")
# plot all repetitions for a and b, adding in x for consecutive
ymax = c(0, 1.1 * max(c(file$a_average[[1]], file$b_average[[1]], file$x_average[[1]])))
plot(x = -100, y = -100, type = "p", lwd = 1, lty = 1,
xlim = c(0, max(file$time)), ylim = ymax, xlab = "", ylab = "")
mtext("Time", side = 1, line = 2, cex = 1.5)
mtext("Counts", side = 2, line = 2, cex = 1.5)
mtext(main_title, side = 3, line = 1.5, cex = 2.5)
if(a_individual){
for(i in 1:file$num){
color_a_individual = switch(file$mechanism[i],
"monomolecular" = "deeppink",
"bimolecular" = "dodgerblue",
"catalytic" = "yellow",
"autocatalytic" = "peru",
"consecutive" = "greenyellow",
"equilibrium" = "violetred")
matlines(x = file$time, y = file$a_reps[[i]], lty = 1, lwd = 0.4, col = color_a_individual)
}
}
if(b_individual){
for(i in 1:file$num){
color_b_individual = switch(file$mechanism[i],
"monomolecular" = "red2",
"bimolecular" = "blue",
"catalytic" = "orange",
"autocatalytic" = "brown",
"consecutive" = "green4",
"equilibrium" = "purple4")
matlines(x = file$time, y = file$b_reps[[i]], lty = 1, lwd = 0.4, col = color_b_individual)
}
}
if (x_individual && "consecutive" %in% file$mechanism){
for(i in 1:file$num){
matlines(x = file$time, y = file$x_reps[[i]], lty = 1, lwd = 0.4, col = "darkgreen")
}
}
# plot averages for a and b, adding in x for consecutive
if(a_average){
for(i in 1:file$num){
color_a_average = switch(file$mechanism[i],
"monomolecular" = "deeppink",
"bimolecular" = "dodgerblue",
"catalytic" = "yellow",
"autocatalytic" = "peru",
"consecutive" = "greenyellow",
"equilibrium" = "violetred")
lines(x = file$time, y = file$a_average[[i]], lwd = 2, lty = 2, col = color_a_average)
}
}
if(b_average){
for(i in 1:file$num){
color_b_average = switch(file$mechanism[i],
"monomolecular" = "red2",
"bimolecular" = "blue",
"catalytic" = "orange",
"autocatalytic" = "brown4",
"consecutive" = "green4",
"equilibrium" = "purple4")
lines(x = file$time, y = file$b_average[[i]], lwd = 2, lty = 2, col = color_b_average)
}
}
if (x_average ||x_individual && !("consecutive" %in% file$mechanism)){
for(i in 1:file$num){
lines(x = file$time, y = file$x_average[[i]], lwd = 2, lty = 2, col = "darkgreen")
}
}
# add legend to plot
legend_text = vector()
legend_pch = vector()
legend_color = vector()
if("monomolecular" %in% file$mechanism){
legend_text = c(legend_text, "monomolecular")
legend_pch = c(legend_pch, 15)
legend_color = c(legend_color, "red")
}
if("bimolecular" %in% file$mechanism){
legend_text = c(legend_text, "bimolecular")
legend_pch = c(legend_pch, 15)
legend_color = c(legend_color, "blue")
}
if("catalytic" %in% file$mechanism){
legend_text = c(legend_text, "catalytic")
legend_pch = c(legend_pch, 15)
legend_color = c(legend_color, "yellow")
}
if("autocatalytic" %in% file$mechanism){
legend_text = c(legend_text, "autocatalytic")
legend_pch = c(legend_pch, 15)
legend_color = c(legend_color, "brown")
}
if("consecutive" %in% file$mechanism){
legend_text = c(legend_text, "consecutive")
legend_pch = c(legend_pch, 15)
legend_color = c(legend_color, "green")
}
if("equilibrium" %in% file$mechanism){
legend_text = c(legend_text, "equiribrium")
legend_pch = c(legend_pch, 15)
legend_color = c(legend_color, "purple")
}
legend(x = "top", horiz = TRUE,
legend = legend_text, pch = legend_pch, pt.cex = 1.5,
col = legend_color, bty = "n")
grid(col = "black")
}
| /multiple_rep_plot.R | permissive | MasayukiNagai/Kinetics-Games | R | false | false | 4,687 | r | multiple_rep_plot = function(file,
a_individual = TRUE, b_individual = TRUE, x_individual = FALSE,
a_average = TRUE, b_average = TRUE, x_average = FALSE){
# create main title
main_title = bquote("Mulitple Games")
# plot all repetitions for a and b, adding in x for consecutive
ymax = c(0, 1.1 * max(c(file$a_average[[1]], file$b_average[[1]], file$x_average[[1]])))
plot(x = -100, y = -100, type = "p", lwd = 1, lty = 1,
xlim = c(0, max(file$time)), ylim = ymax, xlab = "", ylab = "")
mtext("Time", side = 1, line = 2, cex = 1.5)
mtext("Counts", side = 2, line = 2, cex = 1.5)
mtext(main_title, side = 3, line = 1.5, cex = 2.5)
if(a_individual){
for(i in 1:file$num){
color_a_individual = switch(file$mechanism[i],
"monomolecular" = "deeppink",
"bimolecular" = "dodgerblue",
"catalytic" = "yellow",
"autocatalytic" = "peru",
"consecutive" = "greenyellow",
"equilibrium" = "violetred")
matlines(x = file$time, y = file$a_reps[[i]], lty = 1, lwd = 0.4, col = color_a_individual)
}
}
if(b_individual){
for(i in 1:file$num){
color_b_individual = switch(file$mechanism[i],
"monomolecular" = "red2",
"bimolecular" = "blue",
"catalytic" = "orange",
"autocatalytic" = "brown",
"consecutive" = "green4",
"equilibrium" = "purple4")
matlines(x = file$time, y = file$b_reps[[i]], lty = 1, lwd = 0.4, col = color_b_individual)
}
}
if (x_individual && "consecutive" %in% file$mechanism){
for(i in 1:file$num){
matlines(x = file$time, y = file$x_reps[[i]], lty = 1, lwd = 0.4, col = "darkgreen")
}
}
# plot averages for a and b, adding in x for consecutive
if(a_average){
for(i in 1:file$num){
color_a_average = switch(file$mechanism[i],
"monomolecular" = "deeppink",
"bimolecular" = "dodgerblue",
"catalytic" = "yellow",
"autocatalytic" = "peru",
"consecutive" = "greenyellow",
"equilibrium" = "violetred")
lines(x = file$time, y = file$a_average[[i]], lwd = 2, lty = 2, col = color_a_average)
}
}
if(b_average){
for(i in 1:file$num){
color_b_average = switch(file$mechanism[i],
"monomolecular" = "red2",
"bimolecular" = "blue",
"catalytic" = "orange",
"autocatalytic" = "brown4",
"consecutive" = "green4",
"equilibrium" = "purple4")
lines(x = file$time, y = file$b_average[[i]], lwd = 2, lty = 2, col = color_b_average)
}
}
if (x_average ||x_individual && !("consecutive" %in% file$mechanism)){
for(i in 1:file$num){
lines(x = file$time, y = file$x_average[[i]], lwd = 2, lty = 2, col = "darkgreen")
}
}
# add legend to plot
legend_text = vector()
legend_pch = vector()
legend_color = vector()
if("monomolecular" %in% file$mechanism){
legend_text = c(legend_text, "monomolecular")
legend_pch = c(legend_pch, 15)
legend_color = c(legend_color, "red")
}
if("bimolecular" %in% file$mechanism){
legend_text = c(legend_text, "bimolecular")
legend_pch = c(legend_pch, 15)
legend_color = c(legend_color, "blue")
}
if("catalytic" %in% file$mechanism){
legend_text = c(legend_text, "catalytic")
legend_pch = c(legend_pch, 15)
legend_color = c(legend_color, "yellow")
}
if("autocatalytic" %in% file$mechanism){
legend_text = c(legend_text, "autocatalytic")
legend_pch = c(legend_pch, 15)
legend_color = c(legend_color, "brown")
}
if("consecutive" %in% file$mechanism){
legend_text = c(legend_text, "consecutive")
legend_pch = c(legend_pch, 15)
legend_color = c(legend_color, "green")
}
if("equilibrium" %in% file$mechanism){
legend_text = c(legend_text, "equiribrium")
legend_pch = c(legend_pch, 15)
legend_color = c(legend_color, "purple")
}
legend(x = "top", horiz = TRUE,
legend = legend_text, pch = legend_pch, pt.cex = 1.5,
col = legend_color, bty = "n")
grid(col = "black")
}
|
# April 13, 2022 analyze m6A results from m6anet method
# July 21, 2022 replot
# Load the per-sample m6anet site-level results and tag each table with its
# replicate label and experimental condition before stacking them into one
# long table `mod`.
sample_sheet <- data.frame(
  rep       = c("C1", "C2", "E1", "E2", "M1", "M2"),
  condition = c("Control", "Control", "shEZH2", "shEZH2", "shMETTLE3", "shMETTLE3"),
  stringsAsFactors = FALSE
)
per_sample <- Map(
  function(rep_label, cond_label) {
    dt <- data.table::fread(paste0("./ont/m6anet/", rep_label, ".data.result.csv.gz"))
    dt$rep <- rep_label
    dt$condition <- cond_label
    dt
  },
  sample_sheet$rep,
  sample_sheet$condition
)
# rbind all samples into a single table
mod <- data.table::rbindlist(per_sample)
mod$condition <- factor(mod$condition, levels = c("Control", "shEZH2", "shMETTLE3"))
# boxplot to check the global methylation level across samples ---
library(ggpubr)
# Pairwise Wilcoxon rank-sum tests on per-site modification probabilities:
# once between conditions (FDR-adjusted) and once between individual
# replicates (Bonferroni-adjusted).
stat.test <- compare_means(
  probability_modified ~ condition,
  data = mod,
  method = "wilcox.test",
  p.adjust.method = "fdr"
) #, ref.group = "Control"
stat.test2 <- compare_means(
  probability_modified ~ rep,
  data = mod,
  method = "wilcox.test",
  p.adjust.method = "bonferroni"
)
stat.test2
# Recorded condition-level output (all pairs significant; note the direction
# is opposite compared to running wilcox.test alone):
#   probability_modified  Control vs shEZH2      p.adj < 2e-16   ****
#   probability_modified  Control vs shMETTLE3   p.adj < 2e-16   ****
#   probability_modified  shEZH2  vs shMETTLE3   p.adj = 1.6e-05 ****
# Boxplot of per-site probability by condition, with the pairwise
# significance brackets from stat.test drawn manually.
cond_palette <- c("#a50f15", "#fb6a4a", "#fcbba1")
p <- ggboxplot(
  mod,
  x = "condition",
  y = "probability_modified",
  fill = "condition",
  palette = cond_palette,
  xlab = "",
  ylab = "Probability of m6A",
  width = 0.3
) +
  theme(legend.position = "none")
p + stat_pvalue_manual(
  stat.test,
  label = "p.signif",
  y.position = 1.1,
  tip.length = 0.02,
  step.increase = 0.075
  # position = position_dodge(0.2)
)
ggsave("./ont/m6anet/global.mod.probablity.boxplot.pdf")
# july 21 2022
ggsave("./ont/figure/m6anet.global.mod.probablity.boxplot.svg", width = 5, height = 5)
# find common transcript position across samples -------------------------------
# Re-read the raw m6anet tables: the copies loaded above were modified
# in place (rep/condition columns added), so fresh reads keep this section
# independent.
c1 <- data.table::fread("./ont/m6anet/C1.data.result.csv.gz")
c2 <- data.table::fread("./ont/m6anet/C2.data.result.csv.gz")
e1 <- data.table::fread("./ont/m6anet/E1.data.result.csv.gz")
e2 <- data.table::fread("./ont/m6anet/E2.data.result.csv.gz")
m1 <- data.table::fread("./ont/m6anet/M1.data.result.csv.gz")
m2 <- data.table::fread("./ont/m6anet/M2.data.result.csv.gz")
# Keep only sites detected in BOTH replicates of a condition:
# full_join + na.omit drops any row missing in either replicate
# (na.omit also drops rows with NA in any original column, if present).
control <- dplyr::full_join(c1,c2,by=c("transcript_id","transcript_position"))
control <- na.omit(control)
shE <- dplyr::full_join(e1,e2,by=c("transcript_id","transcript_position"))
shE <- na.omit(shE)
shM <- dplyr::full_join(m1,m2,by=c("transcript_id","transcript_position"))
shM <- na.omit(shM)
# Union of the per-condition site sets; a site absent from a condition gets
# NA in that condition's columns.
mod2 <- purrr::reduce(list(control,shE,shM), dplyr::full_join, by = c("transcript_id","transcript_position"))
# rename columns ---
# Columns 3:14 are the paired (n_reads, probability_modified) columns from
# the six joined tables, in join order C1, C2, E1, E2, M1, M2.
names(mod2)[c(3:14)] <- c(paste0(c("n_reads","probability_modified"),".C1"),
paste0(c("n_reads","probability_modified"),".C2"),
paste0(c("n_reads","probability_modified"),".E1"),
paste0(c("n_reads","probability_modified"),".E2"),
paste0(c("n_reads","probability_modified"),".M1"),
paste0(c("n_reads","probability_modified"),".M2"))
# mean within condition
# Index pairs 4/6, 8/10, 12/14 are the probability_modified columns of each
# replicate pair after the rename above; na.rm=TRUE lets a site present in
# only one condition still get a mean for the others.
mod2.mean <- data.frame(transcript_id = mod2$transcript_id,
transcript_position = mod2$transcript_position,
probability.control = rowMeans(mod2[,c(4,6)],na.rm=TRUE),
probability.shEZH2 = rowMeans(mod2[,c(8,10)],na.rm=TRUE),
probability.shMETTLE3 = rowMeans(mod2[,c(12,14)],na.rm=TRUE))
# Recorded global mean probability per condition:
mean(mod2.mean$probability.control,na.rm=TRUE) # 0.2892524
mean(mod2.mean$probability.shEZH2,na.rm=TRUE) # 0.2626877
mean(mod2.mean$probability.shMETTLE3,na.rm=TRUE) # 0.2556337
# test ---
# Pairwise Wilcoxon rank-sum tests on the per-site condition means
# (recorded p-values in the trailing comments).
wilcox.test(mod2.mean$probability.control, mod2.mean$probability.shEZH2) # p-value < 2.2e-16
wilcox.test(mod2.mean$probability.control, mod2.mean$probability.shMETTLE3) # p-value < 2.2e-16
wilcox.test(mod2.mean$probability.shMETTLE3, mod2.mean$probability.shEZH2) # p-value = 0.001795
# calculate the mean difference ---
# Per-site knockdown effect: drop in mean modification probability relative
# to control, for each knockdown condition.
mod2.mean.diff <- transform(
  mod2.mean,
  diff_ctrl.shE = probability.control - probability.shEZH2,
  diff_ctrl.shM = probability.control - probability.shMETTLE3
)
# Quick base-graphics look at whether the two knockdowns shift the same sites,
# followed by a formal correlation test.
plot(mod2.mean.diff$diff_ctrl.shE, mod2.mean.diff$diff_ctrl.shM)
cor.test(mod2.mean.diff$diff_ctrl.shE, mod2.mean.diff$diff_ctrl.shM)
# Recorded Pearson correlation of the two effect sizes:
#   t = 267.54, df = 139862, p-value < 2.2e-16
#   95% CI: 0.5783480 - 0.5852814, cor = 0.5818253
# scatter plot ---
# NOTE(review): the printed test above is Pearson but the plot label below
# uses Spearman -- confirm which is intended.
ggscatter(
  mod2.mean.diff,
  x = "diff_ctrl.shE",
  y = "diff_ctrl.shM",
  color = "#bdbdbd",
  add = "reg.line",                       # Add regression line
  conf.int = TRUE,                        # Add confidence interval
  add.params = list(color = "blue",
                    fill = "lightgray")
) +
  stat_cor(method = "spearman", label.x = 0.3, label.y = 0.5) # Add correlation coefficient
ggsave("./ont/m6anet/ctrl.shEZH2_ctrl.shMETTLE2_corplot.pdf")
# gather, wide to long to do boxplot ---
mod2.mean.long <- tidyr::gather(mod2.mean, condition, probability, -c(transcript_id,transcript_position))
mod2.mean.long$condition <- stringr::str_replace_all(mod2.mean.long$condition,"probability.","")
mod2.mean.long$condition <- factor(mod2.mean.long$condition, levels = c("control","shEZH2","shMETTLE3"))
p <- ggboxplot(mod2.mean.long, x = "condition", y = "probability",
fill = "condition", palette =c("#a50f15", "#fb6a4a","#fcbba1"))
my_comparisons <- list( c("control", "shEZH2"), c("control", "shMETTLE3"), c("shEZH2", "shMETTLE3") )
p + stat_compare_means(comparisons = my_comparisons)+
stat_compare_means(label.y = 1.5)
ggsave("./ont/m6anet/global.mod.probablity.boxplot.commonTranscript.pdf")
####################annotate with gtf ##################################################################
transcript.gtf <- data.table::fread("gencode.v38.annotation.transcript.gtf")
transcript.gtf$transcript_id <- stringr::str_sub(transcript.gtf$transcript_id,1,15)
mod2.mean.long.gtf <- dplyr::left_join(mod2.mean.long, transcript.gtf, by = "transcript_id")
names(mod2.mean.long.gtf)[5] <- "seqnames"
mod2.mean.long.gtf <- as.data.table(mod2.mean.long.gtf)
# overlap with supp 12 table ---
results <- data.table::fread("./ont/Yi/SupplementaryTable12_hg38.txt")
results$group_name <- NULL
results$group <- NULL
setkey(mod2.mean.long.gtf,seqnames, start, end)
overlaps.res.m6anet <- foverlaps(results,mod2.mean.long.gtf,type="within",nomatch=NULL)
# boxplot
p <- ggboxplot(overlaps.res.m6anet, x = "condition", y = "probability",
fill = "condition", palette =c("#a50f15", "#fb6a4a","#fcbba1"))
my_comparisons <- list( c("control", "shEZH2"), c("control", "shMETTLE3"), c("shEZH2", "shMETTLE3") )
p + stat_compare_means(comparisons = my_comparisons)+
stat_compare_means(label.y = 1.5)
ggsave("./ont/m6anet/global.mod.probablity.boxplot.overlapSupp12.pdf")
| /m6anet_res.R | no_license | ynren1020/QC | R | false | false | 7,858 | r | # April 13, 2022 analyze m6A results from m6anet method
# July 21, 2022 replot
# Load per-site m6anet result tables (gzipped CSV) for each sample:
# C1/C2 = Control, E1/E2 = shEZH2, M1/M2 = shMETTLE3 replicates.
c1 <- data.table::fread("./ont/m6anet/C1.data.result.csv.gz")
c2 <- data.table::fread("./ont/m6anet/C2.data.result.csv.gz")
e1 <- data.table::fread("./ont/m6anet/E1.data.result.csv.gz")
e2 <- data.table::fread("./ont/m6anet/E2.data.result.csv.gz")
m1 <- data.table::fread("./ont/m6anet/M1.data.result.csv.gz")
m2 <- data.table::fread("./ont/m6anet/M2.data.result.csv.gz")
# replication
# tag each table with its replicate label before stacking
c1$rep <- "C1"
c2$rep <- "C2"
e1$rep <- "E1"
e2$rep <- "E2"
m1$rep <- "M1"
m2$rep <- "M2"
# condition
# tag each table with its experimental condition
c1$condition <- "Control"
c2$condition <- "Control"
e1$condition <- "shEZH2"
e2$condition <- "shEZH2"
m1$condition <- "shMETTLE3"
m2$condition <- "shMETTLE3"
# rbind all
# stack all six samples into one long table and fix the condition factor
# order so Control is the first/reference level in plots and comparisons
mod <- data.table::rbindlist(list(c1,c2,e1,e2,m1,m2))
mod$condition <- factor(mod$condition, levels=c("Control","shEZH2","shMETTLE3"))
# boxplot to check the global methylation level across samples ---
library(ggpubr)
stat.test <- compare_means(probability_modified ~ condition, data= mod, method = "wilcox.test",p.adjust.method = "fdr") #, ref.group = "Control"
stat.test2 <- compare_means(probability_modified ~ rep, data= mod, method = "wilcox.test",p.adjust.method = "bonferroni")
stat.test2
# opposite compared to wilcox.test only
# A tibble: 3 × 8
#.y. group1 group2 p p.adj p.format p.signif method
#<chr> <chr> <chr> <dbl> <dbl> <chr> <chr> <chr>
#1 probability_modified Control shEZH2 0 0 < 2e-16 **** Wilcoxon
#2 probability_modified Control shMETTLE3 0 0 < 2e-16 **** Wilcoxon
#3 probability_modified shEZH2 shMETTLE3 0.0000159 0.000016 1.6e-05 **** Wilcoxon
#my_comparisons <- list( c("Control", "shEZH2"), c("shEZH2", "shMETTLE3"), c("Control", "shMETTLE3") )
p <- ggboxplot(mod, x = "condition", y = "probability_modified",
fill = "condition",
palette =c("#a50f15", "#fb6a4a","#fcbba1"),
xlab = "",
ylab = "Probability of m6A",
width = 0.3) +
theme(legend.position="none")
p + stat_pvalue_manual(
stat.test,
tip.length = 0.02,
step.increase = 0.075,
y.position = 1.1,
label = "p.signif"
#position = position_dodge(0.2)
)
ggsave("./ont/m6anet/global.mod.probablity.boxplot.pdf")
#july 21 2022
ggsave("./ont/figure/m6anet.global.mod.probablity.boxplot.svg",width=5,height=5)
# find common transcript position across samples -------------------------------
c1 <- data.table::fread("./ont/m6anet/C1.data.result.csv.gz")
c2 <- data.table::fread("./ont/m6anet/C2.data.result.csv.gz")
e1 <- data.table::fread("./ont/m6anet/E1.data.result.csv.gz")
e2 <- data.table::fread("./ont/m6anet/E2.data.result.csv.gz")
m1 <- data.table::fread("./ont/m6anet/M1.data.result.csv.gz")
m2 <- data.table::fread("./ont/m6anet/M2.data.result.csv.gz")
control <- dplyr::full_join(c1,c2,by=c("transcript_id","transcript_position"))
control <- na.omit(control)
shE <- dplyr::full_join(e1,e2,by=c("transcript_id","transcript_position"))
shE <- na.omit(shE)
shM <- dplyr::full_join(m1,m2,by=c("transcript_id","transcript_position"))
shM <- na.omit(shM)
mod2 <- purrr::reduce(list(control,shE,shM), dplyr::full_join, by = c("transcript_id","transcript_position"))
# rename columns ---
names(mod2)[c(3:14)] <- c(paste0(c("n_reads","probability_modified"),".C1"),
paste0(c("n_reads","probability_modified"),".C2"),
paste0(c("n_reads","probability_modified"),".E1"),
paste0(c("n_reads","probability_modified"),".E2"),
paste0(c("n_reads","probability_modified"),".M1"),
paste0(c("n_reads","probability_modified"),".M2"))
# mean within condition
mod2.mean <- data.frame(transcript_id = mod2$transcript_id,
transcript_position = mod2$transcript_position,
probability.control = rowMeans(mod2[,c(4,6)],na.rm=TRUE),
probability.shEZH2 = rowMeans(mod2[,c(8,10)],na.rm=TRUE),
probability.shMETTLE3 = rowMeans(mod2[,c(12,14)],na.rm=TRUE))
mean(mod2.mean$probability.control,na.rm=TRUE) # 0.2892524
mean(mod2.mean$probability.shEZH2,na.rm=TRUE) # 0.2626877
mean(mod2.mean$probability.shMETTLE3,na.rm=TRUE) # 0.2556337
# test ---
wilcox.test(mod2.mean$probability.control, mod2.mean$probability.shEZH2) # p-value < 2.2e-16
wilcox.test(mod2.mean$probability.control, mod2.mean$probability.shMETTLE3) # p-value < 2.2e-16
wilcox.test(mod2.mean$probability.shMETTLE3, mod2.mean$probability.shEZH2) # p-value = 0.001795
# calculate the mean difference ---
mod2.mean.diff <- mod2.mean
mod2.mean.diff$diff_ctrl.shE <- mod2.mean.diff$probability.control - mod2.mean.diff$probability.shEZH2
mod2.mean.diff$diff_ctrl.shM <- mod2.mean.diff$probability.control - mod2.mean.diff$probability.shMETTLE3
plot(mod2.mean.diff$diff_ctrl.shE,mod2.mean.diff$diff_ctrl.shM)
cor.test(mod2.mean.diff$diff_ctrl.shE,mod2.mean.diff$diff_ctrl.shM)
#Pearson's product-moment correlation
#data: mod2.mean.diff$diff_ctrl.shE and mod2.mean.diff$diff_ctrl.shM
#t = 267.54, df = 139862, p-value < 2.2e-16
#alternative hypothesis: true correlation is not equal to 0
#95 percent confidence interval:
# 0.5783480 0.5852814
#sample estimates:
# cor
#0.5818253
# scatter plot ---
ggscatter(mod2.mean.diff, x = "diff_ctrl.shE", y = "diff_ctrl.shM",
add = "reg.line", # Add regression line
color = "#bdbdbd",
conf.int = TRUE, # Add confidence interval
add.params = list(color = "blue",
fill = "lightgray")
)+
stat_cor(method = "spearman", label.x = 0.3, label.y = 0.5) # Add correlation coefficient
ggsave("./ont/m6anet/ctrl.shEZH2_ctrl.shMETTLE2_corplot.pdf")
# gather, wide to long to do boxplot ---
mod2.mean.long <- tidyr::gather(mod2.mean, condition, probability, -c(transcript_id,transcript_position))
mod2.mean.long$condition <- stringr::str_replace_all(mod2.mean.long$condition,"probability.","")
mod2.mean.long$condition <- factor(mod2.mean.long$condition, levels = c("control","shEZH2","shMETTLE3"))
p <- ggboxplot(mod2.mean.long, x = "condition", y = "probability",
fill = "condition", palette =c("#a50f15", "#fb6a4a","#fcbba1"))
my_comparisons <- list( c("control", "shEZH2"), c("control", "shMETTLE3"), c("shEZH2", "shMETTLE3") )
p + stat_compare_means(comparisons = my_comparisons)+
stat_compare_means(label.y = 1.5)
ggsave("./ont/m6anet/global.mod.probablity.boxplot.commonTranscript.pdf")
####################annotate with gtf ##################################################################
transcript.gtf <- data.table::fread("gencode.v38.annotation.transcript.gtf")
transcript.gtf$transcript_id <- stringr::str_sub(transcript.gtf$transcript_id,1,15)
mod2.mean.long.gtf <- dplyr::left_join(mod2.mean.long, transcript.gtf, by = "transcript_id")
names(mod2.mean.long.gtf)[5] <- "seqnames"
mod2.mean.long.gtf <- as.data.table(mod2.mean.long.gtf)
# overlap with supp 12 table ---
results <- data.table::fread("./ont/Yi/SupplementaryTable12_hg38.txt")
results$group_name <- NULL
results$group <- NULL
setkey(mod2.mean.long.gtf,seqnames, start, end)
overlaps.res.m6anet <- foverlaps(results,mod2.mean.long.gtf,type="within",nomatch=NULL)
# boxplot
p <- ggboxplot(overlaps.res.m6anet, x = "condition", y = "probability",
fill = "condition", palette =c("#a50f15", "#fb6a4a","#fcbba1"))
my_comparisons <- list( c("control", "shEZH2"), c("control", "shMETTLE3"), c("shEZH2", "shMETTLE3") )
p + stat_compare_means(comparisons = my_comparisons)+
stat_compare_means(label.y = 1.5)
ggsave("./ont/m6anet/global.mod.probablity.boxplot.overlapSupp12.pdf")
|
## @knitr
head(state.x77, n=3)
## @knitr
state.x77[1,2] # first row, second column
state.x77[1, 3:4] # first row, third and fourth columns
## @knitr
state.x77[state.x77[,"Population"] < 500, 1:6]
## @knitr
colnames(state.x77)
## @knitr
rbind(1:5, c(1, 1, 2, 3, 5)) # 2 by 5 matrix without names
## @knitr
m <- cbind(1:3, c(1.1, 1.2, 1.3), c(1, 1, 2)) # a 3 by 3 matrix
colnames(m) <- c("x", "y", "z") # or cbind(x=..., ...)
m
## @knitr
m[1:6] # first 6 entries of m
## @knitr
matrix(1:10, nrow=2)
matrix(1:10, nrow=2, byrow=TRUE)
## @knitr
DF <- Cars93[1:3, 1:5]
DF
## @knitr
DF[ , "Price"]
## @knitr
DF[ , "Price", drop=FALSE]
## @knitr
DF["Price"]
## @knitr
require(MASS)
dim(UScereal) # rows, columns
length(levels(UScereal$mfr)) # number of manufacturers
length(levels(UScereal["vitamins"])) # vitamin categories
sum(UScereal[, "sugars"] > 10) # sugar levels above 10
mean(UScereal[UScereal$fat > 5, "calories"]) # conditional mean
mean(UScereal[UScereal$fat <= 5, "calories"]) # conditional mean
mean(UScereal[UScereal["shelf"] == 2, "calories"])
## @knitr
l <- lm(mpg ~ wt, data=mtcars)
length(l) # no. components
names(l)
l[["residuals"]] # numeric, named data
## @knitr
d <- data.frame(a=1, b="two")
names(d) <- c("A", "B")
dimnames(d) <- list("1", c("eh", "bee"))
colnames(d) <- c("EH", "BEE")
d <- setNames(d, c("ahh", "buh"))
## @knitr eval=FALSE
## data.frame(nm1 = vec1, nm2=vec2, ...)
## @knitr
d <- as.data.frame(state.x77)
class(d)
## @knitr
d1 <- data.matrix(d)
class(d1)
## @knitr
d <- mtcars[1:5,] # first 5 rows
mean(d$mpg) - sd(d$mpg) <= d$mpg & d$mpg <= mean(d$mpg) + sd(d$mpg)
## @knitr
with(d, mean(mpg) - sd(mpg) <= mpg & mpg <= mean(mpg) + sd(mpg))
## @knitr
d <- Cars93[1:3, 1:4] # first 3 rows, 4 columns
d[1,1] <- d[3,4] <- NA # set two values to NA
d
## @knitr
d[1:2, 4] <- round(d[1:2, 4]) # round numeric values
## @knitr
d[3,c(2,4)] <- list("A3", 30) # warning
## @knitr
levels(d$Model) <- c(levels(d$Model), c("A3", "A4", "A6"))
d[3,c(2,4)] <- list("A3", 30)
## @knitr
d[4, ] <- list("Audi", "A4", "Midsize", 35)
## @knitr
d <- rbind(d, list("Audi", "A6", "Large", 45))
## @knitr
d[, 5] <- d$Min.Price * 1.3 # price in Euros
## @knitr
d$Min.Price.Euro <- d$Min.Price * 1.3
## @knitr
names(d) <- tolower(names(d))
## @knitr
names(d)[3] <- "car type"
## @knitr
aq <- airquality[1:5, ] # shorten
aq
subset(aq, select = Ozone:Wind) # range of names
subset(aq, select = -c(Month, Day)) # same result
subset(aq, subset = !is.na(Ozone), select=Ozone:Wind) # drop a row
## @knitr
DF <- data.frame(a=c(NA, 1, 2), b=c("one", NA, "three"))
subset(DF, !is.na(a)) # drop first, keep second
subset(DF, complete.cases(DF)) # drop first, second
## @knitr
d$min.price.euro <- d$min.price * 1.5
## @knitr
d <- within(d, {min.price.euro = min.price * 1.5})
## @knitr
d <- transform(d, min.price.euro = min.price * 1.5)
## @knitr echo=FALSE
speed <- head(reshape(morley, v.names="Speed", timevar="Expt", idvar="Run", direction="wide"))
speed <- speed[,-1]
rownames(speed) <- 1:6
## @knitr
speed
## @knitr
m <- reshape(speed, varying=names(speed)[1:5], direction="long")
head(m) # first 6 rows only
## @knitr
speed$Run <- LETTERS[1:6]
m <- reshape(speed, varying=names(speed)[1:5], direction="long")
head(m)
## @knitr
reshape(m, v.names="Speed", timevar="time", idvar="Run",
direction="wide")
## @knitr cache=FALSE
domestic <- "
The Avengers, 623357910
The Dark Knight Rises, 448139099
The Hunger Games, 408010692
Skyfall, 304360277
The Hobbit, 303003568
"
foreign <- "
The Avengers, 1511.8
Skyfall, 1108.6
The Dark Knight Rises, 1084.4
The Hobbit, 1017.0
Ice Age, 877.2
"
## @knitr cache=FALSE
df.domestic <- read.csv(textConnection(domestic), header=FALSE)
names(df.domestic) <- c("Name", "Domestic")
df.foreign <- read.csv(textConnection(foreign), header=FALSE)
names(df.foreign) <- c("name", "foreign")
## @knitr cache=FALSE
merge(df.domestic, df.foreign, by.x="Name", by.y="name", all=FALSE)
## @knitr cache=FALSE
merge(df.domestic, df.foreign, by.x="Name", by.y="name", all=TRUE)
## @knitr cache=FALSE
merge(df.domestic, df.foreign, by.x="Name", by.y="name", all.x=TRUE)
## @knitr
babies$id <- as.character(babies$id) # change type of variable
## @knitr
with(babies, gestation[gestation > 45 * 7])
## @knitr
babies <- within(babies, {
gestation[gestation == 999] <- NA
wt[wt == 999] <- NA
wt1[wt1 == 999] <- NA
dwt[dwt == 999] <- NA
ht[ht == 99] <- NA
dht[dht == 99] <- NA
})
## @knitr
babies$smoke <- factor(babies$smoke)
levels(babies$smoke) <- list("never"=0, "smokes now"=1,
"Until current pregnancy"=2,
"once did, not now"=3)
## @knitr
babies$number <- factor(babies$number)
levels(babies$number) <- list("never"=0, "1-4"=1, "5-9"=2,
"10-14"=3, "15-19"=4, "20-29"=5,
"30-39"=6, "40-60"=7, "60+"=8,
"smoke, don't know"=9, "unknown"=98)
## @knitr
require(lubridate)
(x <- ymd("1961-01-01"))
## @knitr
x + 10 * days(1)
## @knitr
babies$date <- x + (babies$date - 1096) * days(1)
## @knitr
bmi <- function(wt, ht) (wt/2.2) / (ht*2.54/100)^2
babies <- transform(babies, bmi = bmi(wt1, ht),
dbmi = bmi(dwt, dht))
## @knitr
subset(babies, abs(dbmi - bmi) > 14,
select=c(date, gestation, wt, race))
## @knitr
mean(fat$neck) / mean(fat$wrist)
mean(fat$neck/fat$wrist)
## @knitr
with(fat, mean(neck) / mean(wrist))
with(fat, mean(neck / wrist))
## @knitr
subset(Cars93, Origin == "non-USA" & Cylinders == 4 & Max.Price <= 15)
## @knitr
x <- 1:5
x - 3
## @knitr
x <- 1:5
res <- integer(length(x)) # allocate temporary storage
for(i in 1:length(x)) { # iterate over indices
res[i] <- x[i] - 3 # compute f, do bookkeeping
}
res
## @knitr
vmedian <- Vectorize(median) # returns a function
vmedian(homedata)
## @knitr
collection <- c(4, 9, 16)
Map(sqrt, collection)
## @knitr
sqrt(collection)
## @knitr
sapply(collection, sqrt)
## @knitr
lst <- with(ToothGrowth, split(len, supp))
sapply(lst, mean)
## @knitr
with(ToothGrowth,
tapply(len, supp, mean) # (X, INDEX, FUN)
)
## @knitr
with(ToothGrowth,
tapply(len, list(supp, dose), mean) # (X, INDEX, FUN)
)
## @knitr
aggregate(len ~ supp, data=ToothGrowth, mean)
## @knitr
# Group means via a formula (e.g. len ~ supp): aggregate computes the mean
# of the LHS within each RHS group, and xtabs reshapes the result into a
# contingency-style table instead of a data frame.
mean_formula <- function(formula, data) {
  grp_means <- aggregate(formula, data = data, mean)
  xtabs(formula, grp_means)
}
## @knitr
mean_formula(len ~ supp, data=ToothGrowth)
## @knitr echo=FALSE
mean.formula <- function(...) mean_formula(...)
## @knitr
mean(len ~ supp, data=ToothGrowth)
## @knitr
lst <- with(ToothGrowth, split(len, supp))
sapply(lst, summary)
## @knitr
sapply(mtcars, mean)
## @knitr
m <- mtcars[1:4, 1:3]
m[1,1] <- m[2,2] <- NA
sapply(m, mean)
sapply(m, mean, na.rm=TRUE)
## @knitr
m <- rbind(c(1,2), c(3,4))
sqrt(m)
## @knitr
(m <- replicate(5, rnorm(3)))
## @knitr
rowSums(m) # add along rows
colSums(m) # add down columns
## @knitr
sapply(m, sum)
## @knitr
apply(m, 1, mean) # rowMeans alternative
apply(m, 2, mean) # colMeans alternative
## @knitr
c(sum(m[1,]), sum(m[2,]), sum(m[3,]))
## @knitr
apply(m, 2, summary)
## @knitr
xbars <- apply(m, 2, mean)
centers <- sweep(m, 2, xbars, FUN="-") # "-" is default
centers
## @knitr
sds <- apply(m, 2, sd)
z_scores <- sweep(centers, 2, sds, FUN="/")
z_scores
## @knitr
min(3, 4) # 3
min(c(1,4), c(2,3)) # not c(1,3) as maybe desired
## @knitr
Map(min, c(1,4), c(2,3))
## @knitr
mapply(min, c(1,4), c(2,3))
## @knitr
our_sweep <- function(col, center) col - center
mapply(our_sweep, as.data.frame(m), apply(m, 2, mean))
## @knitr
body <- Animals$body; brain <- Animals$brain
do.call(cor, Map(rank, list(body, brain)))
## @knitr
do.call(cor, Map(rank, setNames(Animals, NULL)))
## @knitr
m <- Cars93[1:2, 1:15] # 15 columns
Filter(is.factor, m) # 6 are factors
## @knitr
Reduce("+", 1:4)
## @knitr
Reduce(function(x,y) ifelse(x > y, x, y), c(6, 4, 7, 9, 10, 3))
## @knitr
## gcd by Euclidean algorithm, a, b integers
# Iteratively replace (a, b) with (b, a %% b) until b == 0; a is then the gcd.
# Fixes: use `<-` (not `=`) for assignment, and avoid a local named `t`
# which shadows the base transpose function t().
gcd <- function(a, b) {
  while (b != 0) {
    remainder <- a %% b
    a <- b
    b <- remainder
  }
  a
}
## scm (lcm is R function name in graphics package)
# Smallest common multiple via the identity a * b == gcd(a, b) * scm(a, b).
scm <- function(a, b) (a * b) / gcd(a, b)
## @knitr
scm(3, 5) # no common primes
scm(3, 6) # common prime
## @knitr
Reduce(scm, 1:20) # smallest number divisible by 1, 2, ..., 20
## @knitr eval=FALSE
## sapply(wellbeing[,-(1:2)], function(y) {
## cor(wellbeing[,2], y, use="complete.obs")
## })
## @knitr
library(LearnEDA)
l <- with(beatles, split(time, album))
sapply(l, length)
## @knitr
sapply(mtcars, sd)
Vectorize(sd)(mtcars)
## @knitr
sapply(Filter(is.numeric, Cars93), sd)
## @knitr
sapply(Filter(is.numeric, Cars93), sd, na.rm=TRUE)
## @knitr
teams <- split(batting, batting$teamID)
team_avg <- sapply(teams, function(DF) with(DF, sum(H) / sum(AB)))
sort(team_avg)
## @knitr
players <- split(batting, batting$playerID)
traded_players <- Filter(function(x) nrow(x) > 1, players)
names(traded_players)
## @knitr
d <- c("1,2","3,4","5,6")
strsplit(d,",")
## @knitr
sapply(d, function(x) x[1])
## @knitr
d <- data.frame(a=1:3, b=c(1, NA, 3), c=c("one", "two", NA))
Filter(function(x) !any(is.na(x)), d)
## @knitr
f <- function(nm, x) sprintf("Variable %s has class %s", nm, class(x)[1])
our_func <- function(DF) mapply(f, names(DF), DF)
our_func(mtcars[1:3])
## @knitr
fruits <- c("Bananas", "Oranges", "Avocados", "Celeries?")
sapply(fruits, function(x)
paste(x, "are fruit number", which(fruits==x)))
## @knitr
sapply(seq_along(fruits), function(i) paste(fruits[i], "are fruit number", i))
## @knitr
mapply(paste, fruits, "are fruit number", seq_along(fruits))
## @knitr
require("gdata") # must be installed
f <- "http://www.eia.gov/petroleum/gasdiesel/xls/pswrgvwall.xls"
gas_prices <- read.xls(f, sheet=2, skip=2)
gas_prices <- setNames(gas_prices[,1:2], c("Date", "Weekly_US"))
## @knitr gas_price_graph, eval=FALSE
## gas_prices$Date <- as.Date(substr(gas_prices$Date, 1, 10),
## format="%b %d%Y")
## plot(Weekly_US ~ Date, gas_prices, type="l")
## @knitr echo=FALSE, out.width=singlewide
gas_prices$Date <- as.Date(substr(gas_prices$Date, 1, 10),
format="%b %d%Y")
plot(Weekly_US ~ Date, gas_prices, type="l")
## @knitr
key <- "0AoaQTPQhRgkqdEthU0ZZeThtcWtvcWpZUThiX2JUMGc"
f <- paste("https://docs.google.com/spreadsheet/pub?key=",
key,
"&single=true&gid=0&output=csv", sep="")
require(RCurl)
read.csv(textConnection(getURL(f)), header=TRUE)
## @knitr quandl, eval=FALSE
## require(Quandl)
## ch_0014 <- Quandl("WORLDBANK/CHN_SP_POP_0014_TO_ZS")
## ch_1564 <- Quandl("WORLDBANK/CHN_SP_POP_1564_TO_ZS")
## ch_65up <- Quandl("WORLDBANK/CHN_SP_POP_65UP_TO_ZS")
## ch_all <- Reduce(function(x,y) merge(x, y, by="Date"),
## list(ch_0014, ch_1564, ch_65up))
## names(ch_all) <- c("Date", "[0,14]", "[15,64]", "[65,)")
## @knitr echo=FALSE
ch_0014 <- suppressWarnings(Quandl("WORLDBANK/CHN_SP_POP_0014_TO_ZS"))
ch_1564 <- suppressWarnings(Quandl("WORLDBANK/CHN_SP_POP_1564_TO_ZS"))
ch_65up <- suppressWarnings(Quandl("WORLDBANK/CHN_SP_POP_65UP_TO_ZS"))
ch_all <- Reduce(function(x,y) merge(x, y, by="Date"), list(ch_0014, ch_1564, ch_65up))
names(ch_all) <- c("Date", "[0,14]", "[15,64]", "[65,)")
## @knitr chinese_demographics, eval=FALSE
## heights <- t(ch_all[,-1])
## colnames(heights) <- format(ch_all[,'Date'], format="%Y")
## barplot(heights, main="Proportion of [0-14], [15-64], [65,)")
## @knitr
require(RJSONIO)
f <- "http://www.quandl.com/api/v1/datasets/PRAGUESE/PX.json"
out <- fromJSON(f)
out$column_names # names
out$data[1] # one from 1000s of values
## @knitr portugese_stocks, eval=FALSE
## pluck <- function(l, key) l[[key]] # pluck from a list
## px <- data.frame(Date = as.Date(sapply(out$data, pluck, key=1)),
## index = sapply(out$data, pluck, key=2),
## perc_change = sapply(out$data, pluck, key=3))
## plot(index ~ Date, data=px, type="l", main="Portugese stock index")
## @knitr echo=FALSE, out.width=doublewide
heights <- t(ch_all[,-1])
colnames(heights) <- format(ch_all[,'Date'], format="%Y")
barplot(heights, main="Proportion of [0-14], [15-64], [65,)")
pluck <- function(l, key) l[[key]] # pluck from a list
px <- data.frame(Date = as.Date(sapply(out$data, pluck, key=1)),
index = sapply(out$data, pluck, key=2),
perc_change = sapply(out$data, pluck, key=3))
plot(index ~ Date, data=px, type="l", main="Portugese stock index")
## @knitr
require(XML)
## fit in 80 characters
url_base = "http://en.wikipedia.org/wiki/"
ch <- "List_of_highest-grossing_films_in_China"
us_can <- "List_of_highest-grossing_films_in_Canada_and_the_United_States"
##
china_all <- readHTMLTable(paste(url_base, ch, sep=""))[[1]]
us_can_all <- readHTMLTable(paste(url_base, us_can, sep=""))[[2]]
## @knitr cache=FALSE
in_common <- merge(china_all, us_can_all, by="Title")
## tidy up
# Elide long strings for display: values shorter than `n` characters pass
# through unchanged; longer ones are truncated to their first `n` characters
# and suffixed with "...". Vectorized over `x`.
# Fix: substr start index normalized from 0 to 1 (R coerces start < 1 to 1,
# so behavior is identical, but 1 is the intended/idiomatic form).
elide <- function(x, n = 20) {
  ifelse(nchar(x) < n, x, sprintf("%s...", substr(x, 1, n)))
}
rownames(in_common) <- sapply(as.character(in_common[,1]), elide)
##
in_common[, c(2,6,7)]
| /inst/samplecode/multivariate.R | no_license | jverzani/UsingR | R | false | false | 14,198 | r |
## @knitr
head(state.x77, n=3)
## @knitr
state.x77[1,2] # first row, second column
state.x77[1, 3:4] # first row, third and fourth columns
## @knitr
state.x77[state.x77[,"Population"] < 500, 1:6]
## @knitr
colnames(state.x77)
## @knitr
rbind(1:5, c(1, 1, 2, 3, 5)) # 2 by 5 matrix without names
## @knitr
m <- cbind(1:3, c(1.1, 1.2, 1.3), c(1, 1, 2)) # a 3 by 3 matrix
colnames(m) <- c("x", "y", "z") # or cbind(x=..., ...)
m
## @knitr
m[1:6] # first 6 entries of m
## @knitr
matrix(1:10, nrow=2)
matrix(1:10, nrow=2, byrow=TRUE)
## @knitr
DF <- Cars93[1:3, 1:5]
DF
## @knitr
DF[ , "Price"]
## @knitr
DF[ , "Price", drop=FALSE]
## @knitr
DF["Price"]
## @knitr
require(MASS)
dim(UScereal) # rows, columns
length(levels(UScereal$mfr)) # number of manufacturers
length(levels(UScereal["vitamins"])) # vitamin categories
sum(UScereal[, "sugars"] > 10) # sugar levels above 10
mean(UScereal[UScereal$fat > 5, "calories"]) # conditional mean
mean(UScereal[UScereal$fat <= 5, "calories"]) # conditional mean
mean(UScereal[UScereal["shelf"] == 2, "calories"])
## @knitr
l <- lm(mpg ~ wt, data=mtcars)
length(l) # no. components
names(l)
l[["residuals"]] # numeric, named data
## @knitr
d <- data.frame(a=1, b="two")
names(d) <- c("A", "B")
dimnames(d) <- list("1", c("eh", "bee"))
colnames(d) <- c("EH", "BEE")
d <- setNames(d, c("ahh", "buh"))
## @knitr eval=FALSE
## data.frame(nm1 = vec1, nm2=vec2, ...)
## @knitr
d <- as.data.frame(state.x77)
class(d)
## @knitr
d1 <- data.matrix(d)
class(d1)
## @knitr
d <- mtcars[1:5,] # first 5 rows
mean(d$mpg) - sd(d$mpg) <= d$mpg & d$mpg <= mean(d$mpg) + sd(d$mpg)
## @knitr
with(d, mean(mpg) - sd(mpg) <= mpg & mpg <= mean(mpg) + sd(mpg))
## @knitr
d <- Cars93[1:3, 1:4] # first 3 rows, 4 columns
d[1,1] <- d[3,4] <- NA # set two values to NA
d
## @knitr
d[1:2, 4] <- round(d[1:2, 4]) # round numeric values
## @knitr
d[3,c(2,4)] <- list("A3", 30) # warning
## @knitr
levels(d$Model) <- c(levels(d$Model), c("A3", "A4", "A6"))
d[3,c(2,4)] <- list("A3", 30)
## @knitr
d[4, ] <- list("Audi", "A4", "Midsize", 35)
## @knitr
d <- rbind(d, list("Audi", "A6", "Large", 45))
## @knitr
d[, 5] <- d$Min.Price * 1.3 # price in Euros
## @knitr
d$Min.Price.Euro <- d$Min.Price * 1.3
## @knitr
names(d) <- tolower(names(d))
## @knitr
names(d)[3] <- "car type"
## @knitr
aq <- airquality[1:5, ] # shorten
aq
subset(aq, select = Ozone:Wind) # range of names
subset(aq, select = -c(Month, Day)) # same result
subset(aq, subset = !is.na(Ozone), select=Ozone:Wind) # drop a row
## @knitr
DF <- data.frame(a=c(NA, 1, 2), b=c("one", NA, "three"))
subset(DF, !is.na(a)) # drop first, keep second
subset(DF, complete.cases(DF)) # drop first, second
## @knitr
d$min.price.euro <- d$min.price * 1.5
## @knitr
d <- within(d, {min.price.euro = min.price * 1.5})
## @knitr
d <- transform(d, min.price.euro = min.price * 1.5)
## @knitr echo=FALSE
speed <- head(reshape(morley, v.names="Speed", timevar="Expt", idvar="Run", direction="wide"))
speed <- speed[,-1]
rownames(speed) <- 1:6
## @knitr
speed
## @knitr
m <- reshape(speed, varying=names(speed)[1:5], direction="long")
head(m) # first 6 rows only
## @knitr
speed$Run <- LETTERS[1:6]
m <- reshape(speed, varying=names(speed)[1:5], direction="long")
head(m)
## @knitr
reshape(m, v.names="Speed", timevar="time", idvar="Run",
direction="wide")
## @knitr cache=FALSE
domestic <- "
The Avengers, 623357910
The Dark Knight Rises, 448139099
The Hunger Games, 408010692
Skyfall, 304360277
The Hobbit, 303003568
"
foreign <- "
The Avengers, 1511.8
Skyfall, 1108.6
The Dark Knight Rises, 1084.4
The Hobbit, 1017.0
Ice Age, 877.2
"
## @knitr cache=FALSE
df.domestic <- read.csv(textConnection(domestic), header=FALSE)
names(df.domestic) <- c("Name", "Domestic")
df.foreign <- read.csv(textConnection(foreign), header=FALSE)
names(df.foreign) <- c("name", "foreign")
## @knitr cache=FALSE
merge(df.domestic, df.foreign, by.x="Name", by.y="name", all=FALSE)
## @knitr cache=FALSE
merge(df.domestic, df.foreign, by.x="Name", by.y="name", all=TRUE)
## @knitr cache=FALSE
merge(df.domestic, df.foreign, by.x="Name", by.y="name", all.x=TRUE)
## @knitr
babies$id <- as.character(babies$id) # change type of variable
## @knitr
with(babies, gestation[gestation > 45 * 7])
## @knitr
babies <- within(babies, {
gestation[gestation == 999] <- NA
wt[wt == 999] <- NA
wt1[wt1 == 999] <- NA
dwt[dwt == 999] <- NA
ht[ht == 99] <- NA
dht[dht == 99] <- NA
})
## @knitr
babies$smoke <- factor(babies$smoke)
levels(babies$smoke) <- list("never"=0, "smokes now"=1,
"Until current pregnancy"=2,
"once did, not now"=3)
## @knitr
babies$number <- factor(babies$number)
levels(babies$number) <- list("never"=0, "1-4"=1, "5-9"=2,
"10-14"=3, "15-19"=4, "20-29"=5,
"30-39"=6, "40-60"=7, "60+"=8,
"smoke, don't know"=9, "unknown"=98)
## @knitr
require(lubridate)
(x <- ymd("1961-01-01"))
## @knitr
x + 10 * days(1)
## @knitr
babies$date <- x + (babies$date - 1096) * days(1)
## @knitr
bmi <- function(wt, ht) (wt/2.2) / (ht*2.54/100)^2
babies <- transform(babies, bmi = bmi(wt1, ht),
dbmi = bmi(dwt, dht))
## @knitr
subset(babies, abs(dbmi - bmi) > 14,
select=c(date, gestation, wt, race))
## @knitr
mean(fat$neck) / mean(fat$wrist)
mean(fat$neck/fat$wrist)
## @knitr
with(fat, mean(neck) / mean(wrist))
with(fat, mean(neck / wrist))
## @knitr
subset(Cars93, Origin == "non-USA" & Cylinders == 4 & Max.Price <= 15)
## @knitr
x <- 1:5
x - 3
## @knitr
x <- 1:5
res <- integer(length(x)) # allocate temporary storage
for(i in 1:length(x)) { # iterate over indices
res[i] <- x[i] - 3 # compute f, do bookkeeping
}
res
## @knitr
vmedian <- Vectorize(median) # returns a function
vmedian(homedata)
## @knitr
collection <- c(4, 9, 16)
Map(sqrt, collection)
## @knitr
sqrt(collection)
## @knitr
sapply(collection, sqrt)
## @knitr
lst <- with(ToothGrowth, split(len, supp))
sapply(lst, mean)
## @knitr
with(ToothGrowth,
tapply(len, supp, mean) # (X, INDEX, FUN)
)
## @knitr
with(ToothGrowth,
tapply(len, list(supp, dose), mean) # (X, INDEX, FUN)
)
## @knitr
aggregate(len ~ supp, data=ToothGrowth, mean)
## @knitr
# Group means via a formula (e.g. len ~ supp): aggregate computes the mean
# of the LHS within each RHS group, and xtabs reshapes the result into a
# contingency-style table instead of a data frame.
mean_formula <- function(formula, data) {
  grp_means <- aggregate(formula, data = data, mean)
  xtabs(formula, grp_means)
}
## @knitr
mean_formula(len ~ supp, data=ToothGrowth)
## @knitr echo=FALSE
mean.formula <- function(...) mean_formula(...)
## @knitr
mean(len ~ supp, data=ToothGrowth)
## @knitr
lst <- with(ToothGrowth, split(len, supp))
sapply(lst, summary)
## @knitr
sapply(mtcars, mean)
## @knitr
m <- mtcars[1:4, 1:3]
m[1,1] <- m[2,2] <- NA
sapply(m, mean)
sapply(m, mean, na.rm=TRUE)
## @knitr
m <- rbind(c(1,2), c(3,4))
sqrt(m)
## @knitr
(m <- replicate(5, rnorm(3)))
## @knitr
rowSums(m) # add along rows
colSums(m) # add down columns
## @knitr
sapply(m, sum)
## @knitr
apply(m, 1, mean) # rowMeans alternative
apply(m, 2, mean) # colMeans alternative
## @knitr
c(sum(m[1,]), sum(m[2,]), sum(m[3,]))
## @knitr
apply(m, 2, summary)
## @knitr
xbars <- apply(m, 2, mean)
centers <- sweep(m, 2, xbars, FUN="-") # "-" is default
centers
## @knitr
sds <- apply(m, 2, sd)
z_scores <- sweep(centers, 2, sds, FUN="/")
z_scores
## @knitr
min(3, 4) # 3
min(c(1,4), c(2,3)) # not c(1,3) as maybe desired
## @knitr
Map(min, c(1,4), c(2,3))
## @knitr
mapply(min, c(1,4), c(2,3))
## @knitr
our_sweep <- function(col, center) col - center
mapply(our_sweep, as.data.frame(m), apply(m, 2, mean))
## @knitr
body <- Animals$body; brain <- Animals$brain
do.call(cor, Map(rank, list(body, brain)))
## @knitr
do.call(cor, Map(rank, setNames(Animals, NULL)))
## @knitr
m <- Cars93[1:2, 1:15] # 15 columns
Filter(is.factor, m) # 6 are factors
## @knitr
Reduce("+", 1:4)
## @knitr
Reduce(function(x,y) ifelse(x > y, x, y), c(6, 4, 7, 9, 10, 3))
## @knitr
## gcd by Euclidean algorithm, a, b integers
# Iteratively replace (a, b) with (b, a %% b) until b == 0; a is then the gcd.
# Fixes: use `<-` (not `=`) for assignment, and avoid a local named `t`
# which shadows the base transpose function t().
gcd <- function(a, b) {
  while (b != 0) {
    remainder <- a %% b
    a <- b
    b <- remainder
  }
  a
}
## scm (lcm is R function name in graphics package)
# Smallest common multiple via the identity a * b == gcd(a, b) * scm(a, b).
scm <- function(a, b) (a * b) / gcd(a, b)
## @knitr
scm(3, 5) # no common primes
scm(3, 6) # common prime
## @knitr
Reduce(scm, 1:20) # smallest number divisible by 1, 2, ..., 20
## @knitr eval=FALSE
## sapply(wellbeing[,-(1:2)], function(y) {
## cor(wellbeing[,2], y, use="complete.obs")
## })
## @knitr
library(LearnEDA)
l <- with(beatles, split(time, album))
sapply(l, length)
## @knitr
sapply(mtcars, sd)
Vectorize(sd)(mtcars)
## @knitr
sapply(Filter(is.numeric, Cars93), sd)
## @knitr
sapply(Filter(is.numeric, Cars93), sd, na.rm=TRUE)
## @knitr
teams <- split(batting, batting$teamID)
team_avg <- sapply(teams, function(DF) with(DF, sum(H) / sum(AB)))
sort(team_avg)
## @knitr
players <- split(batting, batting$playerID)
traded_players <- Filter(function(x) nrow(x) > 1, players)
names(traded_players)
## @knitr
d <- c("1,2","3,4","5,6")
strsplit(d,",")
## @knitr
sapply(d, function(x) x[1])
## @knitr
d <- data.frame(a=1:3, b=c(1, NA, 3), c=c("one", "two", NA))
Filter(function(x) !any(is.na(x)), d)
## @knitr
f <- function(nm, x) sprintf("Variable %s has class %s", nm, class(x)[1])
our_func <- function(DF) mapply(f, names(DF), DF)
our_func(mtcars[1:3])
## @knitr
fruits <- c("Bananas", "Oranges", "Avocados", "Celeries?")
sapply(fruits, function(x)
paste(x, "are fruit number", which(fruits==x)))
## @knitr
sapply(seq_along(fruits), function(i) paste(fruits[i], "are fruit number", i))
## @knitr
mapply(paste, fruits, "are fruit number", seq_along(fruits))
## @knitr
require("gdata") # must be installed
f <- "http://www.eia.gov/petroleum/gasdiesel/xls/pswrgvwall.xls"
gas_prices <- read.xls(f, sheet=2, skip=2)
gas_prices <- setNames(gas_prices[,1:2], c("Date", "Weekly_US"))
## @knitr gas_price_graph, eval=FALSE
## gas_prices$Date <- as.Date(substr(gas_prices$Date, 1, 10),
## format="%b %d%Y")
## plot(Weekly_US ~ Date, gas_prices, type="l")
## @knitr echo=FALSE, out.width=singlewide
gas_prices$Date <- as.Date(substr(gas_prices$Date, 1, 10),
format="%b %d%Y")
plot(Weekly_US ~ Date, gas_prices, type="l")
## @knitr
key <- "0AoaQTPQhRgkqdEthU0ZZeThtcWtvcWpZUThiX2JUMGc"
f <- paste("https://docs.google.com/spreadsheet/pub?key=",
key,
"&single=true&gid=0&output=csv", sep="")
require(RCurl)
read.csv(textConnection(getURL(f)), header=TRUE)
## @knitr quandl, eval=FALSE
## require(Quandl)
## ch_0014 <- Quandl("WORLDBANK/CHN_SP_POP_0014_TO_ZS")
## ch_1564 <- Quandl("WORLDBANK/CHN_SP_POP_1564_TO_ZS")
## ch_65up <- Quandl("WORLDBANK/CHN_SP_POP_65UP_TO_ZS")
## ch_all <- Reduce(function(x,y) merge(x, y, by="Date"),
## list(ch_0014, ch_1564, ch_65up))
## names(ch_all) <- c("Date", "[0,14]", "[15,64]", "[65,)")
## @knitr echo=FALSE
ch_0014 <- suppressWarnings(Quandl("WORLDBANK/CHN_SP_POP_0014_TO_ZS"))
ch_1564 <- suppressWarnings(Quandl("WORLDBANK/CHN_SP_POP_1564_TO_ZS"))
ch_65up <- suppressWarnings(Quandl("WORLDBANK/CHN_SP_POP_65UP_TO_ZS"))
ch_all <- Reduce(function(x,y) merge(x, y, by="Date"), list(ch_0014, ch_1564, ch_65up))
names(ch_all) <- c("Date", "[0,14]", "[15,64]", "[65,)")
## @knitr chinese_demographics, eval=FALSE
## heights <- t(ch_all[,-1])
## colnames(heights) <- format(ch_all[,'Date'], format="%Y")
## barplot(heights, main="Proportion of [0-14], [15-64], [65,)")
## @knitr
require(RJSONIO)
f <- "http://www.quandl.com/api/v1/datasets/PRAGUESE/PX.json"
out <- fromJSON(f)
out$column_names # names
out$data[1] # one from 1000s of values
## @knitr portugese_stocks, eval=FALSE
## pluck <- function(l, key) l[[key]] # pluck from a list
## px <- data.frame(Date = as.Date(sapply(out$data, pluck, key=1)),
## index = sapply(out$data, pluck, key=2),
## perc_change = sapply(out$data, pluck, key=3))
## plot(index ~ Date, data=px, type="l", main="Portugese stock index")
## @knitr echo=FALSE, out.width=doublewide
heights <- t(ch_all[,-1])
colnames(heights) <- format(ch_all[,'Date'], format="%Y")
barplot(heights, main="Proportion of [0-14], [15-64], [65,)")
## Extract the single element stored under `key` (name or index) from list `l`.
pluck <- function(l, key) {
  l[[key]]
}
px <- data.frame(Date = as.Date(sapply(out$data, pluck, key=1)),
index = sapply(out$data, pluck, key=2),
perc_change = sapply(out$data, pluck, key=3))
plot(index ~ Date, data=px, type="l", main="Portugese stock index")
## @knitr
require(XML)
## fit in 80 characters
url_base = "http://en.wikipedia.org/wiki/"
ch <- "List_of_highest-grossing_films_in_China"
us_can <- "List_of_highest-grossing_films_in_Canada_and_the_United_States"
##
china_all <- readHTMLTable(paste(url_base, ch, sep=""))[[1]]
us_can_all <- readHTMLTable(paste(url_base, us_can, sep=""))[[2]]
## @knitr cache=FALSE
in_common <- merge(china_all, us_can_all, by="Title")
## tidy up
## Shorten each string in `x` to at most `n` characters, appending "..."
## when truncation occurs. Vectorised over `x` (used above to abbreviate
## long film titles for row names).
##
## Args:
##   x: character vector to shorten
##   n: maximum number of characters to keep before eliding (default 20)
## Returns: character vector the same length as `x`.
elide <- function(x, n=20) {
  # `<=` so a string of exactly n characters is returned unchanged (the
  # previous `<` elided it, making it longer); substr() is 1-based in R,
  # so start at 1 rather than relying on 0 being silently clamped.
  ifelse(nchar(x) <= n, x, sprintf("%s...", substr(x, 1, n)))
}
rownames(in_common) <- sapply(as.character(in_common[,1]), elide)
##
in_common[, c(2,6,7)]
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/powernoise.R
\name{powernoise}
\alias{powernoise}
\title{A function to generate k-noise}
\usage{
powernoise(k, N)
}
\arguments{
\item{k}{Power law scaling exponent}
\item{N}{number of samples to generate}
}
\value{
A named list with three entries is returned.
x - N x 1 vector of power law samples
}
\description{
Generates samples of power law noise.
}
\details{
Generates samples of power law noise.
The power spectrum of the signal scales as f^(-k). The R function uses fft(),
similarly to the knoise_fft Matlab function.
}
\examples{
powernoise_series = powernoise(k=2, N=10000)
}
\author{
Sebastian Sippel and Holger Lange
}
| /man/powernoise.Rd | no_license | cran/statcomp | R | false | true | 712 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/powernoise.R
\name{powernoise}
\alias{powernoise}
\title{A function to generate k-noise}
\usage{
powernoise(k, N)
}
\arguments{
\item{k}{Power law scaling exponent}
\item{N}{number of samples to generate}
}
\value{
A named list with three entries is returned.
x - N x 1 vector of power law samples
}
\description{
Generates samples of power law noise.
}
\details{
Generates samples of power law noise.
The power spectrum of the signal scales as f^(-k). The R function uses fft(),
similarly to the knoise_fft Matlab function.
}
\examples{
powernoise_series = powernoise(k=2, N=10000)
}
\author{
Sebastian Sippel and Holger Lange
}
|
library(dataRetrieval)
### Name: readNWISsite
### Title: USGS Site File Data Retrieval
### Aliases: readNWISsite
### Keywords: USGS data import service web
### ** Examples
## Not run:
##D siteINFO <- readNWISsite('05114000')
##D siteINFOMulti <- readNWISsite(c('05114000','09423350'))
## End(Not run)
| /data/genthat_extracted_code/dataRetrieval/examples/readNWISsite.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 309 | r | library(dataRetrieval)
### Name: readNWISsite
### Title: USGS Site File Data Retrieval
### Aliases: readNWISsite
### Keywords: USGS data import service web
### ** Examples
## Not run:
##D siteINFO <- readNWISsite('05114000')
##D siteINFOMulti <- readNWISsite(c('05114000','09423350'))
## End(Not run)
|
## transmission probability $p_{ij}$ ###########################################
## Anonymous function (this file evaluates to it, loaded elsewhere via dget())
## that computes, for every candidate infector of Case 20, the probability
## that this contact was the true (unobserved) infector, given the proposed
## model parameters.
##
## Args:
##   thetaSS - serial-interval parameter(s), forwarded to L_si
##   R0      - reproduction-number parameter, forwarded to L_si
##   epsilon - additional model parameter, forwarded to L_si
##   Data    - outbreak data; Data$vi holds each case's infector index, with
##             vi[20] being the missing entry filled in below
## Returns: a vector of normalised transmission probabilities, one per
##   candidate in w20, or NULL when any entry is non-finite (the caller then
##   falls back to a uniform probability, per the inline comment).
function (thetaSS, R0, epsilon, Data) {
# load the helper: likelihood of the serial interval (4b_L_si.R evaluates to a function)
L_si = dget("4b_L_si.R")
# case indices of the contacts of Case 20 (the candidate infectors)
w20 = c(5, 6, 7, 10, 11, 12)
# preallocate one likelihood slot per candidate
s_hat20 = rep(NA ,length(w20))
for (j in 1:length(w20)){
# temporarily assign candidate j as the missing infector of Case 20
Data$vi[20] = w20[j]
# likelihood of the serial interval using the proposed parameters
# (actually the serial-interval pmf evaluated for this pairing)
s_hat20[j] = L_si(i = 20, thetaSS, R0, epsilon, Data)
}
# normalise the likelihoods so they sum to 1 -> transmission probabilities
p_20_j = s_hat20/sum(s_hat20)
# output the transmission probabilities only when the normalisation is valid
if ( all(is.finite(p_20_j)) ) {
return(p_20_j)
} else { # non-finite entries (e.g. all-zero likelihoods): caller uses a uniform probability
return(NULL)
}
} | /4d_pij.R | no_license | imlouischan/ebola-ng | R | false | false | 796 | r | ## transmission probability $p_{ij}$ ###########################################
function (thetaSS, R0, epsilon, Data) {
# call the function: likelihood of serial interval
L_si = dget("4b_L_si.R")
# contacts with Case 20
w20 = c(5, 6, 7, 10, 11, 12)
# preallocate
s_hat20 = rep(NA ,length(w20))
for (j in 1:length(w20)){
# the missing infector
Data$vi[20] = w20[j]
# likelihood of serial interval using proposed parameters
# actually the serial interval pmf
s_hat20[j] = L_si(i = 20, thetaSS, R0, epsilon, Data)
}
# transmission probability
p_20_j = s_hat20/sum(s_hat20)
# output the transmission probability
if ( all(is.finite(p_20_j)) ) {
return(p_20_j)
} else { # uniform probability
return(NULL)
}
} |
%% File Name: IRT.informationCurves.Rd
%% File Version: 0.13
\name{IRT.informationCurves}
\alias{IRT.informationCurves}
\alias{IRT.informationCurves.tam.mml}
\alias{IRT.informationCurves.tam.mml.2pl}
\alias{IRT.informationCurves.tam.mml.3pl}
\alias{IRT.informationCurves.tam.mml.mfr}
\alias{plot.IRT.informationCurves}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Item and Test Information Curve
}
\description{
An S3 method which computes item and test information curves, see Muraki (1993).
}
\usage{
IRT.informationCurves(object, ...)
\method{IRT.informationCurves}{tam.mml}( object , h=.0001 , iIndex = NULL ,
theta = NULL , ... )
\method{IRT.informationCurves}{tam.mml.2pl}( object , h=.0001 , iIndex = NULL ,
theta = NULL , ... )
\method{IRT.informationCurves}{tam.mml.mfr}( object , h=.0001 , iIndex = NULL ,
theta = NULL , ... )
\method{IRT.informationCurves}{tam.mml.3pl}( object , h=.0001 , iIndex = NULL ,
theta = NULL , ... )
\method{plot}{IRT.informationCurves}(x , curve_type="test" , \dots)
}
% informationCurves_mml <- function( object , h=.0001 ,
% iIndex = NULL , theta = NULL , ... )
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
Object of class \code{tam.mml}, \code{tam.mml.2pl}, \code{tam.mml.mfr}
or \code{tam.mml.3pl}.
}
\item{\dots}{
Further arguments to be passed
}
\item{h}{Numerical differentiation parameter}
\item{iIndex}{Indices of items for which test information should be computed.
The default is to use all items.}
\item{theta}{Optional vector of \eqn{\theta} for which information curves
should be computed.}
\item{curve_type}{Type of information to be plotted. It can be \code{"test"}
for the test information curve and \code{"se"} for the
standard error curve.}
\item{x}{
Object of class \code{tam.mml}, \code{tam.mml.2pl}, \code{tam.mml.mfr}
or \code{tam.mml.3pl}.
}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
List with following entries
\item{se_curve}{Standard error curves}
\item{test_info_curve}{Test information curve}
\item{info_curves_item}{Item information curves}
\item{info_curves_categories}{Item-category information curves}
\item{theta}{Used \eqn{\theta} grid}
}
\references{
Muraki, E. (1993). Information functions of the generalized partial credit
model. \emph{Applied Psychological Measurement, 17}(4), 351-363.
}
%\author{
%Alexander Robitzsch
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
\dontrun{
#############################################################################
# EXAMPLE 1: Dichotomous data | data.read
#############################################################################
data(data.read , package="sirt")
dat <- data.read
# fit 2PL model
mod1 <- TAM::tam.mml.2pl( dat )
summary(mod1)
# compute information curves at grid seq(-5,5,length=100)
imod1 <- TAM::IRT.informationCurves( mod1 , theta= seq(-5,5,len=100) )
str(imod1)
# plot test information
plot( imod1 )
# plot standard error curve
plot( imod1 , curve_type = "se" , xlim=c(-3,2) )
# customized plot
plot( imod1 , curve_type = "se" , xlim=c(-3,2) , ylim = c(0,2) , lwd=2 , lty=3)
#############################################################################
# EXAMPLE 2: Mixed dichotomous and polytomous data
#############################################################################
data(data.timssAusTwn.scored, package="TAM")
dat <- data.timssAusTwn.scored
# select item response data
items <- grep( "M0" , colnames(dat) , value=TRUE )
resp <- dat[, items ]
#*** Model 1: Partial credit model
mod1 <- TAM::tam.mml( resp )
summary(mod1)
# information curves
imod1 <- TAM::IRT.informationCurves( mod1 , theta= seq(-3,3,len=20) )
#*** Model 2: Generalized partial credit model
mod2 <- TAM::tam.mml.2pl( resp , irtmodel="GPCM")
summary(mod2)
imod2 <- TAM::IRT.informationCurves( mod2 )
#*** Model 3: Mixed 3PL and generalized partial credit model
psych::describe(resp)
maxK <- apply( resp , 2 , max , na.rm=TRUE )
I <- ncol(resp)
# specify guessing parameters, including a prior distribution
est.guess <- 1:I
est.guess[ maxK > 1 ] <- 0
guess <- .2*(est.guess >0)
guess.prior <- matrix( 0 , nrow=I , ncol=2 )
guess.prior[ est.guess > 0 , 1] <- 5
guess.prior[ est.guess > 0 , 2] <- 17
# fit model
mod3 <- TAM::tam.mml.3pl( resp , gammaslope.des = "2PL" , est.guess=est.guess , guess=guess ,
guess.prior = guess.prior ,
control=list( maxiter=100 , Msteps=10 , fac.oldxsi=0.1 ,
nodes = seq(-8,8,len=41) ) , est.variance=FALSE )
summary(mod3)
# information curves
imod3 <- TAM::IRT.informationCurves( mod3 )
imod3
#*** estimate model in mirt package
library(mirt)
itemtype <- rep("gpcm" , I)
itemtype[ maxK==1] <- "3PL"
mod3b <- mirt::mirt(resp , 1 , itemtype=itemtype , verbose=TRUE )
print(mod3b)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Information curves}
% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/IRT.informationCurves.Rd | no_license | yaozeyang90/TAM | R | false | false | 5,239 | rd | %% File Name: IRT.informationCurves.Rd
%% File Version: 0.13
\name{IRT.informationCurves}
\alias{IRT.informationCurves}
\alias{IRT.informationCurves.tam.mml}
\alias{IRT.informationCurves.tam.mml.2pl}
\alias{IRT.informationCurves.tam.mml.3pl}
\alias{IRT.informationCurves.tam.mml.mfr}
\alias{plot.IRT.informationCurves}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Item and Test Information Curve
}
\description{
An S3 method which computes item and test information curves, see Muraki (1993).
}
\usage{
IRT.informationCurves(object, ...)
\method{IRT.informationCurves}{tam.mml}( object , h=.0001 , iIndex = NULL ,
theta = NULL , ... )
\method{IRT.informationCurves}{tam.mml.2pl}( object , h=.0001 , iIndex = NULL ,
theta = NULL , ... )
\method{IRT.informationCurves}{tam.mml.mfr}( object , h=.0001 , iIndex = NULL ,
theta = NULL , ... )
\method{IRT.informationCurves}{tam.mml.3pl}( object , h=.0001 , iIndex = NULL ,
theta = NULL , ... )
\method{plot}{IRT.informationCurves}(x , curve_type="test" , \dots)
}
% informationCurves_mml <- function( object , h=.0001 ,
% iIndex = NULL , theta = NULL , ... )
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
Object of class \code{tam.mml}, \code{tam.mml.2pl}, \code{tam.mml.mfr}
or \code{tam.mml.3pl}.
}
\item{\dots}{
Further arguments to be passed
}
\item{h}{Numerical differentiation parameter}
\item{iIndex}{Indices of items for which test information should be computed.
The default is to use all items.}
\item{theta}{Optional vector of \eqn{\theta} for which information curves
should be computed.}
\item{curve_type}{Type of information to be plotted. It can be \code{"test"}
for the test information curve and \code{"se"} for the
standard error curve.}
\item{x}{
Object of class \code{tam.mml}, \code{tam.mml.2pl}, \code{tam.mml.mfr}
or \code{tam.mml.3pl}.
}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
List with following entries
\item{se_curve}{Standard error curves}
\item{test_info_curve}{Test information curve}
\item{info_curves_item}{Item information curves}
\item{info_curves_categories}{Item-category information curves}
\item{theta}{Used \eqn{\theta} grid}
}
\references{
Muraki, E. (1993). Information functions of the generalized partial credit
model. \emph{Applied Psychological Measurement, 17}(4), 351-363.
}
%\author{
%Alexander Robitzsch
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
\dontrun{
#############################################################################
# EXAMPLE 1: Dichotomous data | data.read
#############################################################################
data(data.read , package="sirt")
dat <- data.read
# fit 2PL model
mod1 <- TAM::tam.mml.2pl( dat )
summary(mod1)
# compute information curves at grid seq(-5,5,length=100)
imod1 <- TAM::IRT.informationCurves( mod1 , theta= seq(-5,5,len=100) )
str(imod1)
# plot test information
plot( imod1 )
# plot standard error curve
plot( imod1 , curve_type = "se" , xlim=c(-3,2) )
# customized plot
plot( imod1 , curve_type = "se" , xlim=c(-3,2) , ylim = c(0,2) , lwd=2 , lty=3)
#############################################################################
# EXAMPLE 2: Mixed dichotomous and polytomous data
#############################################################################
data(data.timssAusTwn.scored, package="TAM")
dat <- data.timssAusTwn.scored
# select item response data
items <- grep( "M0" , colnames(dat) , value=TRUE )
resp <- dat[, items ]
#*** Model 1: Partial credit model
mod1 <- TAM::tam.mml( resp )
summary(mod1)
# information curves
imod1 <- TAM::IRT.informationCurves( mod1 , theta= seq(-3,3,len=20) )
#*** Model 2: Generalized partial credit model
mod2 <- TAM::tam.mml.2pl( resp , irtmodel="GPCM")
summary(mod2)
imod2 <- TAM::IRT.informationCurves( mod2 )
#*** Model 3: Mixed 3PL and generalized partial credit model
psych::describe(resp)
maxK <- apply( resp , 2 , max , na.rm=TRUE )
I <- ncol(resp)
# specify guessing parameters, including a prior distribution
est.guess <- 1:I
est.guess[ maxK > 1 ] <- 0
guess <- .2*(est.guess >0)
guess.prior <- matrix( 0 , nrow=I , ncol=2 )
guess.prior[ est.guess > 0 , 1] <- 5
guess.prior[ est.guess > 0 , 2] <- 17
# fit model
mod3 <- TAM::tam.mml.3pl( resp , gammaslope.des = "2PL" , est.guess=est.guess , guess=guess ,
guess.prior = guess.prior ,
control=list( maxiter=100 , Msteps=10 , fac.oldxsi=0.1 ,
nodes = seq(-8,8,len=41) ) , est.variance=FALSE )
summary(mod3)
# information curves
imod3 <- TAM::IRT.informationCurves( mod3 )
imod3
#*** estimate model in mirt package
library(mirt)
itemtype <- rep("gpcm" , I)
itemtype[ maxK==1] <- "3PL"
mod3b <- mirt::mirt(resp , 1 , itemtype=itemtype , verbose=TRUE )
print(mod3b)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Information curves}
% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# Lab 5: decision-tree classification with rpart on the "play" dataset.
# NOTE(review): setwd() with an absolute personal path is fragile -- prefer
# running the script from the project directory and using relative paths.
setwd("/home/gehad/bigData_lab5")
getwd()
# One-off package installation (kept commented out after the first run)
#install.packages("rpart.plot")
#install.packages("ROCR")
library("rpart")
library("rpart.plot")
library("ROCR")
# Read the data
play_decision <- read.table("DTdata.csv",header=TRUE,sep=",")
play_decision
summary(play_decision)
# Build the tree to "fit" the model
fit <- rpart(Play ~ Outlook + Temperature + Humidity + Wind,
method="class",
data=play_decision,
control=rpart.control(minsplit=2, maxdepth = 3),
parms=list(split='information'))
# split='information' : split on "information gain" (entropy) instead of the default
# Plot the tree (type = 4: label all nodes; extra = 1: show observation counts)
rpart.plot(fit, type = 4, extra = 1)
summary(fit)
#######################################################################################
# Q1: what is the default value for split?
# split defaults to gini
# Q2: what are the meanings of these control parameters?
# 1- "minsplit=2"
# The minimum number of observations that must exist in a node in order for a split to be attempted is 2
# 2- "maxdepth=3"
# The maximum depth of any node of the final tree, with the root node counted as depth 0, is 3
# 3- "minbucket=4"
# The minimum number of observations in any terminal (leaf) node is 4
# Supporting graphs for different values of these parameters are in Graphs_lab5_req2.pdf
# Q3: What will happen if only one of either minsplit or minbucket is specified
# and not the other?
# rpart derives the missing one: minsplit is set to minbucket*3, or minbucket to minsplit/3, as appropriate
# Q4: What do the 'type' and 'extra' parameters mean in the plot function?
# 'type' selects the plot layout; possible values are 0, 1, 2, 3, 4, 5, each drawing the nodes as described in the help
# 'extra' controls the extra information displayed at the nodes; possible values are "auto", 1-11 and +100, each showing specific details on the plot
# Q5: Plot the tree with probabilities instead of the number of observations in each node.
######################################################################################
rpart.plot(fit, type = 4, extra = 4)
# Predict whether Play is possible for: overcast outlook, mild temperature, high humidity, no wind
newdata <- data.frame(Outlook="overcast",Temperature="mild",Humidity="high",Wind=FALSE)
newdata
predict(fit,newdata=newdata,type=c("class"))
# type can be class, prob or vector for classification trees.
######################################################################################
# Q6: What is the predicted class for this test case?
# The predicted class is yes
# Q7: State the sequence of tree node checks to reach this class (label).
# Start at the root and check Temperature; go left because it is mild, then check Outlook
# and go right because it is overcast, stopping at that leaf node, which predicts yes
# (the proportion of yes there is 100%)
## ================================= END ===================================== ## | /DecisionTrees.R | no_license | geehad/Naive-Bayes-Classifier---Decision-Trees-in-R | R | false | false | 3,010 | r | setwd("/home/gehad/bigData_lab5")
getwd()
#install.packages("rpart.plot")
#install.packages("ROCR")
library("rpart")
library("rpart.plot")
library("ROCR")
#Read the data
play_decision <- read.table("DTdata.csv",header=TRUE,sep=",")
play_decision
summary(play_decision)
#Build the tree to "fit" the model
fit <- rpart(Play ~ Outlook + Temperature + Humidity + Wind,
method="class",
data=play_decision,
control=rpart.control(minsplit=2, maxdepth = 3),
parms=list(split='information'))
# split='information' : means split on "information gain"
#plot the tree
rpart.plot(fit, type = 4, extra = 1)
summary(fit)
#######################################################################################
# Q1: what is the defult value for split?
# split defaults to gini
# Q2: what are the meanings of these control parameters?
# 1- "minsplit=2"
#The minimum number of observations that must exist in a node in order for a split to be attempted is 2
# 2- "maxdepth=3"
#The maximum depth of any node of the final tree, with the root node counted as depth 0 is 3
# 3- "minbucket=4"
#The minimum number of observations in any terminal node is 4
# Support your answers with graphs for different values of these parameters. ---------> Graphs in the pdf named Graphs_lab5_req2.pdf
#Q3: What will happen if only one of either minsplit or minbucket is specified
# and not the other?
# The code either sets minsplit to minbucket*3 or minbucket to minsplit/3, as appropriate
#Q4: What does 'type' and 'extra' parameters mean in the plot function?
# 'type' is Type of plot that takes possible values : 0 , 1 , 2 , 3 , 4 , 5 each type plot a type as specified in the help
# 'extra' Display extra information at the nodes and possible values are : "auto" , 1 , 2 , 3 , 4 , 5 , 6 , 7 ,8 , 9 ,10 ,11 , +100 where each value display specific information on the plot
#Q5: Plot the tree with propabilities instead of number of observations in each node.
######################################################################################
rpart.plot(fit, type = 4, extra = 4)
#Predict if Play is possible for condition rainy, mild humidity, high temperature and no wind
newdata <- data.frame(Outlook="overcast",Temperature="mild",Humidity="high",Wind=FALSE)
newdata
predict(fit,newdata=newdata,type=c("class"))
# type can be class, prob or vector for classification trees.
######################################################################################
#Q6: What is the predicted class for this test case?
#the predicted class is yes
#Q7: State the sequence of tree node checks to reach this class (label).
# starts from the root then checks the temperature then go left as it is mild , then checks Outlook so go right as it is overcast and stop at this leaf node and predicted yes as the prop of yes is 100%
## ================================= END ===================================== ## |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dose.functions.R
\name{dfpoly}
\alias{dfpoly}
\title{Fractional polynomial dose-response function}
\usage{
dfpoly(degree = 1, beta.1 = "rel", beta.2 = "rel", power.1 = 0, power.2 = 0)
}
\arguments{
\item{degree}{The degree of the fractional polynomial as defined in \insertCite{royston1994;textual}{MBNMAdose}}
\item{beta.1}{Pooling for the 1st fractional polynomial coefficient. Can take \code{"rel"}, \code{"common"}, \code{"random"} or be
assigned a numeric value (see details).}
\item{beta.2}{Pooling for the 2nd fractional polynomial coefficient. Can take \code{"rel"}, \code{"common"}, \code{"random"} or be
assigned a numeric value (see details).}
\item{power.1}{Value for the 1st fractional polynomial power (\eqn{\gamma_1}). Must take any numeric value in the set \verb{-2, -1, -0.5, 0, 0.5, 1, 2, 3}.}
\item{power.2}{Value for the 2nd fractional polynomial power (\eqn{\gamma_2}). Must take any numeric value in the set \verb{-2, -1, -0.5, 0, 0.5, 1, 2, 3}.}
}
\value{
An object of \code{class("dosefun")}
}
\description{
Fractional polynomial dose-response function
}
\details{
\itemize{
\item \eqn{\beta_1} represents the 1st coefficient.
\item \eqn{\beta_2} represents the 2nd coefficient.
\item \eqn{\gamma_1} represents the 1st fractional polynomial power
\item \eqn{\gamma_2} represents the 2nd fractional polynomial power
}
For a polynomial of \code{degree=1}:
\deqn{{\beta_1}x^{\gamma_1}}
For a polynomial of \code{degree=2}:
\deqn{{\beta_1}x^{\gamma_1}+{\beta_2}x^{\gamma_2}}
\eqn{x^{\gamma}} is a regular power except where \eqn{\gamma=0}, where \eqn{x^{(0)}=ln(x)}.
If a fractional polynomial power \eqn{\gamma} repeats within the function it is multiplied by another \eqn{ln(x)}.
}
\section{Dose-response parameters}{
\tabular{ll}{
\strong{Argument} \tab \strong{Model specification} \cr
\code{"rel"} \tab Implies that \emph{relative} effects should be pooled for this dose-response parameter separately for each agent in the network. \cr
\code{"common"} \tab Implies that all agents share the same common effect for this dose-response parameter. \cr
\code{"random"} \tab Implies that all agents share a similar (exchangeable) effect for this dose-response parameter. This approach allows for modelling of variability between agents. \cr
\code{numeric()} \tab Assigned a numeric value, indicating that this dose-response parameter should not be estimated from the data but should be assigned the numeric value determined by the user. This can be useful for fixing specific dose-response parameters (e.g. Hill parameters in Emax functions) to a single value. \cr
}
When relative effects are modelled on more than one dose-response parameter,
correlation between them is automatically estimated using a vague inverse-Wishart prior.
This prior can be made slightly more informative by specifying the scale matrix \code{omega}
and by changing the degrees of freedom of the inverse-Wishart prior
using the \code{priors} argument in \code{mbnma.run()}.
}
\examples{
# 1st order fractional polynomial a value of 0.5 for the power
dfpoly(beta.1="rel", power.1=0.5)
# 2nd order fractional polynomial with relative effects for coefficients
# and a value of -0.5 and 2 for the 1st and 2nd powers respectively
dfpoly(degree=2, beta.1="rel", beta.2="rel",
power.1=-0.5, power.2=2)
}
\references{
\insertAllCited
}
| /man/dfpoly.Rd | no_license | cran/MBNMAdose | R | false | true | 3,505 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dose.functions.R
\name{dfpoly}
\alias{dfpoly}
\title{Fractional polynomial dose-response function}
\usage{
dfpoly(degree = 1, beta.1 = "rel", beta.2 = "rel", power.1 = 0, power.2 = 0)
}
\arguments{
\item{degree}{The degree of the fractional polynomial as defined in \insertCite{royston1994;textual}{MBNMAdose}}
\item{beta.1}{Pooling for the 1st fractional polynomial coefficient. Can take \code{"rel"}, \code{"common"}, \code{"random"} or be
assigned a numeric value (see details).}
\item{beta.2}{Pooling for the 2nd fractional polynomial coefficient. Can take \code{"rel"}, \code{"common"}, \code{"random"} or be
assigned a numeric value (see details).}
\item{power.1}{Value for the 1st fractional polynomial power (\eqn{\gamma_1}). Must take any numeric value in the set \verb{-2, -1, -0.5, 0, 0.5, 1, 2, 3}.}
\item{power.2}{Value for the 2nd fractional polynomial power (\eqn{\gamma_2}). Must take any numeric value in the set \verb{-2, -1, -0.5, 0, 0.5, 1, 2, 3}.}
}
\value{
An object of \code{class("dosefun")}
}
\description{
Fractional polynomial dose-response function
}
\details{
\itemize{
\item \eqn{\beta_1} represents the 1st coefficient.
\item \eqn{\beta_2} represents the 2nd coefficient.
\item \eqn{\gamma_1} represents the 1st fractional polynomial power
\item \eqn{\gamma_2} represents the 2nd fractional polynomial power
}
For a polynomial of \code{degree=1}:
\deqn{{\beta_1}x^{\gamma_1}}
For a polynomial of \code{degree=2}:
\deqn{{\beta_1}x^{\gamma_1}+{\beta_2}x^{\gamma_2}}
\eqn{x^{\gamma}} is a regular power except where \eqn{\gamma=0}, where \eqn{x^{(0)}=ln(x)}.
If a fractional polynomial power \eqn{\gamma} repeats within the function it is multiplied by another \eqn{ln(x)}.
}
\section{Dose-response parameters}{
\tabular{ll}{
\strong{Argument} \tab \strong{Model specification} \cr
\code{"rel"} \tab Implies that \emph{relative} effects should be pooled for this dose-response parameter separately for each agent in the network. \cr
\code{"common"} \tab Implies that all agents share the same common effect for this dose-response parameter. \cr
\code{"random"} \tab Implies that all agents share a similar (exchangeable) effect for this dose-response parameter. This approach allows for modelling of variability between agents. \cr
\code{numeric()} \tab Assigned a numeric value, indicating that this dose-response parameter should not be estimated from the data but should be assigned the numeric value determined by the user. This can be useful for fixing specific dose-response parameters (e.g. Hill parameters in Emax functions) to a single value. \cr
}
When relative effects are modelled on more than one dose-response parameter,
correlation between them is automatically estimated using a vague inverse-Wishart prior.
This prior can be made slightly more informative by specifying the scale matrix \code{omega}
and by changing the degrees of freedom of the inverse-Wishart prior
using the \code{priors} argument in \code{mbnma.run()}.
}
\examples{
# 1st order fractional polynomial with a value of 0.5 for the power
dfpoly(beta.1="rel", power.1=0.5)
# 2nd order fractional polynomial with relative effects for coefficients
# and a value of -0.5 and 2 for the 1st and 2nd powers respectively
dfpoly(degree=2, beta.1="rel", beta.2="rel",
power.1=-0.5, power.2=2)
}
\references{
\insertAllCited
}
|
testlist <- list(ends = c(-14914341L, 1092032927L, -96688997L, 1632068659L, -1985290631L, -1286875988L, 1686393228L, -1178994978L, -161631656L ), pts = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L, -840170934L, 1837701012L, 1533208973L, -1125300777L, 765849512L, -1760774663L ), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result) | /IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609861111-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 716 | r | testlist <- list(ends = c(-14914341L, 1092032927L, -96688997L, 1632068659L, -1985290631L, -1286875988L, 1686393228L, -1178994978L, -161631656L ), pts = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L, -840170934L, 1837701012L, 1533208973L, -1125300777L, 765849512L, -1760774663L ), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result) |
# Functions to compute the bandwidth matrix
### 1a. Flexible Hpi.diag function with 1 stage estimation
# Plug-in diagonal bandwidth selector that accepts either a matrix
# (multivariate, via ks::Hpi.diag with 1-stage estimation) or a plain
# vector (univariate, via ks::hpi).
# Warning: This function requires the package ks.
# It can only be used for up to 6 dimensions.
#' @export
Hpi.diag.flex1 <- function(x) {
  if (!is.matrix(x)) {
    return(hpi(x))
  }
  Hpi.diag(x, nstage = 1)
}
### 1b. Flexible Hpi.diag function with 2 stage estimation
# Plug-in diagonal bandwidth selector that accepts either a matrix
# (multivariate, via ks::Hpi.diag with 2-stage estimation) or a plain
# vector (univariate, via ks::hpi).
# Warning: This function requires the package ks.
# It can only be used for up to 6 dimensions.
#' @export
Hpi.diag.flex2 <- function(x) {
  if (!is.matrix(x)) {
    return(hpi(x))
  }
  Hpi.diag(x, nstage = 2)
}
### 2. Flexible bw.nrd function (Silverman)
# Normal-reference (Silverman) bandwidth: returns a scalar for a vector
# input, or a diagonal matrix of per-column bandwidths for a matrix input.
#' @export
bw.nrd.flex <- function(x) {
  if (!is.matrix(x)) {
    return(bw.nrd(x))
  }
  per_col <- apply(x, 2, bw.nrd)
  diag(per_col, ncol(x))
}
### 3. Flexible bw.nrd0 function (Scott)
# (using the modified bw.nrd0 function that allows NA values)
# NA-tolerant wrapper around stats::bw.nrd0: missing observations are
# dropped before the rule-of-thumb bandwidth is computed.
#' @export
bw.nrd0.mod <- function(x) {
  bw.nrd0(x[complete.cases(x)])
}
# Scott's rule-of-thumb bandwidth (NA-tolerant): a scalar for a vector
# input, or a diagonal matrix of per-column bandwidths for a matrix input.
#' @export
bw.nrd0.flex <- function(x) {
  if (!is.matrix(x)) {
    return(bw.nrd0.mod(x))
  }
  diag(apply(x, 2, bw.nrd0.mod), ncol(x))
}
### 3b. Generalization of Scott's rule (Haerdle et al 2004, p. 73)
# For multivariate bandwidth estimation, this rule should be used instead
# of bw.nrd0.flex (or bw.nrd.flex), because it takes the number of
# dimensions into account: each per-column Scott bandwidth is rescaled by
# n^(1/5 - 1/(d + 4)). Vector input falls back to the univariate rule.
#' @export
bw.nrd0.mult <- function(x) {
  if (!is.matrix(x)) {
    return(bw.nrd0.mod(x))
  }
  n_obs <- nrow(x)
  n_dim <- ncol(x)
  scaled <- apply(x, 2, bw.nrd0.mod) * (n_obs^(1/5 - 1/(n_dim + 4)))
  diag(scaled, n_dim)
}
#### Functions for kernel density derivative estimation ####
# (only first derivative)
# normal scale diagonal bandwidth matrix by Chacon et al, Stat. Sin. 2011,
# eq. 3.2 (extension of Silverman/Scott's rule for derivatives)
# univariate:
#' Normal-scale bandwidth for first-derivative kernel density estimation,
#' d = 1 case of Chacon et al. (2011) eq. 3.2:
#' var(x) * (4 / (n * (d + 4)))^(2 / (d + 6)).
#' @export
bw.chacon.uni <- function(x) {
  # Fix: use TRUE rather than the reassignable alias T; drop redundant parens.
  # NOTE(review): n = length(x) counts NA entries even though var() drops
  # them via na.rm — kept as-is to preserve existing results; confirm intended.
  var(x, na.rm = TRUE) * (4 / (length(x) * 5))^(2 / 7)
}
# multivariate:
#' Normal-scale diagonal bandwidth matrix for first-derivative kernel
#' density estimation (Chacon et al. 2011, eq. 3.2). For a matrix input
#' returns a d x d diagonal matrix built from the per-column variances;
#' for a vector input falls back to the univariate rule bw.chacon.uni().
#' @export
bw.chacon <- function(x) {
  if (is.matrix(x)) {
    d <- ncol(x)
    n <- nrow(x)
    # Per-dimension variances scaled by the derivative normal-scale factor.
    # Fix: TRUE instead of the reassignable alias T; behaviour unchanged.
    h <- diag(var(x, na.rm = TRUE)) * (4 / (n * (d + 4)))^(2 / (d + 6))
    diag(h, d)
  } else {
    bw.chacon.uni(x)
  }
}
| /R/bw.R | no_license | johannabertl/ApproxML | R | false | false | 2,029 | r | # Functions to compute the bandwidth matrix
### 1a. Flexible Hpi.diag function with 1 stage estimation
# Plug-in diagonal bandwidth selector that accepts either a matrix
# (multivariate, via ks::Hpi.diag with 1-stage estimation) or a plain
# vector (univariate, via ks::hpi).
# Warning: This function requires the package ks.
# It can only be used for up to 6 dimensions.
#' @export
Hpi.diag.flex1 <- function(x) {
  if (!is.matrix(x)) {
    return(hpi(x))
  }
  Hpi.diag(x, nstage = 1)
}
### 1b. Flexible Hpi.diag function with 2 stage estimation
# Plug-in diagonal bandwidth selector that accepts either a matrix
# (multivariate, via ks::Hpi.diag with 2-stage estimation) or a plain
# vector (univariate, via ks::hpi).
# Warning: This function requires the package ks.
# It can only be used for up to 6 dimensions.
#' @export
Hpi.diag.flex2 <- function(x) {
  if (!is.matrix(x)) {
    return(hpi(x))
  }
  Hpi.diag(x, nstage = 2)
}
### 2. Flexible bw.nrd function (Silverman)
# Normal-reference (Silverman) bandwidth: returns a scalar for a vector
# input, or a diagonal matrix of per-column bandwidths for a matrix input.
#' @export
bw.nrd.flex <- function(x) {
  if (!is.matrix(x)) {
    return(bw.nrd(x))
  }
  per_col <- apply(x, 2, bw.nrd)
  diag(per_col, ncol(x))
}
### 3. Flexible bw.nrd0 function (Scott)
# (using the modified bw.nrd0 function that allows NA values)
# NA-tolerant wrapper around stats::bw.nrd0: missing observations are
# dropped before the rule-of-thumb bandwidth is computed.
#' @export
bw.nrd0.mod <- function(x) {
  bw.nrd0(x[complete.cases(x)])
}
# Scott's rule-of-thumb bandwidth (NA-tolerant): a scalar for a vector
# input, or a diagonal matrix of per-column bandwidths for a matrix input.
#' @export
bw.nrd0.flex <- function(x) {
  if (!is.matrix(x)) {
    return(bw.nrd0.mod(x))
  }
  diag(apply(x, 2, bw.nrd0.mod), ncol(x))
}
### 3b. Generalization of Scott's rule (Haerdle et al 2004, p. 73)
# For multivariate bandwidth estimation, this rule should be used instead
# of bw.nrd0.flex (or bw.nrd.flex), because it takes the number of
# dimensions into account: each per-column Scott bandwidth is rescaled by
# n^(1/5 - 1/(d + 4)). Vector input falls back to the univariate rule.
#' @export
bw.nrd0.mult <- function(x) {
  if (!is.matrix(x)) {
    return(bw.nrd0.mod(x))
  }
  n_obs <- nrow(x)
  n_dim <- ncol(x)
  scaled <- apply(x, 2, bw.nrd0.mod) * (n_obs^(1/5 - 1/(n_dim + 4)))
  diag(scaled, n_dim)
}
#### Functions for kernel density derivative estimation ####
# (only first derivative)
# normal scale diagonal bandwidth matrix by Chacon et al, Stat. Sin. 2011,
# eq. 3.2 (extension of Silverman/Scott's rule for derivatives)
# univariate:
#' Normal-scale bandwidth for first-derivative kernel density estimation,
#' d = 1 case of Chacon et al. (2011) eq. 3.2:
#' var(x) * (4 / (n * (d + 4)))^(2 / (d + 6)).
#' @export
bw.chacon.uni <- function(x) {
  # Fix: use TRUE rather than the reassignable alias T; drop redundant parens.
  # NOTE(review): n = length(x) counts NA entries even though var() drops
  # them via na.rm — kept as-is to preserve existing results; confirm intended.
  var(x, na.rm = TRUE) * (4 / (length(x) * 5))^(2 / 7)
}
# multivariate:
#' Normal-scale diagonal bandwidth matrix for first-derivative kernel
#' density estimation (Chacon et al. 2011, eq. 3.2). For a matrix input
#' returns a d x d diagonal matrix built from the per-column variances;
#' for a vector input falls back to the univariate rule bw.chacon.uni().
#' @export
bw.chacon <- function(x) {
  if (is.matrix(x)) {
    d <- ncol(x)
    n <- nrow(x)
    # Per-dimension variances scaled by the derivative normal-scale factor.
    # Fix: TRUE instead of the reassignable alias T; behaviour unchanged.
    h <- diag(var(x, na.rm = TRUE)) * (4 / (n * (d + 4)))^(2 / (d + 6))
    diag(h, d)
  } else {
    bw.chacon.uni(x)
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_cmty_edge_btwns.R
\name{get_cmty_edge_btwns}
\alias{get_cmty_edge_btwns}
\title{Get community membership by edge betweenness}
\usage{
get_cmty_edge_btwns(graph)
}
\arguments{
\item{graph}{a graph object of class
\code{dgr_graph}.}
}
\value{
a data frame with group membership
assignments for each of the nodes.
}
\description{
Using edge betweenness, obtain the
group membership values for each of the nodes in
the graph.
}
\examples{
# Create a random graph
graph <-
create_random_graph(
10, 22, set_seed = 1)
# Get the group membership values for all nodes
# in the graph through calculation of edge
# betweenness (iteratively removing edges with
# the highest betweenness to reveal communities)
get_cmty_edge_btwns(graph)
#> node edge_btwns_group
#> 1 1 1
#> 2 2 1
#> 3 3 1
#> 4 4 2
#> 5 5 1
#> 6 6 2
#> 7 7 1
#> 8 8 1
#> 9 9 2
#> 10 10 1
# Add the group membership values to the graph
# as a node attribute
graph <-
graph \%>\%
join_node_attrs(get_cmty_edge_btwns(.))
}
| /man/get_cmty_edge_btwns.Rd | no_license | timelyportfolio/DiagrammeR | R | false | true | 1,238 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_cmty_edge_btwns.R
\name{get_cmty_edge_btwns}
\alias{get_cmty_edge_btwns}
\title{Get community membership by edge betweenness}
\usage{
get_cmty_edge_btwns(graph)
}
\arguments{
\item{graph}{a graph object of class
\code{dgr_graph}.}
}
\value{
a data frame with group membership
assignments for each of the nodes.
}
\description{
Using edge betweenness, obtain the
group membership values for each of the nodes in
the graph.
}
\examples{
# Create a random graph
graph <-
create_random_graph(
10, 22, set_seed = 1)
# Get the group membership values for all nodes
# in the graph through calculation of edge
# betweenness (iteratively removing edges with
# the highest betweenness to reveal communities)
get_cmty_edge_btwns(graph)
#> node edge_btwns_group
#> 1 1 1
#> 2 2 1
#> 3 3 1
#> 4 4 2
#> 5 5 1
#> 6 6 2
#> 7 7 1
#> 8 8 1
#> 9 9 2
#> 10 10 1
# Add the group membership values to the graph
# as a node attribute
graph <-
graph \%>\%
join_node_attrs(get_cmty_edge_btwns(.))
}
|
#' Simulate the EPP-ASM HIV epidemic projection
#'
#' Runs the age-structured demographic and HIV disease projection for a
#' set of fixed parameters, either via the compiled C implementation
#' (default) or a pure-R reference implementation of the same model.
#'
#' @param fp fixed-parameter object (class "specfp"): state space (`ss`),
#'   demographic inputs (base population, survival, migration, fertility),
#'   and HIV natural-history / ART parameters.
#' @param VERSION "C" (default) calls the compiled projection via `.Call`;
#'   "R" (or any other value except... any value != "R" uses C) runs the
#'   R implementation below.
#'
#' @return Array of class "spec": population indexed by single-year age,
#'   sex, HIV status, and projection year, with additional outputs
#'   (prevalence, incidence, deaths, HIV/ART populations) as attributes.
#'
#' @useDynLib eppasm eppasmC
#' @export
simmod.specfp <- function(fp, VERSION="C"){
# Backwards-compatible defaults for older fixed-parameter objects.
if(!exists("popadjust", where=fp))
fp$popadjust <- FALSE
if(!exists("incidmod", where=fp))
fp$incidmod <- "eppspectrum"
# Compiled path: map string model options to integer codes for C, then call.
if(VERSION != "R"){
fp$eppmodInt <- match(fp$eppmod, c("rtrend", "directincid"), nomatch=0) # 0: r-spline;
fp$incidmodInt <- match(fp$incidmod, c("eppspectrum"))-1L # -1 for 0-based indexing
mod <- .Call(eppasmC, fp)
class(mod) <- "spec"
return(mod)
}
################## Pure-R reference implementation below ##################
# fastmatch::ctapply is a faster drop-in replacement for tapply when available.
if(requireNamespace("fastmatch", quietly = TRUE))
ctapply <- fastmatch::ctapply
else
ctapply <- tapply
fp$ss$DT <- 1/fp$ss$hiv_steps_per_year
## Attach state space variables
invisible(list2env(fp$ss, environment())) # put ss variables in environment for convenience
birthslag <- fp$birthslag
pregprevlag <- rep(0, PROJ_YEARS)
## initialize projection
pop <- array(0, c(pAG, NG, pDS, PROJ_YEARS))
pop[,,1,1] <- fp$basepop
hivpop <- array(0, c(hDS, hAG, NG, PROJ_YEARS))
artpop <- array(0, c(hTS, hDS, hAG, NG, PROJ_YEARS))
## initialize output
# NOTE(review): sexinc15to49out and paedsurvout are allocated but never
# filled in this R implementation — confirm whether intentionally unused.
prev15to49 <- numeric(PROJ_YEARS)
incid15to49 <- numeric(PROJ_YEARS)
sexinc15to49out <- array(NA, c(NG, PROJ_YEARS))
paedsurvout <- rep(NA, PROJ_YEARS)
infections <- array(0, c(pAG, NG, PROJ_YEARS))
hivdeaths <- array(0, c(pAG, NG, PROJ_YEARS))
natdeaths <- array(0, c(pAG, NG, PROJ_YEARS))
popadj.prob <- array(0, c(pAG, NG, PROJ_YEARS))
if(fp$eppmod != "directincid"){
## outputs by timestep
incrate15to49.ts.out <- rep(NA, length(fp$rvec))
rvec <- if(fp$eppmod == "rtrend") rep(NA, length(fp$proj.steps)) else fp$rvec
prev15to49.ts.out <- rep(NA, length(fp$rvec))
}
entrant_prev_out <- numeric(PROJ_YEARS)
hivp_entrants_out <- array(0, c(NG, PROJ_YEARS))
## store last prevalence value (for r-trend model)
prevlast <- 0
for(i in 2:fp$SIM_YEARS){
## ################################### ##
## Single-year population projection ##
## ################################### ##
## age the population
pop[-c(1,pAG),,,i] <- pop[-(pAG-1:0),,,i-1]
pop[pAG,,,i] <- pop[pAG,,,i-1] + pop[pAG-1,,,i-1] # open age group
## Add lagged births into youngest age group
entrant_prev <- fp$entrantprev[,i]
# NOTE(review): scalar condition combined with elementwise `&` — works when
# fp$popadjust is a scalar logical; confirm it can never be a vector.
if(exists("popadjust", where=fp) & fp$popadjust){
hivn_entrants <- fp$entrantpop[,i-1]*(1-entrant_prev)
hivp_entrants <- fp$entrantpop[,i-1]*entrant_prev
} else {
hivn_entrants <- birthslag[,i-1]*fp$cumsurv[,i-1]*(1-entrant_prev / fp$paedsurv_lag[i-1]) + fp$cumnetmigr[,i-1]*(1-pregprevlag[i-1]*fp$netmig_hivprob)
hivp_entrants <- birthslag[,i-1]*fp$cumsurv[,i-1]*entrant_prev + fp$cumnetmigr[,i-1]*entrant_prev
}
entrant_prev_out[i] <- sum(hivp_entrants) / sum(hivn_entrants+hivp_entrants)
hivp_entrants_out[,i] <- sum(hivp_entrants)
pop[1,,hivn.idx,i] <- hivn_entrants
pop[1,,hivp.idx,i] <- hivp_entrants
# Probability of ageing out of each coarse HIV age group this year.
hiv.ag.prob <- pop[aglast.idx,,hivp.idx,i-1] / apply(pop[,,hivp.idx,i-1], 2, ctapply, ag.idx, sum)
hiv.ag.prob[is.nan(hiv.ag.prob)] <- 0
hivpop[,,,i] <- hivpop[,,,i-1]
hivpop[,-hAG,,i] <- hivpop[,-hAG,,i] - sweep(hivpop[,-hAG,,i-1], 2:3, hiv.ag.prob[-hAG,], "*")
hivpop[,-1,,i] <- hivpop[,-1,,i] + sweep(hivpop[,-hAG,,i-1], 2:3, hiv.ag.prob[-hAG,], "*")
hivpop[,1,,i] <- hivpop[,1,,i] + sweep(fp$paedsurv_cd4dist[,,i], 2, hivp_entrants * (1-fp$entrantartcov[,i]), "*")
if(i > fp$tARTstart){
artpop[,,,,i] <- artpop[,,,,i-1]
artpop[,,-hAG,,i] <- artpop[,,-hAG,,i] - sweep(artpop[,,-hAG,,i-1], 3:4, hiv.ag.prob[-hAG,], "*")
artpop[,,-1,,i] <- artpop[,,-1,,i] + sweep(artpop[,,-hAG,,i-1], 3:4, hiv.ag.prob[-hAG,], "*")
artpop[,,1,,i] <- artpop[,,1,,i] + sweep(fp$paedsurv_artcd4dist[,,,i], 3, hivp_entrants * fp$entrantartcov[,i], "*")
}
## survive the population
deaths <- sweep(pop[,,,i], 1:2, (1-fp$Sx[,,i]), "*")
hiv.sx.prob <- 1-apply(deaths[,,2], 2, ctapply, ag.idx, sum) / apply(pop[,,2,i], 2, ctapply, ag.idx, sum)
hiv.sx.prob[is.nan(hiv.sx.prob)] <- 0
pop[,,,i] <- pop[,,,i] - deaths
natdeaths[,,i] <- rowSums(deaths,,2)
hivpop[,,,i] <- sweep(hivpop[,,,i], 2:3, hiv.sx.prob, "*")
if(i > fp$tARTstart)
artpop[,,,,i] <- sweep(artpop[,,,,i], 3:4, hiv.sx.prob, "*")
## net migration (migrants survive half a year on average)
netmigsurv <- fp$netmigr[,,i]*(1+fp$Sx[,,i])/2
mr.prob <- 1+netmigsurv / rowSums(pop[,,,i],,2)
hiv.mr.prob <- apply(mr.prob * pop[,,2,i], 2, ctapply, ag.idx, sum) / apply(pop[,,2,i], 2, ctapply, ag.idx, sum)
hiv.mr.prob[is.nan(hiv.mr.prob)] <- 0
pop[,,,i] <- sweep(pop[,,,i], 1:2, mr.prob, "*")
hivpop[,,,i] <- sweep(hivpop[,,,i], 2:3, hiv.mr.prob, "*")
if(i > fp$tARTstart)
artpop[,,,,i] <- sweep(artpop[,,,,i], 3:4, hiv.mr.prob, "*")
## fertility (mid-year female population times age-specific fertility rates)
births.by.age <- rowSums(pop[p.fert.idx, f.idx,,i-1:0])/2 * fp$asfr[,i]
births.by.h.age <- ctapply(births.by.age, ag.idx[p.fert.idx], sum)
births <- fp$srb[,i] * sum(births.by.h.age)
if(i+AGE_START <= PROJ_YEARS)
birthslag[,i+AGE_START-1] <- births
## ########################## ##
## Disease model simulation ##
## ########################## ##
## events at dt timestep
for(ii in seq_len(hiv_steps_per_year)){
ts <- (i-2)/DT + ii
grad <- array(0, c(hDS, hAG, NG))
if(fp$eppmod != "directincid"){
## incidence
## calculate r(t)
if(fp$eppmod %in% c("rtrend", "rtrend_rw"))
rvec[ts] <- calc_rtrend_rt(fp$proj.steps[ts], fp, rvec[ts-1], prevlast, pop, i, ii)
else
rvec[ts] <- fp$rvec[ts]
## number of infections by age / sex
infections.ts <- calc_infections_eppspectrum(fp, pop, hivpop, artpop, i, ii, rvec[ts])
incrate15to49.ts.out[ts] <- attr(infections.ts, "incrate15to49.ts")
prev15to49.ts.out[ts] <- attr(infections.ts, "prevcurr")
prevlast <- attr(infections.ts, "prevcurr")
pop[,,hivn.idx,i] <- pop[,,hivn.idx,i] - DT*infections.ts
pop[,,hivp.idx,i] <- pop[,,hivp.idx,i] + DT*infections.ts
infections[,,i] <- infections[,,i] + DT*infections.ts
grad <- grad + sweep(fp$cd4_initdist, 2:3, apply(infections.ts, 2, ctapply, ag.idx, sum), "*")
incid15to49[i] <- incid15to49[i] + sum(DT*infections.ts[p.age15to49.idx,])
}
## disease progression and mortality
grad[-hDS,,] <- grad[-hDS,,] - fp$cd4_prog * hivpop[-hDS,,,i] # remove cd4 stage progression (untreated)
grad[-1,,] <- grad[-1,,] + fp$cd4_prog * hivpop[-hDS,,,i] # add cd4 stage progression (untreated)
# Optionally rescale untreated CD4 mortality by the untreated fraction.
if(fp$scale_cd4_mort == 1){
cd4mx_scale <- hivpop[,,,i] / (hivpop[,,,i] + colSums(artpop[,,,,i]))
cd4mx_scale[!is.finite(cd4mx_scale)] <- 1.0
cd4_mort_ts <- fp$cd4_mort * cd4mx_scale
} else
cd4_mort_ts <- fp$cd4_mort
grad <- grad - cd4_mort_ts * hivpop[,,,i] # HIV mortality, untreated
## Remove hivdeaths from pop
hivdeaths.ts <- DT*(colSums(cd4_mort_ts * hivpop[,,,i]) + colSums(fp$art_mort * fp$artmx_timerr[ , i] * artpop[,,,,i],,2))
calc.agdist <- function(x) {d <- x/rep(ctapply(x, ag.idx, sum), h.ag.span); d[is.na(d)] <- 0; d}
hivdeaths_p.ts <- apply(hivdeaths.ts, 2, rep, h.ag.span) * apply(pop[,,hivp.idx,i], 2, calc.agdist) # HIV deaths by single-year age
pop[,,2,i] <- pop[,,2,i] - hivdeaths_p.ts
hivdeaths[,,i] <- hivdeaths[,,i] + hivdeaths_p.ts
## ART initiation
if(i >= fp$tARTstart) {
gradART <- array(0, c(hTS, hDS, hAG, NG))
## progression and mortality
gradART[1:(hTS-1),,,] <- gradART[1:(hTS-1),,,] - 1.0 / fp$ss$h_art_stage_dur * artpop[1:(hTS-1),,,, i] # remove ART duration progression
gradART[2:hTS,,,] <- gradART[2:hTS,,,] + 1.0 / fp$ss$h_art_stage_dur * artpop[1:(hTS-1),,,, i] # add ART duration progression
gradART <- gradART - fp$art_mort * fp$artmx_timerr[ , i] * artpop[,,,,i] # ART mortality
## ART dropout
## remove proportion from all adult ART groups back to untreated pop
grad <- grad + fp$art_dropout[i]*colSums(artpop[,,,,i])
gradART <- gradART - fp$art_dropout[i]*artpop[,,,,i]
## calculate number eligible for ART
artcd4_percelig <- 1 - (1-rep(0:1, times=c(fp$artcd4elig_idx[i]-1, hDS - fp$artcd4elig_idx[i]+1))) *
(1-rep(c(0, fp$who34percelig), c(2, hDS-2))) *
(1-rep(fp$specpop_percelig[i], hDS))
art15plus.elig <- sweep(hivpop[,h.age15plus.idx,,i], 1, artcd4_percelig, "*")
## calculate pregnant women
if(fp$pw_artelig[i]){
births.dist <- sweep(fp$frr_cd4[,,i] * hivpop[,h.fert.idx,f.idx,i], 2,
births.by.h.age / (ctapply(pop[p.fert.idx, f.idx, hivn.idx, i], ag.idx[p.fert.idx], sum) + colSums(fp$frr_cd4[,,i] * hivpop[,h.fert.idx,f.idx,i]) + colSums(fp$frr_art[,,,i] * artpop[ ,,h.fert.idx,f.idx,i],,2)), "*")
if(fp$artcd4elig_idx[i] > 1)
art15plus.elig[1:(fp$artcd4elig_idx[i]-1),h.fert.idx-min(h.age15plus.idx)+1,f.idx] <- art15plus.elig[1:(fp$artcd4elig_idx[i]-1),h.fert.idx-min(h.age15plus.idx)+1,f.idx] + births.dist[1:(fp$artcd4elig_idx[i]-1),]
}
## calculate number to initiate ART based on number or percentage
artpop_curr_g <- colSums(artpop[,,h.age15plus.idx,,i],,3) + DT*colSums(gradART[,,h.age15plus.idx,],,3)
artnum.ii <- c(0,0) # number on ART this ts
# Interpolate the ART target between mid-year input values; handle each
# sex independently, with number inputs, percentage inputs, or a
# number-to-percentage transition.
if(DT*ii < 0.5){
for(g in 1:2){
if(!any(fp$art15plus_isperc[g,i-2:1])){ # both number
artnum.ii[g] <- c(fp$art15plus_num[g,i-2:1] %*% c(1-(DT*ii+0.5), DT*ii+0.5))
} else if(all(fp$art15plus_isperc[g,i-2:1])){ # both percentage
artcov.ii <- c(fp$art15plus_num[g,i-2:1] %*% c(1-(DT*ii+0.5), DT*ii+0.5))
artnum.ii[g] <- artcov.ii * (sum(art15plus.elig[,,g]) + artpop_curr_g[g])
} else if(!fp$art15plus_isperc[g,i-2] & fp$art15plus_isperc[g,i-1]){ # transition number to percentage
curr_coverage <- artpop_curr_g[g] / (sum(art15plus.elig[,,g]) + artpop_curr_g[g])
artcov.ii <- curr_coverage + (fp$art15plus_num[g,i-1] - curr_coverage) * DT/(0.5-DT*(ii-1))
artnum.ii[g] <- artcov.ii * (sum(art15plus.elig[,,g]) + artpop_curr_g[g])
}
}
} else {
for(g in 1:2){
if(!any(fp$art15plus_isperc[g,i-1:0])){ # both number
artnum.ii[g] <- c(fp$art15plus_num[g,i-1:0] %*% c(1-(DT*ii-0.5), DT*ii-0.5))
} else if(all(fp$art15plus_isperc[g,i-1:0])) { # both percentage
artcov.ii <- c(fp$art15plus_num[g,i-1:0] %*% c(1-(DT*ii-0.5), DT*ii-0.5))
artnum.ii[g] <- artcov.ii * (sum(art15plus.elig[,,g]) + artpop_curr_g[g])
} else if(!fp$art15plus_isperc[g,i-1] & fp$art15plus_isperc[g,i]){ # transition number to percentage
curr_coverage <- artpop_curr_g[g] / (sum(art15plus.elig[,,g]) + artpop_curr_g[g])
artcov.ii <- curr_coverage + (fp$art15plus_num[g,i] - curr_coverage) * DT/(1.5-DT*(ii-1))
artnum.ii[g] <- artcov.ii * (sum(art15plus.elig[,,g]) + artpop_curr_g[g])
}
}
}
artpop_curr_g <- colSums(artpop[,,h.age15plus.idx,,i],,3) + DT*colSums(gradART[,,h.age15plus.idx,],,3)
art15plus.inits <- pmax(artnum.ii - artpop_curr_g, 0)
## calculate ART initiation distribution
if(!fp$med_cd4init_input[i]){
if(fp$art_alloc_method == 4L){ ## by lowest CD4
## Calculate proportion to be initiated in each CD4 category
artinit <- array(0, dim(art15plus.elig))
remain_artalloc <- art15plus.inits
for(m in hDS:1){
elig_hm <- colSums(art15plus.elig[m,,])
init_prop <- ifelse(elig_hm == 0, elig_hm, pmin(1.0, remain_artalloc / elig_hm, na.rm=TRUE))
artinit[m , , ] <- sweep(art15plus.elig[m,,], 2, init_prop, "*")
remain_artalloc <- remain_artalloc - init_prop * elig_hm
}
} else {
# Weighted allocation: mix of expected-mortality weighting and
# proportional-to-eligible weighting, controlled by art_alloc_mxweight.
expect.mort.weight <- sweep(fp$cd4_mort[, h.age15plus.idx,], 3,
colSums(art15plus.elig * fp$cd4_mort[, h.age15plus.idx,],,2), "/")
artinit.weight <- sweep(fp$art_alloc_mxweight * expect.mort.weight, 3, (1 - fp$art_alloc_mxweight)/colSums(art15plus.elig,,2), "+")
artinit <- pmin(sweep(artinit.weight * art15plus.elig, 3, art15plus.inits, "*"),
art15plus.elig)
## Allocation by average mortality across CD4, trying to match Spectrum
## artelig_by_cd4 <- apply(art15plus.elig, c(1, 3), sum)
## expectmort_by_cd4 <- apply(art15plus.elig * fp$cd4_mort[, h.age15plus.idx,], c(1, 3), sum)
## artinit_dist <- fp$art_alloc_mxweight * sweep(artelig_by_cd4, 2, colSums(artelig_by_cd4), "/") +
## (1 - fp$art_alloc_mxweight) * sweep(expectmort_by_cd4, 2, colSums(expectmort_by_cd4), "/")
## artinit_prob <- sweep(artinit_dist, 2, art15plus.inits, "*") / artelig_by_cd4
## artinit <- sweep(art15plus.elig, c(1, 3), artinit_prob, "*")
## artinit <- pmin(artinit, art15plus.elig, na.rm=TRUE)
}
} else {
# Allocation matching an input median CD4 at initiation: split initiators
# half below / half above the median within the CD4 category bounds.
CD4_LOW_LIM <- c(500, 350, 250, 200, 100, 50, 0)
CD4_UPP_LIM <- c(1000, 500, 350, 250, 200, 100, 50)
medcd4_idx <- fp$med_cd4init_cat[i]
medcat_propbelow <- (fp$median_cd4init[i] - CD4_LOW_LIM[medcd4_idx]) / (CD4_UPP_LIM[medcd4_idx] - CD4_LOW_LIM[medcd4_idx])
elig_below <- colSums(art15plus.elig[medcd4_idx,,,drop=FALSE],,2) * medcat_propbelow
if(medcd4_idx < hDS)
elig_below <- elig_below + colSums(art15plus.elig[(medcd4_idx+1):hDS,,,drop=FALSE],,2)
elig_above <- colSums(art15plus.elig[medcd4_idx,,,drop=FALSE],,2) * (1.0-medcat_propbelow)
if(medcd4_idx > 1)
elig_above <- elig_above + colSums(art15plus.elig[1:(medcd4_idx-1),,,drop=FALSE],,2)
initprob_below <- pmin(art15plus.inits * 0.5 / elig_below, 1.0, na.rm=TRUE)
initprob_above <- pmin(art15plus.inits * 0.5 / elig_above, 1.0, na.rm=TRUE)
initprob_medcat <- initprob_below * medcat_propbelow + initprob_above * (1-medcat_propbelow)
artinit <- array(0, dim=c(hDS, hAG, NG))
if(medcd4_idx < hDS)
artinit[(medcd4_idx+1):hDS,,] <- sweep(art15plus.elig[(medcd4_idx+1):hDS,,,drop=FALSE], 3, initprob_below, "*")
artinit[medcd4_idx,,] <- sweep(art15plus.elig[medcd4_idx,,,drop=FALSE], 3, initprob_medcat, "*")
# NOTE(review): `medcd4_idx > 0` is always TRUE for a valid 1-based
# category index — presumably intended as `> 1`; confirm against C code.
if(medcd4_idx > 0)
artinit[1:(medcd4_idx-1),,] <- sweep(art15plus.elig[1:(medcd4_idx-1),,,drop=FALSE], 3, initprob_above, "*")
}
artinit <- pmin(artinit, hivpop[ , , , i] + DT * grad)
grad[ , h.age15plus.idx, ] <- grad[ , h.age15plus.idx, ] - artinit / DT
gradART[1, , h.age15plus.idx, ] <- gradART[1, , h.age15plus.idx, ] + artinit / DT
artpop[,,,, i] <- artpop[,,,, i] + DT * gradART
}
hivpop[,,,i] <- hivpop[,,,i] + DT * grad
}
## ## Code for calculating new infections once per year to match prevalence (like Spectrum)
## ## incidence
## prev.i <- sum(pop[p.age15to49.idx,,2,i]) / sum(pop[p.age15to49.idx,,,i]) # prevalence age 15 to 49
## incrate15to49.i <- (fp$prev15to49[i] - prev.i)/(1-prev.i)
## Direct incidence input
if(fp$eppmod == "directincid"){
if(fp$incidpopage == 0L) # incidence for 15-49 population
p.incidpop.idx <- p.age15to49.idx
else if(fp$incidpopage == 1L) # incidence for 15+ population
p.incidpop.idx <- p.age15plus.idx
incrate.i <- fp$incidinput[i]
sexinc <- incrate.i*c(1, fp$incrr_sex[i])*sum(pop[p.incidpop.idx,,hivn.idx,i-1])/(sum(pop[p.incidpop.idx,m.idx,hivn.idx,i-1]) + fp$incrr_sex[i]*sum(pop[p.incidpop.idx, f.idx,hivn.idx,i-1]))
agesex.inc <- sweep(fp$incrr_age[,,i], 2, sexinc/(colSums(pop[p.incidpop.idx,,hivn.idx,i-1] * fp$incrr_age[p.incidpop.idx,,i])/colSums(pop[p.incidpop.idx,,hivn.idx,i-1])), "*")
infections[,,i] <- agesex.inc * pop[,,hivn.idx,i-1]
pop[,,hivn.idx,i] <- pop[,,hivn.idx,i] - infections[,,i]
pop[,,hivp.idx,i] <- pop[,,hivp.idx,i] + infections[,,i]
hivpop[,,,i] <- hivpop[,,,i] + sweep(fp$cd4_initdist, 2:3, apply(infections[,,i], 2, ctapply, ag.idx, sum), "*")
incid15to49[i] <- sum(infections[p.age15to49.idx,,i])
}
## adjust population to match target population size
if(exists("popadjust", where=fp) & fp$popadjust){
popadj.prob[,,i] <- fp$targetpop[,,i] / rowSums(pop[,,,i],,2)
hiv.popadj.prob <- apply(popadj.prob[,,i] * pop[,,2,i], 2, ctapply, ag.idx, sum) / apply(pop[,,2,i], 2, ctapply, ag.idx, sum)
hiv.popadj.prob[is.nan(hiv.popadj.prob)] <- 0
pop[,,,i] <- sweep(pop[,,,i], 1:2, popadj.prob[,,i], "*")
hivpop[,,,i] <- sweep(hivpop[,,,i], 2:3, hiv.popadj.prob, "*")
if(i >= fp$tARTstart)
artpop[,,,,i] <- sweep(artpop[,,,,i], 3:4, hiv.popadj.prob, "*")
}
## prevalence among pregnant women
hivn.byage <- ctapply(rowMeans(pop[p.fert.idx, f.idx, hivn.idx,i-1:0]), ag.idx[p.fert.idx], sum)
hivp.byage <- rowMeans(hivpop[,h.fert.idx, f.idx,i-1:0],,2)
artp.byage <- rowMeans(artpop[,,h.fert.idx, f.idx,i-1:0],,3)
pregprev <- sum(births.by.h.age * (1 - hivn.byage / (hivn.byage + colSums(fp$frr_cd4[,,i] * hivp.byage) + colSums(fp$frr_art[,,,i] * artp.byage,,2)))) / sum(births.by.age)
if(i+AGE_START <= PROJ_YEARS)
pregprevlag[i+AGE_START-1] <- pregprev
## prevalence and incidence 15 to 49
prev15to49[i] <- sum(pop[p.age15to49.idx,,hivp.idx,i]) / sum(pop[p.age15to49.idx,,,i])
incid15to49[i] <- sum(incid15to49[i]) / sum(pop[p.age15to49.idx,,hivn.idx,i-1])
}
# Attach all projection outputs as attributes of the population array.
attr(pop, "prev15to49") <- prev15to49
attr(pop, "incid15to49") <- incid15to49
attr(pop, "sexinc") <- sexinc15to49out
attr(pop, "hivpop") <- hivpop
attr(pop, "artpop") <- artpop
attr(pop, "infections") <- infections
attr(pop, "hivdeaths") <- hivdeaths
attr(pop, "natdeaths") <- natdeaths
attr(pop, "popadjust") <- popadj.prob
attr(pop, "pregprevlag") <- pregprevlag
if(fp$eppmod != "directincid"){
attr(pop, "incrate15to49_ts") <- incrate15to49.ts.out
attr(pop, "prev15to49_ts") <- prev15to49.ts.out
}
attr(pop, "entrantprev") <- entrant_prev_out
attr(pop, "hivp_entrants") <- hivp_entrants_out
class(pop) <- "spec"
return(pop)
}
#' Add dimnames to EPP-ASM model output
#'
#' Labels the population array and each of its output attributes (HIV and
#' ART populations, infections, deaths, time-step series, annual series)
#' with age, sex, HIV status, CD4 stage, ART duration, and year names.
#'
#' @param mod output from `simmod()`
#' @param fp fixed parameters input to `simmod()`
#'
#' @return Input `mod` object with dimnames applied to arrays.
#'
#'
#' @export
spec_add_dimnames <- function(mod, fp) {
  nm_age <- fp$ss$AGE_START + seq_len(fp$ss$pAG) - 1L
  nm_sex <- c("male", "female")
  nm_status <- c("negative", "positive")
  nm_year <- fp$ss$proj_start + seq_len(fp$ss$PROJ_YEARS) - 1L
  nm_cd4 <- c(">500", "350-499", "250-349", "200-249", "100-199", "50-99", "<50")
  nm_artdur <- c("art0mos", "art6mos", "art1yr")
  nm_agegr <- c("15-16", "17-19", "20-24", "25-29", "30-34", "35-39",
                "40-44", "45-49", "50+")

  dn_pop <- list(age = nm_age, sex = nm_sex, hivstatus = nm_status, year = nm_year)
  dn_hiv <- list(cd4stage = nm_cd4, age_coarse = nm_agegr, sex = nm_sex, year = nm_year)
  dn_art <- c(list(artdur = nm_artdur), dn_hiv)
  dn_asy <- dn_pop[c("age", "sex", "year")]

  dimnames(mod) <- dn_pop
  for (a in c("hivpop", "aidsdeaths_noart", "artinit"))
    dimnames(attr(mod, a)) <- dn_hiv
  for (a in c("artpop", "aidsdeaths_art"))
    dimnames(attr(mod, a)) <- dn_art
  for (a in c("infections", "hivdeaths", "natdeaths", "popadjust"))
    dimnames(attr(mod, a)) <- dn_asy

  # Time-step series are named by projection step; annual series by year.
  ts_names <- fp$proj.steps[-length(fp$proj.steps)]
  for (a in c("incrate15to49_ts", "prev15to49_ts", "rvec_ts"))
    names(attr(mod, a)) <- ts_names
  for (a in c("pregprevlag", "prev15to49", "pregprev", "incid15to49", "entrantprev"))
    names(attr(mod, a)) <- nm_year

  mod
}
| /R/eppasm.R | no_license | aucarter/eppasm | R | false | false | 20,675 | r |
#' @useDynLib eppasm eppasmC
#' @export
simmod.specfp <- function(fp, VERSION="C"){
if(!exists("popadjust", where=fp))
fp$popadjust <- FALSE
if(!exists("incidmod", where=fp))
fp$incidmod <- "eppspectrum"
if(VERSION != "R"){
fp$eppmodInt <- match(fp$eppmod, c("rtrend", "directincid"), nomatch=0) # 0: r-spline;
fp$incidmodInt <- match(fp$incidmod, c("eppspectrum"))-1L # -1 for 0-based indexing
mod <- .Call(eppasmC, fp)
class(mod) <- "spec"
return(mod)
}
##################################################################################
if(requireNamespace("fastmatch", quietly = TRUE))
ctapply <- fastmatch::ctapply
else
ctapply <- tapply
fp$ss$DT <- 1/fp$ss$hiv_steps_per_year
## Attach state space variables
invisible(list2env(fp$ss, environment())) # put ss variables in environment for convenience
birthslag <- fp$birthslag
pregprevlag <- rep(0, PROJ_YEARS)
## initialize projection
pop <- array(0, c(pAG, NG, pDS, PROJ_YEARS))
pop[,,1,1] <- fp$basepop
hivpop <- array(0, c(hDS, hAG, NG, PROJ_YEARS))
artpop <- array(0, c(hTS, hDS, hAG, NG, PROJ_YEARS))
## initialize output
prev15to49 <- numeric(PROJ_YEARS)
incid15to49 <- numeric(PROJ_YEARS)
sexinc15to49out <- array(NA, c(NG, PROJ_YEARS))
paedsurvout <- rep(NA, PROJ_YEARS)
infections <- array(0, c(pAG, NG, PROJ_YEARS))
hivdeaths <- array(0, c(pAG, NG, PROJ_YEARS))
natdeaths <- array(0, c(pAG, NG, PROJ_YEARS))
popadj.prob <- array(0, c(pAG, NG, PROJ_YEARS))
if(fp$eppmod != "directincid"){
## outputs by timestep
incrate15to49.ts.out <- rep(NA, length(fp$rvec))
rvec <- if(fp$eppmod == "rtrend") rep(NA, length(fp$proj.steps)) else fp$rvec
prev15to49.ts.out <- rep(NA, length(fp$rvec))
}
entrant_prev_out <- numeric(PROJ_YEARS)
hivp_entrants_out <- array(0, c(NG, PROJ_YEARS))
## store last prevalence value (for r-trend model)
prevlast <- 0
for(i in 2:fp$SIM_YEARS){
## ################################### ##
## Single-year population projection ##
## ################################### ##
## age the population
pop[-c(1,pAG),,,i] <- pop[-(pAG-1:0),,,i-1]
pop[pAG,,,i] <- pop[pAG,,,i-1] + pop[pAG-1,,,i-1] # open age group
## Add lagged births into youngest age group
entrant_prev <- fp$entrantprev[,i]
if(exists("popadjust", where=fp) & fp$popadjust){
hivn_entrants <- fp$entrantpop[,i-1]*(1-entrant_prev)
hivp_entrants <- fp$entrantpop[,i-1]*entrant_prev
} else {
hivn_entrants <- birthslag[,i-1]*fp$cumsurv[,i-1]*(1-entrant_prev / fp$paedsurv_lag[i-1]) + fp$cumnetmigr[,i-1]*(1-pregprevlag[i-1]*fp$netmig_hivprob)
hivp_entrants <- birthslag[,i-1]*fp$cumsurv[,i-1]*entrant_prev + fp$cumnetmigr[,i-1]*entrant_prev
}
entrant_prev_out[i] <- sum(hivp_entrants) / sum(hivn_entrants+hivp_entrants)
hivp_entrants_out[,i] <- sum(hivp_entrants)
pop[1,,hivn.idx,i] <- hivn_entrants
pop[1,,hivp.idx,i] <- hivp_entrants
hiv.ag.prob <- pop[aglast.idx,,hivp.idx,i-1] / apply(pop[,,hivp.idx,i-1], 2, ctapply, ag.idx, sum)
hiv.ag.prob[is.nan(hiv.ag.prob)] <- 0
hivpop[,,,i] <- hivpop[,,,i-1]
hivpop[,-hAG,,i] <- hivpop[,-hAG,,i] - sweep(hivpop[,-hAG,,i-1], 2:3, hiv.ag.prob[-hAG,], "*")
hivpop[,-1,,i] <- hivpop[,-1,,i] + sweep(hivpop[,-hAG,,i-1], 2:3, hiv.ag.prob[-hAG,], "*")
hivpop[,1,,i] <- hivpop[,1,,i] + sweep(fp$paedsurv_cd4dist[,,i], 2, hivp_entrants * (1-fp$entrantartcov[,i]), "*")
if(i > fp$tARTstart){
artpop[,,,,i] <- artpop[,,,,i-1]
artpop[,,-hAG,,i] <- artpop[,,-hAG,,i] - sweep(artpop[,,-hAG,,i-1], 3:4, hiv.ag.prob[-hAG,], "*")
artpop[,,-1,,i] <- artpop[,,-1,,i] + sweep(artpop[,,-hAG,,i-1], 3:4, hiv.ag.prob[-hAG,], "*")
artpop[,,1,,i] <- artpop[,,1,,i] + sweep(fp$paedsurv_artcd4dist[,,,i], 3, hivp_entrants * fp$entrantartcov[,i], "*")
}
## survive the population
deaths <- sweep(pop[,,,i], 1:2, (1-fp$Sx[,,i]), "*")
hiv.sx.prob <- 1-apply(deaths[,,2], 2, ctapply, ag.idx, sum) / apply(pop[,,2,i], 2, ctapply, ag.idx, sum)
hiv.sx.prob[is.nan(hiv.sx.prob)] <- 0
pop[,,,i] <- pop[,,,i] - deaths
natdeaths[,,i] <- rowSums(deaths,,2)
hivpop[,,,i] <- sweep(hivpop[,,,i], 2:3, hiv.sx.prob, "*")
if(i > fp$tARTstart)
artpop[,,,,i] <- sweep(artpop[,,,,i], 3:4, hiv.sx.prob, "*")
## net migration
netmigsurv <- fp$netmigr[,,i]*(1+fp$Sx[,,i])/2
mr.prob <- 1+netmigsurv / rowSums(pop[,,,i],,2)
hiv.mr.prob <- apply(mr.prob * pop[,,2,i], 2, ctapply, ag.idx, sum) / apply(pop[,,2,i], 2, ctapply, ag.idx, sum)
hiv.mr.prob[is.nan(hiv.mr.prob)] <- 0
pop[,,,i] <- sweep(pop[,,,i], 1:2, mr.prob, "*")
hivpop[,,,i] <- sweep(hivpop[,,,i], 2:3, hiv.mr.prob, "*")
if(i > fp$tARTstart)
artpop[,,,,i] <- sweep(artpop[,,,,i], 3:4, hiv.mr.prob, "*")
## fertility
births.by.age <- rowSums(pop[p.fert.idx, f.idx,,i-1:0])/2 * fp$asfr[,i]
births.by.h.age <- ctapply(births.by.age, ag.idx[p.fert.idx], sum)
births <- fp$srb[,i] * sum(births.by.h.age)
if(i+AGE_START <= PROJ_YEARS)
birthslag[,i+AGE_START-1] <- births
## ########################## ##
## Disease model simulation ##
## ########################## ##
## events at dt timestep
for(ii in seq_len(hiv_steps_per_year)){
ts <- (i-2)/DT + ii
grad <- array(0, c(hDS, hAG, NG))
if(fp$eppmod != "directincid"){
## incidence
## calculate r(t)
if(fp$eppmod %in% c("rtrend", "rtrend_rw"))
rvec[ts] <- calc_rtrend_rt(fp$proj.steps[ts], fp, rvec[ts-1], prevlast, pop, i, ii)
else
rvec[ts] <- fp$rvec[ts]
## number of infections by age / sex
infections.ts <- calc_infections_eppspectrum(fp, pop, hivpop, artpop, i, ii, rvec[ts])
incrate15to49.ts.out[ts] <- attr(infections.ts, "incrate15to49.ts")
prev15to49.ts.out[ts] <- attr(infections.ts, "prevcurr")
prevlast <- attr(infections.ts, "prevcurr")
pop[,,hivn.idx,i] <- pop[,,hivn.idx,i] - DT*infections.ts
pop[,,hivp.idx,i] <- pop[,,hivp.idx,i] + DT*infections.ts
infections[,,i] <- infections[,,i] + DT*infections.ts
grad <- grad + sweep(fp$cd4_initdist, 2:3, apply(infections.ts, 2, ctapply, ag.idx, sum), "*")
incid15to49[i] <- incid15to49[i] + sum(DT*infections.ts[p.age15to49.idx,])
}
## disease progression and mortality
grad[-hDS,,] <- grad[-hDS,,] - fp$cd4_prog * hivpop[-hDS,,,i] # remove cd4 stage progression (untreated)
grad[-1,,] <- grad[-1,,] + fp$cd4_prog * hivpop[-hDS,,,i] # add cd4 stage progression (untreated)
if(fp$scale_cd4_mort == 1){
cd4mx_scale <- hivpop[,,,i] / (hivpop[,,,i] + colSums(artpop[,,,,i]))
cd4mx_scale[!is.finite(cd4mx_scale)] <- 1.0
cd4_mort_ts <- fp$cd4_mort * cd4mx_scale
} else
cd4_mort_ts <- fp$cd4_mort
grad <- grad - cd4_mort_ts * hivpop[,,,i] # HIV mortality, untreated
## Remove hivdeaths from pop
hivdeaths.ts <- DT*(colSums(cd4_mort_ts * hivpop[,,,i]) + colSums(fp$art_mort * fp$artmx_timerr[ , i] * artpop[,,,,i],,2))
calc.agdist <- function(x) {d <- x/rep(ctapply(x, ag.idx, sum), h.ag.span); d[is.na(d)] <- 0; d}
hivdeaths_p.ts <- apply(hivdeaths.ts, 2, rep, h.ag.span) * apply(pop[,,hivp.idx,i], 2, calc.agdist) # HIV deaths by single-year age
pop[,,2,i] <- pop[,,2,i] - hivdeaths_p.ts
hivdeaths[,,i] <- hivdeaths[,,i] + hivdeaths_p.ts
## ART initiation
if(i >= fp$tARTstart) {
gradART <- array(0, c(hTS, hDS, hAG, NG))
## progression and mortality
gradART[1:(hTS-1),,,] <- gradART[1:(hTS-1),,,] - 1.0 / fp$ss$h_art_stage_dur * artpop[1:(hTS-1),,,, i] # remove ART duration progression
gradART[2:hTS,,,] <- gradART[2:hTS,,,] + 1.0 / fp$ss$h_art_stage_dur * artpop[1:(hTS-1),,,, i] # add ART duration progression
gradART <- gradART - fp$art_mort * fp$artmx_timerr[ , i] * artpop[,,,,i] # ART mortality
## ART dropout
## remove proportion from all adult ART groups back to untreated pop
grad <- grad + fp$art_dropout[i]*colSums(artpop[,,,,i])
gradART <- gradART - fp$art_dropout[i]*artpop[,,,,i]
## calculate number eligible for ART
artcd4_percelig <- 1 - (1-rep(0:1, times=c(fp$artcd4elig_idx[i]-1, hDS - fp$artcd4elig_idx[i]+1))) *
(1-rep(c(0, fp$who34percelig), c(2, hDS-2))) *
(1-rep(fp$specpop_percelig[i], hDS))
art15plus.elig <- sweep(hivpop[,h.age15plus.idx,,i], 1, artcd4_percelig, "*")
## calculate pregnant women
if(fp$pw_artelig[i]){
births.dist <- sweep(fp$frr_cd4[,,i] * hivpop[,h.fert.idx,f.idx,i], 2,
births.by.h.age / (ctapply(pop[p.fert.idx, f.idx, hivn.idx, i], ag.idx[p.fert.idx], sum) + colSums(fp$frr_cd4[,,i] * hivpop[,h.fert.idx,f.idx,i]) + colSums(fp$frr_art[,,,i] * artpop[ ,,h.fert.idx,f.idx,i],,2)), "*")
if(fp$artcd4elig_idx[i] > 1)
art15plus.elig[1:(fp$artcd4elig_idx[i]-1),h.fert.idx-min(h.age15plus.idx)+1,f.idx] <- art15plus.elig[1:(fp$artcd4elig_idx[i]-1),h.fert.idx-min(h.age15plus.idx)+1,f.idx] + births.dist[1:(fp$artcd4elig_idx[i]-1),]
}
## calculate number to initiate ART based on number or percentage
artpop_curr_g <- colSums(artpop[,,h.age15plus.idx,,i],,3) + DT*colSums(gradART[,,h.age15plus.idx,],,3)
artnum.ii <- c(0,0) # number on ART this ts
if(DT*ii < 0.5){
for(g in 1:2){
if(!any(fp$art15plus_isperc[g,i-2:1])){ # both number
artnum.ii[g] <- c(fp$art15plus_num[g,i-2:1] %*% c(1-(DT*ii+0.5), DT*ii+0.5))
} else if(all(fp$art15plus_isperc[g,i-2:1])){ # both percentage
artcov.ii <- c(fp$art15plus_num[g,i-2:1] %*% c(1-(DT*ii+0.5), DT*ii+0.5))
artnum.ii[g] <- artcov.ii * (sum(art15plus.elig[,,g]) + artpop_curr_g[g])
} else if(!fp$art15plus_isperc[g,i-2] & fp$art15plus_isperc[g,i-1]){ # transition number to percentage
curr_coverage <- artpop_curr_g[g] / (sum(art15plus.elig[,,g]) + artpop_curr_g[g])
artcov.ii <- curr_coverage + (fp$art15plus_num[g,i-1] - curr_coverage) * DT/(0.5-DT*(ii-1))
artnum.ii[g] <- artcov.ii * (sum(art15plus.elig[,,g]) + artpop_curr_g[g])
}
}
} else {
for(g in 1:2){
if(!any(fp$art15plus_isperc[g,i-1:0])){ # both number
artnum.ii[g] <- c(fp$art15plus_num[g,i-1:0] %*% c(1-(DT*ii-0.5), DT*ii-0.5))
} else if(all(fp$art15plus_isperc[g,i-1:0])) { # both percentage
artcov.ii <- c(fp$art15plus_num[g,i-1:0] %*% c(1-(DT*ii-0.5), DT*ii-0.5))
artnum.ii[g] <- artcov.ii * (sum(art15plus.elig[,,g]) + artpop_curr_g[g])
} else if(!fp$art15plus_isperc[g,i-1] & fp$art15plus_isperc[g,i]){ # transition number to percentage
curr_coverage <- artpop_curr_g[g] / (sum(art15plus.elig[,,g]) + artpop_curr_g[g])
artcov.ii <- curr_coverage + (fp$art15plus_num[g,i] - curr_coverage) * DT/(1.5-DT*(ii-1))
artnum.ii[g] <- artcov.ii * (sum(art15plus.elig[,,g]) + artpop_curr_g[g])
}
}
}
artpop_curr_g <- colSums(artpop[,,h.age15plus.idx,,i],,3) + DT*colSums(gradART[,,h.age15plus.idx,],,3)
art15plus.inits <- pmax(artnum.ii - artpop_curr_g, 0)
## calculate ART initiation distribution
if(!fp$med_cd4init_input[i]){
if(fp$art_alloc_method == 4L){ ## by lowest CD4
## Calculate proportion to be initiated in each CD4 category
artinit <- array(0, dim(art15plus.elig))
remain_artalloc <- art15plus.inits
for(m in hDS:1){
elig_hm <- colSums(art15plus.elig[m,,])
init_prop <- ifelse(elig_hm == 0, elig_hm, pmin(1.0, remain_artalloc / elig_hm, na.rm=TRUE))
artinit[m , , ] <- sweep(art15plus.elig[m,,], 2, init_prop, "*")
remain_artalloc <- remain_artalloc - init_prop * elig_hm
}
} else {
expect.mort.weight <- sweep(fp$cd4_mort[, h.age15plus.idx,], 3,
colSums(art15plus.elig * fp$cd4_mort[, h.age15plus.idx,],,2), "/")
artinit.weight <- sweep(fp$art_alloc_mxweight * expect.mort.weight, 3, (1 - fp$art_alloc_mxweight)/colSums(art15plus.elig,,2), "+")
artinit <- pmin(sweep(artinit.weight * art15plus.elig, 3, art15plus.inits, "*"),
art15plus.elig)
## Allocation by average mortality across CD4, trying to match Spectrum
## artelig_by_cd4 <- apply(art15plus.elig, c(1, 3), sum)
## expectmort_by_cd4 <- apply(art15plus.elig * fp$cd4_mort[, h.age15plus.idx,], c(1, 3), sum)
## artinit_dist <- fp$art_alloc_mxweight * sweep(artelig_by_cd4, 2, colSums(artelig_by_cd4), "/") +
## (1 - fp$art_alloc_mxweight) * sweep(expectmort_by_cd4, 2, colSums(expectmort_by_cd4), "/")
## artinit_prob <- sweep(artinit_dist, 2, art15plus.inits, "*") / artelig_by_cd4
## artinit <- sweep(art15plus.elig, c(1, 3), artinit_prob, "*")
## artinit <- pmin(artinit, art15plus.elig, na.rm=TRUE)
}
} else {
CD4_LOW_LIM <- c(500, 350, 250, 200, 100, 50, 0)
CD4_UPP_LIM <- c(1000, 500, 350, 250, 200, 100, 50)
medcd4_idx <- fp$med_cd4init_cat[i]
medcat_propbelow <- (fp$median_cd4init[i] - CD4_LOW_LIM[medcd4_idx]) / (CD4_UPP_LIM[medcd4_idx] - CD4_LOW_LIM[medcd4_idx])
elig_below <- colSums(art15plus.elig[medcd4_idx,,,drop=FALSE],,2) * medcat_propbelow
if(medcd4_idx < hDS)
elig_below <- elig_below + colSums(art15plus.elig[(medcd4_idx+1):hDS,,,drop=FALSE],,2)
elig_above <- colSums(art15plus.elig[medcd4_idx,,,drop=FALSE],,2) * (1.0-medcat_propbelow)
if(medcd4_idx > 1)
elig_above <- elig_above + colSums(art15plus.elig[1:(medcd4_idx-1),,,drop=FALSE],,2)
initprob_below <- pmin(art15plus.inits * 0.5 / elig_below, 1.0, na.rm=TRUE)
initprob_above <- pmin(art15plus.inits * 0.5 / elig_above, 1.0, na.rm=TRUE)
initprob_medcat <- initprob_below * medcat_propbelow + initprob_above * (1-medcat_propbelow)
artinit <- array(0, dim=c(hDS, hAG, NG))
if(medcd4_idx < hDS)
artinit[(medcd4_idx+1):hDS,,] <- sweep(art15plus.elig[(medcd4_idx+1):hDS,,,drop=FALSE], 3, initprob_below, "*")
artinit[medcd4_idx,,] <- sweep(art15plus.elig[medcd4_idx,,,drop=FALSE], 3, initprob_medcat, "*")
if(medcd4_idx > 0)
artinit[1:(medcd4_idx-1),,] <- sweep(art15plus.elig[1:(medcd4_idx-1),,,drop=FALSE], 3, initprob_above, "*")
}
artinit <- pmin(artinit, hivpop[ , , , i] + DT * grad)
grad[ , h.age15plus.idx, ] <- grad[ , h.age15plus.idx, ] - artinit / DT
gradART[1, , h.age15plus.idx, ] <- gradART[1, , h.age15plus.idx, ] + artinit / DT
artpop[,,,, i] <- artpop[,,,, i] + DT * gradART
}
hivpop[,,,i] <- hivpop[,,,i] + DT * grad
}
## ## Code for calculating new infections once per year to match prevalence (like Spectrum)
## ## incidence
## prev.i <- sum(pop[p.age15to49.idx,,2,i]) / sum(pop[p.age15to49.idx,,,i]) # prevalence age 15 to 49
## incrate15to49.i <- (fp$prev15to49[i] - prev.i)/(1-prev.i)
## Direct incidence input
if(fp$eppmod == "directincid"){
if(fp$incidpopage == 0L) # incidence for 15-49 population
p.incidpop.idx <- p.age15to49.idx
else if(fp$incidpopage == 1L) # incidence for 15+ population
p.incidpop.idx <- p.age15plus.idx
incrate.i <- fp$incidinput[i]
sexinc <- incrate.i*c(1, fp$incrr_sex[i])*sum(pop[p.incidpop.idx,,hivn.idx,i-1])/(sum(pop[p.incidpop.idx,m.idx,hivn.idx,i-1]) + fp$incrr_sex[i]*sum(pop[p.incidpop.idx, f.idx,hivn.idx,i-1]))
agesex.inc <- sweep(fp$incrr_age[,,i], 2, sexinc/(colSums(pop[p.incidpop.idx,,hivn.idx,i-1] * fp$incrr_age[p.incidpop.idx,,i])/colSums(pop[p.incidpop.idx,,hivn.idx,i-1])), "*")
infections[,,i] <- agesex.inc * pop[,,hivn.idx,i-1]
pop[,,hivn.idx,i] <- pop[,,hivn.idx,i] - infections[,,i]
pop[,,hivp.idx,i] <- pop[,,hivp.idx,i] + infections[,,i]
hivpop[,,,i] <- hivpop[,,,i] + sweep(fp$cd4_initdist, 2:3, apply(infections[,,i], 2, ctapply, ag.idx, sum), "*")
incid15to49[i] <- sum(infections[p.age15to49.idx,,i])
}
## adjust population to match target population size
if(exists("popadjust", where=fp) & fp$popadjust){
popadj.prob[,,i] <- fp$targetpop[,,i] / rowSums(pop[,,,i],,2)
hiv.popadj.prob <- apply(popadj.prob[,,i] * pop[,,2,i], 2, ctapply, ag.idx, sum) / apply(pop[,,2,i], 2, ctapply, ag.idx, sum)
hiv.popadj.prob[is.nan(hiv.popadj.prob)] <- 0
pop[,,,i] <- sweep(pop[,,,i], 1:2, popadj.prob[,,i], "*")
hivpop[,,,i] <- sweep(hivpop[,,,i], 2:3, hiv.popadj.prob, "*")
if(i >= fp$tARTstart)
artpop[,,,,i] <- sweep(artpop[,,,,i], 3:4, hiv.popadj.prob, "*")
}
## prevalence among pregnant women
hivn.byage <- ctapply(rowMeans(pop[p.fert.idx, f.idx, hivn.idx,i-1:0]), ag.idx[p.fert.idx], sum)
hivp.byage <- rowMeans(hivpop[,h.fert.idx, f.idx,i-1:0],,2)
artp.byage <- rowMeans(artpop[,,h.fert.idx, f.idx,i-1:0],,3)
pregprev <- sum(births.by.h.age * (1 - hivn.byage / (hivn.byage + colSums(fp$frr_cd4[,,i] * hivp.byage) + colSums(fp$frr_art[,,,i] * artp.byage,,2)))) / sum(births.by.age)
if(i+AGE_START <= PROJ_YEARS)
pregprevlag[i+AGE_START-1] <- pregprev
## prevalence and incidence 15 to 49
prev15to49[i] <- sum(pop[p.age15to49.idx,,hivp.idx,i]) / sum(pop[p.age15to49.idx,,,i])
incid15to49[i] <- sum(incid15to49[i]) / sum(pop[p.age15to49.idx,,hivn.idx,i-1])
}
attr(pop, "prev15to49") <- prev15to49
attr(pop, "incid15to49") <- incid15to49
attr(pop, "sexinc") <- sexinc15to49out
attr(pop, "hivpop") <- hivpop
attr(pop, "artpop") <- artpop
attr(pop, "infections") <- infections
attr(pop, "hivdeaths") <- hivdeaths
attr(pop, "natdeaths") <- natdeaths
attr(pop, "popadjust") <- popadj.prob
attr(pop, "pregprevlag") <- pregprevlag
if(fp$eppmod != "directincid"){
attr(pop, "incrate15to49_ts") <- incrate15to49.ts.out
attr(pop, "prev15to49_ts") <- prev15to49.ts.out
}
attr(pop, "entrantprev") <- entrant_prev_out
attr(pop, "hivp_entrants") <- hivp_entrants_out
class(pop) <- "spec"
return(pop)
}
#' Add dimnames to EPP-ASM model output
#'
#' Attaches human-readable dimension names (ages, sexes, HIV status, CD4
#' stages, ART duration categories, and calendar years) to the population
#' array returned by `simmod()` and to all of its array and vector
#' attributes.
#'
#' @param mod output from `simmod()`
#' @param fp fixed parameters input to `simmod()`
#'
#' @return Input `mod` object with dimnames applied to arrays.
#'
#'
#' @export
spec_add_dimnames <- function(mod, fp) {

  ## Category labels derived from the state-space description in fp$ss.
  ages <- fp$ss$AGE_START + seq_len(fp$ss$pAG) - 1L   # consecutive single ages
  sexes <- c("male", "female")
  status <- c("negative", "positive")                 # HIV status
  years <- fp$ss$proj_start + seq_len(fp$ss$PROJ_YEARS) - 1L
  cd4stages <- c(">500", "350-499", "250-349", "200-249",
                 "100-199", "50-99", "<50")
  artdur <- c("art0mos", "art6mos", "art1yr")         # duration on ART
  ages_coarse <- c("15-16", "17-19", "20-24", "25-29", "30-34",
                   "35-39", "40-44", "45-49", "50+")

  ## Dimname templates for the array layouts used by the model output.
  dn_pop <- list(age = ages, sex = sexes, hivstatus = status, year = years)
  dn_hiv <- list(cd4stage = cd4stages, age_coarse = ages_coarse,
                 sex = sexes, year = years)
  dn_art <- c(list(artdur = artdur), dn_hiv)
  dn_asy <- dn_pop[c("age", "sex", "year")]

  dimnames(mod) <- dn_pop

  ## Array attributes, grouped by their layout.
  for (a in c("infections", "hivdeaths", "natdeaths", "popadjust"))
    dimnames(attr(mod, a)) <- dn_asy
  for (a in c("hivpop", "aidsdeaths_noart", "artinit"))
    dimnames(attr(mod, a)) <- dn_hiv
  for (a in c("artpop", "aidsdeaths_art"))
    dimnames(attr(mod, a)) <- dn_art

  ## Per-time-step outputs are named by projection step; annual outputs
  ## are named by calendar year.
  ts_labels <- fp$proj.steps[-length(fp$proj.steps)]
  for (a in c("incrate15to49_ts", "prev15to49_ts", "rvec_ts"))
    names(attr(mod, a)) <- ts_labels
  for (a in c("pregprevlag", "prev15to49", "pregprev",
              "incid15to49", "entrantprev"))
    names(attr(mod, a)) <- years

  mod
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyse_randomKde2d.R
\name{analyse_randomKde2d}
\alias{analyse_randomKde2d}
\title{Perform analysis of random 2d distributions}
\usage{
analyse_randomKde2d(nfields=100, nstars, maxX, maxY, nKde=50,
showStats=FALSE, returnStats=TRUE)
}
\arguments{
\item{nfields}{an integer with the number of individual field realisations}
\item{nstars}{an integer with the number of stars to consider}
\item{maxX}{the length of the field in X}
\item{maxY}{the length of the field in Y}
\item{nKde}{the number of samplings of the kernel in each direction}
\item{showStats}{a boolean indicating if the user wants to see statistics}
\item{returnStats}{a boolean indicating if the user wants statistics to be returned}
}
\value{
A data frame with the \code{mean} and \code{sd} fields containing the results
of the random field analysis.
}
\description{
\code{analyse_randomKde2d} will compute statistics from uniformly randomly
created 2D fields based on Kernel Density Estimations (obtained by calling \code{\link{create_randomKde2d}}).
}
\examples{
# Runs the analysis on random fields
toyRes <- analyse_randomKde2d(100, 200, 100, 100, showStats=TRUE)
# Clean the environment
rm(toyRes)
}
\author{
Alberto Krone-Martins, Andre Moitinho
}
\keyword{utilities}
| /man/analyse_randomKde2d.Rd | no_license | cran/UPMASK | R | false | true | 1,327 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyse_randomKde2d.R
\name{analyse_randomKde2d}
\alias{analyse_randomKde2d}
\title{Perform analysis of random 2d distributions}
\usage{
analyse_randomKde2d(nfields=100, nstars, maxX, maxY, nKde=50,
showStats=FALSE, returnStats=TRUE)
}
\arguments{
\item{nfields}{an integer with the number of individual field realisations}
\item{nstars}{an integer with the number of stars to consider}
\item{maxX}{the length of the field in X}
\item{maxY}{the length of the field in Y}
\item{nKde}{the number of samplings of the kernel in each direction}
\item{showStats}{a boolean indicating if the user wants to see statistics}
\item{returnStats}{a boolean indicating if the user wants statistics to be returned}
}
\value{
A data frame with the \code{mean} and \code{sd} fields containing the results
of the random field analysis.
}
\description{
\code{analyse_randomKde2d} will compute statistics from uniformly randomly
created 2D fields based on Kernel Density Estimations (obtained by calling \code{\link{create_randomKde2d}}).
}
\examples{
# Runs the analysis on random fields
toyRes <- analyse_randomKde2d(100, 200, 100, 100, showStats=TRUE)
# Clean the environment
rm(toyRes)
}
\author{
Alberto Krone-Martins, Andre Moitinho
}
\keyword{utilities}
|
# libFuzzer-generated regression input for mcga:::ByteCodeMutation:
# a single byte-coded value mutated with a vanishingly small probability.
testlist <- list(bytes1 = 771751936L, pmutation = 1.30620522460846e-300)
result <- mcga:::ByteCodeMutation(bytes1 = testlist$bytes1,
                                  pmutation = testlist$pmutation)
str(result) | /mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612886986-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 136 | r | testlist <- list(bytes1 = 771751936L, pmutation = 1.30620522460846e-300)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result) |
# libFuzzer-generated regression input for esreg::G1_fun:
# exercises the function with a denormal double close to zero.
testlist <- list(type = 0L, z = 2.06376160924347e-319)
result <- esreg::G1_fun(type = testlist$type, z = testlist$z)
str(result) | /esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609890391-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 108 | r | testlist <- list(type = 0L, z = 2.06376160924347e-319)
result <- do.call(esreg::G1_fun,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control.utilities.R
\name{ult}
\alias{ult}
\title{Extract or replace the *ult*imate (last) element of a vector or a list, or an element counting from the end.}
\usage{
ult(x, i = 1L)
}
\arguments{
\item{x}{a vector or a list.}
\item{i}{index from the end of the list to extract or replace (where 1 is the last element, 2 is the penultimate element, etc.).}
}
\value{
An element of `x`.
}
\description{
Extract or replace the *ult*imate (last) element of a vector or a list, or an element counting from the end.
}
\examples{
x <- 1:5
(last <- ult(x))
(penultimate <- ult(x, 2)) # 2nd last.
\dontshow{
stopifnot(last==5)
stopifnot(penultimate==4)
}
}
| /man/ult.Rd | no_license | Edouard-Legoupil/RDS | R | false | true | 730 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/control.utilities.R
\name{ult}
\alias{ult}
\title{Extract or replace the *ult*imate (last) element of a vector or a list, or an element counting from the end.}
\usage{
ult(x, i = 1L)
}
\arguments{
\item{x}{a vector or a list.}
\item{i}{index from the end of the list to extract or replace (where 1 is the last element, 2 is the penultimate element, etc.).}
}
\value{
An element of `x`.
}
\description{
Extract or replace the *ult*imate (last) element of a vector or a list, or an element counting from the end.
}
\examples{
x <- 1:5
(last <- ult(x))
(penultimate <- ult(x, 2)) # 2nd last.
\dontshow{
stopifnot(last==5)
stopifnot(penultimate==4)
}
}
|
## ----setup, include=FALSE-----------------------------------------------------
# Purled R code of the TropFishR ELEFAN vignette: von Bertalanffy growth
# function (VBGF) illustrations, length-frequency (lfq) restructuring, and
# growth parameter estimation with ELEFAN, ELEFAN_SA (simulated annealing)
# and ELEFAN_GA (genetic algorithm).  knitr chunk defaults for the build:
knitr::opts_chunk$set(
echo = TRUE,
fig.width=6, fig.height=5
)
## -----------------------------------------------------------------------------
# VBGF curves for three K values: K controls how quickly length
# approaches the asymptotic length Linf (here 20).
library(TropFishR)
t <- seq(-0.2, 3, length.out = 200)
K <- c(2, 1, 0.5)
COL <- rep(1,3)
LTY <- 1:3
for(i in seq(K)){
Lt <- VBGF(param = list(Linf = 20, K = K[i], t0 = -0.1), t = t)
if(i == 1){
# first curve sets up the canvas plus reference marks for t0 and Linf
plot(t, Lt, t="l", ylim = c(0,22), yaxs="i", col = COL[i], lty = LTY[i])
abline(v = 0, col = 8, lty = 3)
abline(h = 20, col = 3, lty = 3)
points(x = -0.1, y = 0, pch = 16, col = 4)
text(x = -0.1, y = 0, labels = expression(italic(t[0])), adj=c(1,-0.5), col=4)
text(x = -0.1, y = 20, labels = expression(italic(L[infinity])), adj=c(1,-0.5), col=3)
legend("bottomright", legend = paste("K =", K), lty=LTY, col=COL, bty="n")
}else{
lines(t, Lt, col = COL[i], lty = LTY[i])
}
}
## -----------------------------------------------------------------------------
# Seasonally oscillating VBGF: vary the oscillation amplitude C from 0 to 1.
library(TropFishR)
t <- seq(-0.2, 3, length.out = 200)
Lt <- VBGF(param = list(Linf = 20, K = 1, t0 = -0.1, ts = 0, C=0), t = t)
Cs <- seq(0.25,1,0.25)
COLs <- 1:5
plot(t, Lt, t="l", ylim=c(0,22), yaxs="i", col=1)
for(i in seq(Cs)){
lines(t, VBGF(param = list(Linf = 20, K = 1, t0 = -0.1, ts = 0, C=Cs[i]), t = t), col=COLs[i+1])
}
legend("bottomright", legend=paste("C =", c(0,Cs)), lty=1, col=COLs, bty="n")
abline(v=0, col = 8, lty = 3)
abline(h = 20, col = 8, lty=3)
## -----------------------------------------------------------------------------
# Assemble a minimal length-frequency ("lfq") list from the alba dataset.
data("alba")
tmplfq <- list(
midLengths = alba$midLengths,
dates = alba$dates,
catch = alba$catch
)
## -----------------------------------------------------------------------------
# Setting the class enables the lfq plot method.
class(tmplfq) <- "lfq"
plot(tmplfq, Fname="catch", hist.sc = 1)
## -----------------------------------------------------------------------------
# Restructure the data with a moving average over MA = 7 length classes.
alba <- lfqRestructure(alba, MA = 7)
plot(alba, hist.sc = 0.75)
## -----------------------------------------------------------------------------
# Overlay a growth curve with hand-picked parameters on restructured data.
alba <- lfqRestructure(alba, MA=7)
plot(alba, hist.col = c("white", "black"),
image.col = c(rep(rgb(1,0.8,0.8),1000), "white", rep(rgb(0.8,0.8,1),1000)),
ylim = c(0,max(alba$midLengths+0.5)))
tmp <- lfqFitCurves(alba, par = list(Linf=11, K=2.5, t_anchor=0.5),
draw = TRUE, col=4, lty=2)
## -----------------------------------------------------------------------------
# Powell-Wetherall method: point estimate and confidence interval for
# Linf, used below to bound the optimisers' search space.
PW <- powell_wetherall(alba, catch_columns = 1:7, reg_int = c(2,9) )
PW$Linf_est
PW$confidenceInt_Linf
## -----------------------------------------------------------------------------
# Classical ELEFAN: grid search over (Linf, K), anchored at a "cross" point.
alba2 <- ELEFAN(
lfq = alba, MA = 7,
Linf_range = seq(7, 20, length.out = 30),
K_range = exp(seq(log(0.1),log(4), length.out = 30)),
method = "cross",
cross.date = alba$dates[3],
cross.midLength = alba$midLengths[5],
contour = TRUE, add.values = FALSE,
hide.progressbar = TRUE # change to 'FALSE' to follow algorithm's progression
)
points(alba2$par["Linf"], alba2$par["K"], pch="*", cex=2, col=2)
unlist(alba2$par)
alba2$Rn_max
## -----------------------------------------------------------------------------
# Best-scoring growth curve plus the chosen crossing point.
plot(alba2)
points(alba$dates[3], alba$midLengths[5], pch="*", cex=2, col=2)
## -----------------------------------------------------------------------------
# ELEFAN with simulated annealing; Linf bounds taken from the PW interval.
set.seed(1)
alba3 <- ELEFAN_SA(
lfq = alba,
seasonalised = FALSE,
init_par = alba2$par[1:5],
low_par = list(Linf=PW$confidenceInt_Linf[1], K=1, t_anchor=0, ts=0, C=0),
up_par = list(Linf=PW$confidenceInt_Linf[2], K=4, t_anchor=1, ts=1, C=1),
SA_temp = 2e5,
SA_time = 60,
maxit = 400,
MA = 7,
plot.score = TRUE,
verbose = FALSE
)
unlist(alba3$par)
alba3$Rn_max
## -----------------------------------------------------------------------------
plot(alba3)
## -----------------------------------------------------------------------------
# ELEFAN with a genetic algorithm (non-seasonalised fit).
set.seed(1)
alba4 <- ELEFAN_GA(
lfq = alba,
seasonalised = FALSE,
low_par = list(Linf=PW$confidenceInt_Linf[1], K=1, t_anchor=0, ts=0, C=0),
up_par = list(Linf=PW$confidenceInt_Linf[2], K=4, t_anchor=1, ts=1, C=1),
popSize = 60,
pmutation = 0.2,
maxiter = 100,
run = 20,
MA = 7,
plot.score = TRUE,
monitor = FALSE,
parallel = FALSE
)
unlist(alba4$par)
alba4$Rn_max
## -----------------------------------------------------------------------------
plot(alba4)
## -----------------------------------------------------------------------------
# Same GA, now fitting the seasonalised VBGF (ts and C estimated as well).
set.seed(1)
alba5 <- ELEFAN_GA(
lfq = alba,
seasonalised = TRUE,
low_par = list(Linf=PW$confidenceInt_Linf[1], K=0.1, t_anchor=0, ts=0, C=0),
up_par = list(Linf=PW$confidenceInt_Linf[2], K=4, t_anchor=1, ts=1, C=1),
popSize = 60,
pmutation = 0.2,
maxiter = 100,
run = 20,
MA = 7,
plot.score = TRUE,
monitor = FALSE,
parallel = FALSE
)
unlist(alba5$par)
alba5$Rn_max
plot(alba5)
## -----------------------------------------------------------------------------
# Known parameters used to simulate the synLFQ4 dataset (for validation).
true_par <- list(Linf = 80, K = 0.5, t_anchor = 0.25,C = 0.75, ts = 0.5, phiL = 3.51)
## -----------------------------------------------------------------------------
# Re-estimate the simulation parameters from the data with the GA.
set.seed(1)
data("synLFQ4")
synLFQ4 <- ELEFAN_GA(
lfq = synLFQ4,
seasonalised = TRUE,
low_par = list(Linf=70, K=0.1, t_anchor=0, ts=0, C=0),
up_par = list(Linf=110, K=1, t_anchor=1, ts=1, C=1),
popSize = 60,
pmutation = 0.2,
maxiter = 100,
run = 20,
MA = 11,
plot.score = TRUE,
monitor = FALSE,
parallel = FALSE
)
## -----------------------------------------------------------------------------
# Compare the true simulation parameters with the GA estimates, adding the
# goodness-of-fit score (Rn) achieved by each parameter set.
tmp <- as.data.frame(rbind(unlist(true_par), unlist(synLFQ4$par)))
rownames(tmp) <- c("true", "estimated")
# Bug fix: the two Rn values were swapped relative to the row labels.
# The "true" row must carry the score of true_par and the "estimated"
# row the score of the fitted parameters (synLFQ4$Rn_max).
tmp$Rn <- c(lfqFitCurves(synLFQ4, par = true_par)$Rn_max, synLFQ4$Rn_max)
tmp <- round(tmp,3)
tmp
## -----------------------------------------------------------------------------
# Visual comparison: true parameters (col 8, solid) vs estimated
# parameters (col 4, dashed) drawn over the length-frequency data.
plot(synLFQ4, draw = FALSE)
tmp <- lfqFitCurves(synLFQ4, par = true_par, col=8, lty=1, draw = TRUE)
tmp <- lfqFitCurves(synLFQ4, par = synLFQ4$par, col=4, lty=2, draw = TRUE)
legend("top", ncol=2, legend = c("true", "estimated"), col=c(8,4), lty=c(1,2))
## -----------------------------------------------------------------------------
# Same GA fit, but with a variable mutation probability supplied via
# GA::ga_pmutation (p0 = 0.5, p = 0.1) instead of a fixed pmutation.
synLFQ4 <- ELEFAN_GA(
lfq = synLFQ4,
seasonalised = TRUE,
low_par = list(Linf=70, K=0.1, t_anchor=0, ts=0, C=0),
up_par = list(Linf=110, K=1, t_anchor=1, ts=1, C=1),
popSize = 60,
pmutation = function(...) GA::ga_pmutation(..., p0=0.5, p=0.1),
maxiter = 100,
run = 20,
MA = 11,
plot.score = TRUE,
monitor = FALSE,
parallel = FALSE
)
| /inst/doc/Using_TropFishR_ELEFAN_functions.R | no_license | cran/TropFishR | R | false | false | 6,480 | r | ## ----setup, include=FALSE-----------------------------------------------------
# Purled R code of the TropFishR ELEFAN vignette: von Bertalanffy growth
# function (VBGF) illustrations, length-frequency (lfq) restructuring, and
# growth parameter estimation with ELEFAN, ELEFAN_SA (simulated annealing)
# and ELEFAN_GA (genetic algorithm).  knitr chunk defaults for the build:
knitr::opts_chunk$set(
echo = TRUE,
fig.width=6, fig.height=5
)
## -----------------------------------------------------------------------------
# VBGF curves for three K values: K controls how quickly length
# approaches the asymptotic length Linf (here 20).
library(TropFishR)
t <- seq(-0.2, 3, length.out = 200)
K <- c(2, 1, 0.5)
COL <- rep(1,3)
LTY <- 1:3
for(i in seq(K)){
Lt <- VBGF(param = list(Linf = 20, K = K[i], t0 = -0.1), t = t)
if(i == 1){
# first curve sets up the canvas plus reference marks for t0 and Linf
plot(t, Lt, t="l", ylim = c(0,22), yaxs="i", col = COL[i], lty = LTY[i])
abline(v = 0, col = 8, lty = 3)
abline(h = 20, col = 3, lty = 3)
points(x = -0.1, y = 0, pch = 16, col = 4)
text(x = -0.1, y = 0, labels = expression(italic(t[0])), adj=c(1,-0.5), col=4)
text(x = -0.1, y = 20, labels = expression(italic(L[infinity])), adj=c(1,-0.5), col=3)
legend("bottomright", legend = paste("K =", K), lty=LTY, col=COL, bty="n")
}else{
lines(t, Lt, col = COL[i], lty = LTY[i])
}
}
## -----------------------------------------------------------------------------
# Seasonally oscillating VBGF: vary the oscillation amplitude C from 0 to 1.
library(TropFishR)
t <- seq(-0.2, 3, length.out = 200)
Lt <- VBGF(param = list(Linf = 20, K = 1, t0 = -0.1, ts = 0, C=0), t = t)
Cs <- seq(0.25,1,0.25)
COLs <- 1:5
plot(t, Lt, t="l", ylim=c(0,22), yaxs="i", col=1)
for(i in seq(Cs)){
lines(t, VBGF(param = list(Linf = 20, K = 1, t0 = -0.1, ts = 0, C=Cs[i]), t = t), col=COLs[i+1])
}
legend("bottomright", legend=paste("C =", c(0,Cs)), lty=1, col=COLs, bty="n")
abline(v=0, col = 8, lty = 3)
abline(h = 20, col = 8, lty=3)
## -----------------------------------------------------------------------------
# Assemble a minimal length-frequency ("lfq") list from the alba dataset.
data("alba")
tmplfq <- list(
midLengths = alba$midLengths,
dates = alba$dates,
catch = alba$catch
)
## -----------------------------------------------------------------------------
# Setting the class enables the lfq plot method.
class(tmplfq) <- "lfq"
plot(tmplfq, Fname="catch", hist.sc = 1)
## -----------------------------------------------------------------------------
# Restructure the data with a moving average over MA = 7 length classes.
alba <- lfqRestructure(alba, MA = 7)
plot(alba, hist.sc = 0.75)
## -----------------------------------------------------------------------------
# Overlay a growth curve with hand-picked parameters on restructured data.
alba <- lfqRestructure(alba, MA=7)
plot(alba, hist.col = c("white", "black"),
image.col = c(rep(rgb(1,0.8,0.8),1000), "white", rep(rgb(0.8,0.8,1),1000)),
ylim = c(0,max(alba$midLengths+0.5)))
tmp <- lfqFitCurves(alba, par = list(Linf=11, K=2.5, t_anchor=0.5),
draw = TRUE, col=4, lty=2)
## -----------------------------------------------------------------------------
# Powell-Wetherall method: point estimate and confidence interval for
# Linf, used below to bound the optimisers' search space.
PW <- powell_wetherall(alba, catch_columns = 1:7, reg_int = c(2,9) )
PW$Linf_est
PW$confidenceInt_Linf
## -----------------------------------------------------------------------------
# Classical ELEFAN: grid search over (Linf, K), anchored at a "cross" point.
alba2 <- ELEFAN(
lfq = alba, MA = 7,
Linf_range = seq(7, 20, length.out = 30),
K_range = exp(seq(log(0.1),log(4), length.out = 30)),
method = "cross",
cross.date = alba$dates[3],
cross.midLength = alba$midLengths[5],
contour = TRUE, add.values = FALSE,
hide.progressbar = TRUE # change to 'FALSE' to follow algorithm's progression
)
points(alba2$par["Linf"], alba2$par["K"], pch="*", cex=2, col=2)
unlist(alba2$par)
alba2$Rn_max
## -----------------------------------------------------------------------------
# Best-scoring growth curve plus the chosen crossing point.
plot(alba2)
points(alba$dates[3], alba$midLengths[5], pch="*", cex=2, col=2)
## -----------------------------------------------------------------------------
# ELEFAN with simulated annealing; Linf bounds taken from the PW interval.
set.seed(1)
alba3 <- ELEFAN_SA(
lfq = alba,
seasonalised = FALSE,
init_par = alba2$par[1:5],
low_par = list(Linf=PW$confidenceInt_Linf[1], K=1, t_anchor=0, ts=0, C=0),
up_par = list(Linf=PW$confidenceInt_Linf[2], K=4, t_anchor=1, ts=1, C=1),
SA_temp = 2e5,
SA_time = 60,
maxit = 400,
MA = 7,
plot.score = TRUE,
verbose = FALSE
)
unlist(alba3$par)
alba3$Rn_max
## -----------------------------------------------------------------------------
plot(alba3)
## -----------------------------------------------------------------------------
# ELEFAN with a genetic algorithm (non-seasonalised fit).
set.seed(1)
alba4 <- ELEFAN_GA(
lfq = alba,
seasonalised = FALSE,
low_par = list(Linf=PW$confidenceInt_Linf[1], K=1, t_anchor=0, ts=0, C=0),
up_par = list(Linf=PW$confidenceInt_Linf[2], K=4, t_anchor=1, ts=1, C=1),
popSize = 60,
pmutation = 0.2,
maxiter = 100,
run = 20,
MA = 7,
plot.score = TRUE,
monitor = FALSE,
parallel = FALSE
)
unlist(alba4$par)
alba4$Rn_max
## -----------------------------------------------------------------------------
plot(alba4)
## -----------------------------------------------------------------------------
# Same GA, now fitting the seasonalised VBGF (ts and C estimated as well).
set.seed(1)
alba5 <- ELEFAN_GA(
lfq = alba,
seasonalised = TRUE,
low_par = list(Linf=PW$confidenceInt_Linf[1], K=0.1, t_anchor=0, ts=0, C=0),
up_par = list(Linf=PW$confidenceInt_Linf[2], K=4, t_anchor=1, ts=1, C=1),
popSize = 60,
pmutation = 0.2,
maxiter = 100,
run = 20,
MA = 7,
plot.score = TRUE,
monitor = FALSE,
parallel = FALSE
)
unlist(alba5$par)
alba5$Rn_max
plot(alba5)
## -----------------------------------------------------------------------------
# Known parameters used to simulate the synLFQ4 dataset (for validation).
true_par <- list(Linf = 80, K = 0.5, t_anchor = 0.25,C = 0.75, ts = 0.5, phiL = 3.51)
## -----------------------------------------------------------------------------
# Re-estimate the simulation parameters from the data with the GA.
set.seed(1)
data("synLFQ4")
synLFQ4 <- ELEFAN_GA(
lfq = synLFQ4,
seasonalised = TRUE,
low_par = list(Linf=70, K=0.1, t_anchor=0, ts=0, C=0),
up_par = list(Linf=110, K=1, t_anchor=1, ts=1, C=1),
popSize = 60,
pmutation = 0.2,
maxiter = 100,
run = 20,
MA = 11,
plot.score = TRUE,
monitor = FALSE,
parallel = FALSE
)
## -----------------------------------------------------------------------------
# Compare the true simulation parameters with the GA estimates, adding the
# goodness-of-fit score (Rn) achieved by each parameter set.
tmp <- as.data.frame(rbind(unlist(true_par), unlist(synLFQ4$par)))
rownames(tmp) <- c("true", "estimated")
# Bug fix: the two Rn values were swapped relative to the row labels.
# The "true" row must carry the score of true_par and the "estimated"
# row the score of the fitted parameters (synLFQ4$Rn_max).
tmp$Rn <- c(lfqFitCurves(synLFQ4, par = true_par)$Rn_max, synLFQ4$Rn_max)
tmp <- round(tmp,3)
tmp
## -----------------------------------------------------------------------------
# Visual comparison: true parameters (col 8, solid) vs estimated
# parameters (col 4, dashed) drawn over the length-frequency data.
plot(synLFQ4, draw = FALSE)
tmp <- lfqFitCurves(synLFQ4, par = true_par, col=8, lty=1, draw = TRUE)
tmp <- lfqFitCurves(synLFQ4, par = synLFQ4$par, col=4, lty=2, draw = TRUE)
legend("top", ncol=2, legend = c("true", "estimated"), col=c(8,4), lty=c(1,2))
## -----------------------------------------------------------------------------
# Same GA fit, but with a variable mutation probability supplied via
# GA::ga_pmutation (p0 = 0.5, p = 0.1) instead of a fixed pmutation.
synLFQ4 <- ELEFAN_GA(
lfq = synLFQ4,
seasonalised = TRUE,
low_par = list(Linf=70, K=0.1, t_anchor=0, ts=0, C=0),
up_par = list(Linf=110, K=1, t_anchor=1, ts=1, C=1),
popSize = 60,
pmutation = function(...) GA::ga_pmutation(..., p0=0.5, p=0.1),
maxiter = 100,
run = 20,
MA = 11,
plot.score = TRUE,
monitor = FALSE,
parallel = FALSE
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DFIT.R
\docType{package}
\name{DFIT}
\alias{DFIT}
\title{Differential Functioning of Items and Tests framework}
\description{
\code{DFIT} provides functions for calculating the differential item and test functioning
proposed by Raju et al. (1995).
}
\details{
DFIT provides a set of functions to calculate the noncompensatory (NCDIF), compensatory (CDIF) and test level (DTF)
differential functioning indices for items and tests under Raju's (Raju, et al. 1995) DFIT framework.
It also provides functions for obtaining cut-off points for identifying differential functioning for these indices
following the Monte Carlo Item Parameter Replication approach proposed by Oshima et al. (2006).
This package also improves upon available DFIT software by allowing the covariance matrices for both focal and reference
groups to be used. This improves the obtained cut-off points, which result in type I error rates at the nominal level,
and increased power, when compared to the cut-off points obtained when using only the focal group item parameter
estimates and their estimate covariances (Cervantes, 2012). Furthermore, this package includes functions for obtaining
the asymptotic covariance matrices of item parameter estimates (currently only for dichotomous IRT models) and for
calculating the DFIT indices base on the focal group distribution as well as ability estimates for a sample from the
focal population are included; these enable ad hoc and a priori power calculations for given item parameters and sample
sizes to be possible with this package.
}
\references{
de Ayala, R. J., (2009). The theory and practice of item response theory. New York: The Guildford Press
Cervantes, V. H. (2012). On using the Item Parameter Replication (IPR) approach for power calculation of the noncompensatory differential item functioning (NCDIF) index (pp. 206-207). Proceedings of the V European Congress of Methodology. Santiago de Compostela, Spain: Universidade de Santiago de Compostela.
Cervantes, V. H. (2017). DFIT: An R Package for Raju's Differential Functioning of Items and Tests Framework. Journal of Statistical Software, 76(5), 1-24. doi:10.18637/jss.v076.i05
Cohen, A., Kim, S-H and Baker, F. (1993). Detection of differential item functioning in the Graded Response Model. Applied psychological measurement, 17(4), 335-350. doi:10.1177/014662169301700402
Holland, P.W., and Thayer, D.T. (1988). Differential Item Performance and the Mantel-Haenszel Procedure. In H. Wainer and H.I. Braun (Eds.), Test Validity. Hillsdale, NJ: Erlbaum.
Li, Y. & Lissitz, R. (2004). Applications of the analytically derived standard errors of Item Response Theory item parameter estimates. Journal of educational measurement, 41(2), 85--117. doi:10.1111/j.1745-3984.2004.tb01109.x
Oshima, T. & Morris, S. (2008). Raju's Differential Functioning of Items and Tests (DFIT). Educational Measurement: Issues and Practice, 27(3), 43--50. doi:10.1111/j.1745-3992.2008.00127.x
Oshima, T., Raju, N. & Nanda, A. (2006). A new method for assessing the statistical significance in the Differential Functioning of Items and Tests (DFIT) framework. Journal of educational measurement, 43(1), 1--17. doi:10.1111/j.1745-3984.2006.00001.x
Raju, N. (1988). The area between two item characteristic curves. Psychometrika, 53(4), 495--502. doi:10.1007/bf02294403
Raju, N., Fortmann-Johnson, K., Kim, W., Morris, S., Nering, M. & Oshima, T. (2009). The item parameter replication method for detecting differential functioning in the polytomous DFIT framework. Applied psychological measurement, 33(2), 133--147. doi:10.1177/0146621608319514
Raju, N. S., van der Linden, W. J., & Fleer, P. F. (1995). IRT-based internal measures of differential functioning of items and tests. Applied Psychological Measurement, 19, 353--368. doi:10.1177/014662169501900405
Roussos, L., Schnipke, D. & Pashley, P. (1999). A generalized formula for the Mantel-Haenszel Differential Item Functioning parameter. Journal of educational and behavioral statistics, 24(3), 293--322. doi:10.3102/10769986024003293
Wright, K. (2011). Improvements for Differential Functioning of Items and Tests (DFIT): Investigating the addition of reporting an effect size measure and power (Unpublished doctoral dissertation). Georgia State University, USA.
}
| /man/DFIT.Rd | no_license | herulor/DFIT | R | false | true | 4,379 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DFIT.R
\docType{package}
\name{DFIT}
\alias{DFIT}
\title{Differential Functioning of Items and Tests framework}
\description{
\code{DFIT} provides functions for calculating the differential item and test functioning
proposed by Raju et al. (1995).
}
\details{
DFIT provides a set of functions to calculate the noncompensatory (NCDIF), compensatory (CDIF) and test level (DTF)
differential functioning indices for items and tests under Raju's (Raju, et al. 1995) DFIT framework.
It also provides functions for obtaining cut-off points for identifying differential functioning for these indices
following the Monte Carlo Item Parameter Replication approach proposed by Oshima et al. (2006).
This package also improves upon available DFIT software by allowing the covariance matrices for both focal and reference
groups to be used. This improves the obtained cut-off points, which result in type I error rates at the nominal level,
and increased power, when compared to the cut-off points obtained when using only the focal group item parameter
estimates and their estimate covariances (Cervantes, 2012). Furthermore, this package includes functions for obtaining
the asymptotic covariance matrices of item parameter estimates (currently only for dichotomous IRT models) and for
calculating the DFIT indices based on the focal group distribution as well as ability estimates for a sample from the
focal population are included; these enable ad hoc and a priori power calculations for given item parameters and sample
sizes to be possible with this package.
}
\references{
de Ayala, R. J., (2009). The theory and practice of item response theory. New York: The Guildford Press
Cervantes, V. H. (2012). On using the Item Parameter Replication (IPR) approach for power calculation of the noncompensatory differential item functioning (NCDIF) index (pp. 206-207). Proceedings of the V European Congress of Methodology. Santiago de Compostela, Spain: Universidade de Santiago de Compostela.
Cervantes, V. H. (2017). DFIT: An R Package for Raju's Differential Functioning of Items and Tests Framework. Journal of Statistical Software, 76(5), 1-24. doi:10.18637/jss.v076.i05
Cohen, A., Kim, S-H and Baker, F. (1993). Detection of differential item functioning in the Graded Response Model. Applied psychological measurement, 17(4), 335-350. doi:10.1177/014662169301700402
Holland, P.W., and Thayer, D.T. (1988). Differential Item Performance and the Mantel-Haenszel Procedure. In H. Wainer and H.I. Braun (Eds.), Test Validity. Hillsdale, NJ: Erlbaum.
Li, Y. & Lissitz, R. (2004). Applications of the analytically derived standard errors of Item Response Theory item parameter estimates. Journal of educational measurement, 41(2), 85--117. doi:10.1111/j.1745-3984.2004.tb01109.x
Oshima, T. & Morris, S. (2008). Raju's Differential Functioning of Items and Tests (DFIT). Educational Measurement: Issues and Practice, 27(3), 43--50. doi:10.1111/j.1745-3992.2008.00127.x
Oshima, T., Raju, N. & Nanda, A. (2006). A new method for assessing the statistical significance in the Differential Functioning of Items and Tests (DFIT) framework. Journal of educational measurement, 43(1), 1--17. doi:10.1111/j.1745-3984.2006.00001.x
Raju, N. (1988). The area between two item characteristic curves. Psychometrika, 53(4), 495--502. doi:10.1007/bf02294403
Raju, N., Fortmann-Johnson, K., Kim, W., Morris, S., Nering, M. & Oshima, T. (2009). The item parameter replication method for detecting differential functioning in the polytomous DFIT framework. Applied psychological measurement, 33(2), 133--147. doi:10.1177/0146621608319514
Raju, N. S., van der Linden, W. J., & Fleer, P. F. (1995). IRT-based internal measures of differential functioning of items and tests. Applied Psychological Measurement, 19, 353--368. doi:10.1177/014662169501900405
Roussos, L., Schnipke, D. & Pashley, P. (1999). A generalized formula for the Mantel-Haenszel Differential Item Functioning parameter. Journal of educational and behavioral statistics, 24(3), 293--322. doi:10.3102/10769986024003293
Wright, K. (2011). Improvements for Differential Functioning of Items and Tests (DFIT): Investigating the addition of reporting an effect size measure and power (Unpublished doctoral dissertation). Georgia State University, USA.
}
|
# Direct estimate of 2017 GREAT BARRACUDA released alive.
# Strategy: estimate the ratio of the release discrepancy (delta_rs) to
# reported trips under the MRIP complex survey design, scale it by the number
# of charter logbook (cls) trips, then add the logbook-reported releases.
library(tidyverse)
library(ggthemes)
library(lubridate)
library(survey)
library(haven)
# look at both private and public sites
mrip_all_17 <- read_csv("data/mrip_dm_17_all_sites.csv",
col_types = cols(psu_id = col_character()))
cls_17 <- read_csv("data/cls_17_all_sites.csv")
# make species-specific variables for estimation
# NOTE(review): cls_17 is filtered to FL here, yet it is used to scale the
# estimate in this "all sites" section below -- confirm this is intended
# rather than a remnant from the FL-only section.
cls_17 <- cls_17 %>%
filter(state == "FL")
# Treat missing release counts as zero before computing deltas.
mrip_all_17$`GREAT BARRACUDA_released`[is.na(mrip_all_17$`GREAT BARRACUDA_released`)] <- 0
# NOTE(review): the derived columns are named black_grouper_* but hold GREAT
# BARRACUDA values -- the names look like copy-paste leftovers from a sibling
# species script; behavior is unaffected but the names mislead.
# ST == 12 corresponds to Florida (consistent with the state == "FL" filter above).
mrip_all_17 <- mrip_all_17 %>%
mutate(`GREAT BARRACUDA_released` = as.numeric(`GREAT BARRACUDA_released`)) %>%
mutate(black_grouper_release_fl = if_else(ST == 12,
`GREAT BARRACUDA_release`,as.integer(0)),
black_grouper_released_fl = if_else(ST == 12,
`GREAT BARRACUDA_released`,0)) %>%
mutate(delta_rs = black_grouper_release_fl - black_grouper_released_fl) %>%
mutate(reported = if_else(ST == 12,
reported,as.integer(0)))
# 2017 complex survey design: PSU ids, integrated weights, stratified, nested ids.
desi17_all <- svydesign(id=~psu_id,
weights=~w_int,
strata = ~strat_id,
nest=T,
data=mrip_all_17)
options(survey.lonely.psu = "adjust")  # handle single-PSU strata instead of erroring
# Design-based ratio of the release delta to reported trips.
ty2.r17_rs <- svyratio(~delta_rs,
~reported,
design=desi17_all,
na.rm=T)
# Standard error of the ratio estimate scaled to the logbook trip count.
ty2_17_rs_se <- as.numeric(predict(ty2.r17_rs,
total = nrow(cls_17))$se)
#ty2_17_rs_se
# Point estimate: predicted delta total plus logbook-reported releases.
ty2_17_rs_total <- predict(ty2.r17_rs,total = nrow(cls_17))[[1]] +
sum(cls_17$`GREAT BARRACUDA_released`,na.rm=T)
ty2_17_rs_total <- as.numeric(ty2_17_rs_total)
# FLORIDA
# Florida-only version of the estimate above: variables zeroed outside ST == 12
# and the logbook scaling uses only FL trips (cls_17_fl).
# NOTE(review): red_snapper_* names hold GREAT BARRACUDA values -- copy-paste
# naming from another species script; behavior is unaffected.
mrip_all_17_1 <- read_csv("data/mrip_dm_17_all_sites.csv",
col_types = cols(psu_id = col_character()))
# Missing release counts treated as zero, then coerced to numeric.
mrip_all_17_1$`GREAT BARRACUDA_released`[is.na(mrip_all_17_1$`GREAT BARRACUDA_released`)] <- 0
mrip_all_17_1$`GREAT BARRACUDA_released` <- as.numeric(mrip_all_17_1$`GREAT BARRACUDA_released`)
mrip_all_17_1 <- mrip_all_17_1 %>%
mutate(delta_rs = `GREAT BARRACUDA_release` - `GREAT BARRACUDA_released`)
# Zero out contributions from states other than Florida (ST == 12).
# NOTE(review): if_else() is type-strict -- as.integer(0)/as.numeric(0) must
# match the column types of the *_release/*_released columns; verify against
# the CSV schema.
mrip_all_17_1_fl <- mrip_all_17_1 %>%
mutate(red_snapper_release_fl = if_else(ST == 12,
`GREAT BARRACUDA_release`,as.integer(0)),
red_snapper_released_fl = if_else(ST == 12,
`GREAT BARRACUDA_released`,as.numeric(0))) %>%
mutate(delta_rs_fl = red_snapper_release_fl - red_snapper_released_fl) %>%
mutate(reported_fl = if_else(ST == 12,
reported,as.integer(0)))
cls_17 <- read_csv("data/cls_17_all_sites.csv")
cls_17_fl <- cls_17 %>%
filter(state == "FL")
# Survey design and ratio estimator restricted (by zeroing) to FL records.
desi17_all_1_fl <- svydesign(id = ~psu_id,
weights = ~w_int,
strata = ~strat_id,
nest=T,
data=mrip_all_17_1_fl)
options(survey.lonely.psu = "adjust")
ty2.r17_rs_cut_13_fl <- svyratio(~delta_rs_fl,
~reported_fl,
design=desi17_all_1_fl,
na.rm=T)
# SE and point estimate scaled to FL logbook trips, plus FL-reported releases.
ty2_17_cut_13_rs_fl_se <- as.numeric(predict(ty2.r17_rs_cut_13_fl,
total = nrow(cls_17_fl))$se)
ty2_17_cut_13_rs_fl_total <- predict(ty2.r17_rs_cut_13_fl,total = nrow(cls_17_fl))[[1]] +
sum(cls_17_fl$`GREAT BARRACUDA_released`,na.rm=T)
ty2_17_cut_13_rs_fl_total <- as.numeric(ty2_17_cut_13_rs_fl_total)
############
#ALABAMA
############
# Alabama-only version: identical structure to the Florida section, with
# ST == 1 (consistent with the state == "AL" logbook filter below).
# NOTE(review): red_snapper_* names again hold GREAT BARRACUDA values.
mrip_all_17_2 <- read_csv("data/mrip_dm_17_all_sites.csv",
col_types = cols(psu_id = col_character()))
mrip_all_17_2$`GREAT BARRACUDA_released`[is.na(mrip_all_17_2$`GREAT BARRACUDA_released`)] <- 0
mrip_all_17_2$`GREAT BARRACUDA_released` <- as.numeric(mrip_all_17_2$`GREAT BARRACUDA_released`)
mrip_all_17_2 <- mrip_all_17_2 %>%
mutate(delta_rs = `GREAT BARRACUDA_release` - `GREAT BARRACUDA_released`)
# Zero out contributions from states other than Alabama (ST == 1).
mrip_all_17_2_al <- mrip_all_17_2 %>%
mutate(red_snapper_release_al = if_else(ST == 1,
`GREAT BARRACUDA_release`,as.integer(0)),
red_snapper_released_al = if_else(ST == 1,
`GREAT BARRACUDA_released`,as.numeric(0))) %>%
mutate(delta_rs_al = red_snapper_release_al - red_snapper_released_al) %>%
mutate(reported_al = if_else(ST == 1,
reported,as.integer(0)))
cls_17 <- read_csv("data/cls_17_all_sites.csv")
cls_17_al <- cls_17 %>%
filter(state == "AL")
# Survey design and ratio estimator restricted (by zeroing) to AL records.
desi17_all_1_al <- svydesign(id = ~psu_id,
weights = ~w_int,
strata = ~strat_id,
nest=T,
data=mrip_all_17_2_al)
options(survey.lonely.psu = "adjust")
ty2.r17_rs_cut_13_al <- svyratio(~delta_rs_al,
~reported_al,
design=desi17_all_1_al,
na.rm=T)
# SE and point estimate scaled to AL logbook trips, plus AL-reported releases.
ty2_17_cut_13_rs_al_se <- as.numeric(predict(ty2.r17_rs_cut_13_al,
total = nrow(cls_17_al))$se)
ty2_17_cut_13_rs_al_total <- predict(ty2.r17_rs_cut_13_al,total = nrow(cls_17_al))[[1]] +
sum(cls_17_al$`GREAT BARRACUDA_released`,na.rm=T)
ty2_17_cut_13_rs_al_total <- as.numeric(ty2_17_cut_13_rs_al_total)
| /code/estimates_for_cls_report/direct_estimates/additional_species/direct_estimates_2017_great_barracuda_released_alive.R | no_license | williamsbenjamin/Fish-Estimation-Research | R | false | false | 5,443 | r | library(tidyverse)
# NOTE(review): this is a duplicated copy of the estimation script above
# (dataset artifact; its first line, library(tidyverse), was swallowed into
# the preceding metadata row). Comments below are kept brief -- see the first
# copy for the detailed annotations.
library(ggthemes)
library(lubridate)
library(survey)
library(haven)
# look at both private and public sites
mrip_all_17 <- read_csv("data/mrip_dm_17_all_sites.csv",
col_types = cols(psu_id = col_character()))
cls_17 <- read_csv("data/cls_17_all_sites.csv")
# make species-specific variables for estimation
cls_17 <- cls_17 %>%
filter(state == "FL")
# Missing releases -> 0; black_grouper_* names actually hold GREAT BARRACUDA
# values (copy-paste naming); ST == 12 corresponds to Florida.
mrip_all_17$`GREAT BARRACUDA_released`[is.na(mrip_all_17$`GREAT BARRACUDA_released`)] <- 0
mrip_all_17 <- mrip_all_17 %>%
mutate(`GREAT BARRACUDA_released` = as.numeric(`GREAT BARRACUDA_released`)) %>%
mutate(black_grouper_release_fl = if_else(ST == 12,
`GREAT BARRACUDA_release`,as.integer(0)),
black_grouper_released_fl = if_else(ST == 12,
`GREAT BARRACUDA_released`,0)) %>%
mutate(delta_rs = black_grouper_release_fl - black_grouper_released_fl) %>%
mutate(reported = if_else(ST == 12,
reported,as.integer(0)))
# 2017 complex survey design and design-based ratio estimate (all sites).
desi17_all <- svydesign(id=~psu_id,
weights=~w_int,
strata = ~strat_id,
nest=T,
data=mrip_all_17)
options(survey.lonely.psu = "adjust")
#tyc_17_rs_total
ty2.r17_rs <- svyratio(~delta_rs,
~reported,
design=desi17_all,
na.rm=T)
ty2_17_rs_se <- as.numeric(predict(ty2.r17_rs,
total = nrow(cls_17))$se)
#ty2_17_rs_se
ty2_17_rs_total <- predict(ty2.r17_rs,total = nrow(cls_17))[[1]] +
sum(cls_17$`GREAT BARRACUDA_released`,na.rm=T)
ty2_17_rs_total <- as.numeric(ty2_17_rs_total)
#FLORIDA
#Florida Only
# Florida-only estimate: zero contributions outside ST == 12, scale by FL trips.
mrip_all_17_1 <- read_csv("data/mrip_dm_17_all_sites.csv",
col_types = cols(psu_id = col_character()))
mrip_all_17_1$`GREAT BARRACUDA_released`[is.na(mrip_all_17_1$`GREAT BARRACUDA_released`)] <- 0
mrip_all_17_1$`GREAT BARRACUDA_released` <- as.numeric(mrip_all_17_1$`GREAT BARRACUDA_released`)
mrip_all_17_1 <- mrip_all_17_1 %>%
mutate(delta_rs = `GREAT BARRACUDA_release` - `GREAT BARRACUDA_released`)
mrip_all_17_1_fl <- mrip_all_17_1 %>%
mutate(red_snapper_release_fl = if_else(ST == 12,
`GREAT BARRACUDA_release`,as.integer(0)),
red_snapper_released_fl = if_else(ST == 12,
`GREAT BARRACUDA_released`,as.numeric(0))) %>%
mutate(delta_rs_fl = red_snapper_release_fl - red_snapper_released_fl) %>%
mutate(reported_fl = if_else(ST == 12,
reported,as.integer(0)))
cls_17 <- read_csv("data/cls_17_all_sites.csv")
cls_17_fl <- cls_17 %>%
filter(state == "FL")
desi17_all_1_fl <- svydesign(id = ~psu_id,
weights = ~w_int,
strata = ~strat_id,
nest=T,
data=mrip_all_17_1_fl)
options(survey.lonely.psu = "adjust")
ty2.r17_rs_cut_13_fl <- svyratio(~delta_rs_fl,
~reported_fl,
design=desi17_all_1_fl,
na.rm=T)
ty2_17_cut_13_rs_fl_se <- as.numeric(predict(ty2.r17_rs_cut_13_fl,
total = nrow(cls_17_fl))$se)
ty2_17_cut_13_rs_fl_total <- predict(ty2.r17_rs_cut_13_fl,total = nrow(cls_17_fl))[[1]] +
sum(cls_17_fl$`GREAT BARRACUDA_released`,na.rm=T)
ty2_17_cut_13_rs_fl_total <- as.numeric(ty2_17_cut_13_rs_fl_total)
############
#ALABAMA
############
# Alabama-only estimate: same structure with ST == 1 / state == "AL".
mrip_all_17_2 <- read_csv("data/mrip_dm_17_all_sites.csv",
col_types = cols(psu_id = col_character()))
mrip_all_17_2$`GREAT BARRACUDA_released`[is.na(mrip_all_17_2$`GREAT BARRACUDA_released`)] <- 0
mrip_all_17_2$`GREAT BARRACUDA_released` <- as.numeric(mrip_all_17_2$`GREAT BARRACUDA_released`)
mrip_all_17_2 <- mrip_all_17_2 %>%
mutate(delta_rs = `GREAT BARRACUDA_release` - `GREAT BARRACUDA_released`)
mrip_all_17_2_al <- mrip_all_17_2 %>%
mutate(red_snapper_release_al = if_else(ST == 1,
`GREAT BARRACUDA_release`,as.integer(0)),
red_snapper_released_al = if_else(ST == 1,
`GREAT BARRACUDA_released`,as.numeric(0))) %>%
mutate(delta_rs_al = red_snapper_release_al - red_snapper_released_al) %>%
mutate(reported_al = if_else(ST == 1,
reported,as.integer(0)))
cls_17 <- read_csv("data/cls_17_all_sites.csv")
cls_17_al <- cls_17 %>%
filter(state == "AL")
desi17_all_1_al <- svydesign(id = ~psu_id,
weights = ~w_int,
strata = ~strat_id,
nest=T,
data=mrip_all_17_2_al)
options(survey.lonely.psu = "adjust")
ty2.r17_rs_cut_13_al <- svyratio(~delta_rs_al,
~reported_al,
design=desi17_all_1_al,
na.rm=T)
ty2_17_cut_13_rs_al_se <- as.numeric(predict(ty2.r17_rs_cut_13_al,
total = nrow(cls_17_al))$se)
ty2_17_cut_13_rs_al_total <- predict(ty2.r17_rs_cut_13_al,total = nrow(cls_17_al))[[1]] +
sum(cls_17_al$`GREAT BARRACUDA_released`,na.rm=T)
ty2_17_cut_13_rs_al_total <- as.numeric(ty2_17_cut_13_rs_al_total)
|
# Extracted example for h2o::h2o.mean (generated from the package's Rd file;
# the "## No test:" / "## End(No test)" markers come from that extraction).
# NOTE: h2o.init() starts/attaches a local H2O cluster -- a side effect that
# requires a working Java installation.
library(h2o)
### Name: h2o.mean
### Title: Compute the frame's mean by-column (or by-row).
### Aliases: h2o.mean mean.H2OFrame
### ** Examples
## No test:
h2o.init()
# Load the bundled prostate dataset into the H2O cluster.
prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
prostate <- h2o.uploadFile(path = prostate_path)
# Default behavior. Will return list of means per column.
h2o.mean(prostate$AGE)
# return_frame set to TRUE. This will return an H2O Frame
# with mean per row or column (depends on axis argument)
h2o.mean(prostate, na.rm=TRUE, axis=1, return_frame=TRUE)
## End(No test)
| /data/genthat_extracted_code/h2o/examples/h2o.mean.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 566 | r | library(h2o)
# NOTE(review): duplicated copy of the h2o.mean example above (dataset
# artifact; its library(h2o) line was swallowed into the preceding metadata
# row, so this copy would not run standalone without library(h2o)).
### Name: h2o.mean
### Title: Compute the frame's mean by-column (or by-row).
### Aliases: h2o.mean mean.H2OFrame
### ** Examples
## No test:
h2o.init()
prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
prostate <- h2o.uploadFile(path = prostate_path)
# Default behavior. Will return list of means per column.
h2o.mean(prostate$AGE)
# return_frame set to TRUE. This will return an H2O Frame
# with mean per row or column (depends on axis argument)
h2o.mean(prostate, na.rm=TRUE, axis=1, return_frame=TRUE)
## End(No test)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{coKrig}
\alias{coKrig}
\title{co-Kriging computation.}
\usage{
coKrig(Z, K, k, G, g, type = "ordinary", cinv = "syminv")
}
\arguments{
\item{Z}{Matrix of observed values of the spatial process.}
\item{K}{Covariance matrix computed for the position \eqn{X} where the spatial process \eqn{Z}
was observed.}
\item{k}{Covariance cube computed for the position \eqn{X} where the spatial process \eqn{Z}
was observed and the position \eqn{Y} where the spatial process \eqn{Z} will be predicted.}
\item{G}{When universal kriging will be computed, this matrix represents the values of the
of the functions representing the mean of the process \eqn{Z}, evaluated in the spatial
points \eqn{X} where the spatial process was first observed.}
\item{g}{When universal kriging will be computed, this matrix represents the evaluation of the
functions representing the mean over the new position points \eqn{Y} where the spatial process
\eqn{Z} will be predicted.}
\item{type}{Type of kriging model, possible values are: simple, ordinary, universal.}
\item{cinv}{Specifies how the inverse of the covariance matrix \eqn{K} will be computed.
Possible values are: syminv = symmetric matrix inverse computation, inv = usual armadillo
inverse computation, cholinv = Cholesky based inverse computation, ginv = given inverse not
necessary to compute inverse at all.}
}
\value{
Depending on the type of analysis the list of results changes.
\item{Z}{New estimated values for Z.}
\item{L}{Linear coefficients determined by kriging.}
\item{J}{Inverse of the covariance matrix.}
\item{tau}{Factor computed in the ordinary and universal kriging.}
\item{alpha}{Factor computed in the ordinary kriging.}
\item{A}{Factor computed in the universal kriging.}
}
\description{
Computes the co-kriging linear estimator for different types models.
}
\examples{
library( KRIG )
}
\author{
Pedro Guarderas \email{pedro.felipe.guarderas@gmail.com}.
}
| /man/coKrig.Rd | no_license | pedroguarderas/KRIG | R | false | true | 2,017 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{coKrig}
\alias{coKrig}
\title{co-Kriging computation.}
\usage{
coKrig(Z, K, k, G, g, type = "ordinary", cinv = "syminv")
}
\arguments{
\item{Z}{Matrix of observed values of the spatial process.}
\item{K}{Covariance matrix computed for the position \eqn{X} where the spatial process \eqn{Z}
was observed.}
\item{k}{Covariance cube computed for the position \eqn{X} where the spatial process \eqn{Z}
was observed and the position \eqn{Y} where the spatial process \eqn{Z} will be predicted.}
\item{G}{When universal kriging will be computed, this matrix represents the values of the
of the functions representing the mean of the process \eqn{Z}, evaluated in the spatial
points \eqn{X} where the spatial process was first observed.}
\item{g}{When universal kriging will be computed, this matrix represents the evaluation of the
functions representing the mean over the new position points \eqn{Y} where the spatial process
\eqn{Z} will be predicted.}
\item{type}{Type of kriging model, possible values are: simple, ordinary, universal.}
\item{cinv}{Specifies how the inverse of the covariance matrix \eqn{K} will be computed.
Possible values are: syminv = symmetric matrix inverse computation, inv = usual armadillo
inverse computation, cholinv = Cholesky based inverse computation, ginv = given inverse not
necessary to compute inverse at all.}
}
\value{
Depending on the type of analysis the list of results changes.
\item{Z}{New estimated values for Z.}
\item{L}{Linear coefficients determined by kriging.}
\item{J}{Inverse of the covariance matrix.}
\item{tau}{Factor computed in the ordinary and universal kriging.}
\item{alpha}{Factor computed in the ordinary kriging.}
\item{A}{Factor computed in the universal kriging.}
}
\description{
Computes the co-kriging linear estimator for different types models.
}
\examples{
library( KRIG )
}
\author{
Pedro Guarderas \email{pedro.felipe.guarderas@gmail.com}.
}
|
# Jake Yeung
# Date of Creation: 2020-01-20
# File: ~/projects/scchic/scripts/rstudioserver_analysis/BM_all_merged/6-correct_variance_LDA_k4me3.R
# NOTE(review): the header/filename say K4me3, but jmark below is "H3K4me1" --
# confirm which histone mark this script is meant to process.
#
# Purpose: quality control of single-cell ChIC bone-marrow data after LDA.
# Computes intrachromosomal variance (imputed from LDA and raw from counts),
# plots UMAPs/variance diagnostics, filters out bad plates and low-variance
# cells, and writes filtered count matrices (all-merged and unenriched) plus
# a diagnostics PDF.
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(topicmodels)
library(JFuncs)
library(scchicFuncs)
library(hash)
library(igraph)
library(umap)
# Constants ---------------------------------------------------------------
# NOTE(review): binsize is defined but unused below; bigbinsize recomputes
# from the 50000 literal rather than reusing binsize.
binsize <- 50000
mergesize <- 1000
bigbinsize <- 50000 * mergesize
jsystem <- "BoneMarrow"
jmark <- "H3K4me1"
jcutoff.ncuts.var <- 0.3  # minimum raw intrachromosomal variance to keep a cell
outdir <- "/home/jyeung/hpc/scChiC/from_rstudioserver/quality_control_postLDA_var_BM.2020-01-31"
dir.create(outdir)
# Output file names encode mark, variance cutoff, smoothing bin size, and date.
outname <- paste0("BM_", jmark, ".varcutoff_", jcutoff.ncuts.var, ".platesRemoved.SmoothBinSize_", mergesize, ".", Sys.Date(), ".AllMerged.rds")
outname.unenriched <- paste0("BM_", jmark, ".varcutoff_", jcutoff.ncuts.var, ".platesRemoved.SmoothBinSize_", mergesize, ".", Sys.Date(), ".Unenriched.rds")
pdfname <- paste0("BM_", jmark, ".varcutoff_", jcutoff.ncuts.var, ".platesRemoved.SmoothBinSize_", mergesize, ".", Sys.Date(), ".pdf")
outf <- file.path(outdir, outname)
outf.unenriched <- file.path(outdir, outname.unenriched)
outpdf <- file.path(outdir, pdfname)
# Regexes over plate names: plates to drop entirely, and the plates that make
# up the unenriched (non-sorted) bone-marrow subset.
bad.plates.grep <- paste("Bl6-BM-Linneg-H3K4me1", "B6-13W1-BM-H3K4me1-4", "B6-13W1-BM-H3K4me1-3",
"PZ-ChIC-Bl6-BM-H3K4me1-Index4-12-09-19",
"PZ-ChIC-Bl6-BM-H3K4me1-Index3-12-09-19",
"PZ-ChIC-Bl6-BM-H3K4me1-Index2-12-09-19", sep = "|")
bm.unenriched.plates.grep <- paste("B6-13W1-BM-H3K4me1-1", "B6-13W1-BM-H3K4me1-2", "PZ-ChIC-Bl6-BM-H3K4me1-Index1-12-09-19", sep = "|")
# Set up -----------------------------------------------------------------
# load() brings out.lda and count.mat into the workspace (v=T prints names).
inf <- paste0("/home/jyeung/hpc/scChiC/raw_demultiplexed/LDA_outputs_all/ldaAnalysisBins_B6BM_All_allmarks.2020-01-12.bsizestepsize_50000_50000.NoSliding/lda_outputs.count_mat.", jmark, ".countcutoff_1000-500-1000-1000.TAcutoff_0.5.K-30.binarize.FALSE/ldaOut.count_mat.", jmark, ".countcutoff_1000-500-1000-1000.TAcutoff_0.5.K-30.Robj")
load(inf, v=T)
# Plate name = cell barcode with the trailing "_<well>" suffix clipped.
platenames <- unique(sapply(colnames(count.mat), function(x) ClipLast(x, jsep = "_")))
print(platenames)
# Show UMAP ---------------------------------------------------------------
jsettings <- umap.defaults
jsettings$n_neighbors <- 30
jsettings$min_dist <- 0.1
jsettings$random_state <- 123  # fixed seed for reproducible embedding
# UMAP on the per-cell topic proportions from the fitted LDA.
tm.result <- posterior(out.lda)
topics.mat <- tm.result$topics
umap.out <- umap(topics.mat, config = jsettings)
dat.umap.long <- data.frame(cell = rownames(umap.out[["layout"]]), umap1 = umap.out[["layout"]][, 1], umap2 = umap.out[["layout"]][, 2], stringsAsFactors = FALSE)
dat.umap.long <- DoLouvain(topics.mat, jsettings, dat.umap.long)  # adds louvain cluster labels
cbPalette <- c("#696969", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf")
m.umap <- ggplot(dat.umap.long, aes(x = umap1, y = umap2, color = louvain)) + geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + scale_color_manual(values = cbPalette)
# Imputed (denoised) log2 signal from the LDA factorization: topics %*% terms.
dat.impute.log <- log2(t(tm.result$topics %*% tm.result$terms))
jchromos <- paste("chr", c(seq(19), "X", "Y"), sep = "")
dat.var <- CalculateVarAll(dat.impute.log, jchromos)
# Join UMAP coords with imputed variance; derive experiment/plate/prefix labels
# from the cell barcode (left_join matches on the shared "cell" column).
dat.merge <- left_join(dat.umap.long, dat.var) %>%
rowwise() %>%
mutate(experi = ClipLast(cell, jsep = "-"),
plate = ClipLast(cell, jsep = "_"),
prefix = gsub("PZ-Bl6-BM", "Linneg", paste(strsplit(gsub("PZ-", "", cell), "-")[[1]][1:4], collapse = "-")))
m.umap.plates <- ggplot(dat.merge, aes(x = umap1, y = umap2, color = louvain)) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_manual(values = cbPalette)
m.umap.var <- ggplot(dat.merge, aes(x = umap1, y = umap2, color = cell.var.within.sum.norm)) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_viridis_c(direction = -1)
m.umap.var.plates <- ggplot(dat.merge, aes(x = umap1, y = umap2, color = cell.var.within.sum.norm)) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_viridis_c(direction = -1) + facet_wrap(~plate)
# Show raw ---------------------------------------------------------------
# Raw (non-imputed) intrachromosomal variance from counts, merging bins by
# mergesize and excluding sex chromosomes; also returns per-cell ncuts.
dat.var.raw <- CalculateVarRaw(count.mat, merge.size = mergesize, chromo.exclude.grep = "^chrX|^chrY", jpseudocount = 1, jscale = 10^6, calculate.ncuts = TRUE)
dat.merge2 <- left_join(dat.merge, dat.var.raw)
# Correlate raw intrachrom var with UMAP ---------------------------------
m.rawvar.vs.imputevar <- ggplot(dat.merge2, aes(x = ncuts.var, y = cell.var.within.sum.norm, color = prefix)) + geom_point(alpha = 0.5) +
scale_x_log10() + scale_y_log10() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
xlab(paste("Intrachromo var raw from", bigbinsize/10^6, "MB bins")) +
ylab("Imputed intrachromo var from LDA") +
ggtitle(jmark, jsystem) + geom_vline(xintercept = jcutoff.ncuts.var)
print(m.rawvar.vs.imputevar)
m.rawvar.vs.imputevar.plates <- ggplot(dat.merge2, aes(x = ncuts.var, y = cell.var.within.sum.norm, color = prefix)) + geom_point(alpha = 0.5) +
scale_x_log10() + scale_y_log10() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
xlab(paste("Intrachromo var raw from", bigbinsize/10^6, "MB bins")) +
ylab("Imputed intrachromo var from LDA") + facet_wrap(~plate) +
ggtitle(jmark, jsystem)
print(m.rawvar.vs.imputevar.plates)
# show ncuts (sequencing depth) vs raw variance, with the cutoff line
m.ncutsVSvar <- ggplot(dat.merge2, aes(x = ncuts, y = ncuts.var, color = prefix)) + geom_point(alpha = 0.25) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_x_log10() + scale_y_log10() + geom_hline(yintercept = jcutoff.ncuts.var)
m.ncutsVSvar.plate <- ggplot(dat.merge2, aes(x = ncuts, y = ncuts.var, color = prefix)) + geom_point(alpha = 0.25) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_x_log10() + scale_y_log10() + facet_wrap(~plate) + geom_hline(yintercept = jcutoff.ncuts.var)
# filter bad plates and bad cells and redo?
unique(dat.merge2$prefix)
# calculate intrachromo cutoff: inspect the variance distribution of the
# lineage-negative plates against the chosen cutoff
dat.linneg <- subset(dat.merge2, grepl("Linneg", plate))
m.var.cutoff <- ggplot(dat.linneg, aes(x = ncuts.var)) + geom_density() + scale_x_log10() +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
geom_vline(xintercept = jcutoff.ncuts.var)
m.ncutsVSvar.plate.cutoff <- ggplot(dat.merge2, aes(x = ncuts, y = ncuts.var, color = prefix)) + geom_point(alpha = 0.25) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_x_log10() + scale_y_log10() + facet_wrap(~plate) + geom_hline(yintercept = jcutoff.ncuts.var)
# Write new count mat -----------------------------------------------------
# Keep cells above the variance cutoff that are not on a bad plate; the
# unenriched subset additionally requires membership in the unenriched plates.
dat.keep <- subset(dat.merge2, !grepl(bad.plates.grep, plate) & ncuts.var > jcutoff.ncuts.var)
cells.keep <- dat.keep$cell
dat.keep.unenriched <- subset(dat.merge2, grepl(bm.unenriched.plates.grep, plate) & ncuts.var > jcutoff.ncuts.var)
cells.keep.unenriched <- dat.keep.unenriched$cell
count.mat.keep.unenriched <- count.mat[, cells.keep.unenriched]
print(unique(dat.keep$prefix))
# plot UMAP after removing bad cells and plates
m.umap.var.plates.filt <- ggplot(dat.merge %>% filter(cell %in% cells.keep), aes(x = umap1, y = umap2, color = cell.var.within.sum.norm)) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_viridis_c(direction = -1) + facet_wrap(~plate)
count.mat.keep <- count.mat[, cells.keep]
print(dim(count.mat))
print(dim(count.mat.keep))
saveRDS(count.mat.keep, outf)
saveRDS(count.mat.keep.unenriched, outf.unenriched)
# Write all diagnostic plots into one PDF.
# NOTE(review): bare ggplot objects below rely on top-level auto-printing;
# under source() (without print.eval = TRUE) nothing would be drawn -- wrap
# in print() to be safe.
pdf(file = outpdf, useDingbats = FALSE)
m.umap
m.umap.plates
m.umap.var
m.umap.var.plates
m.ncutsVSvar
m.ncutsVSvar.plate
m.ncutsVSvar.plate.cutoff
# plot(pca.out$sdev ^ 2 / sum(pca.out$sdev ^ 2))
m.rawvar.vs.imputevar
m.rawvar.vs.imputevar.plates
m.var.cutoff
m.umap.var.plates.filt
dev.off()
| /scripts/rstudioserver_analysis/BM_all_merged/6-correct_variance_LDA_k4me1_remove_plates_write_unenriched_and_stem.R | no_license | jakeyeung/sortchicAllScripts | R | false | false | 8,557 | r | # Jake Yeung
# Date of Creation: 2020-01-20
# File: ~/projects/scchic/scripts/rstudioserver_analysis/BM_all_merged/6-correct_variance_LDA_k4me3.R
# K4me3
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(topicmodels)
library(JFuncs)
library(scchicFuncs)
library(hash)
library(igraph)
library(umap)
# Constants ---------------------------------------------------------------
binsize <- 50000
mergesize <- 1000
bigbinsize <- 50000 * mergesize
jsystem <- "BoneMarrow"
jmark <- "H3K4me1"
jcutoff.ncuts.var <- 0.3
outdir <- "/home/jyeung/hpc/scChiC/from_rstudioserver/quality_control_postLDA_var_BM.2020-01-31"
dir.create(outdir)
outname <- paste0("BM_", jmark, ".varcutoff_", jcutoff.ncuts.var, ".platesRemoved.SmoothBinSize_", mergesize, ".", Sys.Date(), ".AllMerged.rds")
outname.unenriched <- paste0("BM_", jmark, ".varcutoff_", jcutoff.ncuts.var, ".platesRemoved.SmoothBinSize_", mergesize, ".", Sys.Date(), ".Unenriched.rds")
pdfname <- paste0("BM_", jmark, ".varcutoff_", jcutoff.ncuts.var, ".platesRemoved.SmoothBinSize_", mergesize, ".", Sys.Date(), ".pdf")
outf <- file.path(outdir, outname)
outf.unenriched <- file.path(outdir, outname.unenriched)
outpdf <- file.path(outdir, pdfname)
bad.plates.grep <- paste("Bl6-BM-Linneg-H3K4me1", "B6-13W1-BM-H3K4me1-4", "B6-13W1-BM-H3K4me1-3",
"PZ-ChIC-Bl6-BM-H3K4me1-Index4-12-09-19",
"PZ-ChIC-Bl6-BM-H3K4me1-Index3-12-09-19",
"PZ-ChIC-Bl6-BM-H3K4me1-Index2-12-09-19", sep = "|")
bm.unenriched.plates.grep <- paste("B6-13W1-BM-H3K4me1-1", "B6-13W1-BM-H3K4me1-2", "PZ-ChIC-Bl6-BM-H3K4me1-Index1-12-09-19", sep = "|")
# Set up -----------------------------------------------------------------
inf <- paste0("/home/jyeung/hpc/scChiC/raw_demultiplexed/LDA_outputs_all/ldaAnalysisBins_B6BM_All_allmarks.2020-01-12.bsizestepsize_50000_50000.NoSliding/lda_outputs.count_mat.", jmark, ".countcutoff_1000-500-1000-1000.TAcutoff_0.5.K-30.binarize.FALSE/ldaOut.count_mat.", jmark, ".countcutoff_1000-500-1000-1000.TAcutoff_0.5.K-30.Robj")
load(inf, v=T)
platenames <- unique(sapply(colnames(count.mat), function(x) ClipLast(x, jsep = "_")))
print(platenames)
# Show UMAP ---------------------------------------------------------------
jsettings <- umap.defaults
jsettings$n_neighbors <- 30
jsettings$min_dist <- 0.1
jsettings$random_state <- 123  # fixed seed so the embedding is reproducible
tm.result <- posterior(out.lda)  # topic/term posteriors from the fitted LDA
topics.mat <- tm.result$topics   # cells-by-topics matrix; input to UMAP
umap.out <- umap(topics.mat, config = jsettings)
dat.umap.long <- data.frame(cell = rownames(umap.out[["layout"]]), umap1 = umap.out[["layout"]][, 1], umap2 = umap.out[["layout"]][, 2], stringsAsFactors = FALSE)
# adds a `louvain` cluster column to dat.umap.long (scchicFuncs helper)
dat.umap.long <- DoLouvain(topics.mat, jsettings, dat.umap.long)
# fixed palette for coloring Louvain clusters in the UMAP plots
cbPalette <- c("#696969", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf")
m.umap <- ggplot(dat.umap.long, aes(x = umap1, y = umap2, color = louvain)) + geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + scale_color_manual(values = cbPalette)
# imputed signal: topics %*% terms reconstructs cell-by-bin probabilities;
# transposed to bins-by-cells and log2-transformed for the variance calculation
dat.impute.log <- log2(t(tm.result$topics %*% tm.result$terms))
# chromosomes 1-19 plus X/Y (mouse karyotype, consistent with the Bl6/B6 plates)
jchromos <- paste("chr", c(seq(19), "X", "Y"), sep = "")
# per-cell intrachromosomal variance on the imputed matrix (scchicFuncs helper)
dat.var <- CalculateVarAll(dat.impute.log, jchromos)
# join variance onto UMAP coords and derive experiment/plate/prefix labels
# from each cell name; rowwise() because strsplit operates per cell
dat.merge <- left_join(dat.umap.long, dat.var) %>%
rowwise() %>%
mutate(experi = ClipLast(cell, jsep = "-"),
plate = ClipLast(cell, jsep = "_"),
prefix = gsub("PZ-Bl6-BM", "Linneg", paste(strsplit(gsub("PZ-", "", cell), "-")[[1]][1:4], collapse = "-")))
m.umap.plates <- ggplot(dat.merge, aes(x = umap1, y = umap2, color = louvain)) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_manual(values = cbPalette)
# reversed viridis: darker = higher imputed intrachromosomal variance
m.umap.var <- ggplot(dat.merge, aes(x = umap1, y = umap2, color = cell.var.within.sum.norm)) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_viridis_c(direction = -1)
m.umap.var.plates <- ggplot(dat.merge, aes(x = umap1, y = umap2, color = cell.var.within.sum.norm)) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_viridis_c(direction = -1) + facet_wrap(~plate)
# Show raw ---------------------------------------------------------------
# Raw (non-imputed) intrachromosomal variance straight from the count matrix,
# after merging bins by `mergesize`; sex chromosomes excluded. Also returns
# per-cell total counts (`ncuts`) because calculate.ncuts = TRUE.
dat.var.raw <- CalculateVarRaw(count.mat, merge.size = mergesize, chromo.exclude.grep = "^chrX|^chrY", jpseudocount = 1, jscale = 10^6, calculate.ncuts = TRUE)
dat.merge2 <- left_join(dat.merge, dat.var.raw)
# Correlate raw intrachrom var with UMAP ---------------------------------
# raw vs imputed variance per cell; vertical line marks the QC cutoff
m.rawvar.vs.imputevar <- ggplot(dat.merge2, aes(x = ncuts.var, y = cell.var.within.sum.norm, color = prefix)) + geom_point(alpha = 0.5) +
scale_x_log10() + scale_y_log10() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
xlab(paste("Intrachromo var raw from", bigbinsize/10^6, "MB bins")) +
ylab("Imputed intrachromo var from LDA") +
ggtitle(jmark, jsystem) + geom_vline(xintercept = jcutoff.ncuts.var)
print(m.rawvar.vs.imputevar)
# same scatter, faceted per plate to spot plate-level outliers
m.rawvar.vs.imputevar.plates <- ggplot(dat.merge2, aes(x = ncuts.var, y = cell.var.within.sum.norm, color = prefix)) + geom_point(alpha = 0.5) +
scale_x_log10() + scale_y_log10() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
xlab(paste("Intrachromo var raw from", bigbinsize/10^6, "MB bins")) +
ylab("Imputed intrachromo var from LDA") + facet_wrap(~plate) +
ggtitle(jmark, jsystem)
print(m.rawvar.vs.imputevar.plates)
# show ncuts vs var
# sequencing depth vs raw variance; horizontal line marks the QC cutoff
m.ncutsVSvar <- ggplot(dat.merge2, aes(x = ncuts, y = ncuts.var, color = prefix)) + geom_point(alpha = 0.25) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_x_log10() + scale_y_log10() + geom_hline(yintercept = jcutoff.ncuts.var)
m.ncutsVSvar.plate <- ggplot(dat.merge2, aes(x = ncuts, y = ncuts.var, color = prefix)) + geom_point(alpha = 0.25) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_x_log10() + scale_y_log10() + facet_wrap(~plate) + geom_hline(yintercept = jcutoff.ncuts.var)
# filter bad plates and bad cells and redo?
unique(dat.merge2$prefix)
# calculate intrachromo cutoff
# the lineage-negative plates are used to sanity-check where the cutoff falls
# relative to their variance distribution
dat.linneg <- subset(dat.merge2, grepl("Linneg", plate))
m.var.cutoff <- ggplot(dat.linneg, aes(x = ncuts.var)) + geom_density() + scale_x_log10() +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
geom_vline(xintercept = jcutoff.ncuts.var)
m.ncutsVSvar.plate.cutoff <- ggplot(dat.merge2, aes(x = ncuts, y = ncuts.var, color = prefix)) + geom_point(alpha = 0.25) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_x_log10() + scale_y_log10() + facet_wrap(~plate) + geom_hline(yintercept = jcutoff.ncuts.var)
# Write new count mat -----------------------------------------------------
# keep cells that are (a) not on an excluded plate and (b) above the raw
# intrachromosomal-variance cutoff
dat.keep <- subset(dat.merge2, !grepl(bad.plates.grep, plate) & ncuts.var > jcutoff.ncuts.var)
cells.keep <- dat.keep$cell
# same variance cutoff, restricted to the unenriched (whole-BM) plates
dat.keep.unenriched <- subset(dat.merge2, grepl(bm.unenriched.plates.grep, plate) & ncuts.var > jcutoff.ncuts.var)
cells.keep.unenriched <- dat.keep.unenriched$cell
count.mat.keep.unenriched <- count.mat[, cells.keep.unenriched]
print(unique(dat.keep$prefix))
# plot UMAP after removing bad cells and plates
m.umap.var.plates.filt <- ggplot(dat.merge %>% filter(cell %in% cells.keep), aes(x = umap1, y = umap2, color = cell.var.within.sum.norm)) +
  geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
  scale_color_viridis_c(direction = -1) + facet_wrap(~plate)
# subset the count matrix to the kept cells and write both outputs
count.mat.keep <- count.mat[, cells.keep]
print(dim(count.mat))
print(dim(count.mat.keep))
saveRDS(count.mat.keep, outf)
saveRDS(count.mat.keep.unenriched, outf.unenriched)
# BUG FIX: ggplot objects are only auto-printed at the top level of an
# interactive session or an Rscript run; when this script is source()'d the
# bare object names below were silently discarded, producing an empty PDF.
# Each plot is now explicitly print()'ed onto the pdf device.
pdf(file = outpdf, useDingbats = FALSE)
print(m.umap)
print(m.umap.plates)
print(m.umap.var)
print(m.umap.var.plates)
print(m.ncutsVSvar)
print(m.ncutsVSvar.plate)
print(m.ncutsVSvar.plate.cutoff)
# plot(pca.out$sdev ^ 2 / sum(pca.out$sdev ^ 2))
print(m.rawvar.vs.imputevar)
print(m.rawvar.vs.imputevar.plates)
print(m.var.cutoff)
print(m.umap.var.plates.filt)
dev.off()
|
#' Truncated student generator for Bayesian regression simulation
#'
#' Simulates \code{n} random vectors \eqn{X} exactly distributed
#' from the \code{d}-dimensional Student distribution with
#' \code{df=}\eqn{\nu} degrees of freedom, mean zero and scale matrix
#' \code{Sig}, conditional on \eqn{l<X<u},
#'
#' @inheritParams mvrandt
#'
#' @author \code{Matlab} code by Zdravko Botev, \code{R} port by Leo Belzile
#' @export
#' @references Z. I. Botev and P. L'Ecuyer (2015), Efficient probability estimation
#' and simulation of the truncated multivariate Student-t distribution,
#' Proceedings of the 2015 Winter Simulation Conference, pp. 380-391,
#' @return list with components
#' \itemize{
#' \item{\code{R}: } \code{n} vector of scale
#' \item{\code{Z}: } a \code{d} by \code{n} matrix
#' } so that \eqn{\sqrt{\nu}Z/R} follows
#' a truncated Student distribution
#' @examples
#' d <- 5
#' tregress(rep(-2, d), rep(2, d), df = 3, n = 10,
#' Sig = diag(0.5, d) + matrix(1, d, d))
tregress <- function(l, u, Sig, df, n) {
  # Accept-reject sampler for the d-dimensional truncated Student-t
  # (Botev & L'Ecuyer, 2015). Returns list(R, Z) so that sqrt(df) * Z / R
  # follows the Student distribution with scale Sig truncated to (l, u).
  d <- length(l)
  if (length(u) != d || d != sqrt(length(Sig)) || any(l > u)) {
    stop("l, u, and Sig have to match in dimension with u>l")
  }
  # Cholesky factorization with variable reordering (improves acceptance rate)
  out <- cholperm(Sig, l, u)
  Lfull <- out$L
  l <- out$l
  u <- out$u
  D <- diag(Lfull)
  perm <- out$perm
  if (any(D < 1e-10)) {
    warning("Method may fail as covariance matrix is singular!")
  }
  # rescale so the Cholesky factor has unit diagonal, then strip the diagonal
  L <- Lfull / D
  u <- u / D
  l <- l / D
  L <- L - diag(d)
  # Starting value for the saddlepoint system (last coordinate on log scale)
  x0 <- rep(0, 2 * d)
  x0[2 * d] <- sqrt(df)
  x0[d] <- log(x0[2 * d])
  solvneq <- nleqslv::nleqslv(x = x0, fn = gradpsiT, L = L, l = l, u = u, nu = df,
                              global = "pwldog", method = "Broyden")
  soln <- solvneq$x
  exitflag <- solvneq$termcd
  # BUG FIX: all.equal() returns a character string on mismatch, so the
  # original `!all.equal(...)` raised "invalid argument type" exactly when
  # the solver had NOT converged; isTRUE() yields the intended logical test.
  if (!(exitflag %in% 1:2) || !isTRUE(all.equal(solvneq$fvec, rep(0, length(x0))))) {
    warning("Method may fail as covariance matrix is close to singular!")
  }
  # assign saddlepoint x* and mu* (undo the log transform on soln[d])
  soln[d] <- exp(soln[d])
  x <- soln[1:d]
  mu <- soln[(d + 1):length(soln)]
  # compute psi star (upper bound used in the acceptance test)
  psistar <- psyT(x = x, L = L, l = l, u = u, nu = df, mu = mu)
  # accept-reject sampling; accumulate accepted draws column-wise into Z
  Z <- matrix(0, nrow = d, ncol = n)
  R <- rep(0, n)
  accept <- 0L; iter <- 0L; nsim <- n
  while (accept < n) {
    prop <- mvtrnd(n = nsim, L = L, l = l, u = u, nu = df, mu = mu) # nsim proposals
    idx <- rexp(nsim) > (psistar - prop$p)  # exponential acceptance test
    m <- sum(idx)
    if (m > n - accept) { # keep only as many as still needed
      m <- n - accept
      idx <- which(idx)[1:m]
    }
    if (m > 0) {
      Z[, (accept + 1):(accept + m)] <- prop$Z[, idx]
      R[(accept + 1):(accept + m)] <- prop$R[idx]
    }
    accept <- accept + m
    iter <- iter + 1L
    # adapt batch size; when m == 0 the original ceiling(nsim/0) produced Inf
    # and min(n, Inf) = n -- made explicit here to avoid the Inf intermediate
    nsim <- if (m > 0) min(n, ceiling(nsim / m)) else n
    if (iter == 1e3) { # if iterations are getting large, give warning
      warning("Acceptance prob. smaller than 0.001")
    } else if (iter > 1e4) { # too many iterations: return the partial sample
      # BUG FIX: the original indexed the VECTOR R as R[,1:accept] (a
      # "incorrect number of dimensions" error), discarded both results and
      # never left the loop; truncate to the accepted draws and break.
      R <- R[seq_len(accept)]
      Z <- Z[, seq_len(accept), drop = FALSE]
      warning("Sample of size smaller than n returned.")
      break
    }
  }
  # finished sampling; undo the cholperm variable permutation
  out <- sort(perm, decreasing = FALSE, index.return = TRUE)
  inv.perm <- out$ix
  Z <- Lfull %*% Z
  # drop = FALSE keeps the documented d-by-n matrix shape when d == 1
  Z <- Z[inv.perm, , drop = FALSE]
  return(list(R = R, Z = Z))
}
| /R/tregress.R | no_license | danmackinlay/TruncatedNormal | R | false | false | 3,330 | r | #' Truncated student generator for Bayesian regression simulation
#'
#' Simulates \code{n} random vectors \eqn{X} exactly distributed
#' from the \code{d}-dimensional Student distribution with
#' \code{df=}\eqn{\nu} degrees of freedom, mean zero and scale matrix
#' \code{Sig}, conditional on \eqn{l<X<u},
#'
#' @inheritParams mvrandt
#'
#' @author \code{Matlab} code by Zdravko Botev, \code{R} port by Leo Belzile
#' @export
#' @references Z. I. Botev and P. L'Ecuyer (2015), Efficient probability estimation
#' and simulation of the truncated multivariate Student-t distribution,
#' Proceedings of the 2015 Winter Simulation Conference, pp. 380-391,
#' @return list with components
#' \itemize{
#' \item{\code{R}: } \code{n} vector of scale
#' \item{\code{Z}: } a \code{d} by \code{n} matrix
#' } so that \eqn{\sqrt(\nu)Z/R} follows
#' a truncated Student distribution
#' @examples
#' d <- 5
#' tregress(rep(-2, d), rep(2, d), df = 3, n = 10,
#' Sig = diag(0.5, d) + matrix(1, d, d))
tregress <- function(l, u, Sig, df, n) {
  # Accept-reject sampler for the d-dimensional truncated Student-t
  # (Botev & L'Ecuyer, 2015). Returns list(R, Z) so that sqrt(df) * Z / R
  # follows the Student distribution with scale Sig truncated to (l, u).
  d <- length(l)
  if (length(u) != d || d != sqrt(length(Sig)) || any(l > u)) {
    stop("l, u, and Sig have to match in dimension with u>l")
  }
  # Cholesky factorization with variable reordering (improves acceptance rate)
  out <- cholperm(Sig, l, u)
  Lfull <- out$L
  l <- out$l
  u <- out$u
  D <- diag(Lfull)
  perm <- out$perm
  if (any(D < 1e-10)) {
    warning("Method may fail as covariance matrix is singular!")
  }
  # rescale so the Cholesky factor has unit diagonal, then strip the diagonal
  L <- Lfull / D
  u <- u / D
  l <- l / D
  L <- L - diag(d)
  # Starting value for the saddlepoint system (last coordinate on log scale)
  x0 <- rep(0, 2 * d)
  x0[2 * d] <- sqrt(df)
  x0[d] <- log(x0[2 * d])
  solvneq <- nleqslv::nleqslv(x = x0, fn = gradpsiT, L = L, l = l, u = u, nu = df,
                              global = "pwldog", method = "Broyden")
  soln <- solvneq$x
  exitflag <- solvneq$termcd
  # BUG FIX: all.equal() returns a character string on mismatch, so the
  # original `!all.equal(...)` raised "invalid argument type" exactly when
  # the solver had NOT converged; isTRUE() yields the intended logical test.
  if (!(exitflag %in% 1:2) || !isTRUE(all.equal(solvneq$fvec, rep(0, length(x0))))) {
    warning("Method may fail as covariance matrix is close to singular!")
  }
  # assign saddlepoint x* and mu* (undo the log transform on soln[d])
  soln[d] <- exp(soln[d])
  x <- soln[1:d]
  mu <- soln[(d + 1):length(soln)]
  # compute psi star (upper bound used in the acceptance test)
  psistar <- psyT(x = x, L = L, l = l, u = u, nu = df, mu = mu)
  # accept-reject sampling; accumulate accepted draws column-wise into Z
  Z <- matrix(0, nrow = d, ncol = n)
  R <- rep(0, n)
  accept <- 0L; iter <- 0L; nsim <- n
  while (accept < n) {
    prop <- mvtrnd(n = nsim, L = L, l = l, u = u, nu = df, mu = mu) # nsim proposals
    idx <- rexp(nsim) > (psistar - prop$p)  # exponential acceptance test
    m <- sum(idx)
    if (m > n - accept) { # keep only as many as still needed
      m <- n - accept
      idx <- which(idx)[1:m]
    }
    if (m > 0) {
      Z[, (accept + 1):(accept + m)] <- prop$Z[, idx]
      R[(accept + 1):(accept + m)] <- prop$R[idx]
    }
    accept <- accept + m
    iter <- iter + 1L
    # adapt batch size; when m == 0 the original ceiling(nsim/0) produced Inf
    # and min(n, Inf) = n -- made explicit here to avoid the Inf intermediate
    nsim <- if (m > 0) min(n, ceiling(nsim / m)) else n
    if (iter == 1e3) { # if iterations are getting large, give warning
      warning("Acceptance prob. smaller than 0.001")
    } else if (iter > 1e4) { # too many iterations: return the partial sample
      # BUG FIX: the original indexed the VECTOR R as R[,1:accept] (a
      # "incorrect number of dimensions" error), discarded both results and
      # never left the loop; truncate to the accepted draws and break.
      R <- R[seq_len(accept)]
      Z <- Z[, seq_len(accept), drop = FALSE]
      warning("Sample of size smaller than n returned.")
      break
    }
  }
  # finished sampling; undo the cholperm variable permutation
  out <- sort(perm, decreasing = FALSE, index.return = TRUE)
  inv.perm <- out$ix
  Z <- Lfull %*% Z
  # drop = FALSE keeps the documented d-by-n matrix shape when d == 1
  Z <- Z[inv.perm, , drop = FALSE]
  return(list(R = R, Z = Z))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.