content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
## The data household_power_consumption has been stored in the working directory.
## Read the data into the working environment; missing values are coded as "?".
ndata <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
## Keep only the two days of interest (dates are still d/m/Y strings at this point).
ndata <- subset(ndata, ndata$Date == "1/2/2007" | ndata$Date == "2/2/2007")
ndata$Date <- as.Date(ndata$Date, format = "%d/%m/%Y")
## Combine Date and Time into a POSIXct DateTime column.
## BUG FIX: the original used sep = "" which glued date and time together
## ("2007-02-0100:00:00"); as.POSIXct then silently dropped the time of day.
DateTime <- paste(as.Date(ndata$Date), ndata$Time, sep = " ")
ndata$DateTime <- as.POSIXct(DateTime)
## Open the png device and set up a 2x2 grid of plots with custom margins.
png(filename = "plot4.png", width = 480, height = 480, bg = "white")
par(mfrow = c(2, 2), mar = c(10, 6, 1, 2))
## plot (1,1): global active power over time
plot(ndata$Global_active_power ~ ndata$DateTime, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
## plot (1,2): voltage over time
plot(ndata$Voltage ~ ndata$DateTime, type = "l",
     ylab = "Voltage", xlab = "datetime")
## plot (2,1): the three sub-metering series overlaid
with(ndata, {
  plot(Sub_metering_1 ~ DateTime, type = "l", ylab = "Energy sub metering", xlab = "")
  lines(Sub_metering_2 ~ DateTime, col = "red")
  lines(Sub_metering_3 ~ DateTime, col = "blue")})
legend("topright", col = c("black", "red", "blue"),
       legend = c("sub_metering_1", "sub_metering_2", "sub_metering_3"), lty = 1, lwd = 1)
## plot (2,2): global reactive power over time
plot(ndata$Global_reactive_power ~ ndata$DateTime, type = "l",
     ylab = "Global_reactive_power", xlab = "datetime")
dev.off()
| /plot4.R | no_license | vivianni/ExploratoryDataAnalysis | R | false | false | 1,484 | r | ## The data household_power_consumption has been stored in the working directory
## Read the data into the working environment; missing values are coded as "?".
ndata <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
## Keep only the two days of interest (dates are still d/m/Y strings at this point).
ndata <- subset(ndata, ndata$Date == "1/2/2007" | ndata$Date == "2/2/2007")
ndata$Date <- as.Date(ndata$Date, format = "%d/%m/%Y")
## Combine Date and Time into a POSIXct DateTime column.
## BUG FIX: the original used sep = "" which glued date and time together
## ("2007-02-0100:00:00"); as.POSIXct then silently dropped the time of day.
DateTime <- paste(as.Date(ndata$Date), ndata$Time, sep = " ")
ndata$DateTime <- as.POSIXct(DateTime)
## Open the png device and set up a 2x2 grid of plots with custom margins.
png(filename = "plot4.png", width = 480, height = 480, bg = "white")
par(mfrow = c(2, 2), mar = c(10, 6, 1, 2))
## plot (1,1): global active power over time
plot(ndata$Global_active_power ~ ndata$DateTime, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
## plot (1,2): voltage over time
plot(ndata$Voltage ~ ndata$DateTime, type = "l",
     ylab = "Voltage", xlab = "datetime")
## plot (2,1): the three sub-metering series overlaid
with(ndata, {
  plot(Sub_metering_1 ~ DateTime, type = "l", ylab = "Energy sub metering", xlab = "")
  lines(Sub_metering_2 ~ DateTime, col = "red")
  lines(Sub_metering_3 ~ DateTime, col = "blue")})
legend("topright", col = c("black", "red", "blue"),
       legend = c("sub_metering_1", "sub_metering_2", "sub_metering_3"), lty = 1, lwd = 1)
## plot (2,2): global reactive power over time
plot(ndata$Global_reactive_power ~ ndata$DateTime, type = "l",
     ylab = "Global_reactive_power", xlab = "datetime")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{no_region_id_metadata}
\alias{no_region_id_metadata}
\title{Simple toy metadata defining three experiments, with one condition (\code{test}) per experiment.
Implicitly, 20 individuals are in each experiment
It serves as a simple example.}
\format{
An object of class \code{data.frame} with 3 rows and 4 columns.
}
\usage{
no_region_id_metadata
}
\description{
Simple toy metadata defining three experiments, with one condition (\code{test}) per experiment.
Implicitly, 20 individuals are in each experiment
It serves as a simple example.
}
\author{
Quentin Geissmann
}
\keyword{datasets}
| /man/no_region_id_metadata.Rd | no_license | cran/scopr | R | false | true | 694 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{no_region_id_metadata}
\alias{no_region_id_metadata}
\title{Simple toy metadata defining three experiments, with one condition (\code{test}) per experiment.
Implicitly, 20 individuals are in each experiment
It serves as a simple example.}
\format{
An object of class \code{data.frame} with 3 rows and 4 columns.
}
\usage{
no_region_id_metadata
}
\description{
Simple toy metadata defining three experiments, with one condition (\code{test}) per experiment.
Implicitly, 20 individuals are in each experiment
It serves as a simple example.
}
\author{
Quentin Geissmann
}
\keyword{datasets}
|
library(httr)
library(ggplot2)
library(Peptides)

# NOTE(review): clearing the workspace inside a script is discouraged practice;
# kept here to preserve the original behaviour when the script is sourced.
rm(list = ls())

## Output location and file-name prefix for the result .RData file.
res_path <- "/Users/rmylonas/tmp/datamining_pumba/results/"
res_name <- "homodimers_"

## Internal helper library (get_all_datasets, get_protein_merge, get_peak_indexes, ...).
source("./R/functions/all_functions.R")

all_datasets <- get_all_datasets()
samples <- get_samples(all_datasets)

## Samples to analyse; NULL would keep all samples.
sel_samples <- c("HCT")
# nr_proteins <- 100  # uncomment to limit the number of proteins (debugging)
## Accessions for which the merge curve should be plotted, e.g.
## c("H7C417", "O60307", "E9PLN8", "E7EVH7", "A2RTX5", "A2RRP1", "A0A0A6YYH1")
plot_proteins <- c()

## One result data.frame per sample, keyed by sample name.
results <- list()

for (sample in samples) {
  ## Look only at selected samples.
  if (!is_selected_sample(sample, sel_samples)) next

  ## Load the (normalised and original) protein-group files of this sample's datasets.
  datasets <- find_datasets(sample, all_datasets)
  dataset_ids <- get_dataset_ids(datasets)
  all_protein_groups <- get_all_protein_groups(datasets)
  all_orig_protein_groups <- get_all_orig_protein_groups(datasets)
  first_protein_acs <- get_protein_acs(all_protein_groups[[1]])

  ## Per-protein result accumulators (one entry appended per kept protein).
  closest_peak_dists <- c()
  theo_weights <- c()
  protein_acs <- c()
  closest_peak_masses <- c()
  highest_peak_masses <- c()
  charges <- c()
  charges_by_length <- c()
  pIs <- c()
  locations <- c()
  glycosylations <- c()
  signal_peps <- c()
  ptms <- c()
  prot_intensities <- c()
  nr_peaks <- c()
  perc_slices <- c()
  perc_dists <- c()
  peak_intensities <- c()
  peak_masses <- c()
  scop_classes <- c()
  hydrophobicities <- c()
  homodimers <- c()

  ## Loop over all proteins (or only the first nr_proteins when that is defined).
  nr_prot_loop <- if (!exists("nr_proteins")) length(first_protein_acs) else nr_proteins
  for (k in seq_len(nr_prot_loop)) {
    print(paste0(sample, ': ', k, " of ", nr_prot_loop))

    ## Select one protein.
    protein <- all_protein_groups[[1]][k, ]
    protein_ac <- first_protein_acs[k]

    ## Percentage of slices in which there was a signal.
    perc_slices_prot <- get_perc_slices(all_protein_groups, protein_ac, 0.1)

    ## Merged data from the backend (or cache); some datasets do not contain
    ## this protein at all.
    protein_merges <- get_protein_merge(protein_ac, sample)
    if (length(protein_merges) == 0) next

    ## Since only one dataset is loaded at a time there is exactly one entry.
    protein_merge <- protein_merges[[1]]
    mass <- get_masses(protein_merge)
    ints <- get_ints(protein_merge)

    ## Indexes of intensities corresponding to a peak (thresholds in "config.R").
    peak_idxs <- get_peak_indexes(ints)
    if (length(peak_idxs) < 1) {
      warning(paste0("Could not find any peak in [", protein_ac, "]."))
      next
    }

    peaks_masses <- mass[peak_idxs]
    peaks_ints <- ints[peak_idxs]
    theo_weight <- as.numeric(protein$Mol..weight..kDa.)
    peak_dists <- abs(peaks_masses - theo_weight)
    peak_perc_dists <- (peaks_masses / theo_weight) - 1
    ## BUG FIX: which.min/which.max always return a single index, whereas
    ## which(x == min(x)) compares floats with == and can return several
    ## indices on ties, which would misalign the accumulator vectors below.
    closest_peak <- which.min(peak_dists)
    closest_peak_perc_diff <- (peaks_masses[closest_peak] / theo_weight) - 1
    highest_peak <- which.max(peaks_ints)

    if (seq_exists(protein_ac)) {
      aa_seq <- get_seq(protein_ac)  # renamed from "seq" to avoid shadowing base::seq
      theo_weights <- c(theo_weights, theo_weight)
      protein_acs <- c(protein_acs, protein_ac)

      ## Distance info.
      closest_peak_dists <- c(closest_peak_dists, closest_peak_perc_diff)
      closest_peak_masses <- c(closest_peak_masses, peaks_masses[closest_peak])
      highest_peak_masses <- c(highest_peak_masses, peaks_masses[highest_peak])

      ## Number of peaks and percentage of slices per protein.
      nr_peaks <- c(nr_peaks, length(peaks_masses))
      perc_slices <- c(perc_slices, perc_slices_prot)

      ## Info derived from the amino-acid sequence (Peptides package).
      this_charge <- charge(aa_seq, pH = 7, pKscale = "EMBOSS")
      this_pI <- pI(aa_seq, pKscale = "EMBOSS")
      this_hydrophobicity <- hydrophobicity(aa_seq, scale = "HoppWoods")
      pIs <- c(pIs, this_pI)
      charges <- c(charges, this_charge)
      charges_by_length <- c(charges_by_length, this_charge / nchar(aa_seq))
      hydrophobicities <- c(hydrophobicities, this_hydrophobicity)

      ## Info from MaxQuant.
      prot_intensities <- c(prot_intensities, get_median_protein_intensities(all_orig_protein_groups, protein_ac))

      ## Info from UniProt (stays NA when no XML entry is available).
      uniprot_xml <- get_uniprot_xml(protein_ac)
      location <- NA
      glycosylation <- NA
      signal_pep <- NA
      ptm <- NA
      is_homodimer <- NA
      if (!is.null(uniprot_xml)) {
        location <- paste(get_locations(uniprot_xml), collapse = ",")
        glycosylation <- paste(get_glycosylations(uniprot_xml), collapse = ",")
        signal_pep <- get_signal_pep(uniprot_xml)
        ptm <- paste(get_ptms(uniprot_xml), get_crosslink(uniprot_xml), collapse = ",")
        is_homodimer <- get_homodimer(uniprot_xml)
      }
      locations <- c(locations, location)
      glycosylations <- c(glycosylations, glycosylation)
      signal_peps <- c(signal_peps, signal_pep)
      ptms <- c(ptms, ptm)
      peak_intensities <- c(peak_intensities, paste(peaks_ints, collapse = ","))
      peak_masses <- c(peak_masses, paste(peaks_masses, collapse = ","))
      perc_dists <- c(perc_dists, paste(peak_perc_dists, collapse = ","))
      scop_classes <- c(scop_classes, get_scop_classes(protein_ac))
      homodimers <- c(homodimers, is_homodimer)
    }

    if (protein_ac %in% plot_proteins) {
      ## Plot the merge curve, the detected peaks and the theoretical weight.
      peak_dists_perc <- (peaks_masses / theo_weight) - 1
      plot(mass, ints, type = "l", main = protein_ac)
      abline(v = theo_weight, col = "blue")
      points(mass[peak_idxs], ints[peak_idxs], col = "red")
      # text(mass[peak_idxs], ints[peak_idxs], labels=(round(peak_dists_perc, digits = 2)), col="red", pos=4)
    }
  }

  sample_res <- data.frame(
    theo_weights,
    closest_peak_dists,
    protein_acs,
    closest_peak_masses,
    highest_peak_masses,
    charges,
    charges_by_length,
    pIs,
    locations,
    glycosylations,
    signal_peps,
    ptms,
    prot_intensities,
    nr_peaks,
    perc_slices,
    peak_intensities,
    peak_masses,
    perc_dists,
    scop_classes,
    hydrophobicities,
    homodimers
  )

  ## Store the data in the results list.
  results[[sample]] <- sample_res
}

## Store results with a timestamp in the file name.
res_file <- paste0(res_path, res_name, as.numeric(Sys.time()), ".RData")
save(results, file = res_file)
print(paste0("saved results in [", res_file, "]"))
| /R/find_merged_min_max.R | permissive | UNIL-PAF/pumba-datamining | R | false | false | 7,032 | r | library(httr)
library(ggplot2)
library(Peptides)

# NOTE(review): clearing the workspace inside a script is discouraged practice;
# kept here to preserve the original behaviour when the script is sourced.
rm(list = ls())

## Output location and file-name prefix for the result .RData file.
res_path <- "/Users/rmylonas/tmp/datamining_pumba/results/"
res_name <- "homodimers_"

## Internal helper library (get_all_datasets, get_protein_merge, get_peak_indexes, ...).
source("./R/functions/all_functions.R")

all_datasets <- get_all_datasets()
samples <- get_samples(all_datasets)

## Samples to analyse; NULL would keep all samples.
sel_samples <- c("HCT")
# nr_proteins <- 100  # uncomment to limit the number of proteins (debugging)
## Accessions for which the merge curve should be plotted, e.g.
## c("H7C417", "O60307", "E9PLN8", "E7EVH7", "A2RTX5", "A2RRP1", "A0A0A6YYH1")
plot_proteins <- c()

## One result data.frame per sample, keyed by sample name.
results <- list()

for (sample in samples) {
  ## Look only at selected samples.
  if (!is_selected_sample(sample, sel_samples)) next

  ## Load the (normalised and original) protein-group files of this sample's datasets.
  datasets <- find_datasets(sample, all_datasets)
  dataset_ids <- get_dataset_ids(datasets)
  all_protein_groups <- get_all_protein_groups(datasets)
  all_orig_protein_groups <- get_all_orig_protein_groups(datasets)
  first_protein_acs <- get_protein_acs(all_protein_groups[[1]])

  ## Per-protein result accumulators (one entry appended per kept protein).
  closest_peak_dists <- c()
  theo_weights <- c()
  protein_acs <- c()
  closest_peak_masses <- c()
  highest_peak_masses <- c()
  charges <- c()
  charges_by_length <- c()
  pIs <- c()
  locations <- c()
  glycosylations <- c()
  signal_peps <- c()
  ptms <- c()
  prot_intensities <- c()
  nr_peaks <- c()
  perc_slices <- c()
  perc_dists <- c()
  peak_intensities <- c()
  peak_masses <- c()
  scop_classes <- c()
  hydrophobicities <- c()
  homodimers <- c()

  ## Loop over all proteins (or only the first nr_proteins when that is defined).
  nr_prot_loop <- if (!exists("nr_proteins")) length(first_protein_acs) else nr_proteins
  for (k in seq_len(nr_prot_loop)) {
    print(paste0(sample, ': ', k, " of ", nr_prot_loop))

    ## Select one protein.
    protein <- all_protein_groups[[1]][k, ]
    protein_ac <- first_protein_acs[k]

    ## Percentage of slices in which there was a signal.
    perc_slices_prot <- get_perc_slices(all_protein_groups, protein_ac, 0.1)

    ## Merged data from the backend (or cache); some datasets do not contain
    ## this protein at all.
    protein_merges <- get_protein_merge(protein_ac, sample)
    if (length(protein_merges) == 0) next

    ## Since only one dataset is loaded at a time there is exactly one entry.
    protein_merge <- protein_merges[[1]]
    mass <- get_masses(protein_merge)
    ints <- get_ints(protein_merge)

    ## Indexes of intensities corresponding to a peak (thresholds in "config.R").
    peak_idxs <- get_peak_indexes(ints)
    if (length(peak_idxs) < 1) {
      warning(paste0("Could not find any peak in [", protein_ac, "]."))
      next
    }

    peaks_masses <- mass[peak_idxs]
    peaks_ints <- ints[peak_idxs]
    theo_weight <- as.numeric(protein$Mol..weight..kDa.)
    peak_dists <- abs(peaks_masses - theo_weight)
    peak_perc_dists <- (peaks_masses / theo_weight) - 1
    ## BUG FIX: which.min/which.max always return a single index, whereas
    ## which(x == min(x)) compares floats with == and can return several
    ## indices on ties, which would misalign the accumulator vectors below.
    closest_peak <- which.min(peak_dists)
    closest_peak_perc_diff <- (peaks_masses[closest_peak] / theo_weight) - 1
    highest_peak <- which.max(peaks_ints)

    if (seq_exists(protein_ac)) {
      aa_seq <- get_seq(protein_ac)  # renamed from "seq" to avoid shadowing base::seq
      theo_weights <- c(theo_weights, theo_weight)
      protein_acs <- c(protein_acs, protein_ac)

      ## Distance info.
      closest_peak_dists <- c(closest_peak_dists, closest_peak_perc_diff)
      closest_peak_masses <- c(closest_peak_masses, peaks_masses[closest_peak])
      highest_peak_masses <- c(highest_peak_masses, peaks_masses[highest_peak])

      ## Number of peaks and percentage of slices per protein.
      nr_peaks <- c(nr_peaks, length(peaks_masses))
      perc_slices <- c(perc_slices, perc_slices_prot)

      ## Info derived from the amino-acid sequence (Peptides package).
      this_charge <- charge(aa_seq, pH = 7, pKscale = "EMBOSS")
      this_pI <- pI(aa_seq, pKscale = "EMBOSS")
      this_hydrophobicity <- hydrophobicity(aa_seq, scale = "HoppWoods")
      pIs <- c(pIs, this_pI)
      charges <- c(charges, this_charge)
      charges_by_length <- c(charges_by_length, this_charge / nchar(aa_seq))
      hydrophobicities <- c(hydrophobicities, this_hydrophobicity)

      ## Info from MaxQuant.
      prot_intensities <- c(prot_intensities, get_median_protein_intensities(all_orig_protein_groups, protein_ac))

      ## Info from UniProt (stays NA when no XML entry is available).
      uniprot_xml <- get_uniprot_xml(protein_ac)
      location <- NA
      glycosylation <- NA
      signal_pep <- NA
      ptm <- NA
      is_homodimer <- NA
      if (!is.null(uniprot_xml)) {
        location <- paste(get_locations(uniprot_xml), collapse = ",")
        glycosylation <- paste(get_glycosylations(uniprot_xml), collapse = ",")
        signal_pep <- get_signal_pep(uniprot_xml)
        ptm <- paste(get_ptms(uniprot_xml), get_crosslink(uniprot_xml), collapse = ",")
        is_homodimer <- get_homodimer(uniprot_xml)
      }
      locations <- c(locations, location)
      glycosylations <- c(glycosylations, glycosylation)
      signal_peps <- c(signal_peps, signal_pep)
      ptms <- c(ptms, ptm)
      peak_intensities <- c(peak_intensities, paste(peaks_ints, collapse = ","))
      peak_masses <- c(peak_masses, paste(peaks_masses, collapse = ","))
      perc_dists <- c(perc_dists, paste(peak_perc_dists, collapse = ","))
      scop_classes <- c(scop_classes, get_scop_classes(protein_ac))
      homodimers <- c(homodimers, is_homodimer)
    }

    if (protein_ac %in% plot_proteins) {
      ## Plot the merge curve, the detected peaks and the theoretical weight.
      peak_dists_perc <- (peaks_masses / theo_weight) - 1
      plot(mass, ints, type = "l", main = protein_ac)
      abline(v = theo_weight, col = "blue")
      points(mass[peak_idxs], ints[peak_idxs], col = "red")
      # text(mass[peak_idxs], ints[peak_idxs], labels=(round(peak_dists_perc, digits = 2)), col="red", pos=4)
    }
  }

  sample_res <- data.frame(
    theo_weights,
    closest_peak_dists,
    protein_acs,
    closest_peak_masses,
    highest_peak_masses,
    charges,
    charges_by_length,
    pIs,
    locations,
    glycosylations,
    signal_peps,
    ptms,
    prot_intensities,
    nr_peaks,
    perc_slices,
    peak_intensities,
    peak_masses,
    perc_dists,
    scop_classes,
    hydrophobicities,
    homodimers
  )

  ## Store the data in the results list.
  results[[sample]] <- sample_res
}

## Store results with a timestamp in the file name.
res_file <- paste0(res_path, res_name, as.numeric(Sys.time()), ".RData")
save(results, file = res_file)
print(paste0("saved results in [", res_file, "]"))
|
library("shiny")
library("shinydashboard")

## Header: application title only.
mmstat_header <- dashboardHeader(title = "MM*Stat")

## Sidebar: four collapsible menus holding the server-rendered input widgets.
mmstat_sidebar <- dashboardSidebar(
  sidebarMenu(
    menuItem(text = gettext("Confidence interval parameter"), startExpanded = TRUE,
             uiOutput("conflevelUI"),
             uiOutput("varequalUI"),
             uiOutput("size1UI"),
             uiOutput("size2UI")),
    menuItem(text = gettext("Sample drawing"), startExpanded = TRUE,
             uiOutput("goUI"),
             uiOutput("resetUI"),
             uiOutput("speedUI")),
    menuItem(text = gettext("Data choice"), startExpanded = FALSE,
             uiOutput("datasetUI"),
             uiOutput("variableUI"),
             uiOutput("groupUI")),
    menuItem(text = gettext("Options"), startExpanded = FALSE,
             uiOutput("cexUI"))
  )
)

## Body: the confidence-interval plot with the sample plot below it.
mmstat_body <- dashboardBody(
  fluidRow(
    column(width = 12,
           box(title = gettext("Confidence intervals for the difference of two means"),
               status = "primary",
               plotOutput("outputConfPlot"),
               plotOutput("outputSamplePlot", height = "200px"))
    )
  )
)

## The value of the last expression of ui.R is the UI shiny renders.
dashboardPage(mmstat_header, mmstat_sidebar, mmstat_body)
#shinyUI(fluidPage(
#
# div(class="navbar navbar-static-top",
# div(class = "navbar-inner",
# fluidRow(column(4, div(class = "brand pull-left", gettext("Confidence intervals for the difference of two means"))),
# column(2, checkboxInput("showtest", gettext("Confidence interval parameter"), TRUE)),
# column(2, checkboxInput("showsample", gettext("Sample drawing"), TRUE)),
# column(2, checkboxInput("showdata", gettext("Data choice"), FALSE)),
# column(2, checkboxInput("showoptions", gettext("Options"), FALSE))))),
#
# sidebarLayout(
# sidebarPanel(
# conditionalPanel(
# condition = 'input.showtest',
# uiOutput("conflevelUI"),
# br(),
# uiOutput("varequalUI"),
# br(),
# uiOutput("size1UI"),
# br(),
# uiOutput("size2UI")
# ),
# conditionalPanel(
# condition = 'input.showsample',
# hr(),
# uiOutput("goUI"),
# uiOutput("resetUI"),
# uiOutput("speedUI")
# ),
# conditionalPanel(
# condition = 'input.showdata',
# hr(),
# uiOutput("datasetUI"),
# uiOutput("variableUI"),
# uiOutput("groupUI")
# ),
# conditionalPanel(
# condition = 'input.showoptions',
# hr(),
# uiOutput("cexUI")
# )
# ),
#
# mainPanel(plotOutput("outputConfPlot"),
# plotOutput("outputSamplePlot", height = "200px"))),
#
# htmlOutput("logText")
# ))
| /inst/examples/stat/confidence_two_means/ui.R | no_license | Kale14/mmstat4 | R | false | false | 2,744 | r | library("shiny")
library("shinydashboard")

## Header: application title only.
mmstat_header <- dashboardHeader(title = "MM*Stat")

## Sidebar: four collapsible menus holding the server-rendered input widgets.
mmstat_sidebar <- dashboardSidebar(
  sidebarMenu(
    menuItem(text = gettext("Confidence interval parameter"), startExpanded = TRUE,
             uiOutput("conflevelUI"),
             uiOutput("varequalUI"),
             uiOutput("size1UI"),
             uiOutput("size2UI")),
    menuItem(text = gettext("Sample drawing"), startExpanded = TRUE,
             uiOutput("goUI"),
             uiOutput("resetUI"),
             uiOutput("speedUI")),
    menuItem(text = gettext("Data choice"), startExpanded = FALSE,
             uiOutput("datasetUI"),
             uiOutput("variableUI"),
             uiOutput("groupUI")),
    menuItem(text = gettext("Options"), startExpanded = FALSE,
             uiOutput("cexUI"))
  )
)

## Body: the confidence-interval plot with the sample plot below it.
mmstat_body <- dashboardBody(
  fluidRow(
    column(width = 12,
           box(title = gettext("Confidence intervals for the difference of two means"),
               status = "primary",
               plotOutput("outputConfPlot"),
               plotOutput("outputSamplePlot", height = "200px"))
    )
  )
)

## The value of the last expression of ui.R is the UI shiny renders.
dashboardPage(mmstat_header, mmstat_sidebar, mmstat_body)
#shinyUI(fluidPage(
#
# div(class="navbar navbar-static-top",
# div(class = "navbar-inner",
# fluidRow(column(4, div(class = "brand pull-left", gettext("Confidence intervals for the difference of two means"))),
# column(2, checkboxInput("showtest", gettext("Confidence interval parameter"), TRUE)),
# column(2, checkboxInput("showsample", gettext("Sample drawing"), TRUE)),
# column(2, checkboxInput("showdata", gettext("Data choice"), FALSE)),
# column(2, checkboxInput("showoptions", gettext("Options"), FALSE))))),
#
# sidebarLayout(
# sidebarPanel(
# conditionalPanel(
# condition = 'input.showtest',
# uiOutput("conflevelUI"),
# br(),
# uiOutput("varequalUI"),
# br(),
# uiOutput("size1UI"),
# br(),
# uiOutput("size2UI")
# ),
# conditionalPanel(
# condition = 'input.showsample',
# hr(),
# uiOutput("goUI"),
# uiOutput("resetUI"),
# uiOutput("speedUI")
# ),
# conditionalPanel(
# condition = 'input.showdata',
# hr(),
# uiOutput("datasetUI"),
# uiOutput("variableUI"),
# uiOutput("groupUI")
# ),
# conditionalPanel(
# condition = 'input.showoptions',
# hr(),
# uiOutput("cexUI")
# )
# ),
#
# mainPanel(plotOutput("outputConfPlot"),
# plotOutput("outputSamplePlot", height = "200px"))),
#
# htmlOutput("logText")
# ))
|
## Package name, used to locate the bundled do_funr.Rscript via system.file().
PKG <- "garel"
## Default cluster module providing R (passed to "module load" in the scripts).
RLOAD <- "anaconda3/personal" #"R/3.3.3"
## Not referenced in this file; presumably an output directory for generated
## scripts — TODO confirm against the rest of the package.
OUTSH <- "code"
## Default PBS directives: walltime plus small/large resource selections.
TIMEPBS <- "#PBS -l walltime=24:00:00"
SMALLPBS <- "#PBS -l select=1:ncpus=1:mem=1gb"
LARGEPBS <- "#PBS -l select=1:ncpus=16:mem=8gb"
#PATHRS <- system.file("ext", "do_funr.Rscript", package = PKG, mustWork = TRUE)
####---- hpc.cmd.funr ----
#' @title Create cmd with do_funr.Rscript
#' @description Execute any R function as Rscript, either by wrapping for HPC or as a bash script.
#' @param f.name Function name in quotes [default = norm_test], example function
#' @param f.arg Function arguments in the form \code{"arg1=x arg2=y"}
#' @param r.load R version to load with \code{module load}
#' @param other.load Other software to load with \code{module load}
#' @param hpc.time Job resource selection: time [default = TIMEPBS]
#' @param hpc.resource Job resource selection: core / node / memory [default = SMALLPBS]
#' @param hpc.q PBS queue name [default = NA]
#' @param submit Run either with \code{qsub} or \code{bash} [default = FALSE]
#' @param jobdepend In the form \code{depend=afterok:previous_jobs}
#' @param verbose Print progress messages (the command and the script path)
#' @return List containing script to execute with \code{qsub} or \code{bash}. If \code{submit = TRUE}, run the script. If \code{submit = TRUE} and on HPC, output the PBS_JOBID number
#' @examples
#' hpc.cmd.funr("norm_test", "n=3", submit = FALSE)
#' @export
hpc.cmd.funr <- function(f.name = "norm_test",
                         f.arg = "n=5",
                         r.load = RLOAD,
                         other.load = "",
                         hpc.time = TIMEPBS,
                         hpc.resource = SMALLPBS,
                         hpc.q = NA,
                         submit = FALSE,
                         jobdepend = NULL,
                         verbose = TRUE)
{
  ## Path to the generic Rscript runner shipped with the package.
  PATHRS <- system.file("ext", "do_funr.Rscript", package = PKG, mustWork = TRUE)
  cmd <- paste("Rscript", PATHRS, f.name, f.arg)
  if (verbose) print(cmd)
  bang <- "#!/usr/bin/env bash"
  ## On HPC (qsub on the PATH): wrap the command in a PBS submission script.
  ## nzchar() makes the "qsub available?" test explicitly logical.
  if (nzchar(Sys.which("qsub"))) {
    ## Directory for the PBS log files of this function.
    dirlog <- paste0("stdout", "/", f.name, ".log")
    dir.create("stdout", showWarnings = FALSE)
    dir.create(dirlog, showWarnings = FALSE)
    ## PBS header: resources, log location, optional queue, module loads.
    if (!is.na(hpc.q)) q <- paste("#PBS -q", hpc.q) else q <- ""
    hpc.head <- paste(bang, hpc.time, hpc.resource, paste("#PBS -o", dirlog, "-j oe"), q, "cd $PBS_O_WORKDIR", paste("module load", r.load, other.load), sep ="\n ")
    fullscript <- paste(hpc.head, cmd, sep = "\n")
    path.qsub <- tempfile(pattern = paste0(f.name, "_"), fileext = ".sub")
    writeLines(fullscript, con = path.qsub)
    if (verbose) print(paste("Writing qsub script in", path.qsub))
    ## Prepend the dependency spec (e.g. "depend=afterok:123") when given.
    args <- if (!is.null(jobdepend)) paste(jobdepend, path.qsub) else path.qsub
    if (submit) {
      jobid <- system2("qsub", args, stdout = TRUE)
      ## Extract the leading numeric part of the job id (i.e. "543220.cx1" -> 543220).
      njobid <- as.numeric(regmatches(jobid, regexpr("[[:digit:]]+", jobid)))
      return(list(script = fullscript, jobid = njobid))
    } else {
      return(list(script = fullscript))
    }
  }
  ## Local machine: write (and optionally run) a plain bash script instead.
  else {
    fullscript <- paste(bang, cmd, sep = "\n")
    path.sh <- tempfile(pattern = paste0(f.name, "_"), fileext = ".sh")
    writeLines(fullscript, con = path.sh)
    if (verbose) print(paste("Writing bash script in", path.sh))
    if (submit) {
      system(paste("bash", path.sh))
    }
    return(list(script = fullscript))
  }
}
####---- hpc.cmd.array ----
#' Create HPC command with qsub array jobs
#'
#' See \code{\link{hpc.cmd.funr}}.
#' If not on HPC, bash scripts are generated using lapply().
#' @param inputs R list of inputs or infiles, passed to an bash array and indexed as \code{$PBS_ARRAY_INDEX}
#' @param f.name Function name in quotes [default = norm_test], example function
#' @param f.arg Function arguments in the form \code{"arg1=x arg2=y"}
#' @param r.load R version to load with \code{module load}
#' @param other.load Other software to load with \code{module load}
#' @param hpc.time Job resource selection: time [default = TIMEPBS]
#' @param hpc.resource Job resource selection: core / node / memory [default = SMALLPBS]
#' @param hpc.q PBS queue name [default = NA]
#' @param submit Run either with \code{qsub} or \code{bash} [default = FALSE]
#' @param jobdepend In the form \code{depend=afterok:previous_jobs}
#' @param verbose Print progress messages (the command and the script paths)
#' @return List containing script to execute with \code{qsub} or \code{bash}. If \code{submit = TRUE}, run the script. If \code{submit = TRUE} and on HPC, output the PBS_JOBID number
#' @examples
#' hpc.cmd.array(inputs = list(3,5), f.name = "norm_test", f.arg = "n=$N", submit = TRUE)
#' @seealso \code{\link{hpc.cmd.funr}}
#' @export
hpc.cmd.array <- function(inputs = list(3,5),
                          f.name = "norm_test",
                          f.arg = "n=$N",
                          r.load = RLOAD,
                          other.load = "",
                          hpc.time = TIMEPBS,
                          hpc.resource = SMALLPBS,
                          hpc.q = NA,
                          submit = FALSE,
                          jobdepend = NULL,
                          verbose = TRUE)
{
  ## Path to the generic Rscript runner shipped with the package.
  PATHRS <- system.file("ext", "do_funr.Rscript", package = PKG, mustWork = TRUE)
  bang <- "#!/usr/bin/env bash"
  ## On HPC: one PBS array job; the bash array IN holds all inputs and N is
  ## the element for the current sub-job.
  if (nzchar(Sys.which("qsub"))) {
    ## $PBS_ARRAY_INDEX is 1-based while bash arrays are 0-based, hence the -1.
    ins <- paste(paste0("IN=(",paste(inputs, collapse = ' '),")"), "N=${IN[$PBS_ARRAY_INDEX - 1]}", sep ="\n ")
    cmd <- paste(ins, paste("Rscript", PATHRS, f.name, f.arg), sep ="\n ")
    if (verbose) print(cmd)
    ## Directory for the PBS log files of this function.
    dirlog <- paste0("stdout", "/", f.name, ".log")
    dir.create("stdout", showWarnings = FALSE)
    dir.create(dirlog, showWarnings = FALSE)
    ## PBS header: resources, log location, optional queue, module loads.
    if (!is.na(hpc.q)) q <- paste("#PBS -q", hpc.q) else q <- ""
    hpc.head <- paste(bang, hpc.time, hpc.resource, paste("#PBS -o", dirlog, "-j oe"), q, "cd $PBS_O_WORKDIR", paste("module load", r.load, other.load), sep ="\n ")
    fullscript <- paste(hpc.head, cmd, sep = "\n")
    path.qsub <- tempfile(pattern = paste0(f.name, "_"), fileext = ".sub")
    writeLines(fullscript, con = path.qsub)
    if (verbose) print(paste("Writing qsub script in", path.qsub))
    ## One sub-job per input: qsub -J 1-<n>.
    array <- paste0("-J ", "1-", length(inputs))
    args <- if (!is.null(jobdepend)) paste(jobdepend, array, path.qsub) else paste(array, path.qsub)
    if (submit) {
      jobid <- system2("qsub", args, stdout = TRUE)  # monitor with: qstat -t
      ## Extract the leading numeric part of the job id (i.e. "543220.cx1" -> 543220);
      ## "[]" marks it as an array job for downstream dependency handling.
      njobid <- as.numeric(regmatches(jobid, regexpr("[[:digit:]]+", jobid)))
      return(list(script = fullscript, jobid = paste0(njobid, "[]")))
    } else {
      return(list(script = fullscript))
    }
  }
  ## Local machine: one plain bash script per input, via lapply.
  else {
    lscripts <- lapply(inputs, function(x){
      ins <- paste0("N=", x)
      cmd <- paste(ins, paste("Rscript", PATHRS, f.name, f.arg), sep ="\n ")
      fullscript <- paste(bang, cmd, sep = "\n")
      path.sh <- tempfile(pattern = paste0(f.name, "_"), fileext = ".sh")
      writeLines(fullscript, con = path.sh)
      if (verbose) print(paste("Writing bash script in", path.sh))
      if (submit) {
        system(paste("bash", path.sh))
      }
      fullscript
    })
    return(lscripts)
  }
}
##---- get_jobid ----
#' Get PBS_JOBID from qsub query
#'
#' Check depth of list to find \code{jobid} object
#' @param lst List (or list of list) of qsub
#' @param array array jobs notation
#' @param qstat print qstat
#' @param verbose print stuff
#' @return Numeric of JOBID in form: single integer, array notation x:y, or vector if array = FALSE
get_jobid <- function(lst = "script", array = TRUE, qstat = FALSE, verbose = TRUE){
  ## Job ids only exist on a PBS cluster; elsewhere return NA immediately.
  if (nzchar(Sys.which("qsub"))) {  # on HPC, record JOBID
    if (qstat) system('qstat')
    if (list.depth(lst) == 1) {
      ## A single qsub result: take its jobid directly.
      jobs <- lst[['jobid']]
    } else {
      ## A list of qsub results: collect every jobid.
      jobs <- sapply(lst, function(x) x[["jobid"]])
      if (verbose) print(paste(length(jobs), "jobs launched"))
      ## Collapse the ids into the x:y range notation used for dependencies.
      if (array) jobs <- paste0(min(jobs), ":", max(jobs))
    }
    if (verbose) print(paste0("previous_jobs=", jobs))
    return(jobs)
  } else {
    return(NA)
  }
}
| /R/generate_bash.R | no_license | slevu/garel | R | false | false | 8,275 | r | PKG <- "garel"
## Default cluster module providing R (passed to "module load" in the scripts).
RLOAD <- "anaconda3/personal" #"R/3.3.3"
## Not referenced in this file; presumably an output directory for generated
## scripts — TODO confirm against the rest of the package.
OUTSH <- "code"
## Default PBS directives: walltime plus small/large resource selections.
TIMEPBS <- "#PBS -l walltime=24:00:00"
SMALLPBS <- "#PBS -l select=1:ncpus=1:mem=1gb"
LARGEPBS <- "#PBS -l select=1:ncpus=16:mem=8gb"
#PATHRS <- system.file("ext", "do_funr.Rscript", package = PKG, mustWork = TRUE)
####---- hpc.cmd.funr ----
#' @title Create cmd with do_funr.Rscript
#' @description Execute any R function as Rscript, either by wrapping for HPC or as a bash script.
#' @param f.name Function name in quotes [default = norm_test], example function
#' @param f.arg Function arguments in the form \code{"arg1=x arg2=y"}
#' @param r.load R version to load with \code{module load}
#' @param other.load Other software to load with \code{module load}
#' @param hpc.time Job resource selection: time [default = TIMEPBS]
#' @param hpc.resource Job resource selection: core / node / memory [default = SMALLPBS]
#' @param hpc.q PBS queue name [default = NA]
#' @param submit Run either with \code{qsub} or \code{bash} [default = FALSE]
#' @param jobdepend In the form \code{depend=afterok:previous_jobs}
#' @param verbose Print progress messages (the command and the script path)
#' @return List containing script to execute with \code{qsub} or \code{bash}. If \code{submit = TRUE}, run the script. If \code{submit = TRUE} and on HPC, output the PBS_JOBID number
#' @examples
#' hpc.cmd.funr("norm_test", "n=3", submit = FALSE)
#' @export
hpc.cmd.funr <- function(f.name = "norm_test",
                         f.arg = "n=5",
                         r.load = RLOAD,
                         other.load = "",
                         hpc.time = TIMEPBS,
                         hpc.resource = SMALLPBS,
                         hpc.q = NA,
                         submit = FALSE,
                         jobdepend = NULL,
                         verbose = TRUE)
{
  ## Path to the generic Rscript runner shipped with the package.
  PATHRS <- system.file("ext", "do_funr.Rscript", package = PKG, mustWork = TRUE)
  cmd <- paste("Rscript", PATHRS, f.name, f.arg)
  if (verbose) print(cmd)
  bang <- "#!/usr/bin/env bash"
  ## On HPC (qsub on the PATH): wrap the command in a PBS submission script.
  ## nzchar() makes the "qsub available?" test explicitly logical.
  if (nzchar(Sys.which("qsub"))) {
    ## Directory for the PBS log files of this function.
    dirlog <- paste0("stdout", "/", f.name, ".log")
    dir.create("stdout", showWarnings = FALSE)
    dir.create(dirlog, showWarnings = FALSE)
    ## PBS header: resources, log location, optional queue, module loads.
    if (!is.na(hpc.q)) q <- paste("#PBS -q", hpc.q) else q <- ""
    hpc.head <- paste(bang, hpc.time, hpc.resource, paste("#PBS -o", dirlog, "-j oe"), q, "cd $PBS_O_WORKDIR", paste("module load", r.load, other.load), sep ="\n ")
    fullscript <- paste(hpc.head, cmd, sep = "\n")
    path.qsub <- tempfile(pattern = paste0(f.name, "_"), fileext = ".sub")
    writeLines(fullscript, con = path.qsub)
    if (verbose) print(paste("Writing qsub script in", path.qsub))
    ## Prepend the dependency spec (e.g. "depend=afterok:123") when given.
    args <- if (!is.null(jobdepend)) paste(jobdepend, path.qsub) else path.qsub
    if (submit) {
      jobid <- system2("qsub", args, stdout = TRUE)
      ## Extract the leading numeric part of the job id (i.e. "543220.cx1" -> 543220).
      njobid <- as.numeric(regmatches(jobid, regexpr("[[:digit:]]+", jobid)))
      return(list(script = fullscript, jobid = njobid))
    } else {
      return(list(script = fullscript))
    }
  }
  ## Local machine: write (and optionally run) a plain bash script instead.
  else {
    fullscript <- paste(bang, cmd, sep = "\n")
    path.sh <- tempfile(pattern = paste0(f.name, "_"), fileext = ".sh")
    writeLines(fullscript, con = path.sh)
    if (verbose) print(paste("Writing bash script in", path.sh))
    if (submit) {
      system(paste("bash", path.sh))
    }
    return(list(script = fullscript))
  }
}
####---- hpc.cmd.array ----
#' Create HPC command with qsub array jobs
#'
#' See \code{\link{hpc.cmd.funr}}.
#' If not on HPC, bash scripts are generated using lapply().
#' @param inputs R list of inputs or infiles, passed to an bash array and indexed as \code{$PBS_ARRAY_INDEX}
#' @param f.name Function name in quotes [default = norm_test], example function
#' @param f.arg Function arguments in the form \code{"arg1=x arg2=y"}
#' @param r.load R version to load with \code{module load}
#' @param other.load Other software to load with \code{module load}
#' @param hpc.time Job resource selection: time [default = TIMEPBS]
#' @param hpc.resource Job resource selection: core / node / memory [default = SMALLPBS]
#' @param hpc.q PBS queue name [default = NA]
#' @param submit Run either with \code{qsub} or \code{bash} [default = FALSE]
#' @param jobdepend In the form \code{depend=afterok:previous_jobs}
#' @param verbose Write stuff
#' @return List containing script to execute with \code{qsub} or \code{bash}. If \code{submit = TRUE}, run the script. If \code{submit = TRUE} and on HPC, output the PBS_JOBID number
#' @examples
#' hpc.cmd.array(inputs = list(3,5), f.name = "norm_test", f.arg = "n=$N", submit = TRUE)
#' @seealso \code{\link{hpc.cmd.funr}}
#' @export
hpc.cmd.array <- function(inputs = list(3,5),
                          f.name = "norm_test",
                          f.arg = "n=$N",
                          r.load = RLOAD,
                          other.load = "",
                          hpc.time = TIMEPBS,
                          hpc.resource = SMALLPBS,
                          hpc.q = NA,
                          submit = FALSE,
                          jobdepend = NULL,
                          verbose = TRUE)
{
  # Run `f.name` once per element of `inputs`: as a single PBS array job on
  # HPC, or as a sequence of bash scripts (via lapply) on a local machine.
  PATHRS <- system.file("ext", "do_funr.Rscript", package = PKG, mustWork = TRUE)
  bang <- "#!/usr/bin/env bash"
  ##- HPC: qsub is available, submit one array job covering all inputs
  if (nchar(Sys.which("qsub")) > 0) {
    # bash arrays are zero-indexed while $PBS_ARRAY_INDEX starts at 1,
    # hence the "- 1" when picking this sub-job's input value.
    ins <- paste(paste0("IN=(", paste(inputs, collapse = ' '), ")"), "N=${IN[$PBS_ARRAY_INDEX - 1]}", sep ="\n ")
    cmd <- paste(ins, paste("Rscript", PATHRS, f.name, f.arg), sep ="\n ")
    if (verbose) print(cmd)
    ##- directory for PBS log files (one sub-directory per function name)
    dirlog <- paste0("stdout", "/", f.name, ".log")
    dir.create("stdout", showWarnings = FALSE)
    dir.create(dirlog, showWarnings = FALSE)
    ##- PBS header: resources, log location, optional queue, module loads
    if (!is.na(hpc.q)) q <- paste("#PBS -q", hpc.q) else q <- ""
    hpc.head <- paste(bang, hpc.time, hpc.resource, paste("#PBS -o", dirlog, "-j oe"), q, "cd $PBS_O_WORKDIR", paste("module load", r.load, other.load), sep ="\n ")
    fullscript <- paste(hpc.head, cmd, sep = "\n")
    path.qsub <- tempfile(pattern = paste0(f.name, "_"), fileext = ".sub") # tmpdir = "."
    writeLines(fullscript, con = path.qsub)
    if (verbose) print(paste("Writing qsub script in", path.qsub))
    # Array-job flag: one sub-job per input, indices 1..length(inputs)
    array <- paste0("-J ", "1-", length(inputs))
    # Prepend the dependency clause when given; plain if/else rather than
    # ifelse() since the condition is scalar.
    if (!is.null(jobdepend)) {
      args <- paste(jobdepend, array, path.qsub)
    } else {
      args <- paste(array, path.qsub)
    }
    if (submit) {
      jobid <- system2("qsub", args, stdout = TRUE) # inspect with: system("qstat -t")
      # extract first numeric part of jobid (e.g. "543220.cx1" -> 543220)
      njobid <- as.numeric(regmatches(jobid, regexpr("[[:digit:]]+", jobid)))
      # "[]" marks a PBS array job id
      return(list(script = fullscript, jobid = paste0(njobid, "[]")))
    } else {
      return(list(script = fullscript))
    }
  }
  ##- local machine: one bash script per input, run sequentially
  else {
    lscripts <- lapply(inputs, function(x){
      ins <- paste0("N=", x)
      cmd <- paste(ins, paste("Rscript", PATHRS, f.name, f.arg), sep ="\n ")
      fullscript <- paste(bang, cmd, sep = "\n")
      path.sh <- tempfile(pattern = paste0(f.name, "_"), fileext = ".sh") # tmpdir = "."
      writeLines(fullscript, con = path.sh)
      if (verbose) print(paste("Writing bash script in", path.sh))
      if (submit) {
        system(paste("bash", path.sh))
      }
      return(fullscript)
    })
    return(lscripts)
  }
}
##---- get_jobid ----
#' Get PBS_JOBID from qsub query
#'
#' Check depth of list to find \code{jobid} object
#' @param lst List (or list of list) of qsub
#' @param array array jobs notation
#' @param qstat print qstat
#' @param verbose print stuff
#' @return Numeric of JOBID in form: single integer, array notation x:y, or vector if array = FALSE
get_jobid <- function(lst = "script", array = TRUE, qstat = FALSE, verbose = TRUE){
  # Not on HPC (no qsub on PATH): there is no job id to report
  if (nchar(Sys.which("qsub")) == 0) {
    return(NA)
  }
  if (qstat) system('qstat')
  # A depth-1 list is a single qsub result; deeper lists hold one per job
  if (list.depth(lst) == 1) {
    jobs <- lst[["jobid"]]
  } else {
    jobs <- sapply(lst, function(x) x[["jobid"]])
    if (verbose) print(paste(length(jobs), "jobs launched"))
    # Collapse a run of array job ids into "first:last" notation
    if (array) jobs <- paste0(min(jobs), ":", max(jobs))
  }
  if (verbose) print(paste0("previous_jobs=", jobs))
  jobs
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{getPackagesLogs}
\alias{getPackagesLogs}
\title{Get package(s) logs configuration}
\usage{
getPackagesLogs()
}
\value{
data.frame.
}
\description{
Get package(s) logs configuration. Level and log file
}
\details{
Get package(s) logs configuration. Level and log file
}
\examples{
getPackagesLogs()
}
| /man/getPackagesLogs.Rd | no_license | bthieurmel/logeasier | R | false | false | 360 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{getPackagesLogs}
\alias{getPackagesLogs}
\title{Get package(s) logs configuration}
\usage{
getPackagesLogs()
}
\value{
data.frame.
}
\description{
Get package(s) logs configuration. Level and log file
}
\details{
Get package(s) logs configuration. Level and log file
}
\examples{
getPackagesLogs()
}
|
spectral <- function(numcolors = 256) {
  # Reversed RColorBrewer "Spectral" palette (blue -> red) interpolated to
  # `numcolors` colours. requireNamespace() + :: replaces require(), which
  # only returns FALSE on failure instead of raising an error, and avoids
  # attaching RColorBrewer to the search path.
  if (!requireNamespace("RColorBrewer", quietly = TRUE)) {
    stop("Package 'RColorBrewer' is required for spectral().", call. = FALSE)
  }
  rev(colorRampPalette(RColorBrewer::brewer.pal(11, "Spectral"))(numcolors))
}
raster_plot <- function(x, png_file = NULL, maxpixels = ncell(x), cuts = 255, pal_fun = viridis,
                        bg_color = "white", ...) {
  # Plot a raster with sp::spplot using a continuous palette and no margins,
  # either on the active device (png_file = NULL) or into a PNG sized one
  # pixel per raster cell.
  library(viridis)
  # Lattice theme: zero padding on every side, no axis lines, custom
  # panel background colour.
  plot_theme <- list(layout.heights = list(top.padding = 0,
                                           main.key.padding = 0,
                                           key.axis.padding = 0,
                                           axis.xlab.padding = 0,
                                           xlab.key.padding = 0,
                                           key.sub.padding = 0,
                                           bottom.padding = 0),
                     layout.widths = list(left.padding = 0,
                                          key.ylab.padding = 0,
                                          ylab.axis.padding = 0,
                                          axis.key.padding = 0,
                                          right.padding = 0),
                     axis.line = list(col = 0),
                     panel.background = list(col = bg_color))
  # Build the trellis object once instead of duplicating the spplot() call
  # in both branches.
  p <- spplot(x, maxpixels = maxpixels, col.regions = pal_fun(cuts + 1), cuts = cuts,
              colorkey = FALSE, par.settings = plot_theme, ...)
  if (is.null(png_file)) {
    p
  } else {
    png(png_file, width = ncol(x), height = nrow(x))
    # Guarantee the device is closed even if printing fails.
    on.exit(dev.off(), add = TRUE)
    print(p)
  }
}
| /R/raster_plot.R | permissive | ecohealthalliance/hotspots2 | R | false | false | 1,556 | r | spectral <- function(numcolors = 256) {
require(RColorBrewer)
rev(colorRampPalette(brewer.pal(11, "Spectral"))(numcolors))
}
raster_plot <- function(x, png_file = NULL, maxpixels = ncell(x), cuts = 255, pal_fun = viridis,
bg_color = "white", ...) {
library(viridis)
plot_theme <- list(layout.heights = list(top.padding = 0,
main.key.padding = 0,
key.axis.padding = 0,
axis.xlab.padding = 0,
xlab.key.padding = 0,
key.sub.padding = 0,
bottom.padding = 0),
layout.widths = list(left.padding = 0,
key.ylab.padding = 0,
ylab.axis.padding = 0,
axis.key.padding = 0,
right.padding = 0),
axis.line = list(col = 0),
panel.background = list(col = bg_color))
if (is.null(png_file)) {
spplot(x, maxpixels = maxpixels, col.regions = pal_fun(cuts + 1), cuts = cuts, colorkey = FALSE, par.settings = plot_theme, ...)
} else {
png(png_file, width = ncol(x), height = nrow(x))
print(spplot(x, maxpixels = maxpixels, col.regions = pal_fun(cuts + 1), cuts = cuts, colorkey = FALSE, par.settings = plot_theme, ...))
dev.off()
}
}
|
# Exploratory script: join topGO over-representation results with lists of
# genes of interest (GOI) on polypeptide ID and write the merged annotation
# tables to disk.
# NOTE(review): paths are hard-coded to a specific cluster account; the
# script assumes it is run interactively from that environment.
getwd()
#setwd('/media/bulk_01/users/goryu003/samples/')
ls()
#GO=read.table("ITAG2.3_desc_and_GO.txt", as.is =T,header=T )
setwd('/media/bulk_01/users/goryu003/samples/topGO_ITAG2.3')
# All over-representation result files produced by topGO
enrichment_list = dir (pattern='.*_corresponding_overrepresent_.*')
print(enrichment_list)
# All gene-of-interest mapping files
GOI_list = dir (pattern='.*map.txt')
print (GOI_list)
# z walks through the enrichment files; each GOI file is paired with the
# next three enrichment files (the "while (n<3)" inner loop), so the
# enrichment list is assumed to hold exactly 3 files per GOI file.
z=1
for (i in 1:length(GOI_list))
{
n=0
print(z)
print(n)
while (n<3)
{
# Read the gene-of-interest table and the current enrichment table
ROI_df=read.table (GOI_list[[i]],as.is=T, header=T)
df=read.table (enrichment_list[[z]],as.is=T, header=T)
print (GOI_list[[i]])
print (enrichment_list[[z]])
#print(head (ROI_df))
#print(head (df))
# Inner join: keep only genes present in both tables
map=merge (ROI_df, df, by.x=c("Polypeptide.ID"), by.y=c("Genes"), all.x=F, all.y=F)
print(map)
# Output name: text before the first "." of the enrichment file name,
# suffixed with "HR_annotation.txt"
write.table(map, paste(sep="",strsplit(enrichment_list [[z]], "\\.")[[1]][1],"HR_annotation.txt"))
n=n+1
z=z+1
}
}
| /GO_to_H-2.R | permissive | PBR/scripts | R | false | false | 1,093 | r | getwd()
#setwd('/media/bulk_01/users/goryu003/samples/')
ls()
#GO=read.table("ITAG2.3_desc_and_GO.txt", as.is =T,header=T )
setwd('/media/bulk_01/users/goryu003/samples/topGO_ITAG2.3')
enrichment_list = dir (pattern='.*_corresponding_overrepresent_.*')
print(enrichment_list)
GOI_list = dir (pattern='.*map.txt')
print (GOI_list)
z=1
for (i in 1:length(GOI_list))
{
n=0
print(z)
print(n)
while (n<3)
{
ROI_df=read.table (GOI_list[[i]],as.is=T, header=T)
df=read.table (enrichment_list[[z]],as.is=T, header=T)
print (GOI_list[[i]])
print (enrichment_list[[z]])
#print(head (ROI_df))
#print(head (df))
map=merge (ROI_df, df, by.x=c("Polypeptide.ID"), by.y=c("Genes"), all.x=F, all.y=F)
print(map)
write.table(map, paste(sep="",strsplit(enrichment_list [[z]], "\\.")[[1]][1],"HR_annotation.txt"))
n=n+1
z=z+1
}
}
|
makeCacheMatrix <- function(m = matrix() ) {
  ## Cache container for a matrix and its lazily computed inverse.
  ## The cached inverse starts out empty and is invalidated whenever
  ## the stored matrix is replaced via set().
  cached <- NULL
  ## Replace the stored matrix and clear the cached inverse
  set <- function( matrix ) {
    m <<- matrix
    cached <<- NULL
  }
  ## Return the stored matrix
  get <- function() m
  ## Store a freshly computed inverse in the cache
  setInverse <- function(inverse) cached <<- inverse
  ## Return the cached inverse (NULL when not computed yet)
  getInverse <- function() cached
  ## Expose the four accessors as a named list
  list(set = set, get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Compute the inverse of the matrix wrapped by "makeCacheMatrix",
## returning the cached value when it has already been computed.
## (The original fragment was missing the function header, never defined
## `data`, and computed solve(data) %*% data — which is the identity
## matrix, not the inverse.)
cacheSolve <- function(x, ...) {
  ## Return the cached inverse if one is available
  m <- x$getInverse()
  if( !is.null(m) ) {
    message("getting cached data")
    return(m)
  }
  ## Fetch the matrix and compute its inverse with solve()
  data <- x$get()
  m <- solve(data, ...)
  ## Store the inverse in the cache object
  x$setInverse(m)
  ## Return the inverse matrix
  m
}
| /cachematrix.R | no_license | hoptran/ProgrammingAssignment2 | R | false | false | 952 | r | makeCacheMatrix <- function(m = matrix() ) {
## Initialize the inverse property
i <- NULL
set <- function( matrix ) {
m <<- matrix
i <<- NULL
}
## Method the get the matrix
get <- function() {
m
}
## Way to set the inverse of the matrix
setInverse <- function(inverse) {
i <<- inverse
}
## Way to get the inverse of the matrix
getInverse <- function() {
i
}
## Back a list of the methods
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Compute the inverse of the unique matrix back by "makeCacheMatrix"
## Back to a matrix “m”
m <- x$getInverse()
if( !is.null(m) ) {
message("getting cached data")
return(m)
}
## Compute the inverse via matrix multiplication
m <- solve(data) %*% data
## Set the inverse to the object
x$setInverse(m)
## Coming back the matrix
m
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TextInput.R
\name{TextInput}
\alias{TextInput}
\title{TextInput}
\usage{
TextInput(input, output, session, n, checkFun = NULL, addArgs = NULL)
}
\arguments{
\item{input}{argument used by shiny session}
\item{output}{argument used by shiny session}
\item{session}{argument used by shiny session}
\item{n}{int number of textInput fields. n must be length(labels) from FileUploadUI.}
\item{checkFun}{chr or NULL (NULL) if not NULL name of a function which can be used as a quality check for textInputs}
\item{addArgs}{list or NULL (NULL) if not NULL list of additional arguments which will be passed to checkFun}
}
\value{
chr arr of user provided text input fields or NULL
}
\description{
This is a series of textInput fields with functionality for validating the provided text inputs.
}
\details{
With argument checkFun a function name can be defined which will be used as quality control for the text inputs.
This function must take a chr arr as first argument. These are the user provided text inputs.
The function must either return NULL or a chr value. NULL means the input is valid.
Thereby the module returns this chr arr.
If the user input should not be valid, the function must return a character value.
This chr value will be rendered as a error message for the user, and the modul returns NULL.
Additional argumets can be handed over to checkFun via the list addArgs.
}
\examples{
library(shinyTools)
# some function as example
check <- function(text, add){ if(any(grepl(add$pat, text))) return(paste("Don't use letter", add$pat, "in any entry."))}
# little app with module
ui <- fluidPage(sidebarLayout(
sidebarPanel(h2("TextInputUI"),
TextInputUI("id1", c("positive Ctrls", "non-targeting Ctrls"), c("positive", "non-targeting"),
help = "use HGNC symbols", horiz = FALSE)
),
mainPanel(h2("Output of TextInput"), verbatimTextOutput("display"))
))
server <-function(input, output, session) {
display <- callModule(TextInput, "id1", n = 2, checkFun = "check", addArgs = list(pat = "X"))
output$display <- renderPrint(display())
}
shinyApp(ui, server)
}
\seealso{
Other TextInput module functions: \code{\link{TextInputUI}}
}
| /man/TextInput.Rd | no_license | nishuai/shinyTools | R | false | true | 2,278 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TextInput.R
\name{TextInput}
\alias{TextInput}
\title{TextInput}
\usage{
TextInput(input, output, session, n, checkFun = NULL, addArgs = NULL)
}
\arguments{
\item{input}{argument used by shiny session}
\item{output}{argument used by shiny session}
\item{session}{argument used by shiny session}
\item{n}{int number of textInput fields. n must be length(labels) from FileUploadUI.}
\item{checkFun}{chr or NULL (NULL) if not NULL name of a function which can be used as a quality check for textInputs}
\item{addArgs}{list or NULL (NULL) if not NULL list of additional arguments which will be passed to checkFun}
}
\value{
chr arr of user provided text input fields or NULL
}
\description{
This is a series of textInputs fields with a functionality for validating the provided text inputs.
}
\details{
With argument checkFun a function name can be defined which will be used as quality control for the text inputs.
This function must take a chr arr as first argument. These are the user provided text inputs.
The function must either return NULL or a chr value. NULL means the input is valid.
Thereby the module returns this chr arr.
If the user input should not be valid, the function must return a character value.
This chr value will be rendered as a error message for the user, and the modul returns NULL.
Additional argumets can be handed over to checkFun via the list addArgs.
}
\examples{
library(shinyTools)
# some function as example
check <- function(text, add){ if(any(grepl(add$pat, text))) return(paste("Don't use letter", add$pat, "in any entry."))}
# little app with module
ui <- fluidPage(sidebarLayout(
sidebarPanel(h2("TextInputUI"),
TextInputUI("id1", c("positive Ctrls", "non-targeting Ctrls"), c("positive", "non-targeting"),
help = "use HGNC symbols", horiz = FALSE)
),
mainPanel(h2("Output of TextInput"), verbatimTextOutput("display"))
))
server <-function(input, output, session) {
display <- callModule(TextInput, "id1", n = 2, checkFun = "check", addArgs = list(pat = "X"))
output$display <- renderPrint(display())
}
shinyApp(ui, server)
}
\seealso{
Other TextInput module functions: \code{\link{TextInputUI}}
}
|
# Large-scale parameter tuning for gl_simlr_clust.
# Generates six simulation scenarios (2 division methods x 3 noise levels),
# builds a multi-kernel list for each, enumerates a full factorial grid of
# tuning parameters, and saves both objects for downstream cluster jobs.
setwd("/ifs/scratch/msph/biostat/sw2206/yuqi")
.libPaths("/ifs/scratch/msph/biostat/sw2206/yuqi/R_libs")
# Goal: large scale parameter tunning for gl_simlr_clust:
# scenario setting:
## 2 changes: division_methods(data_divide): div4: 123, div2: 456
## vars_of_feature(sigma): 1:14, 3:25, 5:36
library(parallel)
library(clValid)
library(dplyr)
library(SNFtool)
library(igraph)
library(Matrix)
library(quadprog)
# Project-local helpers: simulation_3() and multi_kernels_gl() come from here
source("./functions/gl_simlr2.0_cluster.R")
source("./functions/simulation_function.R")
set.seed(286) # set seed so the simulated scenarios are reproducible
# Scenarios 1-3: four-group division ("1/2/3/4") with noise sd 1, 3, 5
scen1 = simulation_3(eff_size = c(1,1,1), data_divide = rep("1/2/3/4",3),sigma = rep(1,3))
scen2 = simulation_3(eff_size = c(1,1,1), data_divide = rep("1/2/3/4",3),sigma = rep(3,3))
scen3 = simulation_3(eff_size = c(1,1,1), data_divide = rep("1/2/3/4",3),sigma = rep(5,3))
# Scenarios 4-6: default division with noise sd 1, 3, 5
scen4 = simulation_3(eff_size = c(1,1,1), sigma = rep(1,3))
scen5 = simulation_3(eff_size = c(1,1,1), sigma = rep(3,3))
scen6 = simulation_3(eff_size = c(1,1,1), sigma = rep(5,3))
# Multi-kernel list per scenario, built from the first three data views
mk_list = lapply(list(scen1,scen2,scen3,scen4,scen5,scen6), function(x){unlist(lapply(x[1:3], multi_kernels_gl))})
names(mk_list) = c('scen1','scen2','scen3','scen4','scen5','scen6')
# par setting
# Full factorial grid over (beta, gamma, rho, alpha0, alpha): each entry of
# `par` is one parameter combination, named after its values
# (7 * 7 * 7 * 3 * 3 = 3087 combinations).
par = list()
beta_list = gamma_list = rho_list = c(0.01,0.03,0.05,0.1,0.3,0.5,1)
alpha0_list = alpha_list = c(1,3,5)
for(i in 1:length(beta_list)){
for(j in 1:length(gamma_list)){
for(k in 1:length(rho_list)){
for(l in 1:length(alpha0_list)){
for(m in 1:length(alpha_list)){
par_name = paste("beta", beta_list[i],
"_gamma", gamma_list[j],
"_rho", rho_list[k],
"_alpha0", alpha0_list[l],
"_alpha", alpha_list[m],
sep = "")
par[[par_name]] = list(beta = beta_list[i],
gamma = gamma_list[j],
rho = rho_list[k],
alpha0 = alpha0_list[l],
alpha = alpha_list[m])
}
}
}
}
}
# Persist the kernel lists and the parameter grid for the tuning jobs
save(mk_list, file = "simu_0206/mk_list.Rdata")
save(par, file = "simu_0206/par.Rdata")
| /code/simulation_scripts/simulation_0206_mk_generation.R | no_license | yuqimiao/multiomics-SIMLR | R | false | false | 2,185 | r | setwd("/ifs/scratch/msph/biostat/sw2206/yuqi")
.libPaths("/ifs/scratch/msph/biostat/sw2206/yuqi/R_libs")
# Goal: large scale parameter tunning for gl_simlr_clust:
# scenario setting:
## 2 changes: division_methods(data_divide): div4: 123, div2: 456
## vars_of_feature(sigma): 1:14, 3:25, 5:36
library(parallel)
library(clValid)
library(dplyr)
library(SNFtool)
library(igraph)
library(Matrix)
library(quadprog)
source("./functions/gl_simlr2.0_cluster.R")
source("./functions/simulation_function.R")
set.seed(286) # set seed because we
scen1 = simulation_3(eff_size = c(1,1,1), data_divide = rep("1/2/3/4",3),sigma = rep(1,3))
scen2 = simulation_3(eff_size = c(1,1,1), data_divide = rep("1/2/3/4",3),sigma = rep(3,3))
scen3 = simulation_3(eff_size = c(1,1,1), data_divide = rep("1/2/3/4",3),sigma = rep(5,3))
scen4 = simulation_3(eff_size = c(1,1,1), sigma = rep(1,3))
scen5 = simulation_3(eff_size = c(1,1,1), sigma = rep(3,3))
scen6 = simulation_3(eff_size = c(1,1,1), sigma = rep(5,3))
mk_list = lapply(list(scen1,scen2,scen3,scen4,scen5,scen6), function(x){unlist(lapply(x[1:3], multi_kernels_gl))})
names(mk_list) = c('scen1','scen2','scen3','scen4','scen5','scen6')
# par setting
par = list()
beta_list = gamma_list = rho_list = c(0.01,0.03,0.05,0.1,0.3,0.5,1)
alpha0_list = alpha_list = c(1,3,5)
for(i in 1:length(beta_list)){
for(j in 1:length(gamma_list)){
for(k in 1:length(rho_list)){
for(l in 1:length(alpha0_list)){
for(m in 1:length(alpha_list)){
par_name = paste("beta", beta_list[i],
"_gamma", gamma_list[j],
"_rho", rho_list[k],
"_alpha0", alpha0_list[l],
"_alpha", alpha_list[m],
sep = "")
par[[par_name]] = list(beta = beta_list[i],
gamma = gamma_list[j],
rho = rho_list[k],
alpha0 = alpha0_list[l],
alpha = alpha_list[m])
}
}
}
}
}
save(mk_list, file = "simu_0206/mk_list.Rdata")
save(par, file = "simu_0206/par.Rdata")
|
library(rcrossref)
### Name: cr_cn
### Title: Get citations in various formats from CrossRef.
### Aliases: cr_cn
### ** Examples
## Not run:
##D cr_cn(dois="10.1126/science.169.3946.635")
##D cr_cn(dois="10.1126/science.169.3946.635", "citeproc-json")
##D cr_cn(dois="10.1126/science.169.3946.635", "citeproc-json-ish")
##D cr_cn("10.1126/science.169.3946.635", "rdf-xml")
##D cr_cn("10.1126/science.169.3946.635", "crossref-xml")
##D cr_cn("10.1126/science.169.3946.635", "text")
##D
##D # return an R bibentry type
##D cr_cn("10.1126/science.169.3946.635", "bibentry")
##D cr_cn("10.6084/m9.figshare.97218", "bibentry")
##D
##D # return an apa style citation
##D cr_cn("10.1126/science.169.3946.635", "text", "apa")
##D cr_cn("10.1126/science.169.3946.635", "text", "harvard3")
##D cr_cn("10.1126/science.169.3946.635", "text", "elsevier-harvard")
##D cr_cn("10.1126/science.169.3946.635", "text", "ecoscience")
##D cr_cn("10.1126/science.169.3946.635", "text", "heredity")
##D cr_cn("10.1126/science.169.3946.635", "text", "oikos")
##D
##D # example with many DOIs
##D dois <- cr_r(2)
##D cr_cn(dois, "text", "apa")
##D
##D # Cycle through random styles - print style on each try
##D stys <- get_styles()
##D foo <- function(x){
##D cat(sprintf("<Style>:%s\n", x), sep = "\n\n")
##D cat(cr_cn("10.1126/science.169.3946.635", "text", style=x))
##D }
##D foo(sample(stys, 1))
##D
##D # Using DataCite DOIs
##D ## some formats don't work
##D # cr_cn("10.5284/1011335", "crossref-xml")
##D # cr_cn("10.5284/1011335", "crossref-tdm")
##D ## But most do work
##D cr_cn("10.5284/1011335", "text")
##D cr_cn("10.5284/1011335", "datacite-xml")
##D cr_cn("10.5284/1011335", "rdf-xml")
##D cr_cn("10.5284/1011335", "turtle")
##D cr_cn("10.5284/1011335", "citeproc-json-ish")
##D cr_cn("10.5284/1011335", "ris")
##D cr_cn("10.5284/1011335", "bibtex")
##D cr_cn("10.5284/1011335", "bibentry")
##D
##D # Using Medra DOIs
##D cr_cn("10.3233/ISU-150780", "onix-xml")
##D
##D # Get raw output
##D cr_cn(dois = "10.1002/app.27716", format = "citeproc-json", raw = TRUE)
##D
##D # sometimes messy DOIs even work
##D ## in this case, a DOI minting agency can't be found
##D ## but we proceed anyway, just assuming it's "crossref"
##D cr_cn("10.1890/0012-9615(1999)069[0569:EDILSA]2.0.CO;2")
##D
##D # Use a different base url
##D cr_cn("10.1126/science.169.3946.635", "text", url = "https://data.datacite.org")
##D cr_cn("10.1126/science.169.3946.635", "text", url = "http://dx.doi.org")
##D cr_cn("10.1126/science.169.3946.635", "text", "heredity", url = "http://dx.doi.org")
##D cr_cn("10.5284/1011335", url = "https://citation.crosscite.org/format",
##D style = "oikos")
##D cr_cn("10.5284/1011335", url = "https://citation.crosscite.org/format",
##D style = "plant-cell-and-environment")
##D cr_cn("10.5284/1011335", url = "https://data.datacite.org",
##D style = "plant-cell-and-environment")
## End(Not run)
| /data/genthat_extracted_code/rcrossref/examples/cr_cn.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 2,928 | r | library(rcrossref)
### Name: cr_cn
### Title: Get citations in various formats from CrossRef.
### Aliases: cr_cn
### ** Examples
## Not run:
##D cr_cn(dois="10.1126/science.169.3946.635")
##D cr_cn(dois="10.1126/science.169.3946.635", "citeproc-json")
##D cr_cn(dois="10.1126/science.169.3946.635", "citeproc-json-ish")
##D cr_cn("10.1126/science.169.3946.635", "rdf-xml")
##D cr_cn("10.1126/science.169.3946.635", "crossref-xml")
##D cr_cn("10.1126/science.169.3946.635", "text")
##D
##D # return an R bibentry type
##D cr_cn("10.1126/science.169.3946.635", "bibentry")
##D cr_cn("10.6084/m9.figshare.97218", "bibentry")
##D
##D # return an apa style citation
##D cr_cn("10.1126/science.169.3946.635", "text", "apa")
##D cr_cn("10.1126/science.169.3946.635", "text", "harvard3")
##D cr_cn("10.1126/science.169.3946.635", "text", "elsevier-harvard")
##D cr_cn("10.1126/science.169.3946.635", "text", "ecoscience")
##D cr_cn("10.1126/science.169.3946.635", "text", "heredity")
##D cr_cn("10.1126/science.169.3946.635", "text", "oikos")
##D
##D # example with many DOIs
##D dois <- cr_r(2)
##D cr_cn(dois, "text", "apa")
##D
##D # Cycle through random styles - print style on each try
##D stys <- get_styles()
##D foo <- function(x){
##D cat(sprintf("<Style>:%s\n", x), sep = "\n\n")
##D cat(cr_cn("10.1126/science.169.3946.635", "text", style=x))
##D }
##D foo(sample(stys, 1))
##D
##D # Using DataCite DOIs
##D ## some formats don't work
##D # cr_cn("10.5284/1011335", "crossref-xml")
##D # cr_cn("10.5284/1011335", "crossref-tdm")
##D ## But most do work
##D cr_cn("10.5284/1011335", "text")
##D cr_cn("10.5284/1011335", "datacite-xml")
##D cr_cn("10.5284/1011335", "rdf-xml")
##D cr_cn("10.5284/1011335", "turtle")
##D cr_cn("10.5284/1011335", "citeproc-json-ish")
##D cr_cn("10.5284/1011335", "ris")
##D cr_cn("10.5284/1011335", "bibtex")
##D cr_cn("10.5284/1011335", "bibentry")
##D
##D # Using Medra DOIs
##D cr_cn("10.3233/ISU-150780", "onix-xml")
##D
##D # Get raw output
##D cr_cn(dois = "10.1002/app.27716", format = "citeproc-json", raw = TRUE)
##D
##D # sometimes messy DOIs even work
##D ## in this case, a DOI minting agency can't be found
##D ## but we proceed anyway, just assuming it's "crossref"
##D cr_cn("10.1890/0012-9615(1999)069[0569:EDILSA]2.0.CO;2")
##D
##D # Use a different base url
##D cr_cn("10.1126/science.169.3946.635", "text", url = "https://data.datacite.org")
##D cr_cn("10.1126/science.169.3946.635", "text", url = "http://dx.doi.org")
##D cr_cn("10.1126/science.169.3946.635", "text", "heredity", url = "http://dx.doi.org")
##D cr_cn("10.5284/1011335", url = "https://citation.crosscite.org/format",
##D style = "oikos")
##D cr_cn("10.5284/1011335", url = "https://citation.crosscite.org/format",
##D style = "plant-cell-and-environment")
##D cr_cn("10.5284/1011335", url = "https://data.datacite.org",
##D style = "plant-cell-and-environment")
## End(Not run)
|
# Auto-extracted example for DSviaDRM::expressionBasedfilter
library(DSviaDRM)
### Name: expressionBasedfilter
### Title: Filter genes according to expression level
### Aliases: expressionBasedfilter
### Keywords: gene filtering
### ** Examples
# Load the example expression matrix bundled with the package
data(exprs1)
# Filter genes by expression level
expressionBasedfilter(exprs1)
| /data/genthat_extracted_code/DSviaDRM/examples/expressionBasedfilter.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 234 | r | library(DSviaDRM)
### Name: expressionBasedfilter
### Title: Filter genes according to expression level
### Aliases: expressionBasedfilter
### Keywords: gene filtering
### ** Examples
data(exprs1)
expressionBasedfilter(exprs1)
|
# processSTR: interactive Shiny tool for trimming a subsurface temperature
# recorder (STR) time series to its in situ deployment window.
#
# Arguments:
#   file  - path to the raw instrument file (CSV for "SBE", raw file for "RBR")
#   model - instrument type, "SBE" or "RBR"; selects the parser below
#
# Side effects: launches a Shiny app; pressing "Save" writes <file>.cdp,
# <file>.png and <file>_processed.csv next to the input file.
#
# NOTE(review): this function calls ymd_hms()/year()/months() (lubridate),
# read.oce() (oce), plot_ly() (plotly), ggplot()/ggsave() (ggplot2),
# ggarrange() (ggpubr), shinyalert/shinyjs helpers and file_path_sans_ext()
# (tools) without any library() calls of its own — presumably the calling
# script attaches these packages; confirm before running standalone.
processSTR = function(file, model = c("SBE","RBR")) {
# Set environment variables to UTC
Sys.setenv(TZ = 'UTC')
# SBE instruments: CSV with a variable-length header ending at the "Date" row
if (model == "SBE") {
#Load data
test = read.csv(file, header = FALSE)[1:20 ,]
#Find number of rows to skip in header
skip = min(which(test == "Date")) - 1
#Skip header
str = read.csv(file, skip = skip)
# NOTE(review): `str` is used as a data.frame name here and masks
# utils::str inside this function.
#Concatenate date and time
str$DateTime = ymd_hms(paste0(str$Date, str$Time), tz = "UTC")
}
# RBR instruments: parsed via read.oce, keeping only time and temperature
if (model == "RBR"){
str.rbr = read.oce(file)
DateTime = str.rbr[['time']]
Temperature = str.rbr[['temperature']]
str = data.frame(DateTime,Temperature)
str$DateTime = ymd_hms(str$DateTime, tz = "UTC")
}
#Define function for "Close Window" button in JavaScript
jscode = 'shinyjs.closeWindow = function() { window.close(); }'
# Create user interface. This bit of code defines what will appear on the UI.
ui <- fluidPage(
mainPanel(
br(),
# Paste file name at top of page
h2(paste0(file_path_sans_ext(basename(
file
)))),
# "Be patient" text...these datasets are huge!
h6("(Be patient...the plots may take a minute to load!)"),
br(),
h5("1. Review the entire raw time series."),
# Display the raw time series
plotlyOutput("whole.ts", height = "200px"),
br(),
h5(
"2. Click on time series below to select in situ start and end points. Gray scroll bars at bottom of plots adjust the time windows."
),
# Display the start and end plots of the time series
fixedRow(column(
6, plotlyOutput("start.plot", height = "400px")
),
column(
6, plotlyOutput("end.plot", height = "400px")
),
style = 'padding:40px;'),
# Display the selected start and end times
fixedRow(
column(6, verbatimTextOutput("start.select")),
column(6, verbatimTextOutput("end.select"))
),
h5(
"3. Review the resulting trimmed time series based on start and end time selected."
),
# Display the trimmed time series
plotlyOutput("cut.ts", height = "200px"),
h5(
"4. If you are happy with the trimmed time series, click the 'Save' button. A 'File saved' message will appear when file has saved successfully."
),
# Create a save button
actionButton("save", "Save"),
useShinyalert(),
h5(
"5. Once you have saved the file, click the 'Stop' button to stop the app."
),
# Create a "Close Window" button
useShinyjs(),
extendShinyjs(text = jscode, functions = c("closeWindow")),
actionButton("close", "Stop app"),
br(),
br(),
br(),
br(),
br()
)
)
# This code creates the shiny server (i.e. the code that the app is actually running)
server <- function(input, output) {
# Create start and end time variables that change based on user input
vals = reactiveValues(start.time = NULL,
end.time = NULL)
# Plot the raw time series
output$whole.ts = renderPlotly({
plot_ly(
str,
x = ~ DateTime,
y = ~ Temperature,
mode = 'lines',
type = 'scatter'
)
})
# Plot the first month of the time series
output$start.plot = renderPlotly({
plot_ly(
str,
x = ~ DateTime,
y = ~ Temperature,
mode = 'lines',
type = 'scatter',
source = 'S',
# Connect this plot to the selection of start time
hoverinfo = 'none' # Remove the default hover boxes
) %>%
layout(
title = 'Select START point',
xaxis = list(
rangeslider = list(type = "date"),
# Add a range slider at the bottom of the plot
spikemode = "across",
# Add crosshairs
spikethickness = 0.5,
# Crosshair thickness
spikecolor = "black",
# Crosshair color
spikedash = "line",
# Make crosshairs a solid line
showspikes = TRUE,
range = c(min(str$DateTime), (min(str$DateTime) + months(1))) # Define x axis range
),
yaxis = list(
spikemode = "across",
#Same crosshair properties but for y axis
# NOTE(review): spikemode is specified twice in this list; the
# duplicate entry is redundant.
spikemode = "across",
spikethickness = 0.5,
spikecolor = "black",
spikedash = "line",
showspikes = TRUE
)
) %>% onRender(
"function(el, x) {
Plotly.d3.select('.cursor-crosshair').style('cursor', 'default')} "
)
})
# Print the clicked start time (or a prompt when nothing is selected yet)
output$start.select = renderPrint ({
vals$start.time = event_data("plotly_click", source = 'S')
if (length(vals$start.time) == 0) {
"Select a start time"
} else {
cat("Selected start time is \n")
return(print(ymd_hms(vals$start.time$x, tz = 'UTC')))
}
})
# Plot the last month of the time series (mirror of start.plot)
output$end.plot = renderPlotly({
plot_ly(
str,
x = ~ DateTime,
y = ~ Temperature,
mode = 'lines',
type = 'scatter',
source = 'E',
hoverinfo = 'none'
) %>%
onRender(
"function(el, x) {
Plotly.d3.select('.cursor-crosshair').style('cursor', 'default')} "
) %>%
layout(
title = 'Select END point',
xaxis = list(
rangeslider = list(type = "date"),
spikemode = "across",
spikethickness = 0.5,
spikecolor = "black",
spikedash = "line",
showspikes = TRUE,
range = c(max(str$DateTime) - months(1), (max(str$DateTime)))
),
yaxis = list(
spikemode = "across",
spikemode = "across",
spikethickness = 0.5,
spikecolor = "black",
spikedash = "line",
showspikes = TRUE
)
)
})
# Print the clicked end time (or a prompt when nothing is selected yet)
output$end.select = renderPrint ({
vals$end.time = event_data("plotly_click", source = 'E')
if (length(vals$end.time) == 0) {
"Select an end time"
} else {
cat("Selected end time is \n")
return(print(ymd_hms(vals$end.time$x, tz = 'UTC')))
}
})
# Re-plot the trimmed series whenever either selection changes
observeEvent(c(vals$start.time, vals$end.time), {
str.subset = subset(str,
DateTime >= vals$start.time$x &
DateTime <= vals$end.time$x)
output$cut.ts = renderPlotly({
plot_ly(
str.subset,
x = ~ DateTime,
y = ~ Temperature,
mode = 'lines',
type = 'scatter'
) %>%
layout(xaxis = list(range = c(
vals$start.time$x, vals$end.time$x
)))
})
})
# On "Save": write the trimmed data as .cdp (tab-separated, one column per
# date component), a .png overview figure, and a _processed.csv file, all
# next to the input file
observe({
if (input$save > 0) {
str.subset = subset(str,
DateTime >= vals$start.time$x &
DateTime <= vals$end.time$x)
# Split the timestamp into separate columns for the .cdp format
str.subset$UTCDateTime = ymd_hms(str.subset$DateTime, tz = "UTC")
str.subset$Year = year(str.subset$UTCDateTime)
str.subset$Month = month(str.subset$UTCDateTime)
str.subset$Day = day(str.subset$UTCDateTime)
str.subset$Hour = hour(str.subset$UTCDateTime)
str.subset$Minute = minute(str.subset$UTCDateTime)
str.subset$Second = second(str.subset$UTCDateTime)
str.df = str.subset[c("Year",
"Month",
"Day",
"Hour",
"Minute",
"Second",
"Temperature")]
# Binary-mode connection so line endings are written as "\n" on every
# platform
output.file = file(paste0(
dirname(file),
"/",
file_path_sans_ext(basename(file)),
".cdp"
),
"wb")
write.table(
str.df,
file = output.file,
row.names = FALSE,
col.names = FALSE,
sep = "\t",
eol = "\n"
)
close(output.file)
# Overview figure: time series line plot plus a temperature boxplot
plot = ggplot(data = str.subset) +
geom_line(aes(x = ymd_hms(DateTime), y = Temperature), col = 'dodgerblue') +
theme_bw() +
scale_x_datetime(breaks = date_breaks("4 months"),
labels = date_format("%m/%y")) +
ylab(expression(atop(
paste("Temperature (", degree, "C)")
))) +
theme(axis.title.x = element_blank())
box = ggplot(data = str.subset) +
geom_boxplot(aes(x = "", y = Temperature), fill = 'dodgerblue') +
theme_bw() +
theme(axis.title.x = element_blank(),
axis.title.y = element_blank())
comb = ggarrange(plot, box, widths = c(6, 1))
ggsave(
filename = paste0(
dirname(file),
"/",
file_path_sans_ext(basename(file)),
".png"
),
plot = comb,
width = 12,
height = 3
)
write.csv(
str.subset[c("DateTime", "Temperature")],
paste0(
dirname(file),
"/",
file_path_sans_ext(basename(file)),
"_processed.csv"
),
row.names = FALSE
)
}
})
# Confirmation popup after saving
observeEvent(input$save, {
shinyalert("File saved",
type = 'success')
})
# "Stop app" button: close the browser window and stop the Shiny process
observeEvent(input$close, {
js$closeWindow()
stopApp()
})
}
# Run the shiny app
shinyApp(ui, server)
}
| /R/processSTR.r | no_license | npomeroy/occ | R | false | false | 9,745 | r | processSTR = function(file, model = c("SBE","RBR")) {
  # Work entirely in UTC so timestamp parsing/formatting is unambiguous.
  # Set environment variables to UTC
  Sys.setenv(TZ = 'UTC')
  # Seabird (SBE) export: a variable-length text header precedes the data,
  # so locate the header row containing "Date" and skip everything above it.
  if (model == "SBE") {
    #Load data
    test = read.csv(file, header = FALSE)[1:20 ,]
    #Find number of rows to skip in header
    skip = min(which(test == "Date")) - 1
    #Skip header
    str = read.csv(file, skip = skip)
    #Concatenate date and time
    # NOTE(review): Date and Time are pasted with no separator before
    # ymd_hms parsing — assumes lubridate still parses the fused string;
    # confirm against a sample SBE file.
    str$DateTime = ymd_hms(paste0(str$Date, str$Time), tz = "UTC")
  }
  # RBR logger: read via oce and extract the time/temperature channels.
  if (model == "RBR"){
    str.rbr = read.oce(file)
    DateTime = str.rbr[['time']]
    Temperature = str.rbr[['temperature']]
    str = data.frame(DateTime,Temperature)
    str$DateTime = ymd_hms(str$DateTime, tz = "UTC")
  }
#Define function for "Close Window" button in JavaScript
jscode = 'shinyjs.closeWindow = function() { window.close(); }'
# Create user interface. This bit of code defines what will appear on the UI.
ui <- fluidPage(
mainPanel(
br(),
# Paste file name at top of page
h2(paste0(file_path_sans_ext(basename(
file
)))),
# "Be patient" text...these datasets are huge!
h6("(Be patient...the plots may take a minute to load!)"),
br(),
h5("1. Review the entire raw time series."),
# Display the raw time series
plotlyOutput("whole.ts", height = "200px"),
br(),
h5(
"2. Click on time series below to select in situ start and end points. Gray scroll bars at bottom of plots adjust the time windows."
),
# Disploy the start and end plots of the time series
fixedRow(column(
6, plotlyOutput("start.plot", height = "400px")
),
column(
6, plotlyOutput("end.plot", height = "400px")
),
style = 'padding:40px;'),
# Display the selected start and end times
fixedRow(
column(6, verbatimTextOutput("start.select")),
column(6, verbatimTextOutput("end.select"))
),
h5(
"3. Review the resulting trimmed time series based on start and end time selected."
),
# Display the trimmed time series
plotlyOutput("cut.ts", height = "200px"),
h5(
"4. If you are happy with the trimmed time series, click the 'Save' button. A 'File saved' message will appear when file has saved successfully."
),
# Create a save button
actionButton("save", "Save"),
useShinyalert(),
h5(
"5. Once you have saved the file, click the 'Stop' button to stop the app."
),
# Create a "Close Window" button
useShinyjs(),
extendShinyjs(text = jscode, functions = c("closeWindow")),
actionButton("close", "Stop app"),
br(),
br(),
br(),
br(),
br()
)
)
# This code creates the shiny server (i.e. the code that the app is actually running)
server <- function(input, output) {
# Create start and end time variables that change based on user input
vals = reactiveValues(start.time = NULL,
end.time = NULL)
# Plot the raw time series
output$whole.ts = renderPlotly({
plot_ly(
str,
x = ~ DateTime,
y = ~ Temperature,
mode = 'lines',
type = 'scatter'
)
})
# Plot the first month of the time series
output$start.plot = renderPlotly({
plot_ly(
str,
x = ~ DateTime,
y = ~ Temperature,
mode = 'lines',
type = 'scatter',
source = 'S',
# Connect this plot to the selection of start time
hoverinfo = 'none' # Remove the default hover boxes
) %>%
layout(
title = 'Select START point',
xaxis = list(
rangeslider = list(type = "date"),
# Add a range slider at the bottom of the plot
spikemode = "across",
# Add crosshairs
spikethickness = 0.5,
# Crosshair thickness
spikecolor = "black",
# Crosshair color
spikedash = "line",
# Make crosshairs a solid line
showspikes = TRUE,
range = c(min(str$DateTime), (min(str$DateTime) + months(1))) # Define x axis range
),
yaxis = list(
spikemode = "across",
#Same crosshair properties but for y axis
spikemode = "across",
spikethickness = 0.5,
spikecolor = "black",
spikedash = "line",
showspikes = TRUE
)
) %>% onRender(
"function(el, x) {
Plotly.d3.select('.cursor-crosshair').style('cursor', 'default')} "
)
})
output$start.select = renderPrint ({
vals$start.time = event_data("plotly_click", source = 'S')
if (length(vals$start.time) == 0) {
"Select a start time"
} else {
cat("Selected start time is \n")
return(print(ymd_hms(vals$start.time$x, tz = 'UTC')))
}
})
output$end.plot = renderPlotly({
plot_ly(
str,
x = ~ DateTime,
y = ~ Temperature,
mode = 'lines',
type = 'scatter',
source = 'E',
hoverinfo = 'none'
) %>%
onRender(
"function(el, x) {
Plotly.d3.select('.cursor-crosshair').style('cursor', 'default')} "
) %>%
layout(
title = 'Select END point',
xaxis = list(
rangeslider = list(type = "date"),
spikemode = "across",
spikethickness = 0.5,
spikecolor = "black",
spikedash = "line",
showspikes = TRUE,
range = c(max(str$DateTime) - months(1), (max(str$DateTime)))
),
yaxis = list(
spikemode = "across",
spikemode = "across",
spikethickness = 0.5,
spikecolor = "black",
spikedash = "line",
showspikes = TRUE
)
)
})
output$end.select = renderPrint ({
vals$end.time = event_data("plotly_click", source = 'E')
if (length(vals$end.time) == 0) {
"Select an end time"
} else {
cat("Selected end time is \n")
return(print(ymd_hms(vals$end.time$x, tz = 'UTC')))
}
})
observeEvent(c(vals$start.time, vals$end.time), {
str.subset = subset(str,
DateTime >= vals$start.time$x &
DateTime <= vals$end.time$x)
output$cut.ts = renderPlotly({
plot_ly(
str.subset,
x = ~ DateTime,
y = ~ Temperature,
mode = 'lines',
type = 'scatter'
) %>%
layout(xaxis = list(range = c(
vals$start.time$x, vals$end.time$x
)))
})
})
observe({
if (input$save > 0) {
str.subset = subset(str,
DateTime >= vals$start.time$x &
DateTime <= vals$end.time$x)
str.subset$UTCDateTime = ymd_hms(str.subset$DateTime, tz = "UTC")
str.subset$Year = year(str.subset$UTCDateTime)
str.subset$Month = month(str.subset$UTCDateTime)
str.subset$Day = day(str.subset$UTCDateTime)
str.subset$Hour = hour(str.subset$UTCDateTime)
str.subset$Minute = minute(str.subset$UTCDateTime)
str.subset$Second = second(str.subset$UTCDateTime)
str.df = str.subset[c("Year",
"Month",
"Day",
"Hour",
"Minute",
"Second",
"Temperature")]
output.file = file(paste0(
dirname(file),
"/",
file_path_sans_ext(basename(file)),
".cdp"
),
"wb")
write.table(
str.df,
file = output.file,
row.names = FALSE,
col.names = FALSE,
sep = "\t",
eol = "\n"
)
close(output.file)
plot = ggplot(data = str.subset) +
geom_line(aes(x = ymd_hms(DateTime), y = Temperature), col = 'dodgerblue') +
theme_bw() +
scale_x_datetime(breaks = date_breaks("4 months"),
labels = date_format("%m/%y")) +
ylab(expression(atop(
paste("Temperature (", degree, "C)")
))) +
theme(axis.title.x = element_blank())
box = ggplot(data = str.subset) +
geom_boxplot(aes(x = "", y = Temperature), fill = 'dodgerblue') +
theme_bw() +
theme(axis.title.x = element_blank(),
axis.title.y = element_blank())
comb = ggarrange(plot, box, widths = c(6, 1))
ggsave(
filename = paste0(
dirname(file),
"/",
file_path_sans_ext(basename(file)),
".png"
),
plot = comb,
width = 12,
height = 3
)
write.csv(
str.subset[c("DateTime", "Temperature")],
paste0(
dirname(file),
"/",
file_path_sans_ext(basename(file)),
"_processed.csv"
),
row.names = FALSE
)
}
})
observeEvent(input$save, {
shinyalert("File saved",
type = 'success')
})
observeEvent(input$close, {
js$closeWindow()
stopApp()
})
}
# Run the shiny app
shinyApp(ui, server)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{flu_data}
\alias{flu_data}
\title{Regional influenza incidence in the US (1997 - 2018)}
\format{A data.frame with 12,056 observations on weighted influenza-like illness
measurements from all HHS regions, including the national level.}
\source{
The cdcfluview R package.
}
\usage{
data(flu_data)
}
\description{
A dataset of public influenza data from the US CDC.
}
| /man/flu_data.Rd | no_license | reichlab/2018-2019-cdc-flu-contest | R | false | true | 472 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{flu_data}
\alias{flu_data}
\title{Regional influenza incidence in the US (1997 - 2018)}
\format{A data.frame with 12,056 observations on weighted influenza-like illness
measurements from all HHS regions, including the national level.}
\source{
The cdcfluview R package.
}
\usage{
data(flu_data)
}
\description{
A dataset of public influenza data from the US CDC.
}
|
# Load libraries ----------------------------------------------------------
library(MiniModel)
library(ggplot2)
library(readr)
library(dplyr)
library(doParallel)
library(Hmisc)
# Load data ---------------------------------------------------------------
# Parameters fitted to literature
# Both .RData files are expected to populate `params_literature` and
# `params_experiment` (data frames with a "value" column keyed by
# parameter name in the row names).
load("Intermediate/ParametersLiterature.RData")
load("Intermediate/ParametersExperiment.RData")
# Start from literature values, then overwrite any parameter that was
# re-fitted against the experiment.
parameters = params_literature
for(i in row.names(params_experiment)) {
  parameters[i,] = params_experiment[i,]
}
# Collapse to a named numeric vector used by all simulation helpers below.
parnames = row.names(parameters)
parameters = parameters[,"value"]
names(parameters) = parnames
# gamma1 + gamma2 + gamma3 must sum to 1 — derive the third fraction.
parameters["gamma3"] = 1 - parameters["gamma2"] - parameters["gamma1"]
# NOTE(review): data_summary / experiment_summary are presumably loaded
# from the .RData files above — confirm before editing the rm() list.
rm(data_summary, params_experiment, params_literature, experiment_summary)
# Simulate a light-transient experiment (1 h at 1000 umol m-2 s-1, then
# 1 h at 50) for one MiniModel variant.
# model: a MiniModel model object (e.g. from generate_MiniModel_model()).
# Returns: the full CVODE output (one row per second, 7200 s) as a data frame.
# Relies on the script-global `parameters` named vector.
run = function(model) {
  # CVODE settings
  model$set_settings(c("atol","rtol","maxsteps","maxerr","maxnonlin","maxconvfail","minimum"),
                     c(1e-10,1e-6,1e4,20,20,20, -1e-6))
  model$set_settings(c("silent","positive", "force_positive"), c(TRUE, TRUE,TRUE))
  # Assign parameters from literature to the model
  # (only the parameters this model variant actually declares)
  filter = which(names(parameters) %in% names(model$Parameters$Values))
  model$set_parameters(names(parameters[filter]),unname(parameters[filter]))
  # Simulate a LiCOR with 10% red - 90% blue
  # NOTE(review): the blue channel carries 10% and the red channel 90%
  # of total PAR here (5/45 then 100/900) — the comment ratio reads reversed.
  model$set_forcings("Ib", cbind(c(0,1), c(5,5)))
  model$set_forcings("Ir", cbind(c(0,1), c(45,45)))
  model$set_forcings("Ig", cbind(c(0,1), c(0,0)))
  model$set_forcings("CO2R", cbind(c(0,1), c(400,400)))
  model$set_forcings("H2OR", cbind(c(0,1), c(20,20)))
  model$set_forcings("Ta", cbind(c(0,1), c(298.15, 298.15)))
  model$set_forcings("Tl", cbind(c(0,1), c(298.15, 298.15)))
  model$set_states("Ci", 400)
  model$set_states("Cc", 400)
  model$set_states("Ccyt", 400)
  # PR only exists in some model variants; ignore failures elsewhere.
  tryCatch(model$set_states("PR", 25), error = function(x) NULL)
  # Calculate steady-state
  # (run 1 h at the low-light forcing and take the final state row)
  model$set_time(c(0,3600))
  steadyState = cvode(model)[2,names(model$States$Values)]
  model$set_states(names(steadyState), steadyState)
  # Simulate Transient
  # Step up to 1000 umol m-2 s-1 total PAR for 1 h, then back to 50 for 1 h.
  model$set_forcings("Ib", cbind(c(0,3600, 3601, 7200), c(100,100,5,5)))
  model$set_forcings("Ir", cbind(c(0,3600, 3601, 7200), c(900,900,45,45)))
  model$set_time(1:7200)
  Transient = cvode(model)
  class(Transient) = "matrix"
  as_data_frame(Transient)
}
# Simulate all mutants --------------------------------------------------------------------------------------------
control = run(generate_MiniModel_model())
QssfR = run(generate_MiniModelQssfR_model())
QssfRB = run(generate_MiniModelQssfRB_model())
QssKgs = run(generate_MiniModelQssgs_model())
QssqE = run(generate_MiniModelQssqE_model())
QssqM = run(generate_MiniModelQssqM_model())
QssqI = run(generate_MiniModelQssqI_model())
QssPR = run(generate_MiniModelQssPR_model())
Qss = run(generate_MiniModelQss_model())
# Calculate differences in Photo ----------------------------------------------------------------------------------
mutations = data_frame(fR = (QssfR$A - control$A),#/control$A,
fRB = (QssfRB$A - control$A),#/control$A,
Kgs = (QssKgs$A - control$A),#/control$A,
qE = (QssqE$A - control$A),#/control$A,
qI = (QssqI$A - control$A),#/control$A,
qM = (QssqM$A - control$A),#/control$A)
PR = (QssPR$A - control$A),#/control$A)
QSS = (Qss$A - control$A))#/control$A)
filter1 = 2:3600
filter2 = 3615:7200
time1 = (1:7200)[filter1]/60
time2 = (1:7200)[filter2]/60
png("Output/figureLimitations.png", width = 10, height = 6, pointsize = 8, units = "cm",
res = 600, bg = "white", antialias = "default")
with(mutations, {
par(mfrow = c(1,1), xaxs = "i", yaxs = "i", las = 1, mar= c(4.0,4.0,0.5,1), mgp = c(2,1,0))
plot(1,1, xlim = c(0,120), ylim = c(-0.3,5), type = "n", ylab = expression(italic(Delta*A)~(mu*mol~m^{-2}~s^{-1})),
xlab = "Time (min)")
lines(time1, fR[filter1], col = 1, lty = 1)
lines(time1, fRB[filter1], col = 2, lty = 2)
lines(time1, Kgs[filter1], col = 3, lty = 3)
lines(time2, fR[filter2], col = 1, lty = 1)
lines(time2, fRB[filter2], col = 2, lty = 2)
lines(time2, Kgs[filter2], col = 3, lty = 3)
lines(time1, qE[filter1], col = 4, lty = 4)
lines(time1, qI[filter1], col = 5, lty = 5)
lines(time1, qM[filter1], col = 6, lty = 6)
lines(time2, qE[filter2], col = 4, lty = 4)
lines(time2, qI[filter2], col = 5, lty = 5)
lines(time2, qM[filter2], col = 6, lty = 6)
lines(time1, PR[filter1], col = 7, lty = 7)
lines(time2, PR[filter2], col = 7, lty = 7)
lines(time1, QSS[filter1], col = 8, lty = 8)
lines(time2, QSS[filter2], col = 8, lty = 8)
abline(v = 60, lty = 2)
abline(h = 0, lty = 2)
legend("topright", c("R", "RB", "gs", "qE", "qI", "qM", "PR" ,"QSS"), col = 1:8, lty = 1:8, bty = "n")
text(30, 4.8, "1000", cex = 1.3)
text(90, 4.8, "50", cex = 1.3)
})
dev.off()
png("Output/figureCumulativeLimitations.png", width = 10, height = 6, pointsize = 8, units = "cm",
res = 600, bg = "white", antialias = "default")
with(mutations, {
par(mfrow = c(1,1), xaxs = "i", yaxs = "i", las = 1, mar= c(4.0,5.2,0.5,1), mgp = c(2.7,1,0))
plot(1,1, xlim = c(0,120), ylim = c(-100,3000), type = "n", ylab = expression(sum(italic(Delta*A)["i"]~(mu*mol~m^{-2}), i == 0, i == t)),
xlab = "Time (min)")
lines(time1, cumsum(fR[filter1]), col = 1, lty = 1)
lines(time1, cumsum(fRB[filter1]), col = 2, lty = 2)
lines(time1, cumsum(Kgs[filter1]), col = 3, lty = 3)
lines(time2, cumsum(fR[filter2]), col = 1, lty = 1)
lines(time2, cumsum(fRB[filter2]), col = 2, lty = 2)
lines(time2, cumsum(Kgs[filter2]), col = 3, lty = 3)
lines(time1, cumsum(qE[filter1]), col = 4, lty = 4)
lines(time1, cumsum(qI[filter1]), col = 5, lty = 5)
lines(time1, cumsum(qM[filter1]), col = 6, lty = 6)
lines(time2, cumsum(qE[filter2]), col = 4, lty = 4)
lines(time2, cumsum(qI[filter2]), col = 5, lty = 5)
lines(time2, cumsum(qM[filter2]), col = 6, lty = 6)
lines(time1, cumsum(PR[filter1]), col = 7, lty = 7)
lines(time2, cumsum(PR[filter2]), col = 7, lty = 7)
lines(time1, cumsum(QSS[filter1]), col = 8, lty = 8)
lines(time2, cumsum(QSS[filter2]), col = 8, lty = 8)
abline(v = 60, lty = 2)
abline(h = 0, lty = 2)
legend("topright", c("R", "RB", "gs", "qE", "qI", "qM", "PR", "QSS"), col = 1:8, lty = 1:8, bty = "n")
text(30, 2800, "1000", cex = 1.3)
text(90, 2800, "50", cex = 1.3)
})
dev.off()
# Fluctuating light -----------------------------------------------------------------------------------------------
# Simulate a 30-min square-wave light regime ("lightflecks") for one model.
# model:    a MiniModel model object.
# period:   half-cycle duration in seconds (light switches every `period` s).
# PARs:     c(low, high) total PAR levels; the run starts at the low level.
# param:    optional named parameter override(s) for sensitivity analysis.
# variable: which output column of the CVODE solution to return.
# Returns: the requested column sampled every min(period/10, 1) s over 1800 s.
# Relies on the script-global `parameters` named vector.
lightflecks = function(model, period, PARs, param = NULL, variable = "A") {
  PAR1 = PARs[1]
  PAR2 = PARs[2]
  # CVODE settings
  model$set_settings(c("atol","rtol","maxsteps","maxerr","maxnonlin","maxconvfail","minimum"),
                     c(1e-14,1e-8,1e4,20,20,20, -1e-6))
  model$set_settings(c("silent","positive", "force_positive"), c(TRUE, TRUE,TRUE))
  model$set_settings("maxtime", 1000)
  # Assign parameters from literature to the model
  filter = which(names(parameters) %in% names(model$Parameters$Values))
  model$set_parameters(names(parameters[filter]),unname(parameters[filter]))
  # Assign parameter for sensitivity analysis
  if(!is.null(param)) model$set_parameters(names(param), param)
  # Simulate a LiCOR with 10% red - 90% blue
  # (blue carries 10% and red 90% of total PAR)
  model$set_forcings("Ib", cbind(c(0,1), c(PAR1,PAR1)*0.1))
  model$set_forcings("Ir", cbind(c(0,1), c(PAR1,PAR1)*0.9))
  model$set_forcings("Ig", cbind(c(0,1), c(0,0)))
  model$set_forcings("CO2R", cbind(c(0,1), c(400,400)))
  model$set_forcings("H2OR", cbind(c(0,1), c(20,20)))
  model$set_forcings("Ta", cbind(c(0,1), c(298.15, 298.15)))
  model$set_forcings("Tl", cbind(c(0,1), c(298.15, 298.15)))
  model$set_states("Ci", 400)
  model$set_states("Cc", 400)
  model$set_states("Ccyt", 400)
  # PR only exists in some model variants; ignore failures elsewhere.
  tryCatch(model$set_states("PR", 25), error = function(x) NULL)
  # Calculate steady-state
  # (equilibrate 1800 s at the low PAR level, then restart from that state)
  model$set_time(c(0,1800))
  steadyState = cvode(model)[2,names(model$States$Values)]
  model$set_states(names(steadyState), steadyState)
  # Simulate Transient - Square wave light determined by period
  # Each switch is encoded as two forcing knots 0.01 s apart so the
  # interpolated forcing is a sharp (10 ms) ramp rather than a true step.
  timeI = sort(c(seq(0,1800, by = period), seq(1e-2,1800 + 1e-2, by = period)))
  model$set_forcings("Ib", cbind(timeI, c(rep(c(PAR1,PAR2,PAR2,PAR1)*0.1, times = 1800/period/2), PAR1*0.1,PAR2*0.1)))
  model$set_forcings("Ir", cbind(timeI, c(rep(c(PAR1,PAR2,PAR2,PAR1)*0.9, times = 1800/period/2), PAR1*0.9,PAR2*0.9)))
  model$set_time(seq(0,1800,by = min(period/10, 1)))
  cvode(model)[,variable]
}
# Calculate steady-state at 1000 and 50 uE
# Compute the steady state of a model at a constant PAR level.
# model: a MiniModel model object.
# PAR:   total PAR (split 10% blue / 90% red, as elsewhere in this script).
# Returns: the full CVODE output row at t = 7200 s (all columns), which the
# callers below treat as the steady state. Note the value is returned as
# the result of the final assignment (invisibly).
# Relies on the script-global `parameters` named vector.
run_steady = function(model, PAR) {
  # CVODE settings
  model$set_settings(c("atol","rtol","maxsteps","maxerr","maxnonlin","maxconvfail","minimum"),
                     c(1e-14,1e-6,1e4,20,20,20, -1e-6))
  model$set_settings(c("silent","positive", "force_positive"), c(TRUE, TRUE,TRUE))
  model$set_settings("maxtime", 600)
  # Assign parameters from literature to the model
  filter = which(names(parameters) %in% names(model$Parameters$Values))
  model$set_parameters(names(parameters[filter]),unname(parameters[filter]))
  # Simulate a LiCOR with 10% red - 90% blue
  model$set_forcings("Ib", cbind(c(0,1), c(PAR,PAR)*0.1))
  model$set_forcings("Ir", cbind(c(0,1), c(PAR,PAR)*0.9))
  model$set_forcings("Ig", cbind(c(0,1), c(0,0)))
  model$set_forcings("CO2R", cbind(c(0,1), c(400,400)))
  model$set_forcings("H2OR", cbind(c(0,1), c(20,20)))
  model$set_forcings("Ta", cbind(c(0,1), c(298.15, 298.15)))
  model$set_forcings("Tl", cbind(c(0,1), c(298.15, 298.15)))
  model$set_states("Ci", 400)
  model$set_states("Cc", 400)
  model$set_states("Ccyt", 400)
  # PR only exists in some model variants; ignore failures elsewhere.
  tryCatch(model$set_states("PR", 25), error = function(x) NULL)
  # Calculate steady-state
  # (2 h of constant forcing; row 2 is the final requested time point)
  model$set_time(c(0,7200))
  steadyState = cvode(model)[2,]
}
test = run_steady(generate_MiniModel_model(), 0)
APARdata = matrix(NA, nrow = 30, ncol = length(test))
colnames(APARdata) = names(test)
PARdata = seq(0,1200, l = 30)
for(i in 1:30) {
APARdata[i,] = run_steady(generate_MiniModel_model(), PARdata[i])
}
APAR = loess(A~PAR, data = data.frame(A = APARdata[,"A"], PAR = PARdata), span = 0.5,
control = loess.control(surface = "direct", statistics = "exact"))
plot(PARdata, APARdata[,"A"])
lines(PARdata, predict(APAR, newdata=data.frame(PAR = PARdata)))
# Calculate QSS curve with all the rate constants increased
test = run_steady(generate_MiniModelQss_model(), 0)
APARdata2 = matrix(NA, nrow = 30, ncol = length(test))
colnames(APARdata2) = names(test)
model = generate_MiniModel_model()
filter = which(names(parameters) %in% names(model$Parameters$Values))
model$set_parameters(names(parameters[filter]),unname(parameters[filter]))
for(i in 1:30) {
APARdata2[i,] = run_steady(generate_MiniModelQss_model(), PARdata[i])
}
plot(PARdata, APARdata2[,"A"])
points(PARdata, APARdata[,"A"], col = 2)
APAR = loess(A~PAR, data = data.frame(A = APARdata2[,"A"], PAR = PARdata), span = 0.5,
control = loess.control(surface = "direct", statistics = "exact"))
RpPAR = loess(A~PAR, data = data.frame(A = APARdata2[,"Rp"], PAR = PARdata), span = 0.5,
control = loess.control(surface = "direct", statistics = "exact"))
RuBPPAR = loess(A~PAR, data = data.frame(A = APARdata2[,"RuBP"], PAR = PARdata), span = 0.5,
control = loess.control(surface = "direct", statistics = "exact"))
fRBPAR = loess(A~PAR, data = data.frame(A = APARdata2[,"fRB"], PAR = PARdata), span = 0.5,
control = loess.control(surface = "direct", statistics = "exact"))
lines(PARdata, predict(APAR, newdata=data.frame(PAR = PARdata)))
# Simulate all mutants and periods -------------------------------------------------------------------------------------
if(.Platform$OS.type == "windows") {
cl <- makeCluster(8)
} else {
cl <- makeForkCluster(8)
}
registerDoParallel(cl)
pars = list(parameters["KiR"],
parameters["Krca"],
parameters["Kgsi"],
parameters[c("KdqEp", "KdqEz")],
parameters["Krep25"],
parameters["Kialpha25"],
parameters["kPR"])
W = length(pars)
periods = c(0.1, 0.3, 0.5, 1, 2, 3, 5, 10, 15, 30, 60, 90, 150, 225)
LF = foreach(period = periods, .packages = "MiniModel") %dopar% {
N = length(seq(0,1800,by = min(period/10, 1)))
out = matrix(NA, ncol = W + 4, nrow = N)
colnames(out) = c("control", "R", "RB", "gs", "qE", "qI", "qM", "PR", "All", "QSS", "QSS2")
PARdata = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "PAR")
out[,1] = lightflecks(generate_MiniModel_model(), period, c(50,1000))
for(i in 2:(W + 1)) out[,i] = lightflecks(generate_MiniModel_model(), period, c(50,1000), pars[[i - 1]]*1e6)
out[,W + 2] = lightflecks(generate_MiniModel_model(), period, c(50,1000), unlist(pars)*1e6)
out[,W + 3] = lightflecks(generate_MiniModelQss_model(), period, c(50,1000))
out[,W + 4] = PARdata
out
}
stopCluster(cl)
# Check that average PAR is always the same
for(i in 1:length(LF)) {
print(mean(LF[[i]][,"QSS2"]))
}
for(i in 1:length(LF)) {
LF[[i]][,"QSS2"] = predict(APAR, newdata=data.frame(PAR = as.numeric(LF[[i]][,"QSS2"])))
}
save(LF, file = "Intermediate/LFTotal.RData")
load("Intermediate/LFTotal.RData")
# Compute relative change in A due to virtual mutant
# Summarize lightfleck simulations into relative changes in CO2 assimilation.
#
# LF:            list of CVODE output matrices, one per fluctuation period;
#                column 1 is the control run, remaining columns are variants.
# fleck_periods: the half-cycle duration (s) used for each element of LF.
#                Defaults to the script-global `periods` for backward
#                compatibility with the original processLF(LF) call sites.
#
# Returns a list of three matrices (time-integrated, high-light-only,
# low-light-only), each with a leading "Period" column and, per variant,
# the relative change in summed A versus the control column.
processLF = function(LF, fleck_periods = periods) {
  LFeffect = LF
  LFhigh = LF
  LFlow = LF
  for (i in seq_along(LF)) {
    period = fleck_periods[i]
    # Reconstruct the square-wave light signal on the simulation output grid.
    # Fix: use the same knot construction as the forcings in lightflecks()
    # (seq(..., by = period) plus a 0.01 s offset and the trailing pair);
    # the previous seq(..., l = 1800/period) grid was misaligned with the
    # simulated switches, shifting the low/high masks.
    time = sort(c(seq(0, 1800, by = period), seq(1e-2, 1800 + 1e-2, by = period)))
    PAR = c(rep(c(0, 1, 1, 0), times = 1800/period/2), 0, 1)
    PAR = approx(time, PAR, seq(0, 1800, by = min(period/10, 1)))$y
    # Time-integrated CO2 assimilation (relative to the control column)
    LFeffect[[i]] = colSums(LFeffect[[i]])
    LFeffect[[i]] = (LFeffect[[i]] - LFeffect[[i]][1])/LFeffect[[i]][1]
    # Low irradiance average CO2 assimilation
    # (drop = FALSE keeps a matrix even when a single row matches)
    LFlow[[i]] = colSums(LFlow[[i]][which(PAR <= 0.5), , drop = FALSE])
    LFlow[[i]] = (LFlow[[i]] - LFlow[[i]][1])/LFlow[[i]][1]
    # High irradiance average CO2 assimilation
    LFhigh[[i]] = colSums(LFhigh[[i]][which(PAR >= 0.5), , drop = FALSE])
    LFhigh[[i]] = (LFhigh[[i]] - LFhigh[[i]][1])/LFhigh[[i]][1]
  }
  # Stack the per-period summaries and prepend the period column.
  LFeffect = do.call("rbind", LFeffect)
  LFeffect = cbind(Period = fleck_periods, LFeffect)
  colnames(LFeffect)[1] = "Period"
  LFlow = do.call("rbind", LFlow)
  LFlow = cbind(Period = fleck_periods, LFlow)
  colnames(LFlow)[1] = "Period"
  LFhigh = do.call("rbind", LFhigh)
  LFhigh = cbind(Period = fleck_periods, LFhigh)
  colnames(LFhigh)[1] = "Period"
  return(list(LFeffect, LFhigh, LFlow))
}
LFlist = processLF(LF)
LFeffect = LFlist[[1]]
LFhigh = LFlist[[2]]
LFlow = LFlist[[3]]
png("Output/figureLF.png", width = 14, height = 7, pointsize = 10, units = "cm",
res = 600, bg = "white", antialias = "default")
par(mfrow = c(1,2), xaxs = "i", yaxs = "i", las = 1, mar = c(4.0,4.2,0.5,1), mgp = c(2,1,0))
with(as.data.frame(LFeffect), {
plot(Period, R*100, t = "l", log = "x", ylim = c(-2,40), xaxt = "n", xlim = c(0.1,300),
col = 1, lty = 1,
xlab = "Lightfleck duration (s)", ylab = expression(italic(Delta*A/A)~("%")))
lines(Period, RB*100, col = 2, lty = 2, t = "l")
lines(Period, gs*100, col = 3, lty = 3, t = "l")
lines(Period, qE*100, col = 4, lty = 4, t = "l")
lines(Period, qI*100, col = 5, lty = 5, t = "l")
lines(Period, qM*100, col = 6, lty = 6, t = "l")
lines(Period, PR*100, col = 7, lty = 7, t = "l")
axis(1, at = c(0.1, 1, 10, 100,200,300), labels = c("0.1","1", "10","100","", "300"))
axis(1, at = 300, labels = "300")
axis(1, at = seq(0.2,0.9,0.1), tcl = -0.2, labels = NA)
axis(1, at = seq(2,9,1), tcl = -0.2, labels = NA)
axis(1, at = seq(20,100,10), tcl = -0.2, labels = NA)
legend("topleft", c("R", "RB", "gs", "qE", "qI", "qM", "PR"), col = 1:7, lty = 1:7,
bty = "n", ncol = 2, cex = 0.65, x.intersp = 0.5)
abline(h= 0, lty = 1, col = "gray")
text(10,38,labels = "A", cex = 1.2)
})
with(as.data.frame(LFeffect), {
plot(Period, All*100, t = "l", log = "x", ylim = c(-10,40), xaxt = "n", xlim = c(0.1,300),
col = 1, lty = 1,
xlab = "Lightfleck duration (s)", ylab = expression(italic(Delta*A/A)~("%")))
lines(Period, QSS2*100, col = 2 , lty = 2, t = "l")
#lines(Period, QSS2*100, col = 3 , lty = 3, t = "l")
axis(1, at = c(0.1, 1, 10, 100,200,300), labels = c("0.1","1", "10","100","", "300"))
axis(1, at = 300, labels = "300")
axis(1, at = seq(0.2,0.9,0.1), tcl = -0.2, labels = NA)
axis(1, at = seq(2,9,1), tcl = -0.2, labels = NA)
axis(1, at = seq(20,100,10), tcl = -0.2, labels = NA)
legend("topleft", c("All", "QSS"), col = 1:2, lty = 1:2,
bty = "n", ncol = 1, cex = 0.65, x.intersp = 0.5)
abline(h= 0, lty = 1, col = "gray")
text(10,38,labels = "B", cex = 1.2)
})
dev.off()
# Plot dynamics of RuBP and photosynthesis during lightflecks -------------
# Get dynamics of RuBP
pars = list(parameters["KiR"],
parameters["Krca"],
parameters["Kgsi"],
parameters[c("KdqEp", "KdqEz")],
parameters["Krep25"],
parameters["Kialpha25"],
parameters["kPR"])
W = length(pars)
period = 0.5
N = length(seq(0,1800,by = min(period/10, 1)))
# RuBP dynamics
RuBP = matrix(NA, ncol = 3, nrow = N)
colnames(RuBP) = c("All", "control", "QSS2")
PARdata = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "PAR")
RuBP[,1] = lightflecks(generate_MiniModel_model(), period, c(50,1000), unlist(pars)*1e6, variable = "RuBP")
RuBP[,2] = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "RuBP")
RuBP[,3] = PARdata
RuBP[,3] = predict(RuBPPAR, newdata=data.frame(PAR = as.numeric(RuBP[,3])))
period = 90
N = length(seq(0,1800,by = min(period/10, 1)))
RuBP2 = matrix(NA, ncol = 3, nrow = N)
colnames(RuBP2) = c("All", "control", "QSS2")
PARdata = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "PAR")
RuBP2[,1] = lightflecks(generate_MiniModel_model(), period, c(50,1000), unlist(pars)*1e6, variable = "RuBP")
RuBP2[,2] = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "RuBP")
RuBP2[,3] = PARdata
RuBP2[,3] = predict(RuBPPAR, newdata=data.frame(PAR = as.numeric(RuBP2[,3])))
# Rubisco activity
period = 0.5
N = length(seq(0,1800,by = min(period/10, 1)))
fRB = matrix(NA, ncol = 3, nrow = N)
colnames(fRB) = c("All", "control", "QSS2")
PARdata = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "PAR")
fRB[,1] = lightflecks(generate_MiniModel_model(), period, c(50,1000), unlist(pars)*1e6, variable = "fRB")
fRB[,2] = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "fRB")
fRB[,3] = PARdata
fRB[,3] = predict(fRBPAR, newdata=data.frame(PAR = as.numeric(fRB[,3])))
PARdata5 = PARdata
period = 90
N = length(seq(0,1800,by = min(period/10, 1)))
fRB2 = matrix(NA, ncol = 3, nrow = N)
PARdata = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "PAR")
colnames(fRB2) = c("All", "control", "QSS2")
fRB2[,1] = lightflecks(generate_MiniModel_model(), period, c(50,1000), unlist(pars)*1e6, variable = "fRB")
fRB2[,2] = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "fRB")
fRB2[,3] = PARdata
fRB2[,3] = predict(fRBPAR, newdata=data.frame(PAR = as.numeric(fRB2[,3])))
# Supplemental figure S6
png("Output/figureLightFlecksSample.png", width = 14, height = 18, pointsize = 10, units = "cm",
res = 600, bg = "white", antialias = "default")
par(mfcol = c(3,2), mar = c(4,4.5,0.5,0.65), las = 1, xaxs = "i", yaxs = "i")
t0 = 10000 + 11
t1 = t0 + 20
l = t0:t1 - t0
# Period= 0.5 s
plot(0.05*l, LF[[3]][t0:t1,"All"], t = "l", ylim = c(0,14),
ylab = expression(A~(mu*mol~m^{-2}~s^{-1})),
xlab = "Time (s)")
lines(0.05*l, LF[[3]][t0:t1,"control"], col = 2)
minQSS2 = min(LF[[3]][t0:t1,"QSS2"])
maxQSS2 = max(LF[[3]][t0:t1,"QSS2"])
QSStime = c(0,0,10,10,20)
lines(0.05*QSStime, c(maxQSS2, minQSS2, minQSS2, maxQSS2, maxQSS2), col = 3)
abline(v = 0.05*(which(abs(diff(PARdata5[t0:t1])) > 100) - 1), lty = 2)
text(0.05,13.5, "A")
text(c(0.25, 0.75),13.5, c(50,1000))
legend("bottomright", c("control", "All", "QSS"), col = c(2,1,3), lty = 1, bty = "n")
plot(0.05*l, RuBP[t0:t1,"All"], t = "l", ylim = c(0,100),
ylab = expression(RuBP~(mu*mol~m^{-2}~s^{-1})),
xlab = "Time (s)")
lines(0.05*l, RuBP[t0:t1,"control"], col = 2)
minQSS2 = min(RuBP[t0:t1,"QSS2"])
maxQSS2 = max(RuBP[t0:t1,"QSS2"])
QSStime = c(0,0,10,10,20)
lines(0.05*QSStime, c(maxQSS2, minQSS2, minQSS2, maxQSS2, maxQSS2), col = 3)
abline(v = 0.05*(which(abs(diff(PARdata5[t0:t1])) > 100) - 1), lty = 2)
text(0.05,95, "B")
text(c(0.25, 0.75),95, c(50,1000))
plot(0.05*l, fRB[t0:t1,"All"], t = "l", ylim = c(0,1),
ylab = expression(fRB~(mu*mol~m^{-2}~s^{-1})),
xlab = "Time (s)")
lines(0.05*l, fRB[t0:t1,"control"], col = 2)
minQSS2 = min(fRB[t0:t1,"QSS2"])
maxQSS2 = max(fRB[t0:t1,"QSS2"])
QSStime = c(0,0,10,10,20)
lines(0.05*QSStime, c(maxQSS2, minQSS2, minQSS2, maxQSS2, maxQSS2), col = 3)
abline(v = 0.05*(which(abs(diff(PARdata5[t0:t1])) > 100) - 1), lty = 2)
text(0.05,0.95, "C")
text(c(0.25, 0.75),0.95, c(50,1000))
# Period = 90 s
t0 = 1801 - 270
t1 = 1801 - 90
l = t0:t1 - t0
plot(l, LF[[12]][t0:t1,"All"], t = "l", ylim = c(0,14),
ylab = expression(A~(mu*mol~m^{-2}~s^{-1})),
xlab = "Time (s)", xaxt = "n")
axis(1, seq(0,180,30))
lines(l, LF[[12]][t0:t1,"control"], col = 2)
minQSS2 = min(LF[[12]][t0:t1,"QSS2"])
maxQSS2 = max(LF[[12]][t0:t1,"QSS2"])
QSStime = c(0,0,90,90,180)
lines(QSStime, c(maxQSS2, minQSS2, minQSS2, maxQSS2, maxQSS2), col = 3)
abline(v = (which(abs(diff(PARdata[t0:t1])) > 100) - 1), lty = 2)
text(9,13.5, "D")
text(c(45, 135),13.5, c(50,1000))
plot(l, RuBP2[t0:t1,"All"], t = "l", ylim = c(0,100),
ylab = expression(RuBP~(mu*mol~m^{-2}~s^{-1})),
xlab = "Time (s)", xaxt = "n")
axis(1, seq(0,180,30))
lines(l, RuBP2[t0:t1,"control"], col = 2)
minQSS2 = min(RuBP2[t0:t1,"QSS2"])
maxQSS2 = max(RuBP2[t0:t1,"QSS2"])
QSStime = c(0,0,90,90,180)
lines(QSStime, c(maxQSS2, minQSS2, minQSS2, maxQSS2, maxQSS2), col = 3)
abline(v =(which(abs(diff(PARdata[t0:t1])) > 100) - 1), lty = 2)
text(9,95, "E")
text(c(45, 135),95, c(50,1000))
plot(l, fRB2[t0:t1,"All"], t = "l", ylim = c(0,1),
ylab = expression(fRB~(mu*mol~m^{-2}~s^{-1})),
xlab = "Time (s)", xaxt = "n")
axis(1, seq(0,180,30))
lines(l, fRB2[t0:t1,"control"], col = 2)
minQSS2 = min(fRB2[t0:t1,"QSS2"])
maxQSS2 = max(fRB2[t0:t1,"QSS2"])
QSStime = c(0,0,90,90,180)
lines(QSStime, c(maxQSS2, minQSS2, minQSS2, maxQSS2, maxQSS2), col = 3)
abline(v = (which(abs(diff(PARdata[t0:t1])) > 100) - 1), lty = 2)
text(9,0.95, "F")
text(c(45, 135),0.95, c(50,1000))
dev.off()
# Effect of frequency -----------------------------------------------------
# Simulate control model at different frequencies
# Calculata accumulated photosynthesis
# Simulate a 1-h square-wave light regime and return the mean CO2
# assimilation rate over the second half hour (from the cumulative state
# variable "sumA").
# model:  a MiniModel model object (must declare a "sumA" state).
# period: half-cycle duration in seconds.
# PARs:   c(low, high) total PAR levels; equilibration is at the low level.
# Relies on the script-global `parameters` named vector.
lightflecksSumA = function(model, period, PARs) {
  PAR1 = PARs[1]
  PAR2 = PARs[2]
  # CVODE settings
  model$set_settings(c("atol","rtol","maxsteps","maxerr","maxnonlin","maxconvfail","minimum"),
                     c(1e-10,1e-8,5e4,20,20,20, -1e-6))
  model$set_settings(c("silent","positive", "force_positive"), c(TRUE, TRUE,TRUE))
  model$set_settings("maxtime", 1000)
  # Assign parameters from literature to the model
  filter = which(names(parameters) %in% names(model$Parameters$Values))
  model$set_parameters(names(parameters[filter]),unname(parameters[filter]))
  # Simulate a LiCOR with 10% red - 90% blue
  model$set_forcings("Ib", cbind(c(0,1), c(PAR1,PAR1)*0.1))
  model$set_forcings("Ir", cbind(c(0,1), c(PAR1,PAR1)*0.9))
  model$set_forcings("Ig", cbind(c(0,1), c(0,0)))
  model$set_forcings("CO2R", cbind(c(0,1), c(400,400)))
  model$set_forcings("H2OR", cbind(c(0,1), c(20,20)))
  model$set_forcings("Ta", cbind(c(0,1), c(298.15, 298.15)))
  model$set_forcings("Tl", cbind(c(0,1), c(298.15, 298.15)))
  model$set_states("Ci", 400)
  model$set_states("Cc", 400)
  model$set_states("Ccyt", 400)
  # PR only exists in some model variants; ignore failures elsewhere.
  tryCatch(model$set_states("PR", 25), error = function(x) NULL)
  # Calculate steady-state
  # (equilibrate 1800 s at low light, restart from that state)
  model$set_time(c(0,1800))
  steadyState = cvode(model)[2,names(model$States$Values)]
  model$set_states(names(steadyState), steadyState)
  # Reset the cumulative assimilation integrator before the transient.
  model$set_states("sumA", 0)
  # Simulate Transient - Square wave light determined by period
  # NOTE(review): the switch offset here is min(1e-2, period/10), unlike
  # the fixed 1e-2 used in lightflecks() — confirm this asymmetry is
  # intentional for very short periods.
  dt = min(1e-2, period/10)
  timeI = sort(c(seq(0,3600, by = period), seq(dt,3600 + dt, by = period)))
  model$set_forcings("Ib", cbind(timeI, c(rep(c(PAR1,PAR2,PAR2,PAR1)*0.1, times = 3600/period/2), PAR1*0.1,PAR2*0.1)))
  model$set_forcings("Ir", cbind(timeI, c(rep(c(PAR1,PAR2,PAR2,PAR1)*0.9, times = 3600/period/2), PAR1*0.9,PAR2*0.9)))
  model$set_time(seq(0,3600,by = 1))
  # Mean A over the second half hour: rows 1800 and 3600 of the 1-s output
  # grid correspond to t = 1799 s and t = 3599 s (TODO confirm off-by-one
  # vs t = 1800/3600 is acceptable for this average).
  diff(cvode(model)[c(1800,3600),"sumA"])/1800
}
# Run the lightfleck simulations in parallel, one worker per period.
# Fork clusters are cheaper but unavailable on Windows, hence the PSOCK fallback.
if(.Platform$OS.type == "windows") {
  cl <- makeCluster(8)
} else {
  cl <- makeForkCluster(8)
}
registerDoParallel(cl)
# Lightfleck durations (s) to explore
periods = c(0.1, 0.3, 0.5, 1, 2, 3, 5, 10, 15, 30, 60, 90, 150, 225, 300)
# Mean assimilation for square waves between a background and a fleck PAR level
LF1000 = foreach(period = periods, .packages = "MiniModel") %dopar% {
  lightflecksSumA(generate_MiniModel_model(), period, c(50,1000))
}
LF800 = foreach(period = periods, .packages = "MiniModel") %dopar% {
  lightflecksSumA(generate_MiniModel_model(), period, c(50,800))
}
# BUGFIX: this scenario is named 150-1000 and is later compared against
# constant light at mean(c(150, 1000)), but it was simulated with c(150, 800).
LF150_1000 = foreach(period = periods, .packages = "MiniModel") %dopar% {
  lightflecksSumA(generate_MiniModel_model(), period, c(150,1000))
}
LF600 = foreach(period = periods, .packages = "MiniModel") %dopar% {
  lightflecksSumA(generate_MiniModel_model(), period, c(50,600))
}
LF400 = foreach(period = periods, .packages = "MiniModel") %dopar% {
  lightflecksSumA(generate_MiniModel_model(), period, c(50,400))
}
stopCluster(cl)
# "Flashing steady-state" reference: average of the steady-state A at the two
# PAR levels (what A would be if all processes adjusted instantaneously).
AQSS1000 = mean(predict(APAR, newdata = data.frame(PAR = c(50,1000))))
AQSS800 = mean(predict(APAR, newdata = data.frame(PAR = c(50,800))))
AQSS600 = mean(predict(APAR, newdata = data.frame(PAR = c(50,600))))
AQSS400 = mean(predict(APAR, newdata = data.frame(PAR = c(50,400))))
# "Constant light" reference: steady-state A at the time-averaged PAR
# (the average PAR is the same in all regimes by construction).
Aconst = predict(APAR, newdata = data.frame(PAR = mean(c(50,1000))))
# Mean A vs lightfleck duration for the 50/1000 regime, with both references
png(file = "LightfleckDuration.png", width = 7, height = 4, units = "in",
    bg = "transparent", res = 1000)
par(mfrow = c(1,1), las = 1, yaxs = "i", xaxs = "i", mar = c(4,5.5,0.5,0.7),
    cex.axis = 1.2, cex.lab = 1.2, lwd = 1.2)
plot(periods, unlist(LF1000), ylim = c(4,13), log = "x", t = "o",
     xlab = "Lightfleck duration (s)",
     ylab = expression(bar(A)~(mu*mol~m^{-2}~s^{-1})))
text(100, 5, "Flashing dynamic")
abline(h = Aconst)
text(100, 12, "Constant light")
abline(h = AQSS1000, lty = 2)
text(90, 8, "Flashing steady-state")
dev.off()
# Same kind of plot for the 150/1000 regime (proposal figure)
png(file = "FrequencyProposal.png", width = 5, height = 4, units = "in",
    bg = "transparent", res = 1000)
par(mfrow = c(1,1), las = 1, yaxs = "i", xaxs = "i", mar = c(4,5.5,0.5,0.7),
    cex.axis = 1.2, cex.lab = 1.2, lwd = 1.2)
plot(periods, unlist(LF150_1000), ylim = c(8,13), log = "x", t = "o",
     xlab = "Fluctuation duration (s)",
     ylab = expression(Photosynthesis~(mu*mol~m^{-2}~s^{-1})),
     xaxt = "n")
axis(1, at = c(0.1,0.5,5,50))
text(100, 9, "Fluctuating")
abline(h = predict(APAR, newdata = data.frame(PAR = mean(c(150,1000)))))
text(100, 12.7, "Constant")
dev.off()
# Steady-state light-response curve, with construction lines that illustrate
# why averaging over a convex/concave response (Jensen's inequality) makes
# mean A under fluctuating light differ from A at the mean light
png(file = "LRC.png", width = 5, height = 4, units = "in",
    bg = "transparent", res = 1000)
par(mfrow = c(1,1), las = 1, yaxs = "i", xaxs = "i", mar = c(4,5.5,0.5,1.2),
    cex.axis = 1.2, cex.lab = 1.2, lwd = 1)
plot(PARdata, predict(APAR, newdata=data.frame(PAR = PARdata)), t = "l",
     ylab = expression(Photosynthesis~(mu*mol~m^{-2}~s^{-1})),
     xlab = expression(Light~intensity~(mu*mol~m^{-2}~s^{-1})),
     ylim = c(-1,13))
# Explain the non-linear effect
A100 = predict(APAR, newdata=data.frame(PAR = 100))
lines(c(100, 100), c(-10,A100), lty = 2, lwd = 1)
lines(c(0, 100), c(A100,A100), lty = 2, lwd = 1)
A600 = predict(APAR, newdata=data.frame(PAR = 600))
lines(c(600, 600), c(-10,A600), lty = 2, lwd = 1)
lines(c(0, 600), c(A600,A600), lty = 2, lwd = 1)
muA = mean(c(A100, A600))
A350 = predict(APAR, newdata=data.frame(PAR = 350))
lines(c(350, 350), c(-10,A350), lty = 2, lwd = 1)
lines(c(0, 350), c(A350,A350), lty = 2, lwd = 1)
lines(c(0,100), c(muA, muA))
dev.off()
# Calculate cutoff for each lightfleck intensity: the duration at which the
# dynamic response crosses the flashing steady-state reference, found by
# inverse linear interpolation with approx()
cut1000 = approx(unlist(LF1000) - AQSS1000, periods, 0)$y
cut800 = approx(unlist(LF800) - AQSS800, periods, 0)$y
cut600 = approx(unlist(LF600) - AQSS600, periods, 0)$y
cut400 = approx(unlist(LF400) - AQSS400, periods, 0)$y
plot(c(400,600, 800, 1000), c(cut400, cut600, cut800, cut1000))
# Decorative induction/relaxation trace (axes suppressed, for presentations).
# NOTE(review): `control` as built by run() below exposes a column "A", not
# "Photo" - confirm this plot is meant to use a LiCOR data frame instead.
png(file = "InductionRelaxation.png", width = 6, height = 3, units = "in",
    bg = "transparent", res = 1000)
par(mar = c(0.5,0.5,0.5,0.5), bg = "transparent")
with(control, plot(time, Photo, t = "l", lwd = 4, col = "darkgreen", xaxt = "n",
                   yaxt = "n", bg = "transparent", bty = "n"))
dev.off()
| /Code/Simulations/LimitingFactors.R | no_license | AleMorales/DynamicPhotosynthesis | R | false | false | 29,011 | r | # Load libraries ----------------------------------------------------------
# Load libraries ----------------------------------------------------------
library(MiniModel)
library(ggplot2)
library(readr)
library(dplyr)
library(doParallel)
library(Hmisc)
# Load data ---------------------------------------------------------------
# Parameters fitted to literature
load("Intermediate/ParametersLiterature.RData")
load("Intermediate/ParametersExperiment.RData")
# Start from the literature values and overwrite any parameter that was
# re-estimated from the experiments.
parameters = params_literature
for(i in row.names(params_experiment)) {
  parameters[i,] = params_experiment[i,]
}
# Collapse the parameter table into a named numeric vector of values.
parnames = row.names(parameters)
parameters = parameters[,"value"]
names(parameters) = parnames
# gamma3 is constrained so that the three gamma fractions sum to 1.
parameters["gamma3"] = 1 - parameters["gamma2"] - parameters["gamma1"]
# Drop the loaded objects that are not needed further.
rm(data_summary, params_experiment, params_literature, experiment_summary)
# Induction & Relaxation curve ---------------------------------------------------------
# Simulate photosynthetic induction (1 h at 1000 umol m-2 s-1) followed by
# relaxation (1 h back at 50 umol m-2 s-1), starting from steady state at 50.
# model: a MiniModel object (modified in place).
# Returns the transient as a data frame with one row per second (7200 s).
run = function(model) {
  # CVODE settings
  model$set_settings(c("atol","rtol","maxsteps","maxerr","maxnonlin","maxconvfail","minimum"),
                     c(1e-10,1e-6,1e4,20,20,20, -1e-6))
  model$set_settings(c("silent","positive", "force_positive"), c(TRUE, TRUE,TRUE))
  # Assign parameters from literature to the model (only those this variant has)
  filter = which(names(parameters) %in% names(model$Parameters$Values))
  model$set_parameters(names(parameters[filter]),unname(parameters[filter]))
  # Simulate a LiCOR chamber at 50 PAR, 10% blue (Ib) - 90% red (Ir)
  model$set_forcings("Ib", cbind(c(0,1), c(5,5)))
  model$set_forcings("Ir", cbind(c(0,1), c(45,45)))
  model$set_forcings("Ig", cbind(c(0,1), c(0,0)))
  model$set_forcings("CO2R", cbind(c(0,1), c(400,400)))
  model$set_forcings("H2OR", cbind(c(0,1), c(20,20)))
  model$set_forcings("Ta", cbind(c(0,1), c(298.15, 298.15)))
  model$set_forcings("Tl", cbind(c(0,1), c(298.15, 298.15)))
  model$set_states("Ci", 400)
  model$set_states("Cc", 400)
  model$set_states("Ccyt", 400)
  # Some model variants lack the PR state; ignore the error in that case
  tryCatch(model$set_states("PR", 25), error = function(x) NULL)
  # Calculate steady-state at low light and use it as the initial state
  model$set_time(c(0,3600))
  steadyState = cvode(model)[2,names(model$States$Values)]
  model$set_states(names(steadyState), steadyState)
  # Simulate transient: step up to 1000 PAR at t = 0, back to 50 at t = 3600 s
  model$set_forcings("Ib", cbind(c(0,3600, 3601, 7200), c(100,100,5,5)))
  model$set_forcings("Ir", cbind(c(0,3600, 3601, 7200), c(900,900,45,45)))
  model$set_time(1:7200)
  Transient = cvode(model)
  class(Transient) = "matrix"
  as_data_frame(Transient)  # note: as_data_frame() is deprecated in dplyr (as_tibble)
}
# Simulate all mutants --------------------------------------------------------------------------------------------
# "Virtual mutants": Qss* model variants assume one regulatory process is
# always at quasi-steady-state (i.e. instantaneous), to quantify the dynamic
# limitation that process imposes relative to the full model (control).
control = run(generate_MiniModel_model())      # full dynamic model
QssfR = run(generate_MiniModelQssfR_model())   # instantaneous fR
QssfRB = run(generate_MiniModelQssfRB_model()) # instantaneous fRB
QssKgs = run(generate_MiniModelQssgs_model())  # instantaneous stomatal conductance
QssqE = run(generate_MiniModelQssqE_model())   # instantaneous qE quenching
QssqM = run(generate_MiniModelQssqM_model())   # instantaneous qM quenching
QssqI = run(generate_MiniModelQssqI_model())   # instantaneous qI quenching
QssPR = run(generate_MiniModelQssPR_model())   # instantaneous PR
Qss = run(generate_MiniModelQss_model())       # all processes instantaneous
# Calculate differences in Photo ----------------------------------------------------------------------------------
# Absolute difference in A between each virtual mutant and the control;
# the commented-out "/control$A" would give the relative difference instead.
mutations = data_frame(fR = (QssfR$A - control$A),#/control$A,
                       fRB = (QssfRB$A - control$A),#/control$A,
                       Kgs = (QssKgs$A - control$A),#/control$A,
                       qE = (QssqE$A - control$A),#/control$A,
                       qI = (QssqI$A - control$A),#/control$A,
                       qM = (QssqM$A - control$A),#/control$A)
                       PR = (QssPR$A - control$A),#/control$A)
                       QSS = (Qss$A - control$A))#/control$A)
# Plotting windows: skip the first induction sample and the first ~15 s after
# the step down (presumably to avoid output near the discontinuity - confirm)
filter1 = 2:3600
filter2 = 3615:7200
time1 = (1:7200)[filter1]/60  # minutes
time2 = (1:7200)[filter2]/60
# Figure: Delta A (mutant - control) during induction (0-60 min at 1000 PAR)
# and relaxation (60-120 min at 50 PAR) for every virtual mutant
png("Output/figureLimitations.png", width = 10, height = 6, pointsize = 8, units = "cm",
    res = 600, bg = "white", antialias = "default")
with(mutations, {
  par(mfrow = c(1,1), xaxs = "i", yaxs = "i", las = 1, mar= c(4.0,4.0,0.5,1), mgp = c(2,1,0))
  plot(1,1, xlim = c(0,120), ylim = c(-0.3,5), type = "n", ylab = expression(italic(Delta*A)~(mu*mol~m^{-2}~s^{-1})),
       xlab = "Time (min)")
  # induction (time1) and relaxation (time2) segments are plotted separately
  # so the discontinuity at the light transition is not bridged by a line
  lines(time1, fR[filter1], col = 1, lty = 1)
  lines(time1, fRB[filter1], col = 2, lty = 2)
  lines(time1, Kgs[filter1], col = 3, lty = 3)
  lines(time2, fR[filter2], col = 1, lty = 1)
  lines(time2, fRB[filter2], col = 2, lty = 2)
  lines(time2, Kgs[filter2], col = 3, lty = 3)
  lines(time1, qE[filter1], col = 4, lty = 4)
  lines(time1, qI[filter1], col = 5, lty = 5)
  lines(time1, qM[filter1], col = 6, lty = 6)
  lines(time2, qE[filter2], col = 4, lty = 4)
  lines(time2, qI[filter2], col = 5, lty = 5)
  lines(time2, qM[filter2], col = 6, lty = 6)
  lines(time1, PR[filter1], col = 7, lty = 7)
  lines(time2, PR[filter2], col = 7, lty = 7)
  lines(time1, QSS[filter1], col = 8, lty = 8)
  lines(time2, QSS[filter2], col = 8, lty = 8)
  abline(v = 60, lty = 2)  # light transition at 60 min
  abline(h = 0, lty = 2)
  legend("topright", c("R", "RB", "gs", "qE", "qI", "qM", "PR" ,"QSS"), col = 1:8, lty = 1:8, bty = "n")
  text(30, 4.8, "1000", cex = 1.3)  # PAR level of each phase
  text(90, 4.8, "50", cex = 1.3)
})
dev.off()
# Same comparison, cumulated over time; note that the cumulative sum restarts
# from zero for the relaxation segment (cumsum is taken per segment)
png("Output/figureCumulativeLimitations.png", width = 10, height = 6, pointsize = 8, units = "cm",
    res = 600, bg = "white", antialias = "default")
with(mutations, {
  par(mfrow = c(1,1), xaxs = "i", yaxs = "i", las = 1, mar= c(4.0,5.2,0.5,1), mgp = c(2.7,1,0))
  plot(1,1, xlim = c(0,120), ylim = c(-100,3000), type = "n", ylab = expression(sum(italic(Delta*A)["i"]~(mu*mol~m^{-2}), i == 0, i == t)),
       xlab = "Time (min)")
  lines(time1, cumsum(fR[filter1]), col = 1, lty = 1)
  lines(time1, cumsum(fRB[filter1]), col = 2, lty = 2)
  lines(time1, cumsum(Kgs[filter1]), col = 3, lty = 3)
  lines(time2, cumsum(fR[filter2]), col = 1, lty = 1)
  lines(time2, cumsum(fRB[filter2]), col = 2, lty = 2)
  lines(time2, cumsum(Kgs[filter2]), col = 3, lty = 3)
  lines(time1, cumsum(qE[filter1]), col = 4, lty = 4)
  lines(time1, cumsum(qI[filter1]), col = 5, lty = 5)
  lines(time1, cumsum(qM[filter1]), col = 6, lty = 6)
  lines(time2, cumsum(qE[filter2]), col = 4, lty = 4)
  lines(time2, cumsum(qI[filter2]), col = 5, lty = 5)
  lines(time2, cumsum(qM[filter2]), col = 6, lty = 6)
  lines(time1, cumsum(PR[filter1]), col = 7, lty = 7)
  lines(time2, cumsum(PR[filter2]), col = 7, lty = 7)
  lines(time1, cumsum(QSS[filter1]), col = 8, lty = 8)
  lines(time2, cumsum(QSS[filter2]), col = 8, lty = 8)
  abline(v = 60, lty = 2)
  abline(h = 0, lty = 2)
  legend("topright", c("R", "RB", "gs", "qE", "qI", "qM", "PR", "QSS"), col = 1:8, lty = 1:8, bty = "n")
  text(30, 2800, "1000", cex = 1.3)
  text(90, 2800, "50", cex = 1.3)
})
dev.off()
# Fluctuating light -----------------------------------------------------------------------------------------------
# Time course of one output variable under a square-wave light regime.
# model    - MiniModel object (modified in place)
# period   - duration of each light phase (s)
# PARs     - c(low, high) PAR levels (umol m-2 s-1), split 10% blue / 90% red
# param    - optional named vector of parameter overrides; callers multiply a
#            rate constant by 1e6 to emulate an instantaneous process
# variable - output column to return (default: net CO2 assimilation "A")
# Returns the values of `variable` sampled every min(period/10, 1) s over a
# 1800 s simulation started from the steady state at the low PAR level.
lightflecks = function(model, period, PARs, param = NULL, variable = "A") {
  PAR1 = PARs[1]
  PAR2 = PARs[2]
  # CVODE settings
  model$set_settings(c("atol","rtol","maxsteps","maxerr","maxnonlin","maxconvfail","minimum"),
                     c(1e-14,1e-8,1e4,20,20,20, -1e-6))
  model$set_settings(c("silent","positive", "force_positive"), c(TRUE, TRUE,TRUE))
  model$set_settings("maxtime", 1000)
  # Assign parameters from literature to the model
  filter = which(names(parameters) %in% names(model$Parameters$Values))
  model$set_parameters(names(parameters[filter]),unname(parameters[filter]))
  # Assign parameter for sensitivity analysis (virtual mutants)
  if(!is.null(param)) model$set_parameters(names(param), param)
  # Simulate a LiCOR chamber with 10% blue (Ib) - 90% red (Ir) light
  model$set_forcings("Ib", cbind(c(0,1), c(PAR1,PAR1)*0.1))
  model$set_forcings("Ir", cbind(c(0,1), c(PAR1,PAR1)*0.9))
  model$set_forcings("Ig", cbind(c(0,1), c(0,0)))
  model$set_forcings("CO2R", cbind(c(0,1), c(400,400)))
  model$set_forcings("H2OR", cbind(c(0,1), c(20,20)))
  model$set_forcings("Ta", cbind(c(0,1), c(298.15, 298.15)))
  model$set_forcings("Tl", cbind(c(0,1), c(298.15, 298.15)))
  model$set_states("Ci", 400)
  model$set_states("Cc", 400)
  model$set_states("Ccyt", 400)
  # Some model variants lack the PR state; ignore the error in that case
  tryCatch(model$set_states("PR", 25), error = function(x) NULL)
  # Calculate steady-state and use it as the initial state
  model$set_time(c(0,1800))
  steadyState = cvode(model)[2,names(model$States$Values)]
  model$set_states(names(steadyState), steadyState)
  # Simulate Transient - Square wave light determined by period
  timeI = sort(c(seq(0,1800, by = period), seq(1e-2,1800 + 1e-2, by = period)))
  model$set_forcings("Ib", cbind(timeI, c(rep(c(PAR1,PAR2,PAR2,PAR1)*0.1, times = 1800/period/2), PAR1*0.1,PAR2*0.1)))
  model$set_forcings("Ir", cbind(timeI, c(rep(c(PAR1,PAR2,PAR2,PAR1)*0.9, times = 1800/period/2), PAR1*0.9,PAR2*0.9)))
  model$set_time(seq(0,1800,by = min(period/10, 1)))
  cvode(model)[,variable]
}
# Calculate steady-state at 1000 and 50 uE
# Run the model to (near) steady state at a constant PAR level.
# model - MiniModel object (modified in place); PAR - incident PAR level.
# Returns the full cvode output row (all states and outputs) after 7200 s;
# the trailing assignment is still the function's return value.
run_steady = function(model, PAR) {
  # CVODE settings
  model$set_settings(c("atol","rtol","maxsteps","maxerr","maxnonlin","maxconvfail","minimum"),
                     c(1e-14,1e-6,1e4,20,20,20, -1e-6))
  model$set_settings(c("silent","positive", "force_positive"), c(TRUE, TRUE,TRUE))
  model$set_settings("maxtime", 600)
  # Assign parameters from literature to the model
  filter = which(names(parameters) %in% names(model$Parameters$Values))
  model$set_parameters(names(parameters[filter]),unname(parameters[filter]))
  # Simulate a LiCOR chamber with 10% blue (Ib) - 90% red (Ir) light
  model$set_forcings("Ib", cbind(c(0,1), c(PAR,PAR)*0.1))
  model$set_forcings("Ir", cbind(c(0,1), c(PAR,PAR)*0.9))
  model$set_forcings("Ig", cbind(c(0,1), c(0,0)))
  model$set_forcings("CO2R", cbind(c(0,1), c(400,400)))
  model$set_forcings("H2OR", cbind(c(0,1), c(20,20)))
  model$set_forcings("Ta", cbind(c(0,1), c(298.15, 298.15)))
  model$set_forcings("Tl", cbind(c(0,1), c(298.15, 298.15)))
  model$set_states("Ci", 400)
  model$set_states("Cc", 400)
  model$set_states("Ccyt", 400)
  # Some model variants lack the PR state; ignore the error in that case
  tryCatch(model$set_states("PR", 25), error = function(x) NULL)
  # Calculate steady-state
  model$set_time(c(0,7200))
  steadyState = cvode(model)[2,]
}
# Steady-state light response of the full model over 0-1200 PAR
test = run_steady(generate_MiniModel_model(), 0)  # template row for dimensions/names
APARdata = matrix(NA, nrow = 30, ncol = length(test))
colnames(APARdata) = names(test)
PARdata = seq(0,1200, l = 30)
for(i in 1:30) {
  APARdata[i,] = run_steady(generate_MiniModel_model(), PARdata[i])
}
# Smooth A-PAR response used to interpolate steady-state A at arbitrary PAR
APAR = loess(A~PAR, data = data.frame(A = APARdata[,"A"], PAR = PARdata), span = 0.5,
             control = loess.control(surface = "direct", statistics = "exact"))
# Visual check of the fit
plot(PARdata, APARdata[,"A"])
lines(PARdata, predict(APAR, newdata=data.frame(PAR = PARdata)))
# Calculate the steady-state light response of the QSS model variant
# (all regulatory processes effectively instantaneous).
test = run_steady(generate_MiniModelQss_model(), 0)  # template row for dimensions/names
APARdata2 = matrix(NA, nrow = 30, ncol = length(test))
colnames(APARdata2) = names(test)
# NOTE: a dead, never-used parameterized `model` object was removed here -
# the loop always builds a fresh Qss model (run_steady sets its parameters).
for(i in seq_along(PARdata)) {
  APARdata2[i,] = run_steady(generate_MiniModelQss_model(), PARdata[i])
}
# Visual comparison: QSS (black) vs full dynamic model (red) steady states
plot(PARdata, APARdata2[,"A"])
points(PARdata, APARdata[,"A"], col = 2)
# Smooth responses of A, Rp, RuBP and fRB vs PAR for the QSS model.
# NOTE: APAR is deliberately refitted here to the QSS steady states,
# replacing the fit to the full model computed above.
APAR = loess(A~PAR, data = data.frame(A = APARdata2[,"A"], PAR = PARdata), span = 0.5,
             control = loess.control(surface = "direct", statistics = "exact"))
RpPAR = loess(A~PAR, data = data.frame(A = APARdata2[,"Rp"], PAR = PARdata), span = 0.5,
              control = loess.control(surface = "direct", statistics = "exact"))
RuBPPAR = loess(A~PAR, data = data.frame(A = APARdata2[,"RuBP"], PAR = PARdata), span = 0.5,
                control = loess.control(surface = "direct", statistics = "exact"))
fRBPAR = loess(A~PAR, data = data.frame(A = APARdata2[,"fRB"], PAR = PARdata), span = 0.5,
               control = loess.control(surface = "direct", statistics = "exact"))
lines(PARdata, predict(APAR, newdata=data.frame(PAR = PARdata)))
# Simulate all mutants and periods -------------------------------------------------------------------------------
if(.Platform$OS.type == "windows") {
  cl <- makeCluster(8)
} else {
  cl <- makeForkCluster(8)
}
registerDoParallel(cl)
# Rate constants controlling each regulatory process; multiplying one by 1e6
# (below) makes that process effectively instantaneous (a "virtual mutant").
# The list order matches the output columns R, RB, gs, qE, qI, qM, PR.
pars = list(parameters["KiR"],
            parameters["Krca"],
            parameters["Kgsi"],
            parameters[c("KdqEp", "KdqEz")],
            parameters["Krep25"],
            parameters["Kialpha25"],
            parameters["kPR"])
W = length(pars)
periods = c(0.1, 0.3, 0.5, 1, 2, 3, 5, 10, 15, 30, 60, 90, 150, 225)
# For each period: A time courses for control, each single virtual mutant,
# all mutants combined ("All"), the structural QSS model ("QSS"), and the PAR
# trace itself (stored in "QSS2" and converted below to the steady-state A
# expected at that instantaneous PAR).
LF = foreach(period = periods, .packages = "MiniModel") %dopar% {
  N = length(seq(0,1800,by = min(period/10, 1)))
  out = matrix(NA, ncol = W + 4, nrow = N)
  colnames(out) = c("control", "R", "RB", "gs", "qE", "qI", "qM", "PR", "All", "QSS", "QSS2")
  PARdata = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "PAR")
  out[,1] = lightflecks(generate_MiniModel_model(), period, c(50,1000))
  for(i in 2:(W + 1)) out[,i] = lightflecks(generate_MiniModel_model(), period, c(50,1000), pars[[i - 1]]*1e6)
  out[,W + 2] = lightflecks(generate_MiniModel_model(), period, c(50,1000), unlist(pars)*1e6)
  out[,W + 3] = lightflecks(generate_MiniModelQss_model(), period, c(50,1000))
  out[,W + 4] = PARdata
  out
}
stopCluster(cl)
# Check that average PAR is always the same
for(i in 1:length(LF)) {
  print(mean(LF[[i]][,"QSS2"]))
}
# Replace the stored PAR trace by the steady-state A expected at that PAR
for(i in 1:length(LF)) {
  LF[[i]][,"QSS2"] = predict(APAR, newdata=data.frame(PAR = as.numeric(LF[[i]][,"QSS2"])))
}
save(LF, file = "Intermediate/LFTotal.RData")
# Loading right after saving is redundant in a full run; it allows re-running
# the script from this point without repeating the simulations.
load("Intermediate/LFTotal.RData")
# Compute relative change in A due to virtual mutant
# LF: list (one element per period) of matrices of A time courses whose first
# column is the control. For each period, computes the relative change of
# time-integrated A of every column vs the control over (i) the whole
# simulation, (ii) the high-light phases only and (iii) the low-light phases.
# Relies on the global `periods` vector matching the order of LF.
# Returns list(whole, high, low), each a matrix with a leading Period column.
# NOTE(review): the square-wave mask is rebuilt here with seq(..., l = ...)
# whereas the simulation forcing used seq(..., by = period); confirm the
# reconstructed phases align with the simulated ones.
processLF = function(LF) {
  LFeffect = LF
  LFhigh = LF
  LFlow = LF
  for(i in 1:length(LFeffect)) {
    period = periods[i]
    # Reconstruct the square-wave light mask (0 = low light, 1 = high light)
    # at the same output times as the simulation
    time = sort(c(seq(0,1800, l = 1800/period), seq(1e-2,1800 + 1e-2, l = 1800/period)))
    PAR = rep(c(0,1,1,0), times = 1800/period/2)
    PAR = approx(time, PAR, seq(0,1800,by = min(period/10, 1)))$y
    print(c(length(PAR), nrow(LFeffect[[i]])))  # debug output: lengths should match
    # Time-integrated CO2 assimilation, relative to control (first column)
    LFeffect[[i]] = colSums(LFeffect[[i]])
    LFeffect[[i]] = (LFeffect[[i]] - LFeffect[[i]][1])/LFeffect[[i]][1]
    # Low irradiance average CO2 assimilation
    LFlow[[i]] = colSums(LFlow[[i]][which(PAR <= 0.5),])
    LFlow[[i]] = (LFlow[[i]] - LFlow[[i]][1])/LFlow[[i]][1]
    # High irradiance average CO2 assimilation
    LFhigh[[i]] = colSums(LFhigh[[i]][which(PAR >= 0.5),])
    LFhigh[[i]] = (LFhigh[[i]] - LFhigh[[i]][1])/LFhigh[[i]][1]
  }
  # Stack the per-period results and prepend the period as the first column
  LFeffect = do.call("rbind", LFeffect)
  LFeffect = cbind(Period = periods, LFeffect)
  colnames(LFeffect)[1] = "Period"
  LFlow = do.call("rbind", LFlow)
  LFlow = cbind(Period = periods, LFlow)
  colnames(LFlow)[1] = "Period"
  LFhigh = do.call("rbind", LFhigh)
  LFhigh = cbind(Period = periods, LFhigh)
  colnames(LFhigh)[1] = "Period"
  return(list(LFeffect, LFhigh, LFlow))
}
LFlist = processLF(LF)
LFeffect = LFlist[[1]]  # whole simulation
LFhigh = LFlist[[2]]    # high-light phases only
LFlow = LFlist[[3]]     # low-light phases only
# Figure: relative effect of each virtual mutant (panel A) and of all
# mutations combined vs the quasi-steady-state expectation (panel B),
# as a function of lightfleck duration (log x axis)
png("Output/figureLF.png", width = 14, height = 7, pointsize = 10, units = "cm",
    res = 600, bg = "white", antialias = "default")
par(mfrow = c(1,2), xaxs = "i", yaxs = "i", las = 1, mar = c(4.0,4.2,0.5,1), mgp = c(2,1,0))
with(as.data.frame(LFeffect), {
  plot(Period, R*100, t = "l", log = "x", ylim = c(-2,40), xaxt = "n", xlim = c(0.1,300),
       col = 1, lty = 1,
       xlab = "Lightfleck duration (s)", ylab = expression(italic(Delta*A/A)~("%")))
  lines(Period, RB*100, col = 2, lty = 2, t = "l")
  lines(Period, gs*100, col = 3, lty = 3, t = "l")
  lines(Period, qE*100, col = 4, lty = 4, t = "l")
  lines(Period, qI*100, col = 5, lty = 5, t = "l")
  lines(Period, qM*100, col = 6, lty = 6, t = "l")
  lines(Period, PR*100, col = 7, lty = 7, t = "l")
  axis(1, at = c(0.1, 1, 10, 100,200,300), labels = c("0.1","1", "10","100","", "300"))
  axis(1, at = 300, labels = "300")
  # minor (unlabelled) ticks for the log axis
  axis(1, at = seq(0.2,0.9,0.1), tcl = -0.2, labels = NA)
  axis(1, at = seq(2,9,1), tcl = -0.2, labels = NA)
  axis(1, at = seq(20,100,10), tcl = -0.2, labels = NA)
  legend("topleft", c("R", "RB", "gs", "qE", "qI", "qM", "PR"), col = 1:7, lty = 1:7,
         bty = "n", ncol = 2, cex = 0.65, x.intersp = 0.5)
  abline(h= 0, lty = 1, col = "gray")
  text(10,38,labels = "A", cex = 1.2)
})
with(as.data.frame(LFeffect), {
  plot(Period, All*100, t = "l", log = "x", ylim = c(-10,40), xaxt = "n", xlim = c(0.1,300),
       col = 1, lty = 1,
       xlab = "Lightfleck duration (s)", ylab = expression(italic(Delta*A/A)~("%")))
  lines(Period, QSS2*100, col = 2 , lty = 2, t = "l")
  #lines(Period, QSS2*100, col = 3 , lty = 3, t = "l")
  axis(1, at = c(0.1, 1, 10, 100,200,300), labels = c("0.1","1", "10","100","", "300"))
  axis(1, at = 300, labels = "300")
  axis(1, at = seq(0.2,0.9,0.1), tcl = -0.2, labels = NA)
  axis(1, at = seq(2,9,1), tcl = -0.2, labels = NA)
  axis(1, at = seq(20,100,10), tcl = -0.2, labels = NA)
  legend("topleft", c("All", "QSS"), col = 1:2, lty = 1:2,
         bty = "n", ncol = 1, cex = 0.65, x.intersp = 0.5)
  abline(h= 0, lty = 1, col = "gray")
  text(10,38,labels = "B", cex = 1.2)
})
dev.off()
# Plot dynamics of RuBP and photosynthesis during lightflecks -------------
# Get dynamics of RuBP
# Re-simulate the control and the "All" virtual mutant to extract the RuBP
# pool and Rubisco activation state (fRB) for two contrasting lightfleck
# durations (0.5 s and 90 s). Same rate-constant list as above.
pars = list(parameters["KiR"],
            parameters["Krca"],
            parameters["Kgsi"],
            parameters[c("KdqEp", "KdqEz")],
            parameters["Krep25"],
            parameters["Kialpha25"],
            parameters["kPR"])
W = length(pars)
period = 0.5
N = length(seq(0,1800,by = min(period/10, 1)))
# RuBP dynamics at period = 0.5 s
RuBP = matrix(NA, ncol = 3, nrow = N)
colnames(RuBP) = c("All", "control", "QSS2")
PARdata = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "PAR")
RuBP[,1] = lightflecks(generate_MiniModel_model(), period, c(50,1000), unlist(pars)*1e6, variable = "RuBP")
RuBP[,2] = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "RuBP")
RuBP[,3] = PARdata
# Convert the PAR trace into the steady-state RuBP expected at that PAR
RuBP[,3] = predict(RuBPPAR, newdata=data.frame(PAR = as.numeric(RuBP[,3])))
period = 90
N = length(seq(0,1800,by = min(period/10, 1)))
# RuBP dynamics at period = 90 s
RuBP2 = matrix(NA, ncol = 3, nrow = N)
colnames(RuBP2) = c("All", "control", "QSS2")
PARdata = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "PAR")
RuBP2[,1] = lightflecks(generate_MiniModel_model(), period, c(50,1000), unlist(pars)*1e6, variable = "RuBP")
RuBP2[,2] = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "RuBP")
RuBP2[,3] = PARdata
RuBP2[,3] = predict(RuBPPAR, newdata=data.frame(PAR = as.numeric(RuBP2[,3])))
# Rubisco activity (fRB), same two periods
period = 0.5
N = length(seq(0,1800,by = min(period/10, 1)))
fRB = matrix(NA, ncol = 3, nrow = N)
colnames(fRB) = c("All", "control", "QSS2")
PARdata = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "PAR")
fRB[,1] = lightflecks(generate_MiniModel_model(), period, c(50,1000), unlist(pars)*1e6, variable = "fRB")
fRB[,2] = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "fRB")
fRB[,3] = PARdata
fRB[,3] = predict(fRBPAR, newdata=data.frame(PAR = as.numeric(fRB[,3])))
PARdata5 = PARdata  # keep the 0.5 s PAR trace (PARdata is overwritten below)
period = 90
N = length(seq(0,1800,by = min(period/10, 1)))
fRB2 = matrix(NA, ncol = 3, nrow = N)
PARdata = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "PAR")
colnames(fRB2) = c("All", "control", "QSS2")
fRB2[,1] = lightflecks(generate_MiniModel_model(), period, c(50,1000), unlist(pars)*1e6, variable = "fRB")
fRB2[,2] = lightflecks(generate_MiniModel_model(), period, c(50,1000), variable = "fRB")
fRB2[,3] = PARdata
fRB2[,3] = predict(fRBPAR, newdata=data.frame(PAR = as.numeric(fRB2[,3])))
# Supplemental figure S6
# Close-ups of A, RuBP and fRB during lightflecks, for periods 0.5 s (left
# column, LF[[3]]) and 90 s (right column, LF[[12]]). The green line is the
# ideal square wave between the min and max of the QSS2 reference; dashed
# vertical lines mark the light transitions detected from the PAR trace.
png("Output/figureLightFlecksSample.png", width = 14, height = 18, pointsize = 10, units = "cm",
    res = 600, bg = "white", antialias = "default")
par(mfcol = c(3,2), mar = c(4,4.5,0.5,0.65), las = 1, xaxs = "i", yaxs = "i")
# Window of one full 0.5/0.5 s cycle (20 output steps of 0.05 s) late in the run
t0 = 10000 + 11
t1 = t0 + 20
l = t0:t1 - t0
# Period= 0.5 s
plot(0.05*l, LF[[3]][t0:t1,"All"], t = "l", ylim = c(0,14),
     ylab = expression(A~(mu*mol~m^{-2}~s^{-1})),
     xlab = "Time (s)")
lines(0.05*l, LF[[3]][t0:t1,"control"], col = 2)
minQSS2 = min(LF[[3]][t0:t1,"QSS2"])
maxQSS2 = max(LF[[3]][t0:t1,"QSS2"])
QSStime = c(0,0,10,10,20)
lines(0.05*QSStime, c(maxQSS2, minQSS2, minQSS2, maxQSS2, maxQSS2), col = 3)
abline(v = 0.05*(which(abs(diff(PARdata5[t0:t1])) > 100) - 1), lty = 2)
text(0.05,13.5, "A")
text(c(0.25, 0.75),13.5, c(50,1000))  # PAR level of each phase
legend("bottomright", c("control", "All", "QSS"), col = c(2,1,3), lty = 1, bty = "n")
plot(0.05*l, RuBP[t0:t1,"All"], t = "l", ylim = c(0,100),
     ylab = expression(RuBP~(mu*mol~m^{-2}~s^{-1})),
     xlab = "Time (s)")
lines(0.05*l, RuBP[t0:t1,"control"], col = 2)
minQSS2 = min(RuBP[t0:t1,"QSS2"])
maxQSS2 = max(RuBP[t0:t1,"QSS2"])
QSStime = c(0,0,10,10,20)
lines(0.05*QSStime, c(maxQSS2, minQSS2, minQSS2, maxQSS2, maxQSS2), col = 3)
abline(v = 0.05*(which(abs(diff(PARdata5[t0:t1])) > 100) - 1), lty = 2)
text(0.05,95, "B")
text(c(0.25, 0.75),95, c(50,1000))
plot(0.05*l, fRB[t0:t1,"All"], t = "l", ylim = c(0,1),
     ylab = expression(fRB~(mu*mol~m^{-2}~s^{-1})),
     xlab = "Time (s)")
lines(0.05*l, fRB[t0:t1,"control"], col = 2)
minQSS2 = min(fRB[t0:t1,"QSS2"])
maxQSS2 = max(fRB[t0:t1,"QSS2"])
QSStime = c(0,0,10,10,20)
lines(0.05*QSStime, c(maxQSS2, minQSS2, minQSS2, maxQSS2, maxQSS2), col = 3)
abline(v = 0.05*(which(abs(diff(PARdata5[t0:t1])) > 100) - 1), lty = 2)
text(0.05,0.95, "C")
text(c(0.25, 0.75),0.95, c(50,1000))
# Period = 90 s: last full cycle of the 90 s simulation (1 s output steps)
t0 = 1801 - 270
t1 = 1801 - 90
l = t0:t1 - t0
plot(l, LF[[12]][t0:t1,"All"], t = "l", ylim = c(0,14),
     ylab = expression(A~(mu*mol~m^{-2}~s^{-1})),
     xlab = "Time (s)", xaxt = "n")
axis(1, seq(0,180,30))
lines(l, LF[[12]][t0:t1,"control"], col = 2)
minQSS2 = min(LF[[12]][t0:t1,"QSS2"])
maxQSS2 = max(LF[[12]][t0:t1,"QSS2"])
QSStime = c(0,0,90,90,180)
lines(QSStime, c(maxQSS2, minQSS2, minQSS2, maxQSS2, maxQSS2), col = 3)
abline(v = (which(abs(diff(PARdata[t0:t1])) > 100) - 1), lty = 2)
text(9,13.5, "D")
text(c(45, 135),13.5, c(50,1000))
plot(l, RuBP2[t0:t1,"All"], t = "l", ylim = c(0,100),
     ylab = expression(RuBP~(mu*mol~m^{-2}~s^{-1})),
     xlab = "Time (s)", xaxt = "n")
axis(1, seq(0,180,30))
lines(l, RuBP2[t0:t1,"control"], col = 2)
minQSS2 = min(RuBP2[t0:t1,"QSS2"])
maxQSS2 = max(RuBP2[t0:t1,"QSS2"])
QSStime = c(0,0,90,90,180)
lines(QSStime, c(maxQSS2, minQSS2, minQSS2, maxQSS2, maxQSS2), col = 3)
abline(v =(which(abs(diff(PARdata[t0:t1])) > 100) - 1), lty = 2)
text(9,95, "E")
text(c(45, 135),95, c(50,1000))
plot(l, fRB2[t0:t1,"All"], t = "l", ylim = c(0,1),
     ylab = expression(fRB~(mu*mol~m^{-2}~s^{-1})),
     xlab = "Time (s)", xaxt = "n")
axis(1, seq(0,180,30))
lines(l, fRB2[t0:t1,"control"], col = 2)
minQSS2 = min(fRB2[t0:t1,"QSS2"])
maxQSS2 = max(fRB2[t0:t1,"QSS2"])
QSStime = c(0,0,90,90,180)
lines(QSStime, c(maxQSS2, minQSS2, minQSS2, maxQSS2, maxQSS2), col = 3)
abline(v = (which(abs(diff(PARdata[t0:t1])) > 100) - 1), lty = 2)
text(9,0.95, "F")
text(c(45, 135),0.95, c(50,1000))
dev.off()
# Effect of frequency -----------------------------------------------------
# Simulate control model at different frequencies
# Calculate accumulated photosynthesis under a square-wave (lightfleck) regime.
# model  - MiniModel object (any variant); modified in place via its set_* methods.
# period - duration of each light phase (s); light alternates between the two
#          PAR levels every `period` seconds.
# PARs   - c(low, high) incident PAR (umol m-2 s-1), split 10% blue / 90% red.
# Returns the mean net CO2 assimilation over the second half of a 1 h
# fluctuating-light simulation (once the response is quasi-periodic),
# computed from the accumulated-assimilation state variable `sumA`.
lightflecksSumA = function(model, period, PARs) {
  PAR1 = PARs[1]  # background (low) PAR
  PAR2 = PARs[2]  # lightfleck (high) PAR
  # CVODE settings (tight tolerances; small negative values clipped at -1e-6)
  model$set_settings(c("atol","rtol","maxsteps","maxerr","maxnonlin","maxconvfail","minimum"),
                     c(1e-10,1e-8,5e4,20,20,20, -1e-6))
  model$set_settings(c("silent","positive", "force_positive"), c(TRUE, TRUE,TRUE))
  model$set_settings("maxtime", 1000)
  # Assign parameters from literature to the model (only those this variant has)
  filter = which(names(parameters) %in% names(model$Parameters$Values))
  model$set_parameters(names(parameters[filter]),unname(parameters[filter]))
  # Simulate a LiCOR chamber with 10% blue (Ib) - 90% red (Ir) light
  model$set_forcings("Ib", cbind(c(0,1), c(PAR1,PAR1)*0.1))
  model$set_forcings("Ir", cbind(c(0,1), c(PAR1,PAR1)*0.9))
  model$set_forcings("Ig", cbind(c(0,1), c(0,0)))
  model$set_forcings("CO2R", cbind(c(0,1), c(400,400)))
  model$set_forcings("H2OR", cbind(c(0,1), c(20,20)))
  model$set_forcings("Ta", cbind(c(0,1), c(298.15, 298.15)))
  model$set_forcings("Tl", cbind(c(0,1), c(298.15, 298.15)))
  model$set_states("Ci", 400)
  model$set_states("Cc", 400)
  model$set_states("Ccyt", 400)
  # Some model variants lack the PR state; ignore the error in that case
  tryCatch(model$set_states("PR", 25), error = function(x) NULL)
  # Calculate steady-state at the low PAR level and use it as initial state
  model$set_time(c(0,1800))
  steadyState = cvode(model)[2,names(model$States$Values)]
  model$set_states(names(steadyState), steadyState)
  model$set_states("sumA", 0)  # reset the assimilation accumulator
  # Simulate transient - square wave light determined by period
  # (dt produces a steep but finite edge for the forcing interpolation)
  dt = min(1e-2, period/10)
  timeI = sort(c(seq(0,3600, by = period), seq(dt,3600 + dt, by = period)))
  model$set_forcings("Ib", cbind(timeI, c(rep(c(PAR1,PAR2,PAR2,PAR1)*0.1, times = 3600/period/2), PAR1*0.1,PAR2*0.1)))
  model$set_forcings("Ir", cbind(timeI, c(rep(c(PAR1,PAR2,PAR2,PAR1)*0.9, times = 3600/period/2), PAR1*0.9,PAR2*0.9)))
  model$set_time(seq(0,3600,by = 1))
  # Mean A over the second half hour: increment of accumulated A / elapsed time
  diff(cvode(model)[c(1800,3600),"sumA"])/1800
}
# Run the lightfleck simulations in parallel, one worker per period.
# Fork clusters are cheaper but unavailable on Windows, hence the PSOCK fallback.
if(.Platform$OS.type == "windows") {
  cl <- makeCluster(8)
} else {
  cl <- makeForkCluster(8)
}
registerDoParallel(cl)
# Lightfleck durations (s) to explore
periods = c(0.1, 0.3, 0.5, 1, 2, 3, 5, 10, 15, 30, 60, 90, 150, 225, 300)
# Mean assimilation for square waves between a background and a fleck PAR level
LF1000 = foreach(period = periods, .packages = "MiniModel") %dopar% {
  lightflecksSumA(generate_MiniModel_model(), period, c(50,1000))
}
LF800 = foreach(period = periods, .packages = "MiniModel") %dopar% {
  lightflecksSumA(generate_MiniModel_model(), period, c(50,800))
}
# BUGFIX: this scenario is named 150-1000 and is later compared against
# constant light at mean(c(150, 1000)), but it was simulated with c(150, 800).
LF150_1000 = foreach(period = periods, .packages = "MiniModel") %dopar% {
  lightflecksSumA(generate_MiniModel_model(), period, c(150,1000))
}
LF600 = foreach(period = periods, .packages = "MiniModel") %dopar% {
  lightflecksSumA(generate_MiniModel_model(), period, c(50,600))
}
LF400 = foreach(period = periods, .packages = "MiniModel") %dopar% {
  lightflecksSumA(generate_MiniModel_model(), period, c(50,400))
}
stopCluster(cl)
# "Flashing steady-state" reference: average of the steady-state A at the two
# PAR levels (what A would be if all processes adjusted instantaneously).
AQSS1000 = mean(predict(APAR, newdata = data.frame(PAR = c(50,1000))))
AQSS800 = mean(predict(APAR, newdata = data.frame(PAR = c(50,800))))
AQSS600 = mean(predict(APAR, newdata = data.frame(PAR = c(50,600))))
AQSS400 = mean(predict(APAR, newdata = data.frame(PAR = c(50,400))))
# "Constant light" reference: steady-state A at the time-averaged PAR
# (the average PAR is the same in all regimes by construction).
Aconst = predict(APAR, newdata = data.frame(PAR = mean(c(50,1000))))
# Mean A vs lightfleck duration for the 50/1000 regime, with both references
png(file = "LightfleckDuration.png", width = 7, height = 4, units = "in",
    bg = "transparent", res = 1000)
par(mfrow = c(1,1), las = 1, yaxs = "i", xaxs = "i", mar = c(4,5.5,0.5,0.7),
    cex.axis = 1.2, cex.lab = 1.2, lwd = 1.2)
plot(periods, unlist(LF1000), ylim = c(4,13), log = "x", t = "o",
     xlab = "Lightfleck duration (s)",
     ylab = expression(bar(A)~(mu*mol~m^{-2}~s^{-1})))
text(100, 5, "Flashing dynamic")
abline(h = Aconst)
text(100, 12, "Constant light")
abline(h = AQSS1000, lty = 2)
text(90, 8, "Flashing steady-state")
dev.off()
# Same kind of plot for the 150/1000 regime (proposal figure)
png(file = "FrequencyProposal.png", width = 5, height = 4, units = "in",
    bg = "transparent", res = 1000)
par(mfrow = c(1,1), las = 1, yaxs = "i", xaxs = "i", mar = c(4,5.5,0.5,0.7),
    cex.axis = 1.2, cex.lab = 1.2, lwd = 1.2)
plot(periods, unlist(LF150_1000), ylim = c(8,13), log = "x", t = "o",
     xlab = "Fluctuation duration (s)",
     ylab = expression(Photosynthesis~(mu*mol~m^{-2}~s^{-1})),
     xaxt = "n")
axis(1, at = c(0.1,0.5,5,50))
text(100, 9, "Fluctuating")
abline(h = predict(APAR, newdata = data.frame(PAR = mean(c(150,1000)))))
text(100, 12.7, "Constant")
dev.off()
# Steady-state light-response curve, with construction lines that illustrate
# why averaging over a convex/concave response (Jensen's inequality) makes
# mean A under fluctuating light differ from A at the mean light
png(file = "LRC.png", width = 5, height = 4, units = "in",
    bg = "transparent", res = 1000)
par(mfrow = c(1,1), las = 1, yaxs = "i", xaxs = "i", mar = c(4,5.5,0.5,1.2),
    cex.axis = 1.2, cex.lab = 1.2, lwd = 1)
plot(PARdata, predict(APAR, newdata=data.frame(PAR = PARdata)), t = "l",
     ylab = expression(Photosynthesis~(mu*mol~m^{-2}~s^{-1})),
     xlab = expression(Light~intensity~(mu*mol~m^{-2}~s^{-1})),
     ylim = c(-1,13))
# Explain the non-linear effect
A100 = predict(APAR, newdata=data.frame(PAR = 100))
lines(c(100, 100), c(-10,A100), lty = 2, lwd = 1)
lines(c(0, 100), c(A100,A100), lty = 2, lwd = 1)
A600 = predict(APAR, newdata=data.frame(PAR = 600))
lines(c(600, 600), c(-10,A600), lty = 2, lwd = 1)
lines(c(0, 600), c(A600,A600), lty = 2, lwd = 1)
muA = mean(c(A100, A600))
A350 = predict(APAR, newdata=data.frame(PAR = 350))
lines(c(350, 350), c(-10,A350), lty = 2, lwd = 1)
lines(c(0, 350), c(A350,A350), lty = 2, lwd = 1)
lines(c(0,100), c(muA, muA))
dev.off()
# Calculate cutoff for each lightfleck intensity: the duration at which the
# dynamic response crosses the flashing steady-state reference, found by
# inverse linear interpolation with approx()
cut1000 = approx(unlist(LF1000) - AQSS1000, periods, 0)$y
cut800 = approx(unlist(LF800) - AQSS800, periods, 0)$y
cut600 = approx(unlist(LF600) - AQSS600, periods, 0)$y
cut400 = approx(unlist(LF400) - AQSS400, periods, 0)$y
plot(c(400,600, 800, 1000), c(cut400, cut600, cut800, cut1000))
# Decorative induction/relaxation trace (axes suppressed, for presentations).
# NOTE(review): `control` as built by run() exposes a column "A", not
# "Photo" - confirm this plot is meant to use a LiCOR data frame instead.
png(file = "InductionRelaxation.png", width = 6, height = 3, units = "in",
    bg = "transparent", res = 1000)
par(mar = c(0.5,0.5,0.5,0.5), bg = "transparent")
with(control, plot(time, Photo, t = "l", lwd = 4, col = "darkgreen", xaxt = "n",
                   yaxt = "n", bg = "transparent", bty = "n"))
dev.off()
|
## JavaScript that dis/enables the ABILITY to click the tab (without changing aesthetics)
# app_jscode <-
# "shinyjs.disableTab = function(name) {
# var tab = $('.nav li a[data-value=' + name + ']');
# tab.bind('click.tab', function(e) {
# e.preventDefault();
# return false;
# });
# tab.addClass('disabled');
# }
# shinyjs.enableTab = function(name) {
# var tab = $('.nav li a[data-value=' + name + ']');
# tab.unbind('click.tab');
# tab.removeClass('disabled');
# }"
## css snipit that makes it LOOK like we are/n't able click the tab (with outchanging functionality)
## Custom CSS injected via shinyjs::inlineCSS() in the UI below. It
## (i) greys out nav tabs carrying the 'disabled' class, and (ii) restyles
## progress bars, sliders, radio buttons and checkboxes in the DRomics
## brown (#9c5c16). NOTE: everything below is ONE string literal - the
## indentation is part of the string.
app_css <-
  ".nav li a.disabled {
  background-color: #aaa !important;
  color: #333 !important;
  cursor: not-allowed !important;
  border-color: #aaa !important;
  }
  .progress-bar {
  background-color: #9c5c16;
  }
  .js-irs-0 .irs-single, .js-irs-0 .irs-bar-edge, .js-irs-0 .irs-bar {
  background: #9c5c16;
  border-top: 1px #9c5c16 ;
  border-bottom: 1px #9c5c16 ;
  }
  label > input[type='radio'] + *::before {
  content: '';
  margin: 4px 0 0;
  width: 13px;
  height: 13px;
  position: absolute;
  margin-left: -20px;
  border-radius: 50%;
  border-style: solid;
  border-width: 0.1rem;
  border-color: #9c5c16;
  }
  label > input[type='radio']:checked + *::before {
  background: radial-gradient(white 0%, white 30%, #9c5c16 30%, #9c5c16);
  border-color: #9c5c16;
  }
  label > input[type='checkbox'] {
  opacity: 0;
  position: absolute;
  }
  label > input[type='checkbox'] + *::before {
  content: '';
  position: absolute;
  margin: 4px 0 0;
  margin-left: -20px;
  align: center;
  width: 13px;
  height: 13px;
  margin-right: 1rem;
  border-radius: 0%;
  border-style: solid;
  border-width: 0.1rem;
  border-color: #9c5c16;
  }
  label > input[type='checkbox']:checked + *::before {
  content: '';
  width: 13px;
  height: 13px;
  background-color: #9c5c16;
  }
  "
# User interface of the DRomicsInterpreter-shiny application.
# Layout: a home tab (presentation, links, citations, partner logos),
# four workflow-step tabs (1: import & merge, 2: trend/sensitivity plots,
# 3: BMD plots, 4: curves plot) and a final tab exposing the equivalent
# R code. All widget IDs are referenced by the matching server function.
ui <- fluidPage(
  shinyjs::useShinyjs(),
  # shinyjs::extendShinyjs(text = app_jscode, functions = c('disableTab','enableTab')),
  shinyjs::inlineCSS(app_css),
  titlePanel(
    tags$head(tags$link(rel = "icon", type = "image/png", href = "logodromics-dr.png"), tags$title(" DRomicsInterpreter-shiny"))
  ),
  br(),
  navbarPage(title = "",
    # Home tab: presentation, resources, citations and partner logos
    tabPanel(img(src = "logodromics.png", width = 180),
      fluidRow(
        br(), br(),
        HTML("<center><font face=verdana size=6 color=#9c5c16>Welcome to the DRomicsInterpreter-shiny application</font></center>"),
        HTML("<center><font face=verdana size=5 color=#9c5c16>A second workflow for interpretation in light of a biological annotation</font></center>"),
        br(), br(),
        fixedRow(column(10, offset = 1,
          br(),
          p(strong("Links and resources")),
          p("The DRomicsInterpreter-shiny application runs on the ",
            a("shiny server of the LBBE", href = "http://lbbe-shiny.univ-lyon1.fr/", TARGET="_blank", style="color:#f28d0f;"),
            "with the development version of the DRomics package (available on ",
            a("Github", href = "https://github.com/aursiber/DRomics", TARGET="_blank", style="color:#f28d0f;"),")."),
          p("DRomics is also an R package, available on ",
            a("CRAN", href = "https://cran.r-project.org/package=DRomics", TARGET="_blank", style="color:#f28d0f;"), ".",
            " You can find more information and help about the DRomicsInterpreter-shiny application and the DRomics package on ",
            a("this web page", href = "https://lbbe.univ-lyon1.fr/fr/dromics", TARGET="_blank", style="color:#f28d0f;"), "."),
          p(" Reading the vignette first and using the cheat sheet (both are available on ",
            a("this page", href = "https://lbbe.univ-lyon1.fr/fr/dromics", TARGET="_blank", style="color:#f28d0f;"),
            ") are recommended. "),
          br(),
          p(strong("Citation and publications")),
          p("If you use the DRomics Shiny App, you should cite:"),
          p(em("DRomics: a turnkey tool to support the use of the dose-response framework for omics data in ecological risk assessment."), br(),
            "Larras F, Billoir E, Baillard V, Siberchicot A, Scholz S, Wubet T, Tarkka M, Schmitt-Jansen M and Delignette-Muller ML (2018).",
            "Environmental Science & Technology.",
            a("https://doi.org/10.1021/acs.est.8b04752", href = "https://pubs.acs.org/doi/10.1021/acs.est.8b04752", TARGET = "_blank", style="color:#f28d0f;")),
          p("You can freely find this article at: ", a("https://hal.archives-ouvertes.fr/hal-02309919", href = "https://hal.archives-ouvertes.fr/hal-02309919", TARGET = "_blank", style="color:#f28d0f;")),
          br(),
          p("You can also look at the following citation for a complete example of use:"),
          p(em("A multi-omics concentration-response framework uncovers novel understanding of triclosan effects in the chlorophyte Scenedesmus vacuolatus."), br(),
            "Larras F, Billoir E, Scholz S, Tarkka M, Wubet T, Delignette-Muller ML, Schmitt-Jansen M (2020).",
            "Journal of Hazardous Materials.",
            a("https://doi.org/10.1016/j.jhazmat.2020.122727", href = "https://doi.org/10.1016/j.jhazmat.2020.122727", TARGET = "_blank", style="color:#f28d0f;")),
          br(),
          p(strong("Contact")),
          p("If you have any need that is not yet covered, any feedback on the package / Shiny app, or any training needs, feel free to email us at ", strong("dromics@univ-lyon1.fr"), "."),
          p("Issues can be reported on",
            a("https://github.com/aursiber/DRomics/issues", href = "https://github.com/aursiber/DRomics/issues", TARGET = "_blank", style="color:#f28d0f;"), ".")
        )),
        hr(style='width: 80%;'),
        br(),
        fixedRow(column(10, offset = 3,
          fillRow(flex = NA,
            a(img(src = "https://lbbe.univ-lyon1.fr/sites/default/files/icons/logo_1.svg", width = 220), href="https://lbbe.univ-lyon1.fr/", TARGET="_blank"),
            a(img(src = "logoVetAgroSup.jpg", height = 100, width = 120), href="http://www.vetagro-sup.fr/", TARGET="_blank"),
            a(img(src = "logoLyon1.png", height = 80, width = 380), href="https://www.univ-lyon1.fr/", TARGET="_blank"),
            a(img(src = "LogoUniversiteLorraine.png", height = 80, width = 180), href="http://www.univ-lorraine.fr/", TARGET="_blank"),
            style="text-align: center;"
          )
        ))
      )
    ),
    ####################################################################################
    ####### STEP 1 #####################################################################
    ####################################################################################
    tabPanel(HTML("<font face=verdana size=3 color=#9c5c16>Step 1</font>"),
      br(), HTML("<font face=verdana size=5 color=#9c5c16><b>Import and merge of DRomics results and annotation data</b></font>"), br(), br(), br(),
      fluidRow(
        column(1,
          numericInput("nbLevel", "Number of experimental levels", value = 1, min = 1, max = 10, step = 1)
        ),
        column(2, style = "margin-top: 25px;padding:20px;",
          shinyBS::bsButton("nblevel_help", label = "", icon = icon("info"), size = "small"),
          shinyBS::bsPopover("nblevel_help", "", texthelpnblevel, placement = "right", trigger = "hover", options = NULL)
        ),
        column(2,
          textInput("maxDoseXScale", "Maximal dose/concentration for definition of x-scale of plots", width = "100%")
        ),
        column(3, style = "margin-top: 25px;padding:20px;",
          shinyBS::bsButton("maxdosexscale_help", label = "", icon = icon("info"), size = "small"),
          shinyBS::bsPopover("maxdosexscale_help", "", texthelpmaxdosexscale, placement = "right", trigger = "hover", options = NULL)
        )
      ),
      br(),
      uiOutput("inputstep1"),
      fixedRow(
        div(align = "center", actionButton("buttonRunStep1", "Merge and Combine", icon = icon("object-group"), style='font-size:150%')), br(), br(),
        conditionalPanel(
          condition = "input.nbLevel > 1",
          span(textOutput("txtcolumnidannot"), style = 'color:#9c5c16;font-size:large;'), br(), br()
        ),
        shinyjs::hidden(div(id = 'text1_step1',
          style = 'color:#9c5c16; font-size:large;line-height: 50px;',
          "Structure of the data frame merged and combined")),
        verbatimTextOutput("strmergeddata"), br(), br(),
        shinyjs::hidden(div(id = 'text2_step1',
          style = 'color:#9c5c16; font-size:large;line-height: 50px;',
          "First 3 lines of each experimental level in the data frame merged and combined")),
        uiOutput("headmergeddata"),
        br(), br()
      )
    ),
    ####################################################################################
    ####### STEP 2 #####################################################################
    ####################################################################################
    tabPanel(HTML("<font face=verdana size=3 color=#9c5c16>Step 2</font>"),
      value = "step2",
      br(), HTML("<font face=verdana size=5 color=#9c5c16><b>Trend and sensitivity plots</b></font>"), br(), br(), br(),
      wellPanel(
        fixedRow(
          column(3, style='border-right:1px solid #9c5c16;',
            HTML("<font face=verdana size=3 color=#9c5c16><b>Selection of annotation groups to plot </b></font>"),
            shinyBS::bsButton("helplabel1step2", label = "", icon = icon("info"), size = "small", style="color:#9c5c16"),
            shinyBS::bsPopover("helplabel1step2", "", helplabel1step2, placement = "right", trigger = "hover", options = list(container = "body")),
            br(),
            br(),
            checkboxInput("keepAllExplev", label = HTML("<b>Keep all experimental levels</b>"), value = FALSE),
            br(),
            sliderInput("minNbItem", "Minimum for the number of items",
              width = "90%",
              min = 1, max = 10, step = 1, ticks = TRUE,
              value = 3),
            numericInput("BMDmax", label = "Maximum for the BMD summary value", value = 0, min = 0, step = 0.1, width = "70%")
          ),
          column(1,
            checkboxInput("BMDlogtransfoSensitivityplot", label = HTML("<b>Log transformation of the BMD</b>"), value = FALSE)
          ),
          column(1,
            radioButtons("BMDtype", label = "BMD type",
              choices = list("zSD" = "zSD",
                             "xfold" = "xfold")
            )
          ),
          column(1,
            radioButtons("BMDsummarysensitivityPlot", label = "BMD summary",
              choices = list("First quartile" = "first.quartile",
                             "Median" = "median",
                             "Median and IQR" = "median.and.IQR")
            )),
          column(2,
            # if several experimental levels
            conditionalPanel(condition = "input.nbLevel > 1",
              radioButtons("ordering_moreonelev", label = "Ordering of the annotations",
                choices = list("alphabetic order" = "alphaorder_moreonelev",
                               "ordered by total number of items in all the experimental levels" = "numbitemorder_moreonelev",
                               "specific order" = "specificorder_moreonelev"))
            ),
            # if only one experimental level
            conditionalPanel(condition = "input.nbLevel == 1",
              radioButtons("ordering_onelev", label = "Ordering of the annotations",
                choices = list("alphabetic order" = "alphaorder_onelev",
                               "ordered by number of items" = "numbitemorder_onelev",
                               "ordered by BMD summary value" = "BMDorder_onelev",
                               "specific order" = "specificorder_onelev"))
            )
          ),
          column(3,
            conditionalPanel(condition = "input.ordering_moreonelev == 'specificorder_moreonelev' | input.ordering_onelev == 'specificorder_onelev'",
              uiOutput("specificorder", style="font-size:85%;")
            )
          ),
          column(1,
            br(),
            div(align="right", actionButton("buttonRunStep2", "Run", icon = icon("fas fa-gear"), style='font-size:200%'))
          )
        )),
      fixedRow(
        column(6,
          shinyjs::hidden(div(id = 'text1_step2',
            style = 'color:#9c5c16; font-size:large;text-align: center;line-height: 150px;',
            "Trend plot")),
          plotOutput("trendplot", width = "100%", height = "900px"),
          br(), br(),
          div(align = "center", downloadButton("buttonDownloadTrendplot", "Download Trend Plot", icon = icon("fas fa-download")))),
        column(6,
          fixedRow(
            shinyjs::hidden(div(id = 'text2_step2',
              style = 'color:#9c5c16; font-size:large;text-align: center;line-height: 150px;',
              "Sensitivity plot")),
            plotOutput("sensitivityplot", width = "100%", height = "900px"),
            br(), br(),
            div(align = "center", downloadButton("buttonDownloadSensitivityplot", "Download Sensitivity Plot", icon = icon("fas fa-download")))
          )
        )
      ),
      br(), br(),
      fixedRow(
        shinyjs::hidden(div(id = 'text3_step2',
          style = 'color:#9c5c16; font-size:large;line-height: 50px;',
          "Structure of the data frame merged and combined")),
        verbatimTextOutput("filteredsorteddata"),
        downloadButton('downloadData', 'Download Data')
      ),
      br(), br(), br(), br()
    ),
    ####################################################################################
    ####### STEP 3 #####################################################################
    ####################################################################################
    tabPanel(HTML("<font face=verdana size=3 color=#9c5c16>Step 3</font>"),
      value = "step3",
      br(), HTML("<font face=verdana size=5 color=#9c5c16><b>BMD plots (with and without gradient)</b></font>"), br(), br(), br(),
      wellPanel(
        fixedRow(
          column(12,
            checkboxGroupInput("annotcheckboxBMDplot", label = "Choose at least one annotation"),
            actionButton("selectallBMDplot", "Select All"),
            actionButton("unselectallBMDplot", "Unselect All")
          ))),
      wellPanel(
        fixedRow(
          column(2,
            checkboxInput("addciBMDplot", label = HTML("<b>Add CI</b> (only for the BMD plot without gradient)"), value = FALSE),
            checkboxInput("BMDlogtransfoBMDplot", label = HTML("<b>Log transformation of the BMD</b>"), value = FALSE),
            checkboxInput("addlabelBMDplot", label = HTML("<b>Add labels</b>"), value = FALSE)
          ),
          column(2,
            radioButtons("facetbycolumnsBMDplot", label = "Facet by (for columns)",
              choices = list("Annotation" = "annotation",
                             "Experimental level" = "explevel")
            )),
          conditionalPanel(condition = "input.nbLevel > 1",
            column(2,
              radioButtons("facetbyrowsBMDplot", label = "Facet by (for rows)",
                choices = list("Annotation" = "annotation",
                               "Experimental level" = "explevel")
              ))
          ),
          column(2,
            fixedRow(
              checkboxInput("shapebyBMDplot", label = HTML("<b>Shape by trend</b>"), value = FALSE),
              checkboxInput("colorbyBMDplot", label = HTML("<b>Color by trend</b> (only for the BMD plot without gradient)"), value = FALSE)
            )
          ),
          column(1,
            br(),
            div(align="right", actionButton("buttonRunStep3", "Run", icon = icon("fas fa-gear"), style='font-size:200%'))
          )
        )
      ),
      fixedRow(
        column(6,
          shinyjs::hidden(div(id = 'text1_step3',
            style = 'color:#9c5c16; font-size:large;text-align: center;line-height: 150px;',
            "BMD plot")),
          plotOutput("bmdplot", width = "100%", height = "900px"),
          br(), br(),
          div(align = "center", downloadButton("buttonDownloadBMDplot", "Download BMD Plot", icon = icon("fas fa-download")))),
        column(6,
          shinyjs::hidden(div(id = 'text2_step3',
            style = 'color:#9c5c16; font-size:large;text-align: center;line-height: 150px;',
            "BMD plot with gradient")),
          plotOutput("bmdplotwithgradient", width = "100%", height = "900px"),
          br(), br(),
          div(align = "center", downloadButton("buttonDownloadBMDplotwithgradient", "Download BMD Plot with gradient", icon = icon("fas fa-download")))
        )
      ),
      br(), br(), br(), br()
    ),
    ####################################################################################
    ####### STEP 4 #####################################################################
    ####################################################################################
    tabPanel(HTML("<font face=verdana size=3 color=#9c5c16>Step 4</font>"),
      value = "step4",
      br(), HTML("<font face=verdana size=5 color=#9c5c16><b>Curves plot</b></font>"), br(), br(), br(),
      wellPanel(
        fixedRow(
          column(12,
            checkboxGroupInput("annotcheckboxCurvesplot", label = "Choose at least one annotation"),
            actionButton("selectallCurvesplot", "Select All"),
            actionButton("unselectallCurvesplot", "Unselect All")
          ))),
      wellPanel(
        fixedRow(
          column(3,
            splitLayout(cellWidths = c("60%", "40%"),
              checkboxInput("doselogtransfoCurvesplot", label = HTML("<b>Dose log transformation</b>"), value = FALSE),
              shinyBS::bsButton("helplabel1step4", label = "", icon = icon("info"), size = "small", style="color:#9c5c16"),
              shinyBS::bsPopover("helplabel1step4", "", helplabel1step4, placement = "right", trigger = "hover", options = list(container = "body"))
            ),
            numericInput("mindoseCurvesplot", label = "Minimal dose for the x range", value = 0, width = "60%")
          ),
          column(2,
            radioButtons("facetbycolumnsCurvesplot", label = "Facet by (for columns)",
              choices = list("Annotation" = "annotation",
                             "Experimental level" = "explevel")
            )),
          conditionalPanel(condition = "input.nbLevel > 1",
            column(2,
              radioButtons("facetbyrowsCurvesplot", label = "Facet by (for rows)",
                choices = list("Annotation" = "annotation",
                               "Experimental level" = "explevel")
              ))
          ),
          column(2,
            fixedRow(
              checkboxInput("colorbyCurvesplot", label = HTML("<b>Color by trend</b>"), value = TRUE)
            )
          ),
          column(1,
            br(),
            div(align="right", actionButton("buttonRunStep4", "Run", icon = icon("fas fa-gear"), style='font-size:200%'))
          )
        )
      ),
      fixedRow(
        plotOutput("curvesplot", width = "100%", height = "900px"),
        br(), br(),
        div(align = "center", downloadButton("buttonDownloadCurvesplot", "Download Curves Plot", icon = icon("fas fa-download")))
      ),
      br(), br(), br(), br()
    ),
    ####################################################################################
    ####### STEP 5 #####################################################################
    ####################################################################################
    tabPanel(HTML("<font face=verdana size=3 color=#9c5c16>R code to go further</font>"),
      value = "step5",
      fixedRow(
        column(8,
          br(), HTML("<font face=verdana size=5 color=#9c5c16><b>R CODE TO GO FURTHER</b></font>"), br(), br(), br(),
          tags$blockquote("To see what more you can do using the R package, we recommend you to consult the vignette and the cheat sheet",
            "(links to all resources ", a("here", href = "https://lbbe.univ-lyon1.fr/fr/dromics", TARGET="_blank", style="color:#f28d0f;"),")."),
          br(),
          downloadButton("buttonDownRCode", "Download R Code", icon = icon("fas fa-download"), style = 'background-color:#e6e6e6; color:#000000; border-color:#9d9d9d;'), br(), br(),
          verbatimTextOutput('printRCode'), br(), br()
        ))
    )
  )
)
| /inst/DRomicsInterpreter-shiny/ui.R | no_license | cran/DRomics | R | false | false | 28,029 | r | ## JavaScript that dis/enables the ABILITY to click the tab (without changing aesthetics)
# app_jscode <-
# "shinyjs.disableTab = function(name) {
# var tab = $('.nav li a[data-value=' + name + ']');
# tab.bind('click.tab', function(e) {
# e.preventDefault();
# return false;
# });
# tab.addClass('disabled');
# }
# shinyjs.enableTab = function(name) {
# var tab = $('.nav li a[data-value=' + name + ']');
# tab.unbind('click.tab');
# tab.removeClass('disabled');
# }"
## CSS snippet that makes it LOOK like we are/n't able to click the tab (without changing functionality)
## Custom CSS injected via shinyjs::inlineCSS() in the UI below. It
## (i) greys out nav tabs carrying the 'disabled' class, and (ii) restyles
## progress bars, sliders, radio buttons and checkboxes in the DRomics
## brown (#9c5c16). NOTE: everything below is ONE string literal - the
## indentation is part of the string.
app_css <-
  ".nav li a.disabled {
  background-color: #aaa !important;
  color: #333 !important;
  cursor: not-allowed !important;
  border-color: #aaa !important;
  }
  .progress-bar {
  background-color: #9c5c16;
  }
  .js-irs-0 .irs-single, .js-irs-0 .irs-bar-edge, .js-irs-0 .irs-bar {
  background: #9c5c16;
  border-top: 1px #9c5c16 ;
  border-bottom: 1px #9c5c16 ;
  }
  label > input[type='radio'] + *::before {
  content: '';
  margin: 4px 0 0;
  width: 13px;
  height: 13px;
  position: absolute;
  margin-left: -20px;
  border-radius: 50%;
  border-style: solid;
  border-width: 0.1rem;
  border-color: #9c5c16;
  }
  label > input[type='radio']:checked + *::before {
  background: radial-gradient(white 0%, white 30%, #9c5c16 30%, #9c5c16);
  border-color: #9c5c16;
  }
  label > input[type='checkbox'] {
  opacity: 0;
  position: absolute;
  }
  label > input[type='checkbox'] + *::before {
  content: '';
  position: absolute;
  margin: 4px 0 0;
  margin-left: -20px;
  align: center;
  width: 13px;
  height: 13px;
  margin-right: 1rem;
  border-radius: 0%;
  border-style: solid;
  border-width: 0.1rem;
  border-color: #9c5c16;
  }
  label > input[type='checkbox']:checked + *::before {
  content: '';
  width: 13px;
  height: 13px;
  background-color: #9c5c16;
  }
  "
# User interface of the DRomicsInterpreter-shiny application.
# Layout: a home tab (presentation, links, citations, partner logos),
# four workflow-step tabs (1: import & merge, 2: trend/sensitivity plots,
# 3: BMD plots, 4: curves plot) and a final tab exposing the equivalent
# R code. All widget IDs are referenced by the matching server function.
ui <- fluidPage(
  shinyjs::useShinyjs(),
  # shinyjs::extendShinyjs(text = app_jscode, functions = c('disableTab','enableTab')),
  shinyjs::inlineCSS(app_css),
  titlePanel(
    tags$head(tags$link(rel = "icon", type = "image/png", href = "logodromics-dr.png"), tags$title(" DRomicsInterpreter-shiny"))
  ),
  br(),
  navbarPage(title = "",
    # Home tab: presentation, resources, citations and partner logos
    tabPanel(img(src = "logodromics.png", width = 180),
      fluidRow(
        br(), br(),
        HTML("<center><font face=verdana size=6 color=#9c5c16>Welcome to the DRomicsInterpreter-shiny application</font></center>"),
        HTML("<center><font face=verdana size=5 color=#9c5c16>A second workflow for interpretation in light of a biological annotation</font></center>"),
        br(), br(),
        fixedRow(column(10, offset = 1,
          br(),
          p(strong("Links and resources")),
          p("The DRomicsInterpreter-shiny application runs on the ",
            a("shiny server of the LBBE", href = "http://lbbe-shiny.univ-lyon1.fr/", TARGET="_blank", style="color:#f28d0f;"),
            "with the development version of the DRomics package (available on ",
            a("Github", href = "https://github.com/aursiber/DRomics", TARGET="_blank", style="color:#f28d0f;"),")."),
          p("DRomics is also an R package, available on ",
            a("CRAN", href = "https://cran.r-project.org/package=DRomics", TARGET="_blank", style="color:#f28d0f;"), ".",
            " You can find more information and help about the DRomicsInterpreter-shiny application and the DRomics package on ",
            a("this web page", href = "https://lbbe.univ-lyon1.fr/fr/dromics", TARGET="_blank", style="color:#f28d0f;"), "."),
          p(" Reading the vignette first and using the cheat sheet (both are available on ",
            a("this page", href = "https://lbbe.univ-lyon1.fr/fr/dromics", TARGET="_blank", style="color:#f28d0f;"),
            ") are recommended. "),
          br(),
          p(strong("Citation and publications")),
          p("If you use the DRomics Shiny App, you should cite:"),
          p(em("DRomics: a turnkey tool to support the use of the dose-response framework for omics data in ecological risk assessment."), br(),
            "Larras F, Billoir E, Baillard V, Siberchicot A, Scholz S, Wubet T, Tarkka M, Schmitt-Jansen M and Delignette-Muller ML (2018).",
            "Environmental Science & Technology.",
            a("https://doi.org/10.1021/acs.est.8b04752", href = "https://pubs.acs.org/doi/10.1021/acs.est.8b04752", TARGET = "_blank", style="color:#f28d0f;")),
          p("You can freely find this article at: ", a("https://hal.archives-ouvertes.fr/hal-02309919", href = "https://hal.archives-ouvertes.fr/hal-02309919", TARGET = "_blank", style="color:#f28d0f;")),
          br(),
          p("You can also look at the following citation for a complete example of use:"),
          p(em("A multi-omics concentration-response framework uncovers novel understanding of triclosan effects in the chlorophyte Scenedesmus vacuolatus."), br(),
            "Larras F, Billoir E, Scholz S, Tarkka M, Wubet T, Delignette-Muller ML, Schmitt-Jansen M (2020).",
            "Journal of Hazardous Materials.",
            a("https://doi.org/10.1016/j.jhazmat.2020.122727", href = "https://doi.org/10.1016/j.jhazmat.2020.122727", TARGET = "_blank", style="color:#f28d0f;")),
          br(),
          p(strong("Contact")),
          p("If you have any need that is not yet covered, any feedback on the package / Shiny app, or any training needs, feel free to email us at ", strong("dromics@univ-lyon1.fr"), "."),
          p("Issues can be reported on",
            a("https://github.com/aursiber/DRomics/issues", href = "https://github.com/aursiber/DRomics/issues", TARGET = "_blank", style="color:#f28d0f;"), ".")
        )),
        hr(style='width: 80%;'),
        br(),
        fixedRow(column(10, offset = 3,
          fillRow(flex = NA,
            a(img(src = "https://lbbe.univ-lyon1.fr/sites/default/files/icons/logo_1.svg", width = 220), href="https://lbbe.univ-lyon1.fr/", TARGET="_blank"),
            a(img(src = "logoVetAgroSup.jpg", height = 100, width = 120), href="http://www.vetagro-sup.fr/", TARGET="_blank"),
            a(img(src = "logoLyon1.png", height = 80, width = 380), href="https://www.univ-lyon1.fr/", TARGET="_blank"),
            a(img(src = "LogoUniversiteLorraine.png", height = 80, width = 180), href="http://www.univ-lorraine.fr/", TARGET="_blank"),
            style="text-align: center;"
          )
        ))
      )
    ),
    ####################################################################################
    ####### STEP 1 #####################################################################
    ####################################################################################
    tabPanel(HTML("<font face=verdana size=3 color=#9c5c16>Step 1</font>"),
      br(), HTML("<font face=verdana size=5 color=#9c5c16><b>Import and merge of DRomics results and annotation data</b></font>"), br(), br(), br(),
      fluidRow(
        column(1,
          numericInput("nbLevel", "Number of experimental levels", value = 1, min = 1, max = 10, step = 1)
        ),
        column(2, style = "margin-top: 25px;padding:20px;",
          shinyBS::bsButton("nblevel_help", label = "", icon = icon("info"), size = "small"),
          shinyBS::bsPopover("nblevel_help", "", texthelpnblevel, placement = "right", trigger = "hover", options = NULL)
        ),
        column(2,
          textInput("maxDoseXScale", "Maximal dose/concentration for definition of x-scale of plots", width = "100%")
        ),
        column(3, style = "margin-top: 25px;padding:20px;",
          shinyBS::bsButton("maxdosexscale_help", label = "", icon = icon("info"), size = "small"),
          shinyBS::bsPopover("maxdosexscale_help", "", texthelpmaxdosexscale, placement = "right", trigger = "hover", options = NULL)
        )
      ),
      br(),
      uiOutput("inputstep1"),
      fixedRow(
        div(align = "center", actionButton("buttonRunStep1", "Merge and Combine", icon = icon("object-group"), style='font-size:150%')), br(), br(),
        conditionalPanel(
          condition = "input.nbLevel > 1",
          span(textOutput("txtcolumnidannot"), style = 'color:#9c5c16;font-size:large;'), br(), br()
        ),
        shinyjs::hidden(div(id = 'text1_step1',
          style = 'color:#9c5c16; font-size:large;line-height: 50px;',
          "Structure of the data frame merged and combined")),
        verbatimTextOutput("strmergeddata"), br(), br(),
        shinyjs::hidden(div(id = 'text2_step1',
          style = 'color:#9c5c16; font-size:large;line-height: 50px;',
          "First 3 lines of each experimental level in the data frame merged and combined")),
        uiOutput("headmergeddata"),
        br(), br()
      )
    ),
    ####################################################################################
    ####### STEP 2 #####################################################################
    ####################################################################################
    tabPanel(HTML("<font face=verdana size=3 color=#9c5c16>Step 2</font>"),
      value = "step2",
      br(), HTML("<font face=verdana size=5 color=#9c5c16><b>Trend and sensitivity plots</b></font>"), br(), br(), br(),
      wellPanel(
        fixedRow(
          column(3, style='border-right:1px solid #9c5c16;',
            HTML("<font face=verdana size=3 color=#9c5c16><b>Selection of annotation groups to plot </b></font>"),
            shinyBS::bsButton("helplabel1step2", label = "", icon = icon("info"), size = "small", style="color:#9c5c16"),
            shinyBS::bsPopover("helplabel1step2", "", helplabel1step2, placement = "right", trigger = "hover", options = list(container = "body")),
            br(),
            br(),
            checkboxInput("keepAllExplev", label = HTML("<b>Keep all experimental levels</b>"), value = FALSE),
            br(),
            sliderInput("minNbItem", "Minimum for the number of items",
              width = "90%",
              min = 1, max = 10, step = 1, ticks = TRUE,
              value = 3),
            numericInput("BMDmax", label = "Maximum for the BMD summary value", value = 0, min = 0, step = 0.1, width = "70%")
          ),
          column(1,
            checkboxInput("BMDlogtransfoSensitivityplot", label = HTML("<b>Log transformation of the BMD</b>"), value = FALSE)
          ),
          column(1,
            radioButtons("BMDtype", label = "BMD type",
              choices = list("zSD" = "zSD",
                             "xfold" = "xfold")
            )
          ),
          column(1,
            radioButtons("BMDsummarysensitivityPlot", label = "BMD summary",
              choices = list("First quartile" = "first.quartile",
                             "Median" = "median",
                             "Median and IQR" = "median.and.IQR")
            )),
          column(2,
            # if several experimental levels
            conditionalPanel(condition = "input.nbLevel > 1",
              radioButtons("ordering_moreonelev", label = "Ordering of the annotations",
                choices = list("alphabetic order" = "alphaorder_moreonelev",
                               "ordered by total number of items in all the experimental levels" = "numbitemorder_moreonelev",
                               "specific order" = "specificorder_moreonelev"))
            ),
            # if only one experimental level
            conditionalPanel(condition = "input.nbLevel == 1",
              radioButtons("ordering_onelev", label = "Ordering of the annotations",
                choices = list("alphabetic order" = "alphaorder_onelev",
                               "ordered by number of items" = "numbitemorder_onelev",
                               "ordered by BMD summary value" = "BMDorder_onelev",
                               "specific order" = "specificorder_onelev"))
            )
          ),
          column(3,
            conditionalPanel(condition = "input.ordering_moreonelev == 'specificorder_moreonelev' | input.ordering_onelev == 'specificorder_onelev'",
              uiOutput("specificorder", style="font-size:85%;")
            )
          ),
          column(1,
            br(),
            div(align="right", actionButton("buttonRunStep2", "Run", icon = icon("fas fa-gear"), style='font-size:200%'))
          )
        )),
      fixedRow(
        column(6,
          shinyjs::hidden(div(id = 'text1_step2',
            style = 'color:#9c5c16; font-size:large;text-align: center;line-height: 150px;',
            "Trend plot")),
          plotOutput("trendplot", width = "100%", height = "900px"),
          br(), br(),
          div(align = "center", downloadButton("buttonDownloadTrendplot", "Download Trend Plot", icon = icon("fas fa-download")))),
        column(6,
          fixedRow(
            shinyjs::hidden(div(id = 'text2_step2',
              style = 'color:#9c5c16; font-size:large;text-align: center;line-height: 150px;',
              "Sensitivity plot")),
            plotOutput("sensitivityplot", width = "100%", height = "900px"),
            br(), br(),
            div(align = "center", downloadButton("buttonDownloadSensitivityplot", "Download Sensitivity Plot", icon = icon("fas fa-download")))
          )
        )
      ),
      br(), br(),
      fixedRow(
        shinyjs::hidden(div(id = 'text3_step2',
          style = 'color:#9c5c16; font-size:large;line-height: 50px;',
          "Structure of the data frame merged and combined")),
        verbatimTextOutput("filteredsorteddata"),
        downloadButton('downloadData', 'Download Data')
      ),
      br(), br(), br(), br()
    ),
    ####################################################################################
    ####### STEP 3 #####################################################################
    ####################################################################################
    tabPanel(HTML("<font face=verdana size=3 color=#9c5c16>Step 3</font>"),
      value = "step3",
      br(), HTML("<font face=verdana size=5 color=#9c5c16><b>BMD plots (with and without gradient)</b></font>"), br(), br(), br(),
      wellPanel(
        fixedRow(
          column(12,
            checkboxGroupInput("annotcheckboxBMDplot", label = "Choose at least one annotation"),
            actionButton("selectallBMDplot", "Select All"),
            actionButton("unselectallBMDplot", "Unselect All")
          ))),
      wellPanel(
        fixedRow(
          column(2,
            checkboxInput("addciBMDplot", label = HTML("<b>Add CI</b> (only for the BMD plot without gradient)"), value = FALSE),
            checkboxInput("BMDlogtransfoBMDplot", label = HTML("<b>Log transformation of the BMD</b>"), value = FALSE),
            checkboxInput("addlabelBMDplot", label = HTML("<b>Add labels</b>"), value = FALSE)
          ),
          column(2,
            radioButtons("facetbycolumnsBMDplot", label = "Facet by (for columns)",
              choices = list("Annotation" = "annotation",
                             "Experimental level" = "explevel")
            )),
          conditionalPanel(condition = "input.nbLevel > 1",
            column(2,
              radioButtons("facetbyrowsBMDplot", label = "Facet by (for rows)",
                choices = list("Annotation" = "annotation",
                               "Experimental level" = "explevel")
              ))
          ),
          column(2,
            fixedRow(
              checkboxInput("shapebyBMDplot", label = HTML("<b>Shape by trend</b>"), value = FALSE),
              checkboxInput("colorbyBMDplot", label = HTML("<b>Color by trend</b> (only for the BMD plot without gradient)"), value = FALSE)
            )
          ),
          column(1,
            br(),
            div(align="right", actionButton("buttonRunStep3", "Run", icon = icon("fas fa-gear"), style='font-size:200%'))
          )
        )
      ),
      fixedRow(
        column(6,
          shinyjs::hidden(div(id = 'text1_step3',
            style = 'color:#9c5c16; font-size:large;text-align: center;line-height: 150px;',
            "BMD plot")),
          plotOutput("bmdplot", width = "100%", height = "900px"),
          br(), br(),
          div(align = "center", downloadButton("buttonDownloadBMDplot", "Download BMD Plot", icon = icon("fas fa-download")))),
        column(6,
          shinyjs::hidden(div(id = 'text2_step3',
            style = 'color:#9c5c16; font-size:large;text-align: center;line-height: 150px;',
            "BMD plot with gradient")),
          plotOutput("bmdplotwithgradient", width = "100%", height = "900px"),
          br(), br(),
          div(align = "center", downloadButton("buttonDownloadBMDplotwithgradient", "Download BMD Plot with gradient", icon = icon("fas fa-download")))
        )
      ),
      br(), br(), br(), br()
    ),
    ####################################################################################
    ####### STEP 4 #####################################################################
    ####################################################################################
    tabPanel(HTML("<font face=verdana size=3 color=#9c5c16>Step 4</font>"),
      value = "step4",
      br(), HTML("<font face=verdana size=5 color=#9c5c16><b>Curves plot</b></font>"), br(), br(), br(),
      wellPanel(
        fixedRow(
          column(12,
            checkboxGroupInput("annotcheckboxCurvesplot", label = "Choose at least one annotation"),
            actionButton("selectallCurvesplot", "Select All"),
            actionButton("unselectallCurvesplot", "Unselect All")
          ))),
      wellPanel(
        fixedRow(
          column(3,
            splitLayout(cellWidths = c("60%", "40%"),
              checkboxInput("doselogtransfoCurvesplot", label = HTML("<b>Dose log transformation</b>"), value = FALSE),
              shinyBS::bsButton("helplabel1step4", label = "", icon = icon("info"), size = "small", style="color:#9c5c16"),
              shinyBS::bsPopover("helplabel1step4", "", helplabel1step4, placement = "right", trigger = "hover", options = list(container = "body"))
            ),
            numericInput("mindoseCurvesplot", label = "Minimal dose for the x range", value = 0, width = "60%")
          ),
          column(2,
            radioButtons("facetbycolumnsCurvesplot", label = "Facet by (for columns)",
              choices = list("Annotation" = "annotation",
                             "Experimental level" = "explevel")
            )),
          conditionalPanel(condition = "input.nbLevel > 1",
            column(2,
              radioButtons("facetbyrowsCurvesplot", label = "Facet by (for rows)",
                choices = list("Annotation" = "annotation",
                               "Experimental level" = "explevel")
              ))
          ),
          column(2,
            fixedRow(
              checkboxInput("colorbyCurvesplot", label = HTML("<b>Color by trend</b>"), value = TRUE)
            )
          ),
          column(1,
            br(),
            div(align="right", actionButton("buttonRunStep4", "Run", icon = icon("fas fa-gear"), style='font-size:200%'))
          )
        )
      ),
      fixedRow(
        plotOutput("curvesplot", width = "100%", height = "900px"),
        br(), br(),
        div(align = "center", downloadButton("buttonDownloadCurvesplot", "Download Curves Plot", icon = icon("fas fa-download")))
      ),
      br(), br(), br(), br()
    ),
    ####################################################################################
    ####### STEP 5 #####################################################################
    ####################################################################################
    tabPanel(HTML("<font face=verdana size=3 color=#9c5c16>R code to go further</font>"),
      value = "step5",
      fixedRow(
        column(8,
          br(), HTML("<font face=verdana size=5 color=#9c5c16><b>R CODE TO GO FURTHER</b></font>"), br(), br(), br(),
          tags$blockquote("To see what more you can do using the R package, we recommend you to consult the vignette and the cheat sheet",
            "(links to all resources ", a("here", href = "https://lbbe.univ-lyon1.fr/fr/dromics", TARGET="_blank", style="color:#f28d0f;"),")."),
          br(),
          downloadButton("buttonDownRCode", "Download R Code", icon = icon("fas fa-download"), style = 'background-color:#e6e6e6; color:#000000; border-color:#9d9d9d;'), br(), br(),
          verbatimTextOutput('printRCode'), br(), br()
        ))
    )
  )
)
|
########################
########################
##unit.handlers
########################
########################
#this uses checkInput
#in place
#################
#getUnits
#setUnits
#convertUnits
#addUnitConversion
#addUnitAlias
#listUnitConversions
#TO DO
################
#urgent
#convert units to log in history?
#################
#fix for aliases in convertUnits
#################
#removeUnitConversion
#removeUnitAlias
##this needs a stop orphaning ids option?
#questions
###################
#
########################
########################
##getUnits
########################
########################
#version 0.2.0
#karl 17/09/2010
#' Get the units attribute of a pems element
#'
#' Looks up \code{input} (optionally inside \code{data}) via
#' \code{getPEMSElement} and returns its units via \code{checkUnits}.
#'
#' @param input element whose units are wanted.
#' @param data optional data source (e.g. a pems object) searched for input.
#' @param ... unused; retained for call compatibility.
#' @param if.missing action when units are missing: "stop", "warning"
#'   or "return".
getUnits <- function(input = NULL, data = NULL, ...,
if.missing = c("stop", "warning", "return")){
#if.missing handling
#(reference getUnits' own formals for the allowed set; previously this
#pointed at formals(setUnits) - same values, but a copy/paste slip)
if.missing <- checkOption(if.missing[1], formals(getUnits)$if.missing,
"if.missing", "allowed if.missings",
fun.name = "getUnits")
#get input, then units
ans <- getPEMSElement(!!enquo(input), data, fun.name="getUnits",
if.missing = "return", ref.name="input")
checkUnits(ans, if.missing = if.missing, fun.name = "getUnits")
}
########################
########################
##setUnits
########################
########################
#version 0.2.0
#karl 17/09/2010
#Set (or clear) the units attribute of a pems element.
#input: element to tag; units: new units id ("" or NULL clears when forced);
#data: optional data source holding input; if.missing: "stop"/"warning"/"return";
#output: where the result goes ("input", "data.frame", "pems" or "special"
#which resolves from data); force: allow overwriting already-set units;
#overwrite: passed on to checkOutput.
setUnits <- function(input = NULL, units = NULL, data = NULL, ...,
if.missing = c("stop", "warning", "return"),
output = c("input", "data.frame", "pems", "special"),
force = FALSE, overwrite = FALSE){
fun.name <- "setUnits"
#output handling
output <- checkOption(output[1], formals(setUnits)$output,
"output", "allowed outputs",
fun.name = "setUnits")
#"special" resolves to "input" unless data is a pems object whose
#comment(isPEMS(...)) names a concrete output type
if(output == "special"){
output <- if(is.null(data))
"input" else if(comment(isPEMS(data)) == "other")
"input" else comment(isPEMS(data))
}
#if.missing handling
if.missing <- checkOption(if.missing[1], formals(setUnits)$if.missing,
"if.missing", "allowed if.missings",
fun.name = "setUnits")
#units
#NULL units is only meaningful with force = TRUE (deliberate unit removal);
#otherwise complain per if.missing
if(is.null(units)){
if(if.missing=="stop")
stop(paste("\t In ", fun.name,"(...) units not set/NULL", sep=""),
paste("\n\t [suggest setting units]", sep=""),
call. = FALSE, domain = NA)
if(if.missing=="warning" & !force)
warning(paste("\t In ", fun.name,"(...) units not set/NULL", sep=""),
paste("\n\t [ignoring setUnits]", sep=""),
paste("\n\t [suggest setUnits(..., force = TRUE) if you meant to delete units]", sep=""),
call. = FALSE, domain = NA)
}
ans <- getPEMSElement(!!enquo(input), data, fun.name="setUnits",
if.missing = "return", ref.name="input")
#was
# ans <- if(!hijack)
# checkInput(input = input, data = data, if.missing = if.missing,
# output = "input", fun.name = "setUnits") else
# input
#"" is the internal marker for "clear the units attribute"
if(is.null(units)) units <- ""
#only (re)set when units are currently unset, force is on, or the new
#value matches what is already there; otherwise report per if.missing
if(is.null(attributes(ans)$units) || force || as.character(attributes(ans)$units) == as.character(units)){
#allow null/reset
attr(ans, "units") <- if(units=="")
NULL else units
} else {
if(if.missing=="stop")
stop(paste("\t In ", fun.name,"(...) could not reset already set units", sep=""),
paste("\n\t [suggest using convertUnits to convert current to required units]", sep=""),
paste("\n\t [or setUnits(..., force = TRUE) if reset really wanted]", sep=""),
call. = FALSE, domain = NA)
if(if.missing=="warning")
warning(paste("\t In ", fun.name,"(...) already set units not reset", sep=""),
paste("\n\t [ignoring setUnits call]", sep=""),
paste("\n\t [suggest using convertUnits to convert current to required units]", sep=""),
paste("\n\t [or setUnits(..., force = TRUE) if reset really wanted]", sep=""),
call. = FALSE, domain = NA)
}
#returned elements carry the pems.element class marker
if(output=="input")
attr(ans, "class") <- unique(c("pems.element", attr(ans, "class")))
checkOutput(input = ans, data = data, if.missing = if.missing,
fun.name = "setUnits", output = output, overwrite = overwrite)
}
########################
########################
##convertUnits
########################
########################
#version 0.2.0
#karl 17/09/2010
#Convert a pems element from one unit to another.
#input: element to convert; to/from: target and (optional) current unit ids
#(from defaults to the element's own units attribute); data: optional data
#source holding input; if.missing: "stop"/"warning"/"return"; output: result
#destination ("special" resolves from data); unit.conversions: look-up table
#passed to checkUnits; force: skip the from-vs-current-units consistency
#check; overwrite: passed on to checkOutput.
convertUnits <- function(input = NULL, to = NULL, from = NULL, data = NULL, ...,
if.missing = c("stop", "warning", "return"),
output = c("input", "data.frame", "pems", "special"),
unit.conversions = NULL, force = FALSE, overwrite = FALSE){
fun.name <- "convertUnits"
#output handling
output <- checkOption(output[1], formals(convertUnits)$output,
"output", "allowed outputs",
fun.name = fun.name)
#"special" resolves to "input" unless data is a pems object whose
#comment(isPEMS(...)) names a concrete output type
if(output == "special"){
output <- if(is.null(data))
"input" else if(comment(isPEMS(data)) == "other")
"input" else comment(isPEMS(data))
}
#if.missing handling
if.missing <- checkOption(if.missing[1], formals(convertUnits)$if.missing,
"if.missing", "allowed if.missings",
fun.name = fun.name)
ans <- getPEMSElement(!!enquo(input), data, fun.name=fun.name,
if.missing = "return", ref.name="input")
#was
# ans <- if(!hijack)
# checkInput(input = input, data = data, if.missing = if.missing,
# output = "input", fun.name = fun.name) else
# input
#from handling
#temp holds the units currently set on the element (may be NULL)
temp <- checkUnits(ans, if.missing = "return",
output = "units", fun.name = fun.name)
if(is.null(from)){
from <- temp
}else {
#################################
#fix for later
#from could be an alias of temp
#################################
#a supplied from that disagrees with the element's current units is
#rejected unless force = TRUE; on rejection both from and to are
#cleared so no conversion happens below
if(!force && as.character(from) != as.character(temp)){
if(if.missing=="stop")
stop(paste("\t In ", fun.name,"(...) from/input unit mismatch", sep=""),
paste("\n\t [suggest confirming input units/conversion]", sep=""),
paste("\n\t [or convertUnits(..., force = TRUE) if you really want conversion forced]", sep=""),
call. = FALSE, domain = NA)
if(if.missing=="warning")
warning(paste("\t In ", fun.name,"(...) from/input unit mismatch", sep=""),
paste("\n\t [ignoring requested convertUnits]", sep=""),
paste("\n\t [suggest confirming input units/conversion]", sep=""),
paste("\n\t [or convertUnits(..., overwrite = TRUE) if you really want conversion forced]", sep=""),
call. = FALSE, domain = NA)
from <- NULL
to <- NULL
}
}
#if both to and from not set
if(is.null(from) & is.null(to)){
if(if.missing=="stop")
stop(paste("\t In ", fun.name,"(...) to and from not set, unknown or NULL", sep=""),
paste("\n\t [suggest setting both]", sep=""),
call. = FALSE, domain = NA)
if(if.missing=="warning")
warning(paste("\t In ", fun.name,"(...) to and from not set, unknown or NULL", sep=""),
paste("\n\t [ignoring convertUnits request]", sep=""),
paste("\n\t [suggest setting both]", sep=""),
call. = FALSE, domain = NA)
}
#to handling
#missing to falls back to from, i.e. a no-op conversion
if(is.null(to)){
if(if.missing=="stop")
stop(paste("\t In ", fun.name,"(...) to not set/NULL", sep=""),
paste("\n\t [suggest setting to]", sep=""),
call. = FALSE, domain = NA)
if(if.missing=="warning")
warning(paste("\t In ", fun.name,"(...) to not set/NULL", sep=""),
paste("\n\t [ignoring setUnits]", sep=""),
paste("\n\t [suggest setting to in call]", sep=""),
call. = FALSE, domain = NA)
to <- from
}
#stamp the (possibly forced) from units, then let checkUnits apply the
#actual conversion to the target units
if(!is.null(from)){
attributes(ans)$units <- as.character(from)
}
if(!is.null(to)){
ans <- checkUnits(ans, to, unit.conversions = unit.conversions,
fun.name = fun.name)
}
checkOutput(input = ans, data = data, if.missing = if.missing,
fun.name = fun.name, output = output, overwrite = overwrite)
}
########################
########################
##addUnitConversion
########################
########################
#version 0.2.0
#karl 17/09/2010
#' Add a unit conversion method to a conversions look-up table
#'
#' @param to,from character ids of the target and source units.
#' @param conversion a function of one argument, or a numeric multiplier
#'   (shorthand for \code{function(x) x * conversion}).
#' @param tag short text label documenting the conversion.
#' @param unit.conversions look-up table (list) to extend; anything that
#'   is not a list starts a new empty table.
#' @param ... unused; retained for call compatibility.
#' @param overwrite if TRUE, replace an existing to/from method.
#' @return The updated unit.conversions list.
addUnitConversion <- function(to = NULL, from = NULL, conversion = NULL,
tag = "undocumented",
unit.conversions = ref.unit.conversions, ...,
overwrite = FALSE){
#start a fresh look-up table if none supplied
if(!is.list(unit.conversions))
unit.conversions <- list()
#to, from and conversion are all required
if(any(vapply(list(to, from, conversion), is.null, logical(1))))
stop(paste("\t In addUnitConversion(...) need all of: to, from and conversion", sep=""),
paste("\n\t [suggest setting all in call]", sep=""),
call. = FALSE, domain = NA)
to <- as.character(to)
from <- as.character(from)
tag <- as.character(tag)
if(length(to)<1 | length(from)<1)
stop(paste("\t In addUnitConversion(...) to and/or from are not viable ids", sep=""),
paste("\n\t [suggest renaming]", sep=""),
call. = FALSE, domain = NA)
#a numeric conversion is shorthand for "multiply by that factor";
#capture the factor in a closure (previously built via eval(parse(...)))
if(is.numeric(conversion)){
scale.factor <- conversion
conversion <- function(x) x * scale.factor
}
if(!is.function(conversion))
stop(paste("\t In addUnitConversion(...) conversion not viable as method", sep=""),
paste("\n\t [check help ?addUnitConversion]", sep=""),
call. = FALSE, domain = NA)
#look for an existing method with the same to/from pairing
temp <- NULL
if(length(unit.conversions)>0){
temp <- sapply(unit.conversions, function(x)
if(to %in% x$to & from %in% x$from)
TRUE else FALSE)
if(length(temp[temp])>1){
warning(paste("In addUnitConversion(...) multiple matching conversion methods!", sep=""),
paste("\n\t [corrupt unit.conversions?]", sep =""),
"\n\t [ignoring all but first]",
"\n\t [suggest checking sources]",
call. = FALSE, domain = NA)
}
}
if(is.null(temp) || !any(temp)){
#no duplicate: append a new record
unit.conversions[[length(unit.conversions)+1]] <- list(to = to, from = from, tag = tag, conversion = conversion)
} else {
if(overwrite){
unit.conversions[temp][[1]]$conversion <- conversion
#refresh the tag unless the existing one is real documentation
#(fixed typo: the comparison value was "undocuments", which never
#matched the default tag, so tags were never refreshed)
if(!is.character(unit.conversions[temp][[1]]$tag) || unit.conversions[temp][[1]]$tag == "" ||
unit.conversions[temp][[1]]$tag == "undocumented")
unit.conversions[temp][[1]]$tag <- tag
} else {
stop(paste("\t In addUnitConversion(...) existing conversion method encountered", sep=""),
paste("\n\t [suggest overwrite = TRUE if you really want to do this]", sep=""),
call. = FALSE, domain = NA)
}
}
unit.conversions
}
########################
########################
##addUnitAlias
########################
########################
#version 0.2.0
#karl 17/09/2010
#' Add an alias for a unit id in a conversions look-up table
#'
#' Registers \code{alias} alongside \code{ref} wherever \code{ref}
#' appears in the to or from fields of the look-up table.
#'
#' @param ref existing unit id to alias.
#' @param alias new alias to register for ref.
#' @param unit.conversions look-up table (list) to update.
#' @param ... unused; retained for call compatibility.
#' @return The updated unit.conversions list (with a warning and no
#'   changes if ref is not found).
addUnitAlias <- function(ref = NULL, alias = NULL,
unit.conversions = ref.unit.conversions, ...){
#the look-up table must already exist
if(!is.list(unit.conversions))
stop(paste("\t In addUnitAlias(...) no unit.conversion to reference", sep=""),
paste("\n\t [suggest updating call/checking ?addUnitAlias]", sep=""),
call. = FALSE, domain = NA)
#ref and alias are both required
if(any(vapply(list(ref, alias), is.null, logical(1))))
stop(paste("\t In addUnitAlias(...) need all of: ref and alias", sep=""),
paste("\n\t [suggest setting all in call]", sep=""),
call. = FALSE, domain = NA)
ref <- as.character(ref)
alias <- as.character(alias)
if(length(ref)<1 | length(alias)<1)
stop(paste("\t In addUnitAlias(...) ref and/or alias not viable ids", sep=""),
paste("\n\t [suggest renaming]", sep=""),
call. = FALSE, domain = NA)
#seq_along (not 1:length) so an empty look-up table skips the loop
#cleanly instead of indexing out of bounds
temp <- FALSE
for(i in seq_along(unit.conversions)){
if(ref %in% unit.conversions[[i]]$to){
unit.conversions[[i]]$to <- unique(c(unit.conversions[[i]]$to, alias))
temp <- TRUE
}
if(ref %in% unit.conversions[[i]]$from){
unit.conversions[[i]]$from <- unique(c(unit.conversions[[i]]$from, alias))
temp <- TRUE
}
}
if(!temp)
warning(paste("In addUnitAlias(...) ref not found in look-up table", sep=""),
paste("\n\t [no alias updates]", sep=""),
call. = FALSE, domain = NA)
unit.conversions
}
########################
########################
##listUnitConversions
########################
########################
#version 0.2.0
#karl 17/09/2010
#' List conversion methods held in a unit conversions look-up table
#'
#' Returns the tag of each stored method, optionally restricted to
#' methods matching a given to and/or from unit id.
#'
#' @param unit.conversions look-up table (list) to summarise.
#' @param ... unused; retained for call compatibility.
#' @param verbose if TRUE, include the to/from ids in each entry.
#' @param to,from optional unit ids to filter on (a method is kept if
#'   either matches).
#' @return A character vector, one entry per retained method; errors if
#'   nothing matches.
listUnitConversions <- function(unit.conversions = ref.unit.conversions, ...,
verbose = FALSE, to = NULL, from = NULL){
#need a look-up table to work with
if(!is.list(unit.conversions))
stop(paste("\t In listUnitConversions(...) no unit.conversion to reference", sep=""),
paste("\n\t [suggest updating call/checking ?listUnitConversions]", sep=""),
call. = FALSE, domain = NA)
#normalise the filters; "" means "no filter"
to <- if(is.null(to)) "" else as.character(to)
from <- if(is.null(from)) "" else as.character(from)
######################
#error if to, from no good?
######################
#keep any method whose to or from field matches a supplied filter
if(to != "" | from != ""){
keep <- vapply(unit.conversions,
function(entry) (to %in% entry$to) | (from %in% entry$from),
logical(1))
unit.conversions <- unit.conversions[keep]
}
if(length(unit.conversions) < 1)
stop(paste("\t In listUnitConversions(...) no matched methods located", sep=""),
paste("\n\t [no suggestion]", sep=""),
call. = FALSE, domain = NA)
#build one descriptive string per method
describe <- if(verbose){
function(entry)
paste("TAG: ", paste(entry$tag, sep ="", collapse =","),
"; FROM:", paste(entry$from, sep ="", collapse =","),
"; TO:", paste(entry$to, sep ="", collapse =","), sep="")
} else {
function(entry)
paste(entry$tag, sep ="", collapse =",")
}
vapply(unit.conversions, describe, character(1))
}
| /pems.utils/R/unit.handlers.R | no_license | akhikolla/updatedatatype-list4 | R | false | false | 16,386 | r | ########################
########################
##unit.handlers
########################
########################
#this uses checkInput
#in place
#################
#getUnits
#setUnits
#convertUnits
#addUnitConversion
#addUnitAlias
#listUnitConversions
#TO DO
################
#urgent
#convert units to log in history?
#################
#fix for aliases in convertUnits
#################
#removeUnitConversion
#removeUnitAlias
##this needs a stop orphaning ids option?
#questions
###################
#
########################
########################
##getUnits
########################
########################
#version 0.2.0
#karl 17/09/2010
#Get the units attribute of a pems element.
#input: element to look up (optionally inside data); ...: unused;
#if.missing: "stop"/"warning"/"return" when units are missing.
getUnits <- function(input = NULL, data = NULL, ...,
if.missing = c("stop", "warning", "return")){
#if.missing handling
#NOTE(review): references formals(setUnits) rather than formals(getUnits);
#the allowed values are the same either way, but this looks like a
#copy/paste slip - confirm before changing
if.missing <- checkOption(if.missing[1], formals(setUnits)$if.missing,
"if.missing", "allowed if.missings",
fun.name = "getUnits")
#get input, then units
ans <- getPEMSElement(!!enquo(input), data, fun.name="getUnits",
if.missing = "return", ref.name="input")
#was
# ans <- if(!hijack)
# checkInput(input = input, data = data, if.missing = if.missing,
# fun.name = "getUnits") else
# input
checkUnits(ans, if.missing = if.missing, fun.name = "getUnits")
}
########################
########################
##setUnits
########################
########################
#version 0.2.0
#karl 17/09/2010
#Set (or clear) the units attribute of a pems element.
#input: element to tag; units: new units id ("" or NULL clears when forced);
#data: optional data source holding input; if.missing: "stop"/"warning"/"return";
#output: where the result goes ("input", "data.frame", "pems" or "special"
#which resolves from data); force: allow overwriting already-set units;
#overwrite: passed on to checkOutput.
setUnits <- function(input = NULL, units = NULL, data = NULL, ...,
if.missing = c("stop", "warning", "return"),
output = c("input", "data.frame", "pems", "special"),
force = FALSE, overwrite = FALSE){
fun.name <- "setUnits"
#output handling
output <- checkOption(output[1], formals(setUnits)$output,
"output", "allowed outputs",
fun.name = "setUnits")
#"special" resolves to "input" unless data is a pems object whose
#comment(isPEMS(...)) names a concrete output type
if(output == "special"){
output <- if(is.null(data))
"input" else if(comment(isPEMS(data)) == "other")
"input" else comment(isPEMS(data))
}
#if.missing handling
if.missing <- checkOption(if.missing[1], formals(setUnits)$if.missing,
"if.missing", "allowed if.missings",
fun.name = "setUnits")
#units
#NULL units is only meaningful with force = TRUE (deliberate unit removal);
#otherwise complain per if.missing
if(is.null(units)){
if(if.missing=="stop")
stop(paste("\t In ", fun.name,"(...) units not set/NULL", sep=""),
paste("\n\t [suggest setting units]", sep=""),
call. = FALSE, domain = NA)
if(if.missing=="warning" & !force)
warning(paste("\t In ", fun.name,"(...) units not set/NULL", sep=""),
paste("\n\t [ignoring setUnits]", sep=""),
paste("\n\t [suggest setUnits(..., force = TRUE) if you meant to delete units]", sep=""),
call. = FALSE, domain = NA)
}
ans <- getPEMSElement(!!enquo(input), data, fun.name="setUnits",
if.missing = "return", ref.name="input")
#was
# ans <- if(!hijack)
# checkInput(input = input, data = data, if.missing = if.missing,
# output = "input", fun.name = "setUnits") else
# input
#"" is the internal marker for "clear the units attribute"
if(is.null(units)) units <- ""
#only (re)set when units are currently unset, force is on, or the new
#value matches what is already there; otherwise report per if.missing
if(is.null(attributes(ans)$units) || force || as.character(attributes(ans)$units) == as.character(units)){
#allow null/reset
attr(ans, "units") <- if(units=="")
NULL else units
} else {
if(if.missing=="stop")
stop(paste("\t In ", fun.name,"(...) could not reset already set units", sep=""),
paste("\n\t [suggest using convertUnits to convert current to required units]", sep=""),
paste("\n\t [or setUnits(..., force = TRUE) if reset really wanted]", sep=""),
call. = FALSE, domain = NA)
if(if.missing=="warning")
warning(paste("\t In ", fun.name,"(...) already set units not reset", sep=""),
paste("\n\t [ignoring setUnits call]", sep=""),
paste("\n\t [suggest using convertUnits to convert current to required units]", sep=""),
paste("\n\t [or setUnits(..., force = TRUE) if reset really wanted]", sep=""),
call. = FALSE, domain = NA)
}
#returned elements carry the pems.element class marker
if(output=="input")
attr(ans, "class") <- unique(c("pems.element", attr(ans, "class")))
checkOutput(input = ans, data = data, if.missing = if.missing,
fun.name = "setUnits", output = output, overwrite = overwrite)
}
########################
########################
##convertUnits
########################
########################
#version 0.2.0
#karl 17/09/2010
#Convert a pems element from one unit to another.
#input: element to convert; to/from: target and (optional) current unit ids
#(from defaults to the element's own units attribute); data: optional data
#source holding input; if.missing: "stop"/"warning"/"return"; output: result
#destination ("special" resolves from data); unit.conversions: look-up table
#passed to checkUnits; force: skip the from-vs-current-units consistency
#check; overwrite: passed on to checkOutput.
convertUnits <- function(input = NULL, to = NULL, from = NULL, data = NULL, ...,
if.missing = c("stop", "warning", "return"),
output = c("input", "data.frame", "pems", "special"),
unit.conversions = NULL, force = FALSE, overwrite = FALSE){
fun.name <- "convertUnits"
#output handling
output <- checkOption(output[1], formals(convertUnits)$output,
"output", "allowed outputs",
fun.name = fun.name)
#"special" resolves to "input" unless data is a pems object whose
#comment(isPEMS(...)) names a concrete output type
if(output == "special"){
output <- if(is.null(data))
"input" else if(comment(isPEMS(data)) == "other")
"input" else comment(isPEMS(data))
}
#if.missing handling
if.missing <- checkOption(if.missing[1], formals(convertUnits)$if.missing,
"if.missing", "allowed if.missings",
fun.name = fun.name)
ans <- getPEMSElement(!!enquo(input), data, fun.name=fun.name,
if.missing = "return", ref.name="input")
#was
# ans <- if(!hijack)
# checkInput(input = input, data = data, if.missing = if.missing,
# output = "input", fun.name = fun.name) else
# input
#from handling
#temp holds the units currently set on the element (may be NULL)
temp <- checkUnits(ans, if.missing = "return",
output = "units", fun.name = fun.name)
if(is.null(from)){
from <- temp
}else {
#################################
#fix for later
#from could be an alias of temp
#################################
#a supplied from that disagrees with the element's current units is
#rejected unless force = TRUE; on rejection both from and to are
#cleared so no conversion happens below
if(!force && as.character(from) != as.character(temp)){
if(if.missing=="stop")
stop(paste("\t In ", fun.name,"(...) from/input unit mismatch", sep=""),
paste("\n\t [suggest confirming input units/conversion]", sep=""),
paste("\n\t [or convertUnits(..., force = TRUE) if you really want conversion forced]", sep=""),
call. = FALSE, domain = NA)
if(if.missing=="warning")
warning(paste("\t In ", fun.name,"(...) from/input unit mismatch", sep=""),
paste("\n\t [ignoring requested convertUnits]", sep=""),
paste("\n\t [suggest confirming input units/conversion]", sep=""),
paste("\n\t [or convertUnits(..., overwrite = TRUE) if you really want conversion forced]", sep=""),
call. = FALSE, domain = NA)
from <- NULL
to <- NULL
}
}
#if both to and from not set
if(is.null(from) & is.null(to)){
if(if.missing=="stop")
stop(paste("\t In ", fun.name,"(...) to and from not set, unknown or NULL", sep=""),
paste("\n\t [suggest setting both]", sep=""),
call. = FALSE, domain = NA)
if(if.missing=="warning")
warning(paste("\t In ", fun.name,"(...) to and from not set, unknown or NULL", sep=""),
paste("\n\t [ignoring convertUnits request]", sep=""),
paste("\n\t [suggest setting both]", sep=""),
call. = FALSE, domain = NA)
}
#to handling
#missing to falls back to from, i.e. a no-op conversion
if(is.null(to)){
if(if.missing=="stop")
stop(paste("\t In ", fun.name,"(...) to not set/NULL", sep=""),
paste("\n\t [suggest setting to]", sep=""),
call. = FALSE, domain = NA)
if(if.missing=="warning")
warning(paste("\t In ", fun.name,"(...) to not set/NULL", sep=""),
paste("\n\t [ignoring setUnits]", sep=""),
paste("\n\t [suggest setting to in call]", sep=""),
call. = FALSE, domain = NA)
to <- from
}
#stamp the (possibly forced) from units, then let checkUnits apply the
#actual conversion to the target units
if(!is.null(from)){
attributes(ans)$units <- as.character(from)
}
if(!is.null(to)){
ans <- checkUnits(ans, to, unit.conversions = unit.conversions,
fun.name = fun.name)
}
checkOutput(input = ans, data = data, if.missing = if.missing,
fun.name = fun.name, output = output, overwrite = overwrite)
}
########################
########################
##addUnitConversion
########################
########################
#version 0.2.0
#karl 17/09/2010
#Add a unit conversion method to a conversions look-up table.
#to/from: unit ids; conversion: function of one argument or numeric
#multiplier; tag: documentation label; unit.conversions: list to extend
#(non-list starts a new table); overwrite: replace an existing method.
#Returns the updated list.
addUnitConversion <- function(to = NULL, from = NULL, conversion = NULL,
tag = "undocumented",
unit.conversions = ref.unit.conversions, ...,
overwrite = FALSE){
#if not unit.conversion
if(!is.list(unit.conversions))
unit.conversions <- list()
#check to, from and conversion are all there!
if(any(sapply(list(to, from, conversion), is.null)))
stop(paste("\t In addUnitConversion(...) need all of: to, from and conversion", sep=""),
paste("\n\t [suggest setting all in call]", sep=""),
call. = FALSE, domain = NA)
to <- as.character(to)
from <- as.character(from)
tag <- as.character(tag)
if(length(to)<1 | length(from)<1)
stop(paste("\t In addUnitConversion(...) to and/or are not viable ids", sep=""),
paste("\n\t [suggest renaming]", sep=""),
call. = FALSE, domain = NA)
#a numeric conversion is shorthand for "multiply by that factor"
#NOTE(review): built via eval(parse(...)); a closure capturing the factor
#would be safer - confirm before changing
if(is.numeric(conversion))
eval(parse(text=
paste("conversion <- function(x) x * ", conversion, sep="")
))
if(!is.function(conversion))
stop(paste("\t In addUnitConversion(...) conversion not viable as method", sep=""),
paste("\n\t [check help ?addUnitConversion]", sep=""),
call. = FALSE, domain = NA)
#look for an existing method with the same to/from pairing
temp <- NULL
if(length(unit.conversions)>0){
temp <- sapply(unit.conversions, function(x)
if(to %in% x$to & from %in% x$from)
TRUE else FALSE)
if(length(temp[temp])>1){
warning(paste("In addUnitConversion(...) multipe matching conversion methods!", sep=""),
paste("\n\t [corrupt unit.conversions?]", sep =""),
"\n\t [ignoring all but first]",
"\n\t [suggest checking sources]",
call. = FALSE, domain = NA)
}
}
if(is.null(temp) || !any(temp)){
#no duplicate
unit.conversions[[length(unit.conversions)+1]] <- list(to = to, from = from, tag = tag, conversion = conversion)
} else {
if(overwrite){
unit.conversions[temp][[1]]$conversion <- conversion
#NOTE(review): "undocuments" below looks like a typo for "undocumented"
#(the default tag), so existing default tags are never refreshed -
#confirm intent
if(!is.character(unit.conversions[temp][[1]]$tag) || unit.conversions[temp][[1]]$tag == "" ||
unit.conversions[temp][[1]]$tag == "undocuments")
unit.conversions[temp][[1]]$tag <- tag
} else {
stop(paste("\t In addUnitConversion(...) existing conversion method encountered", sep=""),
paste("\n\t [suggest overwrite = TRUE if you really want to do this]", sep=""),
call. = FALSE, domain = NA)
}
}
unit.conversions
}
########################
########################
##addUnitAlias
########################
########################
#version 0.2.0
#karl 17/09/2010
#Add an alias for a unit id in a conversions look-up table.
#ref: existing unit id; alias: new alias; unit.conversions: list to update.
#Returns the updated list (warning, no changes, if ref is not found).
addUnitAlias <- function(ref = NULL, alias = NULL,
unit.conversions = ref.unit.conversions, ...){
#if not unit.conversion
if(!is.list(unit.conversions))
stop(paste("\t In addUnitAlias(...) no unit.conversion to reference", sep=""),
paste("\n\t [suggest updating call/checking ?addUnitAlias]", sep=""),
call. = FALSE, domain = NA)
#check ref, alias are all there!
if(any(sapply(list(ref, alias), is.null)))
stop(paste("\t In addUnitAlias(...) need all of: ref and alias", sep=""),
paste("\n\t [suggest setting all in call]", sep=""),
call. = FALSE, domain = NA)
ref <- as.character(ref)
alias <- as.character(alias)
if(length(ref)<1 | length(alias)<1)
stop(paste("\t In addUnitAlias(...) ref and/or alias not viable ids", sep=""),
paste("\n\t [suggest renaming]", sep=""),
call. = FALSE, domain = NA)
#register alias wherever ref appears in a method's to or from field
#NOTE(review): 1:length(...) indexes out of bounds when the list is empty
#(length 0 -> i takes 1, 0); seq_along would be safe - confirm before changing
temp <- FALSE
for(i in 1:length(unit.conversions)){
if(ref %in% unit.conversions[[i]]$to){
unit.conversions[[i]]$to <- unique(c(unit.conversions[[i]]$to, alias))
temp <- TRUE
}
if(ref %in% unit.conversions[[i]]$from){
unit.conversions[[i]]$from <- unique(c(unit.conversions[[i]]$from, alias))
temp <- TRUE
}
}
if(!temp)
warning(paste("In addUnitAlias(...) ref not found in look-up table", sep=""),
paste("\n\t [no alias updates]", sep=""),
call. = FALSE, domain = NA)
unit.conversions
}
########################
########################
##listUnitConversions
########################
########################
#version 0.2.0
#karl 17/09/2010
#List conversion methods held in a unit conversions look-up table.
#unit.conversions: list to summarise; verbose: include to/from ids in the
#output; to/from: optional unit ids to filter on (a method is kept if
#either matches). Returns a character vector, one entry per method;
#errors if nothing matches.
listUnitConversions <- function(unit.conversions = ref.unit.conversions, ...,
verbose = FALSE, to = NULL, from = NULL){
#if not unit.conversion
if(!is.list(unit.conversions))
stop(paste("\t In listUnitConversions(...) no unit.conversion to reference", sep=""),
paste("\n\t [suggest updating call/checking ?listUnitConversions]", sep=""),
call. = FALSE, domain = NA)
#set up to, from; "" means "no filter"
to <- if(!is.null(to))
as.character(to) else ""
from <- if(!is.null(from))
as.character(from) else ""
######################
#error if to, from no good?
######################
#keep any method whose to or from field matches a supplied filter
if(to != "" | from != ""){
temp <- sapply(unit.conversions, function(x)
if(to %in% x$to | from %in% x$from)
TRUE else FALSE)
unit.conversions <- unit.conversions[temp]
}
if(length(unit.conversions)<1)
stop(paste("\t In listUnitConversions(...) no matched methods located", sep=""),
paste("\n\t [no suggestion]", sep=""),
call. = FALSE, domain = NA)
#build one descriptive string per method
temp.fun <- if(verbose){
function(x)
paste("TAG: ", paste(x$tag, sep ="", collapse =","),
"; FROM:", paste(x$from, sep ="", collapse =","),
"; TO:", paste(x$to, sep ="", collapse =","), sep="")
} else {
function(x)
paste(x$tag, sep ="", collapse =",")
}
sapply(unit.conversions, temp.fun)
}
|
##' Plot two profiles and order the terms by ones that have the most difference
##' in enrichment
## Plot go profiles with biggest differences
##' Plot two profiles, ordering terms by enrichment difference
##'
##' Draws a beside barplot of the two count columns of aProf (columns
##' 3:4), with terms ordered so the largest absolute differences come
##' first.
##'
##' @param aProf profile data.frame; columns 3:4 hold the two counts and
##'   Description the term labels; may carry numGenes1/numGenes2
##'   attributes used for percentage scaling.
##' @param aTitle,anOnto plot title and optional ontology name.
##' @param percentage if TRUE, rescale both count rows to percentages
##'   using the numGenes1/numGenes2 attributes (when both are available
##'   and non-zero).
##' @param HORIZVERT passed to barplot() horiz argument.
##' @param legendText,colores legend labels and bar colours.
##' @param labelWidth width used to shorten term descriptions.
##' @param n number of terms to show (default: all).
##' @param ... further arguments passed to barplot().
plotProfilesMostDiff <- function(aProf, aTitle="Functional Profiles",
anOnto=NULL, percentage=FALSE, HORIZVERT=TRUE,
legendText=NULL, colores=c('white', 'red'),
labelWidth=25, n=NULL, ...) {
freq <- t(as.matrix(aProf[, 3:4]))
if (is.null(n)) {
n <- ncol(freq)
}
desc <- as.character(aProf$Description)
opt <- par(mar = c(4, 12, 4, 4), xpd = TRUE, cex.axis = 0.01)
## restore graphical parameters even if plotting fails part-way
on.exit(par(opt), add = TRUE)
if (percentage) {
numGenes1 <- attr(aProf, "numGenes1")
numGenes2 <- attr(aProf, "numGenes2")
## && (not &) so a NULL attribute short-circuits instead of producing a
## zero-length if() condition; both rows are rescaled together only when
## both gene counts are available and non-zero (previously the second
## row was rescaled unconditionally and failed when numGenes2 was
## NULL or 0)
if (!is.null(numGenes1) && numGenes1 != 0 &&
!is.null(numGenes2) && numGenes2 != 0) {
freq[1, ] <- round((freq[1, ]/numGenes1 * 100), 2)
freq[2, ] <- round((freq[2, ]/numGenes2 * 100), 2)
}
xlim <- c(0, 100)
} else {
xlim <- c(0, max(freq))
}
## order terms by absolute difference, largest first, keep the top n
inorder <- order(abs(freq[1,] - freq[2,]), decreasing=TRUE)
inorder <- head(inorder, n)
bp <- barplot(freq[, inorder], horiz=HORIZVERT, beside=TRUE,
legend.text=legendText, col=colores, xlim=xlim, ...)
text(freq[, inorder], round(bp, 1), freq[, inorder], pos = 4, cex = 0.6)
axis(1, cex.axis=0.8, labels = seq(0, 100, by=20),
at=seq(0, 100, by=20))
axis(2, at=(bp[1, ] + bp[2, ])/2, cex.axis = 0.6, las=2,
labels=goProfiles:::shortName(desc[inorder], labelWidth))
if (is.null(anOnto)) {
title(aTitle)
} else {
title(main=paste(aTitle, ". ", anOnto, " ontology", sep = ""))
}
}
| /R/goprofiles.R | no_license | lianos/ARE.utils | R | false | false | 1,656 | r | ##' Plot two profiles and order the terms by ones that have the most difference
##' in enrichment
## Plot go profiles with biggest differences
#Plot two profiles as a beside barplot, terms ordered by the largest
#absolute difference between the two count columns (aProf columns 3:4).
#percentage: rescale counts using numGenes1/numGenes2 attributes;
#HORIZVERT: barplot horiz argument; n: number of terms shown (default all).
plotProfilesMostDiff <- function(aProf, aTitle="Functional Profiles",
anOnto=NULL, percentage=FALSE, HORIZVERT=TRUE,
legendText=NULL, colores=c('white', 'red'),
labelWidth=25, n=NULL, ...) {
freq <- t(as.matrix(aProf[, 3:4]))
if (is.null(n)) {
n <- ncol(freq)
}
desc <- as.character(aProf$Description)
opt <- par(mar = c(4, 12, 4, 4), xpd = TRUE, cex.axis = 0.01)
if (percentage) {
numGenes1 <- attr(aProf, "numGenes1")
numGenes2 <- attr(aProf, "numGenes2")
#NOTE(review): scalar & with possibly-NULL attributes can give a
#zero-length if() condition (runtime error); && would short-circuit -
#confirm before changing
if (!is.null(numGenes1) & !(numGenes1 == 0) &
!is.null(numGenes2) & !(numGenes2 == 0)) {
freq[1, ] <- round((freq[1, ]/numGenes1 * 100), 2)
}
#NOTE(review): this second-row rescale sits OUTSIDE the guard above, so
#it runs even when numGenes2 is NULL/0 - looks like it belongs inside
freq[2, ] <- round((freq[2, ]/numGenes2 * 100), 2)
xlim <- c(0, 100)
} else {
xlim <- c(0, max(freq))
}
#order terms by absolute difference, largest first, keep the top n
inorder <- order(abs(freq[1,] - freq[2,]), decreasing=TRUE)
inorder <- head(inorder, n)
bp <- barplot(freq[, inorder], horiz=HORIZVERT, beside=TRUE,
legend.text=legendText, col=colores, xlim=xlim, ...)
text(freq[, inorder], round(bp, 1), freq[, inorder], pos = 4, cex = 0.6)
axis(1, cex.axis=0.8, labels = seq(0, 100, by=20),
at=seq(0, 100, by=20))
axis(2, at=(bp[1, ] + bp[2, ])/2, cex.axis = 0.6, las=2,
labels=goProfiles:::shortName(desc[inorder], labelWidth))
if (is.null(anOnto)) {
title(aTitle)
} else {
title(main=paste(aTitle, ". ", anOnto, " ontology", sep = ""))
}
#restore the caller's graphical parameters
par(opt)
}
|
\name{coord_trans}
\alias{coord_trans}
\title{Transformed cartesian coordinate system.}
\usage{
coord_trans(xtrans = "identity", ytrans = "identity",
limx = NULL, limy = NULL)
}
\arguments{
\item{xtrans,ytrans}{transformers for x and y axes}
\item{limx,limy}{limits for x and y axes. (Named so for
backward compatibility)}
}
\description{
\code{coord_trans} is different to scale transformations
in that it occurs after statistical transformation and
will affect the visual appearance of geoms - there is no
guarantee that straight lines will continue to be
straight.
}
\details{
All current transformations only work with continuous
values - see \code{\link[scales]{trans_new}} for list of
transformations, and instructions on how to create your
own.
}
\examples{
\donttest{
# See ?geom_boxplot for other examples
# Three ways of doing transformations in ggplot:
# * by transforming the data
qplot(log10(carat), log10(price), data=diamonds)
# * by transforming the scales
qplot(carat, price, data=diamonds, log="xy")
qplot(carat, price, data=diamonds) + scale_x_log10() + scale_y_log10()
# * by transforming the coordinate system:
qplot(carat, price, data=diamonds) + coord_trans(x = "log10", y = "log10")
# The difference between transforming the scales and
# transforming the coordinate system is that scale
# transformation occurs BEFORE statistics, and coordinate
# transformation afterwards. Coordinate transformation also
# changes the shape of geoms:
d <- subset(diamonds, carat > 0.5)
qplot(carat, price, data = d, log="xy") +
geom_smooth(method="lm")
qplot(carat, price, data = d) +
geom_smooth(method="lm") +
coord_trans(x = "log10", y = "log10")
# Here I used a subset of diamonds so that the smoothed line didn't
# drop below zero, which obviously causes problems on the log-transformed
# scale
# With a combination of scale and coordinate transformation, it's
# possible to do back-transformations:
library(scales)
qplot(carat, price, data=diamonds, log="xy") +
geom_smooth(method="lm") +
coord_trans(x = exp_trans(10), y = exp_trans(10))
# cf.
qplot(carat, price, data=diamonds) + geom_smooth(method = "lm")
# Also works with discrete scales
df <- data.frame(a = abs(rnorm(26)),letters)
plot <- ggplot(df,aes(a,letters)) + geom_point()
plot + coord_trans(x = "log10")
plot + coord_trans(x = "sqrt")
}
}
| /man/coord_trans.Rd | no_license | ThierryO/ggplot2 | R | false | false | 2,370 | rd | \name{coord_trans}
\alias{coord_trans}
\title{Transformed cartesian coordinate system.}
\usage{
coord_trans(xtrans = "identity", ytrans = "identity",
limx = NULL, limy = NULL)
}
\arguments{
\item{xtrans,ytrans}{transformers for x and y axes}
\item{limx,limy}{limits for x and y axes. (Named so for
backward compatibility)}
}
\description{
\code{coord_trans} is different to scale transformations
in that it occurs after statistical transformation and
will affect the visual appearance of geoms - there is no
guarantee that straight lines will continue to be
straight.
}
\details{
All current transformations only work with continuous
values - see \code{\link[scales]{trans_new}} for list of
transformations, and instructions on how to create your
own.
}
\examples{
\donttest{
# See ?geom_boxplot for other examples
# Three ways of doing transformations in ggplot:
# * by transforming the data
qplot(log10(carat), log10(price), data=diamonds)
# * by transforming the scales
qplot(carat, price, data=diamonds, log="xy")
qplot(carat, price, data=diamonds) + scale_x_log10() + scale_y_log10()
# * by transforming the coordinate system:
qplot(carat, price, data=diamonds) + coord_trans(x = "log10", y = "log10")
# The difference between transforming the scales and
# transforming the coordinate system is that scale
# transformation occurs BEFORE statistics, and coordinate
# transformation afterwards. Coordinate transformation also
# changes the shape of geoms:
d <- subset(diamonds, carat > 0.5)
qplot(carat, price, data = d, log="xy") +
geom_smooth(method="lm")
qplot(carat, price, data = d) +
geom_smooth(method="lm") +
coord_trans(x = "log10", y = "log10")
# Here I used a subset of diamonds so that the smoothed line didn't
# drop below zero, which obviously causes problems on the log-transformed
# scale
# With a combination of scale and coordinate transformation, it's
# possible to do back-transformations:
library(scales)
qplot(carat, price, data=diamonds, log="xy") +
geom_smooth(method="lm") +
coord_trans(x = exp_trans(10), y = exp_trans(10))
# cf.
qplot(carat, price, data=diamonds) + geom_smooth(method = "lm")
# Also works with discrete scales
df <- data.frame(a = abs(rnorm(26)),letters)
plot <- ggplot(df,aes(a,letters)) + geom_point()
plot + coord_trans(x = "log10")
plot + coord_trans(x = "sqrt")
}
}
|
# Cluster cars for the same vendor:
#
# Exploratory script: selects one vendor's listings from the 2007 dataset,
# runs k-means and hierarchical clustering on price/mileage/registration,
# and ends with dendrogram experiments on the USArrests demo data.
rm(list=ls())
library("data.table")
library("sets")
library("survival")
#Set working directory
project_directory<- "C:/Users/Nk/Documents/Uni/MA"
data_directory<-"/Pkw/MobileDaten/generatedData/final_dataset/"
wd<- paste0(project_directory, data_directory)
setwd(wd)
#Load dataset: provides the data.table 'df'
load("df_full2007.RData")
#######################################################################
# Restrict to a single vendor and model ("A150"); keep one row per car_ID
vendor<- "7723851"
relCols<- c("Erstzulassung", "valuePrice", "Kilometer", "car_ID", "Typ")
mydata<- df[vendor_ID==vendor, relCols, with=F]
mydata<- mydata[Typ=="A150"]
mydata$Erstzulassung<- as.numeric(mydata$Erstzulassung)
mydata$Typ<- NULL
mydata<- mydata[!duplicated(mydata$car_ID)]
#######################################################################
# Calculate distances for cars from same vendor -----------------------
# NOTE(review): this data.table call has a by= clause but no j expression,
# and it references columns (vendor_ID, Typ, Eigenschaften, Schaltung)
# that were dropped above -- looks like an unfinished aggregation; confirm
# the intended grouping.
mydata<- mydata[, by=.(Erstzulassung, vendor_ID, Typ, Eigenschaften, Schaltung)]
clusterVars<- c("valuePrice", "Kilometer", "Erstzulassung")
mydata.c<- mydata[,clusterVars, with=F]
#######################################################################
#Experiment with other datasets:
mydata.c <- scale(mydata.c) # standardize variables
mydata.c <- na.omit(mydata.c) # listwise deletion of missing
# K-Means Clustering with 5 clusters
fit <- kmeans(mydata.c, 5)
# Cluster Plot against 1st 2 principal components
# NOTE(review): plotcluster()/discrcoord() come from the 'fpc' package,
# which is never attached in this script -- confirm library(fpc) elsewhere.
plotcluster(mydata.c, fit$cluster) # your usual method
dcmat <- discrcoord(mydata.c, fit$cluster)
#plot(dcmat$proj[,1], dcmat$proj[,2], pch="") # scatter plot of 1st two dcs
text(dcmat$proj[,1], dcmat$proj[,2], labels=mydata$car_ID) # label points with car_ID
library(cluster)
clusplot(mydata, fit$cluster, color=TRUE, shade=TRUE,
         labels=2, lines=0)
# Ward Hierarchical Clustering
d <- dist(mydata, method = "euclidean") # distance matrix
fit <- hclust(d, method="ward.D")
plot(fit) # display dendrogram
groups <- cutree(fit, k=20) # cut tree into 20 clusters
# draw dendrogram with red borders around 5 clusters
# NOTE(review): k=5 here vs k=20 in cutree above -- confirm which is wanted.
rect.hclust(fit, k=5, border="red")
labs = paste("sta_",1:50,sep="") #new labels
rownames(USArrests)<-labs #set new row names
hc <- hclust(dist(USArrests), "ave")
library(ggplot2)
library(ggdendro)
#convert cluster object to use with ggplot
dendr <- dendro_data(hc, type="rectangle")
#your own labels (now rownames) are supplied in geom_text() and label=label
require(graphics)
labs = paste("sta_",1:50,sep="") #new labels
USArrests2<-USArrests #new data frame (just to keep original unchanged)
rownames(USArrests2)<-labs #set new row names
hc <- hclust(dist(USArrests2), "ave")
par(mar=c(3,1,1,5))
plot(as.dendrogram(hc),horiz=T)
| /post_data_prep/step3_cluster_cars_same_vendor.R | no_license | nvkov/MA_Code | R | false | false | 2,655 | r | # Cluster cars for the same vendor:
# Exploratory script: selects one vendor's listings from the 2007 dataset,
# runs k-means and hierarchical clustering on price/mileage/registration,
# and ends with dendrogram experiments on the USArrests demo data.
rm(list=ls())
library("data.table")
library("sets")
library("survival")
#Set working directory
project_directory<- "C:/Users/Nk/Documents/Uni/MA"
data_directory<-"/Pkw/MobileDaten/generatedData/final_dataset/"
wd<- paste0(project_directory, data_directory)
setwd(wd)
#Load dataset: provides the data.table 'df'
load("df_full2007.RData")
#######################################################################
# Restrict to a single vendor and model ("A150"); keep one row per car_ID
vendor<- "7723851"
relCols<- c("Erstzulassung", "valuePrice", "Kilometer", "car_ID", "Typ")
mydata<- df[vendor_ID==vendor, relCols, with=F]
mydata<- mydata[Typ=="A150"]
mydata$Erstzulassung<- as.numeric(mydata$Erstzulassung)
mydata$Typ<- NULL
mydata<- mydata[!duplicated(mydata$car_ID)]
#######################################################################
# Calculate distances for cars from same vendor -----------------------
# NOTE(review): this data.table call has a by= clause but no j expression,
# and it references columns (vendor_ID, Typ, Eigenschaften, Schaltung)
# that were dropped above -- looks like an unfinished aggregation; confirm
# the intended grouping.
mydata<- mydata[, by=.(Erstzulassung, vendor_ID, Typ, Eigenschaften, Schaltung)]
clusterVars<- c("valuePrice", "Kilometer", "Erstzulassung")
mydata.c<- mydata[,clusterVars, with=F]
#######################################################################
#Experiment with other datasets:
mydata.c <- scale(mydata.c) # standardize variables
mydata.c <- na.omit(mydata.c) # listwise deletion of missing
# K-Means Clustering with 5 clusters
fit <- kmeans(mydata.c, 5)
# Cluster Plot against 1st 2 principal components
# NOTE(review): plotcluster()/discrcoord() come from the 'fpc' package,
# which is never attached in this script -- confirm library(fpc) elsewhere.
plotcluster(mydata.c, fit$cluster) # your usual method
dcmat <- discrcoord(mydata.c, fit$cluster)
#plot(dcmat$proj[,1], dcmat$proj[,2], pch="") # scatter plot of 1st two dcs
text(dcmat$proj[,1], dcmat$proj[,2], labels=mydata$car_ID) # label points with car_ID
library(cluster)
clusplot(mydata, fit$cluster, color=TRUE, shade=TRUE,
         labels=2, lines=0)
# Ward Hierarchical Clustering
d <- dist(mydata, method = "euclidean") # distance matrix
fit <- hclust(d, method="ward.D")
plot(fit) # display dendrogram
groups <- cutree(fit, k=20) # cut tree into 20 clusters
# draw dendrogram with red borders around 5 clusters
# NOTE(review): k=5 here vs k=20 in cutree above -- confirm which is wanted.
rect.hclust(fit, k=5, border="red")
labs = paste("sta_",1:50,sep="") #new labels
rownames(USArrests)<-labs #set new row names
hc <- hclust(dist(USArrests), "ave")
library(ggplot2)
library(ggdendro)
#convert cluster object to use with ggplot
dendr <- dendro_data(hc, type="rectangle")
#your own labels (now rownames) are supplied in geom_text() and label=label
require(graphics)
labs = paste("sta_",1:50,sep="") #new labels
USArrests2<-USArrests #new data frame (just to keep original unchanged)
rownames(USArrests2)<-labs #set new row names
hc <- hclust(dist(USArrests2), "ave")
par(mar=c(3,1,1,5))
plot(as.dendrogram(hc),horiz=T)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_and_reporting.R
\name{umx_aggregate}
\alias{umx_aggregate}
\title{Convenient formula-based cross-tabs & built-in summary functions}
\usage{
umx_aggregate(
formula = DV ~ condition,
data = df,
what = c("mean_sd", "n"),
digits = 2,
report = c("markdown", "html", "txt")
)
}
\arguments{
\item{formula}{The aggregation formula. e.g., DV ~ condition.}
\item{data}{frame to aggregate (defaults to df for common case)}
\item{what}{function to use. Default reports "mean (sd)".}
\item{digits}{to round results to.}
\item{report}{Format for the table: Default is markdown.}
}
\value{
\itemize{
\item table
}
}
\description{
A common task is preparing summary tables, aggregating over some grouping factor.
Like mean and sd of age, by sex. R's \code{\link[=aggregate]{aggregate()}} function is useful and powerful, allowing
xtabs based on a formula.
umx_aggregate makes using it a bit easier. In particular, it has some common functions
for summarizing data built-in, like "mean (sd)" (the default).
\code{umx_aggregate(mpg ~ cyl, data = mtcars, what = "mean_sd")}
\tabular{ll}{
cyl \tab mpg\cr
4 (n = 11) \tab 26.66 (4.51)\cr
6 (n = 7) \tab 19.74 (1.45)\cr
8 (n = 14) \tab 15.1 (2.56)\cr
}
}
\examples{
# =====================================
# = Basic use, compare with aggregate =
# =====================================
aggregate(mpg ~ cyl, FUN = mean, na.rm = TRUE, data = mtcars)
umx_aggregate(mpg ~ cyl, data = mtcars)
# =============================================
# = Use different (or user-defined) functions =
# =============================================
umx_aggregate(mpg ~ cyl, data = mtcars, what = "n")
umx_aggregate(mpg ~ cyl, data = mtcars, what = function(x){sum(!is.na(x))})
# turn off markdown
umx_aggregate(mpg ~ cyl, data = mtcars, report = "txt")
# ============================================
# = More than one item on the left hand side =
# ============================================
umx_aggregate(cbind(mpg, qsec) ~ cyl, data = mtcars, digits = 3)
# Transpose table
t(umx_aggregate(cbind(mpg, qsec) ~ cyl, data = mtcars))
\dontrun{
umx_aggregate(cbind(moodAvg, mood) ~ condition, data = study1)
}
}
\references{
\itemize{
\item \url{https://github.com/tbates/umx}, \url{https://tbates.github.io}
}
}
\seealso{
\itemize{
\item \code{\link[=umx_apply]{umx_apply()}}, \code{\link[=aggregate]{aggregate()}}
}
Other Reporting Functions:
\code{\link{umxAPA}()},
\code{\link{umxFactorScores}()},
\code{\link{umxGetParameters}()},
\code{\link{umxParameters}()},
\code{\link{umx_time}()},
\code{\link{umx}}
}
\concept{Reporting Functions}
| /man/umx_aggregate.Rd | no_license | MATA62N/umx | R | false | true | 2,665 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_and_reporting.R
\name{umx_aggregate}
\alias{umx_aggregate}
\title{Convenient formula-based cross-tabs & built-in summary functions}
\usage{
umx_aggregate(
formula = DV ~ condition,
data = df,
what = c("mean_sd", "n"),
digits = 2,
report = c("markdown", "html", "txt")
)
}
\arguments{
\item{formula}{The aggregation formula. e.g., DV ~ condition.}
\item{data}{frame to aggregate (defaults to df for common case)}
\item{what}{function to use. Default reports "mean (sd)".}
\item{digits}{to round results to.}
\item{report}{Format for the table: Default is markdown.}
}
\value{
\itemize{
\item table
}
}
\description{
A common task is preparing summary tables, aggregating over some grouping factor.
Like mean and sd of age, by sex. R's \code{\link[=aggregate]{aggregate()}} function is useful and powerful, allowing
xtabs based on a formula.
umx_aggregate makes using it a bit easier. In particular, it has some common functions
for summarizing data built-in, like "mean (sd)" (the default).
\code{umx_aggregate(mpg ~ cyl, data = mtcars, what = "mean_sd")}
\tabular{ll}{
cyl \tab mpg\cr
4 (n = 11) \tab 26.66 (4.51)\cr
6 (n = 7) \tab 19.74 (1.45)\cr
8 (n = 14) \tab 15.1 (2.56)\cr
}
}
\examples{
# =====================================
# = Basic use, compare with aggregate =
# =====================================
aggregate(mpg ~ cyl, FUN = mean, na.rm = TRUE, data = mtcars)
umx_aggregate(mpg ~ cyl, data = mtcars)
# =============================================
# = Use different (or user-defined) functions =
# =============================================
umx_aggregate(mpg ~ cyl, data = mtcars, what = "n")
umx_aggregate(mpg ~ cyl, data = mtcars, what = function(x){sum(!is.na(x))})
# turn off markdown
umx_aggregate(mpg ~ cyl, data = mtcars, report = "txt")
# ============================================
# = More than one item on the left hand side =
# ============================================
umx_aggregate(cbind(mpg, qsec) ~ cyl, data = mtcars, digits = 3)
# Transpose table
t(umx_aggregate(cbind(mpg, qsec) ~ cyl, data = mtcars))
\dontrun{
umx_aggregate(cbind(moodAvg, mood) ~ condition, data = study1)
}
}
\references{
\itemize{
\item \url{https://github.com/tbates/umx}, \url{https://tbates.github.io}
}
}
\seealso{
\itemize{
\item \code{\link[=umx_apply]{umx_apply()}}, \code{\link[=aggregate]{aggregate()}}
}
Other Reporting Functions:
\code{\link{umxAPA}()},
\code{\link{umxFactorScores}()},
\code{\link{umxGetParameters}()},
\code{\link{umxParameters}()},
\code{\link{umx_time}()},
\code{\link{umx}}
}
\concept{Reporting Functions}
|
# Simple but effective by way of Unicode regex group.
# Strips a string down to Arabic text: removes every character that is
# neither a space nor an Arabic letter (parentheses included), then
# collapses runs of spaces left behind into a single space.
#
# @param string character vector to clean
# @return the cleaned character vector
cleanArabicString <- function(string) {
  # First, remove everything that is not a space or an Arabic
  # character; opening/closing parentheses are removed explicitly too.
  # BUG FIX: the original referenced the undefined global
  # 'text.single_string.v' instead of the 'string' parameter.
  arabic_only <- gsub('[^( \\p{Arabic})]|[()]', '', string, perl = TRUE)
  # Second, collapse all occurrences of multiple spaces into one each.
  gsub('[ ]{2,}', ' ', arabic_only, perl = TRUE)
}
| /homework/schmid_jonathan/lib/cleanup.R | no_license | Islamicate-DH/hw | R | false | false | 499 | r | # Simple but effective by way of Unicode regex group
# Strips a string down to Arabic text: removes every character that is
# neither a space nor an Arabic letter (parentheses included), then
# collapses runs of spaces left behind into a single space.
#
# @param string character vector to clean
# @return the cleaned character vector
cleanArabicString <- function(string) {
  # First, remove everything that is not a space or an Arabic
  # character; opening/closing parentheses are removed explicitly too.
  # BUG FIX: the original referenced the undefined global
  # 'text.single_string.v' instead of the 'string' parameter.
  arabic_only <- gsub('[^( \\p{Arabic})]|[()]', '', string, perl = TRUE)
  # Second, collapse all occurrences of multiple spaces into one each.
  gsub('[ ]{2,}', ' ', arabic_only, perl = TRUE)
}
|
#' @title Apply a function to all elements in a list
#'
#' @description
#' Applies a function to the unlisted elements of a list
#'
#' @param alist a list
#' @param f a function to be applied
#' @param ... further arguments passed on to \code{f}
#' @return value
#' @author Gaston Sanchez
#' @seealso \code{\link{lapply}}, \code{\link{sapply}}
#' @export
#' @examples
#' # say you have some list
#' list1 = list(1:5, runif(3), rnorm(4))
#'
#' # get the sum of all elements in list1
#' funlist(list1, sum)
#'
#' # get the maximum element in list1
#' funlist(list1, max)
#'
#' # say you have missing data
#' list2 = list(c(1:4, NA), runif(3), rnorm(4))
#'
#' # get the sum removing NAs
#' funlist(list2, sum, na.rm=TRUE)
funlist <- function(alist, f, ...)
{
  # validate inputs before touching them
  if (!is.list(alist))
    stop("\nA list is required")
  # fixed typo in user-facing message ("requried") and unified quoting
  if (!is.function(f))
    stop("\nA function is required")
  # flatten the list and apply f to the resulting vector
  f(unlist(alist), ...)
}
#' @title Sum of all elements in a list
#'
#' @description
#' Convenience wrapper around \code{funlist} with \code{f = sum}.
#'
#' @param alist a list
#' @param na.rm logical indicating whether missing values should be removed
#' @return the sum of all unlisted elements
#' @author Gaston Sanchez
#' @seealso \code{\link{funlist}}
#' @export
#' @examples
#' # a list with numeric elements
#' list1 = list(1:5, runif(3), rnorm(4))
#'
#' # sum of every element in list1
#' sumlist(list1)
#'
#' # a list containing missing values
#' list2 = list(c(1:4, NA), runif(3), rnorm(4))
#'
#' # sum of all elements in list2, ignoring NAs
#' sumlist(list2, na.rm = TRUE)
sumlist <- function(alist, na.rm = FALSE) funlist(alist, sum, na.rm = na.rm)
#' @title Product of all elements in a list
#'
#' @description
#' Convenience wrapper around \code{funlist} with \code{f = prod}.
#'
#' @param alist a list
#' @param na.rm logical indicating whether missing values should be removed
#' @return the product of all unlisted elements
#' @author Gaston Sanchez
#' @seealso \code{\link{funlist}}
#' @export
#' @examples
#' # a list with numeric elements
#' list1 = list(1:5, runif(3), rnorm(4))
#'
#' # product of every element in list1
#' prodlist(list1)
#'
#' # a list containing missing values
#' list2 = list(c(1:4, NA), runif(3), rnorm(4))
#'
#' # product of all elements in list2, ignoring NAs
#' prodlist(list2, na.rm = TRUE)
prodlist <- function(alist, na.rm = FALSE) funlist(alist, prod, na.rm = na.rm)
#' @title Maximum of all elements in a list
#'
#' @description
#' Convenience wrapper around \code{funlist} with \code{f = max}.
#'
#' @param alist a list
#' @param na.rm logical indicating whether missing values should be removed
#' @return the maximum over all unlisted elements
#' @author Gaston Sanchez
#' @seealso \code{\link{funlist}}
#' @export
#' @examples
#' # a list with numeric elements
#' list1 = list(1:5, runif(3), rnorm(4))
#'
#' # maximum of every element in list1
#' maxlist(list1)
#'
#' # a list containing missing values
#' list2 = list(c(1:4, NA), runif(3), rnorm(4))
#'
#' # maximum of all elements in list2, ignoring NAs
#' maxlist(list2, na.rm = TRUE)
maxlist <- function(alist, na.rm = FALSE) funlist(alist, max, na.rm = na.rm)
#' @title Minimum of all elements in a list
#'
#' @description
#' Convenience wrapper around \code{funlist} with \code{f = min}.
#'
#' @param alist a list
#' @param na.rm logical indicating whether missing values should be removed
#' @return the minimum over all unlisted elements
#' @author Gaston Sanchez
#' @seealso \code{\link{funlist}}
#' @export
#' @examples
#' # a list with numeric elements
#' list1 = list(1:5, runif(3), rnorm(4))
#'
#' # minimum of every element in list1
#' minlist(list1)
#'
#' # a list containing missing values
#' list2 = list(c(1:4, NA), runif(3), rnorm(4))
#'
#' # minimum of all elements in list2, ignoring NAs
#' minlist(list2, na.rm = TRUE)
minlist <- function(alist, na.rm = FALSE) funlist(alist, min, na.rm = na.rm)
#' @title Mean of all elements in a list
#'
#' @description
#' Convenience wrapper around \code{funlist} with \code{f = mean}.
#'
#' @param alist a list
#' @param na.rm logical indicating whether missing values should be removed
#' @return the mean of all unlisted elements
#' @author Gaston Sanchez
#' @seealso \code{\link{funlist}}
#' @export
#' @examples
#' # a list with numeric elements
#' list1 = list(1:5, runif(3), rnorm(4))
#'
#' # mean of every element in list1
#' meanlist(list1)
#'
#' # a list containing missing values
#' list2 = list(c(1:4, NA), runif(3), rnorm(4))
#'
#' # mean of all elements in list2, ignoring NAs
#' meanlist(list2, na.rm = TRUE)
meanlist <- function(alist, na.rm = FALSE) funlist(alist, mean, na.rm = na.rm)
| /R/funlist.r | no_license | gastonstat/turner | R | false | false | 4,527 | r | #' @title Apply a function to all elements in a list
#'
#' @description
#' Applies a function to the unlisted elements of a list
#'
#' @param alist a list
#' @param f a function to be applied
#' @param ... further arguments passed on to \code{f}
#' @return value
#' @author Gaston Sanchez
#' @seealso \code{\link{lapply}}, \code{\link{sapply}}
#' @export
#' @examples
#' # say you have some list
#' list1 = list(1:5, runif(3), rnorm(4))
#'
#' # get the sum of all elements in list1
#' funlist(list1, sum)
#'
#' # get the maximum element in list1
#' funlist(list1, max)
#'
#' # say you have missing data
#' list2 = list(c(1:4, NA), runif(3), rnorm(4))
#'
#' # get the sum removing NAs
#' funlist(list2, sum, na.rm=TRUE)
funlist <- function(alist, f, ...)
{
  # validate inputs before touching them
  if (!is.list(alist))
    stop("\nA list is required")
  # fixed typo in user-facing message ("requried") and unified quoting
  if (!is.function(f))
    stop("\nA function is required")
  # flatten the list and apply f to the resulting vector
  f(unlist(alist), ...)
}
#' @title Sum of all elements in a list
#'
#' @description
#' Convenience wrapper around \code{funlist} with \code{f = sum}.
#'
#' @param alist a list
#' @param na.rm logical indicating whether missing values should be removed
#' @return the sum of all unlisted elements
#' @author Gaston Sanchez
#' @seealso \code{\link{funlist}}
#' @export
#' @examples
#' # a list with numeric elements
#' list1 = list(1:5, runif(3), rnorm(4))
#'
#' # sum of every element in list1
#' sumlist(list1)
#'
#' # a list containing missing values
#' list2 = list(c(1:4, NA), runif(3), rnorm(4))
#'
#' # sum of all elements in list2, ignoring NAs
#' sumlist(list2, na.rm = TRUE)
sumlist <- function(alist, na.rm = FALSE) funlist(alist, sum, na.rm = na.rm)
#' @title Product of all elements in a list
#'
#' @description
#' Convenience wrapper around \code{funlist} with \code{f = prod}.
#'
#' @param alist a list
#' @param na.rm logical indicating whether missing values should be removed
#' @return the product of all unlisted elements
#' @author Gaston Sanchez
#' @seealso \code{\link{funlist}}
#' @export
#' @examples
#' # a list with numeric elements
#' list1 = list(1:5, runif(3), rnorm(4))
#'
#' # product of every element in list1
#' prodlist(list1)
#'
#' # a list containing missing values
#' list2 = list(c(1:4, NA), runif(3), rnorm(4))
#'
#' # product of all elements in list2, ignoring NAs
#' prodlist(list2, na.rm = TRUE)
prodlist <- function(alist, na.rm = FALSE) funlist(alist, prod, na.rm = na.rm)
#' @title Maximum of all elements in a list
#'
#' @description
#' Convenience wrapper around \code{funlist} with \code{f = max}.
#'
#' @param alist a list
#' @param na.rm logical indicating whether missing values should be removed
#' @return the maximum over all unlisted elements
#' @author Gaston Sanchez
#' @seealso \code{\link{funlist}}
#' @export
#' @examples
#' # a list with numeric elements
#' list1 = list(1:5, runif(3), rnorm(4))
#'
#' # maximum of every element in list1
#' maxlist(list1)
#'
#' # a list containing missing values
#' list2 = list(c(1:4, NA), runif(3), rnorm(4))
#'
#' # maximum of all elements in list2, ignoring NAs
#' maxlist(list2, na.rm = TRUE)
maxlist <- function(alist, na.rm = FALSE) funlist(alist, max, na.rm = na.rm)
#' @title Minimum of all elements in a list
#'
#' @description
#' Convenience wrapper around \code{funlist} with \code{f = min}.
#'
#' @param alist a list
#' @param na.rm logical indicating whether missing values should be removed
#' @return the minimum over all unlisted elements
#' @author Gaston Sanchez
#' @seealso \code{\link{funlist}}
#' @export
#' @examples
#' # a list with numeric elements
#' list1 = list(1:5, runif(3), rnorm(4))
#'
#' # minimum of every element in list1
#' minlist(list1)
#'
#' # a list containing missing values
#' list2 = list(c(1:4, NA), runif(3), rnorm(4))
#'
#' # minimum of all elements in list2, ignoring NAs
#' minlist(list2, na.rm = TRUE)
minlist <- function(alist, na.rm = FALSE) funlist(alist, min, na.rm = na.rm)
#' @title Mean of all elements in a list
#'
#' @description
#' Convenience wrapper around \code{funlist} with \code{f = mean}.
#'
#' @param alist a list
#' @param na.rm logical indicating whether missing values should be removed
#' @return the mean of all unlisted elements
#' @author Gaston Sanchez
#' @seealso \code{\link{funlist}}
#' @export
#' @examples
#' # a list with numeric elements
#' list1 = list(1:5, runif(3), rnorm(4))
#'
#' # mean of every element in list1
#' meanlist(list1)
#'
#' # a list containing missing values
#' list2 = list(c(1:4, NA), runif(3), rnorm(4))
#'
#' # mean of all elements in list2, ignoring NAs
#' meanlist(list2, na.rm = TRUE)
meanlist <- function(alist, na.rm = FALSE) funlist(alist, mean, na.rm = na.rm)
|
corr <- function(directory, threshold = 0) {
        ## 'directory' is a character vector of length 1 indicating
        ## the location of the CSV files
        ## 'threshold' is a numeric vector of length 1 indicating the
        ## number of completely observed observations (on all
        ## variables) required to compute the correlation between
        ## nitrate and sulfate; the default is 0
        ## Return a numeric vector of correlations
        ## Accumulator: one row (id, nobs, corr) per monitor file
        result <- data.frame()
        ## NOTE(review): the monitor count 332 is hard-coded; confirm it
        ## matches the contents of 'directory' if the dataset changes.
        for (i in 1:332) {
                ## Build the zero-padded file name, e.g. "001.csv"
                file <- paste(directory, "/", formatC(i, width=3, flag="0"), ".csv", sep="")
                temp <- read.csv(file, header = TRUE)
                ## Count of rows with no missing value in any column
                obs <- sum(complete.cases(temp))
                ## Correlation over all pairwise-complete sulfate/nitrate pairs
                corr <- cor(temp$sulfate,temp$nitrate, use="pairwise.complete.obs")
                result <- rbind(result, c(i,obs,corr))
        }
        names(result) <- c("id","nobs","corr")
        ## Keep only monitors whose complete-case count exceeds the threshold
        thresholded <- subset(result, nobs > threshold)
        return(thresholded$corr)
} | /R Programming/Programming Assignment 1/corr.R | no_license | hebuguiqu/jhu_data_science_coursera | R | false | false | 908 | r | corr <- function(directory, threshold = 0) {
        ## 'directory' is a character vector of length 1 indicating
        ## the location of the CSV files
        ## 'threshold' is a numeric vector of length 1 indicating the
        ## number of completely observed observations (on all
        ## variables) required to compute the correlation between
        ## nitrate and sulfate; the default is 0
        ## Return a numeric vector of correlations
        ## Accumulator: one row (id, nobs, corr) per monitor file
        result <- data.frame()
        ## NOTE(review): the monitor count 332 is hard-coded; confirm it
        ## matches the contents of 'directory' if the dataset changes.
        for (i in 1:332) {
                ## Build the zero-padded file name, e.g. "001.csv"
                file <- paste(directory, "/", formatC(i, width=3, flag="0"), ".csv", sep="")
                temp <- read.csv(file, header = TRUE)
                ## Count of rows with no missing value in any column
                obs <- sum(complete.cases(temp))
                ## Correlation over all pairwise-complete sulfate/nitrate pairs
                corr <- cor(temp$sulfate,temp$nitrate, use="pairwise.complete.obs")
                result <- rbind(result, c(i,obs,corr))
        }
        names(result) <- c("id","nobs","corr")
        ## Keep only monitors whose complete-case count exceeds the threshold
        thresholded <- subset(result, nobs > threshold)
        return(thresholded$corr)
} |
library(geozoning)
### Name: contourArea
### Title: contourArea
### Aliases: contourArea
### ** Examples
# Auto-generated example for geozoning::contourArea: build contour lines
# on the bundled demo map, then query one of them (presumably its
# enclosed area -- see ?contourArea).
data(mapTest)
cL=list()
cL=contourAuto(cL,mapTest$step,mapTest$xsize,mapTest$ysize,mapTest$krigGrid,c(5,7),mapTest$boundary)
contourArea(cL[[8]])
| /data/genthat_extracted_code/geozoning/examples/contourArea.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 258 | r | library(geozoning)
### Name: contourArea
### Title: contourArea
### Aliases: contourArea
### ** Examples
# Auto-generated example for geozoning::contourArea: build contour lines
# on the bundled demo map, then query one of them (presumably its
# enclosed area -- see ?contourArea).
data(mapTest)
cL=list()
cL=contourAuto(cL,mapTest$step,mapTest$xsize,mapTest$ysize,mapTest$krigGrid,c(5,7),mapTest$boundary)
contourArea(cL[[8]])
|
##
## Question 2: Have total emissions from PM2.5 decreased in the Baltimore City,
## Maryland (fips == "24510") from 1999 to 2008? Use the base plotting system to
## make a plot answering this question.
##
library(dplyr)
rm(list = ls())
## Load the NEI emissions table and the source classification table.
nei <- readRDS("summarySCC_PM25.rds")
scc <- readRDS("Source_Classification_Code.rds")
## Keep only the Baltimore City records, then total the emissions per year.
yearly <- nei %>%
        filter(fips == "24510") %>%
        group_by(year) %>%
        summarize(emissions = sum(Emissions))
## Render the answer as a base-graphics line plot written to a PNG file.
png("./plot2.png")
## xaxt = "n" suppresses the default x-axis so every year can be labelled
## explicitly below (the default axis omits 1999).
plot(yearly$year, yearly$emissions, type = "b",
     xaxt = "n", xlab = "Year", ylab = "PM2.5 Emissions (tons)",
     main = "Total Baltimore PM2.5 Emissions (1999 ~ 2008)",
     lwd = 2, col = "blue")
axis(side = 1, at = yearly$year)
dev.off()
| /plot2.R | no_license | wanyx2015/ExData_Plotting2 | R | false | false | 887 | r | ##
## Question 2: Have total emissions from PM2.5 decreased in the Baltimore City,
## Maryland (fips == "24510") from 1999 to 2008? Use the base plotting system to
## make a plot answering this question.
##
library(dplyr)
rm(list = ls())
## Load the NEI emissions table and the source classification table.
nei <- readRDS("summarySCC_PM25.rds")
scc <- readRDS("Source_Classification_Code.rds")
## Keep only the Baltimore City records, then total the emissions per year.
yearly <- nei %>%
        filter(fips == "24510") %>%
        group_by(year) %>%
        summarize(emissions = sum(Emissions))
## Render the answer as a base-graphics line plot written to a PNG file.
png("./plot2.png")
## xaxt = "n" suppresses the default x-axis so every year can be labelled
## explicitly below (the default axis omits 1999).
plot(yearly$year, yearly$emissions, type = "b",
     xaxt = "n", xlab = "Year", ylab = "PM2.5 Emissions (tons)",
     main = "Total Baltimore PM2.5 Emissions (1999 ~ 2008)",
     lwd = 2, col = "blue")
axis(side = 1, at = yearly$year)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/name_builder.R
\name{pkg_name_changed}
\alias{pkg_name_changed}
\title{Scan package changes by name only}
\usage{
pkg_name_changed(startNew = FALSE)
}
\arguments{
\item{startNew}{Default FALSE, compare user's environment with name table
shipped with this package, only update difference. If True, build from
scratch.}
}
\value{
list(pkg_to_add, pkg_to_remove)
}
\description{
Compare the current package names with the previous list \code{pkg_list}.
}
\details{
Use \code{.packages(all.available = TRUE)} to check the folders under the
library location path \code{lib.loc}. This is faster than checking both the
name and the version, but it has more false positives. It is recommended
only when scanning package names and versions together is too slow for you.
}
| /man/pkg_name_changed.Rd | no_license | dracodoc/namebrowser | R | false | true | 810 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/name_builder.R
\name{pkg_name_changed}
\alias{pkg_name_changed}
\title{Scan package changes by name only}
\usage{
pkg_name_changed(startNew = FALSE)
}
\arguments{
\item{startNew}{Default FALSE, compare user's environment with name table
shipped with this package, only update difference. If True, build from
scratch.}
}
\value{
list(pkg_to_add, pkg_to_remove)
}
\description{
Compare the current package names with the previous list \code{pkg_list}.
}
\details{
Use \code{.packages(all.available = TRUE)} to check the folders under the
library location path \code{lib.loc}. This is faster than checking both the
name and the version, but it has more false positives. It is recommended
only when scanning package names and versions together is too slow for you.
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/sum_up.R
\name{sum_up}
\alias{sum_up}
\alias{sum_up_}
\title{Gives summary statistics (corresponds to Stata command summarize)}
\usage{
sum_up(x, ..., d = FALSE, w = NULL, i = NULL, digits = 3)
sum_up_(x, ..., .dots, d = FALSE, w = NULL, i = NULL, digits = 3)
}
\arguments{
\item{x}{a data.frame}
\item{...}{Variables to include. Defaults to all non-grouping variables. See the \link[dplyr]{select} documentation.}
\item{d}{Should detailed summary statistics be printed?}
\item{w}{Weights. Default to NULL.}
\item{i}{Condition}
\item{digits}{Number of significant decimal digits. Default to 3}
\item{.dots}{Used to work around non-standard evaluation.}
}
\value{
a data.frame
}
\description{
Gives summary statistics (corresponds to Stata command summarize)
}
\examples{
library(dplyr)
N <- 100
df <- data_frame(
id = 1:N,
v1 = sample(5, N, TRUE),
v2 = sample(1e6, N, TRUE)
)
sum_up(df)
sum_up(df, v2, d = TRUE)
sum_up(df, v2, d = TRUE, i = v1>3)
df \%>\% group_by(v1) \%>\% sum_up(starts_with("v"))
}
| /man/sum_up.Rd | no_license | eloualiche/statar | R | false | false | 1,102 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/sum_up.R
\name{sum_up}
\alias{sum_up}
\alias{sum_up_}
\title{Gives summary statistics (corresponds to Stata command summarize)}
\usage{
sum_up(x, ..., d = FALSE, w = NULL, i = NULL, digits = 3)
sum_up_(x, ..., .dots, d = FALSE, w = NULL, i = NULL, digits = 3)
}
\arguments{
\item{x}{a data.frame}
\item{...}{Variables to include. Defaults to all non-grouping variables. See the \link[dplyr]{select} documentation.}
\item{d}{Should detailed summary statistics be printed?}
\item{w}{Weights. Default to NULL.}
\item{i}{Condition}
\item{digits}{Number of significant decimal digits. Default to 3}
\item{.dots}{Used to work around non-standard evaluation.}
}
\value{
a data.frame
}
\description{
Gives summary statistics (corresponds to Stata command summarize)
}
\examples{
library(dplyr)
N <- 100
df <- data_frame(
id = 1:N,
v1 = sample(5, N, TRUE),
v2 = sample(1e6, N, TRUE)
)
sum_up(df)
sum_up(df, v2, d = TRUE)
sum_up(df, v2, d = TRUE, i = v1>3)
df \%>\% group_by(v1) \%>\% sum_up(starts_with("v"))
}
|
BRISC_bootstrap <- function(BRISC_Out, n_boot = 100, h = 1, n_omp = 1, init = "Initial", verbose = TRUE, nugget_status = 1){
  ## Bootstrap inference for a fitted BRISC model.
  ##
  ## Arguments:
  ##   BRISC_Out     - result of a BRISC estimation run; supplies the design
  ##                   matrix X, neighbor structures, and fitted objects.
  ##   n_boot        - number of bootstrap replicates.
  ##   h             - number of parallel workers (h > 1 uses a cluster).
  ##   n_omp         - number of threads passed down to the backend.
  ##   init          - "Initial" starts replicates from the original initial
  ##                   values, "Estimate" from the fitted estimates.
  ##   verbose       - print a progress bar while bootstrapping.
  ##   nugget_status - 1 leaves the nugget free, 0 fixes it.
  ## Returns a list with bootstrap samples of Theta and Beta, confidence
  ## intervals obtained by reflecting bootstrap quantiles around the point
  ## estimates, and the elapsed bootstrap time.
  ##
  ## NOTE(review): makeCluster/parLapply/stopCluster ('parallel'),
  ## pblapply/pboptions ('pbapply'), and arrange()/bootstrap_brisc
  ## (package-internal) are not attached here -- confirm they are imported
  ## via the package NAMESPACE.
  if(missing(BRISC_Out)){stop("error: BRISC_bootstrap expects BRISC_Out\n")}
  if(nugget_status == 0){fix_nugget = 0}
  if(nugget_status == 1){fix_nugget = 1}
  X <- BRISC_Out$X
  n.omp.threads <- as.integer(n_omp)
  n.neighbors <- BRISC_Out$n.neighbors
  eps <- BRISC_Out$eps
  cov.model <- BRISC_Out$cov.model
  p <- ncol(X)
  n <- nrow(X)
  ## Coerce inputs to the storage modes expected by the compiled backend.
  storage.mode(X) <- "double"
  storage.mode(p) <- "integer"
  storage.mode(n) <- "integer"
  storage.mode(n.neighbors) <- "integer"
  storage.mode(n.omp.threads) <- "integer"
  storage.mode(eps) <- "double"
  cov.model.names <- c("exponential","spherical","matern","gaussian")
  ## 0-based index of the covariance model, for the backend.
  cov.model.indx <- which(cov.model == cov.model.names) - 1
  storage.mode(cov.model.indx) <- "integer"
  cov.model <- BRISC_Out$cov.model
  ## Unpack the fitted objects needed to generate bootstrap replicates.
  ## NOTE(review): the local 'F' shadows the FALSE shorthand inside this
  ## function; its name must match the string in clusterExport below.
  norm.residual = BRISC_Out$BRISC_Object$norm.residual
  B = BRISC_Out$BRISC_Object$B
  F = BRISC_Out$BRISC_Object$F
  Xbeta = BRISC_Out$BRISC_Object$Xbeta
  D = BRISC_Out$BRISC_Object$D
  d = BRISC_Out$BRISC_Object$d
  nnIndx = BRISC_Out$BRISC_Object$nnIndx
  nnIndxLU = BRISC_Out$BRISC_Object$nnIndxLU
  CIndx = BRISC_Out$BRISC_Object$CIndx
  Length.D = BRISC_Out$BRISC_Object$Length.D
  ## Starting values for each replicate: c(tau.sq/sigma.sq, phi[, nu]),
  ## taken from the initial values or from the fitted estimates.
  if(init == "Initial"){
    if(cov.model == "matern") {theta_boot_init <- c(BRISC_Out$init[2]/BRISC_Out$init[1], BRISC_Out$init[3], BRISC_Out$init[4])}
    else {theta_boot_init <- c(BRISC_Out$init[2]/BRISC_Out$init[1], BRISC_Out$init[3])}
  }
  if(init == "Estimate"){
    if(cov.model == "matern") {theta_boot_init <- c(BRISC_Out$Theta[2]/BRISC_Out$Theta[1], BRISC_Out$Theta[3], BRISC_Out$Theta[4])}
    else {theta_boot_init <- c(BRISC_Out$Theta[2]/BRISC_Out$Theta[1], BRISC_Out$Theta[3])}
  }
  ## Square-root scale, matching the parameterization of bootstrap_brisc.
  theta_boot_init <- sqrt(theta_boot_init)
  p3 <- proc.time()
  if(h > 1){
    ## Parallel path: export every input to the workers once, then run the
    ## replicates with pblapply (progress bar) or parLapply (silent).
    cl <- makeCluster(h)
    clusterExport(cl=cl, varlist=c("norm.residual", "X", "B", "F", "Xbeta", "D", "d", "nnIndx", "nnIndxLU",
                                   "CIndx", "n", "p", "n.neighbors", "theta_boot_init", "cov.model.indx", "Length.D",
                                   "n.omp.threads", "bootstrap_brisc", "eps", "fix_nugget"),envir=environment())
    if(verbose == TRUE){
      cat(paste(("----------------------------------------"), collapse=" "), "\n"); cat(paste(("\tBootstrap Progress"), collapse=" "), "\n"); cat(paste(("----------------------------------------"), collapse=" "), "\n")
      pboptions(type = "txt", char = "=")
      result <- pblapply(1:n_boot,bootstrap_brisc,norm.residual, X, B, F, Xbeta, D, d, nnIndx, nnIndxLU, CIndx, n, p, n.neighbors, theta_boot_init,
                         cov.model.indx, Length.D, n.omp.threads, eps, fix_nugget, cl = cl)
    }
    if(verbose != TRUE){result <- parLapply(cl,1:n_boot,bootstrap_brisc,norm.residual, X, B, F, Xbeta, D, d, nnIndx, nnIndxLU, CIndx, n, p, n.neighbors, theta_boot_init,
                                            cov.model.indx, Length.D, n.omp.threads, eps, fix_nugget)}
    stopCluster(cl)
  }
  if(h == 1){
    ## Serial path: identical replicates without a cluster.
    if(verbose == TRUE){
      cat(paste(("----------------------------------------"), collapse=" "), "\n"); cat(paste(("\tBootstrap Progress"), collapse=" "), "\n"); cat(paste(("----------------------------------------"), collapse=" "), "\n")
      pboptions(type = "txt", char = "=")
      result <- pblapply(1:n_boot,bootstrap_brisc,norm.residual, X, B, F, Xbeta, D, d, nnIndx, nnIndxLU, CIndx, n, p, n.neighbors, theta_boot_init,
                         cov.model.indx, Length.D, n.omp.threads, eps, fix_nugget)
    }
    if(verbose != TRUE){
      result <- lapply(1:n_boot,bootstrap_brisc,norm.residual, X, B, F, Xbeta, D, d, nnIndx, nnIndxLU, CIndx, n, p, n.neighbors, theta_boot_init,
                       cov.model.indx, Length.D, n.omp.threads, eps, fix_nugget)
    }
  }
  p4 <- proc.time()
  ## Stack the per-replicate parameter vectors into a table
  ## (columns: Beta..., Theta...).
  result_table = arrange(result)
  estimate <- c(BRISC_Out$Beta, BRISC_Out$Theta)
  result_CI <- matrix(0,2,length(estimate))
  ## Reflect the bootstrap (.975, .025) quantiles around the point
  ## estimate: 2*estimate - q, one column per parameter.
  for(i in 1:length(estimate)){
    result_CI[,i] <- 2*estimate[i] - quantile(result_table[,i], c(.975,.025))
  }
  result_list <- list()
  result_list$boot.Theta <- result_table[,(length(BRISC_Out$Beta) + 1):dim(result_table)[2]]
  if (cov.model != "matern") {colnames(result_list$boot.Theta) <- c("sigma.sq", "tau.sq", "phi")}
  if (cov.model == "matern") {colnames(result_list$boot.Theta) <- c("sigma.sq", "tau.sq", "phi", "nu")}
  result_list$boot.Beta <- as.matrix(result_table[,1:length(BRISC_Out$Beta)])
  colnames(result_list$boot.Beta) <- rep(0, length(BRISC_Out$Beta))
  for(i in 1:length(BRISC_Out$Beta)){
    name_beta <- paste0("beta_",i)
    colnames(result_list$boot.Beta)[i] <- name_beta
  }
  ## Theta interval endpoints are truncated at zero via pmax, since the
  ## covariance parameters cannot be negative.
  result_list$confidence.interval <- cbind(result_CI[,1:length(BRISC_Out$Beta)],pmax(result_CI[,(length(BRISC_Out$Beta) + 1)
                                                                                               :dim(result_table)[2]], 0*result_CI[,(length(BRISC_Out$Beta) + 1):dim(result_table)[2]]))
  if (cov.model != "matern") {colnames(result_list$confidence.interval)[(length(BRISC_Out$Beta) + 1):dim(result_table)[2]] <-
    c("sigma.sq", "tau.sq", "phi")}
  if (cov.model == "matern") {colnames(result_list$confidence.interval)[(length(BRISC_Out$Beta) + 1):dim(result_table)[2]] <-
    c("sigma.sq", "tau.sq", "phi", "nu")}
  for(i in 1:length(BRISC_Out$Beta)){
    name_beta <- paste0("beta_",i)
    colnames(result_list$confidence.interval)[i] <- name_beta
  }
  result_list$boot.time = p4 - p3
  result_list
}
| /R/bootstrap.R | no_license | ArkajyotiSaha/BRISC | R | false | false | 5,517 | r | BRISC_bootstrap <- function(BRISC_Out, n_boot = 100, h = 1, n_omp = 1, init = "Initial", verbose = TRUE, nugget_status = 1){
# Parametric bootstrap for a fitted BRISC model: resamples the normalized
# residuals stored in BRISC_Out, refits n_boot times (optionally on a
# parallel cluster of h workers), and returns the bootstrap parameter
# samples together with basic (2*estimate - quantile) confidence intervals.
#
# Arguments:
#   BRISC_Out     - result of a BRISC estimation call; must carry $BRISC_Object
#                   with the pieces needed by bootstrap_brisc (B, F, nnIndx, ...)
#   n_boot        - number of bootstrap replicates
#   h             - number of parallel workers (h == 1 runs sequentially)
#   n_omp         - number of OpenMP threads forwarded to the C backend
#   init          - "Initial" to start each refit from the original starting
#                   values, "Estimate" to start from the fitted Theta
#   verbose       - if TRUE, show a pbapply progress bar
#   nugget_status - 0/1 flag forwarded to the C code as fix_nugget
if(missing(BRISC_Out)){stop("error: BRISC_bootstrap expects BRISC_Out\n")}
# NOTE(review): fix_nugget is only bound for nugget_status in {0, 1}; any
# other value leaves it undefined and fails later -- consider validating.
if(nugget_status == 0){fix_nugget = 0}
if(nugget_status == 1){fix_nugget = 1}
X <- BRISC_Out$X
n.omp.threads <- as.integer(n_omp)
n.neighbors <- BRISC_Out$n.neighbors
eps <- BRISC_Out$eps
cov.model <- BRISC_Out$cov.model
p <- ncol(X)
n <- nrow(X)
# storage.mode coercions match the types expected by the C interface
storage.mode(X) <- "double"
storage.mode(p) <- "integer"
storage.mode(n) <- "integer"
storage.mode(n.neighbors) <- "integer"
storage.mode(n.omp.threads) <- "integer"
storage.mode(eps) <- "double"
# covariance model is passed to C as a 0-based index into this name list
cov.model.names <- c("exponential","spherical","matern","gaussian")
cov.model.indx <- which(cov.model == cov.model.names) - 1
storage.mode(cov.model.indx) <- "integer"
cov.model <- BRISC_Out$Theta; cov.model <- BRISC_Out$cov.model
norm.residual = BRISC_Out$BRISC_Object$norm.residual
B = BRISC_Out$BRISC_Object$B
F = BRISC_Out$BRISC_Object$F
Xbeta = BRISC_Out$BRISC_Object$Xbeta
D = BRISC_Out$BRISC_Object$D
d = BRISC_Out$BRISC_Object$d
nnIndx = BRISC_Out$BRISC_Object$nnIndx
nnIndxLU = BRISC_Out$BRISC_Object$nnIndxLU
CIndx = BRISC_Out$BRISC_Object$CIndx
Length.D = BRISC_Out$BRISC_Object$Length.D
# starting values for each refit: ratios (tau.sq/sigma.sq, phi[, nu]),
# square-rooted because the optimizer works on the square-root scale
if(init == "Initial"){
if(cov.model == "matern") {theta_boot_init <- c(BRISC_Out$init[2]/BRISC_Out$init[1], BRISC_Out$init[3], BRISC_Out$init[4])}
else {theta_boot_init <- c(BRISC_Out$init[2]/BRISC_Out$init[1], BRISC_Out$init[3])}
}
if(init == "Estimate"){
if(cov.model == "matern") {theta_boot_init <- c(BRISC_Out$Theta[2]/BRISC_Out$Theta[1], BRISC_Out$Theta[3], BRISC_Out$Theta[4])}
else {theta_boot_init <- c(BRISC_Out$Theta[2]/BRISC_Out$Theta[1], BRISC_Out$Theta[3])}
}
theta_boot_init <- sqrt(theta_boot_init)
p3 <- proc.time()
# parallel path: ship every object bootstrap_brisc needs to the workers
if(h > 1){
cl <- makeCluster(h)
clusterExport(cl=cl, varlist=c("norm.residual", "X", "B", "F", "Xbeta", "D", "d", "nnIndx", "nnIndxLU",
                               "CIndx", "n", "p", "n.neighbors", "theta_boot_init", "cov.model.indx", "Length.D",
                               "n.omp.threads", "bootstrap_brisc", "eps", "fix_nugget"),envir=environment())
if(verbose == TRUE){
cat(paste(("----------------------------------------"), collapse=" "), "\n"); cat(paste(("\tBootstrap Progress"), collapse=" "), "\n"); cat(paste(("----------------------------------------"), collapse=" "), "\n")
pboptions(type = "txt", char = "=")
result <- pblapply(1:n_boot,bootstrap_brisc,norm.residual, X, B, F, Xbeta, D, d, nnIndx, nnIndxLU, CIndx, n, p, n.neighbors, theta_boot_init,
                   cov.model.indx, Length.D, n.omp.threads, eps, fix_nugget, cl = cl)
}
if(verbose != TRUE){result <- parLapply(cl,1:n_boot,bootstrap_brisc,norm.residual, X, B, F, Xbeta, D, d, nnIndx, nnIndxLU, CIndx, n, p, n.neighbors, theta_boot_init,
                                        cov.model.indx, Length.D, n.omp.threads, eps, fix_nugget)}
# NOTE(review): if a worker errors the cluster is never stopped; an
# on.exit(stopCluster(cl)) right after makeCluster would be safer.
stopCluster(cl)
}
# sequential path: same call, no cluster
if(h == 1){
if(verbose == TRUE){
cat(paste(("----------------------------------------"), collapse=" "), "\n"); cat(paste(("\tBootstrap Progress"), collapse=" "), "\n"); cat(paste(("----------------------------------------"), collapse=" "), "\n")
pboptions(type = "txt", char = "=")
result <- pblapply(1:n_boot,bootstrap_brisc,norm.residual, X, B, F, Xbeta, D, d, nnIndx, nnIndxLU, CIndx, n, p, n.neighbors, theta_boot_init,
                   cov.model.indx, Length.D, n.omp.threads, eps, fix_nugget)
}
if(verbose != TRUE){
result <- lapply(1:n_boot,bootstrap_brisc,norm.residual, X, B, F, Xbeta, D, d, nnIndx, nnIndxLU, CIndx, n, p, n.neighbors, theta_boot_init,
                 cov.model.indx, Length.D, n.omp.threads, eps, fix_nugget)
}
}
p4 <- proc.time()
# presumably arrange() is a package-internal helper that stacks the per-
# replicate vectors into an n_boot x (p + n.theta) matrix -- TODO confirm
result_table = arrange(result)
estimate <- c(BRISC_Out$Beta, BRISC_Out$Theta)
# basic ("reflected") bootstrap CI: 2*estimate - upper/lower quantile
result_CI <- matrix(0,2,length(estimate))
for(i in 1:length(estimate)){
result_CI[,i] <- 2*estimate[i] - quantile(result_table[,i], c(.975,.025))
}
result_list <- list()
# covariance-parameter columns follow the beta columns in result_table
result_list$boot.Theta <- result_table[,(length(BRISC_Out$Beta) + 1):dim(result_table)[2]]
if (cov.model != "matern") {colnames(result_list$boot.Theta) <- c("sigma.sq", "tau.sq", "phi")}
if (cov.model == "matern") {colnames(result_list$boot.Theta) <- c("sigma.sq", "tau.sq", "phi", "nu")}
result_list$boot.Beta <- as.matrix(result_table[,1:length(BRISC_Out$Beta)])
colnames(result_list$boot.Beta) <- rep(0, length(BRISC_Out$Beta))
for(i in 1:length(BRISC_Out$Beta)){
name_beta <- paste0("beta_",i)
colnames(result_list$boot.Beta)[i] <- name_beta
}
# covariance parameters are non-negative, so their CI bounds are clipped
# at zero via pmax against a zero matrix
result_list$confidence.interval <- cbind(result_CI[,1:length(BRISC_Out$Beta)],pmax(result_CI[,(length(BRISC_Out$Beta) + 1)
:dim(result_table)[2]], 0*result_CI[,(length(BRISC_Out$Beta) + 1):dim(result_table)[2]]))
if (cov.model != "matern") {colnames(result_list$confidence.interval)[(length(BRISC_Out$Beta) + 1):dim(result_table)[2]] <-
c("sigma.sq", "tau.sq", "phi")}
if (cov.model == "matern") {colnames(result_list$confidence.interval)[(length(BRISC_Out$Beta) + 1):dim(result_table)[2]] <-
c("sigma.sq", "tau.sq", "phi", "nu")}
for(i in 1:length(BRISC_Out$Beta)){
name_beta <- paste0("beta_",i)
colnames(result_list$confidence.interval)[i] <- name_beta
}
# wall-clock time spent in the bootstrap loop itself
result_list$boot.time = p4 - p3
result_list
}
|
\name{ex01.43}
\alias{ex01.43}
\docType{data}
\title{R Data set: ex01.43}
\description{
The \code{ex01.43} data frame has 10 rows and 1 column.
}
\usage{data(ex01.43)}
\format{
A data frame with 10 observations on the following variable.
\describe{
\item{\code{Lifetime}}{a numeric vector}
}
}
\details{
Consult the web site \url{http://www.thomsonedu.com/statistics/devore} for additional online resources that are available for this book.
}
\source{
Devore, J. L. (2008) \emph{Probability and Statistics for Engineering and the Sciences (7th Edition)}, ISBN-10: 0495382175 ISBN-13: 9780495382171
}
\examples{
data(ex01.43)
str(ex01.43)
}
\keyword{datasets}
| /man/ex01.43.Rd | no_license | cran/Devore7 | R | false | false | 672 | rd | \name{ex01.43}
\alias{ex01.43}
\docType{data}
\title{R Data set: ex01.43}
\description{
The \code{ex01.43} data frame has 10 rows and 1 column.
}
\usage{data(ex01.43)}
\format{
A data frame with 10 observations on the following variable.
\describe{
\item{\code{Lifetime}}{a numeric vector}
}
}
\details{
Consult the web site \url{http://www.thomsonedu.com/statistics/devore} for additional online resources that are available for this book.
}
\source{
Devore, J. L. (2008) \emph{Probability and Statistics for Engineering and the Sciences (7th Edition)}, ISBN-10: 0495382175 ISBN-13: 9780495382171
}
\examples{
data(ex01.43)
str(ex01.43)
}
\keyword{datasets}
|
#//MMM - Sept 2014
#// Modified code to allow specification of filename with
#// extension. Cases exist where 2 files have the same name, but
#// different extensions (e.g.StAnnsMPA.csv vs StAnnsMPA.dat) - the
#// original code could never get an exact match since it ignored the
#// extension.
#
#//Also changed so that in the case of a single closest match, the data
#//for the lone match is simply returned
# Locate polygon data files by name beneath a data directory tree and return
# the full path(s) of the match; ambiguous queries print the candidate list.
#   polyname   - one or more names to search for; may include the extension
#                (e.g. "StAnnsMPA.csv") to disambiguate same-named files
#   loc        - root directory, searched recursively
#   ignorelist - path fragments to exclude (archived/retired copies)
#   acceptable.extensions - file types considered
#   returndata - retained for compatibility; data loading is disabled below
#   return.one.match      - if TRUE, return a unique exact base-name match
polygon_file = function(
  polyname,
  loc = project.datadirectory("aegis"),
  ignorelist=c("archive", "retired"),
  acceptable.extensions = c( "dat", "csv", "xy", "shp","ll" ),
  returndata=FALSE ,
  return.one.match=T) {
  fs = .Platform$file.sep
  out = NULL
  rem = NULL
  # find all filenames and match
  flist = list.files( path=loc, pattern="*", recursive=TRUE, ignore.case=TRUE, include.dirs=TRUE, full.names=TRUE )
  #MMM - Sept 2014 - this isn't ideal. Cases exist where 2 files have the same name, but different extensions
  # and this prevents a full path from correctly identifying the file (e.g.StAnnsMPA.csv vs StAnnsMPA.dat)
  #fl = basename(flist)
  fl<-flist
  # keep only acceptable file types
  keep = NULL
  acceptable.extensions = paste( "[.]*.", acceptable.extensions, "$", sep="")
  for (ik in acceptable.extensions) {
    keep0 = grep ( ik, fl, ignore.case=T )
    if (length( keep0)>0 ) keep = c( keep, keep0 )
  }
  if ( length(keep)>0 ) {
    keep = unique(keep)
    flist = flist[keep]
  }
  # remove data flagged to be "ignored" or archived etc..
  for (ig in ignorelist) {
    rem0 = grep ( ig, flist, ignore.case =T ) ## NOTE:: this matches directory names too
    if (length( rem0)>0 ) rem = c( rem, rem0 )
  }
  if ( length(rem)>0 ) {
    rem = unique(rem)
    flist = flist[-rem]
  }
  #see note above about finding exact matches with extension
  # reset to current list
  #fl = basename(flist)
  fl<-flist
  for (pn in polyname ) {
    # exact match: a path component ending with the requested name
    i = grep( paste("\\", fs, pn, sep=""), flist , ignore.case=TRUE )
    if (length(i) == 1) { ## EXACT match
      return(fl[i])
    } else {
      if(return.one.match) {# added by amc hope it does not make too much of a mess..used if one file name
        # strip the extension, then require an exact base-name match
        ff = sub("([^.]+)\\.[[:alnum:]]+$", "\\1", basename(flist)) #recursive search to last dot and the remove everything after last dot
        pn1 = paste("^",pn,"$",sep="")
        fil = grep(pn1,ff)
        return(flist[fil])
      }
      # NOTE(review): pmatch() returns an index (or NA); wrapping it in
      # !is.na() yields a logical that is then concatenated with the integer
      # indices from agrep(), so TRUE/FALSE become indices 1/0 inside l and
      # corrupt the candidate set -- confirm intended behaviour.
      j = !is.na(pmatch( pn, fl ))
      k = agrep( pn, fl, ignore.case=TRUE )
      l = unique( c(j, k) )
      if ( length(l) > 2 ){ #return the list of candidates
        print( paste( "No exact match found for", pn, ", here are the closest matches:" ) )
        print( flist[l] )
      } else if ( length(l) > 1 ) { #treat a lone result like an exact match
        return(fl[l])
        #out = c( out, l)
      } else {
        print( "No similarly named files found")
      }
    }
  }
  #MMM Sep 2014
  #don't think we need to load the data - just want to find the file
  #also, this results in an extra null in any non-exact match search
  # res = NULL
  # if (length(out) > 0 ) {
  # if (returndata) {
  # for ( o in out ) {
  # res[[ fl[o] ]] = read.table( flist[o], header=F)
  # }
  # } else {
  # res = flist[out]
  # }
  # }
  # return( res )
}
| /R/polygon_file.r | permissive | PEDsnowcrab/aegis.polygons | R | false | false | 3,410 | r | #//MMM - Sept 2014
#// Modified code to allow specification of filename with
#// extension. Cases exist where 2 files have the same name, but
#// different extensions (e.g.StAnnsMPA.csv vs StAnnsMPA.dat) - the
#// original code could never get an exact match since it ignored the
#// extension.
#
#//Also changed so that in the case of a single closest match, the data
#//for the lone match is simply returned
# Locate polygon data files by (base) name beneath a data directory tree.
#
# Args:
#   polyname              one or more file names to look for; may include the
#                         extension (e.g. "StAnnsMPA.csv") to disambiguate
#                         files sharing a base name
#   loc                   root directory, searched recursively
#   ignorelist            path fragments to exclude (archived/retired copies)
#   acceptable.extensions file extensions considered
#   returndata            retained for backward compatibility; data loading
#                         was disabled in Sept 2014 (see note at bottom)
#   return.one.match      if TRUE, a unique exact base-name match is
#                         returned directly
#
# Returns: the full path(s) of the matching file(s); when a fuzzy query is
# ambiguous, the candidate list is printed instead and NULL is returned.
polygon_file = function(
  polyname,
  loc = project.datadirectory("aegis"),
  ignorelist = c("archive", "retired"),
  acceptable.extensions = c( "dat", "csv", "xy", "shp", "ll" ),
  returndata = FALSE,
  return.one.match = TRUE ) {

  fs = .Platform$file.sep

  # all candidate files under loc
  flist = list.files( path=loc, pattern="*", recursive=TRUE, ignore.case=TRUE, include.dirs=TRUE, full.names=TRUE )

  # keep full paths (not basenames) so a query that includes an extension can
  # distinguish e.g. StAnnsMPA.csv from StAnnsMPA.dat
  fl = flist

  # keep only acceptable file types
  keep = NULL
  patterns = paste( "[.]*.", acceptable.extensions, "$", sep="" )
  for ( pat in patterns ) {
    hits = grep( pat, fl, ignore.case=TRUE )
    if ( length(hits) > 0 ) keep = c( keep, hits )
  }
  if ( length(keep) > 0 ) flist = flist[ unique(keep) ]

  # drop files flagged to be ignored (NOTE: this matches directory names too)
  rem = NULL
  for ( ig in ignorelist ) {
    hits = grep( ig, flist, ignore.case=TRUE )
    if ( length(hits) > 0 ) rem = c( rem, hits )
  }
  if ( length(rem) > 0 ) flist = flist[ -unique(rem) ]

  fl = flist  # reset to the filtered list

  for ( pn in polyname ) {
    # exact match: a path component ending with the requested name
    i = grep( paste( "\\", fs, pn, sep="" ), flist, ignore.case=TRUE )
    if ( length(i) == 1 ) return( fl[i] )

    if ( return.one.match ) {
      # strip the extension and require an exact base-name match
      ff = sub( "([^.]+)\\.[[:alnum:]]+$", "\\1", basename(flist) )
      fil = grep( paste( "^", pn, "$", sep="" ), ff )
      return( flist[fil] )
    }

    # fuzzy fallback: partial matching (pmatch) plus approximate matching
    # (agrep).  BUGFIX: the original computed j = !is.na(pmatch(pn, fl)) --
    # a logical -- and concatenated it with agrep's integer indices, so
    # TRUE/FALSE silently became indices 1/0 in the candidate set.  Use the
    # pmatch index itself, kept only when a match exists; the length
    # thresholds below are adjusted accordingly (they previously compensated
    # for the one bogus element).
    j = pmatch( pn, fl )
    k = agrep( pn, fl, ignore.case=TRUE )
    l = unique( c( j[ !is.na(j) ], k ) )
    if ( length(l) > 1 ) {
      # ambiguous: report the candidates rather than guessing
      print( paste( "No exact match found for", pn, ", here are the closest matches:" ) )
      print( flist[l] )
    } else if ( length(l) == 1 ) {
      return( fl[l] )  # a lone candidate is treated like an exact match
    } else {
      print( "No similarly named files found")
    }
  }

  # NOTE: the historical returndata=TRUE behaviour (reading each match with
  # read.table) was removed in Sept 2014 and is intentionally not restored.
}
|
library(shiny)
# Demo Shiny app: draws boxplots of miles-per-gallon (datasets::mtcars)
# against a user-chosen grouping variable (cylinders, transmission or
# gears), optionally hiding outliers.
# Define UI for miles per gallon app ----
ui <- fluidPage(
  # App title ----
  titlePanel("Demo App 3: Miles Per Gallon"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # Input: Selector for variable to plot against mpg ----
      # (display label -> mtcars column name)
      selectInput("variable", "Variable:",
                  c("Cylinders" = "cyl",
                    "Transmission" = "am",
                    "Gears" = "gear")),
      # Input: Checkbox for whether outliers should be included ----
      checkboxInput("outliers", "Show outliers", TRUE)
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      # Output: Formatted text for caption ----
      h3(textOutput("caption")),
      # Output: Plot of the requested variable against mpg ----
      plotOutput("mpgPlot")
    )
  )
)
# Data pre-processing ----
# Tweak the "am" variable to have nicer factor labels -- since this
# doesn't rely on any user inputs, we can do this once at startup
# and then use the value throughout the lifetime of the app
mpgData <- mtcars
mpgData$am <- factor(mpgData$am, labels = c("Automatic", "Manual"))
# Define server logic to plot various variables against mpg ----
server <- function(input, output) {
  # Compute the formula text ----
  # This is in a reactive expression since it is shared by the
  # output$caption and output$mpgPlot functions
  formulaText <- reactive({
    paste("mpg ~", input$variable)
  })
  # Return the formula text for printing as a caption ----
  output$caption <- renderText({
    formulaText()
  })
  # Generate a plot of the requested variable against mpg ----
  # and only exclude outliers if requested
  output$mpgPlot <- renderPlot({
    boxplot(as.formula(formulaText()),
            data = mpgData,
            outline = input$outliers,
            col = "#75AADB", pch = 19)
  })
}
shinyApp(ui, server)
| /shiny-apps/demo-app-3/app.R | no_license | joshpencheon/r-shiny-poc | R | false | false | 1,937 | r | library(shiny)
# Demo App 3 -- interactive boxplots of mpg (mtcars) by a chosen grouping
# variable.  (shiny is attached on the preceding line.)

# Selector choices: display label -> mtcars column name.
variable_choices <- c(
  "Cylinders" = "cyl",
  "Transmission" = "am",
  "Gears" = "gear"
)

# User interface: controls in a sidebar, caption and plot in the main area.
ui <- fluidPage(
  titlePanel("Demo App 3: Miles Per Gallon"),
  sidebarLayout(
    sidebarPanel(
      selectInput("variable", "Variable:", variable_choices),
      checkboxInput("outliers", "Show outliers", TRUE)
    ),
    mainPanel(
      h3(textOutput("caption")),
      plotOutput("mpgPlot")
    )
  )
)

# Data preparation happens once at startup (it does not depend on any user
# input): give the transmission flag readable factor labels.
mpgData <- mtcars
mpgData$am <- factor(mpgData$am, labels = c("Automatic", "Manual"))

# Server: a single reactive holds the model formula text, shared by both the
# caption and the plot.
server <- function(input, output) {

  formula_text <- reactive(
    paste("mpg ~", input$variable)
  )

  output$caption <- renderText(
    formula_text()
  )

  # Boxplot of mpg against the selected variable; outliers drawn only when
  # the checkbox is ticked.
  output$mpgPlot <- renderPlot(
    boxplot(as.formula(formula_text()),
            data = mpgData,
            outline = input$outliers,
            col = "#75AADB", pch = 19)
  )
}

shinyApp(ui, server)
|
# NBA shot-chart support script: downloads the league-wide player index from
# stats.nba.com, cleans it, and builds the lookup tables/helpers used by the
# shot-chart plotting code.
library(tidyverse)
library(hexbin)
library(jsonlite)
library(httr)
library(scales)
# Format a proportion as a whole-number percent label, e.g. 0.423 -> "42%".
percent_formatter = function(x) {
  scales::percent(x, accuracy = 1)
}
# Endpoint listing every player in league history (IsOnlyCurrentSeason=0).
players_url = "http://stats.nba.com/stats/commonallplayers?LeagueID=00&Season=2019-20&IsOnlyCurrentSeason=0"
# Browser-like headers; stats.nba.com rejects requests without them.
request_headers = c(
  `Connection` = 'keep-alive',
  `Accept` = 'application/json, text/plain, */*',
  `x-nba-stats-token` = 'true',
  `X-NewRelic-ID` = 'VQECWF5UChAHUlNTBwgBVw==',
  `User-Agent` = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.87 Safari/537.36',
  `x-nba-stats-origin` = 'stats',
  `Sec-Fetch-Site` = 'same-origin',
  `Sec-Fetch-Mode` = 'cors',
  `Referer` = 'https://stats.nba.com/players/leaguedashplayerbiostats/',
  `Accept-Encoding` = 'gzip, deflate, br',
  `Accept-Language` = 'en-US,en;q=0.9'
)
request = GET(players_url, add_headers(request_headers))
players_data = fromJSON(content(request, as = "text"))
# First result set holds the player rows; column headers come separately.
# NOTE(review): tbl_df() is deprecated (dplyr >= 1.0.0); as_tibble() is the
# modern equivalent.
players = tbl_df(data.frame(players_data$resultSets$rowSet[[1]], stringsAsFactors = FALSE))
names(players) = tolower(players_data$resultSets$headers[[1]])
# Every field arrives as character; coerce ids, years and the roster flag.
players = mutate(players,
                 person_id = as.numeric(person_id),
                 rosterstatus = as.logical(as.numeric(rosterstatus)),
                 from_year = as.numeric(from_year),
                 to_year = as.numeric(to_year),
                 team_id = as.numeric(team_id)
)
# Historical clamp from the 2017 off-season; inert for any later run date.
if (Sys.Date() <= as.Date("2017-10-20")) {
  players = mutate(players, to_year = pmin(to_year, 2016))
}
# Convert "Last, First" display names to "First Last".
players$name = sapply(players$display_last_comma_first, function(s) {
  paste(rev(strsplit(s, ", ")[[1]]), collapse = " ")
})
# Shot-chart data exists from the 1996-97 season onward.
first_year_of_data = 1996
last_year_of_data = max(players$to_year)
# "1996-97"-style season labels, keyed by starting year.
season_strings = paste(first_year_of_data:last_year_of_data,
                       substr(first_year_of_data:last_year_of_data + 1, 3, 4),
                       sep = "-")
names(season_strings) = first_year_of_data:last_year_of_data
# Restrict to players active during the covered era.
available_players = filter(players, to_year >= first_year_of_data)
# Disambiguate duplicated display names by appending the person_id.
names_table = table(available_players$name)
dupe_names = names(names_table[which(names_table > 1)])
available_players$name[available_players$name %in% dupe_names] = paste(
  available_players$name[available_players$name %in% dupe_names],
  available_players$person_id[available_players$name %in% dupe_names]
)
# Case-insensitive lookup key, sorted for stable presentation.
available_players$lower_name = tolower(available_players$name)
available_players = arrange(available_players, lower_name)
# Return the row (or just the numeric id) for a player name,
# matched case-insensitively.
find_player_by_name = function(n) {
  filter(available_players, lower_name == tolower(n))
}
find_player_id_by_name = function(n) {
  find_player_by_name(n)$person_id
}
| /NBA/NBA.R/NBA_Shot_Charts.R | no_license | Jared-A/NBA | R | false | false | 2,697 | r | library(tidyverse)
# NBA shot-chart support script: downloads the league-wide player index from
# stats.nba.com, cleans it, and builds the lookup tables/helpers used by the
# shot-chart plotting code.  (tidyverse is attached on the preceding line.)
library(hexbin)
library(jsonlite)
library(httr)
library(scales)

# Format a proportion as a whole-number percent label, e.g. 0.423 -> "42%".
percent_formatter = function(x) {
  scales::percent(x, accuracy = 1)
}

# Endpoint listing every player in league history (IsOnlyCurrentSeason=0).
players_url = "http://stats.nba.com/stats/commonallplayers?LeagueID=00&Season=2019-20&IsOnlyCurrentSeason=0"

# Browser-like headers; stats.nba.com rejects requests without them.
request_headers = c(
  `Connection` = 'keep-alive',
  `Accept` = 'application/json, text/plain, */*',
  `x-nba-stats-token` = 'true',
  `X-NewRelic-ID` = 'VQECWF5UChAHUlNTBwgBVw==',
  `User-Agent` = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.87 Safari/537.36',
  `x-nba-stats-origin` = 'stats',
  `Sec-Fetch-Site` = 'same-origin',
  `Sec-Fetch-Mode` = 'cors',
  `Referer` = 'https://stats.nba.com/players/leaguedashplayerbiostats/',
  `Accept-Encoding` = 'gzip, deflate, br',
  `Accept-Language` = 'en-US,en;q=0.9'
)

request = GET(players_url, add_headers(request_headers))
players_data = fromJSON(content(request, as = "text"))

# First result set holds the player rows; column headers come separately.
# FIX: tbl_df() was deprecated in dplyr 1.0.0 -- as_tibble() is the supported
# equivalent and produces the same tibble.
players = as_tibble(data.frame(players_data$resultSets$rowSet[[1]], stringsAsFactors = FALSE))
names(players) = tolower(players_data$resultSets$headers[[1]])

# Every field arrives as character; coerce ids, years and the roster flag.
players = mutate(players,
                 person_id = as.numeric(person_id),
                 rosterstatus = as.logical(as.numeric(rosterstatus)),
                 from_year = as.numeric(from_year),
                 to_year = as.numeric(to_year),
                 team_id = as.numeric(team_id)
)

# Historical clamp from the 2017 off-season; inert for any later run date.
if (Sys.Date() <= as.Date("2017-10-20")) {
  players = mutate(players, to_year = pmin(to_year, 2016))
}

# Convert "Last, First" display names to "First Last".  vapply() (unlike
# sapply()) guarantees a character result even for an empty player table.
players$name = vapply(players$display_last_comma_first, function(s) {
  paste(rev(strsplit(s, ", ")[[1]]), collapse = " ")
}, character(1))

# Shot-chart data exists from the 1996-97 season onward.
first_year_of_data = 1996
last_year_of_data = max(players$to_year)

# "1996-97"-style season labels, keyed by starting year.  Note that `:`
# binds tighter than `+`, so each season's end year is start + 1.
season_strings = paste(first_year_of_data:last_year_of_data,
                       substr(first_year_of_data:last_year_of_data + 1, 3, 4),
                       sep = "-")
names(season_strings) = first_year_of_data:last_year_of_data

# Restrict to players active during the covered era.
available_players = filter(players, to_year >= first_year_of_data)

# Disambiguate duplicated display names by appending the person_id.
names_table = table(available_players$name)
dupe_names = names(names_table[which(names_table > 1)])
available_players$name[available_players$name %in% dupe_names] = paste(
  available_players$name[available_players$name %in% dupe_names],
  available_players$person_id[available_players$name %in% dupe_names]
)

# Case-insensitive lookup key, sorted for stable presentation.
available_players$lower_name = tolower(available_players$name)
available_players = arrange(available_players, lower_name)

# Return the row (or just the numeric id) for a player name,
# matched case-insensitively.
find_player_by_name = function(n) {
  filter(available_players, lower_name == tolower(n))
}
find_player_id_by_name = function(n) {
  find_player_by_name(n)$person_id
}
|
#' Distance of kth nearest neighbour
#'
#' For each observation in \code{X}, compute the distance to its kth nearest
#' neighbour within the sample.
#'
#' @param X the sample to work out the distance between a value and its kth
#' nearest neighbour
#' @param k the order of nearest neighbour to be used; must be smaller than
#' the number of observations
#' @param d the dimension of the sample; only \code{d = 1} is supported
#' @return a numeric vector of kth-nearest-neighbour distances (one per
#' observation), or an explanatory string when \code{d > 1}
#' @export
#' @import FNN
# The kth NN function
Rho <- function(X, k, d=1) {
  if (d != 1) {
    # behaviour retained from the original implementation: unsupported
    # dimensions return a message rather than raising an error
    return("Dimension is too high for this estimator")
  }
  n <- length(X)
  # FIX: validate BEFORE the distance computation -- the original ran
  # stopifnot(n > k) only after FNN::knn.dist(), wasting the work (and
  # knn.dist itself errors when k >= n)
  stopifnot(n > k)
  # n x k matrix: column j holds each point's distance to its jth neighbour
  NNdist <- FNN::knn.dist(data=X, k=k)
  # return the kth column: the distance to the kth nearest neighbour
  NNdist[,k]
}
#'
#' @param X the sample to work out the distance between a value and its kth
#' nearest neighbour
#' @param k the order of nearest neighbour to be used
#' @param d the dimension of the sample
#' @export
#' @import FNN
# The kth NN function
Rho <- function(X, k, d=1) {
if (d == 1){
# creating the matrix of kth nn distances for X
NNdist <- FNN::knn.dist(data=X, k=k)
n <- length(X)
# check that k is not larger than the length of the vector
stopifnot(n > k)
# return the kth column of the matrix
NNdist[,k]
} else {
return("Dimension is too high for this estimator")
}
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grab_funs.R
\name{grab_design_matrix}
\alias{grab_design_matrix}
\title{Grab a matrix of fixed effects from a model object}
\usage{
grab_design_matrix(data, rhs_formula, ...)
}
\arguments{
\item{data}{the data from which to extract the matrix}
\item{rhs_formula}{the right hand side of a model formula}
\item{...}{Can be used to pass \code{xlev} to \code{\link[stats]{model.frame}}}
}
\value{
a \code{\link[stats]{model.matrix}}
}
\description{
Grab a matrix of fixed effects from a model object
}
\examples{
# Create a "desigm" matrix for the first ten rows of iris data
fit <- lm(Sepal.Width ~ Petal.Width, data = iris)
grab_design_matrix(
data = iris[1:10, ],
grab_fixed_formula(fit))
}
| /man/grab_design_matrix.Rd | permissive | bsaul/geex | R | false | true | 774 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grab_funs.R
\name{grab_design_matrix}
\alias{grab_design_matrix}
\title{Grab a matrix of fixed effects from a model object}
\usage{
grab_design_matrix(data, rhs_formula, ...)
}
\arguments{
\item{data}{the data from which to extract the matrix}
\item{rhs_formula}{the right hand side of a model formula}
\item{...}{Can be used to pass \code{xlev} to \code{\link[stats]{model.frame}}}
}
\value{
a \code{\link[stats]{model.matrix}}
}
\description{
Grab a matrix of fixed effects from a model object
}
\examples{
# Create a "desigm" matrix for the first ten rows of iris data
fit <- lm(Sepal.Width ~ Petal.Width, data = iris)
grab_design_matrix(
data = iris[1:10, ],
grab_fixed_formula(fit))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lazytensor_operations.R
\name{Arg.ComplexLazyTensor}
\alias{Arg.ComplexLazyTensor}
\title{Element-wise angle (or argument) of complex.}
\usage{
\method{Arg}{ComplexLazyTensor}(z)
}
\description{
Element-wise angle (or argument) of complex.
}
\author{
Chloe Serre-Combe, Amelie Vernay
}
\keyword{internal}
| /rkeops/man/Arg.ComplexLazyTensor.Rd | permissive | getkeops/keops | R | false | true | 383 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lazytensor_operations.R
\name{Arg.ComplexLazyTensor}
\alias{Arg.ComplexLazyTensor}
\title{Element-wise angle (or argument) of complex.}
\usage{
\method{Arg}{ComplexLazyTensor}(z)
}
\description{
Element-wise angle (or argument) of complex.
}
\author{
Chloe Serre-Combe, Amelie Vernay
}
\keyword{internal}
|
# load data
houseHold <- as.data.frame(read.table ("household_power_consumption.txt", sep = ";", header = TRUE))
houseHold <- houseHold[as.character(houseHold$Date) == "1/2/2007" | as.character(houseHold$Date) == "2/2/2007", ]
houseHold$Time = strptime(paste(houseHold[,1],houseHold[,2]),"%d/%m/%Y %H:%M:%S")
# Assign variables and check for any "?"
Y <- as.character(houseHold[,3])
X <- houseHold[,2]
X <- X[!Y == "?"]
Y <- as.numeric(Y[!Y == "?"])
png ("plot2.png")
plot (X,Y, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
| /plot2.R | no_license | filmonisme/EDA-Course-project-1 | R | false | false | 553 | r |
# load data
houseHold <- as.data.frame(read.table ("household_power_consumption.txt", sep = ";", header = TRUE))
houseHold <- houseHold[as.character(houseHold$Date) == "1/2/2007" | as.character(houseHold$Date) == "2/2/2007", ]
houseHold$Time = strptime(paste(houseHold[,1],houseHold[,2]),"%d/%m/%Y %H:%M:%S")
# Assign variables and check for any "?"
Y <- as.character(houseHold[,3])
X <- houseHold[,2]
X <- X[!Y == "?"]
Y <- as.numeric(Y[!Y == "?"])
png ("plot2.png")
plot (X,Y, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
# Regional job-performance bar chart (plotly): bars from regdata$x (region)
# and regdata$y (performance), with each bar labelled by its y value.
fig <- plot_ly(regdata, x= ~x, y = ~y, type = 'bar',
               # BUGFIX: 'text = y' referenced a free variable (should be the
               # formula ~y) and 'texposition' was a typo that plotly
               # silently ignored -- the correct attribute is 'textposition'.
               text = ~y, textposition = 'auto',
               marker = list(color = 'rgb(158,202,225)',
                             # BUGFIX: the outline colour string was missing
                             # its closing parenthesis.
                             line = list(color = 'rgb(8,48,107)')))
fig <- fig %>% layout(title = "Regional Job Performance",
                      xaxis = list(title = "Region"),
                      yaxis = list(title = "Job Performance"),
                      # NOTE(review): width/height are given as strings;
                      # plotly expects numbers -- confirm intended size.
                      width = '200',
                      height = '200')
fig
| /plotly.R | no_license | meyerjoe-R/Spoketh | R | false | false | 479 | r | fig <- plot_ly(regdata, x= ~x, y = ~y, type = 'bar',
text = y, texposition = 'auto',
marker = list(color = 'rgb(158,202,225)',
line = list(color = 'rgb(8,48,107')))
fig <- fig %>% layout(title = "Regional Job Performance",
xaxis = list(title = "Region"),
yaxis = list(title = "Job Performance"),
width = '200',
height = '200')
fig
|
\name{is.exp}
\alias{is.exp}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{is.exp}
\description{Judge whether the data obeys an exponential distribution.}
\usage{
is.exp(x, m, a, lambda = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{data}
\item{m}{the number of intervals you want to divide the data in, default value is 10}
\item{a}{significance level}
\item{lambda}{the parameter lambda}
}
\details{
Given a set of observations from a certain distribution, this function is used to test whether the observations are from an Exponential distribution or not. Usually, to ensure the function works well, the sample size needs to be large enough, i.e. the result will be stable if the sample size is larger than 100. The Exponential distribution mentioned in this function is the one with mean 1/lambda and variance 1/lambda^2. The function will work better if the number of intervals you choose to divide the data in is between 10 and 20. This number cannot exceed the number of given observations.
}
\value{if data possibly obeys exponential distribution, return a value named qchisq which represents the possibility. The larger qchisq is, the larger the possibility will be; else return -1.}
\references{
ROBERT V. HOGG/ALLEN T. CRAIG (Fifth Edition) Introduction to Mathematical Statistics.}
\author{JunYao Chen, CuiYi He, BoXian Wei}
\note{
please pay attention to the definition of parameters in our functions.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{is.dt}} , \code{\link{DnE-package}}
}
\examples{
require(stats)
examplecheck<-rexp(100,10)
is.exp(examplecheck,10,0.05)
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/is.exp.Rd | no_license | cran/DnE | R | false | false | 2,060 | rd | \name{is.exp}
\alias{is.exp}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{is.exp}
\description{Judge whether the data obeys an exponential distribution.}
\usage{
is.exp(x, m, a, lambda = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{data}
\item{m}{the number of intervals you want to divide the data in, default value is 10}
\item{a}{significance level}
\item{lambda}{the parameter lambda}
}
\details{
Given a set of observations from a certain distribution, this function is used to test whether the observations are from an Exponential distribution or not. Usually, to ensure the function works well, the sample size needs to be large enough, i.e. the result will be stable if the sample size is larger than 100. The Exponential distribution mentioned in this function is the one with mean 1/lambda and variance 1/lambda^2. The function will work better if the number of intervals you choose to divide the data in is between 10 and 20. This number cannot exceed the number of given observations.
}
\value{if data possibly obeys exponential distribution, return a value named qchisq which represents the possibility. The larger qchisq is, the larger the possibility will be; else return -1.}
\references{
ROBERT V. HOGG/ALLEN T. CRAIG (Fifth Edition) Introduction to Mathematical Statistics.}
\author{JunYao Chen, CuiYi He, BoXian Wei}
\note{
please pay attention to the definition of parameters in our functions.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{is.dt}} , \code{\link{DnE-package}}
}
\examples{
require(stats)
examplecheck<-rexp(100,10)
is.exp(examplecheck,10,0.05)
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preText_score_plot.R
\name{preText_score_plot}
\alias{preText_score_plot}
\title{preText specification plot}
\usage{
preText_score_plot(preText_results, display_raw_rankings = FALSE,
remove_labels = FALSE, num_docs = NULL, text_size = 1)
}
\arguments{
\item{preText_results}{The output from the `preText_test()` or
`preText()` functions.}
\item{display_raw_rankings}{Logical indicating whether raw ranking
differences should be displayed (as opposed to relative differences).}
\item{remove_labels}{Option to remove preprocessing step labels. Defaults to
FALSE.}
\item{num_docs}{If display_raw_rankings = TRUE, the number of documents in
the corpus.}
\item{text_size}{The `cex` for text in dot plot generated by function.
Defaults to 1.}
}
\value{
A plot
}
\description{
preText plots for each preprocessing specification.
}
\examples{
\dontrun{
# load the package
library(preText)
# load in the data
data("UK_Manifestos")
# preprocess data
preprocessed_documents <- factorial_preprocessing(
UK_Manifestos,
use_ngrams = TRUE,
infrequent_term_threshold = 0.02,
verbose = TRUE)
# run preText
preText_results <- preText(
preprocessed_documents,
dataset_name = "Inaugural Speeches",
distance_method = "cosine",
num_comparisons = 100,
verbose = TRUE)
# generate preText score plot
preText_score_plot(preText_results)
}
}
| /man/preText_score_plot.Rd | no_license | paddytobias/preText | R | false | true | 1,435 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preText_score_plot.R
\name{preText_score_plot}
\alias{preText_score_plot}
\title{preText specification plot}
\usage{
preText_score_plot(preText_results, display_raw_rankings = FALSE,
remove_labels = FALSE, num_docs = NULL, text_size = 1)
}
\arguments{
\item{preText_results}{The output from the `preText_test()` or
`preText()` functions.}
\item{display_raw_rankings}{Logical indicating whether raw ranking
differences should be displayed (as opposed to relative differences).}
\item{remove_labels}{Option to remove preprocessing step labels. Defaults to
FALSE.}
\item{num_docs}{If display_raw_rankings = TRUE, the number of documents in
the corpus.}
\item{text_size}{The `cex` for text in dot plot generated by function.
Defaults to 1.}
}
\value{
A plot
}
\description{
preText plots for each preprocessing specification.
}
\examples{
\dontrun{
# load the package
library(preText)
# load in the data
data("UK_Manifestos")
# preprocess data
preprocessed_documents <- factorial_preprocessing(
UK_Manifestos,
use_ngrams = TRUE,
infrequent_term_threshold = 0.02,
verbose = TRUE)
# run preText
preText_results <- preText(
preprocessed_documents,
dataset_name = "Inaugural Speeches",
distance_method = "cosine",
num_comparisons = 100,
verbose = TRUE)
# generate preText score plot
preText_score_plot(preText_results)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bi_model.R
\name{insert_lines}
\alias{insert_lines}
\alias{insert_lines.bi_model}
\title{Insert lines in a LibBi model}
\usage{
\method{insert_lines}{bi_model}(x, lines, before, after, at_beginning_of,
at_end_of, ...)
}
\arguments{
\item{x}{a \code{\link{bi_model}} object}
\item{lines}{vector or line(s)}
\item{before}{line number before which to insert line(s)}
\item{after}{line number after which to insert line(s)}
\item{at_beginning_of}{block at the beginning of which to insert lines(s)}
\item{at_end_of}{block at the end of which to insert lines(s)}
\item{...}{ignored}
}
\value{
the updated bi model
}
\description{
Inserts one or more lines into a libbi model. If one of \code{before} or \code{after} is given, the line(s) will be inserted before or after a given line number or block name, respectively. If one of \code{at_beginning of} or \code{at_end_of} is given, the lines will be inserted at the beginning/end of the block, respectively.
}
\examples{
model_file_name <- system.file(package="rbi", "PZ.bi")
PZ <- bi_model(filename = model_file_name)
PZ <- insert_lines(PZ, lines = "noise beta", after = 8)
}
\seealso{
\code{\link{bi_model}}
}
| /man/insert_lines.Rd | no_license | thigm85/RBi | R | false | true | 1,244 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bi_model.R
\name{insert_lines}
\alias{insert_lines}
\alias{insert_lines.bi_model}
\title{Insert lines in a LibBi model}
\usage{
\method{insert_lines}{bi_model}(x, lines, before, after, at_beginning_of,
at_end_of, ...)
}
\arguments{
\item{x}{a \code{\link{bi_model}} object}
\item{lines}{vector or line(s)}
\item{before}{line number before which to insert line(s)}
\item{after}{line number after which to insert line(s)}
\item{at_beginning_of}{block at the beginning of which to insert lines(s)}
\item{at_end_of}{block at the end of which to insert lines(s)}
\item{...}{ignored}
}
\value{
the updated bi model
}
\description{
Inserts one or more lines into a libbi model. If one of \code{before} or \code{after} is given, the line(s) will be inserted before or after a given line number or block name, respectively. If one of \code{at_beginning of} or \code{at_end_of} is given, the lines will be inserted at the beginning/end of the block, respectively.
}
\examples{
model_file_name <- system.file(package="rbi", "PZ.bi")
PZ <- bi_model(filename = model_file_name)
PZ <- insert_lines(PZ, lines = "noise beta", after = 8)
}
\seealso{
\code{\link{bi_model}}
}
|
library(leaflet)
library(lubridate)
library(visdat)
library(patchwork)
library(janitor)
# The dataset contains 3 months of transactions for 100 customers
library(tidyverse)
# read data
file <- readxl::read_xlsx("data/synthesised-transaction-dataset.xlsx",
trim_ws = TRUE,
col_types = c("text","numeric", "text", "text", "text", "text", "text", "text", "numeric",
"text", "numeric", "date", "text", "numeric", "text", "text", "text", "numeric",
"text", "text", "text", "text","text"))
# Quality checks
visdat::vis_miss(file) +
theme(
axis.text = element_text(size = 10)
) +
labs(
title = "% of Missing Data"
) +
theme(
plot.background = element_rect(fill = "cornsilk")
)
#vis_dat(file) +
# colorspace::scale_fill_discrete_qualitative()
GGally::ggscatmat(file, columns=c(12, 15, 19), color = "month") +
scale_colour_brewer(palette="Set1")
# find duplicates
file %>%
janitor::get_dupes()
#file[4359,3]
#file[3051,3]
# Separate long and lat
file <- file %>%
separate(long_lat, into = c("long", "lat"), sep = "-") %>%
separate(merchant_long_lat, into = c("merch_long", "merch_lat"), sep = "-") %>%
mutate(long = as.numeric(long),
lat = as.numeric(lat),
merch_long = as.numeric(merch_long),
merch_lat = as.numeric(merch_lat),
date = lubridate::as_date(date),
month = month(date),
day = day(date),
lat = lat * (-1),
merch_lat = merch_lat * (-1),
time = stringr::str_sub(extraction, start = 12, end = 19),
hour = stringr::str_sub(time, start = 1, end = 2))
customer_map_data <- file %>%
select(account,long, lat) %>%
distinct()
merch_map_data <- file %>%
select(merch_long, merch_lat, merchant_suburb) %>%
na.omit() %>%
distinct()
leaflet() %>%
addProviderTiles("Stamen.Terrain") %>%
addMarkers(lng = customer_map_data$long, lat = customer_map_data$lat,
label = customer_map_data$account) %>%
addCircleMarkers(lng = merch_map_data$merch_long, lat = merch_map_data$merch_lat, label = merch_map_data$merchant_suburb,
color = 'orangee', fill = T, fillColor = 'orange')
file %>%
group_by(txn_description) %>%
summarise(average_amaount = mean(amount)) %>%
arrange(desc(average_amaount))
file %>%
filter(amount > 100) %>%
ggplot(aes(as.factor(month), amount, fill = txn_description)) +
geom_boxplot()
file %>%
filter(amount > 100) %>%
ggplot(aes(as.factor(day), amount)) +
geom_boxplot() +
coord_flip()
# Find: average transaction amount, How many transactions do customers make each month, on average?,
#segment the data by time and visualise them, look at transaction volume and
# spending over a day or week Consider the effect of any outliers that may distort your analysis.
# what insights can you draw from the location information provided in the dataset?
# Present in 2-3 slides
## Make a Summary of which clients receive the biggest payments and which ones have the most expenses to target
## people for loans
file %>%
filter(account == "ACC-1598451071") %>%
select(txn_description, balance, amount, date) %>%
filter(txn_description == "PAY/SALARY")
income <- file %>%
group_by(account, gender) %>%
select(txn_description, balance, amount, date, gender) %>%
filter(txn_description == "PAY/SALARY") %>%
summarise(total_payment = sum(amount)) %>%
arrange(desc(total_payment))
expenses <- file %>%
group_by(account) %>%
select(txn_description, balance, amount, date) %>%
filter(txn_description %in% c("POS", "SALES-POS", "PAYMENT")) %>%
summarise(total_expenses = sum(amount)) %>%
arrange(desc(total_expenses))
client_profile <- inner_join(income, expenses, by = "account") %>%
mutate(exp_perc_of_salary = total_expenses/total_payment * 100) %>%
arrange(desc(exp_perc_of_salary))
client_profile %>%
ggplot(aes(total_payment, y = ..density.., group = gender)) +
geom_density(aes(fill = gender, color = gender )) +
geom_histogram(alpha = 0.6, fill = "grey", binwidth = 1000) +
ggthemes::theme_economist() +
theme(
legend.position = "none",
) +
labs(
x = "Salary"
) +
scale_fill_viridis_d() +
scale_color_viridis_d() +
client_profile %>%
ggplot(aes(total_expenses, y = ..density..)) +
geom_density(aes(fill = gender, color = gender )) +
geom_histogram(alpha = 0.6, fill = "grey", binwidth = 500) +
ggthemes::theme_economist() +
theme(
legend.position = "bottom"
) +
labs(
x = "Total Expenses"
) +
scale_fill_viridis_d() +
scale_color_viridis_d() +
client_profile %>%
ggplot(aes(exp_perc_of_salary, y = ..density..)) +
geom_density(aes(fill = gender, color = gender )) +
geom_histogram(alpha = 0.6, fill = "grey") +
ggthemes::theme_economist() +
theme(
legend.position = "none"
) +
labs(
x = "Expenses as percentage of Salary"
) +
scale_fill_viridis_d() +
scale_color_viridis_d()
# average transaction amount
DT::datatable(
file %>%
mutate(
month = case_when(
month == '8' ~ 'August',
month == '9' ~ 'September',
month == '10' ~ 'October'
)
) %>%
group_by(txn_description, month) %>%
summarise(average_transaction = round(mean(amount, na.rm = TRUE),2)) %>%
arrange(txn_description)
)
file %>%
count(month)
# fraudulent paymenrs as payments occur only business hours
file %>%
group_by(txn_description, hour) %>%
summarise(total_amount = sum(amount)) %>%
ggplot(aes(hour, total_amount, color = txn_description, group = txn_description)) +
geom_line()
## Transaction volumes and effect of outliers
install.packages("OutlierDetection")
library(OutlierDetection)
dens(file[,"amount"])
library(FNN)
amount_knn <- get.knn(file[, 'amount'])
file_amount <- file %>%
select(date, amount)
file_amount$knn_score <- rowMeans(amount_knn$nn.dist)
file_amount %>%
ggplot(aes(date, amount, size = knn_score, color = knn_score)) +
geom_point()
file %>%
ggplot(aes(date, balance, group = account, color = amount)) +
geom_line()
balance_changes <- file %>%
select(account, date, amount, balance) %>%
group_by(account) %>%
arrange(date, .by_group = TRUE) %>%
mutate(pct_change = (balance/lead(balance) - 1) * 100)
balance_changes %>%
filter(account == "ACC-1222300524") %>%
arrange(date)
library(DataCombine)
balance_changes <- file %>%
select(date, account, txn_description, amount, balance) %>%
PercChange(Var = 'balance',
type = 'proportion',
NewVar = 'PercentChange',
GroupVar = 'account')
balance_changes %>%
filter(account == "ACC-559365433") %>%
arrange(date)
balance_changes %>%
ggplot(aes(date, PercentChange, group = account, color = PercentChange)) +
geom_line() +
geom_text(data = balance_changes %>% filter(PercentChange > 5000), aes(label = account))
balance_changes %>%
filter(account == "ACC-1598451071" ) %>%
ggplot(aes(date, amount, color = txn_description)) +
geom_point()
balance_changes %>%
filter(balance < 100000) %>%
ggplot(aes(date, balance, group = account)) +
geom_line()
file %>%
group_by(account) %>%
count(txn_description) %>%
filter(txn_description == "PAY/SALARY") %>%
arrange(desc(n))
# who got an increase
file %>%
group_by(account) %>%
filter(txn_description == "PAY/SALARY") %>%
ggplot(aes(date, amount, group = account)) +
geom_line()
file %>%
group_by(hour, txn_description) %>%
count() %>%
ggplot(aes(hour, n, fill = txn_description)) +
geom_col() +
facet_wrap(~txn_description) +
theme_classic() +
ggthemes::scale_fill_economist() +
labs(
x = 'Hour',
y = "Transaction Volume"
)
library(ggalluvial)
file %>%
mutate(
month = case_when(
month == '8' ~ 'August',
month == '9' ~ 'September',
month == '10' ~ 'October'
)
) %>%
ggplot(
aes(axis1 = gender, axis2 = month, axis3 = txn_description,
y = log(balance))) +
scale_x_discrete(limits = c("Gedner", "Month", "Transaction Type"), expand = c(.2, .05)) +
xlab("Demographic") +
geom_flow(width = 1/4) +
geom_alluvium(aes(fill = amount)) +
geom_stratum() +
geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
theme_minimal() +
ggtitle("Transaction Route",
"stratified by demographics and transaction type") +
theme(
text = element_text(size = 12),
axis.text.y = element_blank(),
axis.title = element_blank()
) +
labs(
fill = "Amount"
) +
scale_fill_continuous()
hist(file$balance)
| /R/customer_analysis.R | permissive | petestylianos/anz_virtual_experience | R | false | false | 8,784 | r | library(leaflet)
library(lubridate)
library(visdat)
library(patchwork)
library(janitor)
# The dataset contains 3 months of transactions for 100 customers
library(tidyverse)
# read data
file <- readxl::read_xlsx("data/synthesised-transaction-dataset.xlsx",
trim_ws = TRUE,
col_types = c("text","numeric", "text", "text", "text", "text", "text", "text", "numeric",
"text", "numeric", "date", "text", "numeric", "text", "text", "text", "numeric",
"text", "text", "text", "text","text"))
# Quality checks
visdat::vis_miss(file) +
theme(
axis.text = element_text(size = 10)
) +
labs(
title = "% of Missing Data"
) +
theme(
plot.background = element_rect(fill = "cornsilk")
)
#vis_dat(file) +
# colorspace::scale_fill_discrete_qualitative()
GGally::ggscatmat(file, columns=c(12, 15, 19), color = "month") +
scale_colour_brewer(palette="Set1")
# find duplicates
file %>%
janitor::get_dupes()
#file[4359,3]
#file[3051,3]
# Separate long and lat
file <- file %>%
separate(long_lat, into = c("long", "lat"), sep = "-") %>%
separate(merchant_long_lat, into = c("merch_long", "merch_lat"), sep = "-") %>%
mutate(long = as.numeric(long),
lat = as.numeric(lat),
merch_long = as.numeric(merch_long),
merch_lat = as.numeric(merch_lat),
date = lubridate::as_date(date),
month = month(date),
day = day(date),
lat = lat * (-1),
merch_lat = merch_lat * (-1),
time = stringr::str_sub(extraction, start = 12, end = 19),
hour = stringr::str_sub(time, start = 1, end = 2))
customer_map_data <- file %>%
select(account,long, lat) %>%
distinct()
merch_map_data <- file %>%
select(merch_long, merch_lat, merchant_suburb) %>%
na.omit() %>%
distinct()
leaflet() %>%
addProviderTiles("Stamen.Terrain") %>%
addMarkers(lng = customer_map_data$long, lat = customer_map_data$lat,
label = customer_map_data$account) %>%
addCircleMarkers(lng = merch_map_data$merch_long, lat = merch_map_data$merch_lat, label = merch_map_data$merchant_suburb,
color = 'orangee', fill = T, fillColor = 'orange')
file %>%
group_by(txn_description) %>%
summarise(average_amaount = mean(amount)) %>%
arrange(desc(average_amaount))
file %>%
filter(amount > 100) %>%
ggplot(aes(as.factor(month), amount, fill = txn_description)) +
geom_boxplot()
file %>%
filter(amount > 100) %>%
ggplot(aes(as.factor(day), amount)) +
geom_boxplot() +
coord_flip()
# Find: average transaction amount, How many transactions do customers make each month, on average?,
#segment the data by time and visualise them, look at transaction volume and
# spending over a day or week Consider the effect of any outliers that may distort your analysis.
# what insights can you draw from the location information provided in the dataset?
# Present in 2-3 slides
## Make a Summary of which clients receive the biggest payments and which ones have the most expenses to target
## people for loans
file %>%
filter(account == "ACC-1598451071") %>%
select(txn_description, balance, amount, date) %>%
filter(txn_description == "PAY/SALARY")
income <- file %>%
group_by(account, gender) %>%
select(txn_description, balance, amount, date, gender) %>%
filter(txn_description == "PAY/SALARY") %>%
summarise(total_payment = sum(amount)) %>%
arrange(desc(total_payment))
expenses <- file %>%
group_by(account) %>%
select(txn_description, balance, amount, date) %>%
filter(txn_description %in% c("POS", "SALES-POS", "PAYMENT")) %>%
summarise(total_expenses = sum(amount)) %>%
arrange(desc(total_expenses))
client_profile <- inner_join(income, expenses, by = "account") %>%
mutate(exp_perc_of_salary = total_expenses/total_payment * 100) %>%
arrange(desc(exp_perc_of_salary))
client_profile %>%
ggplot(aes(total_payment, y = ..density.., group = gender)) +
geom_density(aes(fill = gender, color = gender )) +
geom_histogram(alpha = 0.6, fill = "grey", binwidth = 1000) +
ggthemes::theme_economist() +
theme(
legend.position = "none",
) +
labs(
x = "Salary"
) +
scale_fill_viridis_d() +
scale_color_viridis_d() +
client_profile %>%
ggplot(aes(total_expenses, y = ..density..)) +
geom_density(aes(fill = gender, color = gender )) +
geom_histogram(alpha = 0.6, fill = "grey", binwidth = 500) +
ggthemes::theme_economist() +
theme(
legend.position = "bottom"
) +
labs(
x = "Total Expenses"
) +
scale_fill_viridis_d() +
scale_color_viridis_d() +
client_profile %>%
ggplot(aes(exp_perc_of_salary, y = ..density..)) +
geom_density(aes(fill = gender, color = gender )) +
geom_histogram(alpha = 0.6, fill = "grey") +
ggthemes::theme_economist() +
theme(
legend.position = "none"
) +
labs(
x = "Expenses as percentage of Salary"
) +
scale_fill_viridis_d() +
scale_color_viridis_d()
# average transaction amount
DT::datatable(
file %>%
mutate(
month = case_when(
month == '8' ~ 'August',
month == '9' ~ 'September',
month == '10' ~ 'October'
)
) %>%
group_by(txn_description, month) %>%
summarise(average_transaction = round(mean(amount, na.rm = TRUE),2)) %>%
arrange(txn_description)
)
file %>%
count(month)
# fraudulent paymenrs as payments occur only business hours
file %>%
group_by(txn_description, hour) %>%
summarise(total_amount = sum(amount)) %>%
ggplot(aes(hour, total_amount, color = txn_description, group = txn_description)) +
geom_line()
## Transaction volumes and effect of outliers
install.packages("OutlierDetection")
library(OutlierDetection)
dens(file[,"amount"])
library(FNN)
amount_knn <- get.knn(file[, 'amount'])
file_amount <- file %>%
select(date, amount)
file_amount$knn_score <- rowMeans(amount_knn$nn.dist)
file_amount %>%
ggplot(aes(date, amount, size = knn_score, color = knn_score)) +
geom_point()
file %>%
ggplot(aes(date, balance, group = account, color = amount)) +
geom_line()
balance_changes <- file %>%
select(account, date, amount, balance) %>%
group_by(account) %>%
arrange(date, .by_group = TRUE) %>%
mutate(pct_change = (balance/lead(balance) - 1) * 100)
balance_changes %>%
filter(account == "ACC-1222300524") %>%
arrange(date)
library(DataCombine)
balance_changes <- file %>%
select(date, account, txn_description, amount, balance) %>%
PercChange(Var = 'balance',
type = 'proportion',
NewVar = 'PercentChange',
GroupVar = 'account')
balance_changes %>%
filter(account == "ACC-559365433") %>%
arrange(date)
balance_changes %>%
ggplot(aes(date, PercentChange, group = account, color = PercentChange)) +
geom_line() +
geom_text(data = balance_changes %>% filter(PercentChange > 5000), aes(label = account))
balance_changes %>%
filter(account == "ACC-1598451071" ) %>%
ggplot(aes(date, amount, color = txn_description)) +
geom_point()
balance_changes %>%
filter(balance < 100000) %>%
ggplot(aes(date, balance, group = account)) +
geom_line()
file %>%
group_by(account) %>%
count(txn_description) %>%
filter(txn_description == "PAY/SALARY") %>%
arrange(desc(n))
# who got an increase
file %>%
group_by(account) %>%
filter(txn_description == "PAY/SALARY") %>%
ggplot(aes(date, amount, group = account)) +
geom_line()
file %>%
group_by(hour, txn_description) %>%
count() %>%
ggplot(aes(hour, n, fill = txn_description)) +
geom_col() +
facet_wrap(~txn_description) +
theme_classic() +
ggthemes::scale_fill_economist() +
labs(
x = 'Hour',
y = "Transaction Volume"
)
library(ggalluvial)
file %>%
mutate(
month = case_when(
month == '8' ~ 'August',
month == '9' ~ 'September',
month == '10' ~ 'October'
)
) %>%
ggplot(
aes(axis1 = gender, axis2 = month, axis3 = txn_description,
y = log(balance))) +
scale_x_discrete(limits = c("Gedner", "Month", "Transaction Type"), expand = c(.2, .05)) +
xlab("Demographic") +
geom_flow(width = 1/4) +
geom_alluvium(aes(fill = amount)) +
geom_stratum() +
geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
theme_minimal() +
ggtitle("Transaction Route",
"stratified by demographics and transaction type") +
theme(
text = element_text(size = 12),
axis.text.y = element_blank(),
axis.title = element_blank()
) +
labs(
fill = "Amount"
) +
scale_fill_continuous()
hist(file$balance)
|
plot3 <- function() {
## -------------------------
## read input file
## -------------------------
tpower<-read.table("household_power_consumption.txt",header=TRUE,sep=";",dec=".",na.strings="?")
## -------------------------
## get subset of data
## -------------------------
spower<-subset(tpower,Date=="1/2/2007" | Date=="2/2/2007",select=1:9)
#convert date and time to dt.time
spower$dt.time <- as.POSIXct(paste(spower$Date, spower$Time),format="%d/%m/%Y %H:%M:%S")
## -------------------------
# define device and plot
## -------------------------
png(filename="plot3.png",480,480)
plot(spower$dt.time,spower$Sub_metering_1 , type="o", col="black",pch=".",ylab="Energy sub metering",xlab="")
lines(spower$dt.time,spower$Sub_metering_2 , type="o", col="red",pch=".",ylab="Energy sub metering",xlab="")
lines(spower$dt.time,spower$Sub_metering_3 , type="o", col="blue",pch=".",ylab="Energy sub metering",xlab="")
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=c(1,1),
lwd=c(2.5,2.5),col=c("black","red","blue"))
dev.off()
} | /plot3.R | no_license | przytula/Electric_power | R | false | false | 1,088 | r | plot3 <- function() {
## -------------------------
## read input file
## -------------------------
tpower<-read.table("household_power_consumption.txt",header=TRUE,sep=";",dec=".",na.strings="?")
## -------------------------
## get subset of data
## -------------------------
spower<-subset(tpower,Date=="1/2/2007" | Date=="2/2/2007",select=1:9)
#convert date and time to dt.time
spower$dt.time <- as.POSIXct(paste(spower$Date, spower$Time),format="%d/%m/%Y %H:%M:%S")
## -------------------------
# define device and plot
## -------------------------
png(filename="plot3.png",480,480)
plot(spower$dt.time,spower$Sub_metering_1 , type="o", col="black",pch=".",ylab="Energy sub metering",xlab="")
lines(spower$dt.time,spower$Sub_metering_2 , type="o", col="red",pch=".",ylab="Energy sub metering",xlab="")
lines(spower$dt.time,spower$Sub_metering_3 , type="o", col="blue",pch=".",ylab="Energy sub metering",xlab="")
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=c(1,1),
lwd=c(2.5,2.5),col=c("black","red","blue"))
dev.off()
} |
regions = snakemake@params$regions
#Load in table that contains the positions of the causal variants from HAPGEN2
causal_snps = read.table(sprintf("hapgen2/snps_gwas_causal_region_%s",regions), header = F)
snp_1_causal_position = causal_snps[1]
snp_2_causal_position = causal_snps[2]
snp_3_causal_position = causal_snps[3]
#Loop through all possible configurations for the GWAS FINEMAPPING (See previous rule for details)
finemapped_file_configs = c("finemap/in_sample_ld_true_sumstats_region_%s",
"finemap/1000G_proportions_ld_true_sumstats_region_%s",
"finemap/AFR_ld_true_sumstats_region_%s",
"finemap/EUR_ld_true_sumstats_region_%s",
"finemap/EAS_ld_true_sumstats_region_%s",
"finemap/ref_admixed_ld_true_sumstats_region_%s",
"finemap/1000G_weighted_ld_true_sumstats_region_%s")
for (i in 1:length(finemapped_file_configs)) {
#Read in files
finemap_snp_results = read.table(sprintf(paste(finemapped_file_configs[i],".snp",sep =""), regions), header = T, stringsAsFactors = F)
finemap_config_results = read.table(sprintf(paste(finemapped_file_configs[i],".config",sep =""), regions), header = T, stringsAsFactors = F)
#Go through the config results until we have reached the 95% credible set
culmative_prob = cumsum(finemap_config_results$prob)
index_cred_set = min(which(culmative_prob > 0.95))
#Find RSID of the causal SNP index
causal_snp_rsids = finemap_snp_results$rsid[which(finemap_snp_results$position %in% c(snp_1_causal_position,snp_2_causal_position,snp_3_causal_position))]
#Loop through the rows in the credible set configs
pp_weighted_tp = c()
cred_set_snps = c()
for (j in 1:(index_cred_set)) {
#Check how many of the causal variants exist
tp_in_set = length(which(unlist(strsplit(finemap_config_results$config[j],",")) %in% as.character(causal_snp_rsids)))
frac_tp = tp_in_set/(length(unlist(strsplit(finemap_config_results$config[j],","))))
pp_weighted_tp[j] = frac_tp * finemap_config_results$prob[j]
cred_set_snps = append(cred_set_snps,unlist(strsplit(finemap_config_results$config[j],",")))
}
sum_tp_pp = sum(pp_weighted_tp)
indicator = length(which(unique(cred_set_snps) %in% causal_snp_rsids))
#Save the results
saveRDS(sum_tp_pp,sprintf(paste(finemapped_file_configs[i],"_cred_set_fdr.RData",sep=""),regions), version = 2)
saveRDS(indicator,sprintf(paste(finemapped_file_configs[i],"_cred_set_indicator.RData",sep=""),regions), version = 2)
}
| /three_way_admixture/calculate_credible_set.R | no_license | marcustutert/thesis_code | R | false | false | 2,642 | r | regions = snakemake@params$regions
#Load in table that contains the positions of the causal variants from HAPGEN2
causal_snps = read.table(sprintf("hapgen2/snps_gwas_causal_region_%s",regions), header = F)
snp_1_causal_position = causal_snps[1]
snp_2_causal_position = causal_snps[2]
snp_3_causal_position = causal_snps[3]
#Loop through all possible configurations for the GWAS FINEMAPPING (See previous rule for details)
finemapped_file_configs = c("finemap/in_sample_ld_true_sumstats_region_%s",
"finemap/1000G_proportions_ld_true_sumstats_region_%s",
"finemap/AFR_ld_true_sumstats_region_%s",
"finemap/EUR_ld_true_sumstats_region_%s",
"finemap/EAS_ld_true_sumstats_region_%s",
"finemap/ref_admixed_ld_true_sumstats_region_%s",
"finemap/1000G_weighted_ld_true_sumstats_region_%s")
for (i in 1:length(finemapped_file_configs)) {
#Read in files
finemap_snp_results = read.table(sprintf(paste(finemapped_file_configs[i],".snp",sep =""), regions), header = T, stringsAsFactors = F)
finemap_config_results = read.table(sprintf(paste(finemapped_file_configs[i],".config",sep =""), regions), header = T, stringsAsFactors = F)
#Go through the config results until we have reached the 95% credible set
culmative_prob = cumsum(finemap_config_results$prob)
index_cred_set = min(which(culmative_prob > 0.95))
#Find RSID of the causal SNP index
causal_snp_rsids = finemap_snp_results$rsid[which(finemap_snp_results$position %in% c(snp_1_causal_position,snp_2_causal_position,snp_3_causal_position))]
#Loop through the rows in the credible set configs
pp_weighted_tp = c()
cred_set_snps = c()
for (j in 1:(index_cred_set)) {
#Check how many of the causal variants exist
tp_in_set = length(which(unlist(strsplit(finemap_config_results$config[j],",")) %in% as.character(causal_snp_rsids)))
frac_tp = tp_in_set/(length(unlist(strsplit(finemap_config_results$config[j],","))))
pp_weighted_tp[j] = frac_tp * finemap_config_results$prob[j]
cred_set_snps = append(cred_set_snps,unlist(strsplit(finemap_config_results$config[j],",")))
}
sum_tp_pp = sum(pp_weighted_tp)
indicator = length(which(unique(cred_set_snps) %in% causal_snp_rsids))
#Save the results
saveRDS(sum_tp_pp,sprintf(paste(finemapped_file_configs[i],"_cred_set_fdr.RData",sep=""),regions), version = 2)
saveRDS(indicator,sprintf(paste(finemapped_file_configs[i],"_cred_set_indicator.RData",sep=""),regions), version = 2)
}
|
computeKernelArray <- function(sample, tPar, bandWidth){
size <- length(sample)
kernelArray <- array(data = 0, dim = size)
for(i in 1:length(kernelArray)){
kernelArray[i] <- dnorm((i/size - tPar)/bandWidth)/bandWidth
}
return(kernelArray)
}
| /Alex's assignments/computeKernelArray.R | no_license | evorition/introduction-to-speciality | R | false | false | 257 | r | computeKernelArray <- function(sample, tPar, bandWidth){
size <- length(sample)
kernelArray <- array(data = 0, dim = size)
for(i in 1:length(kernelArray)){
kernelArray[i] <- dnorm((i/size - tPar)/bandWidth)/bandWidth
}
return(kernelArray)
}
|
## Download data from:
## https://archive.ics.uci.edu/ml/datasets/Individual+household+electric+power+consumption.
## Unzip it to a text file named "household_power_consumption.txt".
##Open the data in R, creating dataset named "raw"
raw <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
na.strings = "?")
##Convert the Date and Time variables to a date/time variable named x
raw$Date <- as.Date(raw$Date, format = "%d/%m/%Y")
raw$x <- paste(raw$Date, raw$Time)
raw$x <- strptime(raw$x, "%Y-%m-%d %H:%M:%S")
##Save the data from the February 1st and 2nd of 2007 to dataset "raw2"
st <- as.POSIXct("2007-02-01 00:00:00")
en <- as.POSIXct("2007-02-02 23:59:59")
raw2 <- raw[raw$x > st & raw$x < en, ]
##Create Plot 2
plot(raw2$x, raw2$Global_active_power, type = "s", xlab = "",
ylab = "Global Active Power (kilowatts)")
dev.copy(png, file = "plot2.png", width = 480, height = 480)
dev.off() | /plot2.R | no_license | QHR/ExData_Plotting1 | R | false | false | 943 | r | ## Download data from:
## https://archive.ics.uci.edu/ml/datasets/Individual+household+electric+power+consumption.
## Unzip it to a text file named "household_power_consumption.txt".
##Open the data in R, creating dataset named "raw"
raw <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
na.strings = "?")
##Convert the Date and Time variables to a date/time variable named x
raw$Date <- as.Date(raw$Date, format = "%d/%m/%Y")
raw$x <- paste(raw$Date, raw$Time)
raw$x <- strptime(raw$x, "%Y-%m-%d %H:%M:%S")
##Save the data from the February 1st and 2nd of 2007 to dataset "raw2"
st <- as.POSIXct("2007-02-01 00:00:00")
en <- as.POSIXct("2007-02-02 23:59:59")
raw2 <- raw[raw$x > st & raw$x < en, ]
##Create Plot 2
plot(raw2$x, raw2$Global_active_power, type = "s", xlab = "",
ylab = "Global Active Power (kilowatts)")
dev.copy(png, file = "plot2.png", width = 480, height = 480)
dev.off() |
library(tidyverse)
source("R/basic_metrics.R")
file <- "R/clean_batters_overall.csv"
data <- read_csv(file)
columns <- c("name", "team", "season", "pa", "ab", "h", "x2b", "x3b", "hr",
"bb", "so", "hbp", "sf", "sh", "sb", "cs")
data <- data %>%
select(columns)
basic_calcs <- data %>%
mutate(hbp_p = hbp_p(.)) %>%
mutate(so_p = so_p(.)) %>%
mutate(bb_p = bb_p(.)) %>%
mutate(iso = iso(.)) %>%
mutate(babip = babip(.)) %>%
mutate(avg = avg(.)) %>%
mutate(obp = obp(.)) %>%
mutate(slg = slg(.)) %>%
mutate(ops = ops(.)) %>%
mutate(sar = sar(.))
basic_calcs
basic_calcs %>%
filter(team == "WLC", pa >= 50, season >= 2015) %>%
arrange(desc(ops))
| /R/main.R | no_license | troymoench/baseballstats | R | false | false | 694 | r | library(tidyverse)
source("R/basic_metrics.R")
file <- "R/clean_batters_overall.csv"
data <- read_csv(file)
columns <- c("name", "team", "season", "pa", "ab", "h", "x2b", "x3b", "hr",
"bb", "so", "hbp", "sf", "sh", "sb", "cs")
data <- data %>%
select(columns)
basic_calcs <- data %>%
mutate(hbp_p = hbp_p(.)) %>%
mutate(so_p = so_p(.)) %>%
mutate(bb_p = bb_p(.)) %>%
mutate(iso = iso(.)) %>%
mutate(babip = babip(.)) %>%
mutate(avg = avg(.)) %>%
mutate(obp = obp(.)) %>%
mutate(slg = slg(.)) %>%
mutate(ops = ops(.)) %>%
mutate(sar = sar(.))
basic_calcs
basic_calcs %>%
filter(team == "WLC", pa >= 50, season >= 2015) %>%
arrange(desc(ops))
|
## Two functions will be created
## makeCacheMatrix will take a matrix and cache the results
## cacheSolve will return the cache if already solved or solve it
# makeCacheMatrix creates a list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
# inv used to store the cached inverse
inv <- NULL
# set will set the matrix
set <- function(y) {
x <<- y
inv <<- NULL
}
# get will get the matrix
get <- function() x
# setinverse will set the inverse
setinverse<- function(inverse) inv <<-inverse
# getinverse will get the inverse
getinverse <- function() inv
# list of available options
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then cacheSolve should retrieve the inverse from
## the cache. This is noted by the message.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x', where 'x' is a cache object
  ## produced by makeCacheMatrix(); extra arguments are passed on to solve().
  # Ask the cache object for a previously stored inverse.
  inv <- x$getinverse()
  # Non-NULL means the inverse was already computed for the current matrix:
  # reuse it and announce the shortcut.
  if (!is.null(inv)) {
    message("getting cached inverse matrix")
    return(inv)
  }
  # Cache miss: fetch the matrix itself...
  data <- x$get()
  # ...use solve() to actually get the inverse...
  inv <- solve(data, ...)
  # ...and store the result so the next call can reuse it.
  x$setinverse(inv)
  # Return the freshly computed inverse.
  inv
} | /cachematrix.R | no_license | scottgrey/ProgrammingAssignment2 | R | false | false | 1,841 | r | ## Two functions will be created
## makeCacheMatrix will take a matrix and cache the results
## cacheSolve will return the cache if already solved or solve it
# makeCacheMatrix creates a list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  # inv holds the cached inverse; NULL means "not computed yet".
  inv <- NULL
  # set replaces the stored matrix and invalidates the cached inverse.
  # `<<-` assigns in the enclosing environment shared by all four closures.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  # get returns the stored matrix.
  get <- function() x
  # setinverse stores a computed inverse in the cache.
  setinverse<- function(inverse) inv <<-inverse
  # getinverse returns the cached inverse (NULL if none yet).
  getinverse <- function() inv
  # Expose the four accessors as a named list -- this list is the "object".
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then cacheSolve should retrieve the inverse from
## the cache. This is noted by the message.
## Compute the inverse of the special "matrix" object produced by
## makeCacheMatrix().  On a cache hit the stored inverse is returned (with a
## message); otherwise the inverse is computed with solve(), cached on the
## object, and returned.  `...` is forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    # Cache hit: announce the shortcut, as the original did.
    message("getting cached inverse matrix")
  }
  cached
}
# Auto-extracted example for AHMbook::sim.ldata (distance-sampling simulator).
# NOTE(review): requires the third-party AHMbook package; the example just
# runs the default simulation and inspects the returned list's structure.
library(AHMbook)
### Name: sim.ldata
### Title: Simulation of distance sampling data.
### Aliases: sim.ldata
### ** Examples
# Simulate a data set with the default arguments and look at the structure of the output:
tmp <- sim.ldata()
str(tmp)
| /data/genthat_extracted_code/AHMbook/examples/sim.ldata.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 250 | r | library(AHMbook)
### Name: sim.ldata
### Title: Simulation of distance sampling data.
### Aliases: sim.ldata
### ** Examples
# Simulate a data set with the default arguments and look at the structure of the output:
tmp <- sim.ldata()
str(tmp)
|
# --- Scenario setup: 2 weeks train / 24 weeks calibrate / 2 weeks train -----
# Project helpers and shared variables (DF_JOINED, limit_value,
# rm_uncertainty, calibrate_sensors, ...) come from these sourced files.
source("../AQ_data_preparation/utilities.R")
source("../AQ_data_preparation/nested_models.r")
source("full_year/scripts/variables.r")
df_2min_roll <- readRDS(DF_JOINED)
# NOTE(review): absolute Windows path -- breaks on any other machine;
# consider a relative path or a configurable output root.
output_folder <-"C:/Github/AQ_analysis_calibration/output/full_year/calibration/scenarios/2weeks_24weeks_2weeks/"
if (!dir.exists(output_folder)){
  dir.create(output_folder, recursive = T)  # NOTE(review): prefer TRUE over T
}
# Column names used throughout: sensor reading, reference instrument, RH.
sensor_field = "median_PM25"
reference_field = "PM2.5"
rh_field = "rh"
# Candidate calibration methods evaluated for every window.
method_list = c("initial","lr", "ols",
                "mlr1", "koehlermass",
                "koehlersize",
                "laulainen", "rlmhuber",
                "rlmrh", "rlmpart",
                "rlmpartrh", "lmpart", "lmpartrh")
#method_list = c("rlmpart", "ols")
#method_list = c("rlmpart")
#method_list = c("ols")
#method_list = c("koehlermassrh85")
calibration_scenario = "2w24w2w"
# Bucket every observation into its calendar day, then build every possible
# 196-day (28-week) sliding window of consecutive days.
df_2min_roll$cutDay <-as.POSIXct(cut(df_2min_roll$date, breaks = "1 day"))
dates<-unique(df_2min_roll$cutDay)
dates_list<-c()
for(i in 1:(length(dates)-196)){
  dates_list<-append(dates_list,list(dates[i:(i+195)]))
}
# NOTE(review): require() returns FALSE silently on failure; library(lubridate)
# would fail loudly and is preferable in a script.
require(lubridate)
#wrapper for lapply
# Run one 196-day window through the 2w/24w/2w scenario: the first 14 and the
# last 14 days of the window train the models, the middle 168 days (24 weeks)
# form the calibration period.  The work is done twice: once for the PMS
# sensors (optical bins gr03um..gr100um) and once for the SPS sensors
# (number-concentration bins n05..n10).  Results are written to disk by
# calibrate_sensors(); nothing useful is returned.
# NOTE(review): `method` is accepted but never used; `output_folder`,
# `method_list`, `limit_value` and `rm_uncertainty` are free variables taken
# from the calling environment -- confirm they are defined before use.
my_func<-function(df, sensor_field = "median_PM25", reference_field = "PM2.5", method){
  # Split the window's days into train (ends) and calibration (middle).
  dates_list_ft <- unique(df$cutDay)
  dates_list_train <- dates_list_ft[c(1:14,(length(dates_list_ft)-13):length(dates_list_ft))]
  dates_list_cal <- dates_list_ft[15:(length(dates_list_ft)-14)]
  df_train <- df[df$cutDay %in% dates_list_train, ]
  df_calibration <- df[df$cutDay %in% dates_list_cal, ]
  # --- PMS sensors ----------------------------------------------------------
  sensor_type = "PMS"
  bin_field = c("median_gr03um","median_gr05um",
                "median_gr10um","median_gr25um",
                "median_gr50um","median_gr100um")
  # Scenario label is prefixed with the window's start date (YYYY-MM-DD).
  calibration_scenario <- paste0(substr(min(df$date),1,10), " 2weeks_24weeks_2weeks")
  df_weather_pm_train <- df_train[grepl("PMS", df_train$sensor),] %>%
    ungroup()
  df_weather_pm_calibration<- df_calibration[grepl("PMS", df_calibration$sensor),] %>%
    ungroup()
  calibrate_sensors(df_weather_pm_train, df_weather_pm_calibration, sensor_type = sensor_type, calibration_scenario = calibration_scenario,
                    output_folder = output_folder,
                    method_list = method_list,
                    sensor_field = sensor_field, reference_field = reference_field,
                    rh_field = rh_field,
                    limit_value = limit_value, rm_uncertainty = rm_uncertainty,
                    bin_field = bin_field)
  # --- SPS sensors (same pipeline, different bin columns) -------------------
  bin_field = c("median_n05","median_n1",
                "median_n25",
                "median_n4", "median_n10")
  sensor_type <- "SPS"
  df_weather_pm_train <- df_train[grepl("SPS", df_train$sensor) ,] %>%
    ungroup()
  df_weather_pm_calibration<- df_calibration[grepl("SPS", df_calibration$sensor),] %>%
    ungroup()
  calibrate_sensors(df_weather_pm_train, df_weather_pm_calibration, sensor_type = sensor_type, calibration_scenario = calibration_scenario,
                    output_folder = output_folder,
                    method_list = method_list,
                    sensor_field = sensor_field, reference_field = reference_field,
                    rh_field = rh_field,
                    limit_value = limit_value, rm_uncertainty = rm_uncertainty,
                    bin_field = bin_field)
}
# 2 min
# Apply the scenario to windows 51..80 only: subset the 2-minute data to each
# window's days, then calibrate each subset.  Run mainly for its side effects
# (files written by calibrate_sensors); the commented tail shows a disabled
# bind_rows() step.
res_2min_dates<- lapply(dates_list[51:80],
                        function (x) df_2min_roll[df_2min_roll$cutDay %in% x,]) %>%
  lapply(., function(x) my_func(x, sensor_field = sensor_field, reference_field = reference_field)) #%>%
#bind_rows() | /R/calibration_scenarios_Xweeks_Xmonths/calibration_2weeks_24weeks_2weeks_2.R | permissive | FEEprojects/Paper_calibration_nocs_data_analysis | R | false | false | 3,514 | r | source("../AQ_data_preparation/utilities.R")
source("../AQ_data_preparation/nested_models.r")
source("full_year/scripts/variables.r")
df_2min_roll <- readRDS(DF_JOINED)
output_folder <-"C:/Github/AQ_analysis_calibration/output/full_year/calibration/scenarios/2weeks_24weeks_2weeks/"
if (!dir.exists(output_folder)){
dir.create(output_folder, recursive = T)
}
sensor_field = "median_PM25"
reference_field = "PM2.5"
rh_field = "rh"
method_list = c("initial","lr", "ols",
"mlr1", "koehlermass",
"koehlersize",
"laulainen", "rlmhuber",
"rlmrh", "rlmpart",
"rlmpartrh", "lmpart", "lmpartrh")
#method_list = c("rlmpart", "ols")
#method_list = c("rlmpart")
#method_list = c("ols")
#method_list = c("koehlermassrh85")
calibration_scenario = "2w24w2w"
df_2min_roll$cutDay <-as.POSIXct(cut(df_2min_roll$date, breaks = "1 day"))
dates<-unique(df_2min_roll$cutDay)
dates_list<-c()
for(i in 1:(length(dates)-196)){
dates_list<-append(dates_list,list(dates[i:(i+195)]))
}
require(lubridate)
#wrapper for lapply
my_func<-function(df, sensor_field = "median_PM25", reference_field = "PM2.5", method){
dates_list_ft <- unique(df$cutDay)
dates_list_train <- dates_list_ft[c(1:14,(length(dates_list_ft)-13):length(dates_list_ft))]
dates_list_cal <- dates_list_ft[15:(length(dates_list_ft)-14)]
df_train <- df[df$cutDay %in% dates_list_train, ]
df_calibration <- df[df$cutDay %in% dates_list_cal, ]
sensor_type = "PMS"
bin_field = c("median_gr03um","median_gr05um",
"median_gr10um","median_gr25um",
"median_gr50um","median_gr100um")
calibration_scenario <- paste0(substr(min(df$date),1,10), " 2weeks_24weeks_2weeks")
df_weather_pm_train <- df_train[grepl("PMS", df_train$sensor),] %>%
ungroup()
df_weather_pm_calibration<- df_calibration[grepl("PMS", df_calibration$sensor),] %>%
ungroup()
calibrate_sensors(df_weather_pm_train, df_weather_pm_calibration, sensor_type = sensor_type, calibration_scenario = calibration_scenario,
output_folder = output_folder,
method_list = method_list,
sensor_field = sensor_field, reference_field = reference_field,
rh_field = rh_field,
limit_value = limit_value, rm_uncertainty = rm_uncertainty,
bin_field = bin_field)
bin_field = c("median_n05","median_n1",
"median_n25",
"median_n4", "median_n10")
sensor_type <- "SPS"
df_weather_pm_train <- df_train[grepl("SPS", df_train$sensor) ,] %>%
ungroup()
df_weather_pm_calibration<- df_calibration[grepl("SPS", df_calibration$sensor),] %>%
ungroup()
calibrate_sensors(df_weather_pm_train, df_weather_pm_calibration, sensor_type = sensor_type, calibration_scenario = calibration_scenario,
output_folder = output_folder,
method_list = method_list,
sensor_field = sensor_field, reference_field = reference_field,
rh_field = rh_field,
limit_value = limit_value, rm_uncertainty = rm_uncertainty,
bin_field = bin_field)
}
# 2 min
res_2min_dates<- lapply(dates_list[51:80],
function (x) df_2min_roll[df_2min_roll$cutDay %in% x,]) %>%
lapply(., function(x) my_func(x, sensor_field = sensor_field, reference_field = reference_field)) #%>%
#bind_rows() |
library(pipe.design)
### Name: pipe.design
### Title: Dual-Agent Dose Escalation for Phase I Trials using the PIPE
### Design
### Aliases: pipe.design
### ** Examples
## Reproducing Figure 5 from Mander and Sweeting, Statistics in Medicine 2015.
## The true probability of DLT for a 6x6 grid of dose combinations
## (Scenario 3 from Mander and Sweeting, Statistics in Medicine 2015)
pi <-matrix(c(0.02,0.10,0.20,0.30,0.35,0.45,0.06,0.14,0.24,0.34,0.39,0.49,0.12,0.20,
0.30,0.40,0.45,0.55,0.17,0.25,0.35,0.45,0.50,0.60,0.22,0.30,0.40,0.50,0.60,0.70,0.30,
0.38,0.48,0.58,0.68,0.78),nrow=6,ncol=6)
## Using a weak prior with prior sample size 1/36 for each dose combination and prior
## median of Scenario 1
prior.med<-matrix(c(0.02,0.03,0.06,0.10,0.18,0.23,0.03,0.05,0.09,0.13,0.21,0.27,0.06,
0.09,0.14,0.18,0.26,0.30,0.11,0.14,0.18,0.23,0.30,0.36,0.18,0.21,0.26,0.30,0.39,0.44,
0.23,0.27,0.3,0.36,0.44,0.49),nrow=6,ncol=6)
## Using a neighbouring escalation constraint
## Selecting the closest admissible doses
## Using a safety constraint with epsilon = 0.8
## Obtain dose recommendations for first cohort
cohort1<-pipe.design(N=2,S=1,c=2,theta=0.3,prior.med=prior.med,
prior.ss=matrix(1/36,ncol=6,nrow=6),strategy="ss",constraint="neighbouring",
epsilon=0.8,admis="closest",alternate=FALSE)
cohort1 ## Recommends starting at (1,1)
## Second cohort
## Assume no toxicities are seen in first cohort
data1<-data.frame(patient=1:2,doseA=rep(1,2),doseB=rep(1,2),tox=rep(0,2))
cohort2<-pipe.design(data=data1,S=1,c=2,theta=0.3,prior.med=prior.med,
prior.ss=matrix(1/36,ncol=6,nrow=6),strategy="ss",constraint="neighbouring",
epsilon=0.8,admis="closest",alternate=FALSE)
cohort2 ## Recommends dosing at (2,2)
## Third cohort
## Assume one toxicity is seen in second cohort
data2<-rbind(data1,data.frame(patient=3:4,doseA=rep(2,2),doseB=rep(2,2),tox=c(1,0)))
cohort3<-pipe.design(data=data2,S=1,c=2,theta=0.3,prior.med=prior.med,
prior.ss=matrix(1/36,ncol=6,nrow=6),strategy="ss",constraint="neighbouring",
epsilon=0.8,admis="closest",alternate=FALSE)
cohort3 ## Recommends dosing at (1,3)
## Simulating a single trial with sample size of 40, cohort size 2
## and target toxicity of 0.3, using true probabilities of toxicity pi
## Not run:
##D set.seed(700)
##D example<-pipe.design(N=40,S=1,c=2,theta=0.3,pi=pi,prior.med=prior.med,
##D prior.ss=matrix(1/36,ncol=6,nrow=6),strategy="ss",constraint="neighbouring",
##D epsilon=0.8,admis="closest",alternate=FALSE)
##D example
##D plot(example)
## End(Not run)
## Not run:
##D ## Run a Shiny GUI version of pipe.design
##D runShinyPIPE()
## End(Not run)
## Not run:
##D ## Conducting a simulation study (1000 trials) to investigate
##D ## operating characteristics for Scenario 3
##D ## (as reported in Table III in Mander and Sweeting, Statistics in Medicine 2015)
##D set.seed(262)
##D scen3<-pipe.design(N=40,S=1000,c=2,theta=0.3,pi=pi,prior.med=prior.med,
##D prior.ss=matrix(1/36,ncol=6,nrow=6),strategy="ss",constraint="neighbouring",
##D epsilon=0.8,admis="closest",alternate=FALSE)
##D scen3
## End(Not run)
| /data/genthat_extracted_code/pipe.design/examples/pipe.design.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 3,091 | r | library(pipe.design)
### Name: pipe.design
### Title: Dual-Agent Dose Escalation for Phase I Trials using the PIPE
### Design
### Aliases: pipe.design
### ** Examples
## Reproducing Figure 5 from Mander and Sweeting, Statistics in Medicine 2015.
## The true probability of DLT for a 6x6 grid of dose combinations
## (Scenario 3 from Mander and Sweeting, Statistics in Medicine 2015)
pi <-matrix(c(0.02,0.10,0.20,0.30,0.35,0.45,0.06,0.14,0.24,0.34,0.39,0.49,0.12,0.20,
0.30,0.40,0.45,0.55,0.17,0.25,0.35,0.45,0.50,0.60,0.22,0.30,0.40,0.50,0.60,0.70,0.30,
0.38,0.48,0.58,0.68,0.78),nrow=6,ncol=6)
## Using a weak prior with prior sample size 1/36 for each dose combination and prior
## median of Scenario 1
prior.med<-matrix(c(0.02,0.03,0.06,0.10,0.18,0.23,0.03,0.05,0.09,0.13,0.21,0.27,0.06,
0.09,0.14,0.18,0.26,0.30,0.11,0.14,0.18,0.23,0.30,0.36,0.18,0.21,0.26,0.30,0.39,0.44,
0.23,0.27,0.3,0.36,0.44,0.49),nrow=6,ncol=6)
## Using a neighbouring escalation constraint
## Selecting the closest admissible doses
## Using a safety constraint with epsilon = 0.8
## Obtain dose recommendations for first cohort
cohort1<-pipe.design(N=2,S=1,c=2,theta=0.3,prior.med=prior.med,
prior.ss=matrix(1/36,ncol=6,nrow=6),strategy="ss",constraint="neighbouring",
epsilon=0.8,admis="closest",alternate=FALSE)
cohort1 ## Recommends starting at (1,1)
## Second cohort
## Assume no toxicities are seen in first cohort
data1<-data.frame(patient=1:2,doseA=rep(1,2),doseB=rep(1,2),tox=rep(0,2))
cohort2<-pipe.design(data=data1,S=1,c=2,theta=0.3,prior.med=prior.med,
prior.ss=matrix(1/36,ncol=6,nrow=6),strategy="ss",constraint="neighbouring",
epsilon=0.8,admis="closest",alternate=FALSE)
cohort2 ## Recommends dosing at (2,2)
## Third cohort
## Assume one toxicity is seen in second cohort
data2<-rbind(data1,data.frame(patient=3:4,doseA=rep(2,2),doseB=rep(2,2),tox=c(1,0)))
cohort3<-pipe.design(data=data2,S=1,c=2,theta=0.3,prior.med=prior.med,
prior.ss=matrix(1/36,ncol=6,nrow=6),strategy="ss",constraint="neighbouring",
epsilon=0.8,admis="closest",alternate=FALSE)
cohort3 ## Recommends dosing at (1,3)
## Simulating a single trial with sample size of 40, cohort size 2
## and target toxicity of 0.3, using true probabilities of toxicity pi
## Not run:
##D set.seed(700)
##D example<-pipe.design(N=40,S=1,c=2,theta=0.3,pi=pi,prior.med=prior.med,
##D prior.ss=matrix(1/36,ncol=6,nrow=6),strategy="ss",constraint="neighbouring",
##D epsilon=0.8,admis="closest",alternate=FALSE)
##D example
##D plot(example)
## End(Not run)
## Not run:
##D ## Run a Shiny GUI version of pipe.design
##D runShinyPIPE()
## End(Not run)
## Not run:
##D ## Conducting a simulation study (1000 trials) to investigate
##D ## operating characteristics for Scenario 3
##D ## (as reported in Table III in Mander and Sweeting, Statistics in Medicine 2015)
##D set.seed(262)
##D scen3<-pipe.design(N=40,S=1000,c=2,theta=0.3,pi=pi,prior.med=prior.med,
##D prior.ss=matrix(1/36,ncol=6,nrow=6),strategy="ss",constraint="neighbouring",
##D epsilon=0.8,admis="closest",alternate=FALSE)
##D scen3
## End(Not run)
|
# Parametric curve 1: the unit circle centred at (2, 2).  Only the first
# element of `x` (the angle) is used.
funcH1 <- function(x) {
  angle <- x[1]
  c(cos(angle), sin(angle)) + 2
}
# Parametric curve 2: the unit circle centred at the origin.  Only the first
# element of `x` (the angle) is used.
funcH2 <- function(x) {
  angle <- x[1]
  c(cos(angle), sin(angle))
}
# Parametric curve 3: the circle of radius 4 centred at the origin.  Only the
# first element of `x` (the angle) is used.
funcH3 <- function(x) {
  angle <- x[1]
  4 * c(cos(angle), sin(angle))
}
# Unit tangent to curve 1 at parameter x: the numerical gradient of funcH1
# (evaluated at the point funcH1(x), as in the original code -- confirm that
# argument is intended) rotated by -90 degrees and normalised by its one-norm.
vertex1 <- function(x)
{
  # Compute the expensive numerical gradient once instead of four times;
  # the returned values are unchanged.
  g <- numDeriv::grad(funcH1, funcH1(x))
  c(g[2], -g[1]) / norm(as.matrix(g))
}
# Unit tangent to curve 2 at parameter x: the numerical gradient of funcH2
# (evaluated at funcH2(x), mirroring the original) rotated by -90 degrees and
# normalised by its one-norm.
vertex2 <- function(x)
{
  # Single gradient evaluation instead of four; values unchanged.
  g <- numDeriv::grad(funcH2, funcH2(x))
  c(g[2], -g[1]) / norm(as.matrix(g))
}
# Unit tangent to curve 3 at parameter x: the numerical gradient of funcH3
# (evaluated at funcH3(x), mirroring the original) rotated by -90 degrees and
# normalised by its one-norm.
vertex3 <- function(x)
{
  # Single gradient evaluation instead of four; values unchanged.
  g <- numDeriv::grad(funcH3, funcH3(x))
  c(g[2], -g[1]) / norm(as.matrix(g))
}
# Kernel H11: curve-1 self-interaction term of the integral-equation system.
# NOTE(review): `dot` is not defined in this file -- presumably pracma::dot or
# a sibling helper; confirm it is in scope.  `**` is a legal alias for `^`.
H11 <- function (x,y)
{
  if (x == y){
    # Diagonal (x == y) limit: built from second derivatives (numDeriv::genD)
    # and the tangent vertex1 -- presumably the curvature term; confirm.
    dot(numDeriv::genD(funcH1,funcH1(x))$f0,vertex1(x))/2*norm(as.matrix(numDeriv::grad(funcH1,funcH1(x))))
  }else{
    # Off-diagonal: <p - q, tangent(p)> / |p - q|^2 with both parameters
    # mapped onto curve 1.  Note x/y are rebound from parameters to points.
    x = funcH1(x);
    y = funcH1(y);
    (dot((x-y),vertex1(x)))/(norm(as.matrix(x-y)))**2
  }
}
# Kernel H12: logarithmic (single-layer style) kernel on curve 2.
# NOTE(review): the constant 2.7 in the diagonal branch looks like an
# approximation of e; consider exp(1) if that is the intended regularised
# limit -- confirm against the derivation.
H12 <- function(x,y)
{
  if(x == y){
    # Diagonal (x == y) regularised limit.
    log(1/(2.7 *norm(as.matrix(numDeriv::grad(funcH2,funcH2(x))))))/2
  }else {
    # Off-diagonal: log(1 / |p - q|) with both parameters mapped onto curve 2.
    x = funcH2(x);
    y = funcH2(y);
    log(1/norm(as.matrix(x-y)))
  }
}
# Kernel H13: curve-3 kernel of the system (same algebraic form as H11, but on
# curve 3).
# FIX(review): the diagonal (x == y) branch previously used funcH1/vertex1 --
# an apparent copy-paste from H11.  Every other kernel's diagonal uses the
# same curve as its off-diagonal branch (H11 -> funcH1, H22 -> funcH2,
# H23 -> funcH3), so the curvature term here is now built from funcH3/vertex3.
H13 <- function (x,y)
{
  if (x == y){
    dot(numDeriv::genD(funcH3,funcH3(x))$f0,vertex3(x))/2*norm(as.matrix(numDeriv::grad(funcH3,funcH3(x))))
  }else{
    # Off-diagonal: both parameters mapped onto curve 3.
    x = funcH3(x);
    y = funcH3(y);
    (dot((x-y),vertex3(x)))/(norm(as.matrix(x-y)))**2
  }
}
# Kernel H21: second-kind kernel on curve 1.  The off-diagonal uses the
# product of projections onto the tangents at both points (contrast H11,
# which uses only the tangent at x).
# NOTE(review): `dot` must be supplied by the caller's environment
# (presumably pracma::dot) -- confirm.
H21 <- function(x,y)
{
  if (x == y){
    # Diagonal limit: identical expression to H11's diagonal -- presumably
    # both kernels share the same curvature term on curve 1; confirm.
    dot(numDeriv::genD(funcH1,funcH1(x))$f0,vertex1(x))/2*norm(as.matrix(numDeriv::grad(funcH1,funcH1(x))))
  }else{
    x = funcH1(x);
    y = funcH1(y);
    (dot((x-y),vertex1(x)))*(dot((x-y),vertex1(y)))/(norm(as.matrix(x-y)))**2
  }
}
# Kernel H22: curve-2 kernel.  Off-diagonal projects (p - q) onto the tangent
# at y rather than at x -- presumably the adjoint/transposed form of the H11
# kernel; confirm against the derivation.
H22 <- function(x,y)
{
  if (x == y){
    # Diagonal (x == y) curvature-style limit on curve 2.
    dot(numDeriv::genD(funcH2,funcH2(x))$f0,vertex2(x))/2*norm(as.matrix(numDeriv::grad(funcH2,funcH2(x))))
  }else{
    x = funcH2(x);
    y = funcH2(y);
    (dot((x-y),vertex2(y)))/(norm(as.matrix(x-y)))**2
  }
}
# Kernel H23: second-kind kernel on curve 3 (same form as H21 but built from
# funcH3/vertex3).  Note the unusual brace placement: the }else{ sits on the
# same line as the diagonal expression.
H23 <- function(x,y)
{
  if (x == y){
    # Diagonal (x == y) curvature-style limit on curve 3.
    dot(numDeriv::genD(funcH3,funcH3(x))$f0,vertex3(x))/2*norm(as.matrix(numDeriv::grad(funcH3,funcH3(x))))}else{
      x = funcH3(x);
      y = funcH3(y);
      (dot((x-y),vertex3(x)))*(dot((x-y),vertex3(y)))/(norm(as.matrix(x-y)))**2
    }
}
| /K1.R | no_license | Andrushkaa/LIR | R | false | false | 2,428 | r | funcH1 <- function(x)
{
c(cos(x[1])+2,sin(x[1])+2)
}
funcH2 <- function(x)
{
c(cos(x[1]),sin(x[1]))
}
funcH3 <- function(x)
{
c(4*cos(x[1]),4*sin(x[1]))
}
vertex1 <- function(x)
{
c( numDeriv::grad(funcH1,funcH1(x))[2]/norm(as.matrix(numDeriv::grad(funcH1,funcH1(x)))),
-numDeriv::grad(funcH1,funcH1(x))[1]/norm(as.matrix(numDeriv::grad(funcH1,funcH1(x)))) )
}
vertex2 <- function(x)
{
c( numDeriv::grad(funcH2,funcH2(x))[2]/norm(as.matrix(numDeriv::grad(funcH2,funcH2(x)))),
-numDeriv::grad(funcH2,funcH2(x))[1]/norm(as.matrix(numDeriv::grad(funcH2,funcH2(x)))) )
}
vertex3 <- function(x)
{
c( numDeriv::grad(funcH3,funcH3(x))[2]/norm(as.matrix(numDeriv::grad(funcH3,funcH3(x)))),
-numDeriv::grad(funcH3,funcH3(x))[1]/norm(as.matrix(numDeriv::grad(funcH3,funcH3(x)))) )
}
H11 <- function (x,y)
{
if (x == y){
dot(numDeriv::genD(funcH1,funcH1(x))$f0,vertex1(x))/2*norm(as.matrix(numDeriv::grad(funcH1,funcH1(x))))
}else{
x = funcH1(x);
y = funcH1(y);
(dot((x-y),vertex1(x)))/(norm(as.matrix(x-y)))**2
}
}
H12 <- function(x,y)
{
if(x == y){
log(1/(2.7 *norm(as.matrix(numDeriv::grad(funcH2,funcH2(x))))))/2
}else {
x = funcH2(x);
y = funcH2(y);
log(1/norm(as.matrix(x-y)))
}
}
H13 <- function (x,y)
{
if (x == y){
dot(numDeriv::genD(funcH1,funcH1(x))$f0,vertex1(x))/2*norm(as.matrix(numDeriv::grad(funcH1,funcH1(x))))
}else{
x = funcH3(x);
y = funcH3(y);
(dot((x-y),vertex3(x)))/(norm(as.matrix(x-y)))**2
}
}
H21 <- function(x,y)
{
if (x == y){
dot(numDeriv::genD(funcH1,funcH1(x))$f0,vertex1(x))/2*norm(as.matrix(numDeriv::grad(funcH1,funcH1(x))))
}else{
x = funcH1(x);
y = funcH1(y);
(dot((x-y),vertex1(x)))*(dot((x-y),vertex1(y)))/(norm(as.matrix(x-y)))**2
}
}
H22 <- function(x,y)
{
if (x == y){
dot(numDeriv::genD(funcH2,funcH2(x))$f0,vertex2(x))/2*norm(as.matrix(numDeriv::grad(funcH2,funcH2(x))))
}else{
x = funcH2(x);
y = funcH2(y);
(dot((x-y),vertex2(y)))/(norm(as.matrix(x-y)))**2
}
}
H23 <- function(x,y)
{
if (x == y){
dot(numDeriv::genD(funcH3,funcH3(x))$f0,vertex3(x))/2*norm(as.matrix(numDeriv::grad(funcH3,funcH3(x))))}else{
x = funcH3(x);
y = funcH3(y);
(dot((x-y),vertex3(x)))*(dot((x-y),vertex3(y)))/(norm(as.matrix(x-y)))**2
}
}
|
# Intrinio API
#
# Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://docs.intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner.
#
# OpenAPI spec version: 2.45.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' SecurityScreenClause Class
#'
#' @field field
#' @field operator
#' @field value
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
SecurityScreenClause <- R6::R6Class(
'SecurityScreenClause',
public = list(
`field` = NA,
`operator` = NA,
`value` = NA,
initialize = function(`field`, `operator`, `value`){
if (!missing(`field`)) {
self$`field` <- `field`
}
if (!missing(`operator`)) {
self$`operator` <- `operator`
}
if (!missing(`value`)) {
self$`value` <- `value`
}
},
toJSON = function() {
SecurityScreenClauseObject <- list()
if (!is.null(self$`field`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`field`) && ((length(self$`field`) == 0) || ((length(self$`field`) != 0 && R6::is.R6(self$`field`[[1]]))))) {
SecurityScreenClauseObject[['field']] <- lapply(self$`field`, function(x) x$toJSON())
} else {
SecurityScreenClauseObject[['field']] <- jsonlite::toJSON(self$`field`, auto_unbox = TRUE)
}
}
if (!is.null(self$`operator`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`operator`) && ((length(self$`operator`) == 0) || ((length(self$`operator`) != 0 && R6::is.R6(self$`operator`[[1]]))))) {
SecurityScreenClauseObject[['operator']] <- lapply(self$`operator`, function(x) x$toJSON())
} else {
SecurityScreenClauseObject[['operator']] <- jsonlite::toJSON(self$`operator`, auto_unbox = TRUE)
}
}
if (!is.null(self$`value`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`value`) && ((length(self$`value`) == 0) || ((length(self$`value`) != 0 && R6::is.R6(self$`value`[[1]]))))) {
SecurityScreenClauseObject[['value']] <- lapply(self$`value`, function(x) x$toJSON())
} else {
SecurityScreenClauseObject[['value']] <- jsonlite::toJSON(self$`value`, auto_unbox = TRUE)
}
}
SecurityScreenClauseObject
},
fromJSON = function(SecurityScreenClauseJson) {
SecurityScreenClauseObject <- jsonlite::fromJSON(SecurityScreenClauseJson)
if (!is.null(SecurityScreenClauseObject$`field`)) {
self$`field` <- SecurityScreenClauseObject$`field`
}
if (!is.null(SecurityScreenClauseObject$`operator`)) {
self$`operator` <- SecurityScreenClauseObject$`operator`
}
if (!is.null(SecurityScreenClauseObject$`value`)) {
self$`value` <- SecurityScreenClauseObject$`value`
}
},
toJSONString = function() {
jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE, pretty = TRUE)
},
fromJSONString = function(SecurityScreenClauseJson) {
SecurityScreenClauseObject <- jsonlite::fromJSON(SecurityScreenClauseJson, simplifyDataFrame = FALSE)
self$setFromList(SecurityScreenClauseObject)
},
setFromList = function(listObject) {
if (!is.null(listObject$`field`)) {
self$`field` <- listObject$`field`
}
else {
self$`field` <- NA
}
if (!is.null(listObject$`operator`)) {
self$`operator` <- listObject$`operator`
}
else {
self$`operator` <- NA
}
if (!is.null(listObject$`value`)) {
self$`value` <- listObject$`value`
}
else {
self$`value` <- NA
}
},
getAsList = function() {
listObject = list()
listObject[["field"]] <- self$`field`
listObject[["operator"]] <- self$`operator`
listObject[["value"]] <- self$`value`
return(listObject)
}
)
)
| /R/SecurityScreenClause.r | no_license | intrinio/r-sdk | R | false | false | 4,332 | r | # Intrinio API
#
# Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://docs.intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner.
#
# OpenAPI spec version: 2.45.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' SecurityScreenClause Class
#'
#' @field field
#' @field operator
#' @field value
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
SecurityScreenClause <- R6::R6Class(
'SecurityScreenClause',
public = list(
`field` = NA,
`operator` = NA,
`value` = NA,
initialize = function(`field`, `operator`, `value`){
if (!missing(`field`)) {
self$`field` <- `field`
}
if (!missing(`operator`)) {
self$`operator` <- `operator`
}
if (!missing(`value`)) {
self$`value` <- `value`
}
},
toJSON = function() {
SecurityScreenClauseObject <- list()
if (!is.null(self$`field`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`field`) && ((length(self$`field`) == 0) || ((length(self$`field`) != 0 && R6::is.R6(self$`field`[[1]]))))) {
SecurityScreenClauseObject[['field']] <- lapply(self$`field`, function(x) x$toJSON())
} else {
SecurityScreenClauseObject[['field']] <- jsonlite::toJSON(self$`field`, auto_unbox = TRUE)
}
}
if (!is.null(self$`operator`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`operator`) && ((length(self$`operator`) == 0) || ((length(self$`operator`) != 0 && R6::is.R6(self$`operator`[[1]]))))) {
SecurityScreenClauseObject[['operator']] <- lapply(self$`operator`, function(x) x$toJSON())
} else {
SecurityScreenClauseObject[['operator']] <- jsonlite::toJSON(self$`operator`, auto_unbox = TRUE)
}
}
if (!is.null(self$`value`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`value`) && ((length(self$`value`) == 0) || ((length(self$`value`) != 0 && R6::is.R6(self$`value`[[1]]))))) {
SecurityScreenClauseObject[['value']] <- lapply(self$`value`, function(x) x$toJSON())
} else {
SecurityScreenClauseObject[['value']] <- jsonlite::toJSON(self$`value`, auto_unbox = TRUE)
}
}
SecurityScreenClauseObject
},
fromJSON = function(SecurityScreenClauseJson) {
SecurityScreenClauseObject <- jsonlite::fromJSON(SecurityScreenClauseJson)
if (!is.null(SecurityScreenClauseObject$`field`)) {
self$`field` <- SecurityScreenClauseObject$`field`
}
if (!is.null(SecurityScreenClauseObject$`operator`)) {
self$`operator` <- SecurityScreenClauseObject$`operator`
}
if (!is.null(SecurityScreenClauseObject$`value`)) {
self$`value` <- SecurityScreenClauseObject$`value`
}
},
toJSONString = function() {
jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE, pretty = TRUE)
},
fromJSONString = function(SecurityScreenClauseJson) {
SecurityScreenClauseObject <- jsonlite::fromJSON(SecurityScreenClauseJson, simplifyDataFrame = FALSE)
self$setFromList(SecurityScreenClauseObject)
},
setFromList = function(listObject) {
if (!is.null(listObject$`field`)) {
self$`field` <- listObject$`field`
}
else {
self$`field` <- NA
}
if (!is.null(listObject$`operator`)) {
self$`operator` <- listObject$`operator`
}
else {
self$`operator` <- NA
}
if (!is.null(listObject$`value`)) {
self$`value` <- listObject$`value`
}
else {
self$`value` <- NA
}
},
getAsList = function() {
listObject = list()
listObject[["field"]] <- self$`field`
listObject[["operator"]] <- self$`operator`
listObject[["value"]] <- self$`value`
return(listObject)
}
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getObjects.R
\name{getObjects}
\alias{getObjects}
\title{Get objects of a certain class}
\usage{
getObjects(cls = "function")
}
\arguments{
\item{cls}{class type (default \code{function})}
}
\value{
list of objects of the specified class
}
\description{
This function returns the objects defined in the current frame that
are of a given class (by default functions)
}
\examples{
library(pascal)
a<-1
b <- function(x) { x }
getObjects("numeric");
getObjects("function");
## remove all objects that are not functions
ls()
rm(list=setdiff(ls(),getObjects()));
ls()
}
\author{
Pascal Niklaus \email{pascal.niklaus@ieu.uzh.ch}
}
| /pascal/man/getObjects.Rd | no_license | rededsky/pascal | R | false | true | 705 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getObjects.R
\name{getObjects}
\alias{getObjects}
\title{Get objects of a certain class}
\usage{
getObjects(cls = "function")
}
\arguments{
\item{cls}{class type (default \code{function})}
}
\value{
list of objects of the specified class
}
\description{
This function returns the objects defined in the current frame that
are of a given class (by default functions)
}
\examples{
library(pascal)
a<-1
b <- function(x) { x }
getObjects("numeric");
getObjects("function");
## remove all objects that are not functions
ls()
rm(list=setdiff(ls(),getObjects()));
ls()
}
\author{
Pascal Niklaus \email{pascal.niklaus@ieu.uzh.ch}
}
|
# Estimate continuous-time correlated-velocity movement (CVM) parameters by
# fitting a correlated random walk with crawl::crwMLE.
#
#   z   complex vector of positions (Re = x, Im = y)
#   t   observation times
#   ... forwarded to crwMLE (e.g. optimisation controls)
#
# Returns list(fit = crwMLE fit object, nutau = data frame with point
# estimates and confidence intervals for tau (autocorrelation time) and nu
# (mean speed)).
# NOTE(review): requires the crawl package and uses its initial.state
# interface -- confirm compatibility with the installed crawl version.
estimateCVM.crawl <- function(z, t, ...)
{
  x = Re(z)
  y = Im(z)
  data <- data.frame(x, y, t)
  # Initial state: start at the first observed position with zero velocity
  # and unit initial covariance in each coordinate.
  initial.state <- list(
    a1.x=c(x[1],0),
    a1.y=c(y[1],0),
    P1.x=diag(c(1,1)),
    P1.y=diag(c(1,1)))
  Fit.crawl <- crwMLE(
    mov.model=~1,
    data=data, coord=c("x","y"), Time.name="t", polar.coord = FALSE,
    initial.state=initial.state, fixPar=c(NA, NA), ...)
  # Back-transform crawl's log-scale parameters:
  #   sigma = exp(par[1]);  tau = 1/exp(par[2]);  nu = sqrt(pi/tau)*sigma/2
  Get.nutau <- function(fit)
  {
    sigma.hat <- exp(fit$par[1])
    sigma.CI <- exp(fit$ci[1,])
    tau.hat <- 1/exp(fit$par[2])
    tau.CI <- sort(1/exp(fit$ci[2,]))
    nu.hat <- sqrt(pi/tau.hat)*sigma.hat/2
    # NOTE(review): nu's CI is built from sigma's CI at the *point estimate*
    # of tau, ignoring tau's own uncertainty -- confirm this is intended.
    nu.CI <- sqrt(pi/tau.hat)*sigma.CI/2
    results <- data.frame(Estimate = c(tau.hat, nu.hat), rbind(tau.CI, nu.CI))
    names(results) <- c("Estimate", "L", "U")
    return(results)
  }
  nutau <- Get.nutau(Fit.crawl)
  row.names(nutau) <- c("tau", "nu")
  return(list(fit = Fit.crawl, nutau = nutau))
} | /cvm/cvm/R/estimateCVM.crawl.r | no_license | xiang-chen-git/ecomove | R | false | false | 962 | r | estimateCVM.crawl <- function(z, t, ...)
{
x = Re(z)
y = Im(z)
data <- data.frame(x, y, t)
initial.state <- list(
a1.x=c(x[1],0),
a1.y=c(y[1],0),
P1.x=diag(c(1,1)),
P1.y=diag(c(1,1)))
Fit.crawl <- crwMLE(
mov.model=~1,
data=data, coord=c("x","y"), Time.name="t", polar.coord = FALSE,
initial.state=initial.state, fixPar=c(NA, NA), ...)
Get.nutau <- function(fit)
{
sigma.hat <- exp(fit$par[1])
sigma.CI <- exp(fit$ci[1,])
tau.hat <- 1/exp(fit$par[2])
tau.CI <- sort(1/exp(fit$ci[2,]))
nu.hat <- sqrt(pi/tau.hat)*sigma.hat/2
nu.CI <- sqrt(pi/tau.hat)*sigma.CI/2
results <- data.frame(Estimate = c(tau.hat, nu.hat), rbind(tau.CI, nu.CI))
names(results) <- c("Estimate", "L", "U")
return(results)
}
nutau <- Get.nutau(Fit.crawl)
row.names(nutau) <- c("tau", "nu")
return(list(fit = Fit.crawl, nutau = nutau))
} |
rm(list=ls(all=TRUE))
# Download .asc file
#Download of NOAA Merged Land Ocean Global Surface Temperature Analysis Dataset (NOAAGlobalTemp) Data in the root/working directory of RStudio:
#NOAAGlobalTemp Dataset (8.7 MB)
#[ftp://ftp.ncdc.noaa.gov/pub/data/noaaglobaltemp/operational/gridded/NOAAGlobalTemp.gridded.v4.0.1.201810.asc]
setwd("/Users/Paul/Climate")
da1=scan("NOAAGlobalTemp.gridded.v4.0.1.201809.asc.gz")
length(da1)
#[1] 4319010
da1[1:30]
#[1] 1.0 1880.0 -999.9 #means mon, year, temp
#It is about a vector (0 dimensions), a sequence of numbers: mon, year, temp, temp....
#Here numerous temperatures follow according to a net over the whole globe in a distance of 5 x 5 degrees.
#At the 2595 and 2596th position appears the month February and the year 1880
#2595-2 = 2593 Temperaturvalues follow after each month and year values
da1[2590:2600]
# [1] -999.9 -999.9 -999.9 -999.9 -999.9 2.0 1880.0 -999.9 -999.9 -999.9 -999.9
da1[5180:5200]
#[1] -999.9 -999.9 -999.9 -999.9 -999.9 -999.9 -999.9 -999.9 -999.9 3.0 1880.0 -999.9 -999.9 -999.9 -999.9 -999.9 -999.9
#[18] -999.9 -999.9 -999.9 -999.9
#Data in 72 rows (2.5, ..., 357.5), Latitude, and
#Data in 36 columns (-87.5, ..., 87.5), Longitude
#Years 2018-1880=138 years; 138 * 12 months = 1656 months
#From inclusive: Thursday, 1. January 1880
#To Saturday, 1. September 2018
#(Enddate not counted)
#
#Result: 50 647 Days
#50 647 Days between the metioned dates, enddate not included.
#Or 138 years, 9 months (exclusive Enddate), 138*12 +9 = 1665
#We build a sequence for the years and months
tm1=seq(1,4319009, by=2594) #Sequence of months
tm2=seq(2,4319010, by=2594) #Sequence of years
length(tm1)
#[1] 1665
length(tm2)
#[1] 1665
mm1=da1[tm1] #Extract months
yy1=da1[tm2] #Extract years
head(mm1)
head(yy1)
length(mm1)
length(yy1)
rw1<-paste(yy1, sep="-", mm1) #Combine YYYY with MM
head(rw1)
# "1880-1" "1880-2" "1880-3" "1880-4" "1880-5" "1880-6"
head(tm1)
head(tm2)
tm3=cbind(tm1,tm2)
head(tm3)
tm4=as.vector(t(tm3))
head(tm4)
#[1] 1 2 2595 2596 5189 5190
da2<-da1[-tm4] #Remove the months and years data from the scanned data
length(da2)/(36*72)
#[1] 1665
#months
#36*72 = 2592, that is the number of gridded (5 x 5 degree) global surface temperature boxes, and these grids
# deliver measured values for 1665 months
2592*1665
#[1] 4315680
#Temperature values from 1880 to 2018 in these 2592 grids
#138 yrs 9 mon: Jan 1880-Sep 2018
#1656 months + 9 = 1665, 138 yrs 9 mon: Jan 1880 - Sep 2018
da3<-matrix(da2,ncol=1665) #Generate the space-time data
#2592 (=36*72) rows and 1665 months (=138 yrs 9 mon)
dim(da3)
colnames(da3)<-rw1
lat1=seq(-87.5, 87.5, length=36)
lon1=seq(2.5, 357.5, length=72)
LAT=rep(lat1, each=72)
LON=rep(lon1,36)
gpcpst=cbind(LAT, LON, da3)
head(gpcpst)
dim(gpcpst)
#[1] 2592 1667 #The first two columns are Lat and Lon
#-87.5 to 87.5, Latitude, and then 2.5 to 375.5, Longitude
#The first row for time is header, not counted as data.
write.csv(gpcpst,file="NOAAGlobalT.csv")
#Output the data as a csv file
#Plot the temperature data map of a given month on a map from Baffin Bay to Egypt
#With this space-time data, one can plot a data map for a given month or a data time series
#for a given location. For example, the following R code plots the temperature data map for
#September 2018 on a map from Baffin Bay to Egypt.
#Install maps package if not done before
#install.packages("maps")
library(maps)
#Baffin Bay
#DG (Dezimalgrad)*
#Latitude: 74.69324929999999
#Longitude: -68.49280190000002
#Egypt
#Latitude 26.820553
#Longitude 30.802498000000014
Lat= seq(26.820553,74.69324929999999, length=36)
Lon=seq(-68.49280190000002, 30.802498000000014, length=72)
mapmat=matrix(gpcpst[,1665],nrow=72)
#column 1665 corresponding to Sep 2018
#Convert the vector into a lon-lat matrix for R map plotting
mapmat=pmax(pmin(mapmat,6),-6)
#This command compresses numbers larger than 6 to 6
plot.new()
par(mar=c(4,5,3,0))
int=seq(-6,6,length.out=81)
rgb.palette=colorRampPalette(c("black","blue", "darkgreen","green","yellow","pink","red","maroon"), interpolate="spline")
mapmat= mapmat[,seq(length(mapmat[1,]),1)]
filled.contour(Lon, Lat, mapmat, color.palette=rgb.palette, levels=int,
plot.title=title(main="NOAAGlobalTemp Anomalies Sep 2018 [deg C]",
xlab="Longitude",ylab="Latitude", cex.lab=1.5),
plot.axes={axis(1, cex.axis=1.5);
axis(2, cex.axis=1.5);map("world", add=TRUE);grid()},
key.title=title(main="[oC]"),
key.axes={axis(4, cex.axis=1.5)})
#Extract the data for the tropical Pacific region (20S-20N, 160E-120W) from 1951-2000
#The following code studies the data over a particular region, the tropical Pacific for El
#Nino characteristics, it extracts the data for the region for the given time interval.
#Keep only the data for the Pacific region
n2<-which(gpcpst[,1]>-20&gpcpst[,1]<20&gpcpst[,2]>160&gpcpst[,2]<260)
dim(gpcpst)
length(n2)
#[1] 160 #4 latitude bends and 20 longitude bends
pacificdat=gpcpst[n2,855:1454] #from 1951-2000
#(1951-1880)*12 + lat col + lon col =854
#Thus, Jan 1951 data from column 855
Lat=seq(-17.5,17.5, by=5)
Lon=seq(162.5, 257.5, by=5)
plot.new()
par(mar=c(4,5,3,0))
mapmat=matrix(pacificdat[,564], nrow=20)
int=seq(-5,5,length.out=81)
rgb.palette=colorRampPalette(c("black","blue", "darkgreen",
"green", "yellow","pink","red","maroon"),interpolate="spline")
#mapmat= mapmat[,seq(length(mapmat[1,]),1)]
filled.contour(Lon, Lat, mapmat, color.palette=rgb.palette, levels=int,
xlim=c(120,300),ylim=c(-40,40),
plot.title=title(main="Tropic Pacific SAT Anomalies [deg C]: Dec 1997",
xlab="Longitude",ylab="Latitude", cex.lab=1.5),
plot.axes={axis(1, cex.axis=1.5); axis(2, cex.axis=1.5);
map("world2", add=TRUE);grid()},
key.title=title(main="[oC]"),
key.axes={axis(4, cex.axis=1.5)})
#Extract data from only one grid box
#A special case is to extract data for a specified grid box with given latitude and longitude for a given interval,
#e.g., the Iceland Region box (62.5, 337.5). This can be easily done by the
#following R code with a simple plotting command.
#Extract data for a specified box with given lat and lon
#Iceland Region
#Latitude 62.5
#Longitude 337.5
#For a survey of Latitude (LAT) and Longitude (LON)
#> gpcpst[1:100,1:10]
n2 <- which(gpcpst[,1]==62.5&gpcpst[,2]==337.5) #Latitude and Longitude
#Iceland <- gpcpst[n2,855:1454] #Interval 1880-2018
IcelandData <- gpcpst[n2,3:1667]
plot(seq(1880,2018, len=length(IcelandData)),
IcelandData, type="l",
xlab="Year", ylab="Temp [oC]",
main="Iceland Region temperature anomalies history")
#Spatial averages and their trends
#36-by-72 boxes and Jan 1880 - Jan 2017 = 1645 months (Jan 1880-Sep 2018,1667 months (=138 yrs 9 mon))+ lat and lon
temp=gpcpst
areaw=matrix(0,nrow=2592,ncol = 1667)
dim(areaw)
#[1] 2592 1667
areaw[,1]=temp[,1]
areaw[,2]=temp[,2]
veca=cos(temp[,1]*pi/180)
#Here, convert degrees to radians
#Area-weight matrix equal to cosine lat of the boxes with data
#and to zero for the boxes of missing data -999.9
for(j in 3:1667)
{
for (i in 1:2592)
{if(temp[i,j]> -290.0) {areaw[i,j]=veca[i]} }
}
#Area-weight data matrixs first two columns as lat-lon
tempw=areaw*temp
tempw[,1:2]=temp[,1:2]
#create monthly global average vector for 1667 months
#Jan 1880- Sep 2018
avev=colSums(tempw[,3:1667])/colSums(areaw[,3:1667])
#Spatial average of the monthly temperature data from NOAAGlobalTemp
#from January 1880 -January 2018 and can be generated by the following R code
timemo=seq(1880,2018,length=1665)
plot(timemo,avev,type="l", cex.lab=1.4,
xlab="Year", ylab="Temperature anomaly [oC]",
main="Area-weighted global average of monthly SAT anomalies: Jan 1880-Sep 2018")
abline(lm(avev ~ timemo),col="blue",lwd=2)
text(1930,0.7, "Linear trend: 0.69 [oC] per century",
cex=1.4, col="blue")
#Spatial average of annual data
plot.new()
# 1665 months = 138 complete years + 9 months of 2018. matrix() would
# silently RECYCLE the first 3 months of 1880 to fill the last row
# (1665 is not divisible by 12), corrupting the 2018 annual mean.
# Pad the partial year with NA instead.
avem = matrix(c(avev[1:1665], rep(NA, 3)), ncol=12, byrow=TRUE)
#compute annual average (2018 = mean of its 9 available months)
annv=rowMeans(avem, na.rm=TRUE)
#Plot the annual mean global average temp
timeyr<-seq(1880, 2018)
plot(timeyr,annv,type="s",
     cex.lab=1.4, lwd=2,
     xlab="Year", ylab="Temperature anomaly [oC]",
     main="Area-weighted global average of annual SAT anomalies: 1880-2018")
abline(lm(annv ~ timeyr),col="blue",lwd=2)
text(1940,0.4, "Linear trend: 0.69 [oC] per century",
     cex=1.4, col="blue")
text(1900,0.07, "Base line",cex=1.4, col="red")
# zero-anomaly baseline; length derived from timeyr instead of hard-coded 139
lines(timeyr,rep(0,length(timeyr)), type="l",col="red")
| /Script.R | no_license | ronnie-fr/Climate | R | false | false | 8,627 | r | rm(list=ls(all=TRUE))
# Download .asc file
#Download of NOAA Merged Land Ocean Global Surface Temperature Analysis Dataset (NOAAGlobalTemp) Data in the root/working directory of RStudio:
#NOAAGlobalTemp Dataset (8.7 MB)
#[ftp://ftp.ncdc.noaa.gov/pub/data/noaaglobaltemp/operational/gridded/NOAAGlobalTemp.gridded.v4.0.1.201810.asc]
setwd("/Users/Paul/Climate")
da1=scan("NOAAGlobalTemp.gridded.v4.0.1.201809.asc.gz")
length(da1)
#[1] 4319010
da1[1:30]
#[1] 1.0 1880.0 -999.9 #means mon, year, temp
#It is about a vector (0 dimensions), a sequence of numbers: mon, year, temp, temp....
#Here numerous temperatures follow according to a net over the whole globe in a distance of 5 x 5 degrees.
#At the 2595 and 2596th position appears the month February and the year 1880
#2595-2 = 2593 temperature values follow each month-and-year pair
da1[2590:2600]
# [1] -999.9 -999.9 -999.9 -999.9 -999.9 2.0 1880.0 -999.9 -999.9 -999.9 -999.9
da1[5180:5200]
#[1] -999.9 -999.9 -999.9 -999.9 -999.9 -999.9 -999.9 -999.9 -999.9 3.0 1880.0 -999.9 -999.9 -999.9 -999.9 -999.9 -999.9
#[18] -999.9 -999.9 -999.9 -999.9
#Data in 72 rows (2.5, ..., 357.5), Latitude, and
#Data in 36 columns (-87.5, ..., 87.5), Longitude
#Years 2018-1880=138 years; 138 * 12 months = 1656 months
#From inclusive: Thursday, 1. January 1880
#To Saturday, 1. September 2018
#(Enddate not counted)
#
#Result: 50 647 Days
#50 647 days between the mentioned dates, end date not included.
#Or 138 years, 9 months (exclusive Enddate), 138*12 +9 = 1665
#We build a sequence for the years and months
tm1=seq(1,4319009, by=2594) #Sequence of months
tm2=seq(2,4319010, by=2594) #Sequence of years
length(tm1)
#[1] 1665
length(tm2)
#[1] 1665
mm1=da1[tm1] #Extract months
yy1=da1[tm2] #Extract years
head(mm1)
head(yy1)
length(mm1)
length(yy1)
rw1<-paste(yy1, sep="-", mm1) #Combine YYYY with MM
head(rw1)
# "1880-1" "1880-2" "1880-3" "1880-4" "1880-5" "1880-6"
head(tm1)
head(tm2)
tm3=cbind(tm1,tm2)
head(tm3)
tm4=as.vector(t(tm3))
head(tm4)
#[1] 1 2 2595 2596 5189 5190
da2<-da1[-tm4] #Remove the months and years data from the scanned data
length(da2)/(36*72)
#[1] 1665
#months
#36*72 = 2592, that is the number of gridded (5 x 5 degree) global surface temperature boxes, and these grids
# deliver measured values for 1665 months
2592*1665
#[1] 4315680
#Temperature values from 1880 to 2018 in these 2592 grids
#138 yrs 9 mon: Jan 1880-Sep 2018
#1656 months + 9 = 1665, 138 yrs 9 mon: Jan 1880 - Sep 2018
da3<-matrix(da2,ncol=1665) #Generate the space-time data
#2592 (=36*72) rows and 1665 months (=138 yrs 9 mon)
dim(da3)
colnames(da3)<-rw1
lat1=seq(-87.5, 87.5, length=36)
lon1=seq(2.5, 357.5, length=72)
LAT=rep(lat1, each=72)
LON=rep(lon1,36)
gpcpst=cbind(LAT, LON, da3)
head(gpcpst)
dim(gpcpst)
#[1] 2592 1667 #The first two columns are Lat and Lon
#-87.5 to 87.5, Latitude, and then 2.5 to 375.5, Longitude
#The first row for time is header, not counted as data.
write.csv(gpcpst,file="NOAAGlobalT.csv")
#Output the data as a csv file
#Plot the temperature data map of a given month on a map from Baffin Bay to Egypt
#With this space-time data, one can plot a data map for a given month or a data time series
#for a given location. For example, the following R code plots the temperature data map for
#September 2018 on a map from Baffin Bay to Egypt.
#Install maps package if not done before
#install.packages("maps")
library(maps)
#Baffin Bay
#DG (Dezimalgrad)*
#Latitude: 74.69324929999999
#Longitude: -68.49280190000002
#Egypt
#Latitude 26.820553
#Longitude 30.802498000000014
Lat= seq(26.820553,74.69324929999999, length=36)
Lon=seq(-68.49280190000002, 30.802498000000014, length=72)
mapmat=matrix(gpcpst[,1665],nrow=72)
#column 1665 corresponding to Sep 2018
#Convert the vector into a lon-lat matrix for R map plotting
mapmat=pmax(pmin(mapmat,6),-6)
#This command compresses numbers larger than 6 to 6
plot.new()
par(mar=c(4,5,3,0))
int=seq(-6,6,length.out=81)
rgb.palette=colorRampPalette(c("black","blue", "darkgreen","green","yellow","pink","red","maroon"), interpolate="spline")
mapmat= mapmat[,seq(length(mapmat[1,]),1)]
filled.contour(Lon, Lat, mapmat, color.palette=rgb.palette, levels=int,
plot.title=title(main="NOAAGlobalTemp Anomalies Sep 2018 [deg C]",
xlab="Longitude",ylab="Latitude", cex.lab=1.5),
plot.axes={axis(1, cex.axis=1.5);
axis(2, cex.axis=1.5);map("world", add=TRUE);grid()},
key.title=title(main="[oC]"),
key.axes={axis(4, cex.axis=1.5)})
#Extract the data for the tropical Pacific region (20S-20N, 160E-120W) from 1951-2000
#The following code studies the data over a particular region, the tropical Pacific for El
#Nino characteristics, it extracts the data for the region for the given time interval.
#Keep only the data for the Pacific region
n2<-which(gpcpst[,1]>-20&gpcpst[,1]<20&gpcpst[,2]>160&gpcpst[,2]<260)
dim(gpcpst)
length(n2)
#[1] 160 #4 latitude bends and 20 longitude bends
pacificdat=gpcpst[n2,855:1454] #from 1951-2000
#(1951-1880)*12 + lat col + lon col =854
#Thus, Jan 1951 data from column 855
Lat=seq(-17.5,17.5, by=5)
Lon=seq(162.5, 257.5, by=5)
plot.new()
par(mar=c(4,5,3,0))
mapmat=matrix(pacificdat[,564], nrow=20)
int=seq(-5,5,length.out=81)
rgb.palette=colorRampPalette(c("black","blue", "darkgreen",
"green", "yellow","pink","red","maroon"),interpolate="spline")
#mapmat= mapmat[,seq(length(mapmat[1,]),1)]
filled.contour(Lon, Lat, mapmat, color.palette=rgb.palette, levels=int,
xlim=c(120,300),ylim=c(-40,40),
plot.title=title(main="Tropic Pacific SAT Anomalies [deg C]: Dec 1997",
xlab="Longitude",ylab="Latitude", cex.lab=1.5),
plot.axes={axis(1, cex.axis=1.5); axis(2, cex.axis=1.5);
map("world2", add=TRUE);grid()},
key.title=title(main="[oC]"),
key.axes={axis(4, cex.axis=1.5)})
#Extract data from only one grid box
#A special case is to extract data for a specified grid box with given latitude and longitude for a given interval,
#e.g., the Iceland Region box (62.5, 337.5). This can be easily done by the
#following R code with a simple plotting command.
#Extract data for a specified box with given lat and lon
#Iceland Region
#Latitude 62.5
#Longitude 337.5
#For a survey of Latitude (LAT) and Longitude (LON)
#> gpcpst[1:100,1:10]
n2 <- which(gpcpst[,1]==62.5&gpcpst[,2]==337.5) #Latitude and Longitude
#Iceland <- gpcpst[n2,855:1454] #Interval 1880-2018
IcelandData <- gpcpst[n2,3:1667]
plot(seq(1880,2018, len=length(IcelandData)),
IcelandData, type="l",
xlab="Year", ylab="Temp [oC]",
main="Iceland Region temperature anomalies history")
#Spatial averages and their trends
#36-by-72 boxes and Jan 1880 - Jan 2017 = 1645 months (Jan 1880-Sep 2018,1667 months (=138 yrs 9 mon))+ lat and lon
temp=gpcpst
areaw=matrix(0,nrow=2592,ncol = 1667)
dim(areaw)
#[1] 2592 1667
areaw[,1]=temp[,1]
areaw[,2]=temp[,2]
veca=cos(temp[,1]*pi/180)
#Here, convert degrees to radians
#Area-weight matrix equal to cosine lat of the boxes with data
#and to zero for the boxes of missing data -999.9
for(j in 3:1667)
{
for (i in 1:2592)
{if(temp[i,j]> -290.0) {areaw[i,j]=veca[i]} }
}
#Area-weight data matrixs first two columns as lat-lon
tempw=areaw*temp
tempw[,1:2]=temp[,1:2]
#create monthly global average vector for 1667 months
#Jan 1880- Sep 2018
avev=colSums(tempw[,3:1667])/colSums(areaw[,3:1667])
#Spatial average of the monthly temperature data from NOAAGlobalTemp
#from January 1880 -January 2018 and can be generated by the following R code
timemo=seq(1880,2018,length=1665)
plot(timemo,avev,type="l", cex.lab=1.4,
xlab="Year", ylab="Temperature anomaly [oC]",
main="Area-weighted global average of monthly SAT anomalies: Jan 1880-Sep 2018")
abline(lm(avev ~ timemo),col="blue",lwd=2)
text(1930,0.7, "Linear trend: 0.69 [oC] per century",
cex=1.4, col="blue")
#Spatial average of annual data
plot.new()
# 1665 months = 138 complete years + 9 months of 2018. matrix() would
# silently RECYCLE the first 3 months of 1880 to fill the last row
# (1665 is not divisible by 12), corrupting the 2018 annual mean.
# Pad the partial year with NA instead.
avem = matrix(c(avev[1:1665], rep(NA, 3)), ncol=12, byrow=TRUE)
#compute annual average (2018 = mean of its 9 available months)
annv=rowMeans(avem, na.rm=TRUE)
#Plot the annual mean global average temp
timeyr<-seq(1880, 2018)
plot(timeyr,annv,type="s",
     cex.lab=1.4, lwd=2,
     xlab="Year", ylab="Temperature anomaly [oC]",
     main="Area-weighted global average of annual SAT anomalies: 1880-2018")
abline(lm(annv ~ timeyr),col="blue",lwd=2)
text(1940,0.4, "Linear trend: 0.69 [oC] per century",
     cex=1.4, col="blue")
text(1900,0.07, "Base line",cex=1.4, col="red")
# zero-anomaly baseline; length derived from timeyr instead of hard-coded 139
lines(timeyr,rep(0,length(timeyr)), type="l",col="red")
|
# Example stub for plot.pcadapt -- presumably auto-extracted from the
# package's Rd documentation (no runnable example body); verify against
# the pcadapt package docs.
library(pcadapt)
### Name: plot.pcadapt
### Title: pcadapt visualization tool
### Aliases: plot.pcadapt
### ** Examples
## see ?pcadapt for examples
| /data/genthat_extracted_code/pcadapt/examples/plot.pcadapt.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 157 | r | library(pcadapt)
### Name: plot.pcadapt
### Title: pcadapt visualization tool
### Aliases: plot.pcadapt
### ** Examples
## see ?pcadapt for examples
|
# Project 2 - Linear Regression
# Getting the data
library(readxl)
mydata<- read_xlsx("E:\\Analytixlabs\\Module 6 (Data science using R)\\Case Studies\\Case study 2 - Linear Regression\\Linear Regression Case.xlsx")
View(mydata)
dim(mydata)
str(mydata)
names(mydata)
mydata$spoused[mydata$spoused==-1]<- 0
mydata$carvalue[mydata$carvalue==-1]<-0
# The first 83 variables of the dataset are the variables of interest. The remaining
# variables are the teleco data which are of no use here
mydata<- mydata[,1:83]
names(mydata)
mydata$total_spend<- mydata$cardspent + mydata$card2spent
mydata$total_items<- mydata$carditems + mydata$card2items
mydata$cardspent<- NULL
mydata$card2spent<- NULL
mydata$carditems<- NULL
mydata$card2items<- NULL
# Step 1: Explanatory Data analysis and Data cleaning/preparation
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
# Getting all the continuous variables
var_con<- c("age","ed","employ","income","lninc","debtinc","creddebt","lncreddebt",
"othdebt","lnothdebt","spoused","reside","pets","pets_cats","pets_dogs",
"pets_birds","pets_reptiles","pets_small","pets_saltfish","pets_freshfish",
"address","cars","carvalue","commutetime","cardtenure","card2tenure",
"total_items","total_spend")
# User defined function for calculating descriptives
# Descriptive statistics for one numeric vector: observation count, number
# of missing values, mean, max, min and the 1st/5th/95th/99th percentiles
# of the non-missing values.
# Returns a named numeric vector. quantile() values carry their own "1%",
# "5%", ... names, so the combined names read "P1.1%", "P5.5%", etc. --
# exactly as in the original layout, keeping the write.csv output stable.
udf<- function(x){
  n <- length(x)
  nmiss <- sum(is.na(x))
  a <- x[!is.na(x)]                       # complete cases only
  # one quantile() call instead of four; also avoids shadowing base
  # max()/min() with local variables of the same name
  q <- quantile(a, probs = c(0.01, 0.05, 0.95, 0.99))
  c(count = n, nmiss = nmiss, mean = mean(a), max = max(a), min = min(a),
    P1 = q[1], P5 = q[2], P95 = q[3], P99 = q[4])
}
options(scipen = 999)
desc_stats<- data.frame(t(apply(mydata[var_con],2,udf)))
write.csv(desc_stats,"Stats.csv")
# Outlier treatment:
# Winsorise a numeric vector at its own 5th and 95th percentiles:
# everything below P5 is raised to P5, everything above P95 is lowered
# to P95. NA entries pass through unchanged.
udf2<- function(x){
  lo <- unname(quantile(x, 0.05, na.rm = TRUE))
  hi <- unname(quantile(x, 0.95, na.rm = TRUE))
  # clip in one vectorised pass; pmax/pmin propagate NA as NA
  pmin(pmax(x, lo), hi)
}
mydata[var_con]<- data.frame(apply(mydata[var_con],2,udf2))
# Missing value treatment:
mydata$lncreddebt[is.na(mydata$lncreddebt)]<- -0.130453522
mydata$lnothdebt[is.na(mydata$lnothdebt)]<- 0.69691526
mydata$commutetime[is.na(mydata$commutetime)]<- 25.34553822
# Getting all the categorical variables:
var_cat<- (!names(mydata) %in% var_con)
View(var_cat)
# Profile of one (categorical) column: total length and missing count,
# returned as a named vector for the descriptive-stats table.
udf3<- function(x){
  c(n = length(x), nmiss = sum(is.na(x)))
}
desc_stats_cat<- data.frame(t(apply(mydata[var_cat],2,udf3)))
write.csv(desc_stats_cat,"stats_cat.csv")
# We have 2 missing values in the 'townsize' variable. We'll impute the missing values
# with the most frequent value
prop.table(table(mydata$townsize))
mydata$townsize[is.na(mydata$townsize)]<- 1
######################################################################################
# Checking for normality
hist(mydata$total_spend)
mydata$lntotal_spend<- log(mydata$total_spend)
hist(mydata$lntotal_spend)
hist(mydata$income)
hist(mydata$lninc) # We take this variable because its distribution is more normal than 'income'
hist(mydata$debtinc)
hist(mydata$creddebt)
hist(mydata$lncreddebt) # We take this variable
hist(mydata$othdebt)
hist(mydata$lnothdebt) # We take this variable
# Step 2: Variable Reduction
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
# Selection of significant categorical variables using ANOVA test:
var_con<- c(var_con,"lntotal_spend")
var_cat<- (!names(mydata) %in% var_con)
names(mydata[var_cat])
summary(aov(lntotal_spend ~ region,data = mydata)) # Significant
summary(aov(lntotal_spend ~ townsize,data = mydata))
summary(aov(lntotal_spend ~ gender,data = mydata)) # Significant
summary(aov(lntotal_spend ~ agecat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ birthmonth,data = mydata))
summary(aov(lntotal_spend ~ edcat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ jobcat,data = mydata))
summary(aov(lntotal_spend ~ union,data = mydata))
summary(aov(lntotal_spend ~ empcat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ retire,data = mydata)) # Significant
summary(aov(lntotal_spend ~ inccat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ default,data = mydata))
summary(aov(lntotal_spend ~ jobsat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ marital,data = mydata))
summary(aov(lntotal_spend ~ spousedcat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ homeown,data = mydata)) # Significant
summary(aov(lntotal_spend ~ hometype,data = mydata))
summary(aov(lntotal_spend ~ addresscat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ carown,data = mydata)) # Significant
summary(aov(lntotal_spend ~ cartype,data = mydata))
summary(aov(lntotal_spend ~ carcatvalue,data = mydata)) # Significant
summary(aov(lntotal_spend ~ carbought,data = mydata))
summary(aov(lntotal_spend ~ carbuy,data = mydata))
summary(aov(lntotal_spend ~ commute,data = mydata))
summary(aov(lntotal_spend ~ commutecat,data = mydata))
summary(aov(lntotal_spend ~ commutecar,data = mydata))
summary(aov(lntotal_spend ~ commutemotorcycle,data = mydata))
summary(aov(lntotal_spend ~ commutecarpool,data = mydata))
summary(aov(lntotal_spend ~ commutebus,data = mydata))
summary(aov(lntotal_spend ~ commuterail,data = mydata))
summary(aov(lntotal_spend ~ commutepublic,data = mydata))
summary(aov(lntotal_spend ~ commutebike,data = mydata)) # Significant
summary(aov(lntotal_spend ~ commutewalk,data = mydata))
summary(aov(lntotal_spend ~ commutenonmotor,data = mydata))
summary(aov(lntotal_spend ~ telecommute,data = mydata))
summary(aov(lntotal_spend ~ reason,data = mydata))
summary(aov(lntotal_spend ~ polview,data = mydata))
summary(aov(lntotal_spend ~ polparty,data = mydata))
summary(aov(lntotal_spend ~ polcontrib,data = mydata))
summary(aov(lntotal_spend ~ vote,data = mydata)) # Significant
summary(aov(lntotal_spend ~ card,data = mydata)) # Significant
summary(aov(lntotal_spend ~ cardtype,data = mydata))
summary(aov(lntotal_spend ~ cardbenefit,data = mydata))
summary(aov(lntotal_spend ~ cardfee,data = mydata))
summary(aov(lntotal_spend ~ cardtenurecat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ card2,data = mydata)) # Significant
summary(aov(lntotal_spend ~ card2type,data = mydata))
summary(aov(lntotal_spend ~ card2benefit,data = mydata))
summary(aov(lntotal_spend ~ card2fee,data = mydata))
summary(aov(lntotal_spend ~ card2tenurecat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ active,data = mydata))
summary(aov(lntotal_spend ~ bfast,data = mydata))
# The significant categorical variables we got after performing ANOVA test:
# region,gender,agecat,edcat,empcat,retire,inccat,jobsat,
# spousedcat,homeown,addresscat,carown,carcatvalue,commutebike,
# vote,card,cardtenurecat,card2,card2tenurecat
# The Potential Continuous variables are:
# age,ed,employ,lninc,debtinc,lncreddebt,lnothdebt,spoused,reside,pets,pets_cats,
# pets_dogs,pets_birds,pets_small,pets_freshfish,address,
# cars,carvalue,commutetime,cardtenure,card2tenure
fit1<- lm(lntotal_spend ~ age+ed+employ+lninc+debtinc+lncreddebt+lnothdebt+spoused+
reside+pets+pets_cats+pets_dogs+pets_birds+pets_small+
pets_freshfish+address+cars+carvalue+commutetime+cardtenure+
card2tenure+region+gender+agecat+edcat+empcat+retire+inccat+jobsat+
spousedcat+homeown+addresscat+carown+carcatvalue+commutebike+vote+card+
cardtenurecat+card2+card2tenurecat,data = mydata)
summary(fit1)
# Stepwise Linear regression:
library(MASS)
step<- stepAIC(fit1,direction = "both")
ls(step)
step$anova
# The final model we get after applying stepAIC method
# lntotal_spend ~ age + lninc + lncreddebt + address + region + gender +
# jobsat + addresscat + commutebike + card + card2,data = mydata
########################################################################################
# Final Model Building
# _ _ _ _ _ _ _ _ _ _ _ _ _ _
# Conevrting all categorical variables into factors
mydata$region<- as.factor(mydata$region)
mydata$gender<- as.factor(mydata$gender)
mydata$jobsat<- as.factor(mydata$jobsat)
mydata$addresscat<- as.factor(mydata$addresscat)
mydata$commutebike<- as.factor(mydata$commutebike)
mydata$card<- as.factor(mydata$card)
mydata$card2<- as.factor(mydata$card2)
# Splitting the dataset into "Developement" and "validation" datasets
set.seed(222)
ind<- sample(2,nrow(mydata),replace = TRUE,prob = c(0.7,0.3))
dev<- mydata[ind==1,]
val<- mydata[ind==2,]
# Building model on dev dataset
fit3<- lm(lntotal_spend ~ age + lninc + lncreddebt + address + region + gender +
jobsat + addresscat + commutebike + card + card2,data = dev)
summary(fit3)
library(car)
vif(fit3)
# Building the model with significant variables
fit4<- lm(lntotal_spend ~ lninc + gender + addresscat + card + card2,data = dev)
summary(fit4)
vif(fit4)
# Removing the Influential observations using Cook's D method
dev$cd<- cooks.distance(fit4)
dev1<- subset(dev,cd<(4/3500))
final_model<- lm(lntotal_spend ~ lninc + gender + addresscat + card + card2,
data = dev1)
summary(final_model)
dev1$cd1<-cooks.distance(final_model)
dev2<- subset(dev1,cd1<(4/3360))
final_model2<- lm(lntotal_spend ~ lninc + gender + addresscat + card + card2,
data = dev2)
summary(final_model2)
######################################################################################
# Predicting the Total spend in dev and val datasets:
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
pred1<- exp(predict(final_model2,newdata = dev))
dev<- cbind(dev,pred_spend=pred1)
pred2<- exp(predict(final_model2,newdata = val))
val<- cbind(val,pred_spend=pred2)
# Decile Analysis:
# _ _ _ _ _ _ _ _ _ _ _
dec_loc<- quantile(dev$pred_spend,probs = seq(0.1,0.9,by=0.1))
dev$decile<- findInterval(dev$pred_spend,c(-Inf,dec_loc,+Inf))
library(sqldf)
dev_decile<- sqldf("select decile,count(decile) as Count,
avg(total_spend) as Actual_spend,
avg(pred_spend) as Predicted_spend
from dev
group by decile
order by decile desc")
write.csv(dev_decile,"dev_decile.csv")
dec_loc<- quantile(val$pred_spend,probs = seq(0.1,0.9,by=0.1))
val$decile<- findInterval(val$pred_spend,c(-Inf,dec_loc,Inf))
val_decile<- sqldf("select decile,count(decile) as Count,
avg(total_spend) as Actual_spend,
avg(pred_spend) as Predicted_spend
from val
group by decile
order by decile desc")
write.csv(val_decile,"val_decile.csv")
############+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++############
| /Linear regression.R | no_license | AmanAkshansh/Linear-Regression | R | false | false | 11,245 | r | # Project 2 - Linear Regression
# Getting the data
library(readxl)
mydata<- read_xlsx("E:\\Analytixlabs\\Module 6 (Data science using R)\\Case Studies\\Case study 2 - Linear Regression\\Linear Regression Case.xlsx")
View(mydata)
dim(mydata)
str(mydata)
names(mydata)
mydata$spoused[mydata$spoused==-1]<- 0
mydata$carvalue[mydata$carvalue==-1]<-0
# The first 83 variables of the dataset are the variables of interest. The remaining
# variables are the teleco data which are of no use here
mydata<- mydata[,1:83]
names(mydata)
mydata$total_spend<- mydata$cardspent + mydata$card2spent
mydata$total_items<- mydata$carditems + mydata$card2items
mydata$cardspent<- NULL
mydata$card2spent<- NULL
mydata$carditems<- NULL
mydata$card2items<- NULL
# Step 1: Explanatory Data analysis and Data cleaning/preparation
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
# Getting all the continuous variables
var_con<- c("age","ed","employ","income","lninc","debtinc","creddebt","lncreddebt",
"othdebt","lnothdebt","spoused","reside","pets","pets_cats","pets_dogs",
"pets_birds","pets_reptiles","pets_small","pets_saltfish","pets_freshfish",
"address","cars","carvalue","commutetime","cardtenure","card2tenure",
"total_items","total_spend")
# User defined function for calculating descriptives
# Descriptive statistics helper: for a numeric vector x, returns a named
# vector with the observation count, missing count, and mean/max/min plus
# the 1st, 5th, 95th and 99th percentiles of the non-missing values.
udf<- function(x){
n<-length(x)
nmiss<-sum(is.na(x))
# drop NAs before computing the summary statistics
a<-x[!is.na(x)]
m<- mean(a)
max<- max(a)
min<- min(a)
p1<-quantile(a,0.01)
p5<- quantile(a,0.05)
p95<- quantile(a,0.95)
p99<-quantile(a,0.99)
# quantile() values carry "1%" etc. names, so the result names read "P1.1%", ...
return(c(count=n,nmiss=nmiss,mean=m,max=max,min=min,P1=p1,P5=p5,P95=p95,P99=p99))
}
options(scipen = 999)
desc_stats<- data.frame(t(apply(mydata[var_con],2,udf)))
write.csv(desc_stats,"Stats.csv")
# Outlier treatment:
# Outlier treatment by winsorisation: values below the 5th percentile are
# raised to P5 and values above the 95th percentile lowered to P95.
# NA entries are left unchanged (length-1 replacement in a logical
# subassignment skips NA positions).
udf2<- function(x){
p5<- quantile(x,0.05,na.rm = T)
p95<- quantile(x,0.95,na.rm = T)
x[x<p5]<- p5 # Any value less than p5 are treated as Outlier
x[x>p95]<- p95 # Any value greater than p95 are treated as Outlier
return(x)
}
mydata[var_con]<- data.frame(apply(mydata[var_con],2,udf2))
# Missing value treatment:
mydata$lncreddebt[is.na(mydata$lncreddebt)]<- -0.130453522
mydata$lnothdebt[is.na(mydata$lnothdebt)]<- 0.69691526
mydata$commutetime[is.na(mydata$commutetime)]<- 25.34553822
# Getting all the categorical variables:
var_cat<- (!names(mydata) %in% var_con)
View(var_cat)
# Column profile for the categorical variables: total length and
# missing-value count, returned as a named vector.
udf3<- function(x){
n<- length(x)
nmiss<- sum(is.na(x))
return(c(n=n,nmiss=nmiss))
}
desc_stats_cat<- data.frame(t(apply(mydata[var_cat],2,udf3)))
write.csv(desc_stats_cat,"stats_cat.csv")
# We have 2 missing values in the 'townsize' variable. We'll impute the missing values
# with the most frequent value
prop.table(table(mydata$townsize))
mydata$townsize[is.na(mydata$townsize)]<- 1
######################################################################################
# Checking for normality
hist(mydata$total_spend)
mydata$lntotal_spend<- log(mydata$total_spend)
hist(mydata$lntotal_spend)
hist(mydata$income)
hist(mydata$lninc) # We take this variable because its distribution is more normal than 'income'
hist(mydata$debtinc)
hist(mydata$creddebt)
hist(mydata$lncreddebt) # We take this variable
hist(mydata$othdebt)
hist(mydata$lnothdebt) # We take this variable
# Step 2: Variable Reduction
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
# Selection of significant categorical variables using ANOVA test:
var_con<- c(var_con,"lntotal_spend")
var_cat<- (!names(mydata) %in% var_con)
names(mydata[var_cat])
summary(aov(lntotal_spend ~ region,data = mydata)) # Significant
summary(aov(lntotal_spend ~ townsize,data = mydata))
summary(aov(lntotal_spend ~ gender,data = mydata)) # Significant
summary(aov(lntotal_spend ~ agecat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ birthmonth,data = mydata))
summary(aov(lntotal_spend ~ edcat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ jobcat,data = mydata))
summary(aov(lntotal_spend ~ union,data = mydata))
summary(aov(lntotal_spend ~ empcat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ retire,data = mydata)) # Significant
summary(aov(lntotal_spend ~ inccat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ default,data = mydata))
summary(aov(lntotal_spend ~ jobsat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ marital,data = mydata))
summary(aov(lntotal_spend ~ spousedcat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ homeown,data = mydata)) # Significant
summary(aov(lntotal_spend ~ hometype,data = mydata))
summary(aov(lntotal_spend ~ addresscat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ carown,data = mydata)) # Significant
summary(aov(lntotal_spend ~ cartype,data = mydata))
summary(aov(lntotal_spend ~ carcatvalue,data = mydata)) # Significant
summary(aov(lntotal_spend ~ carbought,data = mydata))
summary(aov(lntotal_spend ~ carbuy,data = mydata))
summary(aov(lntotal_spend ~ commute,data = mydata))
summary(aov(lntotal_spend ~ commutecat,data = mydata))
summary(aov(lntotal_spend ~ commutecar,data = mydata))
summary(aov(lntotal_spend ~ commutemotorcycle,data = mydata))
summary(aov(lntotal_spend ~ commutecarpool,data = mydata))
summary(aov(lntotal_spend ~ commutebus,data = mydata))
summary(aov(lntotal_spend ~ commuterail,data = mydata))
summary(aov(lntotal_spend ~ commutepublic,data = mydata))
summary(aov(lntotal_spend ~ commutebike,data = mydata)) # Significant
summary(aov(lntotal_spend ~ commutewalk,data = mydata))
summary(aov(lntotal_spend ~ commutenonmotor,data = mydata))
summary(aov(lntotal_spend ~ telecommute,data = mydata))
summary(aov(lntotal_spend ~ reason,data = mydata))
summary(aov(lntotal_spend ~ polview,data = mydata))
summary(aov(lntotal_spend ~ polparty,data = mydata))
summary(aov(lntotal_spend ~ polcontrib,data = mydata))
summary(aov(lntotal_spend ~ vote,data = mydata)) # Significant
summary(aov(lntotal_spend ~ card,data = mydata)) # Significant
summary(aov(lntotal_spend ~ cardtype,data = mydata))
summary(aov(lntotal_spend ~ cardbenefit,data = mydata))
summary(aov(lntotal_spend ~ cardfee,data = mydata))
summary(aov(lntotal_spend ~ cardtenurecat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ card2,data = mydata)) # Significant
summary(aov(lntotal_spend ~ card2type,data = mydata))
summary(aov(lntotal_spend ~ card2benefit,data = mydata))
summary(aov(lntotal_spend ~ card2fee,data = mydata))
summary(aov(lntotal_spend ~ card2tenurecat,data = mydata)) # Significant
summary(aov(lntotal_spend ~ active,data = mydata))
summary(aov(lntotal_spend ~ bfast,data = mydata))
# The significant categorical variables we got after performing ANOVA test:
# region,gender,agecat,edcat,empcat,retire,inccat,jobsat,
# spousedcat,homeown,addresscat,carown,carcatvalue,commutebike,
# vote,card,cardtenurecat,card2,card2tenurecat
# The Potential Continuous variables are:
# age,ed,employ,lninc,debtinc,lncreddebt,lnothdebt,spoused,reside,pets,pets_cats,
# pets_dogs,pets_birds,pets_small,pets_freshfish,address,
# cars,carvalue,commutetime,cardtenure,card2tenure
fit1<- lm(lntotal_spend ~ age+ed+employ+lninc+debtinc+lncreddebt+lnothdebt+spoused+
reside+pets+pets_cats+pets_dogs+pets_birds+pets_small+
pets_freshfish+address+cars+carvalue+commutetime+cardtenure+
card2tenure+region+gender+agecat+edcat+empcat+retire+inccat+jobsat+
spousedcat+homeown+addresscat+carown+carcatvalue+commutebike+vote+card+
cardtenurecat+card2+card2tenurecat,data = mydata)
summary(fit1)
# Stepwise Linear regression:
library(MASS)
step<- stepAIC(fit1,direction = "both")
ls(step)
step$anova
# The final model we get after applying stepAIC method
# lntotal_spend ~ age + lninc + lncreddebt + address + region + gender +
# jobsat + addresscat + commutebike + card + card2,data = mydata
########################################################################################
# Final Model Building
# _ _ _ _ _ _ _ _ _ _ _ _ _ _
# Conevrting all categorical variables into factors
mydata$region<- as.factor(mydata$region)
mydata$gender<- as.factor(mydata$gender)
mydata$jobsat<- as.factor(mydata$jobsat)
mydata$addresscat<- as.factor(mydata$addresscat)
mydata$commutebike<- as.factor(mydata$commutebike)
mydata$card<- as.factor(mydata$card)
mydata$card2<- as.factor(mydata$card2)
# Splitting the dataset into "Developement" and "validation" datasets
set.seed(222)
ind<- sample(2,nrow(mydata),replace = TRUE,prob = c(0.7,0.3))
dev<- mydata[ind==1,]
val<- mydata[ind==2,]
# Building model on dev dataset
fit3<- lm(lntotal_spend ~ age + lninc + lncreddebt + address + region + gender +
jobsat + addresscat + commutebike + card + card2,data = dev)
summary(fit3)
library(car)
vif(fit3)
# Building the model with significant variables
fit4<- lm(lntotal_spend ~ lninc + gender + addresscat + card + card2,data = dev)
summary(fit4)
vif(fit4)
# Removing the Influential observations using Cook's D method
dev$cd<- cooks.distance(fit4)
dev1<- subset(dev,cd<(4/3500))
final_model<- lm(lntotal_spend ~ lninc + gender + addresscat + card + card2,
data = dev1)
summary(final_model)
dev1$cd1<-cooks.distance(final_model)
dev2<- subset(dev1,cd1<(4/3360))
final_model2<- lm(lntotal_spend ~ lninc + gender + addresscat + card + card2,
data = dev2)
summary(final_model2)
######################################################################################
# Predicting the Total spend in dev and val datasets:
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
# exp() back-transforms the predictions because the response was modelled
# on the log scale (lntotal_spend).
pred1<- exp(predict(final_model2,newdata = dev))
dev<- cbind(dev,pred_spend=pred1)
pred2<- exp(predict(final_model2,newdata = val))
val<- cbind(val,pred_spend=pred2)
# Decile Analysis:
# _ _ _ _ _ _ _ _ _ _ _
# Bucket observations into 10 groups by predicted spend and compare mean
# actual vs predicted spend per decile (calibration / rank-ordering check).
dec_loc<- quantile(dev$pred_spend,probs = seq(0.1,0.9,by=0.1))
dev$decile<- findInterval(dev$pred_spend,c(-Inf,dec_loc,+Inf))
library(sqldf)
dev_decile<- sqldf("select decile,count(decile) as Count,
                avg(total_spend) as Actual_spend,
                avg(pred_spend) as Predicted_spend
                from dev
                group by decile
                order by decile desc")
write.csv(dev_decile,"dev_decile.csv")
# Same decile analysis on the validation sample (cut points recomputed
# from the validation predictions)
dec_loc<- quantile(val$pred_spend,probs = seq(0.1,0.9,by=0.1))
val$decile<- findInterval(val$pred_spend,c(-Inf,dec_loc,Inf))
val_decile<- sqldf("select decile,count(decile) as Count,
                avg(total_spend) as Actual_spend,
                avg(pred_spend) as Predicted_spend
                from val
                group by decile
                order by decile desc")
write.csv(val_decile,"val_decile.csv")
############+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++############
|
# Tests for shredder's draw-slicing helper on a pre-fitted stanfit fixture.
testthat::context('sampling')
# Fixture: fitted rats model. The 1010-vs-10 expectations below imply
# 1000 warmup draws per chain -- TODO confirm against ../files/rats.Rds.
rats <- readRDS('../files/rats.Rds')
testthat::describe('slice',{
 it('default',{
  # NOTE(review): summary() result is discarded; presumably only forces the
  # fixture -- confirm whether this call can be dropped.
  rstan::summary(rats)
  x <- rats%>%shredder::stan_slice(1:10)
  # warmup draws are kept by default, plus the 10 requested draws
  testthat::expect_equal(length(x@sim$samples[[1]][[1]]),1010)
 })
 it('no warmup',{
  x <- rats%>%shredder::stan_slice(1:10,inc_warmup = FALSE)
  testthat::expect_equal(length(x@sim$samples[[1]][[1]]),10)
 })
 it('bad indexs',{
  # out-of-range indices are truncated to the valid range with a warning
  suppressWarnings(x <- rats%>%shredder::stan_slice(900:1200,inc_warmup = FALSE))
  testthat::expect_equal(length(x@sim$samples[[1]][[1]]),101)
  testthat::expect_warning(rats%>%shredder::stan_slice(900:1200),regexp = 'truncating the intersection')
 })
})
# Tests for shredder's thinning helpers; expected lengths come from the
# rats fixture loaded above.
testthat::describe('thinning',{
 it('thin_n',{
  # keep every 2nd draw (warmup included by default)
  x <- rats%>%shredder::stan_thin_n(size = 2)
  testthat::expect_equal(length(x@sim$samples[[1]][[1]]),1500)
 })
 it('thin_frac',{
  # keep a 0.25 fraction of the draws
  x <- rats%>%shredder::stan_thin_frac(size = 0.25)
  testthat::expect_equal(length(x@sim$samples[[1]][[1]]),1250)
 })
 it('no warmup',{
  x <- rats%>%shredder::stan_thin_n(size = 2,inc_warmup = FALSE)
  testthat::expect_equal(length(x@sim$samples[[1]][[1]]),500)
 })
})
| /tests/testthat/test-sampling.R | permissive | yonicd/shredder | R | false | false | 1,179 | r | testthat::context('sampling')
# Fixture: fitted rats model. The 1010-vs-10 expectations below imply
# 1000 warmup draws per chain -- TODO confirm against ../files/rats.Rds.
rats <- readRDS('../files/rats.Rds')
testthat::describe('slice',{
 it('default',{
  # NOTE(review): summary() result is discarded; presumably only forces the
  # fixture -- confirm whether this call can be dropped.
  rstan::summary(rats)
  x <- rats%>%shredder::stan_slice(1:10)
  # warmup draws are kept by default, plus the 10 requested draws
  testthat::expect_equal(length(x@sim$samples[[1]][[1]]),1010)
 })
 it('no warmup',{
  x <- rats%>%shredder::stan_slice(1:10,inc_warmup = FALSE)
  testthat::expect_equal(length(x@sim$samples[[1]][[1]]),10)
 })
 it('bad indexs',{
  # out-of-range indices are truncated to the valid range with a warning
  suppressWarnings(x <- rats%>%shredder::stan_slice(900:1200,inc_warmup = FALSE))
  testthat::expect_equal(length(x@sim$samples[[1]][[1]]),101)
  testthat::expect_warning(rats%>%shredder::stan_slice(900:1200),regexp = 'truncating the intersection')
 })
})
# Tests for shredder's thinning helpers; expected lengths come from the
# rats fixture loaded above.
testthat::describe('thinning',{
 it('thin_n',{
  # keep every 2nd draw (warmup included by default)
  x <- rats%>%shredder::stan_thin_n(size = 2)
  testthat::expect_equal(length(x@sim$samples[[1]][[1]]),1500)
 })
 it('thin_frac',{
  # keep a 0.25 fraction of the draws
  x <- rats%>%shredder::stan_thin_frac(size = 0.25)
  testthat::expect_equal(length(x@sim$samples[[1]][[1]]),1250)
 })
 it('no warmup',{
  x <- rats%>%shredder::stan_thin_n(size = 2,inc_warmup = FALSE)
  testthat::expect_equal(length(x@sim$samples[[1]][[1]]),500)
 })
})
|
# Benchmark zinbFit CPU time on simulated Zeisel data of increasing size.
library(zinbwave)
cpuTime = lapply(c(50, 100, 500, 1000, 5000, 10000), function(n_cells){
  sim_file = sprintf('fig5-S10-S11-S15-S9/simZeisel_nc%s_ratio1_offs2', n_cells)
  load(paste0(sim_file, ".rda"))  # provides `simData`
  # Time 10 independent replicates for this sample size.
  lapply(1:10, function(rep_idx){
    counts = t(simData[[rep_idx]]$counts)
    # drop near-empty genes and cells before fitting
    counts = counts[rowSums(counts) > 5, colSums(counts) > 5]
    system.time(zinbFit(counts, K = 2, commondispersion = FALSE,
                        epsilon = 1000, ncores = 4))
  })
})
save(cpuTime, file = 'cpuTime.rda')
| /sims/figures/fig5-S10-S11-S15-S9/timeZinb.R | no_license | drisso/zinb_analysis | R | false | false | 495 | r | library(zinbwave)
# Benchmark zinbFit CPU time on simulated Zeisel data of increasing size.
cpuTime = lapply(c(50, 100, 500, 1000, 5000, 10000), function(n_cells){
  sim_file = sprintf('fig5-S10-S11-S15-S9/simZeisel_nc%s_ratio1_offs2', n_cells)
  load(paste0(sim_file, ".rda"))  # provides `simData`
  # Time 10 independent replicates for this sample size.
  lapply(1:10, function(rep_idx){
    counts = t(simData[[rep_idx]]$counts)
    # drop near-empty genes and cells before fitting
    counts = counts[rowSums(counts) > 5, colSums(counts) > 5]
    system.time(zinbFit(counts, K = 2, commondispersion = FALSE,
                        epsilon = 1000, ncores = 4))
  })
})
save(cpuTime, file = 'cpuTime.rda')
|
\name{trainControl}
\alias{trainControl}
\title{Control parameters for train}
\description{Control the computational nuances of the \code{\link{train}} function}
\usage{
trainControl(method = "boot",
number = ifelse(grepl("cv", method), 10, 25),
repeats = ifelse(grepl("cv", method), 1, number),
p = 0.75,
initialWindow = NULL,
horizon = 1,
fixedWindow = TRUE,
verboseIter = FALSE,
returnData = TRUE,
returnResamp = "final",
savePredictions = FALSE,
classProbs = FALSE,
summaryFunction = defaultSummary,
selectionFunction = "best",
preProcOptions = list(thresh = 0.95, ICAcomp = 3, k = 5),
index = NULL,
indexOut = NULL,
timingSamps = 0,
predictionBounds = rep(FALSE, 2),
seeds = NA,
adaptive = list(min = 5, alpha = 0.05,
method = "gls", complete = TRUE),
allowParallel = TRUE)
}
\arguments{
\item{method}{The resampling method: \code{boot}, \code{boot632}, \code{cv}, \code{repeatedcv},
\code{LOOCV}, \code{LGOCV} (for repeated training/test splits), \code{none} (only fits one model to the entire training set),
\code{oob} (only for random forest, bagged trees, bagged earth, bagged flexible discriminant analysis, or conditional tree forest models), \code{"adaptive_cv"}, \code{"adaptive_boot"} or \code{"adaptive_LGOCV"}}
\item{number}{Either the number of folds or number of resampling iterations}
\item{repeats}{For repeated k-fold cross-validation only: the number of complete sets of folds to compute}
\item{verboseIter}{A logical for printing a training log.}
\item{returnData}{A logical for saving the data}
\item{returnResamp}{A character string indicating how much of the resampled summary metrics should be saved. Values can be ``final'', ``all'' or ``none''}
\item{savePredictions}{a logical to save the hold-out predictions for each resample}
\item{p}{For leave-group out cross-validation: the training percentage}
\item{initialWindow, horizon, fixedWindow}{possible arguments to \code{\link{createTimeSlices}}}
\item{classProbs}{a logical; should class probabilities be computed for classification models (along with predicted values) in each resample?}
\item{summaryFunction}{a function to compute performance metrics across resamples. The arguments to the function should be the same as those in \code{\link{defaultSummary}}.}
\item{selectionFunction}{the function used to select the optimal tuning parameter. This can be a name of the function or the function itself. See \code{\link{best}} for details and other options.}
\item{preProcOptions}{A list of options to pass to \code{\link{preProcess}}. The type of pre-processing (e.g. center, scaling etc) is passed in via the \code{preProc} option in \code{\link{train}}.}
\item{index}{a list with elements for each resampling iteration. Each list element is the sample rows used for training at that iteration.}
\item{indexOut}{a list (the same length as \code{index}) that dictates which sample are held-out for each resample. If \code{NULL}, then the unique set of samples not contained in \code{index} is used.}
\item{timingSamps}{the number of training set samples that will be used to measure the time for predicting samples (zero indicates that the prediction time should not be estimated).}
\item{predictionBounds}{a logical or numeric vector of length 2 (regression only). If logical, the predictions can be constrained to be within the limit of the training set outcomes. For example, a value of \code{c(TRUE, FALSE)} would only constrain the lower end of predictions. If numeric, specific bounds can be used. For example, if \code{c(10, NA)}, values below 10 would be predicted as 10 (with no constraint in the upper side).}
\item{seeds}{an optional set of integers that will be used to set the seed at each resampling iteration. This is useful when the models are run in parallel. A value of \code{NA} will stop the seed from being set within the worker processes while a value of \code{NULL} will set the seeds using a random set of integers. Alternatively, a list can be used. The list should have \code{B+1} elements where \code{B} is the number of resamples. The first \code{B} elements of the list should be vectors of integers of length \code{M} where \code{M} is the number of models being evaluated. The last element of the list only needs to be a single integer (for the final model). See the Examples section below and the Details section. }
\item{adaptive}{a list used when \code{method} is \code{"adaptive_cv"}, \code{"adaptive_boot"} or \code{"adaptive_LGOCV"}. See Details below. }
\item{allowParallel}{if a parallel backend is loaded and available, should the function use it?}
}
\value{
An echo of the parameters specified
}
\details{
When setting the seeds manually, the number of models being evaluated is required. This may not be obvious as \code{train} does some optimizations for certain models. For example, when tuning over PLS model, the only model that is fit is the one with the largest number of components. So if the model is being tuned over \code{comp in 1:10}, the only model fit is \code{ncomp = 10}. However, if the vector of integers used in the \code{seeds} arguments is longer than actually needed, no error is thrown.
Using \code{method = "none"} and specifying more than one model in \code{\link{train}}'s \code{tuneGrid} or \code{tuneLength} arguments will result in an error.
Using adaptive resampling when \code{method} is either \code{"adaptive_cv"}, \code{"adaptive_boot"} or \code{"adaptive_LGOCV"}, the full set of resamples is not run for each model. As resampling continues, a futility analysis is conducted and models with a low probability of being optimal are removed. These features are experimental. See Kuhn (2014) for more details. The options for this procedure are:
\itemize{
\item \code{min}: the minimum number of resamples used before models are removed
\item \code{alpha}: the confidence level of the one-sided intervals used to measure futility
\item \code{method}: either generalized least squares (\code{method = "gls"}) or a Bradley-Terry model (\code{method = "BT"})
  \item \code{complete}: if a single parameter value is found before the end of resampling, should the full set of resamples be computed for that parameter.
}
}
\author{Max Kuhn}
\references{Kuhn (2014), ``Futility Analysis in the Cross-Validation of Machine Learning Models'' \url{http://arxiv.org/abs/1405.6974}}
\examples{
\dontrun{
## Do 5 repeats of 10-Fold CV for the iris data. We will fit
## a KNN model that evaluates 12 values of k and set the seed
## at each iteration.
set.seed(123)
seeds <- vector(mode = "list", length = 51)
for(i in 1:50) seeds[[i]] <- sample.int(1000, 22)
## For the last model:
seeds[[51]] <- sample.int(1000, 1)
ctrl <- trainControl(method = "repeatedcv",
repeats = 5,
seeds = seeds)
set.seed(1)
mod <- train(Species ~ ., data = iris,
method = "knn",
tuneLength = 12,
trControl = ctrl)
ctrl2 <- trainControl(method = "adaptive_cv",
repeats = 5,
verboseIter = TRUE,
seeds = seeds)
set.seed(1)
mod2 <- train(Species ~ ., data = iris,
method = "knn",
tuneLength = 12,
trControl = ctrl2)
}
}
\keyword{utilities}
| /caret_6.0-37/caret/man/trainControl.Rd | permissive | QunCh/Applied-Predictive-Modeling-with-Python | R | false | false | 7,634 | rd | \name{trainControl}
\alias{trainControl}
\title{Control parameters for train}
\description{Control the computational nuances of the \code{\link{train}} function}
\usage{
trainControl(method = "boot",
number = ifelse(grepl("cv", method), 10, 25),
repeats = ifelse(grepl("cv", method), 1, number),
p = 0.75,
initialWindow = NULL,
horizon = 1,
fixedWindow = TRUE,
verboseIter = FALSE,
returnData = TRUE,
returnResamp = "final",
savePredictions = FALSE,
classProbs = FALSE,
summaryFunction = defaultSummary,
selectionFunction = "best",
preProcOptions = list(thresh = 0.95, ICAcomp = 3, k = 5),
index = NULL,
indexOut = NULL,
timingSamps = 0,
predictionBounds = rep(FALSE, 2),
seeds = NA,
adaptive = list(min = 5, alpha = 0.05,
method = "gls", complete = TRUE),
allowParallel = TRUE)
}
\arguments{
\item{method}{The resampling method: \code{boot}, \code{boot632}, \code{cv}, \code{repeatedcv},
\code{LOOCV}, \code{LGOCV} (for repeated training/test splits), \code{none} (only fits one model to the entire training set),
\code{oob} (only for random forest, bagged trees, bagged earth, bagged flexible discriminant analysis, or conditional tree forest models), \code{"adaptive_cv"}, \code{"adaptive_boot"} or \code{"adaptive_LGOCV"}}
\item{number}{Either the number of folds or number of resampling iterations}
\item{repeats}{For repeated k-fold cross-validation only: the number of complete sets of folds to compute}
\item{verboseIter}{A logical for printing a training log.}
\item{returnData}{A logical for saving the data}
\item{returnResamp}{A character string indicating how much of the resampled summary metrics should be saved. Values can be ``final'', ``all'' or ``none''}
\item{savePredictions}{a logical to save the hold-out predictions for each resample}
\item{p}{For leave-group out cross-validation: the training percentage}
\item{initialWindow, horizon, fixedWindow}{possible arguments to \code{\link{createTimeSlices}}}
\item{classProbs}{a logical; should class probabilities be computed for classification models (along with predicted values) in each resample?}
\item{summaryFunction}{a function to compute performance metrics across resamples. The arguments to the function should be the same as those in \code{\link{defaultSummary}}.}
\item{selectionFunction}{the function used to select the optimal tuning parameter. This can be a name of the function or the function itself. See \code{\link{best}} for details and other options.}
\item{preProcOptions}{A list of options to pass to \code{\link{preProcess}}. The type of pre-processing (e.g. center, scaling etc) is passed in via the \code{preProc} option in \code{\link{train}}.}
\item{index}{a list with elements for each resampling iteration. Each list element is the sample rows used for training at that iteration.}
\item{indexOut}{a list (the same length as \code{index}) that dictates which sample are held-out for each resample. If \code{NULL}, then the unique set of samples not contained in \code{index} is used.}
\item{timingSamps}{the number of training set samples that will be used to measure the time for predicting samples (zero indicates that the prediction time should not be estimated.}
\item{predictionBounds}{a logical or numeric vector of length 2 (regression only). If logical, the predictions can be constrained to be within the limit of the training set outcomes. For example, a value of \code{c(TRUE, FALSE)} would only constrain the lower end of predictions. If numeric, specific bounds can be used. For example, if \code{c(10, NA)}, values below 10 would be predicted as 10 (with no constraint in the upper side).}
\item{seeds}{an optional set of integers that will be used to set the seed at each resampling iteration. This is useful when the models are run in parallel. A value of \code{NA} will stop the seed from being set within the worker processes while a value of \code{NULL} will set the seeds using a random set of integers. Alternatively, a list can be used. The list should have \code{B+1} elements where \code{B} is the number of resamples. The first \code{B} elements of the list should be vectors of integers of length \code{M} where \code{M} is the number of models being evaluated. The last element of the list only needs to be a single integer (for the final model). See the Examples section below and the Details section. }
\item{adaptive}{a list used when \code{method} is \code{"adaptive_cv"}, \code{"adaptive_boot"} or \code{"adaptive_LGOCV"}. See Details below. }
\item{allowParallel}{if a parallel backend is loaded and available, should the function use it?}
}
\value{
An echo of the parameters specified
}
\details{
When setting the seeds manually, the number of models being evaluated is required. This may not be obvious as \code{train} does some optimizations for certain models. For example, when tuning over PLS model, the only model that is fit is the one with the largest number of components. So if the model is being tuned over \code{comp in 1:10}, the only model fit is \code{ncomp = 10}. However, if the vector of integers used in the \code{seeds} arguments is longer than actually needed, no error is thrown.
Using \code{method = "none"} and specifying more than one model in \code{\link{train}}'s \code{tuneGrid} or \code{tuneLength} arguments will result in an error.
Using adaptive resampling when \code{method} is either \code{"adaptive_cv"}, \code{"adaptive_boot"} or \code{"adaptive_LGOCV"}, the full set of resamples is not run for each model. As resampling continues, a futility analysis is conducted and models with a low probability of being optimal are removed. These features are experimental. See Kuhn (2014) for more details. The options for this procedure are:
\itemize{
\item \code{min}: the minimum number of resamples used before models are removed
\item \code{alpha}: the confidence level of the one-sided intervals used to measure futility
\item \code{method}: either generalized least squares (\code{method = "gls"}) or a Bradley-Terry model (\code{method = "BT"})
\item \code{complete}: if a single parameter value is found before the end of resampling, should the full set of resamples be computed for that parameter. )
}
}
\author{Max Kuhn}
\references{Kuhn (2014), ``Futility Analysis in the Cross-Validation of Machine Learning Models'' \url{http://arxiv.org/abs/1405.6974}}
\examples{
\dontrun{
## Do 5 repeats of 10-Fold CV for the iris data. We will fit
## a KNN model that evaluates 12 values of k and set the seed
## at each iteration.
set.seed(123)
seeds <- vector(mode = "list", length = 51)
for(i in 1:50) seeds[[i]] <- sample.int(1000, 22)
## For the last model:
seeds[[51]] <- sample.int(1000, 1)
ctrl <- trainControl(method = "repeatedcv",
repeats = 5,
seeds = seeds)
set.seed(1)
mod <- train(Species ~ ., data = iris,
method = "knn",
tuneLength = 12,
trControl = ctrl)
ctrl2 <- trainControl(method = "adaptive_cv",
repeats = 5,
verboseIter = TRUE,
seeds = seeds)
set.seed(1)
mod2 <- train(Species ~ ., data = iris,
method = "knn",
tuneLength = 12,
trControl = ctrl2)
}
}
\keyword{utilities}
|
#
# Functions for ClusterStability
# Authors: Etienne Lord, Matthieu Willems, Vladimir Makarenkov
# Since: December 2015-July 2015
#
# Stirling number of the second kind S(n, k): the number of ways to
# partition n objects into k non-empty groups, via the explicit
# inclusion-exclusion formula
#   S(n, k) = (1/k!) * sum_{j=0}^{k} (-1)^(k-j) * C(k, j) * j^n
# Vectorized over j instead of accumulating in a scalar loop.
#
# Args:
#  n number of objects (non-negative integer).
#  k number of non-empty groups (non-negative integer).
# Returns: S(n, k) as a double (exact only while it fits in a double).
Stirling2nd<-function(n,k) {
 j<-0:k;
 sum((-1)^(k-j)*choose(k,j)*j^n)/factorial(k);
}
# Internal function to return the p_n_k probability
# Note: Now use the recurrence version from the R copula package
# to calculate the Stirling numbers of the second kind.
#
# Computes the ratio S2(n-1, k) / S2(n, k) of Stirling numbers of the
# second kind. For large n both Stirling numbers overflow to Inf and the
# ratio becomes NaN (or Inf), in which case the function falls back to
# 1/k, the large-n limit of this ratio.
#
# Args:
#  n number of objects.
#  k number of clusters.
# Returns: scalar probability in (0, 1].
p_n_k<-function(n,k) {
 no=copula::Stirling2(n-1,k);  # numerator:   S2(n-1, k)
 de=copula::Stirling2(n,k);    # denominator: S2(n, k)
 node=no/de;
 # guard against Inf/Inf (NaN) and overflow artefacts
 if (is.na(node)||is.infinite(node)) return (1/k);
 return (node);
}
# Internal function to return the p_t_n_k probability
# Note: Now use the recurrence version from the R copula package
# to calculate the Stirling numbers of the second kind.
#
# Computes the ratio S2(n-1, k-1) / S2(n, k) of Stirling numbers of the
# second kind. When the Stirling numbers overflow to Inf and the ratio is
# NaN/Inf, falls back to 0, the large-n limit of this ratio (consistent
# with p_n_k's fallback: k*(1/k) + 0 = 1 preserves the recurrence
# S2(n,k) = k*S2(n-1,k) + S2(n-1,k-1)).
#
# Args:
#  n number of objects.
#  k number of clusters.
# Returns: scalar probability in [0, 1).
p_tilde_n_k<-function(n,k){
 no=copula::Stirling2(n-1,k-1);  # numerator:   S2(n-1, k-1)
 de=copula::Stirling2(n,k);      # denominator: S2(n, k)
 node=no/de;
 # guard against Inf/Inf (NaN) and overflow artefacts
 if (is.na(node)||is.infinite(node)) return (0);
 return (node);
}
# Return TRUE when every element listed in `group` (a vector of indices
# into `partition`) carries the same cluster label, i.e. the whole group
# lies inside a single cluster of `partition`.
#
# Vectorized: compares all selected labels against the first one at once
# instead of scanning with an explicit loop; an empty `group` is
# vacuously TRUE, as with the original loop.
#
# Args:
#  partition vector of cluster labels, one per object.
#  group     indices of the objects to check.
# Returns: single logical.
is_partition_group<-function(partition, group) {
 all(partition[group]==partition[group[1]]);
}
#Function to calculate the singleton indice
# For every object, accumulates index weight for the replicates in which
# that object is a singleton (a cluster of size 1), normalises by the
# total index mass, then folds the score onto [0.5, 1] via max(x, 1-x).
#
# Args:
#  indices      1..n vector of object indices (only its length is used).
#  partition    list of cluster-label vectors, one per replicate.
#               Assumes labels are consecutive integers 1..k (the table
#               position i is compared to the label) -- TODO confirm
#               Reorder() guarantees this.
#  indice       vector of quality-index values.
#               NOTE(review): it is indexed by the OBJECT index j, not the
#               replicate index k, although the caller passes a
#               per-replicate vector; one would expect indice[k]. Confirm
#               against the package's reference paper before changing.
#  total_indice sum of the quality index over all replicates.
# Returns: per-object singleton stability scores in [0.5, 1].
calculate_singleton<-function(indices, partition, indice, total_indice) {
 total_singleton<-array(0, c(length(indices)));
 #cat(total_indice);
 #cat(indices);
 #if (total_indice==0) total_indice=1;
 for (k in 1:length(partition)) {
  part<-as.vector(partition[[k]]);
  a<-table(part);  # cluster sizes for replicate k
  #cat(part);
  #cat ("\nTable:", a[1],a[2],a[3],(a[1]+a[2]+a[3]),"\n");
  for (i in 1:length(a)) {
   if (a[i]==1) {  # cluster i is a singleton in this replicate
    #find the corresponding element in alone group
    for (j in 1:length(indices)) {
     if (part[j]==i) {
      # sanitise NaN/Inf index values before accumulating
      if (!is.finite(indice[j])||is.na(indice[j])) indice[j]=0.0;
      total_singleton[j]<-total_singleton[j]+indice[j];
     }
    }
   }
  }
 }
 # normalise by the total index mass across replicates
 for (j in 1:length(indices)) {
  total_singleton[j]=total_singleton[j]/total_indice;
 }
 # fold onto [0.5, 1]: scores near 1 mean consistent behaviour
 for (j in 1:length(indices)) {
  total_singleton[j]=max(total_singleton[j], 1-total_singleton[j]);
 }
 return (total_singleton);
}
#Main ClusterStability function (approximative)
# Runs `replicate` independent clusterings of `dat` into `k` groups and
# scores how stably every object is clustered, weighted by four internal
# validity indices: Calinski-Harabasz (ch), silhouette (sil), Dunn and
# Davies-Bouldin (db). Uses the approximate PSG computation
# (calculate_individual_PSG_approximative); see ClusterStability_exact
# for the exact variant.
#
# Args:
#  dat       object coercible to a numeric matrix (rows = objects).
#  k         number of clusters.
#  replicate number of clustering replicates.
#  type      'kmeans' for stats::kmeans (MacQueen updates); any other
#            value uses wcKMedoids on the distance matrix.
# Returns: list of per-object stability vectors ST_ch, ST_sil, ST_dunn,
#  ST_db and their means ST_global_*.
# Depends on helpers defined elsewhere in the package: Reorder,
# Kcombination, calculate_indices, calculate_singleton,
# calculate_individual_PSG_approximative; plus wcKMedoids, intCriteria
# and silhouette from attached packages.
ClusterStability<-function(dat, k=3, replicate=1000, type='kmeans') {
 mylist<-list();  # NOTE(review): never used
 dat=as.matrix(dat);
 len<-nrow(dat);  # NOTE(review): never used
 partitions<-list();
 indices<-list();  # overwritten with 1:nrow(dat) after the loop
 # per-replicate index values, one slot per replicate
 indice_list_ch<-array(0, c(replicate));
 indice_list_dunn<-array(0, c(replicate));
 indice_list_db<-array(0, c(replicate));
 indice_list_sil<-array(0, c(replicate));
 # one random seed per replicate
 starts<-sample(1:10000000, replicate, replace=FALSE);
 total_calinski_harabasz=0;
 total_silhouette=0;
 total_dunn=0;
 total_db=0;
 for (i in 1:replicate) {
  if (type=='kmeans') {
   cluster<-Reorder(kmeans(dat,centers=k, nstart=1, iter.max=100, algorithm="MacQueen")$cluster);
  } else {
   cluster<-Reorder(wcKMedoids(dist(dat),k,npass=0,cluster.only=TRUE));
  }
  # three of the four validity indices for this replicate
  indice_kmeans<-intCriteria(dat, cluster,c("Calinski_Harabasz","Dunn","Davies_Bouldin"))
  total_calinski_harabasz<-total_calinski_harabasz+indice_kmeans[1]$calinski_harabasz;
  total_dunn<-total_dunn+indice_kmeans[2]$dunn;
  total_db<-total_db+indice_kmeans[3]$davies_bouldin;
  # average silhouette width: NaN -> 0, negative widths rescaled via (x+1)/2
  ind<-summary(silhouette(cluster,dist(dat)))$avg.width;
  if (is.nan(ind)) {
   ind=0.0;
  } else if (ind<0) {
   ind=(ind+1)/2;
  }
  # NOTE(review): the seed is set AFTER this replicate's clustering, so it
  # effectively seeds replicate i+1 and the first run is unseeded --
  # confirm this ordering is intentional.
  set.seed(starts[i])
  total_silhouette<-total_silhouette+ind;
  partitions[[i]]<-as.vector(cluster);
  indice_list_ch[i]<-indice_kmeans[1]$calinski_harabasz;
  indice_list_sil[i]<-ind;
  indice_list_db[i]<-indice_kmeans[3]$davies_bouldin;
  indice_list_dunn[i]<-indice_kmeans[2]$dunn;
 }
 # bundle the replicate results for the helper functions below
 r<-list("partition"=partitions, "calinski_harabasz"=indice_list_ch, "silhouette"=indice_list_sil,"total_calinski_harabasz"=total_calinski_harabasz, "total_silhouette"=total_silhouette, "dunn"=indice_list_dunn, "db"=indice_list_db,"total_dunn"=total_dunn, "total_db"=total_db);
 indices=1:nrow(dat);
 # all unordered pairs of objects
 combinations=Kcombination(indices, 2);
 # pairwise co-clustering scores weighted by each index
 total_combination_ch<-calculate_indices(combinations, r$partition, r$calinski_harabasz, r$total_calinski_harabasz);
 total_combination_sil<-calculate_indices(combinations, r$partition, r$silhouette, r$total_silhouette);
 total_combination_dunn<-calculate_indices(combinations, r$partition, r$dunn, r$total_dunn);
 total_combination_db<-calculate_indices(combinations, r$partition, r$db, r$total_db);
 # per-object singleton scores weighted by each index
 total_singletons_ch<-calculate_singleton(indices, r$partition,r$calinski_harabasz,r$total_calinski_harabasz);
 total_singletons_sil<-calculate_singleton(indices, r$partition,r$silhouette,r$total_silhouette);
 total_singletons_dunn<-calculate_singleton(indices, r$partition,r$dunn,r$total_dunn);
 total_singletons_db<-calculate_singleton(indices, r$partition,r$db,r$total_db);
 global_PSG_ch<-0;
 global_PSG_sil<-0;
 global_PSG_dunn<-0;
 global_PSG_db<-0
 # per-object approximate stability for each index
 total_PSG_dunn<-calculate_individual_PSG_approximative(k,combinations, total_singletons_dunn,total_combination_dunn, indices);
 total_PSG_db<-calculate_individual_PSG_approximative(k,combinations, total_singletons_db,total_combination_db, indices);
 total_PSG_ch<-calculate_individual_PSG_approximative(k,combinations, total_singletons_ch, total_combination_ch, indices);
 total_PSG_sil<-calculate_individual_PSG_approximative(k,combinations, total_singletons_sil,total_combination_sil, indices);
 # global stability = mean of the per-object scores
 global_PSG_dunn<-mean(total_PSG_dunn);
 global_PSG_db<-mean(total_PSG_db);
 global_PSG_ch<-mean(total_PSG_ch);
 global_PSG_sil<-mean(total_PSG_sil);
 return(list("ST_ch"=total_PSG_ch,
       "ST_sil"=total_PSG_sil,
       "ST_dunn"=total_PSG_dunn,
       "ST_db"=total_PSG_db,
       "ST_global_ch"=global_PSG_ch,
       "ST_global_sil"=global_PSG_sil,
       "ST_global_dunn"=global_PSG_dunn,
       "ST_global_db"=global_PSG_db
       )
 );
}
#Main ClusterStability function (exact)
# Same replicate-clustering pipeline as ClusterStability (see its header
# for the shared arguments and index definitions), but the per-object
# stability is computed with the EXACT PSG formula, which needs the
# Stirling-number probabilities p_n_k / p_tilde_n_k. When S2(n, k)
# overflows, a warning recommends the approximate version instead.
#
# Returns: list of per-object stability vectors ST_ch, ST_sil, ST_dunn,
#  ST_db and their means ST_global_*.
ClusterStability_exact<-function(dat, k=3, replicate=1000, type='kmeans') {
 mylist<-list();  # NOTE(review): never used
 dat=as.matrix(dat);
 len<-nrow(dat);  # NOTE(review): never used
 partitions<-list();
 indices<-list();  # overwritten with 1:nrow(dat) after the loop
 # per-replicate index values, one slot per replicate
 indice_list_ch<-array(0, c(replicate));
 indice_list_dunn<-array(0, c(replicate));
 indice_list_db<-array(0, c(replicate));
 indice_list_sil<-array(0, c(replicate));
 # one random seed per replicate
 starts<-sample(1:10000000, replicate, replace=FALSE);
 total_calinski_harabasz=0;
 total_silhouette=0;
 total_dunn=0;
 total_db=0;
 for (i in 1:replicate) {
  if (type=='kmeans') {
   cluster<-Reorder(kmeans(dat,centers=k, nstart=1, iter.max=100, algorithm="MacQueen")$cluster);
  } else {
   cluster<-Reorder(wcKMedoids(dist(dat),k,npass=0,cluster.only=TRUE));
  }
  # three of the four validity indices for this replicate
  indice_kmeans<-intCriteria(dat, cluster,c("Calinski_Harabasz","Dunn","Davies_Bouldin"))
  total_calinski_harabasz<-total_calinski_harabasz+indice_kmeans[1]$calinski_harabasz;
  total_dunn<-total_dunn+indice_kmeans[2]$dunn;
  total_db<-total_db+indice_kmeans[3]$davies_bouldin;
  # average silhouette width: NaN -> 0, negative widths rescaled via (x+1)/2
  ind<-summary(silhouette(cluster,dist(dat)))$avg.width;
  if (is.nan(ind)) {
   ind=0.0;
  } else if (ind<0) {
   ind=(ind+1)/2;
  }
  # NOTE(review): the seed is set AFTER this replicate's clustering, so it
  # effectively seeds replicate i+1 and the first run is unseeded --
  # confirm this ordering is intentional.
  set.seed(starts[i])
  total_silhouette<-total_silhouette+ind;
  partitions[[i]]<-as.vector(cluster);
  indice_list_ch[i]<-indice_kmeans[1]$calinski_harabasz;
  indice_list_sil[i]<-ind;
  indice_list_db[i]<-indice_kmeans[3]$davies_bouldin;
  indice_list_dunn[i]<-indice_kmeans[2]$dunn;
 }
 # bundle the replicate results for the helper functions below
 r<-list("partition"=partitions, "calinski_harabasz"=indice_list_ch, "silhouette"=indice_list_sil,"total_calinski_harabasz"=total_calinski_harabasz, "total_silhouette"=total_silhouette, "dunn"=indice_list_dunn, "db"=indice_list_db,"total_dunn"=total_dunn, "total_db"=total_db);
 indices=1:nrow(dat);
 # all unordered pairs of objects
 combinations=Kcombination(indices, 2);
 # pairwise co-clustering and singleton scores, per validity index
 total_combination_ch<-calculate_indices(combinations, r$partition, r$calinski_harabasz, r$total_calinski_harabasz);
 total_singletons_ch<-calculate_singleton(indices, r$partition,r$calinski_harabasz,r$total_calinski_harabasz);
 total_combination_sil<-calculate_indices(combinations, r$partition, r$silhouette, r$total_silhouette);
 total_singletons_sil<-calculate_singleton(indices, r$partition,r$silhouette,r$total_silhouette);
 total_combination_dunn<-calculate_indices(combinations, r$partition, r$dunn, r$total_dunn);
 total_singletons_dunn<-calculate_singleton(indices, r$partition,r$dunn,r$total_dunn);
 total_combination_db<-calculate_indices(combinations, r$partition, r$db, r$total_db);
 total_singletons_db<-calculate_singleton(indices, r$partition,r$db,r$total_db);
 global_PSG_ch<-0;
 global_PSG_sil<-0;
 global_PSG_dunn<-0;
 global_PSG_db<-0;
 #k, r_combinations, total_indices, indices
 #Warning message
 # S2(n, k) overflows for moderately large n; warn that the approximate
 # variant should be used instead. NOTE(review): Stirling2 is called
 # unqualified here (unlike copula::Stirling2 above) -- presumably copula
 # is imported; confirm in the NAMESPACE.
 if (is.na(Stirling2(nrow(dat), k))||is.infinite(Stirling2(nrow(dat), k))) {
  msg<-paste("Warning, the current values of (n=",nrow(dat),") and (k=",k,") are greater than the supported values for this function. The use of its approximate version 'ClusterStability' is recommended in this case.");
  warning(msg)
 }
 # Stirling-number probabilities used by the exact PSG formula
 pnk=p_n_k(nrow(dat), k);
 pnktilde=p_tilde_n_k(nrow(dat), k);
 # per-object exact stability for each index
 total_PSG_dunn<-calculate_individual_PSG_exact(k,combinations, total_singletons_dunn,total_combination_dunn, indices,pnk,pnktilde);
 total_PSG_db<-calculate_individual_PSG_exact(k,combinations, total_singletons_db,total_combination_db, indices,pnk,pnktilde);
 total_PSG_ch<-calculate_individual_PSG_exact(k,combinations, total_singletons_ch,total_combination_ch, indices,pnk,pnktilde);
 total_PSG_sil<-calculate_individual_PSG_exact(k,combinations, total_singletons_sil,total_combination_sil, indices,pnk,pnktilde);
 # global stability = mean of the per-object scores
 global_PSG_dunn<-mean(total_PSG_dunn);
 global_PSG_db<-mean(total_PSG_db);
 global_PSG_ch<-mean(total_PSG_ch);
 global_PSG_sil<-mean(total_PSG_sil);
 return(list("ST_ch"=total_PSG_ch,
       "ST_sil"=total_PSG_sil,
       "ST_dunn"=total_PSG_dunn,
       "ST_db"=total_PSG_db,
       "ST_global_ch"=global_PSG_ch,
       "ST_global_sil"=global_PSG_sil,
       "ST_global_dunn"=global_PSG_dunn,
       "ST_global_db"=global_PSG_db
       )
 );
}
| /ClusterStability/R/ClusterStability.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 9,929 | r | #
# Functions for ClusterStability
# Authors: Etienne Lord, Matthieu Willems, Vladimir Makarenkov
# Since: December 2015-July 2015
#
# Stirling number of the second kind S(n, k): the number of ways to
# partition n objects into k non-empty groups, via the explicit
# inclusion-exclusion formula
#   S(n, k) = (1/k!) * sum_{j=0}^{k} (-1)^(k-j) * C(k, j) * j^n
# Vectorized over j instead of accumulating in a scalar loop.
#
# Args:
#  n number of objects (non-negative integer).
#  k number of non-empty groups (non-negative integer).
# Returns: S(n, k) as a double (exact only while it fits in a double).
Stirling2nd<-function(n,k) {
 j<-0:k;
 sum((-1)^(k-j)*choose(k,j)*j^n)/factorial(k);
}
# Internal function to return the p_n_k probability
# Note: Now use the recurrence version from the R copula package
# to calculate the Stirling numbers of the second kind.
#
# Computes the ratio S2(n-1, k) / S2(n, k) of Stirling numbers of the
# second kind. For large n both Stirling numbers overflow to Inf and the
# ratio becomes NaN (or Inf), in which case the function falls back to
# 1/k, the large-n limit of this ratio.
#
# Args:
#  n number of objects.
#  k number of clusters.
# Returns: scalar probability in (0, 1].
p_n_k<-function(n,k) {
 no=copula::Stirling2(n-1,k);  # numerator:   S2(n-1, k)
 de=copula::Stirling2(n,k);    # denominator: S2(n, k)
 node=no/de;
 # guard against Inf/Inf (NaN) and overflow artefacts
 if (is.na(node)||is.infinite(node)) return (1/k);
 return (node);
}
# Internal function to return the p_t_n_k probability
# Note: Now use the recurrence version from the R copula package
# to calculate the Stirling numbers of the second kind.
#
# Computes the ratio S2(n-1, k-1) / S2(n, k) of Stirling numbers of the
# second kind. When the Stirling numbers overflow to Inf and the ratio is
# NaN/Inf, falls back to 0, the large-n limit of this ratio (consistent
# with p_n_k's fallback: k*(1/k) + 0 = 1 preserves the recurrence
# S2(n,k) = k*S2(n-1,k) + S2(n-1,k-1)).
#
# Args:
#  n number of objects.
#  k number of clusters.
# Returns: scalar probability in [0, 1).
p_tilde_n_k<-function(n,k){
 no=copula::Stirling2(n-1,k-1);  # numerator:   S2(n-1, k-1)
 de=copula::Stirling2(n,k);      # denominator: S2(n, k)
 node=no/de;
 # guard against Inf/Inf (NaN) and overflow artefacts
 if (is.na(node)||is.infinite(node)) return (0);
 return (node);
}
# Return TRUE when every element listed in `group` (a vector of indices
# into `partition`) carries the same cluster label, i.e. the whole group
# lies inside a single cluster of `partition`.
#
# Vectorized: compares all selected labels against the first one at once
# instead of scanning with an explicit loop; an empty `group` is
# vacuously TRUE, as with the original loop.
#
# Args:
#  partition vector of cluster labels, one per object.
#  group     indices of the objects to check.
# Returns: single logical.
is_partition_group<-function(partition, group) {
 all(partition[group]==partition[group[1]]);
}
#Function to calculate the singleton indice
# For every object, accumulates index weight for the replicates in which
# that object is a singleton (a cluster of size 1), normalises by the
# total index mass, then folds the score onto [0.5, 1] via max(x, 1-x).
#
# Args:
#  indices      1..n vector of object indices (only its length is used).
#  partition    list of cluster-label vectors, one per replicate.
#               Assumes labels are consecutive integers 1..k (the table
#               position i is compared to the label) -- TODO confirm
#               Reorder() guarantees this.
#  indice       vector of quality-index values.
#               NOTE(review): it is indexed by the OBJECT index j, not the
#               replicate index k, although the caller passes a
#               per-replicate vector; one would expect indice[k]. Confirm
#               against the package's reference paper before changing.
#  total_indice sum of the quality index over all replicates.
# Returns: per-object singleton stability scores in [0.5, 1].
calculate_singleton<-function(indices, partition, indice, total_indice) {
 total_singleton<-array(0, c(length(indices)));
 #cat(total_indice);
 #cat(indices);
 #if (total_indice==0) total_indice=1;
 for (k in 1:length(partition)) {
  part<-as.vector(partition[[k]]);
  a<-table(part);  # cluster sizes for replicate k
  #cat(part);
  #cat ("\nTable:", a[1],a[2],a[3],(a[1]+a[2]+a[3]),"\n");
  for (i in 1:length(a)) {
   if (a[i]==1) {  # cluster i is a singleton in this replicate
    #find the corresponding element in alone group
    for (j in 1:length(indices)) {
     if (part[j]==i) {
      # sanitise NaN/Inf index values before accumulating
      if (!is.finite(indice[j])||is.na(indice[j])) indice[j]=0.0;
      total_singleton[j]<-total_singleton[j]+indice[j];
     }
    }
   }
  }
 }
 # normalise by the total index mass across replicates
 for (j in 1:length(indices)) {
  total_singleton[j]=total_singleton[j]/total_indice;
 }
 # fold onto [0.5, 1]: scores near 1 mean consistent behaviour
 for (j in 1:length(indices)) {
  total_singleton[j]=max(total_singleton[j], 1-total_singleton[j]);
 }
 return (total_singleton);
}
#Main ClusterStability function (approximative)
# Runs `replicate` independent clusterings of `dat` into `k` groups and
# scores how stably every object is clustered, weighted by four internal
# validity indices: Calinski-Harabasz (ch), silhouette (sil), Dunn and
# Davies-Bouldin (db). Uses the approximate PSG computation
# (calculate_individual_PSG_approximative); see ClusterStability_exact
# for the exact variant.
#
# Args:
#  dat       object coercible to a numeric matrix (rows = objects).
#  k         number of clusters.
#  replicate number of clustering replicates.
#  type      'kmeans' for stats::kmeans (MacQueen updates); any other
#            value uses wcKMedoids on the distance matrix.
# Returns: list of per-object stability vectors ST_ch, ST_sil, ST_dunn,
#  ST_db and their means ST_global_*.
# Depends on helpers defined elsewhere in the package: Reorder,
# Kcombination, calculate_indices, calculate_singleton,
# calculate_individual_PSG_approximative; plus wcKMedoids, intCriteria
# and silhouette from attached packages.
ClusterStability<-function(dat, k=3, replicate=1000, type='kmeans') {
 mylist<-list();  # NOTE(review): never used
 dat=as.matrix(dat);
 len<-nrow(dat);  # NOTE(review): never used
 partitions<-list();
 indices<-list();  # overwritten with 1:nrow(dat) after the loop
 # per-replicate index values, one slot per replicate
 indice_list_ch<-array(0, c(replicate));
 indice_list_dunn<-array(0, c(replicate));
 indice_list_db<-array(0, c(replicate));
 indice_list_sil<-array(0, c(replicate));
 # one random seed per replicate
 starts<-sample(1:10000000, replicate, replace=FALSE);
 total_calinski_harabasz=0;
 total_silhouette=0;
 total_dunn=0;
 total_db=0;
 for (i in 1:replicate) {
  if (type=='kmeans') {
   cluster<-Reorder(kmeans(dat,centers=k, nstart=1, iter.max=100, algorithm="MacQueen")$cluster);
  } else {
   cluster<-Reorder(wcKMedoids(dist(dat),k,npass=0,cluster.only=TRUE));
  }
  # three of the four validity indices for this replicate
  indice_kmeans<-intCriteria(dat, cluster,c("Calinski_Harabasz","Dunn","Davies_Bouldin"))
  total_calinski_harabasz<-total_calinski_harabasz+indice_kmeans[1]$calinski_harabasz;
  total_dunn<-total_dunn+indice_kmeans[2]$dunn;
  total_db<-total_db+indice_kmeans[3]$davies_bouldin;
  # average silhouette width: NaN -> 0, negative widths rescaled via (x+1)/2
  ind<-summary(silhouette(cluster,dist(dat)))$avg.width;
  if (is.nan(ind)) {
   ind=0.0;
  } else if (ind<0) {
   ind=(ind+1)/2;
  }
  # NOTE(review): the seed is set AFTER this replicate's clustering, so it
  # effectively seeds replicate i+1 and the first run is unseeded --
  # confirm this ordering is intentional.
  set.seed(starts[i])
  total_silhouette<-total_silhouette+ind;
  partitions[[i]]<-as.vector(cluster);
  indice_list_ch[i]<-indice_kmeans[1]$calinski_harabasz;
  indice_list_sil[i]<-ind;
  indice_list_db[i]<-indice_kmeans[3]$davies_bouldin;
  indice_list_dunn[i]<-indice_kmeans[2]$dunn;
 }
 # bundle the replicate results for the helper functions below
 r<-list("partition"=partitions, "calinski_harabasz"=indice_list_ch, "silhouette"=indice_list_sil,"total_calinski_harabasz"=total_calinski_harabasz, "total_silhouette"=total_silhouette, "dunn"=indice_list_dunn, "db"=indice_list_db,"total_dunn"=total_dunn, "total_db"=total_db);
 indices=1:nrow(dat);
 # all unordered pairs of objects
 combinations=Kcombination(indices, 2);
 # pairwise co-clustering scores weighted by each index
 total_combination_ch<-calculate_indices(combinations, r$partition, r$calinski_harabasz, r$total_calinski_harabasz);
 total_combination_sil<-calculate_indices(combinations, r$partition, r$silhouette, r$total_silhouette);
 total_combination_dunn<-calculate_indices(combinations, r$partition, r$dunn, r$total_dunn);
 total_combination_db<-calculate_indices(combinations, r$partition, r$db, r$total_db);
 # per-object singleton scores weighted by each index
 total_singletons_ch<-calculate_singleton(indices, r$partition,r$calinski_harabasz,r$total_calinski_harabasz);
 total_singletons_sil<-calculate_singleton(indices, r$partition,r$silhouette,r$total_silhouette);
 total_singletons_dunn<-calculate_singleton(indices, r$partition,r$dunn,r$total_dunn);
 total_singletons_db<-calculate_singleton(indices, r$partition,r$db,r$total_db);
 global_PSG_ch<-0;
 global_PSG_sil<-0;
 global_PSG_dunn<-0;
 global_PSG_db<-0
 # per-object approximate stability for each index
 total_PSG_dunn<-calculate_individual_PSG_approximative(k,combinations, total_singletons_dunn,total_combination_dunn, indices);
 total_PSG_db<-calculate_individual_PSG_approximative(k,combinations, total_singletons_db,total_combination_db, indices);
 total_PSG_ch<-calculate_individual_PSG_approximative(k,combinations, total_singletons_ch, total_combination_ch, indices);
 total_PSG_sil<-calculate_individual_PSG_approximative(k,combinations, total_singletons_sil,total_combination_sil, indices);
 # global stability = mean of the per-object scores
 global_PSG_dunn<-mean(total_PSG_dunn);
 global_PSG_db<-mean(total_PSG_db);
 global_PSG_ch<-mean(total_PSG_ch);
 global_PSG_sil<-mean(total_PSG_sil);
 return(list("ST_ch"=total_PSG_ch,
       "ST_sil"=total_PSG_sil,
       "ST_dunn"=total_PSG_dunn,
       "ST_db"=total_PSG_db,
       "ST_global_ch"=global_PSG_ch,
       "ST_global_sil"=global_PSG_sil,
       "ST_global_dunn"=global_PSG_dunn,
       "ST_global_db"=global_PSG_db
       )
 );
}
# Main ClusterStability function (exact): repeatedly clusters `dat` into
# k groups (k-means or k-medoids), scores every replicate with four
# internal validity indices (Calinski-Harabasz, silhouette, Dunn,
# Davies-Bouldin), and returns per-element and global stability (PSG)
# scores computed with the exact formulas.
#
# Arguments:
#   dat       - numeric data (coerced to a matrix), one row per element
#   k         - number of clusters
#   replicate - number of clustering replicates to run
#   type      - 'kmeans' for k-means; any other value uses k-medoids
#
# Returns a list with one per-element ST vector ("ST_*") and its global
# mean ("ST_global_*") for each of the four validity indices.
ClusterStability_exact<-function(dat, k=3, replicate=1000, type='kmeans') {
mylist<-list();
dat=as.matrix(dat);
len<-nrow(dat);
# One partition vector and one score per replicate, per index
partitions<-list();
indices<-list();
indice_list_ch<-array(0, c(replicate));
indice_list_dunn<-array(0, c(replicate));
indice_list_db<-array(0, c(replicate));
indice_list_sil<-array(0, c(replicate));
# Distinct random seeds, one per replicate.
# NOTE(review): set.seed() is only called *after* the clustering inside
# the loop below, so these seeds do not actually control the k-means
# starts -- confirm whether the call was meant to precede kmeans().
starts<-sample(1:10000000, replicate, replace=FALSE);
# Running sums of each index across replicates (used as weights later)
total_calinski_harabasz=0;
total_silhouette=0;
total_dunn=0;
total_db=0;
for (i in 1:replicate) {
# Cluster this replicate; Reorder() presumably relabels the partition
# into a canonical form so partitions are comparable -- confirm.
if (type=='kmeans') {
cluster<-Reorder(kmeans(dat,centers=k, nstart=1, iter.max=100, algorithm="MacQueen")$cluster);
} else {
cluster<-Reorder(wcKMedoids(dist(dat),k,npass=0,cluster.only=TRUE));
}
# Internal validity indices for this replicate's partition
indice_kmeans<-intCriteria(dat, cluster,c("Calinski_Harabasz","Dunn","Davies_Bouldin"))
total_calinski_harabasz<-total_calinski_harabasz+indice_kmeans[1]$calinski_harabasz;
total_dunn<-total_dunn+indice_kmeans[2]$dunn;
total_db<-total_db+indice_kmeans[3]$davies_bouldin;
# Average silhouette width; NaN (degenerate partition) is treated as 0
# and negative values are rescaled from [-1, 0) into [0, 0.5)
ind<-summary(silhouette(cluster,dist(dat)))$avg.width;
if (is.nan(ind)) {
ind=0.0;
} else if (ind<0) {
ind=(ind+1)/2;
}
set.seed(starts[i])
total_silhouette<-total_silhouette+ind;
# Record this replicate's partition and index values
partitions[[i]]<-as.vector(cluster);
indice_list_ch[i]<-indice_kmeans[1]$calinski_harabasz;
indice_list_sil[i]<-ind;
indice_list_db[i]<-indice_kmeans[3]$davies_bouldin;
indice_list_dunn[i]<-indice_kmeans[2]$dunn;
}
# Bundle the replicate results for the stability computations below
r<-list("partition"=partitions, "calinski_harabasz"=indice_list_ch, "silhouette"=indice_list_sil,"total_calinski_harabasz"=total_calinski_harabasz, "total_silhouette"=total_silhouette, "dunn"=indice_list_dunn, "db"=indice_list_db,"total_dunn"=total_dunn, "total_db"=total_db);
# Index-weighted scores for every element pair and every single
# element, one set per validity index (helpers defined elsewhere)
indices=1:nrow(dat);
combinations=Kcombination(indices, 2);
total_combination_ch<-calculate_indices(combinations, r$partition, r$calinski_harabasz, r$total_calinski_harabasz);
total_singletons_ch<-calculate_singleton(indices, r$partition,r$calinski_harabasz,r$total_calinski_harabasz);
total_combination_sil<-calculate_indices(combinations, r$partition, r$silhouette, r$total_silhouette);
total_singletons_sil<-calculate_singleton(indices, r$partition,r$silhouette,r$total_silhouette);
total_combination_dunn<-calculate_indices(combinations, r$partition, r$dunn, r$total_dunn);
total_singletons_dunn<-calculate_singleton(indices, r$partition,r$dunn,r$total_dunn);
total_combination_db<-calculate_indices(combinations, r$partition, r$db, r$total_db);
total_singletons_db<-calculate_singleton(indices, r$partition,r$db,r$total_db);
global_PSG_ch<-0;
global_PSG_sil<-0;
global_PSG_dunn<-0;
global_PSG_db<-0;
# The exact formulas rely on Stirling numbers of the second kind,
# which overflow to NA/Inf for large n and k; warn and recommend the
# approximate version in that case (the computation still proceeds)
if (is.na(Stirling2(nrow(dat), k))||is.infinite(Stirling2(nrow(dat), k))) {
msg<-paste("Warning, the current values of (n=",nrow(dat),") and (k=",k,") are greater than the supported values for this function. The use of its approximate version 'ClusterStability' is recommended in this case.");
warning(msg)
}
# Exact probabilities under random k-partitions (helpers elsewhere)
pnk=p_n_k(nrow(dat), k);
pnktilde=p_tilde_n_k(nrow(dat), k);
# Per-element exact PSG (stability) scores for each validity index
total_PSG_dunn<-calculate_individual_PSG_exact(k,combinations, total_singletons_dunn,total_combination_dunn, indices,pnk,pnktilde);
total_PSG_db<-calculate_individual_PSG_exact(k,combinations, total_singletons_db,total_combination_db, indices,pnk,pnktilde);
total_PSG_ch<-calculate_individual_PSG_exact(k,combinations, total_singletons_ch,total_combination_ch, indices,pnk,pnktilde);
total_PSG_sil<-calculate_individual_PSG_exact(k,combinations, total_singletons_sil,total_combination_sil, indices,pnk,pnktilde);
# Global stability of the data set = mean of the per-element scores
global_PSG_dunn<-mean(total_PSG_dunn);
global_PSG_db<-mean(total_PSG_db);
global_PSG_ch<-mean(total_PSG_ch);
global_PSG_sil<-mean(total_PSG_sil);
return(list("ST_ch"=total_PSG_ch,
"ST_sil"=total_PSG_sil,
"ST_dunn"=total_PSG_dunn,
"ST_db"=total_PSG_db,
"ST_global_ch"=global_PSG_ch,
"ST_global_sil"=global_PSG_sil,
"ST_global_dunn"=global_PSG_dunn,
"ST_global_db"=global_PSG_db
)
);
}
|
#' Import a graph from various graph formats
#' @description Import a variety of graphs from
#' different graph formats and create a graph object.
#' @param graph_file a connection to a graph file.
#' @param file_type the type of file to be imported.
#' Options are: \code{graphml} (GraphML), \code{gml}
#' (GML), \code{sif} (SIF), \code{edges} (a .edges
#' file), and \code{mtx} (MatrixMarket format). If not
#' supplied, the function will infer the type by its
#' file extension.
#' @param graph_name an optional string for labeling
#' the graph object.
#' @param graph_time a date or date-time string
#' (required for insertion of graph into a graph series
#' of the type \code{temporal}).
#' @param graph_tz an optional value for the time zone
#' (\code{tz})
#' corresponding to the date or date-time string
#' supplied as a value to \code{graph_time}. If no time
#' zone is provided then it will be set to \code{GMT}.
#' @return a graph object of class \code{dgr_graph}.
#' @examples
#' \dontrun{
#' library(magrittr)
#'
#' # Import a GraphML graph file
#' graphml_graph <-
#' import_graph(
#' system.file("examples/power_grid.graphml",
#' package = "DiagrammeR"))
#'
#' # Get a count of the graph's nodes
#' graphml_graph %>% node_count
#' #> [1] 4941
#'
#' # Get a count of the graph's edges
#' graphml_graph %>% edge_count
#' #> [1] 6594
#'
#' # Import an SIF graph file
#' sif_graph <-
#' import_graph(
#' system.file("examples/Human_Interactome.sif",
#' package = "DiagrammeR"))
#'
#' # Get a count of the graph's nodes
#' sif_graph %>% node_count
#' #> [1] 8347
#'
#' # Get a count of the graph's edges
#' sif_graph %>% edge_count
#' #> [1] 61263
#'
#' # Import a GML graph file
#' gml_graph <-
#' import_graph(
#' system.file("examples/karate.gml",
#' package = "DiagrammeR"))
#'
#' # Get a count of the graph's nodes
#' gml_graph %>% node_count
#' #> [1] 34
#'
#' # Get a count of the graph's edges
#' gml_graph %>% edge_count
#' #> [1] 78
#' }
#' @importFrom stringr str_extract str_detect str_split str_count
#' str_replace_all str_extract_all
#' @export import_graph
import_graph <- function(graph_file,
                         file_type = NULL,
                         graph_name = NULL,
                         graph_time = NULL,
                         graph_tz = NULL) {

  # The set of file types this function knows how to parse
  supported_types <- c("graphml", "gml", "sif", "edges", "mtx")

  # Stop function if the file doesn't exist
  if (!file.exists(graph_file)) {
    stop("The file as specified doesn't exist.")
  }

  # Stop function if the `file_type` specified is not part of the
  # group that can be imported; normalize it to lower case so that,
  # e.g., "GraphML" works (previously a mixed-case `file_type` passed
  # this check but then matched none of the import branches and the
  # function silently returned NULL)
  if (!is.null(file_type)) {
    file_type <- tolower(file_type)
    if (!(file_type %in% supported_types)) {
      stop("The file type as specified cannot be imported.")
    }
  }

  # Infer the file type from the file extension if no value was
  # supplied for `file_type`
  if (is.null(file_type)) {
    file_extension <- gsub(".*\\.([a-zA-Z]*?)", "\\1", graph_file)
    if (file_extension %in% supported_types) {
      file_type <- file_extension
    } else {
      stop("The file type is not known so it can't be imported.")
    }
  }

  # Build a graph with the default styling applied to imported graphs
  # (shared by the edges/mtx/graphml branches below)
  make_styled_graph <- function(nodes_df, edges_df) {
    create_graph(
      nodes_df = nodes_df,
      edges_df = edges_df,
      graph_name = graph_name,
      graph_time = graph_time,
      graph_tz = graph_tz,
      node_attrs = c("shape = circle",
                     "width = 10",
                     "style = filled",
                     "color = black"),
      graph_attrs = "layout = neato",
      generate_dot = FALSE)
  }

  # The `.edges` and `.mtx` formats are both plain space-separated
  # "from to" records; parse them with a single shared routine
  make_pair_graph <- function(doc, first_line) {
    records <- strsplit(doc[first_line:length(doc)], " ")
    edges <-
      create_edges(
        from = vapply(records, `[[`, character(1), 1),
        to = vapply(records, `[[`, character(1), 2))
    nodes <-
      create_nodes(nodes = unique(unlist(records)))
    make_styled_graph(nodes, edges)
  }

  if (file_type == "edges") {
    # Read in the .edges document as a vector object
    edges_document <- readLines(graph_file)
    # Data records begin at the first non-comment (not `%`-prefixed) line
    first_line <- grep("^[^%].*", edges_document)[1]
    return(make_pair_graph(edges_document, first_line))
  }

  if (file_type == "mtx") {
    # Read in the .mtx document as a vector object
    mtx_document <- readLines(graph_file)
    # Data records begin at the first "<word> <word>" line
    first_line <- grep("^(\\w*) (\\w*)$", mtx_document)[1]
    return(make_pair_graph(mtx_document, first_line))
  }

  if (file_type == "graphml") {
    # Read in the .graphml document as a vector object
    graphml_document <- readLines(graph_file)

    # Starting and ending line indices of the <node> and <edge> elements
    xml_nodes <-
      list(node_start = grep("<node ", graphml_document),
           node_end = grep("</node>", graphml_document))
    xml_edges <-
      list(edge_start = grep("<edge ", graphml_document),
           edge_end = grep("</edge>", graphml_document))

    # Extract each node's ID (the first quoted value in its start tag)
    nodes_ids <- vector(mode = "character")
    for (i in seq_along(xml_nodes[[1]])) {
      nodes_ids <-
        c(nodes_ids,
          str_replace_all(
            str_extract(graphml_document[xml_nodes[[1]][i]],
                        "\".*?\""), "\"", ""))
    }

    # Line indices holding the first node's attribute (`key`) entries;
    # the same keys are assumed present, in the same order, for every
    # node -- a property of the files this importer targets
    node_key_indices <-
      xml_nodes[[1]][1] - 1 +
      grep("key",
           graphml_document[xml_nodes[[1]][1]:xml_nodes[[2]][1]])

    # Obtain names of keys
    node_key_names <-
      gsub(".*?\"(.*?)\".*", "\\1",
           graphml_document[node_key_indices])

    # Obtain one character vector of values per attribute key
    node_attributes <- list()
    for (i in seq_along(node_key_names)) {
      attribute <- vector(mode = "character")
      for (j in seq_along(xml_nodes[[1]])) {
        attribute <-
          c(attribute,
            gsub(".*?>(.*?)<.*", "\\1",
                 graphml_document[xml_nodes[[1]][j] + i]))
      }
      node_attributes[[i]] <- attribute
    }
    names(node_attributes) <- node_key_names

    # Create all nodes for the graph
    all_nodes <-
      cbind(create_nodes(nodes = nodes_ids),
            data.frame(node_attributes))

    # Extract the source and target of each edge (the first and second
    # quoted values in its <edge> start tag)
    edges_from <- vector(mode = "character")
    edges_to <- vector(mode = "character")
    for (i in seq_along(xml_edges[[1]])) {
      edge_endpoints <-
        str_replace_all(
          unlist(str_extract_all(
            graphml_document[xml_edges[[1]][i]],
            "\".*?\"")), "\"", "")
      edges_from <- c(edges_from, edge_endpoints[1])
      edges_to <- c(edges_to, edge_endpoints[2])
    }

    # Create all edges for the graph
    all_edges <-
      create_edges(
        from = edges_from,
        to = edges_to)

    # Create and return the graph
    return(make_styled_graph(all_nodes, all_edges))
  }

  if (file_type == "gml") {
    # Read in the .gml document as a single string
    gml_document <-
      paste(readLines(graph_file), collapse = "")

    # Extract information on whether the graph is directed
    graph_directed <-
      unlist(
        str_replace_all(
          str_extract_all(gml_document,
                          "directed [0-1]"),
          "directed ", ""))

    # Extract all node definitions
    node_defs <-
      unlist(
        str_extract_all(gml_document,
                        "node[ ]*?\\[.*?\\]"))

    # Get all node ID values
    node_id <-
      str_replace_all(
        str_extract_all(
          node_defs,
          "id [a-z0-9_]*"),
        "id ", "")

    # Get all node label values, if they exist
    if (any(str_detect(node_defs, "label"))) {
      node_label <-
        str_replace_all(
          str_replace_all(
            str_extract_all(
              node_defs,
              "label \\\".*?\\\""),
            "label \"", ""),
          "\"", "")
    }

    # Extract all edge definitions and their endpoints
    edge_defs <-
      unlist(str_extract_all(
        gml_document,
        "edge[ ]*?\\[.*?\\]"))
    edges_from <-
      str_replace_all(
        str_extract_all(
          edge_defs,
          "source [a-z0-9_]*"),
        "source ", "")
    edges_to <-
      str_replace_all(
        str_extract_all(
          edge_defs,
          "target [a-z0-9_]*"),
        "target ", "")

    if (any(str_detect(edge_defs, "label"))) {
      edge_label <-
        str_replace_all(
          str_replace_all(
            str_extract_all(
              edge_defs,
              "label \\\".*?\\\""),
            "label \"", ""),
          "\"", "")
    }

    if (any(str_detect(edge_defs, "value"))) {
      edge_value <-
        str_replace_all(
          str_extract_all(
            edge_defs,
            "value [a-z0-9\\.]*"),
          "value ", "")
    }

    # Create all nodes for the graph; `inherits = FALSE` guards
    # against picking up an unrelated `node_label`/`edge_value`
    # binding from an enclosing environment
    all_nodes <-
      create_nodes(nodes = node_id,
                   label = FALSE)
    if (exists("node_label", inherits = FALSE)) {
      all_nodes$label <- node_label
    }

    # Create all edges for the graph
    all_edges <-
      create_edges(from = edges_from,
                   to = edges_to)
    if (exists("edge_value", inherits = FALSE)) {
      all_edges$data_value <- edge_value
    }

    # Create the graph (unstyled, but honoring the directedness flag
    # recorded in the file)
    the_graph <-
      create_graph(
        nodes_df = all_nodes,
        edges_df = all_edges,
        directed = ifelse(graph_directed == "1",
                          TRUE, FALSE),
        generate_dot = FALSE)
    # Return the graph
    return(the_graph)
  }

  if (file_type == "sif") {
    # Read in the SIF document as a vector object
    sif_document <- readLines(graph_file)

    # Determine which nodes are present in the graph: a line is either
    # a lone node name, or a "source<TAB>rel<TAB>target..." record in
    # which every field except the relationship (2nd field) is a node.
    # (The previous `ifelse()` here silently truncated multi-field
    # lines to their first field -- `ifelse` returns a result the same
    # length as its test -- dropping target-only nodes.)
    nodes <- vector(mode = "character")
    for (i in seq_along(sif_document)) {
      fields <- unlist(str_split(sif_document[i], "\t"))
      if (length(fields) == 1) {
        nodes <- c(nodes, fields[1])
      } else {
        nodes <- c(nodes, fields[-2])
      }
    }

    # Create a node data frame from the unique node set
    nodes <- unique(nodes)
    nodes_df <- create_nodes(nodes = nodes)

    # Build edge vectors from every line with at least two tab
    # characters (i.e., source + relationship + one or more targets)
    from <- to <- rel <- vector(mode = "character")
    for (i in which(str_count(sif_document, "\\t") > 1)) {
      length_stmt <- length(str_split(sif_document[i], "\t")[[1]])
      from <- c(from, str_split(sif_document[i], "\t")[[1]][1])
      rel <- c(rel, str_split(sif_document[i], "\t")[[1]][2])
      to <- c(to, str_split(sif_document[i], "\t")[[1]][3:length_stmt])
    }

    # Create an edge data frame
    edges_df <-
      create_edges(
        from = from,
        to = to,
        rel = rel)

    # Create a graph object (unstyled)
    the_graph <-
      create_graph(
        nodes_df = nodes_df,
        edges_df = edges_df,
        generate_dot = FALSE)
    # Return the graph
    return(the_graph)
  }
}
| /R/import_graph.R | no_license | Oscar-Deng/DiagrammeR | R | false | false | 13,426 | r | #' Import a graph from various graph formats
#' @description Import a variety of graphs from
#' different graph formats and create a graph object.
#' @param graph_file a connection to a graph file.
#' @param file_type the type of file to be imported.
#' Options are: \code{graphml} (GraphML), \code{gml}
#' (GML), \code{sif} (SIF), \code{edges} (a .edges
#' file), and \code{mtx} (MatrixMarket format). If not
#' supplied, the function will infer the type by its
#' file extension.
#' @param graph_name an optional string for labeling
#' the graph object.
#' @param graph_time a date or date-time string
#' (required for insertion of graph into a graph series
#' of the type \code{temporal}).
#' @param graph_tz an optional value for the time zone
#' (\code{tz})
#' corresponding to the date or date-time string
#' supplied as a value to \code{graph_time}. If no time
#' zone is provided then it will be set to \code{GMT}.
#' @return a graph object of class \code{dgr_graph}.
#' @examples
#' \dontrun{
#' library(magrittr)
#'
#' # Import a GraphML graph file
#' graphml_graph <-
#' import_graph(
#' system.file("examples/power_grid.graphml",
#' package = "DiagrammeR"))
#'
#' # Get a count of the graph's nodes
#' graphml_graph %>% node_count
#' #> [1] 4941
#'
#' # Get a count of the graph's edges
#' graphml_graph %>% edge_count
#' #> [1] 6594
#'
#' # Import an SIF graph file
#' sif_graph <-
#' import_graph(
#' system.file("examples/Human_Interactome.sif",
#' package = "DiagrammeR"))
#'
#' # Get a count of the graph's nodes
#' sif_graph %>% node_count
#' #> [1] 8347
#'
#' # Get a count of the graph's edges
#' sif_graph %>% edge_count
#' #> [1] 61263
#'
#' # Import a GML graph file
#' gml_graph <-
#' import_graph(
#' system.file("examples/karate.gml",
#' package = "DiagrammeR"))
#'
#' # Get a count of the graph's nodes
#' gml_graph %>% node_count
#' #> [1] 34
#'
#' # Get a count of the graph's edges
#' gml_graph %>% edge_count
#' #> [1] 78
#' }
#' @importFrom stringr str_extract str_detect str_split str_count
#' str_replace_all str_extract_all
#' @export import_graph
import_graph <- function(graph_file,
                         file_type = NULL,
                         graph_name = NULL,
                         graph_time = NULL,
                         graph_tz = NULL) {

  # The set of file types this function knows how to parse
  supported_types <- c("graphml", "gml", "sif", "edges", "mtx")

  # Stop function if the file doesn't exist
  if (!file.exists(graph_file)) {
    stop("The file as specified doesn't exist.")
  }

  # Stop function if the `file_type` specified is not part of the
  # group that can be imported; normalize it to lower case so that,
  # e.g., "GraphML" works (previously a mixed-case `file_type` passed
  # this check but then matched none of the import branches and the
  # function silently returned NULL)
  if (!is.null(file_type)) {
    file_type <- tolower(file_type)
    if (!(file_type %in% supported_types)) {
      stop("The file type as specified cannot be imported.")
    }
  }

  # Infer the file type from the file extension if no value was
  # supplied for `file_type`
  if (is.null(file_type)) {
    file_extension <- gsub(".*\\.([a-zA-Z]*?)", "\\1", graph_file)
    if (file_extension %in% supported_types) {
      file_type <- file_extension
    } else {
      stop("The file type is not known so it can't be imported.")
    }
  }

  # Build a graph with the default styling applied to imported graphs
  # (shared by the edges/mtx/graphml branches below)
  make_styled_graph <- function(nodes_df, edges_df) {
    create_graph(
      nodes_df = nodes_df,
      edges_df = edges_df,
      graph_name = graph_name,
      graph_time = graph_time,
      graph_tz = graph_tz,
      node_attrs = c("shape = circle",
                     "width = 10",
                     "style = filled",
                     "color = black"),
      graph_attrs = "layout = neato",
      generate_dot = FALSE)
  }

  # The `.edges` and `.mtx` formats are both plain space-separated
  # "from to" records; parse them with a single shared routine
  make_pair_graph <- function(doc, first_line) {
    records <- strsplit(doc[first_line:length(doc)], " ")
    edges <-
      create_edges(
        from = vapply(records, `[[`, character(1), 1),
        to = vapply(records, `[[`, character(1), 2))
    nodes <-
      create_nodes(nodes = unique(unlist(records)))
    make_styled_graph(nodes, edges)
  }

  if (file_type == "edges") {
    # Read in the .edges document as a vector object
    edges_document <- readLines(graph_file)
    # Data records begin at the first non-comment (not `%`-prefixed) line
    first_line <- grep("^[^%].*", edges_document)[1]
    return(make_pair_graph(edges_document, first_line))
  }

  if (file_type == "mtx") {
    # Read in the .mtx document as a vector object
    mtx_document <- readLines(graph_file)
    # Data records begin at the first "<word> <word>" line
    first_line <- grep("^(\\w*) (\\w*)$", mtx_document)[1]
    return(make_pair_graph(mtx_document, first_line))
  }

  if (file_type == "graphml") {
    # Read in the .graphml document as a vector object
    graphml_document <- readLines(graph_file)

    # Starting and ending line indices of the <node> and <edge> elements
    xml_nodes <-
      list(node_start = grep("<node ", graphml_document),
           node_end = grep("</node>", graphml_document))
    xml_edges <-
      list(edge_start = grep("<edge ", graphml_document),
           edge_end = grep("</edge>", graphml_document))

    # Extract each node's ID (the first quoted value in its start tag)
    nodes_ids <- vector(mode = "character")
    for (i in seq_along(xml_nodes[[1]])) {
      nodes_ids <-
        c(nodes_ids,
          str_replace_all(
            str_extract(graphml_document[xml_nodes[[1]][i]],
                        "\".*?\""), "\"", ""))
    }

    # Line indices holding the first node's attribute (`key`) entries;
    # the same keys are assumed present, in the same order, for every
    # node -- a property of the files this importer targets
    node_key_indices <-
      xml_nodes[[1]][1] - 1 +
      grep("key",
           graphml_document[xml_nodes[[1]][1]:xml_nodes[[2]][1]])

    # Obtain names of keys
    node_key_names <-
      gsub(".*?\"(.*?)\".*", "\\1",
           graphml_document[node_key_indices])

    # Obtain one character vector of values per attribute key
    node_attributes <- list()
    for (i in seq_along(node_key_names)) {
      attribute <- vector(mode = "character")
      for (j in seq_along(xml_nodes[[1]])) {
        attribute <-
          c(attribute,
            gsub(".*?>(.*?)<.*", "\\1",
                 graphml_document[xml_nodes[[1]][j] + i]))
      }
      node_attributes[[i]] <- attribute
    }
    names(node_attributes) <- node_key_names

    # Create all nodes for the graph
    all_nodes <-
      cbind(create_nodes(nodes = nodes_ids),
            data.frame(node_attributes))

    # Extract the source and target of each edge (the first and second
    # quoted values in its <edge> start tag)
    edges_from <- vector(mode = "character")
    edges_to <- vector(mode = "character")
    for (i in seq_along(xml_edges[[1]])) {
      edge_endpoints <-
        str_replace_all(
          unlist(str_extract_all(
            graphml_document[xml_edges[[1]][i]],
            "\".*?\"")), "\"", "")
      edges_from <- c(edges_from, edge_endpoints[1])
      edges_to <- c(edges_to, edge_endpoints[2])
    }

    # Create all edges for the graph
    all_edges <-
      create_edges(
        from = edges_from,
        to = edges_to)

    # Create and return the graph
    return(make_styled_graph(all_nodes, all_edges))
  }

  if (file_type == "gml") {
    # Read in the .gml document as a single string
    gml_document <-
      paste(readLines(graph_file), collapse = "")

    # Extract information on whether the graph is directed
    graph_directed <-
      unlist(
        str_replace_all(
          str_extract_all(gml_document,
                          "directed [0-1]"),
          "directed ", ""))

    # Extract all node definitions
    node_defs <-
      unlist(
        str_extract_all(gml_document,
                        "node[ ]*?\\[.*?\\]"))

    # Get all node ID values
    node_id <-
      str_replace_all(
        str_extract_all(
          node_defs,
          "id [a-z0-9_]*"),
        "id ", "")

    # Get all node label values, if they exist
    if (any(str_detect(node_defs, "label"))) {
      node_label <-
        str_replace_all(
          str_replace_all(
            str_extract_all(
              node_defs,
              "label \\\".*?\\\""),
            "label \"", ""),
          "\"", "")
    }

    # Extract all edge definitions and their endpoints
    edge_defs <-
      unlist(str_extract_all(
        gml_document,
        "edge[ ]*?\\[.*?\\]"))
    edges_from <-
      str_replace_all(
        str_extract_all(
          edge_defs,
          "source [a-z0-9_]*"),
        "source ", "")
    edges_to <-
      str_replace_all(
        str_extract_all(
          edge_defs,
          "target [a-z0-9_]*"),
        "target ", "")

    if (any(str_detect(edge_defs, "label"))) {
      edge_label <-
        str_replace_all(
          str_replace_all(
            str_extract_all(
              edge_defs,
              "label \\\".*?\\\""),
            "label \"", ""),
          "\"", "")
    }

    if (any(str_detect(edge_defs, "value"))) {
      edge_value <-
        str_replace_all(
          str_extract_all(
            edge_defs,
            "value [a-z0-9\\.]*"),
          "value ", "")
    }

    # Create all nodes for the graph; `inherits = FALSE` guards
    # against picking up an unrelated `node_label`/`edge_value`
    # binding from an enclosing environment
    all_nodes <-
      create_nodes(nodes = node_id,
                   label = FALSE)
    if (exists("node_label", inherits = FALSE)) {
      all_nodes$label <- node_label
    }

    # Create all edges for the graph
    all_edges <-
      create_edges(from = edges_from,
                   to = edges_to)
    if (exists("edge_value", inherits = FALSE)) {
      all_edges$data_value <- edge_value
    }

    # Create the graph (unstyled, but honoring the directedness flag
    # recorded in the file)
    the_graph <-
      create_graph(
        nodes_df = all_nodes,
        edges_df = all_edges,
        directed = ifelse(graph_directed == "1",
                          TRUE, FALSE),
        generate_dot = FALSE)
    # Return the graph
    return(the_graph)
  }

  if (file_type == "sif") {
    # Read in the SIF document as a vector object
    sif_document <- readLines(graph_file)

    # Determine which nodes are present in the graph: a line is either
    # a lone node name, or a "source<TAB>rel<TAB>target..." record in
    # which every field except the relationship (2nd field) is a node.
    # (The previous `ifelse()` here silently truncated multi-field
    # lines to their first field -- `ifelse` returns a result the same
    # length as its test -- dropping target-only nodes.)
    nodes <- vector(mode = "character")
    for (i in seq_along(sif_document)) {
      fields <- unlist(str_split(sif_document[i], "\t"))
      if (length(fields) == 1) {
        nodes <- c(nodes, fields[1])
      } else {
        nodes <- c(nodes, fields[-2])
      }
    }

    # Create a node data frame from the unique node set
    nodes <- unique(nodes)
    nodes_df <- create_nodes(nodes = nodes)

    # Build edge vectors from every line with at least two tab
    # characters (i.e., source + relationship + one or more targets)
    from <- to <- rel <- vector(mode = "character")
    for (i in which(str_count(sif_document, "\\t") > 1)) {
      length_stmt <- length(str_split(sif_document[i], "\t")[[1]])
      from <- c(from, str_split(sif_document[i], "\t")[[1]][1])
      rel <- c(rel, str_split(sif_document[i], "\t")[[1]][2])
      to <- c(to, str_split(sif_document[i], "\t")[[1]][3:length_stmt])
    }

    # Create an edge data frame
    edges_df <-
      create_edges(
        from = from,
        to = to,
        rel = rel)

    # Create a graph object (unstyled)
    the_graph <-
      create_graph(
        nodes_df = nodes_df,
        edges_df = edges_df,
        generate_dot = FALSE)
    # Return the graph
    return(the_graph)
  }
}
|
# Decision-tree (rpart) hyper-parameter sweep for the CAD dataset:
# grid-searches cp, maxdepth and minsplit, recording accuracy,
# sensitivity, specificity, MCC, F1 and ROC-AUC for every combination
# and tracking the best-accuracy configuration.
rm(list=ls())
library(rpart)
library(rpart.plot)
library(pROC)
# Pre-split train/test CSVs plus a results sheet that is overwritten
# row-by-row below (hard-coded local Windows paths)
training<-read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/ATLEAST4/CAD Dataset Train70p.csv")
testing<-read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/ATLEAST4/CAD Dataset Test30p.csv")
sapdata<-read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/ATLEAST4/Sap_DT_Selected.csv")
# Reference trees grown with no complexity penalty (cp = 0).
# NOTE(review): neither model is used again in this script.
base_model <- rpart(as.factor(training$Cath) ~., data = training,method="class",control = rpart.control(cp = 0))
postpruned_model <- rpart(as.factor(Cath) ~., data = training,method="class",control = rpart.control(cp = 0))
# Trackers for the best configuration found so far; `k` is the next
# row to write in the results sheet
max_dep=0
min_split=0
accu=0
cp_op=0
k=1
cp_array=c(0,0.01,0.084,0.05,0.1,0.5,1)
# Full grid: 7 cp values x depths 1..20 x minsplit 1..32
for(cp_value in 1:length(cp_array)){
for (i in 1:20) {
for (j in 1:32) {
print(i)
print(j)
prepruned_model <- rpart(as.factor(Cath) ~., data = training,method="class",control = rpart.control(cp = cp_array[cp_value], maxdepth = i,minsplit =j))
# Column 15 is assumed to be the class label `Cath` and is dropped
# from the predictors here -- TODO confirm the column position
testing$pred <- predict(object = prepruned_model, newdata = testing[,-15], type = "class")
#printcp()
#plotcp()
# Confusion matrix (prediction x actual) and derived metrics; the
# cm[2,2]/cm[1,1] indexing assumes a full 2x2 table, i.e. both
# classes appear among the predictions
cm=table(testing$pred,testing$Cath,dnn=c("Prediction","Actual"))
acc=((sum(diag(cm))/sum(cm)))
tp<-cm[2,2]
tn<-cm[1,1]
fn<-cm[1,2]
fp<-cm[2,1]
sen=tp/(tp+fn)
spe=tn/(tn+fp)
mcc=((tp*tn) - (fp*fn))/(sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)))
f1=2*tp/((2*tp)+fp+fn)
roc_obj<-roc(testing[,15],as.numeric(testing$pred))
rocauc<-auc(roc_obj)
# Persist this combination's settings and metrics as row k
sapdata[k,1]=cp_array[cp_value]
sapdata[k,2]=i
sapdata[k,3]=j
sapdata[k,4]=acc
sapdata[k,5]=sen
sapdata[k,6]=spe
sapdata[k,7]=mcc
sapdata[k,8]=f1
sapdata[k,9]=rocauc
k=k+1
print('Accuracy')
print(acc)
print('sensitivity')
print(sen)
print('Specificity')
print(spe)
print('MCC')
print(mcc)
print('F1')
print(f1)
print('AUC')
print(rocauc)
# Remember the configuration with the best accuracy so far
if(acc>accu){
max_dep=i
min_split=j
accu=acc
cp_op=cp_array[cp_value]
}
}
}
}
# Write the full results grid and report the best configuration
write.csv(sapdata,"F:/Thesis/DataMing+MachieLeaning/CAD/ATLEAST4/Sap_DT_Selected.csv")
print(cp_op)
print(max_dep)
print(min_split)
print(accu)
rpart.plot(prepruned_model) | /ATLEAST4/DT.R | permissive | UtshaDas/CAD-Classification | R | false | false | 2,345 | r | rm(list=ls())
# Decision-tree (rpart) hyper-parameter sweep for the CAD dataset:
# grid-searches cp, maxdepth and minsplit, recording accuracy,
# sensitivity, specificity, MCC, F1 and ROC-AUC for every combination
# and tracking the best-accuracy configuration.
library(rpart)
library(rpart.plot)
library(pROC)
# Pre-split train/test CSVs plus a results sheet that is overwritten
# row-by-row below (hard-coded local Windows paths)
training<-read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/ATLEAST4/CAD Dataset Train70p.csv")
testing<-read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/ATLEAST4/CAD Dataset Test30p.csv")
sapdata<-read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/ATLEAST4/Sap_DT_Selected.csv")
# Reference trees grown with no complexity penalty (cp = 0).
# NOTE(review): neither model is used again in this script.
base_model <- rpart(as.factor(training$Cath) ~., data = training,method="class",control = rpart.control(cp = 0))
postpruned_model <- rpart(as.factor(Cath) ~., data = training,method="class",control = rpart.control(cp = 0))
# Trackers for the best configuration found so far; `k` is the next
# row to write in the results sheet
max_dep=0
min_split=0
accu=0
cp_op=0
k=1
cp_array=c(0,0.01,0.084,0.05,0.1,0.5,1)
# Full grid: 7 cp values x depths 1..20 x minsplit 1..32
for(cp_value in 1:length(cp_array)){
for (i in 1:20) {
for (j in 1:32) {
print(i)
print(j)
prepruned_model <- rpart(as.factor(Cath) ~., data = training,method="class",control = rpart.control(cp = cp_array[cp_value], maxdepth = i,minsplit =j))
# Column 15 is assumed to be the class label `Cath` and is dropped
# from the predictors here -- TODO confirm the column position
testing$pred <- predict(object = prepruned_model, newdata = testing[,-15], type = "class")
#printcp()
#plotcp()
# Confusion matrix (prediction x actual) and derived metrics; the
# cm[2,2]/cm[1,1] indexing assumes a full 2x2 table, i.e. both
# classes appear among the predictions
cm=table(testing$pred,testing$Cath,dnn=c("Prediction","Actual"))
acc=((sum(diag(cm))/sum(cm)))
tp<-cm[2,2]
tn<-cm[1,1]
fn<-cm[1,2]
fp<-cm[2,1]
sen=tp/(tp+fn)
spe=tn/(tn+fp)
mcc=((tp*tn) - (fp*fn))/(sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)))
f1=2*tp/((2*tp)+fp+fn)
roc_obj<-roc(testing[,15],as.numeric(testing$pred))
rocauc<-auc(roc_obj)
# Persist this combination's settings and metrics as row k
sapdata[k,1]=cp_array[cp_value]
sapdata[k,2]=i
sapdata[k,3]=j
sapdata[k,4]=acc
sapdata[k,5]=sen
sapdata[k,6]=spe
sapdata[k,7]=mcc
sapdata[k,8]=f1
sapdata[k,9]=rocauc
k=k+1
print('Accuracy')
print(acc)
print('sensitivity')
print(sen)
print('Specificity')
print(spe)
print('MCC')
print(mcc)
print('F1')
print(f1)
print('AUC')
print(rocauc)
# Remember the configuration with the best accuracy so far
if(acc>accu){
max_dep=i
min_split=j
accu=acc
cp_op=cp_array[cp_value]
}
}
}
}
# Write the full results grid and report the best configuration
write.csv(sapdata,"F:/Thesis/DataMing+MachieLeaning/CAD/ATLEAST4/Sap_DT_Selected.csv")
print(cp_op)
print(max_dep)
print(min_split)
print(accu)
# NOTE(review): this plots the model from the *last* grid iteration,
# not the best-scoring one tracked above -- confirm intent
rpart.plot(prepruned_model)
library(dplyr)
## Load and prepare dataset
dat <- read.table("household_power_consumption.txt", header = T, sep = ";")
dat$Date <- as.Date(dat$Date, "%d/%m/%Y")
X <- filter(dat, Date == "2007-02-01" | Date == "2007-02-02")
## Subset data for histogram and create the plot
GAP <- as.numeric(as.character(X$Global_active_power))
png(filename = "plot1.png")
hist(GAP, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()
| /plot1.R | no_license | lonely-luckily/ExData_Plotting1 | R | false | false | 463 | r | library(dplyr)
## Load and prepare dataset
dat <- read.table("household_power_consumption.txt", header = T, sep = ";")
dat$Date <- as.Date(dat$Date, "%d/%m/%Y")
X <- filter(dat, Date == "2007-02-01" | Date == "2007-02-02")
## Subset data for histogram and create the plot
GAP <- as.numeric(as.character(X$Global_active_power))
png(filename = "plot1.png")
hist(GAP, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()
|
# Peer-graded Assignment: Getting and Cleaning Data Course Project
# Name: Byungman Choi
setwd("./W4Assignment/UCI HAR Dataset/")
trainData <- read.table("./train/X_train.txt")
trainLabel <- read.table("./train/y_train.txt")
testData <- read.table("./test/X_test.txt")
testLabel <- read.table("./test/y_test.txt")
# Loading the activity labels
activityLabelData <- read.table("./activity_labels.txt")
colnames(activityLabelData) <- c("labelID","labelName")
columnNames <- readLines("./features.txt")
colnames(trainData) <- make.names(columnNames)
colnames(testData) <- make.names(columnNames)
colnames(trainLabel) <- "activityLabel"
colnames(testLabel) <- "activityLabel"
# Checking the number of rows in trainData, trainLabel, testData, testLabel
# before combine.
nrow(trainData);nrow(trainLabel);nrow(testData);nrow(testLabel)
# Merging trainLabel and trainData
trainDataFinal <- cbind(trainLabel,trainData)
# Merging testLabel and testData
testDataFinal <- cbind(testLabel,testData)
# Merging the training and the test datasets to create one data set(totalData)
totalData <- rbind(trainDataFinal, testDataFinal)
# Extracting only the measurements on the mean and std for each measurement
finalData <- totalData[grep("activityLabel|mean\\.|std\\.",colnames(totalData))]
# Substitute the activity names in finalData from activityLabelData
for(i in seq_len(nrow(activityLabelData))) {
finalData$activityLabel[finalData$activityLabel == i] <- activityLabelData$labelName[i]
}
# Creating the average dataset (averageData) for each activity and each subject.
averageData <- aggregate(finalData[, 2:ncol(finalData)],list(finalData$activityLabel), mean,na.action=na.omit)
write.table(averageData,file = "tidyDataSet.txt",row.names = FALSE)
| /run_analysis.R | no_license | bmwilliam/Getting-and-Cleaning-Data | R | false | false | 1,801 | r | # Peer-graded Assignment: Getting and Cleaning Data Course Project
# Name: Byungman Choi
setwd("./W4Assignment/UCI HAR Dataset/")
trainData <- read.table("./train/X_train.txt")
trainLabel <- read.table("./train/y_train.txt")
testData <- read.table("./test/X_test.txt")
testLabel <- read.table("./test/y_test.txt")
# Loading the activity labels
activityLabelData <- read.table("./activity_labels.txt")
colnames(activityLabelData) <- c("labelID","labelName")
columnNames <- readLines("./features.txt")
colnames(trainData) <- make.names(columnNames)
colnames(testData) <- make.names(columnNames)
colnames(trainLabel) <- "activityLabel"
colnames(testLabel) <- "activityLabel"
# Checking the number of rows in trainData, trainLabel, testData, testLabel
# before combine.
nrow(trainData);nrow(trainLabel);nrow(testData);nrow(testLabel)
# Merging trainLabel and trainData
trainDataFinal <- cbind(trainLabel,trainData)
# Merging testLabel and testData
testDataFinal <- cbind(testLabel,testData)
# Merging the training and the test datasets to create one data set(totalData)
totalData <- rbind(trainDataFinal, testDataFinal)
# Extracting only the measurements on the mean and std for each measurement
finalData <- totalData[grep("activityLabel|mean\\.|std\\.",colnames(totalData))]
# Substitute the activity names in finalData from activityLabelData
for(i in seq_len(nrow(activityLabelData))) {
finalData$activityLabel[finalData$activityLabel == i] <- activityLabelData$labelName[i]
}
# Creating the average dataset (averageData) for each activity and each subject.
averageData <- aggregate(finalData[, 2:ncol(finalData)],list(finalData$activityLabel), mean,na.action=na.omit)
write.table(averageData,file = "tidyDataSet.txt",row.names = FALSE)
|
##### author: CAI YUN-TING ######
##### The Survey of Family Income and Expenditure, 2017 #####
##### prep and options #####
# set working directory
setwd("D:/R_wd/")
# clear objects
rm(list = ls())
# loading packages
# expss must be loaded after haven
l <- c("tidyverse", "docxtractr", "readtext",
"haven", "expss", "microbenchmark", "hablar")
lapply(l, require, character.only = TRUE)
rm(l)
# options
options(readr.show_progress = TRUE)
# do not show scientific notation
options(scipen = 999)
# timestamp
timestamp <- format(Sys.time(), "%m%d-%H%M")
# processing time
ptm <- proc.time()
##### create the codebook ######
# codebook
# file is in the default working dirctory
path_code <- "AA170042/code106.docx"
code_tbl <- read_docx(path_code) %>% docx_extract_tbl() %>% .[complete.cases(.), ]
# add row: card_num
code_tbl <- rbind(code_tbl, c(NA, "card_num", "#/79-80", NA, NA, NA))
# colnames
colnames(code_tbl) <- c("q_num", "variable", "card_pos", "label", "level", "note")
# variable b1_# - b19_#
code_tbl$`variable`[17:36] <- code_tbl$`variable`[17:36] %>%
str_split("#", simplify = TRUE) %>%
.[ , 1] %>% as.character()
# start
code_tbl$`start` <- code_tbl$`card_pos` %>%
str_split("/", simplify = TRUE) %>%
.[ , 2] %>% str_split("-", simplify = TRUE) %>%
.[ ,1] %>% as.integer()
# end
code_tbl$`end` <- code_tbl$`card_pos` %>%
str_split("/", simplify = TRUE) %>%
.[ , 2] %>% str_split("-", simplify = TRUE) %>%
.[ ,2] %>% as.integer()
# replace NA in `end`
code_tbl$`end` <- with(code_tbl, if_else(is.na(`end`), `start`, `end`))
##### names of item_xxx ######
doc.text.parts <- readtext(path_code)$`text` %>%
strsplit("\n") %>% .[[1]]
doc.items <- grep("*:", doc.text.parts, value = TRUE) %>%
.[-c(1:12, 808:810)]
# item numbers
doc.items.part1 <- strsplit(doc.items, ":") %>%
unlist() %>%
.[2 * (1:length(doc.text.parts)) -1 ] %>%
.[!is.na(.)]
# item contents
doc.items.part2 <- strsplit(doc.items, ":") %>%
unlist() %>%
.[2 * (1:length(doc.text.parts))] %>%
.[!is.na(.)]
##### data processing and manipulation ######
# data raw and card_num
path_dat <- "AA170042/inc106.dat"
df.source <- read_fwf(path_dat, fwf_positions(start = c(1, 79),
end = c(80, 80),
col_names = c("raw", "card_num")
),
# card_num as integer
col_types = cols(card_num = "i", .default = "c")
) %>%
# order by card_num
.[order(.$`card_num`), ]
##### card 01 #####
# filter out card_num == 1
x <- filter(df.source, card_num == 1) %>% .[ ,1] %>% .$`raw`
# define tempfile name and format
y <- tempfile("tp", fileext = ".dat")
# write the tempfile
write(x, file = y)
# read card 1
df1 <- read_fwf(y, fwf_positions(code_tbl$`start`[1:16],
code_tbl$`end`[1:16],
col_names = code_tbl$`variable`[1:16]),
# define column types (variable classes) in df1
col_types = cols(x1 = "c", id = "c",
a4 = "f", a5 = "f", a6 = "n",
a7 = "f", a8 = "n", a9 = "n",
a11 = "f", a12 = "n", a13 = "n",
a16 = "f", a17 = "f", a18 = "f",
a19 = "n", a20 = "n")
) %>%
# order
.[order(.$`x1`), ]
# free up ram
gc()
##### card 02 #####
# card_num 02:20
# function f2
f2 <- function(c, d = c - 1) {
# if input c (card_num) is not in the raw data,
# then create a temp file wiith "000000001" (for merging data),
# which will fill NA.
# matrix with 22 columns c(1, 17:36, 95)
if(c %in% df.source$`card_num`) {
# filter out card_num == 02:20 and create a temporary .dat file
x <- filter(df.source, card_num == c) %>% .[ ,1] %>% .$`raw`
y <- tempfile("tmp", fileext = ".dat")
write(x, file = y)
# read file
tmp <- read_fwf(y, fwf_positions(code_tbl$`start`[c(1, 17:36)],
code_tbl$`end`[c(1, 17:36)])
)
} else {tmp <- matrix(ncol = 21) %>% as_tibble(.name_repair = NULL)
tmp[ , 1] <- "00000001"
}
# name the columns (b1_1, b1_2, b1_3 ......)
# eg. b1_# , card_num == 2, then # == 1, get b1_1 (#: 1:19)
colnames(tmp) <- c("x1", paste(code_tbl$`variable`[17:36], d, sep = ""))
# tmp$card_num <- as.integer(tmp$`card_num`)
return(tmp)
}
# for loop and left_join (dplyr)
# card number = 2:20
df2 <- list()
for(i in 2:20) {
df2[[i - 1]] <- f2(i)
}
# left_joing with reduce
df2 <- Reduce(function(...) left_join(..., by = "x1"), df2)
# column types
# b1_, b4_, b21_, b23_, b25_
df2 <- df2 %>% convert(chr(x1),
chr(contains("b1_")),
num(contains("b4_")),
num(contains("b21_")),
num(contains("b23_")),
num(contains("b25_"))
)
# b2_, b3_ ... (factor)
variables <- colnames(df2)
l <- paste("b", c(2:3, 5, 8:10, 12:20, 22), "_", sep = "") %>%
paste("|", sep = "", collapse = "") %>% paste("b24_", sep = "")
bb <- grep(l, variables)
# mutate_if
df2[ , bb] %<>% mutate_if(is.character, as.factor) %>%
mutate_if(is.numeric, as.factor)
# benchmark
# microbenchmark(a <- Reduce(function(...) left_join(..., by = "x1"), df2), unit = "s")
# microbenchmark(b <- Reduce(function(...) merge(..., by = "x1", all = TRUE), df2), unit = "s")
# free up ram
gc()
##### card 21 #####
# filter out card_num == 21
x <- filter(df.source, card_num == 21) %>% .[ ,1] %>% .$raw
y <- tempfile("tmp", fileext = ".dat")
write(x, file = y)
# code_tbl[37:67]
df21 <- read_fwf(y, fwf_positions(code_tbl$`start`[c(1, 37:67)],
code_tbl$`end`[c(1, 37:67)],
# variable names
col_names = code_tbl$`variable`[c(1, 37:67)]),
# define column types
cols(x1 = "c", f57 = "f", f61 = "f", .default = "n")
) %>%
# order by x1
.[order(.$`x1`), ]
# free up ram
gc()
##### card 22 #####
# filter out card_num == 22
x <- filter(df.source, card_num == 22) %>% .[ ,1] %>% .$raw
y <- tempfile("tmp", fileext = ".dat")
write(x, file = y)
# code_tbl[68:88]
df22 <- read_fwf(y, fwf_positions(code_tbl$`start`[c(1, 68:88)],
code_tbl$`end`[c(1, 68:88)],
# variable names
col_names = code_tbl$`variable`[c(1, 68:88)]),
# define column types
col_types = cols(x1 = "c", c1 = "f", c2 = "f",
c4 = "f", d1 = "f", d5 = "f",
d6 = "f", .default = "n")) %>%
# order by x1
.[order(.$`x1`), ]
# free up ram
gc()
##### card 23-99 ####
# filter out card_num %in% 23:99
x <- filter(df.source, card_num %in% 23:99) %>% .[ ,1] %>% .$raw
y <- tempfile("tmp", fileext = ".dat")
write(x, file = y)
x <- list()
# for loop (5 sections)
for(i in 0:4) {
x[[i + 1]] <- read_fwf(y, fwf_positions(c(1, 9 + i * 14, 13 + i * 14),
c(8, 12 + i * 14, 22 + i * 14),
col_names = c("x1", "item", "exp")),
col_types = cols(x1 = "c", item = "c", exp = "c")
)
df23 <- do.call(bind_rows, x) %>% distinct()
}
# free up ram
gc()
##### list for grep (grepbook) #####
sym <- c("{", grep("[A-R]", LETTERS, value = TRUE), "}")
digi <- c(0:9, 1:9, 0)
positive <- c(rep("+", 10), rep("-", 10))
grepbook <- tibble(sym, digi, positive)
# pattern for grep and gsub
pattern <- grepbook$`sym` %>% .[2:19]
grepbook$pattern <- c("\\{$", sapply(pattern, paste, "$", sep = ""), "\\}$")
##### replace symbols with digits (positive) ####
p <- grepbook$pattern
r <- grepbook$digi
for(i in 1:10) {
# postitive [1:10]
df23$exp[grep(p[i], df23$exp)] <- gsub(pattern = p[i],
replacement = r[i],
x = grep(p[i], df23$exp, value = TRUE))
# negative [11:20]
df23$exp[grep(p[i + 10], df23$exp)] <- gsub(pattern = p[i + 10],
replacement = r[i + 10],
x = grep(p[i + 10], df23$exp, value = TRUE)) %>%
paste("-", ., sep = "")
}
# spread (transpose)
df23 <- df23 %>% spread(key = "item", value = "exp")
# remove column `0000`
df23 <- df23 %>% select( - one_of("0000"))
##### items with no observations #####
# df23(635 variables) but all items are 795
# names of all the items
colnames(df23)[-1] <- colnames(df23)[-1] %>% as.integer() %>% paste("itm", ., sep = "")
itms_all <- doc.items.part1 %>% as.integer() %>% paste("itm" , ., sep = "")
# create a tibble for those who are not in df23
df.itm.all <- matrix("NA", nrow = nrow(df23), ncol = length(itms_all)) %>%
as_tibble(.name_repair = NULL)
# create x1 column for merging
df.itm.all$`x1` <- df23$`x1`
# name the columns with all item names
colnames(df.itm.all) <- c(itms_all, "x1")
df23 <- df23 %>% left_join(df.itm.all)
# order
df23 <- df23 %>% .[order(.$`x1`), ]
# column types (hablar::convert)
df23 <- df23 %>% convert(chr(x1), num(contains("itm")))
# free up ram
gc()
##### merge ####
data.list <- list(df1, df2, df21, df22, df23)
df.inc106 <- Reduce(function(...) left_join(..., by = "x1"), data.list)
# add year column
df.inc106$year <- as.integer(106)
# remove
rm(df.source, x, df.itm.all, df1, df2, df21, df22, df23, data.list)
# free up memory
gc()
##### factor label and values ######
s <- which(!(code_tbl$level == ""))
lev <- list() #[1-7], a_xx; [8-22], b_xx; [23-24], f; [25-27], c; [28-30], d
lab <- list() #[1-7], a_xx; [8-22], b_xx; [23-24], f; [25-27], c; [28-30], d
for(i in 1:length(s)){
lev[[i]] <- code_tbl$level[s[i]] %>% str_extract_all("[0-9]+") %>% .[[1]]
lab[[i]] <- code_tbl$level[s[i]] %>% str_split("[0-9]+\\. ") %>% .[[1]] %>% .[-1]
}
##### save ######
# .RData
save(df.inc106, file = "AA170042/inc106_rev.RData")
save(code_tbl, file = "AA170042/code_tbl_106.RData")
# .csv format
# write_csv(df.inc106, "inc106.csv", col_names = TRUE, na = "")
# .sas7bdat format
# write_sas(df.inc106, "inc106.sas7bdat")
# .sav format
# write_sav(df.inc106, "inc106.sav", compress = TRUE)
##### time ######
proc.time() - ptm
##### remove all objects ######
# rm(list = ls()) | /106income_CP950.R | no_license | caiyuntingcfrc/misc | R | false | false | 11,431 | r | ##### author: CAI YUN-TING ######
##### The Survey of Family Income and Expenditure, 2017 #####
##### prep and options #####
# set working directory
setwd("D:/R_wd/")
# clear objects
rm(list = ls())
# loading packages
# expss must be loaded after haven
l <- c("tidyverse", "docxtractr", "readtext",
"haven", "expss", "microbenchmark", "hablar")
lapply(l, require, character.only = TRUE)
rm(l)
# options
options(readr.show_progress = TRUE)
# do not show scientific notation
options(scipen = 999)
# timestamp
timestamp <- format(Sys.time(), "%m%d-%H%M")
# processing time
ptm <- proc.time()
##### create the codebook ######
# codebook
# file is in the default working dirctory
path_code <- "AA170042/code106.docx"
code_tbl <- read_docx(path_code) %>% docx_extract_tbl() %>% .[complete.cases(.), ]
# add row: card_num
code_tbl <- rbind(code_tbl, c(NA, "card_num", "#/79-80", NA, NA, NA))
# colnames
colnames(code_tbl) <- c("q_num", "variable", "card_pos", "label", "level", "note")
# variable b1_# - b19_#
code_tbl$`variable`[17:36] <- code_tbl$`variable`[17:36] %>%
str_split("#", simplify = TRUE) %>%
.[ , 1] %>% as.character()
# start
code_tbl$`start` <- code_tbl$`card_pos` %>%
str_split("/", simplify = TRUE) %>%
.[ , 2] %>% str_split("-", simplify = TRUE) %>%
.[ ,1] %>% as.integer()
# end
code_tbl$`end` <- code_tbl$`card_pos` %>%
str_split("/", simplify = TRUE) %>%
.[ , 2] %>% str_split("-", simplify = TRUE) %>%
.[ ,2] %>% as.integer()
# replace NA in `end`
code_tbl$`end` <- with(code_tbl, if_else(is.na(`end`), `start`, `end`))
##### names of item_xxx ######
doc.text.parts <- readtext(path_code)$`text` %>%
strsplit("\n") %>% .[[1]]
doc.items <- grep("*:", doc.text.parts, value = TRUE) %>%
.[-c(1:12, 808:810)]
# item numbers
doc.items.part1 <- strsplit(doc.items, ":") %>%
unlist() %>%
.[2 * (1:length(doc.text.parts)) -1 ] %>%
.[!is.na(.)]
# item contents
doc.items.part2 <- strsplit(doc.items, ":") %>%
unlist() %>%
.[2 * (1:length(doc.text.parts))] %>%
.[!is.na(.)]
##### data processing and manipulation ######
# data raw and card_num
path_dat <- "AA170042/inc106.dat"
df.source <- read_fwf(path_dat, fwf_positions(start = c(1, 79),
end = c(80, 80),
col_names = c("raw", "card_num")
),
# card_num as integer
col_types = cols(card_num = "i", .default = "c")
) %>%
# order by card_num
.[order(.$`card_num`), ]
##### card 01 #####
# filter out card_num == 1
x <- filter(df.source, card_num == 1) %>% .[ ,1] %>% .$`raw`
# define tempfile name and format
y <- tempfile("tp", fileext = ".dat")
# write the tempfile
write(x, file = y)
# read card 1
df1 <- read_fwf(y, fwf_positions(code_tbl$`start`[1:16],
code_tbl$`end`[1:16],
col_names = code_tbl$`variable`[1:16]),
# define column types (variable classes) in df1
col_types = cols(x1 = "c", id = "c",
a4 = "f", a5 = "f", a6 = "n",
a7 = "f", a8 = "n", a9 = "n",
a11 = "f", a12 = "n", a13 = "n",
a16 = "f", a17 = "f", a18 = "f",
a19 = "n", a20 = "n")
) %>%
# order
.[order(.$`x1`), ]
# free up ram
gc()
##### card 02 #####
# card_num 02:20
# function f2
f2 <- function(c, d = c - 1) {
# if input c (card_num) is not in the raw data,
# then create a temp file wiith "000000001" (for merging data),
# which will fill NA.
# matrix with 22 columns c(1, 17:36, 95)
if(c %in% df.source$`card_num`) {
# filter out card_num == 02:20 and create a temporary .dat file
x <- filter(df.source, card_num == c) %>% .[ ,1] %>% .$`raw`
y <- tempfile("tmp", fileext = ".dat")
write(x, file = y)
# read file
tmp <- read_fwf(y, fwf_positions(code_tbl$`start`[c(1, 17:36)],
code_tbl$`end`[c(1, 17:36)])
)
} else {tmp <- matrix(ncol = 21) %>% as_tibble(.name_repair = NULL)
tmp[ , 1] <- "00000001"
}
# name the columns (b1_1, b1_2, b1_3 ......)
# eg. b1_# , card_num == 2, then # == 1, get b1_1 (#: 1:19)
colnames(tmp) <- c("x1", paste(code_tbl$`variable`[17:36], d, sep = ""))
# tmp$card_num <- as.integer(tmp$`card_num`)
return(tmp)
}
# for loop and left_join (dplyr)
# card number = 2:20
df2 <- list()
for(i in 2:20) {
df2[[i - 1]] <- f2(i)
}
# left_joing with reduce
df2 <- Reduce(function(...) left_join(..., by = "x1"), df2)
# column types
# b1_, b4_, b21_, b23_, b25_
df2 <- df2 %>% convert(chr(x1),
chr(contains("b1_")),
num(contains("b4_")),
num(contains("b21_")),
num(contains("b23_")),
num(contains("b25_"))
)
# b2_, b3_ ... (factor)
variables <- colnames(df2)
l <- paste("b", c(2:3, 5, 8:10, 12:20, 22), "_", sep = "") %>%
paste("|", sep = "", collapse = "") %>% paste("b24_", sep = "")
bb <- grep(l, variables)
# mutate_if
df2[ , bb] %<>% mutate_if(is.character, as.factor) %>%
mutate_if(is.numeric, as.factor)
# benchmark
# microbenchmark(a <- Reduce(function(...) left_join(..., by = "x1"), df2), unit = "s")
# microbenchmark(b <- Reduce(function(...) merge(..., by = "x1", all = TRUE), df2), unit = "s")
# free up ram
gc()
##### card 21 #####
# filter out card_num == 21
x <- filter(df.source, card_num == 21) %>% .[ ,1] %>% .$raw
y <- tempfile("tmp", fileext = ".dat")
write(x, file = y)
# code_tbl[37:67]
df21 <- read_fwf(y, fwf_positions(code_tbl$`start`[c(1, 37:67)],
code_tbl$`end`[c(1, 37:67)],
# variable names
col_names = code_tbl$`variable`[c(1, 37:67)]),
# define column types
cols(x1 = "c", f57 = "f", f61 = "f", .default = "n")
) %>%
# order by x1
.[order(.$`x1`), ]
# free up ram
gc()
##### card 22 #####
# filter out card_num == 22
x <- filter(df.source, card_num == 22) %>% .[ ,1] %>% .$raw
y <- tempfile("tmp", fileext = ".dat")
write(x, file = y)
# code_tbl[68:88]
df22 <- read_fwf(y, fwf_positions(code_tbl$`start`[c(1, 68:88)],
code_tbl$`end`[c(1, 68:88)],
# variable names
col_names = code_tbl$`variable`[c(1, 68:88)]),
# define column types
col_types = cols(x1 = "c", c1 = "f", c2 = "f",
c4 = "f", d1 = "f", d5 = "f",
d6 = "f", .default = "n")) %>%
# order by x1
.[order(.$`x1`), ]
# free up ram
gc()
##### card 23-99 ####
# filter out card_num %in% 23:99
x <- filter(df.source, card_num %in% 23:99) %>% .[ ,1] %>% .$raw
y <- tempfile("tmp", fileext = ".dat")
write(x, file = y)
x <- list()
# for loop (5 sections)
for(i in 0:4) {
x[[i + 1]] <- read_fwf(y, fwf_positions(c(1, 9 + i * 14, 13 + i * 14),
c(8, 12 + i * 14, 22 + i * 14),
col_names = c("x1", "item", "exp")),
col_types = cols(x1 = "c", item = "c", exp = "c")
)
df23 <- do.call(bind_rows, x) %>% distinct()
}
# free up ram
gc()
##### list for grep (grepbook) #####
sym <- c("{", grep("[A-R]", LETTERS, value = TRUE), "}")
digi <- c(0:9, 1:9, 0)
positive <- c(rep("+", 10), rep("-", 10))
grepbook <- tibble(sym, digi, positive)
# pattern for grep and gsub
pattern <- grepbook$`sym` %>% .[2:19]
grepbook$pattern <- c("\\{$", sapply(pattern, paste, "$", sep = ""), "\\}$")
##### replace symbols with digits (positive) ####
p <- grepbook$pattern
r <- grepbook$digi
for(i in 1:10) {
# postitive [1:10]
df23$exp[grep(p[i], df23$exp)] <- gsub(pattern = p[i],
replacement = r[i],
x = grep(p[i], df23$exp, value = TRUE))
# negative [11:20]
df23$exp[grep(p[i + 10], df23$exp)] <- gsub(pattern = p[i + 10],
replacement = r[i + 10],
x = grep(p[i + 10], df23$exp, value = TRUE)) %>%
paste("-", ., sep = "")
}
# spread (transpose)
df23 <- df23 %>% spread(key = "item", value = "exp")
# remove column `0000`
df23 <- df23 %>% select( - one_of("0000"))
##### items with no observations #####
# df23(635 variables) but all items are 795
# names of all the items
colnames(df23)[-1] <- colnames(df23)[-1] %>% as.integer() %>% paste("itm", ., sep = "")
itms_all <- doc.items.part1 %>% as.integer() %>% paste("itm" , ., sep = "")
# create a tibble for those who are not in df23
df.itm.all <- matrix("NA", nrow = nrow(df23), ncol = length(itms_all)) %>%
as_tibble(.name_repair = NULL)
# create x1 column for merging
df.itm.all$`x1` <- df23$`x1`
# name the columns with all item names
colnames(df.itm.all) <- c(itms_all, "x1")
df23 <- df23 %>% left_join(df.itm.all)
# order
df23 <- df23 %>% .[order(.$`x1`), ]
# column types (hablar::convert)
df23 <- df23 %>% convert(chr(x1), num(contains("itm")))
# free up ram
gc()
##### merge ####
data.list <- list(df1, df2, df21, df22, df23)
df.inc106 <- Reduce(function(...) left_join(..., by = "x1"), data.list)
# add year column
df.inc106$year <- as.integer(106)
# remove
rm(df.source, x, df.itm.all, df1, df2, df21, df22, df23, data.list)
# free up memory
gc()
##### factor label and values ######
s <- which(!(code_tbl$level == ""))
lev <- list() #[1-7], a_xx; [8-22], b_xx; [23-24], f; [25-27], c; [28-30], d
lab <- list() #[1-7], a_xx; [8-22], b_xx; [23-24], f; [25-27], c; [28-30], d
for(i in 1:length(s)){
lev[[i]] <- code_tbl$level[s[i]] %>% str_extract_all("[0-9]+") %>% .[[1]]
lab[[i]] <- code_tbl$level[s[i]] %>% str_split("[0-9]+\\. ") %>% .[[1]] %>% .[-1]
}
##### save ######
# .RData
save(df.inc106, file = "AA170042/inc106_rev.RData")
save(code_tbl, file = "AA170042/code_tbl_106.RData")
# .csv format
# write_csv(df.inc106, "inc106.csv", col_names = TRUE, na = "")
# .sas7bdat format
# write_sas(df.inc106, "inc106.sas7bdat")
# .sav format
# write_sav(df.inc106, "inc106.sav", compress = TRUE)
##### time ######
proc.time() - ptm
##### remove all objects ######
# rm(list = ls()) |
context("sar_loga")
library(sars)
test_that("sar_loga returns correct results", {
fit <- sar_loga(galap)
expect_equal(round(fit$AICc, 2), 143.78)
expect_equal(as.vector(round(fit$par[2], 2)), 30.28)
expect_is(fit, "sars")
expect_match(fit$normaTest[[1]], "lillie")
expect_error(sar_linear(5), "data must be a matrix or dataframe")
})
test_that("sar_loga summary returns correct results", {
fit <- sar_loga(galap)
fs <- summary(fit)
expect_equal(sum(fs$residuals), 6.439294e-15)
expect_output(str(fs), "List of 16")
expect_is(fs, "summary.sars")
expect_equal(round(fs$normaTest[[2]]$p.value, 3), 0.658)
})
| /data/genthat_extracted_code/sars/tests/test_loga.R | no_license | surayaaramli/typeRrh | R | false | false | 653 | r | context("sar_loga")
library(sars)
test_that("sar_loga returns correct results", {
fit <- sar_loga(galap)
expect_equal(round(fit$AICc, 2), 143.78)
expect_equal(as.vector(round(fit$par[2], 2)), 30.28)
expect_is(fit, "sars")
expect_match(fit$normaTest[[1]], "lillie")
expect_error(sar_linear(5), "data must be a matrix or dataframe")
})
test_that("sar_loga summary returns correct results", {
fit <- sar_loga(galap)
fs <- summary(fit)
expect_equal(sum(fs$residuals), 6.439294e-15)
expect_output(str(fs), "List of 16")
expect_is(fs, "summary.sars")
expect_equal(round(fs$normaTest[[2]]$p.value, 3), 0.658)
})
|
# stopTimer.r
#######################################################
#' stopTimer() stops the timer and returns the time in seconds since start_timer() was launched
#' @keywords timer
#' @export
#' @author Laura Tremblay-Boyer (laura.t.boyer@gmail.com)
#' stopTimer()
stopTimer <- function() print(proc.time()[3] - timer)
| /R/stop_timer.r | no_license | lauratboyer/ltb.utils | R | false | false | 325 | r | # stopTimer.r
#######################################################
#' stopTimer() stops the timer and returns the time in seconds since start_timer() was launched
#' @keywords timer
#' @export
#' @author Laura Tremblay-Boyer (laura.t.boyer@gmail.com)
#' stopTimer()
stopTimer <- function() print(proc.time()[3] - timer)
|
\name{ColPicker}
\alias{ColPicker}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Plot All Named R Colors
%% ~~function to do ... ~~
}
\description{\code{ColPicker()} plots the R-colors in a dense manner and allows to collect colors by using a locator. This can be helpful when some colors should be put together to a palette.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
ColPicker(locator = TRUE, ord = c("hsv", "default"), label = c("text", "hex", "dec"),
mdim = c(38, 12), newwin = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{locator}{logical, defines if the colors can be selected by mouseclick. Default is TRUE.}
\item{ord}{the order of the colors, can be either defined by hsv-value or by the R internal color-number. Default is the latter.
%% ~~Describe \code{ord} here~~
}
\item{label}{label for the colors, can be the colorname (text), the hex-code (#RRGGBB) or the decimal RGB-number
%% ~~Describe \code{label} here~~
}
\item{mdim}{the dimension of the color matrix. Default is 38 rows and 12 columns.}
\item{newwin}{logical, defining if a new graphic device should be used. Default is \code{FALSE}.}
}
\details{
The function plots all the colors but leaves out the grey scales \code{grey()} and \code{gray()}.
}
\author{
Andri Signorell <andri@signorell.net>
%% ~~who you are~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{\link{par}}, \code{\link{colors}}, \code{\link{PlotPch}}
}
\examples{
ColPicker(locator=FALSE, ord="hsv")
ColPicker(locator=FALSE, label="hex")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ utilities }
| /man/ColPicker.Rd | no_license | cran/DescToolsAddIns | R | false | false | 1,814 | rd | \name{ColPicker}
\alias{ColPicker}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Plot All Named R Colors
%% ~~function to do ... ~~
}
\description{\code{ColPicker()} plots the R-colors in a dense manner and allows to collect colors by using a locator. This can be helpful when some colors should be put together to a palette.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
ColPicker(locator = TRUE, ord = c("hsv", "default"), label = c("text", "hex", "dec"),
mdim = c(38, 12), newwin = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{locator}{logical, defines if the colors can be selected by mouseclick. Default is TRUE.}
\item{ord}{the order of the colors, can be either defined by hsv-value or by the R internal color-number. Default is the latter.
%% ~~Describe \code{ord} here~~
}
\item{label}{label for the colors, can be the colorname (text), the hex-code (#RRGGBB) or the decimal RGB-number
%% ~~Describe \code{label} here~~
}
\item{mdim}{the dimension of the color matrix. Default is 38 rows and 12 columns.}
\item{newwin}{logical, defining if a new graphic device should be used. Default is \code{FALSE}.}
}
\details{
The function plots all the colors but leaves out the grey scales \code{grey()} and \code{gray()}.
}
\author{
Andri Signorell <andri@signorell.net>
%% ~~who you are~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{\link{par}}, \code{\link{colors}}, \code{\link{PlotPch}}
}
\examples{
ColPicker(locator=FALSE, ord="hsv")
ColPicker(locator=FALSE, label="hex")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ utilities }
|
library(testthat)
library(nat)
library(httr)
# suppress RGL in headless environments (some CRAN build machines fail otherwise)
if(!interactive())
Sys.setenv(RGL_USE_NULL=TRUE)
# Is internet accessible?
internet.ok=isTRUE(try(url_ok('http://flybrain.mrc-lmb.cam.ac.uk/')))
if(Sys.getenv('NOT_CRAN') == "true" && internet.ok) {
# note that we want to run all tests requiring internet access
Sys.setenv(NAT_INTERNET_TESTS="TRUE")
# Run all test files
test_check("nat")
} else {
# We're on CRAN or flybrain is inacessible, so don't run anything involving
# remote files
Sys.setenv(NAT_INTERNET_TESTS="")
test_check("nat", filter="^[^.]+")
}
| /tests/test-all.R | no_license | cyang-2014/nat | R | false | false | 659 | r | library(testthat)
library(nat)
library(httr)
# suppress RGL in headless environments (some CRAN build machines fail otherwise)
if(!interactive())
Sys.setenv(RGL_USE_NULL=TRUE)
# Is internet accessible?
internet.ok=isTRUE(try(url_ok('http://flybrain.mrc-lmb.cam.ac.uk/')))
if(Sys.getenv('NOT_CRAN') == "true" && internet.ok) {
# note that we want to run all tests requiring internet access
Sys.setenv(NAT_INTERNET_TESTS="TRUE")
# Run all test files
test_check("nat")
} else {
# We're on CRAN or flybrain is inacessible, so don't run anything involving
# remote files
Sys.setenv(NAT_INTERNET_TESTS="")
test_check("nat", filter="^[^.]+")
}
|
# --- Session setup (exploratory QC script) -------------------------------
# NOTE(review): rm(list = ls()) plus a hard-coded absolute setwd() make this
# script machine-specific; an RStudio project or here::here() would be safer.
rm(list=ls())
setwd("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/Tom_Jhou_U01DA044468_Dropbox_copy/U01")
library(readxl)      # excel_sheets()/read_excel() for the master workbook below
library(dplyr)       # %>%, select(), mutate()
library(stringr)     # str_match_all() for letter-suffixed IDs
library(lubridate)   # NOTE(review): not used in this chunk -- confirm it is needed
library(purrr)       # map_lgl() appears only in commented-out code below
library(data.table)  # fread()/rbindlist() inside idqc()
## helpful scratch commands (interactive use only) ##
list.files()
# NOTE(review): `IDs` is never defined in this script; this line errors if the
# file is sourced top-to-bottom -- presumably leftover from an interactive session.
length(IDs[!is.na(IDs)])
####
### obtain a table of all text files
### test one file: read a single raw locomotor output file and reshape it
filetest <- read.table("~/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/tom_jhou_u01/Locomotor/U1/2018-0727-1337_1_LOCOMOTOR_BASIC.txt", header = F, fill = T)
# Rows 7:36 are 30 rows of two columns (named "minute"/"counts" below);
# cell [5,6] presumably holds the animal number, zero-padded into a "Uxx"
# lab ID ("U01".."U09" vs "U10"+). TODO confirm against the file layout.
# NOTE(review): in the FALSE branch, `as.numeric(as.character())` passes
# character(0) as a second argument to as.numeric(); probably meant
# `as.character() %>% as.numeric()` as in the TRUE branch.
filetesttab <- filetest[7:36, 1:2] %>%
mutate("labanimalid" = ifelse(filetest[5,6] %>% as.character() %>% as.numeric() < 10, paste0("U0", filetest[5,6] %>% as.character() %>% as.numeric()), paste0("U", filetest[5,6] %>% as.numeric(as.character()))))
names(filetesttab) <- c("minute", "counts", "labanimalid")
# NOTE(review): this reassignment discards the labanimalid column and the
# names set just above -- confirm whether it is intentional or leftover.
filetesttab <- filetest[7:36, 1:2]
# NOTE(review): `experiment_name` is not defined at the top level (it is only
# a parameter of idqc() below), so this setwd() errors if run as written.
setwd(paste0("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/Tom_Jhou_U01DA044468_Dropbox_copy/U01 folder/", experiment_name,"/"))
# Placeholder; overwritten by the `runway <- idqc("Runway")` call below.
runway <- NA
# idqc(): cross-check the three places an animal ID is recorded for every
# .txt data file under one experiment folder:
#   1. line 8 of the file itself (value after the ":"  -> generatedid),
#   2. the digit chunk embedded in the file name        -> idinfile,
#   3. the "U<digits>" directory containing the file    -> idindir.
#
# Args:
#   experiment_name: subfolder of the Dropbox "U01 folder" to scan,
#                    e.g. "Locomotor" or "Runway".
#
# Returns:
#   A data.table with columns generatedid, filename, idinfile, idindir,
#   restricted to files whose three IDs do not all agree (zero rows means
#   the folder is consistent).
idqc <- function(experiment_name) {
  old_wd <- setwd(paste0("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/Tom_Jhou_U01DA044468_Dropbox_copy/U01 folder/", experiment_name, "/"))
  on.exit(setwd(old_wd), add = TRUE)  # restore the wd even if a read errors
  # "\\.txt$" anchors the extension; the previous pattern ".txt" treated the
  # dot as a regex wildcard and could match unintended files.
  files <- list.files(path = ".", pattern = "\\.txt$",
                      full.names = TRUE, recursive = TRUE)
  # Read only line 8 of one file (the "<label>: <id>" header line) and tag
  # it with the file path. skip/nrows replaces the earlier shell-out to
  # `sed -n 8p`, so no Unix shell is required.
  read_ids <- function(x) {
    hdr <- fread(x, skip = 7, nrows = 1, header = FALSE, sep = ":",
                 fill = TRUE, showProgress = FALSE)
    hdr$id <- x
    hdr
  }
  all_ids <- rbindlist(lapply(files, read_ids), use.names = TRUE, fill = TRUE)
  all_ids <- all_ids %>% select(-V1)  # drop the label to the left of ":"
  colnames(all_ids) <- c("generatedid", "filename")
  # ID as written in the file name: ".../<stem>_<digits>_<rest>"
  all_ids$idinfile <- gsub("(.*_)(\\d+)_.+", "\\2", all_ids$filename)
  # ID from the enclosing directory: ".../U<digits>/<file>"
  all_ids$idindir <- gsub("(.*U)(\\d+)/.+", "\\2", all_ids$filename)
  # Keep only the rows where the three IDs disagree.
  subset(all_ids, generatedid != idindir | generatedid != idinfile |
           idindir != idinfile)
}
# Run the ID consistency check per experiment; "#done" marks experiments whose
# discrepancies have already been resolved.
idqc("Locomotor") #done
idqc("Progressive punishment") #done
idqc("Progressive ratio") #done
runway <- idqc("Runway") # XX 2 col name to 5 col
idqc("Delayed punishment") # done
idqc("Lever training") # XX line 8 is a bigger problem
# getting all letter id cases
# NOTE(review): `files` is local to idqc(), so this line errors unless `files`
# was left in the global env from running the function body interactively.
test <- stringr::str_match_all(files, "U[[:digit:]]+[[:alpha:]]+") %>% unlist() %>% unique()
setwd("~/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/Tom_Jhou_U01DA044468_Dropbox_copy/U01 folder")
mastersheet_path = "U01 Master sheet_readonly.xlsx"
master_sheetnames <- excel_sheets(mastersheet_path)
# Read every sheet of the master workbook into a list of data frames.
# NOTE(review): the list is unnamed, yet later code indexes it by sheet name
# (master_listdf["Locomotor"]); consider setNames(master_listdf,
# master_sheetnames) -- confirm the intended workflow.
master_listdf <- lapply(excel_sheets(mastersheet_path), read_excel, path = mastersheet_path)
# NOTE(review): `f` is undefined -- this line looks truncated/unfinished.
locomotorqc <-f
####### Locomotor ##############
## Master file preparation
# Pull the "Locomotor" sheet out of the master list (see NOTE above about the
# list being unnamed -- this indexing presumably relies on names being set).
Locomotor <- master_listdf["Locomotor"] %>% as.data.frame()
# Locomotor_sans_NA_cols <- Locomotor[!map_lgl(Locomotor, ~ all(is.na(.)))] # remove all "completely na" columns
# Locomotor_remake <- data.frame("animallabid" = grep("U\\d", Locomotor_sans_NA_cols$Locomotor.Program..Locomotor.Basic., value = T))
# namevector <- c(paste0("bincounts", as.character(1:30)), "notes", "bincountsearly", "bincountslate")
# Locomotor_remake[ , namevector] <- NA
# NOTE(review): `Locomotor_sans_NA_cols` is only created in the commented-out
# line above, so this statement errors unless that line was run interactively.
counts <- Locomotor_sans_NA_cols[which(grepl("^Binned", Locomotor_sans_NA_cols$Locomotor.Program..Locomotor.Basic.)),1:34]
counts_test <- counts
rownames(counts_test) <- seq(length=nrow(counts_test)) #reset rownumbering
names(counts_test) <- c("animallabid", paste0("bincounts", as.character(1:30)), "notes", "bincountsearly", "bincountslate")
# make into while and for loop
## COME BACK TO THIS LATER [PICK UP]
names(counts_test) <- c("value", paste0("bincounts", as.character(1:30)), "notes", "bincountsearly", "bincountslate")
counts_test$animallabid <- NA
# Walk down the rows assigning the current animal ID (`unames[j]`), advancing
# to the next ID whenever a new "Binned...Counts" header row is hit.
# NOTE(review): `unames` is not defined in this script, and the repeat loop has
# no break -- it only terminates when `i` runs past the last row and the
# assignment errors. Confirm this is the intended (fragile) stopping behavior.
i <- 1
j <- 1
repeat {
counts_test$animallabid[i] <- unames[j]
i = i + 1
if (grepl("Counts$", counts_test$value[i]) | grepl("Counts[1][ab]?$", counts_test$value[i])){
j = j + 1
}
}
# Hard-coded row ranges below are tied to this specific data export (animals
# with repeated sessions get 2, 4 or 5 rows each); they must be re-derived if
# the master sheet changes.
counts_test$animallabid[1:296] <- paste0(unames[1:296], "_", counts_test$animallabid[1:296])
counts_test$animallabid[297:300] <- paste0(rep(c(unames[297:298]), each = 2), "_", counts_test$animallabid[297:300])
counts_test$animallabid[301:302] <- paste0(unames[299:300], "_", counts_test$animallabid[301:302])
counts_test$animallabid[303:338] <- paste0(rep(c(unames[301:318]), each = 2), "_", counts_test$animallabid[303:338])
counts_test$animallabid[339:343] <- paste0(rep(c(unames[319]), times = 5), "_", counts_test$animallabid[339:343])
counts_test$animallabid[344:353] <- paste0(rep(c(unames[320:324]), each = 2), "_", counts_test$animallabid[344:353])
counts_test$animallabid[354:361] <- paste0(rep(c(unames[325:326]), each = 4), "_", counts_test$animallabid[354:361])
counts_test$animallabid[362:365] <- paste0(rep(c(unames[327:328]), each = 2), "_", counts_test$animallabid[362:365])
counts_test$animallabid[366:373] <- paste0(rep(c(unames[329:330]), each = 4), "_", counts_test$animallabid[366:373])
counts_test$animallabid[374:377] <- paste0(rep(c(unames[331:332]), each = 2), "_", counts_test$animallabid[374:377])
counts_test$animallabid[378:385] <- paste0(rep(c(unames[333:334]), each = 4), "_", counts_test$animallabid[378:385])
counts_test$animallabid[386:387] <- paste0(rep(c(unames[335]), times = 2), "_", counts_test$animallabid[386:387])
counts_test$animallabid[388:415] <- paste0(rep(c(unames[336:342]), each = 4), "_", counts_test$animallabid[388:415])
counts_test$animallabid[416:417] <- paste0(rep(c(unames[343]), times = 2), "_", counts_test$animallabid[416:417])
counts_test$animallabid[418:422] <- paste0(rep(c(unames[344]), times = 5), "_", counts_test$animallabid[418:422])
counts_test$animallabid[423:630] <- paste0(rep(c(unames[345:396]), each = 4), "_", counts_test$animallabid[423:630])
# Split the "Uxx_value" strings back into id + value columns.
# NOTE(review): separate() is tidyr, which is not attached in this script --
# confirm it is loaded elsewhere in the session.
counts_test2 <- counts_test %>%
separate(animallabid, c("animallabid","value"), sep = "_")
# it really worked
## for above code: used grepl("Counts$",counts$animallabid)
# Locomotor_sans_NA_cols$Locomotor.Program..Locomotor.Basic.[which(grepl("1a$", Locomotor_sans_NA_cols$Locomotor.Program..Locomotor.Basic.))-1] + U339 and 368 (STDEV bc they have Counts1, 1a, 1b, etc.)
## Text file preparation
####### Progressive Punishment ##############
## Master file preparation
ProgressivePunishment <- master_listdf["Progressive Punishment"] %>% as.data.frame()
# Keep only data rows: as.numeric() turns ID/header rows into NA (grepl is
# FALSE for NA), so "^\\d" matches only rows whose first cell is a number.
shocks <- ProgressivePunishment[which(grepl("^\\d", as.numeric(ProgressivePunishment[,1]))), c(1, 3, 7:11)]
names(shocks) <- c("session", "date", "lastcompletedintensity", "lastattempedintensity","numberoftrialsatlastshockintensity","attemptedactivepresses","attemptedinactivepresses")
# shocks$session <- factor(shocks$session, ordered = T)
shocks_test <- shocks
shocks_test$session <- as.numeric(shocks_test$session)
# Excel serial date -> POSIX datetime.
shocks_test$date <- openxlsx::convertToDateTime(shocks$date)
rownames(shocks_test) <- seq(length=nrow(shocks_test)) #reset rownumbering
shocksunames <- grep("^U", ProgressivePunishment$Progressive.Punishment....1, value = T)
shocks_test$animallabid <- NA
# Assign animal IDs by walking the rows; a session number that resets to 0
# marks the start of the next animal's block.
# NOTE(review): like the Locomotor loop, this repeat has no break and only
# terminates via an out-of-range error once `i` passes the last row -- confirm.
i <- 1
j <- 1
repeat {
shocks_test$animallabid[i] <- shocksunames[j]
i = i + 1
if (shocks_test$session[i] < shocks_test$session[i-1] & shocks_test$session[i] == 0){
j = j + 1
}
}
## LOOK BACK TO HT PROTOCOL FOR DEFINITION OF NUMBER OF TRIALS AT LAST SHOCK INTENSITY
# Sample raw food-conflict text file for inspecting the format.
txt1 <- read.table("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/Tom_Jhou_U01DA044468_Dropbox_copy/U01 folder/Progressive punishment/U1/2018-0719-1450_1_FOOD CONFLICT.txt", blank.lines.skip = F)
sub('.*?(\\w+)\\W+\\w+\\W*?$', '\\1', string) | /Bonnie's Codes/QC/QC_ID.R | no_license | bonnfire/Jhou_U01DA044468 | R | false | false | 7,386 | r | rm(list=ls())
setwd("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/Tom_Jhou_U01DA044468_Dropbox_copy/U01")
library(readxl)
library(dplyr)
library(stringr)
library(lubridate)
library(purrr)
library(data.table)
## helpful ##
list.files()
length(IDs[!is.na(IDs)])
####
### obtain a table of all text files
### test one file
filetest <- read.table("~/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/tom_jhou_u01/Locomotor/U1/2018-0727-1337_1_LOCOMOTOR_BASIC.txt", header = F, fill = T)
filetesttab <- filetest[7:36, 1:2] %>%
mutate("labanimalid" = ifelse(filetest[5,6] %>% as.character() %>% as.numeric() < 10, paste0("U0", filetest[5,6] %>% as.character() %>% as.numeric()), paste0("U", filetest[5,6] %>% as.numeric(as.character()))))
names(filetesttab) <- c("minute", "counts", "labanimalid")
filetesttab <- filetest[7:36, 1:2]
###
setwd(paste0("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/Tom_Jhou_U01DA044468_Dropbox_copy/U01 folder/", experiment_name,"/"))
runway <- NA
# Cross-check animal IDs for one experiment's MedPC text files.
#
# For every .txt file under the experiment's directory, line 8 (which holds
# the software-generated subject ID) is extracted with `sed` and compared
# against the ID embedded in the file name and the ID encoded in the parent
# directory name ("U<digits>").
#
# @param experiment_name Name of the experiment subdirectory, e.g. "Locomotor".
# @return A data.table of files whose three ID sources disagree (columns:
#   generatedid, filename, idinfile, idindir). Zero rows means all consistent.
idqc <- function(experiment_name){
  # Work inside the experiment directory so paths passed to sed stay relative;
  # restore the caller's working directory when the function exits.
  oldwd <- setwd(paste0("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/Tom_Jhou_U01DA044468_Dropbox_copy/U01 folder/", experiment_name,"/"))
  on.exit(setwd(oldwd), add = TRUE)
  # `pattern` is a regex: the original ".txt" matched any character followed by
  # "txt" anywhere in the name; anchor it so only *.txt files are picked up.
  files <- list.files(path=".", pattern="\\.txt$", full.names=TRUE, recursive=TRUE)
  # Read only line 8 of a file (sed -n 8p) and tag the row with the file path.
  read_ids <- function(x){
    data <- fread(paste0("sed -n 8p ","'",x,"'"), header=F, fill=T, sep=":", showProgress = F)
    data$id <- x
    data
  }
  all <- lapply(files, read_ids)
  all_test <- rbindlist(all, use.names = T, fill = T)
  # Drop the "Subject:"-style label column, keep the generated ID + file path.
  all_test2 <- all_test %>% select(-V1)
  colnames(all_test2) <- c("generatedid", "filename")
  # ID as written in the file name: the digits between the final "_" pair.
  all_test2$idinfile <- gsub("(.*_)(\\d+)_.+", "\\2", all_test2$filename)
  # ID as encoded in the parent directory name ("U<digits>/").
  all_test2$idindir <- gsub("(.*U)(\\d+)/.+", "\\2", all_test2$filename)
  # Keep only rows where any pair of the three ID sources disagrees.
  subset(all_test2, generatedid != idindir | generatedid != idinfile | idindir != idinfile)
}
idqc("Locomotor") #done
idqc("Progressive punishment") #done
idqc("Progressive ratio") #done
runway <- idqc("Runway") # XX 2 col name to 5 col
idqc("Delayed punishment") # done
idqc("Lever training") # XX line 8 is a bigger problem
# getting all letter id cases
test <- stringr::str_match_all(files, "U[[:digit:]]+[[:alpha:]]+") %>% unlist() %>% unique()
setwd("~/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/Tom_Jhou_U01DA044468_Dropbox_copy/U01 folder")
mastersheet_path = "U01 Master sheet_readonly.xlsx"
master_sheetnames <- excel_sheets(mastersheet_path)
master_listdf <- lapply(excel_sheets(mastersheet_path), read_excel, path = mastersheet_path)
locomotorqc <-f
####### Locomotor ##############
## Master file preparation
Locomotor <- master_listdf["Locomotor"] %>% as.data.frame()
# Locomotor_sans_NA_cols <- Locomotor[!map_lgl(Locomotor, ~ all(is.na(.)))] # remove all "completely na" columns
# Locomotor_remake <- data.frame("animallabid" = grep("U\\d", Locomotor_sans_NA_cols$Locomotor.Program..Locomotor.Basic., value = T))
# namevector <- c(paste0("bincounts", as.character(1:30)), "notes", "bincountsearly", "bincountslate")
# Locomotor_remake[ , namevector] <- NA
counts <- Locomotor_sans_NA_cols[which(grepl("^Binned", Locomotor_sans_NA_cols$Locomotor.Program..Locomotor.Basic.)),1:34]
counts_test <- counts
rownames(counts_test) <- seq(length=nrow(counts_test)) #reset rownumbering
names(counts_test) <- c("animallabid", paste0("bincounts", as.character(1:30)), "notes", "bincountsearly", "bincountslate")
# make into while and for loop
## COME BACK TO THIS LATER [PICK UP]
names(counts_test) <- c("value", paste0("bincounts", as.character(1:30)), "notes", "bincountsearly", "bincountslate")
counts_test$animallabid <- NA
i <- 1
j <- 1
repeat {
counts_test$animallabid[i] <- unames[j]
i = i + 1
if (grepl("Counts$", counts_test$value[i]) | grepl("Counts[1][ab]?$", counts_test$value[i])){
j = j + 1
}
}
counts_test$animallabid[1:296] <- paste0(unames[1:296], "_", counts_test$animallabid[1:296])
counts_test$animallabid[297:300] <- paste0(rep(c(unames[297:298]), each = 2), "_", counts_test$animallabid[297:300])
counts_test$animallabid[301:302] <- paste0(unames[299:300], "_", counts_test$animallabid[301:302])
counts_test$animallabid[303:338] <- paste0(rep(c(unames[301:318]), each = 2), "_", counts_test$animallabid[303:338])
counts_test$animallabid[339:343] <- paste0(rep(c(unames[319]), times = 5), "_", counts_test$animallabid[339:343])
counts_test$animallabid[344:353] <- paste0(rep(c(unames[320:324]), each = 2), "_", counts_test$animallabid[344:353])
counts_test$animallabid[354:361] <- paste0(rep(c(unames[325:326]), each = 4), "_", counts_test$animallabid[354:361])
counts_test$animallabid[362:365] <- paste0(rep(c(unames[327:328]), each = 2), "_", counts_test$animallabid[362:365])
counts_test$animallabid[366:373] <- paste0(rep(c(unames[329:330]), each = 4), "_", counts_test$animallabid[366:373])
counts_test$animallabid[374:377] <- paste0(rep(c(unames[331:332]), each = 2), "_", counts_test$animallabid[374:377])
counts_test$animallabid[378:385] <- paste0(rep(c(unames[333:334]), each = 4), "_", counts_test$animallabid[378:385])
counts_test$animallabid[386:387] <- paste0(rep(c(unames[335]), times = 2), "_", counts_test$animallabid[386:387])
counts_test$animallabid[388:415] <- paste0(rep(c(unames[336:342]), each = 4), "_", counts_test$animallabid[388:415])
counts_test$animallabid[416:417] <- paste0(rep(c(unames[343]), times = 2), "_", counts_test$animallabid[416:417])
counts_test$animallabid[418:422] <- paste0(rep(c(unames[344]), times = 5), "_", counts_test$animallabid[418:422])
counts_test$animallabid[423:630] <- paste0(rep(c(unames[345:396]), each = 4), "_", counts_test$animallabid[423:630])
counts_test2 <- counts_test %>%
separate(animallabid, c("animallabid","value"), sep = "_")
# it really worked
## for above code: used grepl("Counts$",counts$animallabid)
# Locomotor_sans_NA_cols$Locomotor.Program..Locomotor.Basic.[which(grepl("1a$", Locomotor_sans_NA_cols$Locomotor.Program..Locomotor.Basic.))-1] + U339 and 368 (STDEV bc they have Counts1, 1a, 1b, etc.)
## Text file preparation
####### Progressive Punishment ##############
## Master file preparation
ProgressivePunishment <- master_listdf["Progressive Punishment"] %>% as.data.frame()
shocks <- ProgressivePunishment[which(grepl("^\\d", as.numeric(ProgressivePunishment[,1]))), c(1, 3, 7:11)]
names(shocks) <- c("session", "date", "lastcompletedintensity", "lastattempedintensity","numberoftrialsatlastshockintensity","attemptedactivepresses","attemptedinactivepresses")
# shocks$session <- factor(shocks$session, ordered = T)
shocks_test <- shocks
shocks_test$session <- as.numeric(shocks_test$session)
shocks_test$date <- openxlsx::convertToDateTime(shocks$date)
rownames(shocks_test) <- seq(length=nrow(shocks_test)) #reset rownumbering
shocksunames <- grep("^U", ProgressivePunishment$Progressive.Punishment....1, value = T)
shocks_test$animallabid <- NA
i <- 1
j <- 1
repeat {
shocks_test$animallabid[i] <- shocksunames[j]
i = i + 1
if (shocks_test$session[i] < shocks_test$session[i-1] & shocks_test$session[i] == 0){
j = j + 1
}
}
## LOOK BACK TO HT PROTOCOL FOR DEFINITION OF NUMBER OF TRIALS AT LAST SHOCK INTENSITY
txt1 <- read.table("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/Tom_Jhou_U01DA044468_Dropbox_copy/U01 folder/Progressive punishment/U1/2018-0719-1450_1_FOOD CONFLICT.txt", blank.lines.skip = F)
sub('.*?(\\w+)\\W+\\w+\\W*?$', '\\1', string) |
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{dpdata}
\alias{dpdata}
\title{Retrieve the data of a resource}
\usage{
dpdata(meta, resource = 1)
}
\arguments{
\item{meta}{a \code{datapackage} object or a \code{\link{dpresource}} object
in which case the \code{resource} argument is ignored.}
\item{resource}{an identifier of the resource. This can be a numeric
index, or a character with the name of the resource.}
}
\value{
A \code{data.frame} containing the data of the resource.
}
\description{
Retrieve the data of a resource
}
\details{
Currently the following types are supported:
\describe{
\item{string}{a string (of arbitrary length)}
\item{number}{a number including floating point numbers.}
\item{integer}{an integer.}
\item{date}{a date. This MUST be in ISO 8601 format YYYY-MM-DD or, if not, a
format field must be provided describing the structure.}
\item{datetime}{a date-time. This MUST be in ISO 8601 format of
YYYY-MM-DDThh:mm:ssZ in UTC time or, if not, a format field must be
provided.}
\item{boolean}{a boolean value (1/0, true/false).}
}
}
| /man/dpdata.Rd | no_license | djvanderlaan/datapackage | R | false | false | 1,101 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{dpdata}
\alias{dpdata}
\title{Retrieve the data of a resource}
\usage{
dpdata(meta, resource = 1)
}
\arguments{
\item{meta}{a \code{datapackage} object or a \code{\link{dpresource}} object
in which case the \code{resource} argument is ignored.}
\item{resource}{an identifier of the resource. This can be a numeric
index, or a character with the name of the resource.}
}
\value{
A \code{data.frame} containing the data of the resource.
}
\description{
Retrieve the data of a resource
}
\details{
Currently the following types are supported:
\describe{
\item{string}{a string (of arbitrary length)}
\item{number}{a number including floating point numbers.}
\item{integer}{an integer.}
\item{date}{a date. This MUST be in ISO 8601 format YYYY-MM-DD or, if not, a
format field must be provided describing the structure.}
\item{datetime}{a date-time. This MUST be in ISO 8601 format of
YYYY-MM-DDThh:mm:ssZ in UTC time or, if not, a format field must be
provided.}
\item{boolean}{a boolean value (1/0, true/false).}
}
}
|
# Map of golden king crab (GKC) bycatch during the Tanner crab survey.
# NOTE(review): %>% and clean_names() come from dplyr/janitor, which are not
# attached in this snippet -- presumably loaded elsewhere; confirm. rgdal and
# maptools are retired from CRAN, and get_stamenmap() relies on the
# now-discontinued Stamen tile service -- confirm these still work.
library(leaflet)
library(rgdal)
library(sp)
library(ggmap)
library(maptools)
#Import survey and fishery data
read.csv("data/fishery/gkc_logbook.csv") %>%
clean_names() -> gkc_log
read.csv("data/survey/tanner_survey_specimen.csv") %>% #data pre-filtered for GKC in OceanAK
clean_names() -> bio_survey
read.csv("data/survey/tanner_survey_pot.csv") %>%
clean_names() -> pot_survey
read.csv("data/survey/tanner_survey_pot_and_specimen.csv") %>%
clean_names() -> pot_bio_survey
#Summarize and join tables
# Total crab caught per pot (by year/location/species).
bio_survey %>%
group_by(year, location, species, pot_no) %>%
summarise(total_crab = sum(number_of_specimens))-> bio_summary
#rename(bio_summary, year = i_year) -> bio_summary
target <-c(2013:2021) #removes current year in map
# Attach pot coordinates to the per-pot crab totals; pots with no GKC keep
# NA in total_crab via the left join.
left_join(pot_survey, bio_summary, by = c("year", "location", "pot_no")) %>%
select(year, location, pot_no, latitude_decimal_degrees,
longitude_decimal_degrees, species, total_crab) %>%
filter(year %in% target) -> gkc_survey
#Import stat-area shapefiles
# NOTE(review): stat_area_df is not used below in this snippet.
readOGR("data/shape_files/cf_SE_stats_area.shp") -> stat_area
fortify(stat_area) -> stat_area_df
#Using Tanner pot and specimen data combined from OceanAK
#Set location for Holkham Bay
# Bounding box as c(left, bottom, right, top) in decimal degrees.
hlk_bay <- c(-133.7750, 57.6739, -133.4, 57.8354)
# NOTE(review): pot_survey_summary is computed but the plot below uses the
# unfiltered gkc_survey (clipped visually by the bbox) -- confirm which data
# set the figure is meant to show.
pot_bio_survey %>%
filter(year %in% target,
location == "Holkham Bay",
number_of_specimens != 0) -> pot_survey_summary
# Faceted-by-year bubble map of crab counts over a greyscale terrain basemap.
ggmap(get_stamenmap(bbox = hlk_bay,
maptype = "terrain",
color = "bw",
force = TRUE)) +
geom_point(data = gkc_survey,
aes(x = longitude_decimal_degrees,
y = latitude_decimal_degrees,
size = total_crab,
color = total_crab),
alpha = 0.6) +
scale_size_continuous(range = c(0.5, 11), "no. of crab") +
scale_color_continuous(type = "viridis", "no. of crab") +
#scale_color_viridis_d("no. of crab") +
ylab("Latitude (Decimal Degrees)") +
xlab("Longitude (Decimal Degrees)") +
labs(title ="Holkham Bay",
subtitle = "Number of GKC caught during the Tanner survey") +
facet_wrap(~year, ncol = 3) +
theme(legend.position = "bottom",
strip.background = element_blank())
# NOTE(review): `fig_path` is not defined in this snippet -- confirm it is set
# elsewhere before ggsave() runs.
ggsave(paste0(fig_path, '/holkham_bay_gkc_survey_bycatch.png'),
width = 9, height = 12, units = "in", dpi = 200)
| /r/maps.R | no_license | apolson8/seak_gkc | R | false | false | 2,367 | r | library(leaflet)
library(rgdal)
library(sp)
library(ggmap)
library(maptools)
#Import survey and fishery data
read.csv("data/fishery/gkc_logbook.csv") %>%
clean_names() -> gkc_log
read.csv("data/survey/tanner_survey_specimen.csv") %>% #data pre-filtered for GKC in OceanAK
clean_names() -> bio_survey
read.csv("data/survey/tanner_survey_pot.csv") %>%
clean_names() -> pot_survey
read.csv("data/survey/tanner_survey_pot_and_specimen.csv") %>%
clean_names() -> pot_bio_survey
#Summarize and join tables
bio_survey %>%
group_by(year, location, species, pot_no) %>%
summarise(total_crab = sum(number_of_specimens))-> bio_summary
#rename(bio_summary, year = i_year) -> bio_summary
target <-c(2013:2021) #removes current year in map
left_join(pot_survey, bio_summary, by = c("year", "location", "pot_no")) %>%
select(year, location, pot_no, latitude_decimal_degrees,
longitude_decimal_degrees, species, total_crab) %>%
filter(year %in% target) -> gkc_survey
#Import stat-area shapefiles
readOGR("data/shape_files/cf_SE_stats_area.shp") -> stat_area
fortify(stat_area) -> stat_area_df
#Using Tanner pot and specimen data combined from OceanAK
#Set location for Holkham Bay
hlk_bay <- c(-133.7750, 57.6739, -133.4, 57.8354)
pot_bio_survey %>%
filter(year %in% target,
location == "Holkham Bay",
number_of_specimens != 0) -> pot_survey_summary
ggmap(get_stamenmap(bbox = hlk_bay,
maptype = "terrain",
color = "bw",
force = TRUE)) +
geom_point(data = gkc_survey,
aes(x = longitude_decimal_degrees,
y = latitude_decimal_degrees,
size = total_crab,
color = total_crab),
alpha = 0.6) +
scale_size_continuous(range = c(0.5, 11), "no. of crab") +
scale_color_continuous(type = "viridis", "no. of crab") +
#scale_color_viridis_d("no. of crab") +
ylab("Latitude (Decimal Degrees)") +
xlab("Longitude (Decimal Degrees)") +
labs(title ="Holkham Bay",
subtitle = "Number of GKC caught during the Tanner survey") +
facet_wrap(~year, ncol = 3) +
theme(legend.position = "bottom",
strip.background = element_blank())
ggsave(paste0(fig_path, '/holkham_bay_gkc_survey_bycatch.png'),
width = 9, height = 12, units = "in", dpi = 200)
|
# Manuscript: Epigenetic and transcriptomic characterization reveals progression markers and essential pathways in clear cell renal cell carcinoma
# Author: Yige Wu
# Description: UMAP illustration of the tumor-cell clusters for four tumor samples, colored by the cluster name
# Section: Results - Transcriptome-based tumor-cell subclusters may represent genomically distinct subclones
#=======================================================================================
# Load libraries and set working directory -----------------------------------
## load libraries
# Packages required by this figure script.
packages = c(
"data.table",
"stringr",
"plyr",
"dplyr",
"ggplot2",
"RColorBrewer",
"ggrastr",
"Polychrome"
)
# Install any missing package from CRAN, falling back to Bioconductor when the
# CRAN install did not succeed, then attach it.
# NOTE(review): installed.packages() is slow and discouraged for availability
# checks; requireNamespace(pkg, quietly = TRUE) is the recommended test.
for (pkg_name_tmp in packages) {
if (!(pkg_name_tmp %in% installed.packages()[,1])) {
install.packages(pkg_name_tmp, dependencies = T)
}
if (!(pkg_name_tmp %in% installed.packages()[,1])) {
if (!requireNamespace("BiocManager", quietly=TRUE))
install.packages("BiocManager")
BiocManager::install(pkg_name_tmp)
}
library(package = pkg_name_tmp, character.only = T)
}
## set working directory to current file location
# NOTE(review): rstudioapi only works inside RStudio; this line fails under
# Rscript or a plain R session.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# pre-process -------------------------------------------------------------------
# The commented-out block below documents how the per-sample plot-data TSVs
# (read in the loop further down) were originally derived; it is retained for
# provenance only.
# ## barcode-UMAP info for tumor clusters
# barcode2umap_df <- fread(data.table = F, input = "../../data/MetaData_TumorCellOnlyReclustered.20210805.v1.tsv.gz")
# ## barcode to tumor subcluster assignment
# barcode2tumorsubcluster_df <- fread(input = "../../data/Barcode2TumorSubclusterId.20210805.v1.tsv.gz", data.table = F)
# ## scrublet information
# barcode2scrublet_df <- fread(input = "../../data/scrublet.united_outputs.20210729.v1.tsv.gz", data.table = F)
# ## merge data
# barcode2umap_df <- merge(x = barcode2umap_df, y = barcode2tumorsubcluster_df,
# by.x = c("easy_id", "barcode_tumorcellreclustered"),
# by.y = c("easy_id", "barcode"),
# all.x = T)
# barcode2umap_df <- barcode2umap_df %>%
# filter(easy_id != "C3L-00359-T1")
# plot by each sample ----------------------------------------------------
## make different output files
dir_out <- paste0("../../outputs/"); dir.create(dir_out)
dir_out_now <- paste0(dir_out, "F4b_UMAP_tumorclusters", "/")
dir.create(dir_out_now)
# Fixed palette: up to 24 distinct cluster colors.
# NOTE(review): a sample with more than 24 non-minor clusters would index past
# the palette and get NA colors -- confirm cluster counts stay within 24.
colors_all <- Polychrome::dark.colors(n = 24)
# One PDF per tumor sample: UMAP of tumor cells colored by manual cluster.
for (easy_id_tmp in c("C3L-00010-T1", "C3L-00096-T1", "C3L-00079-T1", "C3L-00583-T1")) {
# for (easy_id_tmp in unique(barcode2umap_df$easy_id)) {
# ## make plot data
# scrublets_df <- barcode2scrublet_df %>%
# filter(Aliquot_WU == easy_id_tmp) %>%
# filter(predicted_doublet)
# barcodes_doublet <- scrublets_df$Barcode; length(barcodes_doublet)
#
# plot_data_df <- barcode2umap_df %>%
# filter(easy_id == easy_id_tmp) %>%
# filter(!(barcode_tumorcellreclustered %in% barcodes_doublet)) %>%
# mutate(Name_TumorCluster = paste0("C", id_manual_cluster_w0+1))
# cellnumber_percluster_df <- plot_data_df %>%
# select(Name_TumorCluster) %>%
# table() %>%
# as.data.frame() %>%
# rename(Name_TumorCluster = ".")
# plot_data_df <- plot_data_df %>%
# mutate(Name_TumorCluster = ifelse(Name_TumorCluster == "CNA" | Name_TumorCluster %in% cellnumber_percluster_df$Name_TumorCluster[cellnumber_percluster_df$Freq < 50], "Minor cluster (<50 cells)", Name_TumorCluster)) %>%
# select(UMAP_1, UMAP_2, Name_TumorCluster)
# ## save plot data
# write.table(x = plot_data_df, file = paste0("../../plot_data/F4b.", easy_id_tmp, ".SourceData.tsv"), quote = F, sep = "\t", row.names = F)
## input plot data
# Columns: UMAP_1, UMAP_2, Name_TumorCluster (see derivation above).
plot_data_df <- fread(data.table = F, file = paste0("../../plot_data/F4b.", easy_id_tmp, ".SourceData.tsv"))
## make color for each cluster
names_cluster_tmp <- sort(unique(plot_data_df$Name_TumorCluster))
length_clusters <- length(names_cluster_tmp)
# Reserve grey for the aggregated "Minor cluster" category when present.
if("Minor cluster (<50 cells)" %in% names_cluster_tmp) {
uniq_cluster_colors <- c(colors_all[1:(length_clusters-1)], "grey40")
} else {
uniq_cluster_colors <-colors_all[1:length_clusters]
}
names(uniq_cluster_colors) <- names_cluster_tmp
## make plot
# geom_point_rast rasterizes the point layer so the PDF stays small.
p <- ggplot()
p <- p + geom_point_rast(data = plot_data_df, mapping = aes(x = UMAP_1, y = UMAP_2, color = Name_TumorCluster), shape = 16, alpha = 0.8, size = 1)
p <- p + scale_color_manual(values = uniq_cluster_colors, na.translate = T)
p <- p + ggtitle(label = easy_id_tmp, subtitle = "Manually grouped tumor clusters")
# p <- p + ggtitle(label = easy_id_tmp, subtitle = "Original analysis")
p <- p + guides(colour = guide_legend(override.aes = list(size=3), title = NULL, nrow = 2, label.theme = element_text(size = 20)))
p <- p + theme_void()
p <- p + theme(legend.position = "bottom")
p <- p + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), title = element_text(size = 25))
# save output -------------------------------------------------------------
file2write <- paste0(dir_out_now, easy_id_tmp, ".pdf")
pdf(file2write, width = 5, height = 5, useDingbats = F)
print(p)
dev.off()
}
| /R/4_Tumorclusters/4b_UMAP_tumorclusters.R | permissive | yigewu/ccRCC_sn_publication | R | false | false | 5,202 | r | # Manuscript: Epigenetic and transcriptomic characterization reveals progression markers and essential pathways in clear cell renal cell carcinoma
# Author: Yige Wu
# Description: UMAP illustration of the tumor-cell clusters for four tumor samples, colored by the cluster name
# Section: Results - Transcriptome-based tumor-cell subclusters may represent genomically distinct subclones
#=======================================================================================
# Load libraries and set working directory -----------------------------------
## load libraries
packages = c(
"data.table",
"stringr",
"plyr",
"dplyr",
"ggplot2",
"RColorBrewer",
"ggrastr",
"Polychrome"
)
for (pkg_name_tmp in packages) {
if (!(pkg_name_tmp %in% installed.packages()[,1])) {
install.packages(pkg_name_tmp, dependencies = T)
}
if (!(pkg_name_tmp %in% installed.packages()[,1])) {
if (!requireNamespace("BiocManager", quietly=TRUE))
install.packages("BiocManager")
BiocManager::install(pkg_name_tmp)
}
library(package = pkg_name_tmp, character.only = T)
}
## set working directory to current file location
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# pre-process -------------------------------------------------------------------
# ## barcode-UMAP info for tumor clusters
# barcode2umap_df <- fread(data.table = F, input = "../../data/MetaData_TumorCellOnlyReclustered.20210805.v1.tsv.gz")
# ## barcode to tumor subcluster assignment
# barcode2tumorsubcluster_df <- fread(input = "../../data/Barcode2TumorSubclusterId.20210805.v1.tsv.gz", data.table = F)
# ## scrublet information
# barcode2scrublet_df <- fread(input = "../../data/scrublet.united_outputs.20210729.v1.tsv.gz", data.table = F)
# ## merge data
# barcode2umap_df <- merge(x = barcode2umap_df, y = barcode2tumorsubcluster_df,
# by.x = c("easy_id", "barcode_tumorcellreclustered"),
# by.y = c("easy_id", "barcode"),
# all.x = T)
# barcode2umap_df <- barcode2umap_df %>%
# filter(easy_id != "C3L-00359-T1")
# plot by each sample ----------------------------------------------------
## make different output files
dir_out <- paste0("../../outputs/"); dir.create(dir_out)
dir_out_now <- paste0(dir_out, "F4b_UMAP_tumorclusters", "/")
dir.create(dir_out_now)
colors_all <- Polychrome::dark.colors(n = 24)
for (easy_id_tmp in c("C3L-00010-T1", "C3L-00096-T1", "C3L-00079-T1", "C3L-00583-T1")) {
# for (easy_id_tmp in unique(barcode2umap_df$easy_id)) {
# ## make plot data
# scrublets_df <- barcode2scrublet_df %>%
# filter(Aliquot_WU == easy_id_tmp) %>%
# filter(predicted_doublet)
# barcodes_doublet <- scrublets_df$Barcode; length(barcodes_doublet)
#
# plot_data_df <- barcode2umap_df %>%
# filter(easy_id == easy_id_tmp) %>%
# filter(!(barcode_tumorcellreclustered %in% barcodes_doublet)) %>%
# mutate(Name_TumorCluster = paste0("C", id_manual_cluster_w0+1))
# cellnumber_percluster_df <- plot_data_df %>%
# select(Name_TumorCluster) %>%
# table() %>%
# as.data.frame() %>%
# rename(Name_TumorCluster = ".")
# plot_data_df <- plot_data_df %>%
# mutate(Name_TumorCluster = ifelse(Name_TumorCluster == "CNA" | Name_TumorCluster %in% cellnumber_percluster_df$Name_TumorCluster[cellnumber_percluster_df$Freq < 50], "Minor cluster (<50 cells)", Name_TumorCluster)) %>%
# select(UMAP_1, UMAP_2, Name_TumorCluster)
# ## save plot data
# write.table(x = plot_data_df, file = paste0("../../plot_data/F4b.", easy_id_tmp, ".SourceData.tsv"), quote = F, sep = "\t", row.names = F)
## input plot data
plot_data_df <- fread(data.table = F, file = paste0("../../plot_data/F4b.", easy_id_tmp, ".SourceData.tsv"))
## make color for each cluster
names_cluster_tmp <- sort(unique(plot_data_df$Name_TumorCluster))
length_clusters <- length(names_cluster_tmp)
if("Minor cluster (<50 cells)" %in% names_cluster_tmp) {
uniq_cluster_colors <- c(colors_all[1:(length_clusters-1)], "grey40")
} else {
uniq_cluster_colors <-colors_all[1:length_clusters]
}
names(uniq_cluster_colors) <- names_cluster_tmp
## make plot
p <- ggplot()
p <- p + geom_point_rast(data = plot_data_df, mapping = aes(x = UMAP_1, y = UMAP_2, color = Name_TumorCluster), shape = 16, alpha = 0.8, size = 1)
p <- p + scale_color_manual(values = uniq_cluster_colors, na.translate = T)
p <- p + ggtitle(label = easy_id_tmp, subtitle = "Manually grouped tumor clusters")
# p <- p + ggtitle(label = easy_id_tmp, subtitle = "Original analysis")
p <- p + guides(colour = guide_legend(override.aes = list(size=3), title = NULL, nrow = 2, label.theme = element_text(size = 20)))
p <- p + theme_void()
p <- p + theme(legend.position = "bottom")
p <- p + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), title = element_text(size = 25))
# save output -------------------------------------------------------------
file2write <- paste0(dir_out_now, easy_id_tmp, ".pdf")
pdf(file2write, width = 5, height = 5, useDingbats = F)
print(p)
dev.off()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_gran.R
\name{create_gran}
\alias{create_gran}
\title{Build dynamic temporal granularities}
\usage{
create_gran(
.data,
gran1 = NULL,
hierarchy_tbl = NULL,
label = TRUE,
abbr = TRUE,
...
)
}
\arguments{
\item{.data}{A tsibble object.}
\item{gran1}{the granularity to be created. For temporal data, any
combination of "second", "minute", "qhour", "hhour", "hour", "day", "week", "fortnight
,"month", "quarter", "semester" or "year" can be chosen in the form of finer
to coarser unit. For example, for the granularity hour of the week, value is
"hour_week".}
\item{hierarchy_tbl}{A hierarchy table specifying the hierarchy of units
and their relationships.}
\item{label}{Logical. TRUE will display the month as an ordered factor of
character string such as "January", "February". FALSE will display the month
as an ordered factor such as 1 to 12, where 1 stands for January and 12 for
December.}
\item{abbr}{logical. FALSE will display abbreviated labels.}
\item{...}{Other arguments passed on to individual methods.}
}
\value{
A tsibble with an additional column of granularity.
}
\description{
Create time granularities that accommodate periodicities in the data, both single and multiple orders up.
Periodic granularities include, for example, minute of the day or hour
of the week, while aperiodic calendar categorizations include day of the month or
week of the quarter. For non-temporal data, only periodic
deconstructions are supported.
}
\examples{
library(dplyr)
library(ggplot2)
library(lvplot)
# Search for granularities
smart_meter10 \%>\%
search_gran(highest_unit = "week")
# Screen harmonies from the search list
\dontrun{
smart_meter10 \%>\%
harmony(
ugran = "day",
filter_in = "wknd_wday"
)
}
# visualize probability distribution of
# the harmony pair (wknd_wday, hour_day)
smart_meter10 \%>\%
dplyr::filter(customer_id == "10017936") \%>\%
prob_plot(
gran1 = "wknd_wday",
gran2 = "hour_day",
response = "general_supply_kwh",
plot_type = "quantile",
quantile_prob = c(0.1, 0.25, 0.5, 0.75, 0.9)
) +
scale_y_sqrt()
#' # Compute granularities for non-temporal data
library(tsibble)
cricket_tsibble <- cricket \%>\%
mutate(data_index = row_number()) \%>\%
as_tsibble(index = data_index)
hierarchy_model <- tibble::tibble(
units = c("index", "over", "inning", "match"),
convert_fct = c(1, 20, 2, 1)
)
cricket_tsibble \%>\%
create_gran(
"over_inning",
hierarchy_model
)
cricket_tsibble \%>\%
filter(batting_team \%in\% c(
"Mumbai Indians",
"Chennai Super Kings"
)) \%>\%
prob_plot("inning", "over",
hierarchy_model,
response = "runs_per_over",
plot_type = "lv"
)
# Validate if given column in the data set
# equals computed granularity
validate_gran(cricket_tsibble,
gran = "over_inning",
hierarchy_tbl = hierarchy_model,
validate_col = "over"
)
}
| /man/create_gran.Rd | no_license | Sayani07/gravitas | R | false | true | 2,964 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_gran.R
\name{create_gran}
\alias{create_gran}
\title{Build dynamic temporal granularities}
\usage{
create_gran(
.data,
gran1 = NULL,
hierarchy_tbl = NULL,
label = TRUE,
abbr = TRUE,
...
)
}
\arguments{
\item{.data}{A tsibble object.}
\item{gran1}{the granularity to be created. For temporal data, any
combination of "second", "minute", "qhour", "hhour", "hour", "day", "week", "fortnight
,"month", "quarter", "semester" or "year" can be chosen in the form of finer
to coarser unit. For example, for the granularity hour of the week, value is
"hour_week".}
\item{hierarchy_tbl}{A hierarchy table specifying the hierarchy of units
and their relationships.}
\item{label}{Logical. TRUE will display the month as an ordered factor of
character string such as "January", "February". FALSE will display the month
as an ordered factor such as 1 to 12, where 1 stands for January and 12 for
December.}
\item{abbr}{logical. FALSE will display abbreviated labels.}
\item{...}{Other arguments passed on to individual methods.}
}
\value{
A tsibble with an additional column of granularity.
}
\description{
Create time granularities that accommodate periodicities in the data, both single and multiple orders up.
Periodic granularities include, for example, minute of the day or hour
of the week, while aperiodic calendar categorizations include day of the month or
week of the quarter. For non-temporal data, only periodic
deconstructions are supported.
}
\examples{
library(dplyr)
library(ggplot2)
library(lvplot)
# Search for granularities
smart_meter10 \%>\%
search_gran(highest_unit = "week")
# Screen harmonies from the search list
\dontrun{
smart_meter10 \%>\%
harmony(
ugran = "day",
filter_in = "wknd_wday"
)
}
# visualize probability distribution of
# the harmony pair (wknd_wday, hour_day)
smart_meter10 \%>\%
dplyr::filter(customer_id == "10017936") \%>\%
prob_plot(
gran1 = "wknd_wday",
gran2 = "hour_day",
response = "general_supply_kwh",
plot_type = "quantile",
quantile_prob = c(0.1, 0.25, 0.5, 0.75, 0.9)
) +
scale_y_sqrt()
#' # Compute granularities for non-temporal data
library(tsibble)
cricket_tsibble <- cricket \%>\%
mutate(data_index = row_number()) \%>\%
as_tsibble(index = data_index)
hierarchy_model <- tibble::tibble(
units = c("index", "over", "inning", "match"),
convert_fct = c(1, 20, 2, 1)
)
cricket_tsibble \%>\%
create_gran(
"over_inning",
hierarchy_model
)
cricket_tsibble \%>\%
filter(batting_team \%in\% c(
"Mumbai Indians",
"Chennai Super Kings"
)) \%>\%
prob_plot("inning", "over",
hierarchy_model,
response = "runs_per_over",
plot_type = "lv"
)
# Validate if given column in the data set
# equals computed granularity
validate_gran(cricket_tsibble,
gran = "over_inning",
hierarchy_tbl = hierarchy_model,
validate_col = "over"
)
}
|
# Shiny UI fragment: the "Data" navigation panel, letting the user either pick
# a bundled dataset or upload a CSV, with an optional random row subset.
library(shiny)
library(mosaic)
#source("helper.R")
# Radio toggle that drives the two conditionalPanels below via
# input.file_or_package (1 = bundled dataset, 2 = upload).
choose_data_source <- radioButtons(
"file_or_package",
label = NULL,
choices = list("Choose Datasets" = 1, "Upload Datasets" = 2),
selected = 1)
# Shown only when "Choose Datasets" is selected: picker over bundled datasets.
condition1 <-
conditionalPanel(
condition = "input.file_or_package == 1",
selectizeInput("data", label = "Choose Dataset",
choices = list( "Galton", "Heightweight",
"SwimRecords", "TenMileRace"
)
)
)
# Shown only when "Upload Datasets" is selected: CSV file upload control.
condition2 <-
conditionalPanel(
condition = "input.file_or_package == 2",
fileInput('data_own', 'Choose CSV File',
accept=c('text/csv',
'text/comma-separated-values,text/plain',
'.csv')
)
)
# Optional random subsetting: row-count input appears when the box is ticked.
# NOTE(review): this binding shadows base::subset() for the rest of the app's
# global environment -- consider renaming (e.g. subset_ui).
subset <-
fluidRow(
checkboxInput("random_subset", "Random Subset"),
conditionalPanel(
condition = "input.random_subset == true",
numericInput("random_subset_nrow", "Rows", value = 10, min = 1)
)
)
# Sidebar card combining all data-source controls.
well <- wellPanel(
choose_data_source,
condition1,
condition2,
subset)
# Top-level navlist: "Data" tab shows the controls beside a table preview
# (rendered server-side as output$table); "Frame" tab is a placeholder.
specify_data_source_panel <-
navlistPanel(
widths = c(2,10),
tabPanel(
"Data",
column(4,
well
),
column(6,
dataTableOutput("table")
)
),
tabPanel("Frame")
)
| /navBar/data.R | no_license | dtkaplan/MOSAIC-Summer-2015 | R | false | false | 1,374 | r | library(shiny)
library(mosaic)
#source("helper.R")
choose_data_source <- radioButtons(
"file_or_package",
label = NULL,
choices = list("Choose Datasets" = 1, "Upload Datasets" = 2),
selected = 1)
condition1 <-
conditionalPanel(
condition = "input.file_or_package == 1",
selectizeInput("data", label = "Choose Dataset",
choices = list( "Galton", "Heightweight",
"SwimRecords", "TenMileRace"
)
)
)
condition2 <-
conditionalPanel(
condition = "input.file_or_package == 2",
fileInput('data_own', 'Choose CSV File',
accept=c('text/csv',
'text/comma-separated-values,text/plain',
'.csv')
)
)
subset <-
fluidRow(
checkboxInput("random_subset", "Random Subset"),
conditionalPanel(
condition = "input.random_subset == true",
numericInput("random_subset_nrow", "Rows", value = 10, min = 1)
)
)
well <- wellPanel(
choose_data_source,
condition1,
condition2,
subset)
specify_data_source_panel <-
navlistPanel(
widths = c(2,10),
tabPanel(
"Data",
column(4,
well
),
column(6,
dataTableOutput("table")
)
),
tabPanel("Frame")
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auto_auth.R
\name{gar_auto_auth}
\alias{gar_auto_auth}
\title{Perform auto authentication}
\usage{
gar_auto_auth(required_scopes, no_auto = NULL,
environment_var = "GAR_AUTH_FILE", new_user = NULL)
}
\arguments{
\item{required_scopes}{Required scopes needed to authenticate - needs to match at least one}
\item{no_auto}{If TRUE, ignore auto-authentication settings}
\item{environment_var}{Name of environment var that contains auth file path}
\item{new_user}{Deprecated, not used
The authentication file can be a \code{.httr-oauth} file created via \link{gar_auth}
or a Google service JSON file downloaded from the Google API credential console,
with file extension \code{.json}.
You can use this in your code to authenticate from a file location specified in file,
but it is mainly intended to be called on package load via \link{gar_attach_auto_auth}.
\code{environment_var} This is the name that will be called via \link{Sys.getenv} on library load. The environment variable will contain an absolute file path to the location of an authentication file.}
}
\value{
an OAuth token object, specifically a
\code{\link[=Token-class]{Token2.0}}, invisibly
}
\description{
This helper function lets you use environment variables to auto-authenticate on package load, intended for calling by \link{gar_attach_auto_auth}
}
\seealso{
Help files for \link{.onAttach}
Other authentication functions: \code{\link{gar_attach_auto_auth}},
\code{\link{gar_auth_service}}, \code{\link{gar_auth}},
\code{\link{gar_gce_auth}},
\code{\link{get_google_token}},
\code{\link{token_exists}}
}
\concept{authentication functions}
| /man/gar_auto_auth.Rd | no_license | jeffwzhong1994/googleAuthR | R | false | true | 1,715 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auto_auth.R
\name{gar_auto_auth}
\alias{gar_auto_auth}
\title{Perform auto authentication}
\usage{
gar_auto_auth(required_scopes, no_auto = NULL,
environment_var = "GAR_AUTH_FILE", new_user = NULL)
}
\arguments{
\item{required_scopes}{Required scopes needed to authenticate - needs to match at least one}
\item{no_auto}{If TRUE, ignore auto-authentication settings}
\item{environment_var}{Name of environment var that contains auth file path}
\item{new_user}{Deprecated, not used
The authentication file can be a \code{.httr-oauth} file created via \link{gar_auth}
or a Google service JSON file downloaded from the Google API credential console,
with file extension \code{.json}.
You can use this in your code to authenticate from a file location specified in file,
but it is mainly intended to be called on package load via \link{gar_attach_auto_auth}.
\code{environment_var} This is the name that will be called via \link{Sys.getenv} on library load. The environment variable will contain an absolute file path to the location of an authentication file.}
}
\value{
an OAuth token object, specifically a
\code{\link[=Token-class]{Token2.0}}, invisibly
}
\description{
This helper function lets you use environment variables to auto-authenticate on package load, intended for calling by \link{gar_attach_auto_auth}
}
\seealso{
Help files for \link{.onAttach}
Other authentication functions: \code{\link{gar_attach_auto_auth}},
\code{\link{gar_auth_service}}, \code{\link{gar_auth}},
\code{\link{gar_gce_auth}},
\code{\link{get_google_token}},
\code{\link{token_exists}}
}
\concept{authentication functions}
|
## Compare EvoFold RNA-structure predictions with PARS structure scores
## (Kertesz et al. 2010): intersect the two BED tracks genome-wide and
## within exons, then plot PARS-score densities for background vs the
## EvoFold-overlapping subset.
EvoFold.file = "EvoFold_liftOver_sacCer1-to-sacCer2.bed"
PARS.file = "../Kertesz_et_al_2010/GSE22393_processed_merged_PARS_sacCer2_1.bed"
genes.file = "../Kertesz_et_al_2010/sce_transcriptome_global.tab"
EvoFold.table = read.table(EvoFold.file, head=F, sep="\t")
PARS.table = read.table(PARS.file, head=F, sep="\t")
genes.table = read.table(genes.file, head=F, sep="\t")
exon.table = genes.table[genes.table$V5 == "Exon",]
## Map numeric chromosome IDs 1-16 to UCSC Roman names ("chrI".."chrXVI").
## match() returns NA for anything outside 1-16, reproducing the NA default
## of the 16-branch assignment block this replaces.
Roman.chr = paste0("chr", as.character(utils::as.roman(1:16)))[match(exon.table$V1, 1:16)]
## Strand-independent coordinates: start is the lower of V3/V4, stop the
## higher (replaces the four conditional-subset assignments; NA in either
## column still yields NA, as before).
unstranded.start = pmin(exon.table$V3, exon.table$V4)
unstranded.stop = pmax(exon.table$V3, exon.table$V4)
exon.table = data.frame(Roman.chr, unstranded.start, unstranded.stop, exon.table)
library("GenomicRanges")
## GRanges objects for the three tracks.
EvoFold_gr = GRanges(Rle(EvoFold.table$V1),
                     IRanges(start=EvoFold.table$V2, end=EvoFold.table$V3))
PARS_gr = GRanges(Rle(PARS.table$V1),
                  IRanges(start=PARS.table$V2, end=PARS.table$V3))
exon_gr = GRanges(Rle(exon.table$Roman.chr),
                  IRanges(start=exon.table$unstranded.start, end=exon.table$unstranded.stop))
## Genome-wide EvoFold/PARS overlap; "chrom:start" keys are used below to
## pull per-site PARS scores out of PARS.table via match().
EvoFold_PARS_gr = intersect(EvoFold_gr, PARS_gr)
EvoFold_PARS.table = data.frame(EvoFold_PARS_gr)
EvoFold.All.ID = paste(EvoFold_PARS.table$seqnames, EvoFold_PARS.table$start, sep=":")
#not really useful, if you don't have a PARS score
#EvoFold_exon_gr = intersect(EvoFold_gr, exon_gr)
#EvoFold_exon.table = data.frame(EvoFold_exon_gr)
#EvoFold.exon.ID = paste(EvoFold_exon.table$seqnames, EvoFold_exon.table$start, sep=":")
PARS_exon_gr = intersect(PARS_gr, exon_gr)
PARS_exon.table = data.frame(PARS_exon_gr)
PARS.exon.ID = paste(PARS_exon.table$seqnames, PARS_exon.table$start, sep=":")
EvoFold_exons_PARS_gr = intersect(EvoFold_gr, PARS_exon_gr)
EvoFold_exons_PARS.table = data.frame(EvoFold_exons_PARS_gr)
EvoFold_PARS.exon.ID = paste(EvoFold_exons_PARS.table$seqnames, EvoFold_exons_PARS.table$start, sep=":")
PARS.ID = paste(PARS.table$V1, PARS.table$V2, sep=":")
## Draw one density panel: background PARS scores (gray) overlaid with the
## EvoFold-overlapping subset (blue); prints n and quantiles for both, in
## the same order as the original duplicated sections this helper replaces.
plot_pars_density <- function(background, EvoFold, main_title) {
  print(length(background))
  print(quantile(background))
  print(length(EvoFold))
  print(quantile(EvoFold))
  den = density(background, from=-8, to=8)
  plot(den$x, den$y, type="l", xlab = "PARS Score", ylab = "Density",
       xlim=c(-8,8), col="gray", main = main_title)
  mtext(paste("n = ",length(background)," versus n= ",length(EvoFold),sep=""), side=3, line=0.25)
  den = density(EvoFold, na.rm=T, from=-8, to=8)
  lines(den$x, den$y, type = "l", col="blue")
  legend("topleft",legend=c("All","EvoFold"),col=c("gray","blue"),
         lwd=2, ncol=1, cex=0.8)
}
png("PARS_density.png")
par(mfcol=c(1,2))
#################
### all sites ###
#################
plot_pars_density(PARS.table$V5,
                  PARS.table$V5[match(EvoFold.All.ID,PARS.ID,nomatch=0)],
                  "All Genomic Sites")
##################
### Exon sites ###
##################
plot_pars_density(PARS.table$V5[match(PARS.exon.ID,PARS.ID,nomatch=0)],
                  PARS.table$V5[match(EvoFold_PARS.exon.ID,PARS.ID,nomatch=0)],
                  "Exonic Sites")
dev.off()
| /Kertesz_et_al_2010_Comp/compare_predictions.R | no_license | cwarden45/Coding-fRNA-Comment | R | false | false | 4,373 | r | EvoFold.file = "EvoFold_liftOver_sacCer1-to-sacCer2.bed"
PARS.file = "../Kertesz_et_al_2010/GSE22393_processed_merged_PARS_sacCer2_1.bed"
genes.file = "../Kertesz_et_al_2010/sce_transcriptome_global.tab"
EvoFold.table = read.table(EvoFold.file, head=F, sep="\t")
PARS.table = read.table(PARS.file, head=F, sep="\t")
genes.table = read.table(genes.file, head=F, sep="\t")
exon.table = genes.table[genes.table$V5 == "Exon",]
Roman.chr = rep(NA,nrow(exon.table))
Roman.chr[exon.table$V1 == 1]="chrI"
Roman.chr[exon.table$V1 == 2]="chrII"
Roman.chr[exon.table$V1 == 3]="chrIII"
Roman.chr[exon.table$V1 == 4]="chrIV"
Roman.chr[exon.table$V1 == 5]="chrV"
Roman.chr[exon.table$V1 == 6]="chrVI"
Roman.chr[exon.table$V1 == 7]="chrVII"
Roman.chr[exon.table$V1 == 8]="chrVIII"
Roman.chr[exon.table$V1 == 9]="chrIX"
Roman.chr[exon.table$V1 == 10]="chrX"
Roman.chr[exon.table$V1 == 11]="chrXI"
Roman.chr[exon.table$V1 == 12]="chrXII"
Roman.chr[exon.table$V1 == 13]="chrXIII"
Roman.chr[exon.table$V1 == 14]="chrXIV"
Roman.chr[exon.table$V1 == 15]="chrXV"
Roman.chr[exon.table$V1 == 16]="chrXVI"
unstranded.start = rep(NA, nrow(exon.table))
unstranded.start[exon.table$V4 >= exon.table$V3] = exon.table$V3[exon.table$V4 >= exon.table$V3]
unstranded.start[exon.table$V3 > exon.table$V4] = exon.table$V4[exon.table$V3 > exon.table$V4]
unstranded.stop = rep(NA, nrow(exon.table))
unstranded.stop[exon.table$V4 >= exon.table$V3] = exon.table$V4[exon.table$V4 >= exon.table$V3]
unstranded.stop[exon.table$V3 > exon.table$V4] = exon.table$V3[exon.table$V3 > exon.table$V4]
exon.table = data.frame(Roman.chr, unstranded.start, unstranded.stop, exon.table)
library("GenomicRanges")
EvoFold_gr = GRanges(Rle(EvoFold.table$V1),
IRanges(start=EvoFold.table$V2, end=EvoFold.table$V3))
PARS_gr = GRanges(Rle(PARS.table$V1),
IRanges(start=PARS.table$V2, end=PARS.table$V3))
exon_gr = GRanges(Rle(exon.table$Roman.chr),
IRanges(start=exon.table$unstranded.start, end=exon.table$unstranded.stop))
EvoFold_PARS_gr = intersect(EvoFold_gr, PARS_gr)
EvoFold_PARS.table = data.frame(EvoFold_PARS_gr)
EvoFold.All.ID = paste(EvoFold_PARS.table$seqnames, EvoFold_PARS.table$start, sep=":")
#not really useful, if you don't have a PARS score
#EvoFold_exon_gr = intersect(EvoFold_gr, exon_gr)
#EvoFold_exon.table = data.frame(EvoFold_exon_gr)
#EvoFold.exon.ID = paste(EvoFold_exon.table$seqnames, EvoFold_exon.table$start, sep=":")
PARS_exon_gr = intersect(PARS_gr, exon_gr)
PARS_exon.table = data.frame(PARS_exon_gr)
PARS.exon.ID = paste(PARS_exon.table$seqnames, PARS_exon.table$start, sep=":")
EvoFold_exons_PARS_gr = intersect(EvoFold_gr, PARS_exon_gr)
EvoFold_exons_PARS.table = data.frame(EvoFold_exons_PARS_gr)
EvoFold_PARS.exon.ID = paste(EvoFold_exons_PARS.table$seqnames, EvoFold_exons_PARS.table$start, sep=":")
PARS.ID = paste(PARS.table$V1, PARS.table$V2, sep=":")
png("PARS_density.png")
par(mfcol=c(1,2))
#################
### all sites ###
#################
background = PARS.table$V5
print(length(background))
print(quantile(background))
EvoFold = PARS.table$V5[match(EvoFold.All.ID,PARS.ID,nomatch=0)]
print(length(EvoFold))
print(quantile(EvoFold))
den = density(background, from=-8, to=8)
plot(den$x, den$y, type="l", xlab = "PARS Score", ylab = "Density",
xlim=c(-8,8), col="gray", main = "All Genomic Sites")
mtext(paste("n = ",length(background)," versus n= ",length(EvoFold),sep=""), side=3, line=0.25)
den = density(EvoFold, na.rm=T, from=-8, to=8)
lines(den$x, den$y, type = "l", col="blue")
legend("topleft",legend=c("All","EvoFold"),col=c("gray","blue"),
lwd=2, ncol=1, cex=0.8)
##################
### Exon sites ###
##################
background = PARS.table$V5[match(PARS.exon.ID,PARS.ID,nomatch=0)]
print(length(background))
print(quantile(background))
EvoFold = PARS.table$V5[match(EvoFold_PARS.exon.ID,PARS.ID,nomatch=0)]
print(length(EvoFold))
print(quantile(EvoFold))
den = density(background, from=-8, to=8)
plot(den$x, den$y, type="l", xlab = "PARS Score", ylab = "Density",
xlim=c(-8,8), col="gray", main = "Exonic Sites")
mtext(paste("n = ",length(background)," versus n= ",length(EvoFold),sep=""), side=3, line=0.25)
den = density(EvoFold, na.rm=T, from=-8, to=8)
lines(den$x, den$y, type = "l", col="blue")
legend("topleft",legend=c("All","EvoFold"),col=c("gray","blue"),
lwd=2, ncol=1, cex=0.8)
dev.off()
|
######################################
# Boosting
######################################
install.packages("gbm")
library(gbm)
library(MASS)
library(ISLR)
set.seed(1)
train=sample(1:nrow(Boston),nrow(Boston)/2)
boston.test=Boston[-train,"medv"]
# boosting: distribution=gaussian for regression probs, bernoulli for classification probs
boost.boston=gbm(medv~., data=Boston[train,], distribution="gaussian",n.trees=5000, interaction.depth=4)
summary(boost.boston)
# var rel.inf
# lstat lstat 46.5750123
# rm rm 30.7136292
# dis dis 6.7046057
# crim crim 4.0022999
# nox nox 2.6463615
# ptratio ptratio 2.3558616
# black black 1.7742596
# age age 1.6161692
# tax tax 1.3448183
# indus indus 1.2734992
# chas chas 0.7623708
# rad rad 0.2119990
# zn zn 0.0191136
# plot relative influence: lstat, rm
par(mfrow=c(1,2))
# partial dependence plots
# marginal effect of the variable on response
plot(boost.boston,i="rm")
plot(boost.boston,i="lstat")
# relationship: increasing with rm, decreasing with lstat
# predict with test data set
yhat.boost=predict(boost.boston,newdata=Boston[-train,],n.trees=5000)
mean((yhat.boost-boston.test)^2) # MSE: 11.84694
# boosting with a different shrinkage parameter (default=0.01), change to 0.2
boost.boston=gbm(medv~., data=Boston[train,], distribution="gaussian",
n.trees=5000, interaction.depth=4, shrinkage=0.2, verbose=F)
# MSE: 11.42312 slight improvement
| /boosting.R | no_license | just4jin/date-mining-algorithms | R | false | false | 1,532 | r | ######################################
# Boosting
######################################
install.packages("gbm")
library(gbm)
library(MASS)
library(ISLR)
set.seed(1)
train=sample(1:nrow(Boston),nrow(Boston)/2)
boston.test=Boston[-train,"medv"]
# boosting: distribution=gaussian for regression probs, bernoulli for classification probs
boost.boston=gbm(medv~., data=Boston[train,], distribution="gaussian",n.trees=5000, interaction.depth=4)
summary(boost.boston)
# var rel.inf
# lstat lstat 46.5750123
# rm rm 30.7136292
# dis dis 6.7046057
# crim crim 4.0022999
# nox nox 2.6463615
# ptratio ptratio 2.3558616
# black black 1.7742596
# age age 1.6161692
# tax tax 1.3448183
# indus indus 1.2734992
# chas chas 0.7623708
# rad rad 0.2119990
# zn zn 0.0191136
# plot relative influence: lstat, rm
par(mfrow=c(1,2))
# partial dependence plots
# marginal effect of the variable on response
plot(boost.boston,i="rm")
plot(boost.boston,i="lstat")
# relationship: increasing with rm, decreasing with lstat
# predict with test data set
yhat.boost=predict(boost.boston,newdata=Boston[-train,],n.trees=5000)
mean((yhat.boost-boston.test)^2) # MSE: 11.84694
# boosting with a different shrinkage parameter (default=0.01), change to 0.2
boost.boston=gbm(medv~., data=Boston[train,], distribution="gaussian",
n.trees=5000, interaction.depth=4, shrinkage=0.2, verbose=F)
# MSE: 11.42312 slight improvement
|
testlist <- list(Rs = numeric(0), atmp = 0, relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902832.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161 ))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615853543-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 659 | r | testlist <- list(Rs = numeric(0), atmp = 0, relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902832.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161 ))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
## BS code
## Black-Scholes price of a European option.
## pc  put/call indicator: call = 1, put = -1
## S   spot price at time 0
## k   strike
## vol annualised volatility
## d   continuous dividend yield
## r   continuously compounded riskless rate
## t   time to maturity in years
BSprice <- function(pc, S, k, vol, d, r, t)
{
  sd_t <- vol * sqrt(t)
  d1 <- (log(S / k) + t * (r - d + (vol ^ 2) / 2)) / sd_t
  d2 <- d1 - sd_t
  ## Signed Black-Scholes formula: pc = 1 yields the call, pc = -1 the put.
  pc * (exp(-d * t) * S * pnorm(pc * d1) - k * exp(-r * t) * pnorm(pc * d2))
}
## Black-Scholes vega: sensitivity of the option price to volatility.
## Arguments as in BSprice(); vega is identical for calls and puts, so
## pc is accepted but unused.
BSvega <- function(pc, S, k, vol, d, r, t)
{
  d1 <- (log(S / k) + t * (r - d + (vol ^ 2) / 2)) / (vol * sqrt(t))
  ## dnorm(d1) is exactly exp(-d1^2 / 2) / sqrt(2 * pi) from the original
  exp(-d * t) * S * sqrt(t) * dnorm(d1)
}
## Implied Black-Scholes volatility via Newton-Raphson.
## pc      put/call indicator: call = 1, put = -1
## S       spot price at time 0
## k       strike
## price   observed option premium
## d       dividend yield
## r       riskless rate
## t       time to maturity
## start   starting value for vol, optional, by default = 0.2
## maxiter iteration cap (new, default 100): the original unbounded while
##         loop could hang or diverge when vega is ~0 (deep in/out of the
##         money) or when `price` admits no BS volatility.
BSvol <- function(pc, S, k, price, d, r, t, start = 0.2, maxiter = 100)
{
  voli <- start
  pricei <- BSprice(pc, S, k, voli, d, r, t)
  iter <- 0L
  while (abs(price - pricei) > 0.000001)
  {
    iter <- iter + 1L
    if (iter > maxiter) {
      stop("BSvol: Newton-Raphson did not converge after ", maxiter,
           " iterations", call. = FALSE)
    }
    # Newton step: vol <- vol + (target - model price) / vega
    vegai <- BSvega(pc, S, k, voli, d, r, t)
    voli <- voli + (price - pricei) / vegai
    pricei <- BSprice(pc, S, k, voli, d, r, t)
  }
  voli
}
## Black-Scholes delta: sensitivity of the option price to the spot.
## Arguments as in BSprice().
BSdelta <- function(pc, S, k, vol, d, r, t)
{
  d1 <- (log(S / k) + t * (r - d + (vol ^ 2) / 2)) / (vol * sqrt(t))
  disc <- exp(-d * t)
  ## Call delta is e^{-dt} N(d1); put delta is e^{-dt} (N(d1) - 1).
  if (pc == 1) disc * pnorm(d1) else disc * (pnorm(d1) - 1)
}
## Black-Scholes gamma: second derivative of price w.r.t. the spot.
## Identical for calls and puts, so pc is accepted but unused.
## Other arguments as in BSprice().
BSgamma <- function(pc, S, k, vol, d, r, t)
{
  d1 <- (log(S / k) + t * (r - d + (vol ^ 2) / 2)) / (vol * sqrt(t))
  ## dnorm(d1) replaces the explicit exp(-d1^2 / 2) / sqrt(2 * pi)
  exp(-d * t) * dnorm(d1) / (S * vol * sqrt(t))
}
## Black-Scholes theta: sensitivity of the option price to the passage
## of time. Arguments as in BSprice().
BStheta <- function(pc, S, k, vol, d, r, t)
{
  sd_t <- vol * sqrt(t)
  d1 <- (log(S / k) + t * (r - d + (vol ^ 2) / 2)) / sd_t
  d2 <- d1 - sd_t
  ## Time-decay term; dnorm(d1) equals exp(-d1^2 / 2) / sqrt(2 * pi).
  decay <- -exp(-d * t) * S * vol * dnorm(d1) / (2 * sqrt(t))
  decay + pc * d * S * exp(-d * t) * pnorm(pc * d1) -
    pc * r * k * exp(-r * t) * pnorm(pc * d2)
}
## Black-Scholes rho: sensitivity of the option price to the riskless
## rate. Arguments as in BSprice().
BSrho <- function(pc, S, k, vol, d, r, t)
{
  sd_t <- vol * sqrt(t)
  ## d2 = d1 - vol * sqrt(t), folded into a single expression
  d2 <- (log(S / k) + t * (r - d + (vol ^ 2) / 2)) / sd_t - sd_t
  pc * k * t * exp(-r * t) * pnorm(pc * d2)
}
###HESTON###
# Risk-Neutral Probability P1
# Integrand of the Heston (1993) in-the-money probability P1 (mu1 = 0.5
# branch), evaluated at frequency phi; Heston() integrates it over
# phi > 0 with the trapezoidal rule.
#   phi    integration frequency (real scalar)
#   kappa  mean-reversion speed of the variance process
#   theta  long-run variance level
#   lambda market price of volatility risk
#   rho    spot/variance correlation
#   sigma  volatility of variance ("vol of vol")
#   tau    time to maturity; K strike; S spot; r riskless rate;
#   v      initial variance
# Returns Re(f1 * exp(-i*phi*log K) / (i*phi)), the real part used in the
# Fourier-inversion integral.
HestonP1<-function(phi, kappa, theta, lambda, rho, sigma, tau, K, S, r, v)
{
  mu1 = 0.5;
  b1 = (kappa + lambda - rho * sigma);
  # d, g, D ("DD1") and C ("cc1") terms of the closed-form Heston
  # characteristic function; complex(real=0, imaginary=z) builds i*z
  d1 = sqrt((complex(real=0,imaginary=(rho*sigma*phi)) - b1)^2 - (complex(real=0,imaginary=(sigma^2 * 2 * mu1 * phi)) - sigma^2 * phi^2));
  g1 = (b1 - complex(real=0,imaginary=(rho * sigma * phi)) + d1) / (b1 - complex(real=0,imaginary=(rho * sigma * phi)) - d1);
  DD1_1 = (b1 - complex(real=0,imaginary=(rho * sigma * phi)) + d1) / (sigma^2);
  DD1_2 = 1 - exp(d1 * tau);
  DD1_3 = 1 - g1 * exp(d1 * tau);
  DD1 = DD1_1 * (DD1_2 / DD1_3);
  CC1_1 = complex(real=0,imaginary=(r * phi * tau));
  CC1_2 = (kappa * theta) / (sigma^2);
  CC1_3 = (b1 - complex(real=0,imaginary=(rho * sigma * phi)) + d1) * tau;
  CC1_4 = 2 * log((1 - g1 * exp(d1 * tau)) / (1 - g1));
  cc1 = CC1_1 + CC1_2 * (CC1_3 - CC1_4);
  # characteristic-function value, then the real part of the inversion integrand
  f1 = exp(cc1 + DD1 * v + complex(real=0,imaginary=phi * log(S)));
  y = Re(f1 * exp(complex(real=0,imaginary=-phi * log(K))) / (complex(real=0,imaginary=phi)));
  return(y)
}
# Risk-Neutral Probability P2
# Integrand of the Heston (1993) exercise probability P2 (mu1 = -0.5
# branch, b1 = kappa + lambda, i.e. without the rho*sigma shift used in
# HestonP1). Parameters and return value as documented for HestonP1();
# Heston() integrates this over phi > 0 with the trapezoidal rule.
HestonP2<-function(phi, kappa, theta, lambda, rho, sigma, tau, K, S, r, v)
{
  mu1 = -0.5;
  b1 = kappa + lambda;
  # d, g, D ("DD1") and C ("cc1") terms of the Heston characteristic
  # function; complex(real=0, imaginary=z) builds i*z
  d1 = sqrt((rho * sigma * complex(real=0,imaginary=phi) - b1)^2 - (sigma^2 * 2 * mu1 * complex(real=0,imaginary=phi ) - sigma^2 * phi^2));
  g1 = (b1 - rho * sigma * complex(real=0,imaginary=phi ) + d1) / (b1 - rho * sigma * complex(real=0,imaginary=phi ) - d1);
  DD1_1 = (b1 - rho * sigma * complex(real=0,imaginary=phi ) + d1) / (sigma^2);
  DD1_2 = 1 - exp(d1 * tau);
  DD1_3 = 1 - g1 * exp(d1 * tau);
  DD1 = DD1_1 * DD1_2 / DD1_3;
  CC1_1 = complex(real=0,imaginary=r * phi * tau);
  CC1_2 = kappa * theta / (sigma^2);
  CC1_3 = (b1 - rho * sigma * complex(real=0,imaginary=phi ) + d1) * tau;
  CC1_4 = 2 * log((1 - g1 * exp(d1 * tau)) / (1 - g1));
  cc1 = CC1_1 + CC1_2 * (CC1_3 - CC1_4);
  # characteristic-function value, then the real part of the inversion integrand
  f1 = exp(cc1 + DD1 * v + complex(real=0,imaginary=phi * log(S)));
  y = Re(exp(complex(real=0,imaginary=-phi * log(K))) * f1 / (complex(real=0,imaginary=phi )));
  return(y)
}
# Trapezoidal Rule (THIS IS USED TO SOLVE THE INTEGRAND)
# Numerically integrates y over the grid x as the sum of trapezoid areas
# 0.5 * (x[t] - x[t-1]) * (y[t-1] + y[t]); x and y must have equal length.
# Fixes the original `for (t in 2:n)` loop, which iterated t = 2, 1 for
# length-1 input (indexing out of bounds and returning NA); inputs with
# fewer than two points now integrate to 0.
TRAPnumint<-function(x, y)
{
  n = length(x);
  if (n < 2) {
    return(0);
  }
  # vectorised trapezoid sum, equivalent to the elementwise loop
  I = sum(0.5 * diff(x) * (y[-n] + y[-1]));
  return(I)
}
# Heston Option Price
# Semi-closed-form Heston (1993) European option price: the probabilities
# p1 and p2 are recovered by numerically integrating the Fourier-inversion
# integrands HestonP1/HestonP2 over phi in (0, 100] on a step-0.1 grid
# (1001 points) with the trapezoidal rule.
#   PutCall 'Call' or 'Put'; remaining parameters as documented in HestonP1().
Heston<-function(PutCall, kappa, theta, lambda, rho, sigma, tau, K, S, r, v)
{
  P1_int = rep(0,1001);
  P2_int = P1_int;
  phi_int = seq(0.0001,100.0001,by=.1)
  cnt = 1;
  # evaluate both integrands on the phi grid
  for (phi in seq(0.0001,100.0001,by=.1))
  {
    P1_int[cnt] = HestonP1(phi, kappa, theta, lambda, rho, sigma, tau, K, S, r, v);
    P2_int[cnt] = HestonP2(phi, kappa, theta, lambda, rho, sigma, tau, K, S, r, v);
    cnt = cnt + 1;
  }
  # Fourier inversion: P_j = 1/2 + (1/pi) * integral of the integrand
  p1 = 0.5 + (1 / pi) * TRAPnumint(phi_int, P1_int);
  p2 = 0.5 + (1 / pi) * TRAPnumint(phi_int, P2_int);
  # clamp numerical-integration noise so the probabilities stay in [0, 1]
  if (p1 < 0) {p1 = 0;}
  if (p1 > 1) {p1 = 1;}
  if (p2 < 0) {p2 = 0;}
  if (p2 > 1) {p2 = 1;}
  HestonC = S * p1 - K * exp(-r * tau) * p2;
  # put price obtained from the call via put-call parity
  if (PutCall=='Call') {y = HestonC;} else {
    if(PutCall=='Put') {y = HestonC + K * exp(-r * tau) - S}}
  return(y)
}
#### TRYING AN EXAMPLE
## Prices one at-the-money European call under Heston; alternative
## notations from the course notes / Gatheral are noted inline.
K = 100;
r = .05;
rho = -.7;
kappa = 2; # lambda in our notes and in Gatheral
theta = 0.01; # v bar in our notes
sigma = 0.1; # eta in our notes
v = 0.01; # initial variance
im_k = .5; # technical
lambda= 0.05
tau= 0.08 #time to maturity
## NOTE(review): the call below repeats the parameter values positionally
## as named arguments instead of reusing the variables defined above.
Call_Prices = rep(1);{
  Call_Prices[1]<- Heston(S=100,K=100,r=0.05,v=0.01,theta=.01,kappa=2,sigma=.1,rho=-.7, lambda=0.05, tau=.08, PutCall= 'Call');
}
Call_Prices[1]
## can either specify the variable separately, or in the function- i for some reason did both
## can extend call price by changing value in rep()
| /Untitled.R | no_license | NancyGahlot/FM408 | R | false | false | 6,859 | r |
## BS code
BSprice<-function(pc, S, k, vol, d, r, t)
{
#pc put/call indicator call=1, put=-1
#S Stock price at 0
#K strike
#vol volatility
#d dividend yield
#r riskless rate
#t time to maturity
d1 = (log(S / k) + t * (r - d + (vol ^ 2) / 2)) / (vol * sqrt(t))
d2 = d1 - vol * sqrt(t)
BSprice = pc * exp(-d * t) * S *
pnorm(pc * d1) - pc * k * exp(-r * t) * pnorm(pc * d2)
return(BSprice)
}
BSvega<-function(pc, S, k, vol, d, r, t)
{
#pc put/call indicator call=1, put=-1
#S Stock price at 0
#K strike
#vol volatility
#d dividend yield
#r riskless rate
#t time to maturity
d1 = (log(S / k) + t * (r - d + (vol ^ 2) / 2)) / (vol * sqrt(t))
BSvega = exp(-d * t) * S * sqrt(t) * exp((-d1 ^ 2) / 2) / (sqrt(2 * pi))
return(BSvega)
}
BSvol<-function(pc, S, k, price, d, r, t, start = 0.2)
{
#pc put/call indicator call=1, put=-1
#S Stock price at 0
#K strike
#price option premium
#d dividend yield
#r riskless rate
#t time to maturity
#start starting value for vol, optional, by default=0.2
voli = start
pricei = BSprice(pc, S, k, voli, d, r, t)
vegai = BSvega(pc, S, k, voli, d, r, t)
while(abs(price - pricei) > 0.000001)
{
voli<-voli + (price - pricei) / vegai
pricei<-BSprice(pc, S, k, voli, d, r, t)
vegai<-BSvega(pc, S, k, voli, d, r, t)
}
BSvol = voli
return(BSvol)
}
BSdelta<-function(pc, S, k, vol, d, r, t)
{
#pc put/call indicator call=1, put=-1
#S Stock price at 0
#K strike
#vol volatility
#d dividend yield
#r riskless rate
#t time to maturity
d1 = (log(S / k) + t * (r - d + (vol ^ 2) / 2)) / (vol * sqrt(t))
if (pc == 1) {BSdelta = exp(-d * t) * pnorm(d1)} else
{BSdelta = exp(-d * t) * (pnorm(d1) - 1)}
return(BSdelta)
}
BSgamma<-function(pc, S, k, vol, d, r, t)
{
#pc put/call indicator call=1, put=-1
#S Stock price at 0
#K strike
#vol volatility
#d dividend yield
#r riskless rate
#t time to maturity
d1 = (log(S / k) + t * (r - d + (vol ^ 2) / 2)) / (vol * sqrt(t))
BSgamma = exp(-d * t) * exp((-d1 ^ 2) / 2) / (sqrt(2 * pi) * S * vol * sqrt(t))
return(BSgamma)}
BStheta<-function(pc, S, k, vol, d, r, t) #there is a q in the formula?
{
#pc put/call indicator call=1, put=-1
#S Stock price at 0
#K strike
#vol volatility
#d dividend yield
#r riskless rate
#t time to maturity
d1 = (log(S / k) + t * (r - d + (vol ^ 2) / 2)) / (vol * sqrt(t))
d2 = d1 - vol * sqrt(t)
BStheta = -exp(-d * t) * exp((-d1 ^ 2) / 2) * S * vol /
(sqrt(2 * pi) * 2 * sqrt(t)) + pc * d * S * exp(-d * t) * pnorm(pc * d1) - pc * r * k * exp(-r * t) * pnorm(pc * d2)
return(BStheta)
}
BSrho<-function(pc, S, k, vol, d, r, t)
{
#pc put/call indicator call=1, put=-1
#S Stock price at 0
#K strike
#vol volatility
#d dividend yield
#r riskless rate
#t time to maturity
d1 = (log(S / k) + t * (r - d + (vol ^ 2) / 2)) / (vol * sqrt(t))
d2 = d1 - vol * sqrt(t)
BSrho = pc * k * t * exp(-r * t) * pnorm(pc * d2)
return(BSrho)
}
###HESTON###
# Risk-Neutral Probability P1
HestonP1<-function(phi, kappa, theta, lambda, rho, sigma, tau, K, S, r, v)
{
mu1 = 0.5;
b1 = (kappa + lambda - rho * sigma);
d1 = sqrt((complex(real=0,imaginary=(rho*sigma*phi)) - b1)^2 - (complex(real=0,imaginary=(sigma^2 * 2 * mu1 * phi)) - sigma^2 * phi^2));
g1 = (b1 - complex(real=0,imaginary=(rho * sigma * phi)) + d1) / (b1 - complex(real=0,imaginary=(rho * sigma * phi)) - d1);
DD1_1 = (b1 - complex(real=0,imaginary=(rho * sigma * phi)) + d1) / (sigma^2);
DD1_2 = 1 - exp(d1 * tau);
DD1_3 = 1 - g1 * exp(d1 * tau);
DD1 = DD1_1 * (DD1_2 / DD1_3);
CC1_1 = complex(real=0,imaginary=(r * phi * tau));
CC1_2 = (kappa * theta) / (sigma^2);
CC1_3 = (b1 - complex(real=0,imaginary=(rho * sigma * phi)) + d1) * tau;
CC1_4 = 2 * log((1 - g1 * exp(d1 * tau)) / (1 - g1));
cc1 = CC1_1 + CC1_2 * (CC1_3 - CC1_4);
f1 = exp(cc1 + DD1 * v + complex(real=0,imaginary=phi * log(S)));
y = Re(f1 * exp(complex(real=0,imaginary=-phi * log(K))) / (complex(real=0,imaginary=phi)));
return(y)
}
# Risk-Neutral Probability P2
HestonP2<-function(phi, kappa, theta, lambda, rho, sigma, tau, K, S, r, v)
{
mu1 = -0.5;
b1 = kappa + lambda;
d1 = sqrt((rho * sigma * complex(real=0,imaginary=phi) - b1)^2 - (sigma^2 * 2 * mu1 * complex(real=0,imaginary=phi ) - sigma^2 * phi^2));
g1 = (b1 - rho * sigma * complex(real=0,imaginary=phi ) + d1) / (b1 - rho * sigma * complex(real=0,imaginary=phi ) - d1);
DD1_1 = (b1 - rho * sigma * complex(real=0,imaginary=phi ) + d1) / (sigma^2);
DD1_2 = 1 - exp(d1 * tau);
DD1_3 = 1 - g1 * exp(d1 * tau);
DD1 = DD1_1 * DD1_2 / DD1_3;
CC1_1 = complex(real=0,imaginary=r * phi * tau);
CC1_2 = kappa * theta / (sigma^2);
CC1_3 = (b1 - rho * sigma * complex(real=0,imaginary=phi ) + d1) * tau;
CC1_4 = 2 * log((1 - g1 * exp(d1 * tau)) / (1 - g1));
cc1 = CC1_1 + CC1_2 * (CC1_3 - CC1_4);
f1 = exp(cc1 + DD1 * v + complex(real=0,imaginary=phi * log(S)));
y = Re(exp(complex(real=0,imaginary=-phi * log(K))) * f1 / (complex(real=0,imaginary=phi )));
return(y)
}
# Trapezoidal Rule (THIS IS USED TO SLVE THE INTEGRAND)
TRAPnumint<-function(x, y)
{
n = length(x);
I = 0;
for (t in 2:n) {
I = I + 0.5 * (x[t] - x[t-1]) * (y[t-1] + y[t]);}
return(I)
}
# Heston Option Price
Heston<-function(PutCall, kappa, theta, lambda, rho, sigma, tau, K, S, r, v)
{
P1_int = rep(0,1001);
P2_int = P1_int;
phi_int = seq(0.0001,100.0001,by=.1)
cnt = 1;
for (phi in seq(0.0001,100.0001,by=.1))
{
P1_int[cnt] = HestonP1(phi, kappa, theta, lambda, rho, sigma, tau, K, S, r, v);
P2_int[cnt] = HestonP2(phi, kappa, theta, lambda, rho, sigma, tau, K, S, r, v);
cnt = cnt + 1;
}
p1 = 0.5 + (1 / pi) * TRAPnumint(phi_int, P1_int);
p2 = 0.5 + (1 / pi) * TRAPnumint(phi_int, P2_int);
if (p1 < 0) {p1 = 0;}
if (p1 > 1) {p1 = 1;}
if (p2 < 0) {p2 = 0;}
if (p2 > 1) {p2 = 1;}
HestonC = S * p1 - K * exp(-r * tau) * p2;
if (PutCall=='Call') {y = HestonC;} else {
if(PutCall=='Put') {y = HestonC + K * exp(-r * tau) - S}}
return(y)
}
#### TRYING AN EXAMPLE
K = 100;
r = .05;
rho = -.7;
kappa = 2; # lambda in our notes and in Gatheral
theta = 0.01; # v bar in our notes
sigma = 0.1; # eta in our notes
v = 0.01; # initial variance
im_k = .5; # technical
lambda= 0.05
tau= 0.08 #time to maturity
Call_Prices = rep(1);{
Call_Prices[1]<- Heston(S=100,K=100,r=0.05,v=0.01,theta=.01,kappa=2,sigma=.1,rho=-.7, lambda=0.05, tau=.08, PutCall= 'Call');
}
Call_Prices[1]
## can either specify the variable separately, or in the function- i for some reason did both
## can extend call price by changing value in rep()
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
glbin_lcd_cpp <- function(X, y, offset, G0, G1, group_weight, lambda, add_intercept, alpha, verb, eps, dfmax, maxiter, AIC_stop, penalty, pen_tuning) {
.Call('_glbinc_glbin_lcd_cpp', PACKAGE = 'glbinc', X, y, offset, G0, G1, group_weight, lambda, add_intercept, alpha, verb, eps, dfmax, maxiter, AIC_stop, penalty, pen_tuning)
}
glbin_lcd_sparse_cpp <- function(X, y, offset, center, scale, G0, G1, group_weight, lambda, add_intercept, alpha, verb, eps, dfmax, maxiter, AIC_stop, penalty, pen_tuning) {
.Call('_glbinc_glbin_lcd_sparse_cpp', PACKAGE = 'glbinc', X, y, offset, center, scale, G0, G1, group_weight, lambda, add_intercept, alpha, verb, eps, dfmax, maxiter, AIC_stop, penalty, pen_tuning)
}
glbin_lcd_std_cpp <- function(X, y, offset, G0, G1, group_weight, lambda, add_intercept, alpha, verb, eps, dfmax, maxiter, AIC_stop, penalty, pen_tuning) {
.Call('_glbinc_glbin_lcd_std_cpp', PACKAGE = 'glbinc', X, y, offset, G0, G1, group_weight, lambda, add_intercept, alpha, verb, eps, dfmax, maxiter, AIC_stop, penalty, pen_tuning)
}
glm_binom_std_c <- function(X, y, offset, center, scale, add_intercept, verb, eps, maxiter, w_limit = 0.25, stability_threshold = 1e-3) {
.Call('_glbinc_glm_binom_std_c', PACKAGE = 'glbinc', X, y, offset, center, scale, add_intercept, verb, eps, maxiter, w_limit, stability_threshold)
}
glm_binom_std_sparse_c <- function(X, y, offset, center, scale, add_intercept, verb, eps, maxiter, w_limit = 0.25, stability_threshold = 1e-3) {
.Call('_glbinc_glm_binom_std_sparse_c', PACKAGE = 'glbinc', X, y, offset, center, scale, add_intercept, verb, eps, maxiter, w_limit, stability_threshold)
}
rcpp_hello <- function() {
.Call('_glbinc_rcpp_hello', PACKAGE = 'glbinc')
}
sparse_c <- function(x) {
.Call('_glbinc_sparse_c', PACKAGE = 'glbinc', x)
}
| /R/RcppExports.R | no_license | jeliason/glbinc | R | false | false | 1,943 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
glbin_lcd_cpp <- function(X, y, offset, G0, G1, group_weight, lambda, add_intercept, alpha, verb, eps, dfmax, maxiter, AIC_stop, penalty, pen_tuning) {
.Call('_glbinc_glbin_lcd_cpp', PACKAGE = 'glbinc', X, y, offset, G0, G1, group_weight, lambda, add_intercept, alpha, verb, eps, dfmax, maxiter, AIC_stop, penalty, pen_tuning)
}
glbin_lcd_sparse_cpp <- function(X, y, offset, center, scale, G0, G1, group_weight, lambda, add_intercept, alpha, verb, eps, dfmax, maxiter, AIC_stop, penalty, pen_tuning) {
.Call('_glbinc_glbin_lcd_sparse_cpp', PACKAGE = 'glbinc', X, y, offset, center, scale, G0, G1, group_weight, lambda, add_intercept, alpha, verb, eps, dfmax, maxiter, AIC_stop, penalty, pen_tuning)
}
glbin_lcd_std_cpp <- function(X, y, offset, G0, G1, group_weight, lambda, add_intercept, alpha, verb, eps, dfmax, maxiter, AIC_stop, penalty, pen_tuning) {
.Call('_glbinc_glbin_lcd_std_cpp', PACKAGE = 'glbinc', X, y, offset, G0, G1, group_weight, lambda, add_intercept, alpha, verb, eps, dfmax, maxiter, AIC_stop, penalty, pen_tuning)
}
glm_binom_std_c <- function(X, y, offset, center, scale, add_intercept, verb, eps, maxiter, w_limit = 0.25, stability_threshold = 1e-3) {
.Call('_glbinc_glm_binom_std_c', PACKAGE = 'glbinc', X, y, offset, center, scale, add_intercept, verb, eps, maxiter, w_limit, stability_threshold)
}
glm_binom_std_sparse_c <- function(X, y, offset, center, scale, add_intercept, verb, eps, maxiter, w_limit = 0.25, stability_threshold = 1e-3) {
.Call('_glbinc_glm_binom_std_sparse_c', PACKAGE = 'glbinc', X, y, offset, center, scale, add_intercept, verb, eps, maxiter, w_limit, stability_threshold)
}
rcpp_hello <- function() {
.Call('_glbinc_rcpp_hello', PACKAGE = 'glbinc')
}
sparse_c <- function(x) {
.Call('_glbinc_sparse_c', PACKAGE = 'glbinc', x)
}
|
library(oro.nifti)
library(neurobase)
library(tidyverse)
library(stringr)
#output_selected_clusters
#this function outputs, for each cross-validation fold, the selected clusters
#for each modality as NIfTI images written to the working directory
#---PARAMETERS---
#output_list: the list that is output of a call of the fit_and_eval function
#space_defining_image : the absolute path to a nii or a nii.gz file defining the space in which to write the clusters
#output_name: string, prefix of the filenames of the nifti images to be written
#---NOTES---
#Assumes output_list[[fold]][[1]] is a named list of per-modality cluster
#tables and output_list[[fold]]$weights$features_name encodes selected
#features as "<modality>_<cluster_id>" -- TODO confirm against fit_and_eval.
output_selected_clusters <- function(output_list, space_defining_image, output_name) {
# Reference image supplies geometry/header; its voxel grid defines the space.
img <- readNIfTI(space_defining_image)
img_dim <- dim(img@.Data)
folds <- length(output_list)
for (fold_index in 1:folds) {
modalities <- names(output_list[[fold_index]][[1]])
modalities_number <- length(modalities)
for (modality_index in 1:modalities_number) {
this_modality <- modalities[modality_index]
# Count features of this modality that were selected in this fold.
clusters_number <- sum(str_detect(output_list[[fold_index]]$weights$features_name, this_modality))
if(clusters_number == 0){
next}
suffix <- paste("fold", fold_index, this_modality, sep = "_")
# "dorsal_attentional" contains an underscore, so after splitting on "_"
# its cluster id sits in position 3 instead of 2 -- hence the special case.
if (this_modality == "dorsal_attentional") {
this_modality_clusters <- str_subset(output_list[[fold_index]]$weights$features_name, this_modality) %>%
str_split(.,"_") %>%
map_chr(~`[`(.,3)) %>%
as.numeric(.)} else {
this_modality_clusters <- str_subset(output_list[[fold_index]]$weights$features_name, this_modality) %>%
str_split(.,"_") %>%
map_chr(~`[`(.,2)) %>%
as.numeric(.)}
# Keep only the voxels belonging to the selected cluster ids.
selected_clusters <- `[[`(output_list[[fold_index]][[1]], this_modality) %>%
filter(., cluster_id %in% this_modality_clusters)
# Blank copy of the reference image; each selected voxel is labelled with
# its cluster id.  Assumes columns 1:3 of the cluster table are x/y/z
# voxel indices -- TODO confirm.
new_img <- img
new_img@.Data <- array(0,img_dim)
for (cl in this_modality_clusters) {
new_img@.Data[as.matrix(selected_clusters[selected_clusters$cluster_id == cl, 1:3])] <- cl
}
img_name <- paste(output_name,suffix, sep = "_")
writeNIfTI(new_img, paste0(img_name), verbose=TRUE)
}
}
}
#evaluators_calculation
#helper computing accuracy, sensitivity, specificity and F1 from predicted
#vs. true labels.  Assumes both label columns take exactly two values so the
#confusion matrix is 2x2 (as in the original implementation; a degenerate
#table raises a subscript error).
#---PARAMETERS---
#df: data_frame or data.frame with columns "classification" (label from the fitted model) and "ground" (ground truth)
evaluators_calculation <- function(df) {
  # Confusion matrix: rows = predicted label, columns = ground truth.
  cm <- table(df$classification, df$ground)
  tn <- cm[1, 1]
  fp <- cm[2, 1]
  fn <- cm[1, 2]
  tp <- cm[2, 2]
  c(
    accuracy    = (tn + tp) / sum(cm),
    sensitivity = tp / (tp + fn),
    specificity = tn / (tn + fp),
    F1          = (2 * tp) / (2 * tp + fn + fp)
  )
}
#evaluate_model_merged
#takes the output of fit_and_eval and computes performance indexes after
#stacking the per-fold classification tables into one data frame (i.e. the
#metrics are pooled over all folds rather than averaged per fold)
#---PARAMETERS---
#output_classification: list, the output of a call to fit_and_eval
evaluate_model_merged <- function(output_classification) {
  # Each fold stores its classification table in the "accuracy" component.
  fold_tables <- lapply(output_classification, function(fold) fold$accuracy)
  stacked <- Reduce(bind_rows, fold_tables)
  evaluators_calculation(stacked)
}
#evaluate_model_fold
#takes the output of fit_and_eval and computes performance indexes averaged
#by fold.  NaN values (e.g. 0/0 from a degenerate confusion matrix in one
#fold) are mapped to 0 before averaging, exactly as in the original code.
#The four near-identical copy-pasted metric blocks of the original are
#collapsed into a single loop over metric names.
#---PARAMETERS---
#output_classification: list, the output of a call to fit_and_eval
evaluate_model_fold <- function(output_classification) {
  # One classification table per fold, then one metric vector per fold.
  fold_tables <- lapply(output_classification, function(fold) fold$accuracy)
  fold_evals <- lapply(fold_tables, evaluators_calculation)
  metrics <- c("accuracy", "sensitivity", "specificity", "F1")
  # vapply over the metric names keeps them as names of the result,
  # matching the original c(accuracy = ..., sensitivity = ..., ...) output.
  vapply(metrics, function(metric) {
    values <- vapply(fold_evals, `[[`, numeric(1), metric)
    values[is.nan(values)] <- 0
    mean(values)
  }, numeric(1))
}
| /evaluation_of_model_and_cluster_writing.r | no_license | fnemmi-tonic/multimodal_MRI_pipeline_classification | R | false | false | 4,532 | r | library(oro.nifti)
library(neurobase)
library(tidyverse)
library(stringr)
#output_selected_clusters
#this function outputs, for each cross-validation fold, the selected clusters
#for each modality as NIfTI images written to the working directory
#---PARAMETERS---
#output_list: the list that is output of a call of the fit_and_eval function
#space_defining_image : the absolute path to a nii or a nii.gz file defining the space in which to write the clusters
#output_name: string, prefix of the filenames of the nifti images to be written
#---NOTES---
#Assumes output_list[[fold]][[1]] is a named list of per-modality cluster
#tables and output_list[[fold]]$weights$features_name encodes selected
#features as "<modality>_<cluster_id>" -- TODO confirm against fit_and_eval.
output_selected_clusters <- function(output_list, space_defining_image, output_name) {
# Reference image supplies geometry/header; its voxel grid defines the space.
img <- readNIfTI(space_defining_image)
img_dim <- dim(img@.Data)
folds <- length(output_list)
for (fold_index in 1:folds) {
modalities <- names(output_list[[fold_index]][[1]])
modalities_number <- length(modalities)
for (modality_index in 1:modalities_number) {
this_modality <- modalities[modality_index]
# Count features of this modality that were selected in this fold.
clusters_number <- sum(str_detect(output_list[[fold_index]]$weights$features_name, this_modality))
if(clusters_number == 0){
next}
suffix <- paste("fold", fold_index, this_modality, sep = "_")
# "dorsal_attentional" contains an underscore, so after splitting on "_"
# its cluster id sits in position 3 instead of 2 -- hence the special case.
if (this_modality == "dorsal_attentional") {
this_modality_clusters <- str_subset(output_list[[fold_index]]$weights$features_name, this_modality) %>%
str_split(.,"_") %>%
map_chr(~`[`(.,3)) %>%
as.numeric(.)} else {
this_modality_clusters <- str_subset(output_list[[fold_index]]$weights$features_name, this_modality) %>%
str_split(.,"_") %>%
map_chr(~`[`(.,2)) %>%
as.numeric(.)}
# Keep only the voxels belonging to the selected cluster ids.
selected_clusters <- `[[`(output_list[[fold_index]][[1]], this_modality) %>%
filter(., cluster_id %in% this_modality_clusters)
# Blank copy of the reference image; each selected voxel is labelled with
# its cluster id.  Assumes columns 1:3 of the cluster table are x/y/z
# voxel indices -- TODO confirm.
new_img <- img
new_img@.Data <- array(0,img_dim)
for (cl in this_modality_clusters) {
new_img@.Data[as.matrix(selected_clusters[selected_clusters$cluster_id == cl, 1:3])] <- cl
}
img_name <- paste(output_name,suffix, sep = "_")
writeNIfTI(new_img, paste0(img_name), verbose=TRUE)
}
}
}
#evaluators_calculation
#helper computing accuracy, sensitivity, specificity and F1 from predicted
#vs. true labels.  Assumes both label columns take exactly two values so the
#confusion matrix is 2x2 (as in the original implementation; a degenerate
#table raises a subscript error).
#---PARAMETERS---
#df: data_frame or data.frame with columns "classification" (label from the fitted model) and "ground" (ground truth)
evaluators_calculation <- function(df) {
  # Confusion matrix: rows = predicted label, columns = ground truth.
  cm <- table(df$classification, df$ground)
  tn <- cm[1, 1]
  fp <- cm[2, 1]
  fn <- cm[1, 2]
  tp <- cm[2, 2]
  c(
    accuracy    = (tn + tp) / sum(cm),
    sensitivity = tp / (tp + fn),
    specificity = tn / (tn + fp),
    F1          = (2 * tp) / (2 * tp + fn + fp)
  )
}
#evaluate_model_merged
#takes the output of fit_and_eval and computes performance indexes after
#stacking the per-fold classification tables into one data frame (i.e. the
#metrics are pooled over all folds rather than averaged per fold)
#---PARAMETERS---
#output_classification: list, the output of a call to fit_and_eval
evaluate_model_merged <- function(output_classification) {
  # Each fold stores its classification table in the "accuracy" component.
  fold_tables <- lapply(output_classification, function(fold) fold$accuracy)
  stacked <- Reduce(bind_rows, fold_tables)
  evaluators_calculation(stacked)
}
#evaluate_model_fold
#takes the output of fit_and_eval and computes performance indexes averaged
#by fold.  NaN values (e.g. 0/0 from a degenerate confusion matrix in one
#fold) are mapped to 0 before averaging, exactly as in the original code.
#The four near-identical copy-pasted metric blocks of the original are
#collapsed into a single loop over metric names.
#---PARAMETERS---
#output_classification: list, the output of a call to fit_and_eval
evaluate_model_fold <- function(output_classification) {
  # One classification table per fold, then one metric vector per fold.
  fold_tables <- lapply(output_classification, function(fold) fold$accuracy)
  fold_evals <- lapply(fold_tables, evaluators_calculation)
  metrics <- c("accuracy", "sensitivity", "specificity", "F1")
  # vapply over the metric names keeps them as names of the result,
  # matching the original c(accuracy = ..., sensitivity = ..., ...) output.
  vapply(metrics, function(metric) {
    values <- vapply(fold_evals, `[[`, numeric(1), metric)
    values[is.nan(values)] <- 0
    mean(values)
  }, numeric(1))
}
|
#LoadParameters
#Returns a one-row data frame holding every tunable constant of the
#simulation.  Building the frame in a single call replaces the original
#pattern (empty 15-column matrix followed by field-by-field assignment),
#whose declared column list had drifted out of sync with the fields
#actually assigned (`crim` was missing from the declared schema and was
#silently appended as a 16th column).  Column order is preserved.
LoadParameters <- function() {
  data.frame(
    seed = 2671,      # RNG seed; alternatives used: 7013, 5510, 2671, 5481, 1994, 9326, 3214, 7816, 6882, 5557
    numtrain = 100,   # number of training runs
    numtest = 50,     # number of test runs
    itertrain = 25,   # number of training iterations
    percGroup = .3,   # percentage of the population in group 2
    indirect1 = .2,   # P(Indirect = 1) for group 1
    indirect2 = .8,   # P(Indirect = 0) for group 2 (see createpopulation)
    daylength = 28,   # length of a day (number of decision epochs)
    traveltime = 1,   # time cost of the "None" (move on) action
    detaintime = 4,   # time cost of detaining someone
    alpha = .6,       # learning rate [0, 1]
    gamma = .8,       # discount ("thoughtfulness") factor [0, 1]
    epsilon = .3,     # exploration parameter [0, 1]
    pop = 3000,       # total population size
    movereward = 0,   # reward for the "None" action
    crim = 4          # number of criminality levels (Crim sampled from 1:crim)
  )
}
################################################
##
################################################
#' Computes the reinforcement learning policy
#'
#' Computes reinforcement learning policy from a given state-action table Q.
#' The policy is the decision-making function of the agent and defines the learning
#' agent's behavior at a given time.
#'
#' @param x Variable which encodes the behavior of the agent. This can be
#' either a \code{matrix}, \code{data.frame} or an \code{\link{rl}} object.
#' @seealso \code{\link{ReinforcementLearning}}
#' @return Returns the learned policy.
#' @examples
#' # Create exemplary state-action table (Q) with 2 actions and 3 states
#' Q <- data.frame("up" = c(-1, 0, 1), "down" = c(-1, 1, 0))
#'
#' # Show best possible action in each state
#' computePolicy(Q)
#'
#' @rdname computePolicy
#' @export
computePolicy <- function(x) {
  # S3 generic: dispatch on the class of `x` (matrix, data.frame or rl;
  # anything else falls through to computePolicy.default, which errors).
  UseMethod("computePolicy", x)
}
#' @export
#' @export
computePolicy.matrix <- function(x) {
  # For every state (row) pick the action (column) holding the highest
  # Q-value; the result is a named character vector state -> action.
  # Ties resolve to the first maximum, as which.max does.
  best_action_idx <- apply(x, 1, which.max)
  setNames(colnames(x)[best_action_idx], rownames(x))
}
#' @export
#' @export
computePolicy.data.frame <- function(x) {
  # Coerce to a matrix and delegate to the matrix method via the generic.
  computePolicy(as.matrix(x))
}
#' @export
#' @export
computePolicy.rl <- function(x) {
  # An `rl` object keeps its state-action table in component `Q`.
  computePolicy(x$Q)
}
#' @export
computePolicy.default <- function(x) {
  # Fallback for unsupported types: computePolicy only understands
  # matrix, data.frame and rl objects.
  stop("Argument invalid.")
}
#' Computes the reinforcement learning policy
#'
#' Deprecated. Please use [ReinforcementLearning::computePolicy()] instead.
#'
#' @param x Variable which encodes the behavior of the agent. This can be
#' either a \code{matrix}, \code{data.frame} or an \code{\link{rl}} object.
#' @seealso \code{\link{ReinforcementLearning}}
#' @return Returns the learned policy.
#' @rdname policy
#' @export
policy <- function(x) {
  # Deprecated alias kept for backward compatibility: warns once per
  # session via .Deprecated and forwards to computePolicy().
  .Deprecated("computePolicy")
  computePolicy(x)
}
################################################
## This function contains all of the parameters
## in one location so that it is easy to update
## the model as needed
################################################
################################################
##This creates the set of choices
################################################
#createsamplefunction
#Samples one "day" of encounters: 3 * daylength DISTINCT people from the
#population (replace=F), arranged as daylength triples (Left, Center,
#Right).  For each epoch it records each person's attributes and builds
#several state encodings ("Left.Center.Right" strings) -- suspicion,
#direct group+suspicion, indirect cue+suspicion, true criminality, and a
#uniformly random control encoding.  NextXState[ii] is the following
#epoch's state; the last epoch's next state is "End".
#Generic State/NextState stay NA here: the caller copies one of the
#encodings into them before building the state diagram.
#NOTE: the `NextXState[ii-1] <- ...` writes are a deliberate trick -- on the
#first iteration the index is 0, which R silently ignores; on later
#iterations they overwrite the previous epoch's temporary "End" marker, so
#only the final epoch keeps "End".
createsamplefunction<-function(population, parameters)
{
# Distinct ids for the whole day: slots [1..d] = Left, [d+1..2d] = Right,
# [2d+1..3d] = Center.
id_num0<-sample(1:nrow(population), parameters$daylength*3, replace=F)
Left<-NA
Right<-NA
Center<-NA
LeftReward<-NA
RightReward<-NA
CenterReward<-NA
LeftCrim<-NA
RightCrim<-NA
CenterCrim<-NA
LeftGroup<-NA
RightGroup<-NA
CenterGroup<-NA
LeftSusp<-NA
RightSusp<-NA
CenterSusp<-NA
LeftSuspCode<-NA
RightSuspCode<-NA
CenterSuspCode<-NA
LeftDirectCode<-NA
RightDirectCode<-NA
CenterDirectCode<-NA
LeftIndirectCode<-NA
RightIndirectCode<-NA
CenterIndirectCode<-NA
LeftRandomCode<-NA
RightRandomCode<-NA
CenterRandomCode<-NA
LeftCrimCode<-NA
RightCrimCode<-NA
CenterCrimCode<-NA
NextSuspState<-NA
NextDirectState<-NA
NextIndirectState<-NA
NextCrimState<-NA
SuspState<-NA
DirectState<-NA
IndirectState<-NA
CrimState<-NA
State<-NA
NextState<-NA
RandomState<-NA
NextRandomState<-NA
for(ii in 1: parameters$daylength)
{
# The three people presented at epoch ii.
id_num1<-id_num0[ii]
id_num2<-id_num0[ii+parameters$daylength]
id_num3<-id_num0[ii+2*parameters$daylength]
Left[ii]<-id_num1
Right[ii]<-id_num2
Center[ii]<-id_num3
LeftReward[ii]<-population$Reward[id_num1]
RightReward[ii]<-population$Reward[id_num2]
CenterReward[ii]<-population$Reward[id_num3]
LeftGroup[ii]<-population$Group[id_num1]
RightGroup[ii]<-population$Group[id_num2]
CenterGroup[ii]<-population$Group[id_num3]
LeftSusp[ii]<-population$Susp[id_num1]
RightSusp[ii]<-population$Susp[id_num2]
CenterSusp[ii]<-population$Susp[id_num3]
LeftCrim[ii]<-population$Crim[id_num1]
RightCrim[ii]<-population$Crim[id_num2]
CenterCrim[ii]<-population$Crim[id_num3]
State[ii]<-NA
# Per-person codes feeding the composite state strings below.
LeftSuspCode[ii]<-population$Susp[id_num1]
RightSuspCode[ii]<-population$Susp[id_num2]
CenterSuspCode[ii]<-population$Susp[id_num3]
LeftDirectCode[ii]<-population$DirectCode[id_num1]
RightDirectCode[ii]<-population$DirectCode[id_num2]
CenterDirectCode[ii]<-population$DirectCode[id_num3]
LeftIndirectCode[ii]<-population$IndirectCode[id_num1]
RightIndirectCode[ii]<-population$IndirectCode[id_num2]
CenterIndirectCode[ii]<-population$IndirectCode[id_num3]
LeftCrimCode[ii]<-population$Crim[id_num1]
RightCrimCode[ii]<-population$Crim[id_num2]
CenterCrimCode[ii]<-population$Crim[id_num3]
# Control condition: a code with no information content.
LeftRandomCode[ii]<-sample(0:9,1)
RightRandomCode[ii]<-sample(0:9,1)
CenterRandomCode[ii]<-sample(0:9,1)
SuspState[ii]<-paste0(LeftSuspCode[ii],'.',CenterSuspCode[ii],'.',RightSuspCode[ii])
DirectState[ii]<-paste0(LeftDirectCode[ii],'.',CenterDirectCode[ii],'.',RightDirectCode[ii])
IndirectState[ii]<-paste0(LeftIndirectCode[ii],'.',CenterIndirectCode[ii],'.',RightIndirectCode[ii])
CrimState[ii]<-paste0(LeftCrimCode[ii],'.',CenterCrimCode[ii],'.',RightCrimCode[ii])
RandomState[ii]<-paste0(LeftRandomCode[ii],'.',CenterRandomCode[ii],'.',RightRandomCode[ii])
# Next-state bookkeeping (see NOTE above): index 0 write is a no-op.
NextSuspState[ii-1]<-paste0(LeftSuspCode[ii],'.',CenterSuspCode[ii],'.',RightSuspCode[ii])
NextSuspState[ii]<-"End"
NextDirectState[ii-1]<-paste0(LeftDirectCode[ii],'.',CenterDirectCode[ii],'.',RightDirectCode[ii])
NextDirectState[ii]<-"End"
NextIndirectState[ii-1]<-paste0(LeftIndirectCode[ii],'.',CenterIndirectCode[ii],'.',RightIndirectCode[ii])
NextIndirectState[ii]<-"End"
NextCrimState[ii-1]<-paste0(LeftCrimCode[ii],'.',CenterCrimCode[ii],'.',RightCrimCode[ii])
NextCrimState[ii]<-"End"
NextRandomState[ii-1]<-paste0(LeftRandomCode[ii],'.',CenterRandomCode[ii],'.',RightRandomCode[ii])
NextRandomState[ii]<-"End"
}
createRLsample<-data.frame(Left,LeftGroup, LeftSusp,LeftSuspCode,LeftDirectCode,
LeftIndirectCode, LeftCrim,LeftRandomCode, LeftReward,
Center,CenterGroup, CenterSusp,CenterSuspCode,CenterDirectCode,
CenterIndirectCode, CenterCrim,CenterRandomCode, CenterReward,
Right, RightGroup, RightSusp, RightSuspCode,
RightDirectCode, RightIndirectCode, RightCrim,RightRandomCode, RightReward,SuspState, DirectState, IndirectState,CrimState,RandomState, NextSuspState, NextDirectState, NextIndirectState,NextCrimState, NextRandomState)
return(createRLsample)
}
################################################
##Create the state diagram
################################################
#statediagramfunction
#Builds the state-transition table ("state map") consumed by the RL fit.
#Starting from the first sampled state at time parameters$daylength, every
#reachable state is expanded: the agent may detain the Left/Center/Right
#person (cost detaintime, reward = that person's reward) or do nothing
#(cost traveltime, reward 0).  When too little time remains to detain, the
#detain actions earn -50 and transition to the terminal "End" state.
#State strings have the form "<time>.<encoded Left.Center.Right state>".
#---PARAMETERS---
#createsample: data frame from createsamplefunction() whose State/NextState
#              columns have been filled with one of the encodings
#parameters  : one-row data frame from LoadParameters()
#flag        : unused; kept for signature compatibility (the original
#              implementation shadowed it with a local variable)
#---FIXES vs. original---
#* createsample$LeftRandom / $CenterRandom / $RightRandom only resolved via
#  R's `$` partial matching against the *RandomCode columns; the full names
#  are used now.
#* stringi::stri_split_fixed(..., n = 2) replaced by a base-R first-dot
#  split (the encoded state itself contains dots).
#* The 19-column row construction, repeated verbatim twelve times, is
#  factored into one helper.
statediagramfunction <- function(createsample, parameters, flag) {
  time <- parameters$daylength
  detain <- parameters$detaintime
  move <- parameters$traveltime

  # One transition row; kk indexes the epoch in createsample.
  maprow <- function(state, action, reward, nextstate, kk) {
    data.frame(
      "State" = state,
      "Action" = action,
      "Reward" = reward,
      "NextState" = nextstate,
      "LeftGroup" = createsample$LeftGroup[kk],
      "LeftSusp" = createsample$LeftSusp[kk],
      "LeftCrim" = createsample$LeftCrim[kk],
      "LeftRandom" = createsample$LeftRandomCode[kk],
      "LeftReward" = createsample$LeftReward[kk],
      "CenterGroup" = createsample$CenterGroup[kk],
      "CenterSusp" = createsample$CenterSusp[kk],
      "CenterCrim" = createsample$CenterCrim[kk],
      "CenterRandom" = createsample$CenterRandomCode[kk],
      "CenterReward" = createsample$CenterReward[kk],
      "RightGroup" = createsample$RightGroup[kk],
      "RightSusp" = createsample$RightSusp[kk],
      "RightCrim" = createsample$RightCrim[kk],
      "RightRandom" = createsample$RightRandomCode[kk],
      "RightReward" = createsample$RightReward[kk]
    )
  }

  # Root state: full day remaining, first sampled triple.
  root <- paste0(time, ".", createsample$State[1])
  after_detain <- paste0(time - detain, ".", createsample$NextState[1])
  after_move <- paste0(time - move, ".", createsample$NextState[1])
  statemap <- rbind(
    maprow(root, "Left", createsample$LeftReward[1], after_detain, 1),
    maprow(root, "Center", createsample$CenterReward[1], after_detain, 1),
    maprow(root, "Right", createsample$RightReward[1], after_detain, 1),
    maprow(root, "None", 0, after_move, 1)
  )

  # Expand NextState entries not yet present as a State.  The original hard
  # cap of 1000 scan iterations is preserved; indexing past nrow(statemap)
  # yields NA and is skipped.
  for (ii in 1:1000) {
    statedummy <- as.character(statemap$NextState[ii])
    if (is.na(statedummy) || statedummy == "End") {
      next
    }
    if (statedummy %in% statemap$State) {
      next  # state already expanded
    }
    # Split "<time>.<rest>" on the FIRST dot only (base-R replacement for
    # stri_split_fixed(..., n = 2)).
    firstdot <- regexpr(".", statedummy, fixed = TRUE)
    time_left <- substr(statedummy, 1, firstdot - 1)
    encoded <- substr(statedummy, firstdot + 1, nchar(statedummy))
    for (kk in 1:nrow(createsample)) {
      if (encoded != as.character(createsample$State[kk])) {
        next
      }
      timedet <- as.numeric(time_left) - detain
      timemove <- as.numeric(time_left) - move
      if (as.numeric(time_left) >= detain) {
        # Enough time to detain: earn that person's reward and advance to
        # the next epoch's state (NA past the last epoch, as originally).
        nxt <- paste0(timedet, ".", createsample$State[kk + 1])
        r1 <- maprow(statedummy, "Left", createsample$LeftReward[kk], nxt, kk)
        r2 <- maprow(statedummy, "Center", createsample$CenterReward[kk], nxt, kk)
        r3 <- maprow(statedummy, "Right", createsample$RightReward[kk], nxt, kk)
      } else {
        # Not enough time: detaining is penalised and ends the episode.
        r1 <- maprow(statedummy, "Left", -50, "End", kk)
        r2 <- maprow(statedummy, "Center", -50, "End", kk)
        r3 <- maprow(statedummy, "Right", -50, "End", kk)
      }
      if (as.numeric(time_left) >= move) {
        r4 <- maprow(statedummy, "None", 0,
                     paste0(timemove, ".", createsample$State[kk + 1]), kk)
      } else {
        r4 <- maprow(statedummy, "None", 0, "End", kk)
      }
      statemap <- rbind(statemap, r1, r2, r3, r4)
    }
  }
  return(statemap)
}
################################################
## Call RL with an existing model
################################################
#runRL
#Continues Q-learning from an existing model on a new batch of transition
#data and returns the updated model object.
#---PARAMETERS---
#RLdat: data frame with State, Action, Reward, NextState columns
#trainmodelold: an rl model from a previous ReinforcementLearning() call
#parameters: one-row data frame from LoadParameters()
runRL <- function(RLdat, trainmodelold, parameters) {
  # ReinforcementLearning() expects character state/action columns.
  for (column in c("State", "NextState", "Action")) {
    RLdat[[column]] <- as.character(RLdat[[column]])
  }
  learn_control <- list(alpha = parameters$alpha,
                        gamma = parameters$gamma,
                        epsilon = parameters$epsilon)
  # Return the refreshed model (seeded with trainmodelold).
  ReinforcementLearning(RLdat, s = "State", a = "Action", r = "Reward",
                        s_new = "NextState", iter = parameters$itertrain,
                        control = learn_control, model = trainmodelold)
}
################################################
## This is the initial reinforcement learning
## routine. Calls RL without an existing model
################################################
#runRLinit
#Initial Q-learning run: identical to runRL() but starts from scratch
#instead of refining an existing model.
#---PARAMETERS---
#RLdat: data frame with State, Action, Reward, NextState columns
#parameters: one-row data frame from LoadParameters()
runRLinit <- function(RLdat, parameters) {
  # ReinforcementLearning() expects character state/action columns.
  for (column in c("State", "NextState", "Action")) {
    RLdat[[column]] <- as.character(RLdat[[column]])
  }
  learn_control <- list(alpha = parameters$alpha,
                        gamma = parameters$gamma,
                        epsilon = parameters$epsilon)
  # Return the freshly trained model.
  ReinforcementLearning(RLdat, s = "State", a = "Action", r = "Reward",
                        s_new = "NextState", iter = parameters$itertrain,
                        control = learn_control)
}
################################################
## This is the population seeding routine.
################################################
#createpopulation
#Seeds the agent population.  Each person gets a group (2 with probability
#percGroup), a criminality level Crim in 1:crim (Reward equals Crim), an
#indirect cue, and a noisy suspicion score Susp = round(Crim + N(mu, 1))
#clamped at 0, where mu is 0 for group 1 and 1 for group 2 (group 2 looks
#systematically more suspicious).  DirectCode/IndirectCode are the encoded
#strings used by the state representations.
#---FIXES vs. original---
#* `set.seed=(parameters$seed)` merely created a local variable named
#  `set.seed` and never seeded the RNG; set.seed() is now actually called,
#  making runs reproducible.
#* Each person was rbind-ed TWICE (once before IndirectCode was set for
#  the current iteration, so the first copy carried a stale value); each
#  person now appears exactly once, so nrow == parameters$pop.
#* Rows are collected in a pre-allocated list and bound once, instead of
#  growing a data frame inside the loop.
createpopulation <- function(parameters) {
  set.seed(parameters$seed)
  people <- vector("list", parameters$pop)
  for (ii in 1:parameters$pop) {
    # Draw order (group, crim, indirect, rnorm) matches the original so a
    # given seed reproduces the same stream of attributes per person.
    group <- sample(1:2, 1, replace = T,
                    prob = c(1 - parameters$percGroup, parameters$percGroup))
    crim <- sample(1:parameters$crim, 1, replace = T)
    if (group == 1) {
      indirect <- sample(0:1, 1, replace = T,
                         prob = c(1 - parameters$indirect1, parameters$indirect1))
      susp <- round(crim + rnorm(1, 0, 1))
    } else {
      indirect <- sample(0:1, 1, replace = T,
                         prob = c(parameters$indirect2, 1 - parameters$indirect2))
      susp <- round(crim + rnorm(1, 1, 1))
    }
    if (susp < 0) {
      susp <- 0
    }
    people[[ii]] <- data.frame(
      ID = sprintf("%03d", ii),
      Group = group,
      Crim = crim,
      Indirect = indirect,
      Reward = crim,
      Susp = susp,
      DirectCode = paste0(group, ".", susp),
      IndirectCode = paste0(indirect, ".", susp),
      stringsAsFactors = F
    )
  }
  population <- do.call(rbind, people)
  return(population)
}
################################################
## Function Policywork
################################################
#policywork
#Joins a learned policy (named character vector state -> best action) with
#the transition table `test`, returning one enriched row per (state,
#chosen action) pair: the person attributes and the NextState needed to
#unroll the policy with RLSolution().
#---PARAMETERS---
#policytest: output of computePolicy() -- a named vector of actions
#test: a state map as produced by statediagramfunction()
policywork<-function(policytest,test)
{
# Turn the named vector into a data frame; the values land in a column
# auto-named "unlist.policytest." and the state names become rownames.
policytest<- data.frame(unlist(policytest))
policytest<-cbind(policytest,State=rownames(policytest))
policytest$State<-as.character(policytest$State)
finalpolicy<-NA
for (ii in 1:nrow(policytest))
{
# data.frame() prefixes syntactically invalid rownames (those starting
# with a digit, i.e. every "<time>.<state>" string) with "X"; strip it.
# NOTE(review): sub() removes only the FIRST "X" anywhere in the string --
# fine for these numeric state names, but fragile in general.
policytest$State[ii]<-sub('X','', policytest$State[ii])
}
# Seed row of NAs; it is dropped by na.omit() at the end.
finalpolicy<-data.frame("State"=NA,"Action"=NA, "Reward"=NA, "LeftCrim"=NA, "LeftGroup"=NA, "LeftSusp"=NA, "LeftReward"=NA, "CenterCrim"=NA, "CenterGroup"=NA, "CenterSusp"=NA, "CenterReward"=NA,"RightCrim"=NA, "RightGroup"=NA, "RightSusp"=NA, "RightReward"=NA, "NextState"=NA)
finalpolicy2<-finalpolicy
# O(nrow(test) * nrow(policytest)) scan: keep the test rows whose state AND
# action match the policy's choice for that state.
for(jj in 1:nrow(test))
{
for (kk in 1:nrow(policytest))
{
if(policytest$State[kk]==test$State[jj]&&as.character(policytest$unlist.policytest.[kk])==as.character(test$Action[jj]))
{ finalpolicy2$State<-policytest$State[kk]
finalpolicy2$Action<-policytest$unlist.policytest.[kk]
finalpolicy2$Reward<-test$Reward[jj]
finalpolicy2$LeftCrim<-test$LeftCrim[jj]
finalpolicy2$LeftGroup<-test$LeftGroup[jj]
finalpolicy2$LeftSusp<-test$LeftSusp[jj]
finalpolicy2$LeftReward<-test$LeftReward[jj]
finalpolicy2$CenterCrim<-test$CenterCrim[jj]
finalpolicy2$CenterGroup<-test$CenterGroup[jj]
finalpolicy2$CenterSusp<-test$CenterSusp[jj]
finalpolicy2$CenterReward<-test$CenterReward[jj]
finalpolicy2$RightCrim<-test$RightCrim[jj]
finalpolicy2$RightGroup<-test$RightGroup[jj]
finalpolicy2$RightSusp<-test$RightSusp[jj]
finalpolicy2$RightReward<-test$RightReward[jj]
finalpolicy2$NextState<-test$NextState[jj]
finalpolicy<-rbind(finalpolicy, finalpolicy2)
}
}
}
# Drop duplicate matches and the all-NA seed row.
finalpolicy<-unique(finalpolicy)
finalpolicy<-na.omit(finalpolicy)
return(finalpolicy)
}
################################################
##This is a routine to find the best soultion
################################################
#RLSolution
#Unrolls a joined policy table (from policywork) into the sequence of
#decisions for one day, following NextState links from the first row until
#"End" or params$daylength steps.  Each row's Group column is set to the
#group of the person the chosen action detains (NA for "None").
#---PARAMETERS---
#finalpolicy: output of policywork()
#params: one-row parameter data frame.  FIX: the original read the global
#  `parameters` directly; it is now an explicit argument whose default
#  still resolves to that global, so existing one-argument callers keep
#  working.
RLSolution <- function(finalpolicy, params = parameters) {
  RLsolution <- finalpolicy[1, ]
  for (ii in 1:params$daylength) {
    if (!is.na(RLsolution$NextState[ii])) {
      if (RLsolution$NextState[ii] == "End") {
        break
      }
      # Append the policy row for the successor state (last match wins, as
      # in the original implementation).
      for (jj in 1:nrow(finalpolicy)) {
        if (RLsolution$NextState[ii] == finalpolicy$State[jj]) {
          RLsolution[ii + 1, ] <- finalpolicy[jj, ]
        }
      }
    }
    # Re-annotate every row's Group each pass (kept inside the ii loop so
    # the row appended above is annotated before a possible break).
    for (kk in 1:nrow(RLsolution)) {
      if (RLsolution$Action[kk] == "Left") {
        RLsolution$Group[kk] <- RLsolution$LeftGroup[kk]
      }
      if (RLsolution$Action[kk] == "Center") {
        RLsolution$Group[kk] <- RLsolution$CenterGroup[kk]
      }
      if (RLsolution$Action[kk] == "Right") {
        RLsolution$Group[kk] <- RLsolution$RightGroup[kk]
      }
      if (RLsolution$Action[kk] == "None") {
        RLsolution$Group[kk] <- NA
      }
    }
  }
  return(RLsolution)
}
################################################
##
################################################
#BestPolicy
#Reference ("ideal") solution: trains a model directly on the fully
#observed test data, extracts its greedy policy and unrolls it into a
#single-day decision sequence.
#---PARAMETERS---
#test: a state map as produced by statediagramfunction()
#parameters: one-row data frame from LoadParameters()
BestPolicy <- function(test, parameters) {
  ideal_model <- runRLinit(test, parameters)
  ideal_policy <- computePolicy(ideal_model)
  ideal_table <- policywork(ideal_policy, test)
  RLSolution(ideal_table)
}
################################################
##
################################################
library(stringi)
library(dplyr)
library(ReinforcementLearning)
library(binaryLogic)
library(stats)
library(hash)
library(ggplot2)
library(testthat)
library(wesanderson)
#setwd("~/Alethea")
## Load the configuration, build the synthetic population, and split it
## 2000 / remainder into train and test subsets (saved for reproducibility).
parameters<-LoadParameters()
population<-createpopulation(parameters)
test_rand<-sample.int(n = nrow(population), size = 2000, replace = F)
trainpopulation <- population[test_rand, ]
testpopulation <- population[-test_rand, ]
save(testpopulation, file = paste0("TestPopulation.",parameters$seed,".rda"))
save(trainpopulation, file = paste0("TrainPopulation.",parameters$seed,".rda"))
## Draw the first day's sample and make one working copy per information
## condition; their State/NextState columns are filled in further below.
createsample<-createsamplefunction(trainpopulation, parameters)
createsampleSusp<-createsample
createsampleDirect<-createsample
createsampleIndirect<-createsample
createsampleCrim<-createsample
createsampleRandom<-createsample
################################################
##
################################################
## Accumulators for the training phase.  They are seeded with a single NA
## "row" and grown with rbind(); the NA seeds are stripped with na.omit()
## after the training loop.
RLsolutionTrainSusp<-NA
RLsolutionTrainDirect<-NA
RLsolutionTrainIndirect<-NA
RLsolutionTrainCrim<-NA
RLsolutionTrainRandom<-NA
RLtableTrainSusp<-NA
RLtableTrainDirect<-NA
RLtableTrainIndirect<-NA
RLtableTrainCrim<-NA
RLtableTrainRandom<-NA
proportionTrainSusp<-NA
proportionTrainDirect<-NA
proportionTrainIndirect<-NA
proportionTrainCrim<-NA
proportionTrainRandom<-NA
## Copy each information condition's coded state columns into the generic
## State/NextState columns read by statediagramfunction().
## BUG FIX: the source columns are named NextSuspState/NextDirectState/...
## (see createsamplefunction), not SuspNextState/DirectNextState/...; the
## old names returned NULL, so NextState was silently never filled in.
## as.character() matches the handling in the train/test loops below and
## guards against factor columns being stored as integer codes.
for(i in seq_len(nrow(createsample)))
{
createsampleSusp$State[i]<-as.character(createsample$SuspState[i])
createsampleSusp$NextState[i]<-as.character(createsample$NextSuspState[i])
createsampleDirect$State[i]<-as.character(createsample$DirectState[i])
createsampleDirect$NextState[i]<-as.character(createsample$NextDirectState[i])
createsampleIndirect$State[i]<-as.character(createsample$IndirectState[i])
createsampleIndirect$NextState[i]<-as.character(createsample$NextIndirectState[i])
createsampleCrim$State[i]<-as.character(createsample$CrimState[i])
createsampleCrim$NextState[i]<-as.character(createsample$NextCrimState[i])
createsampleRandom$State[i]<-as.character(createsample$RandomState[i])
createsampleRandom$NextState[i]<-as.character(createsample$NextRandomState[i])
}
## Build the initial state maps from the first training day and fit the
## five condition-specific RL models from scratch; these serve as warm
## starts for the training loop below.
trainSusp<-statediagramfunction(createsampleSusp, parameters)
trainmodelSusp<-runRLinit(trainSusp, parameters)
trainDirect<-statediagramfunction(createsampleDirect, parameters)
trainmodelDirect<-runRLinit(trainDirect, parameters)
trainIndirect<-statediagramfunction(createsampleIndirect, parameters)
trainmodelIndirect<-runRLinit(trainIndirect, parameters)
trainCrim<-statediagramfunction(createsampleCrim, parameters)
trainmodelCrim<-runRLinit(trainCrim, parameters)
trainRandom<-statediagramfunction(createsampleRandom, parameters)
trainmodelRandom<-runRLinit(trainRandom, parameters)
## Main training loop: each iteration simulates one fresh day drawn from
## the training population, updates each condition's RL model on it, and
## records the greedy-day solution plus the tabulated Group counts of the
## individuals it detained.
for (m in 1:parameters$numtrain)
{
RLtrainsample<-createsamplefunction(trainpopulation, parameters)
RLtrainsampleSusp<-RLtrainsample
RLtrainsampleDirect<-RLtrainsample
RLtrainsampleIndirect<-RLtrainsample
RLtrainsampleCrim<-RLtrainsample
RLtrainsampleRandom<-RLtrainsample
# Copy each condition's coded state columns into the generic
# State/NextState slots that statediagramfunction() reads.
for(i in 1:parameters$daylength)
{
RLtrainsampleSusp$State[i]<-as.character(RLtrainsample$SuspState[i])
RLtrainsampleSusp$NextState[i]<-as.character(RLtrainsample$NextSuspState[i])
RLtrainsampleDirect$State[i]<-as.character(RLtrainsample$DirectState[i])
RLtrainsampleDirect$NextState[i]<-as.character(RLtrainsample$NextDirectState[i])
RLtrainsampleIndirect$State[i]<-as.character(RLtrainsample$IndirectState[i])
RLtrainsampleIndirect$NextState[i]<-as.character(RLtrainsample$NextIndirectState[i])
RLtrainsampleCrim$State[i]<-as.character(RLtrainsample$CrimState[i])
RLtrainsampleCrim$NextState[i]<-as.character(RLtrainsample$NextCrimState[i])
RLtrainsampleRandom$State[i]<-as.character(RLtrainsample$RandomState[i])
RLtrainsampleRandom$NextState[i]<-as.character(RLtrainsample$NextRandomState[i])
}
# Per condition: expand the day into a state map, continue training the
# warm-started model, extract the greedy policy, walk it through the
# day, and tabulate the Group membership of detentions.
# proportion* = table[2,2] / (table[1,2] + table[2,2]) -- presumably the
# Group-2 share of detentions; verify against table()'s factor levels.
trainSusp<-statediagramfunction(RLtrainsampleSusp, parameters)
trainmodelSusp<-runRL(trainSusp, trainmodelSusp,parameters)
policytrainSusp<-computePolicy(trainmodelSusp)
finalpolicytrainSusp<-policywork(policytrainSusp,trainSusp)
RLsolutionTrainSusp2<-RLSolution(finalpolicytrainSusp)
RLtableTrainSusp2<-as.data.frame(table(RLsolutionTrainSusp2$Group))
proportionTrainSusp2<-RLtableTrainSusp2[2,2]/(RLtableTrainSusp2[1,2]+RLtableTrainSusp2[2,2])
RLsolutionTrainSusp<-rbind(RLsolutionTrainSusp,RLsolutionTrainSusp2)
RLtableTrainSusp<-rbind(RLtableTrainSusp,RLtableTrainSusp2)
proportionTrainSusp<-rbind(proportionTrainSusp,proportionTrainSusp2)
trainDirect<-statediagramfunction(RLtrainsampleDirect, parameters)
trainmodelDirect<-runRL(trainDirect, trainmodelDirect,parameters)
policytrainDirect<-computePolicy(trainmodelDirect)
finalpolicytrainDirect<-policywork(policytrainDirect,trainDirect)
RLsolutionTrainDirect2<-RLSolution(finalpolicytrainDirect)
RLtableTrainDirect2<-as.data.frame(table(RLsolutionTrainDirect2$Group))
proportionTrainDirect2<-RLtableTrainDirect2[2,2]/(RLtableTrainDirect2[1,2]+RLtableTrainDirect2[2,2])
RLsolutionTrainDirect<-rbind(RLsolutionTrainDirect,RLsolutionTrainDirect2)
RLtableTrainDirect<-rbind(RLtableTrainDirect,RLtableTrainDirect2)
proportionTrainDirect<-rbind(proportionTrainDirect,proportionTrainDirect2)
trainIndirect<-statediagramfunction(RLtrainsampleIndirect, parameters)
trainmodelIndirect<-runRL(trainIndirect, trainmodelIndirect,parameters)
policytrainIndirect<-computePolicy(trainmodelIndirect)
finalpolicytrainIndirect<-policywork(policytrainIndirect,trainIndirect)
RLsolutionTrainIndirect2<-RLSolution(finalpolicytrainIndirect)
RLtableTrainIndirect2<-as.data.frame(table(RLsolutionTrainIndirect2$Group))
proportionTrainIndirect2<-RLtableTrainIndirect2[2,2]/(RLtableTrainIndirect2[1,2]+RLtableTrainIndirect2[2,2])
RLsolutionTrainIndirect<-rbind(RLsolutionTrainIndirect,RLsolutionTrainIndirect2)
RLtableTrainIndirect<-rbind(RLtableTrainIndirect,RLtableTrainIndirect2)
proportionTrainIndirect<-rbind(proportionTrainIndirect,proportionTrainIndirect2)
trainCrim<-statediagramfunction(RLtrainsampleCrim, parameters)
trainmodelCrim<-runRL(trainCrim, trainmodelCrim,parameters)
policytrainCrim<-computePolicy(trainmodelCrim)
finalpolicytrainCrim<-policywork(policytrainCrim,trainCrim)
RLsolutionTrainCrim2<-RLSolution(finalpolicytrainCrim)
RLtableTrainCrim2<-as.data.frame(table(RLsolutionTrainCrim2$Group))
proportionTrainCrim2<-RLtableTrainCrim2[2,2]/(RLtableTrainCrim2[1,2]+RLtableTrainCrim2[2,2])
RLsolutionTrainCrim<-rbind(RLsolutionTrainCrim,RLsolutionTrainCrim2)
RLtableTrainCrim<-rbind(RLtableTrainCrim,RLtableTrainCrim2)
proportionTrainCrim<-rbind(proportionTrainCrim,proportionTrainCrim2)
trainRandom<-statediagramfunction(RLtrainsampleRandom, parameters)
trainmodelRandom<-runRL(trainRandom, trainmodelRandom,parameters)
policytrainRandom<-computePolicy(trainmodelRandom)
finalpolicytrainRandom<-policywork(policytrainRandom,trainRandom)
RLsolutionTrainRandom2<-RLSolution(finalpolicytrainRandom)
RLtableTrainRandom2<-as.data.frame(table(RLsolutionTrainRandom2$Group))
proportionTrainRandom2<-RLtableTrainRandom2[2,2]/(RLtableTrainRandom2[1,2]+RLtableTrainRandom2[2,2])
RLsolutionTrainRandom<-rbind(RLsolutionTrainRandom,RLsolutionTrainRandom2)
RLtableTrainRandom<-rbind(RLtableTrainRandom,RLtableTrainRandom2)
proportionTrainRandom<-rbind(proportionTrainRandom,proportionTrainRandom2)
}
## Drop the NA seed rows left over from initialising the accumulators.
for (acc_name in c("RLtableTrainSusp", "RLtableTrainDirect",
                   "RLtableTrainIndirect", "RLtableTrainCrim",
                   "RLtableTrainRandom",
                   "proportionTrainSusp", "proportionTrainDirect",
                   "proportionTrainIndirect", "proportionTrainCrim",
                   "proportionTrainRandom")) {
  assign(acc_name, na.omit(get(acc_name)))
}
## Persist the per-run training proportions and the five trained models.
save(proportionTrainSusp,proportionTrainDirect,proportionTrainIndirect,
     proportionTrainCrim,proportionTrainRandom,
     file = paste0("proportionTrain.seed.",parameters$seed,"daylength.",parameters$daylength,".numtrain.",parameters$numtrain,".numtest.",parameters$numtest,".itertrain.",parameters$itertrain,".rda"))
save(trainmodelSusp,trainmodelDirect,trainmodelIndirect,trainmodelCrim,trainmodelRandom,
     file = paste0("TrainModels.seed.",parameters$seed,"daylength.",parameters$daylength,".numtrain.",parameters$numtrain,".numtest.",parameters$numtest,".itertrain.",parameters$itertrain,".rda"))
################################################
##
################################################
## Initialise the test-phase accumulators; each starts as a single NA
## "seed row" that na.omit() removes after the test loop.
for (acc_name in c("RLsolutionSusp", "RLsolutionDirect", "RLsolutionIndirect",
                   "RLsolutionCrim", "RLsolutionRandom",
                   "RLtableSusp", "RLtableDirect", "RLtableIndirect",
                   "RLtableCrim", "RLtableRandom",
                   "proportionSusp", "proportionDirect", "proportionIndirect",
                   "proportionCrim", "proportionRandom")) {
  assign(acc_name, NA)
}
## Test loop: for each of numtest simulated days drawn from the held-out
## test population, continue updating each trained model (runRL warm
## start), extract its greedy policy, walk the day, and record the Group
## composition of detentions.
for(j in 1:parameters$numtest)
{
RLtestsample<-createsamplefunction(testpopulation, parameters)
RLtestsampleSusp<-RLtestsample
RLtestsampleDirect<-RLtestsample
RLtestsampleIndirect<-RLtestsample
RLtestsampleCrim<-RLtestsample
RLtestsampleRandom<-RLtestsample
# Copy each condition's coded state columns into the generic
# State/NextState slots that statediagramfunction() reads.
for(i in 1:nrow(RLtestsample))
{
RLtestsampleSusp$State[i]<-as.character(RLtestsample$SuspState[i])
RLtestsampleSusp$NextState[i]<-as.character(RLtestsample$NextSuspState[i])
RLtestsampleDirect$State[i]<-as.character(RLtestsample$DirectState[i])
RLtestsampleDirect$NextState[i]<-as.character(RLtestsample$NextDirectState[i])
RLtestsampleIndirect$State[i]<-as.character(RLtestsample$IndirectState[i])
RLtestsampleIndirect$NextState[i]<-as.character(RLtestsample$NextIndirectState[i])
RLtestsampleCrim$State[i]<-as.character(RLtestsample$CrimState[i])
RLtestsampleCrim$NextState[i]<-as.character(RLtestsample$NextCrimState[i])
RLtestsampleRandom$State[i]<-as.character(RLtestsample$RandomState[i])
RLtestsampleRandom$NextState[i]<-as.character(RLtestsample$NextRandomState[i])
}
# Note: runRL() keeps updating the train models during testing, so the
# models continue learning across test days.
testSusp<-statediagramfunction(RLtestsampleSusp, parameters)
testmodelSusp<-runRL(testSusp,trainmodelSusp,parameters)
testDirect<-statediagramfunction(RLtestsampleDirect, parameters)
testmodelDirect<-runRL(testDirect,trainmodelDirect,parameters)
testIndirect<-statediagramfunction(RLtestsampleIndirect, parameters)
testmodelIndirect<-runRL(testIndirect,trainmodelIndirect,parameters)
testCrim<-statediagramfunction(RLtestsampleCrim, parameters)
testmodelCrim<-runRL(testCrim,trainmodelCrim,parameters)
testRandom<-statediagramfunction(RLtestsampleRandom, parameters)
testmodelRandom<-runRL(testRandom,trainmodelRandom,parameters)
# proportion* = table[2,2] / (table[1,2] + table[2,2]) -- presumably the
# Group-2 share of detentions; verify against table()'s factor levels.
policytestSusp<-computePolicy(testmodelSusp)
finalpolicySusp<-policywork(policytestSusp,testSusp)
RLsolutionSusp2<-RLSolution(finalpolicySusp)
RLtableSusp2<-as.data.frame(table(RLsolutionSusp2$Group))
proportionSusp2<-RLtableSusp2[2,2]/(RLtableSusp2[1,2]+RLtableSusp2[2,2])
RLsolutionSusp<-rbind(RLsolutionSusp,RLsolutionSusp2)
RLtableSusp<-rbind(RLtableSusp,RLtableSusp2)
proportionSusp<-rbind(proportionSusp,proportionSusp2)
policytestDirect<-computePolicy(testmodelDirect)
finalpolicyDirect<-policywork(policytestDirect,testDirect)
RLsolutionDirect2<-RLSolution(finalpolicyDirect)
RLtableDirect2<-as.data.frame(table(RLsolutionDirect2$Group))
proportionDirect2<-RLtableDirect2[2,2]/(RLtableDirect2[1,2]+RLtableDirect2[2,2])
RLsolutionDirect<-rbind(RLsolutionDirect,RLsolutionDirect2)
RLtableDirect<-rbind(RLtableDirect,RLtableDirect2)
proportionDirect<-rbind(proportionDirect,proportionDirect2)
policytestIndirect<-computePolicy(testmodelIndirect)
finalpolicyIndirect<-policywork(policytestIndirect, testIndirect)
RLsolutionIndirect2<-RLSolution(finalpolicyIndirect)
RLtableIndirect2<-as.data.frame(table(RLsolutionIndirect2$Group))
proportionIndirect2<-RLtableIndirect2[2,2]/(RLtableIndirect2[1,2]+RLtableIndirect2[2,2])
RLsolutionIndirect<-rbind(RLsolutionIndirect,RLsolutionIndirect2)
RLtableIndirect<-rbind(RLtableIndirect,RLtableIndirect2)
proportionIndirect<-rbind(proportionIndirect,proportionIndirect2)
policytestCrim<-computePolicy(testmodelCrim)
finalpolicyCrim<-policywork(policytestCrim,testCrim)
RLsolutionCrim2<-RLSolution(finalpolicyCrim)
RLtableCrim2<-as.data.frame(table(RLsolutionCrim2$Group))
proportionCrim2<-RLtableCrim2[2,2]/(RLtableCrim2[1,2]+RLtableCrim2[2,2])
RLsolutionCrim<-rbind(RLsolutionCrim,RLsolutionCrim2)
RLtableCrim<-rbind(RLtableCrim,RLtableCrim2)
proportionCrim<-rbind(proportionCrim,proportionCrim2)
policytestRandom<-computePolicy(testmodelRandom)
finalpolicyRandom<-policywork(policytestRandom,testRandom)
RLsolutionRandom2<-RLSolution(finalpolicyRandom)
RLtableRandom2<-as.data.frame(table(RLsolutionRandom2$Group))
proportionRandom2<-RLtableRandom2[2,2]/(RLtableRandom2[1,2]+RLtableRandom2[2,2])
RLsolutionRandom<-rbind(RLsolutionRandom,RLsolutionRandom2)
RLtableRandom<-rbind(RLtableRandom,RLtableRandom2)
proportionRandom<-rbind(proportionRandom,proportionRandom2)
}
## Drop the NA seed rows left over from initialising the accumulators.
for (acc_name in c("RLtableSusp", "RLtableDirect", "RLtableIndirect",
                   "RLtableCrim", "RLtableRandom",
                   "proportionSusp", "proportionDirect",
                   "proportionIndirect", "proportionCrim",
                   "proportionRandom")) {
  assign(acc_name, na.omit(get(acc_name)))
}
## Persist the accumulated per-day test solutions.
save(RLsolutionSusp,RLsolutionDirect,RLsolutionIndirect,RLsolutionCrim,RLsolutionRandom, file = paste0("RLsolution.daylength.",parameters$daylength,".numtrain.",parameters$numtrain,".numtest.",parameters$numtest,".itertrain.",parameters$itertrain,".rda"))
################################################
##
################################################
## Compute the "ideal" benchmark: a policy trained from scratch on the
## final test day's state map for each information condition.
RLsolutionSuspIdeal<-BestPolicy(testSusp,parameters)
RLsolutionDirectIdeal<-BestPolicy(testDirect,parameters)
RLsolutionIndirectIdeal<-BestPolicy(testIndirect,parameters)
RLsolutionCrimIdeal<-BestPolicy(testCrim,parameters)
RLsolutionRandomIdeal<-BestPolicy(testRandom,parameters)
## BUG FIX: the Ideal results file previously saved RLsolutionIndirect
## (the accumulated test-loop result) instead of RLsolutionIndirectIdeal.
save(RLsolutionSuspIdeal,RLsolutionDirectIdeal,RLsolutionIndirectIdeal,RLsolutionCrimIdeal,RLsolutionRandomIdeal, file = paste0("RLsolutionIdeal.daylength.",parameters$daylength,".numtrain.",parameters$numtrain,".numtest.",parameters$numtest,".itertrain.",parameters$itertrain,".rda"))
save(proportionSusp,proportionDirect,proportionIndirect,proportionCrim,proportionRandom,file =paste0("proportion.",parameters$seed,".",parameters$numtrain,".",parameters$numtest,".",parameters$itertrain,".rda"))
| /Ready to Upload/Alethea.2671.R | no_license | Kgatliffe/Alethea | R | false | false | 47,180 | r |
LoadParameters<-function()
{
  ## Central definition of every model parameter so the configuration
  ## lives in a single place.  Returns a one-row data frame; `crim` is
  ## appended after the main block (so it stays the last column, as the
  ## original column layout had it).
  parameters <- data.frame(
    seed       = 2671, # Seed: 7013, 5510, 2671, 5481, 1994, 9326, 3214, 7816, 6882, 5557
    numtrain   = 100,  # Number of training runs
    numtest    = 50,   # Number of test runs
    itertrain  = 25,   # RL iterations per run
    percGroup  = .3,   # Proportion of the population in the biased group
    indirect1  = .2,   # Indirect-information probability, group 1
    indirect2  = .8,   # Indirect-information probability, group 2
    daylength  = 28,   # Length of a day (time steps)
    traveltime = 1,    # Time cost of moving on without detaining
    detaintime = 4,    # Time cost of detaining someone
    alpha      = .6,   # Learning rate [0,1]
    gamma      = .8,   # Thoughtfulness (discount) factor [0,1]
    epsilon    = .3,   # Exploration parameter [0,1]
    pop        = 3000, # Total population size
    movereward = 0)    # Reward for the "None" (move on) action
  parameters$crim <- 4 # Number of criminality levels
  return(parameters)
}
################################################
##
################################################
#' Computes the reinforcement learning policy
#'
#' Computes reinforcement learning policy from a given state-action table Q.
#' The policy is the decision-making function of the agent and defines the learning
#' agent's behavior at a given time.
#'
#' @param x Variable which encodes the behavior of the agent. This can be
#' either a \code{matrix}, \code{data.frame} or an \code{\link{rl}} object.
#' @seealso \code{\link{ReinforcementLearning}}
#' @return Returns the learned policy.
#' @examples
#' # Create exemplary state-action table (Q) with 2 actions and 3 states
#' Q <- data.frame("up" = c(-1, 0, 1), "down" = c(-1, 1, 0))
#'
#' # Show best possible action in each state
#' computePolicy(Q)
#'
#' @rdname computePolicy
#' @export
# S3 generic: dispatch on the class of `x` (matrix, data.frame, rl, ...).
computePolicy <- function(x) UseMethod("computePolicy", x)
#' @export
computePolicy.matrix <- function(x) {
  # For every state (row), pick the action (column name) with the highest
  # Q-value; name the result vector by state (row names).
  best_action <- apply(x, 1, which.max)
  setNames(colnames(x)[best_action], rownames(x))
}
#' @export
# Data-frame method: delegate to the matrix method via as.matrix().
computePolicy.data.frame <- function(x) computePolicy(as.matrix(x))
#' @export
# rl-object method: the state-action table lives in x$Q.
computePolicy.rl <- function(x) computePolicy(x$Q)
#' @export
# Fallback for unsupported argument types: fail loudly.
computePolicy.default <- function(x) stop("Argument invalid.")
#' Computes the reinforcement learning policy
#'
#' Deprecated. Please use [ReinforcementLearning::computePolicy()] instead.
#'
#' @param x Variable which encodes the behavior of the agent. This can be
#' either a \code{matrix}, \code{data.frame} or an \code{\link{rl}} object.
#' @seealso \code{\link{ReinforcementLearning}}
#' @return Returns the learned policy.
#' @rdname policy
#' @export
policy <- function(x) {
  # Deprecated alias: emit a deprecation message, then delegate.
  .Deprecated("computePolicy")
  computePolicy(x)
}
################################################
## This function contains all of the parameters
## in one location so that it is easy to update
## the model as needed
################################################
################################################
##This creates the set of choices
################################################
createsamplefunction<-function(population, parameters)
{
## Draw 3*daylength distinct individuals from `population` and lay them
## out as one row per time step with a Left/Center/Right candidate each.
## For every information condition (Susp / Direct / Indirect / Crim /
## Random) a state code string "L.C.R" is built, plus the matching
## Next*State chain: each step's Next*State is the following step's code,
## and the final step's is "End".  RandomCode is a fresh uniform 0-9 draw
## per candidate (control condition).
id_num0<-sample(1:nrow(population), parameters$daylength*3, replace=F)
Left<-NA
Right<-NA
Center<-NA
LeftReward<-NA
RightReward<-NA
CenterReward<-NA
LeftCrim<-NA
RightCrim<-NA
CenterCrim<-NA
LeftGroup<-NA
RightGroup<-NA
CenterGroup<-NA
LeftSusp<-NA
RightSusp<-NA
CenterSusp<-NA
LeftSuspCode<-NA
RightSuspCode<-NA
CenterSuspCode<-NA
LeftDirectCode<-NA
RightDirectCode<-NA
CenterDirectCode<-NA
LeftIndirectCode<-NA
RightIndirectCode<-NA
CenterIndirectCode<-NA
LeftRandomCode<-NA
RightRandomCode<-NA
CenterRandomCode<-NA
LeftCrimCode<-NA
RightCrimCode<-NA
CenterCrimCode<-NA
NextSuspState<-NA
NextDirectState<-NA
NextIndirectState<-NA
NextCrimState<-NA
SuspState<-NA
DirectState<-NA
IndirectState<-NA
CrimState<-NA
State<-NA
NextState<-NA
RandomState<-NA
NextRandomState<-NA
# Candidates ii, ii+daylength and ii+2*daylength become the Left, Right
# and Center options at time step ii.
for(ii in 1: parameters$daylength)
{
id_num1<-id_num0[ii]
id_num2<-id_num0[ii+parameters$daylength]
id_num3<-id_num0[ii+2*parameters$daylength]
Left[ii]<-id_num1
Right[ii]<-id_num2
Center[ii]<-id_num3
LeftReward[ii]<-population$Reward[id_num1]
RightReward[ii]<-population$Reward[id_num2]
CenterReward[ii]<-population$Reward[id_num3]
LeftGroup[ii]<-population$Group[id_num1]
RightGroup[ii]<-population$Group[id_num2]
CenterGroup[ii]<-population$Group[id_num3]
LeftSusp[ii]<-population$Susp[id_num1]
RightSusp[ii]<-population$Susp[id_num2]
CenterSusp[ii]<-population$Susp[id_num3]
LeftCrim[ii]<-population$Crim[id_num1]
RightCrim[ii]<-population$Crim[id_num2]
CenterCrim[ii]<-population$Crim[id_num3]
State[ii]<-NA
LeftSuspCode[ii]<-population$Susp[id_num1]
RightSuspCode[ii]<-population$Susp[id_num2]
CenterSuspCode[ii]<-population$Susp[id_num3]
LeftDirectCode[ii]<-population$DirectCode[id_num1]
RightDirectCode[ii]<-population$DirectCode[id_num2]
CenterDirectCode[ii]<-population$DirectCode[id_num3]
LeftIndirectCode[ii]<-population$IndirectCode[id_num1]
RightIndirectCode[ii]<-population$IndirectCode[id_num2]
CenterIndirectCode[ii]<-population$IndirectCode[id_num3]
LeftCrimCode[ii]<-population$Crim[id_num1]
RightCrimCode[ii]<-population$Crim[id_num2]
CenterCrimCode[ii]<-population$Crim[id_num3]
LeftRandomCode[ii]<-sample(0:9,1)
RightRandomCode[ii]<-sample(0:9,1)
CenterRandomCode[ii]<-sample(0:9,1)
SuspState[ii]<-paste0(LeftSuspCode[ii],'.',CenterSuspCode[ii],'.',RightSuspCode[ii])
DirectState[ii]<-paste0(LeftDirectCode[ii],'.',CenterDirectCode[ii],'.',RightDirectCode[ii])
IndirectState[ii]<-paste0(LeftIndirectCode[ii],'.',CenterIndirectCode[ii],'.',RightIndirectCode[ii])
CrimState[ii]<-paste0(LeftCrimCode[ii],'.',CenterCrimCode[ii],'.',RightCrimCode[ii])
RandomState[ii]<-paste0(LeftRandomCode[ii],'.',CenterRandomCode[ii],'.',RightRandomCode[ii])
# Link step ii-1 to step ii; on the first pass ii-1 == 0 and assignment
# to index 0 is a silent no-op.  Each pass also stamps the current step
# as "End", which survives only on the final step.
NextSuspState[ii-1]<-paste0(LeftSuspCode[ii],'.',CenterSuspCode[ii],'.',RightSuspCode[ii])
NextSuspState[ii]<-"End"
NextDirectState[ii-1]<-paste0(LeftDirectCode[ii],'.',CenterDirectCode[ii],'.',RightDirectCode[ii])
NextDirectState[ii]<-"End"
NextIndirectState[ii-1]<-paste0(LeftIndirectCode[ii],'.',CenterIndirectCode[ii],'.',RightIndirectCode[ii])
NextIndirectState[ii]<-"End"
NextCrimState[ii-1]<-paste0(LeftCrimCode[ii],'.',CenterCrimCode[ii],'.',RightCrimCode[ii])
NextCrimState[ii]<-"End"
NextRandomState[ii-1]<-paste0(LeftRandomCode[ii],'.',CenterRandomCode[ii],'.',RightRandomCode[ii])
NextRandomState[ii]<-"End"
}
# Note the column naming: state chains are Next*State (NextSuspState,
# NextDirectState, ...) -- callers must use these exact names.
createRLsample<-data.frame(Left,LeftGroup, LeftSusp,LeftSuspCode,LeftDirectCode,
LeftIndirectCode, LeftCrim,LeftRandomCode, LeftReward,
Center,CenterGroup, CenterSusp,CenterSuspCode,CenterDirectCode,
CenterIndirectCode, CenterCrim,CenterRandomCode, CenterReward,
Right, RightGroup, RightSusp, RightSuspCode,
RightDirectCode, RightIndirectCode, RightCrim,RightRandomCode, RightReward,SuspState, DirectState, IndirectState,CrimState,RandomState, NextSuspState, NextDirectState, NextIndirectState,NextCrimState, NextRandomState)
return(createRLsample)
}
################################################
##Create the state diagram
################################################
statediagramfunction <- function(createsample, parameters,flag) {
## Expand one day's sample (a row per time step with Left/Center/Right
## candidates) into an RL experience table.  Each state is encoded as
## "<time remaining>.<State code>".  Actions Left/Center/Right detain
## that candidate (costing detaintime, paying that candidate's Reward);
## "None" moves on (costing traveltime, reward 0).  Detaining with less
## time remaining than detaintime is penalised with -50 and transitions
## to the terminal "End" state.
## NOTE(review): the `flag` argument is never used -- a local `flag` is
## created in the closure loop below; confirm the parameter can go.
time = parameters$daylength
detain = parameters$detaintime
move = parameters$traveltime
# Seed the map with the four actions available in the initial state.
statemap<-data.frame("State"=paste0(time,".",createsample$State[1]),
"Action"="Left",
"Reward"=createsample$LeftReward[1],
"NextState"=paste0(time-detain,".",createsample$NextState[1]),
"LeftGroup"=createsample$LeftGroup[1],
"LeftSusp"=createsample$LeftSusp[1],
"LeftCrim"=createsample$LeftCrim[1],
"LeftRandom"=createsample$LeftRandomCode[1],
"LeftReward"=createsample$LeftReward[1],
"CenterGroup"=createsample$CenterGroup[1],
"CenterSusp"=createsample$CenterSusp[1],
"CenterCrim"=createsample$CenterCrim[1],
"CenterRandom"=createsample$CenterRandomCode[1],
"CenterReward"=createsample$CenterReward[1],
"RightGroup"=createsample$RightGroup[1],
"RightSusp"=createsample$RightSusp[1],
"RightCrim"=createsample$RightCrim[1],
"RightRandom"=createsample$RightRandomCode[1],
"RightReward"=createsample$RightReward[1])
# NOTE(review): createsample has no LeftRandom/CenterRandom/RightRandom
# columns -- the next three seed rows reach LeftRandomCode/... only via
# R's `$` partial matching on data frames; later rows spell it out.
nextrow<-data.frame("State"=paste0(time,".",createsample$State[1]),
"Action"="Center",
"Reward"=createsample$CenterReward[1],
"NextState"=paste0(time-detain,".",createsample$NextState[1]),
"LeftGroup"=createsample$LeftGroup[1],
"LeftSusp"=createsample$LeftSusp[1],
"LeftCrim"=createsample$LeftCrim[1],
"LeftRandom"=createsample$LeftRandom[1],
"LeftReward"=createsample$LeftReward[1],
"CenterGroup"=createsample$CenterGroup[1],
"CenterSusp"=createsample$CenterSusp[1],
"CenterCrim"=createsample$CenterCrim[1],
"CenterRandom"=createsample$CenterRandom[1],
"CenterReward"=createsample$CenterReward[1],
"RightGroup"=createsample$RightGroup[1],
"RightSusp"=createsample$RightSusp[1],
"RightCrim"=createsample$RightCrim[1],
"RightRandom"=createsample$RightRandom[1],
"RightReward"=createsample$RightReward[1])
nextrow2<-data.frame("State"=paste0(time,".",createsample$State[1]),
"Action"="Right",
"Reward"=createsample$RightReward[1],
"NextState"=paste0(time-detain,".",createsample$NextState[1]),
"LeftGroup"=createsample$LeftGroup[1],
"LeftSusp"=createsample$LeftSusp[1],
"LeftCrim"=createsample$LeftCrim[1],
"LeftRandom"=createsample$LeftRandom[1],
"LeftReward"=createsample$LeftReward[1],
"CenterGroup"=createsample$CenterGroup[1],
"CenterSusp"=createsample$CenterSusp[1],
"CenterCrim"=createsample$CenterCrim[1],
"CenterRandom"=createsample$CenterRandom[1],
"CenterReward"=createsample$CenterReward[1],
"RightGroup"=createsample$RightGroup[1],
"RightSusp"=createsample$RightSusp[1],
"RightCrim"=createsample$RightCrim[1],
"RightRandom"=createsample$RightRandom[1],
"RightReward"=createsample$RightReward[1])
statemap<-rbind(statemap,nextrow,nextrow2)
# "None" (move on) costs traveltime instead of detaintime and pays 0.
nextrow<-data.frame("State"=paste0(time,".",createsample$State[1]),
"Action"="None",
"Reward"=0,
"NextState"=paste0(time-move,".",createsample$NextState[1]),
"LeftGroup"=createsample$LeftGroup[1],
"LeftSusp"=createsample$LeftSusp[1],
"LeftCrim"=createsample$LeftCrim[1],
"LeftRandom"=createsample$LeftRandom[1],
"LeftReward"=createsample$LeftReward[1],
"CenterGroup"=createsample$CenterGroup[1],
"CenterSusp"=createsample$CenterSusp[1],
"CenterCrim"=createsample$CenterCrim[1],
"CenterRandom"=createsample$CenterRandom[1],
"CenterReward"=createsample$CenterReward[1],
"RightGroup"=createsample$RightGroup[1],
"RightSusp"=createsample$RightSusp[1],
"RightCrim"=createsample$RightCrim[1],
"RightRandom"=createsample$RightRandom[1],
"RightReward"=createsample$RightReward[1])
statemap<-rbind(statemap,nextrow)
## Transitive closure: scan statemap for NextStates that have no row of
## their own yet and expand them with all four actions.  The 1:1000
## bound is a hard cap on rows examined, not a convergence test.
for(ii in 1:1000)
{
statedummy<-as.character(statemap$NextState[ii])
flag<-0
if (is.na(statedummy))
{}
else if (statedummy=="End")
{}
else
{
# flag == 1 means the state already has rows; skip expansion.
for(jj in 1:nrow(statemap))
{
if (statedummy==statemap$State[jj])
{
flag<-1
}
}
if(flag==0)
{
# Split "time.code" into the time prefix and the candidate-code tail.
openstate<-unlist(stri_split_fixed(as.character(statedummy),".", fixed = TRUE, n=2))
for (kk in 1:nrow(createsample))
{
if (openstate[2]==as.character(createsample$State[kk]))
{
timedet<-as.numeric(openstate[1])-detain
timemove<-as.numeric(openstate[1])-move
# Enough time left to detain: normal rewards, advance to step kk+1.
# NOTE(review): on the last sample row, createsample$State[kk+1] is
# NA, so NextState becomes "t.NA" -- confirm this is intended.
if(as.numeric(openstate[1])>=detain)
{
nextrow<-data.frame("State"=statedummy,
"Action"="Left",
"Reward"=createsample$LeftReward[kk],
"NextState"=paste0(timedet,".",createsample$State[kk+1]),
"LeftGroup"=createsample$LeftGroup[kk],
"LeftSusp"=createsample$LeftSusp[kk],
"LeftCrim"=createsample$LeftCrim[kk],
"LeftRandom"=createsample$LeftRandomCode[kk],
"LeftReward"=createsample$LeftReward[kk],
"CenterGroup"=createsample$CenterGroup[kk],
"CenterSusp"=createsample$CenterSusp[kk],
"CenterCrim"=createsample$CenterCrim[kk],
"CenterRandom"=createsample$CenterRandomCode[kk],
"CenterReward"=createsample$CenterReward[kk],
"RightGroup"=createsample$RightGroup[kk],
"RightSusp"=createsample$RightSusp[kk],
"RightCrim"=createsample$RightCrim[kk],
"RightRandom"=createsample$RightRandomCode[kk],
"RightReward"=createsample$RightReward[kk])
nextrow2<-data.frame("State"=statedummy,
"Action"="Center",
"Reward"=createsample$CenterReward[kk],
"NextState"=paste0(timedet,".",createsample$State[kk+1]),
"LeftGroup"=createsample$LeftGroup[kk],
"LeftSusp"=createsample$LeftSusp[kk],
"LeftCrim"=createsample$LeftCrim[kk],
"LeftRandom"=createsample$LeftRandomCode[kk],
"LeftReward"=createsample$LeftReward[kk],
"CenterGroup"=createsample$CenterGroup[kk],
"CenterSusp"=createsample$CenterSusp[kk],
"CenterCrim"=createsample$CenterCrim[kk],
"CenterRandom"=createsample$CenterRandomCode[kk],
"CenterReward"=createsample$CenterReward[kk],
"RightGroup"=createsample$RightGroup[kk],
"RightSusp"=createsample$RightSusp[kk],
"RightCrim"=createsample$RightCrim[kk],
"RightRandom"=createsample$RightRandomCode[kk],
"RightReward"=createsample$RightReward[kk])
nextrow3<-data.frame("State"=statedummy,
"Action"="Right",
"Reward"=createsample$RightReward[kk],
"NextState"=paste0(timedet,".",createsample$State[kk+1]),
"LeftGroup"=createsample$LeftGroup[kk],
"LeftSusp"=createsample$LeftSusp[kk],
"LeftCrim"=createsample$LeftCrim[kk],
"LeftRandom"=createsample$LeftRandomCode[kk],
"LeftReward"=createsample$LeftReward[kk],
"CenterGroup"=createsample$CenterGroup[kk],
"CenterSusp"=createsample$CenterSusp[kk],
"CenterCrim"=createsample$CenterCrim[kk],
"CenterRandom"=createsample$CenterRandomCode[kk],
"CenterReward"=createsample$CenterReward[kk],
"RightGroup"=createsample$RightGroup[kk],
"RightSusp"=createsample$RightSusp[kk],
"RightCrim"=createsample$RightCrim[kk],
"RightRandom"=createsample$RightRandomCode[kk],
"RightReward"=createsample$RightReward[kk])
}
# Not enough time left to detain: penalty -50, go straight to "End".
else if(as.numeric(openstate[1])<detain)
{
nextrow<-data.frame("State"=statedummy,
"Action"="Left",
"Reward"=-50,
"NextState"="End",
"LeftGroup"=createsample$LeftGroup[kk],
"LeftSusp"=createsample$LeftSusp[kk],
"LeftCrim"=createsample$LeftCrim[kk],
"LeftRandom"=createsample$LeftRandomCode[kk],
"LeftReward"=createsample$LeftReward[kk],
"CenterGroup"=createsample$CenterGroup[kk],
"CenterSusp"=createsample$CenterSusp[kk],
"CenterCrim"=createsample$CenterCrim[kk],
"CenterRandom"=createsample$CenterRandomCode[kk],
"CenterReward"=createsample$CenterReward[kk],
"RightGroup"=createsample$RightGroup[kk],
"RightSusp"=createsample$RightSusp[kk],
"RightCrim"=createsample$RightCrim[kk],
"RightRandom"=createsample$RightRandomCode[kk],
"RightReward"=createsample$RightReward[kk])
nextrow2<-data.frame("State"=statedummy,
"Action"="Center",
"Reward"=-50,
"NextState"="End",
"LeftGroup"=createsample$LeftGroup[kk],
"LeftSusp"=createsample$LeftSusp[kk],
"LeftCrim"=createsample$LeftCrim[kk],
"LeftRandom"=createsample$LeftRandomCode[kk],
"LeftReward"=createsample$LeftReward[kk],
"CenterGroup"=createsample$CenterGroup[kk],
"CenterSusp"=createsample$CenterSusp[kk],
"CenterCrim"=createsample$CenterCrim[kk],
"CenterRandom"=createsample$CenterRandomCode[kk],
"CenterReward"=createsample$CenterReward[kk],
"RightGroup"=createsample$RightGroup[kk],
"RightSusp"=createsample$RightSusp[kk],
"RightCrim"=createsample$RightCrim[kk],
"RightRandom"=createsample$RightRandomCode[kk],
"RightReward"=createsample$RightReward[kk])
nextrow3<-data.frame("State"=statedummy,
"Action"="Right",
"Reward"=-50,
"NextState"="End",
"LeftGroup"=createsample$LeftGroup[kk],
"LeftSusp"=createsample$LeftSusp[kk],
"LeftCrim"=createsample$LeftCrim[kk],
"LeftRandom"=createsample$LeftRandomCode[kk],
"LeftReward"=createsample$LeftReward[kk],
"CenterGroup"=createsample$CenterGroup[kk],
"CenterSusp"=createsample$CenterSusp[kk],
"CenterCrim"=createsample$CenterCrim[kk],
"CenterRandom"=createsample$CenterRandomCode[kk],
"CenterReward"=createsample$CenterReward[kk],
"RightGroup"=createsample$RightGroup[kk],
"RightSusp"=createsample$RightSusp[kk],
"RightCrim"=createsample$RightCrim[kk],
"RightRandom"=createsample$RightRandomCode[kk],
"RightReward"=createsample$RightReward[kk])
}
# "None" is available while at least traveltime remains; otherwise it
# also terminates the day.
if(as.numeric(openstate[1])>=move)
{
nextrow4<-data.frame("State"=statedummy,
"Action"="None",
"Reward"=0,
"NextState"=paste0(timemove,".",createsample$State[kk+1]),
"LeftGroup"=createsample$LeftGroup[kk],
"LeftSusp"=createsample$LeftSusp[kk],
"LeftCrim"=createsample$LeftCrim[kk],
"LeftRandom"=createsample$LeftRandomCode[kk],
"LeftReward"=createsample$LeftReward[kk],
"CenterGroup"=createsample$CenterGroup[kk],
"CenterSusp"=createsample$CenterSusp[kk],
"CenterCrim"=createsample$CenterCrim[kk],
"CenterRandom"=createsample$CenterRandomCode[kk],
"CenterReward"=createsample$CenterReward[kk],
"RightGroup"=createsample$RightGroup[kk],
"RightSusp"=createsample$RightSusp[kk],
"RightCrim"=createsample$RightCrim[kk],
"RightRandom"=createsample$RightRandomCode[kk],
"RightReward"=createsample$RightReward[kk])
statemap<-rbind(statemap, nextrow, nextrow2, nextrow3,nextrow4)
}
else if (as.numeric(openstate[1])<move)
{
nextrow4<-data.frame("State"=statedummy,
"Action"="None",
"Reward"=0,
"NextState"="End",
"LeftGroup"=createsample$LeftGroup[kk],
"LeftSusp"=createsample$LeftSusp[kk],
"LeftCrim"=createsample$LeftCrim[kk],
"LeftRandom"=createsample$LeftRandomCode[kk],
"LeftReward"=createsample$LeftReward[kk],
"CenterGroup"=createsample$CenterGroup[kk],
"CenterSusp"=createsample$CenterSusp[kk],
"CenterCrim"=createsample$CenterCrim[kk],
"CenterRandom"=createsample$CenterRandomCode[kk],
"CenterReward"=createsample$CenterReward[kk],
"RightGroup"=createsample$RightGroup[kk],
"RightSusp"=createsample$RightSusp[kk],
"RightCrim"=createsample$RightCrim[kk],
"RightRandom"=createsample$RightRandomCode[kk],
"RightReward"=createsample$RightReward[kk])
statemap<-rbind(statemap, nextrow, nextrow2, nextrow3,nextrow4)
}
}
}
}
}
}
return(statemap)
}
################################################
## Call RL with an existing model
################################################
runRL<-function(RLdat, trainmodelold, parameters)
{
  ## Continue training an existing RL model on a new batch of experience.
  ##   RLdat         - data frame with State/Action/Reward/NextState columns
  ##   trainmodelold - previously fitted model to warm-start from
  ##   parameters    - parameter row supplying alpha/gamma/epsilon/itertrain
  # ReinforcementLearning() expects character columns, not factors.
  for (col in c("State", "Action", "NextState")) {
    RLdat[[col]] <- as.character(RLdat[[col]])
  }
  ctrl <- list(alpha = parameters$alpha,
               gamma = parameters$gamma,
               epsilon = parameters$epsilon)
  ReinforcementLearning(RLdat, s = "State", a = "Action", r = "Reward",
                        s_new = "NextState", iter = parameters$itertrain,
                        control = ctrl, model = trainmodelold)
}
################################################
## This is the initial reinforcement learning
## routine. Calls RL without an existing model
################################################
runRLinit <- function(RLdat, parameters)
{
  # Fit an initial reinforcement-learning model (no prior model to extend).
  #
  # RLdat      data frame with State / Action / Reward / NextState columns
  # parameters list supplying alpha, gamma, epsilon and itertrain
  #
  # State, next-state and action columns must be character for
  # ReinforcementLearning(), so coerce them before fitting.
  RLdat$State <- as.character(RLdat$State)
  RLdat$NextState <- as.character(RLdat$NextState)
  RLdat$Action <- as.character(RLdat$Action)
  ReinforcementLearning(
    RLdat,
    s = "State",
    a = "Action",
    r = "Reward",
    s_new = "NextState",
    iter = parameters$itertrain,
    control = list(
      alpha = parameters$alpha,
      gamma = parameters$gamma,
      epsilon = parameters$epsilon
    )
  )
}
################################################
## This is the population seeding routine.
################################################
createpopulation <- function(parameters)
{
  # Build the simulated seed population.
  #
  # For each of `parameters$pop` individuals we draw a group membership
  # (group 2 with probability percGroup), a criminality level in
  # 1:parameters$crim (Reward mirrors Crim), an indirect-observation flag,
  # and a suspicion score (criminality plus Gaussian noise, +1 mean bias for
  # group 2, floored at 0). Each person is appended to the result twice,
  # mirroring the original implementation: once right after DirectCode is
  # set (so that first copy carries the IndirectCode from the *previous*
  # iteration, NA on the first pass) and once after IndirectCode is set.
  # NOTE(review): confirm the double append with a stale IndirectCode is
  # intended.
  #
  # Fixed: the original wrote `set.seed=(parameters$seed)`, which only
  # created a local variable named `set.seed` and never seeded the RNG, so
  # runs were not reproducible.
  set.seed(parameters$seed)
  cols <- c("ID", "Group", "Crim", "Indirect", "Reward", "Susp", "DirectCode", "IndirectCode")
  population <- data.frame(matrix(vector(), 0, 8, dimnames = list(c(), cols)), stringsAsFactors = FALSE)
  person <- data.frame(matrix(vector(), 1, 8, dimnames = list(c(), cols)), stringsAsFactors = FALSE)
  for (ii in 1:parameters$pop)
  {
    person$ID <- sprintf("%03d", ii)
    # Group 2 is drawn with probability percGroup.
    person$Group <- sample(1:2, 1, replace = TRUE, prob = c(1 - parameters$percGroup, parameters$percGroup))
    person$Crim <- sample(1:parameters$crim, 1, replace = TRUE)
    person$Reward <- person$Crim
    if (person$Group == 1)
    {
      person$Indirect <- sample(0:1, 1, replace = TRUE, prob = c(1 - parameters$indirect1, parameters$indirect1))
      person$Susp <- round(person$Crim + rnorm(1, 0, 1))
      if (person$Susp < 0)
      {person$Susp <- 0}
    }
    if (person$Group == 2)
    {
      person$Indirect <- sample(0:1, 1, replace = TRUE, prob = c(parameters$indirect2, 1 - parameters$indirect2))
      # Group 2 suspicion is biased upward by +1 on average.
      person$Susp <- round(person$Crim + rnorm(1, 1, 1))
      if (person$Susp < 0)
      {person$Susp <- 0}
    }
    person$DirectCode <- paste0(person$Group, '.', person$Susp)
    population <- rbind(population, person)
    person$IndirectCode <- paste0(person$Indirect, '.', person$Susp)
    population <- rbind(population, person)
  }
  return(population)
}
################################################
## Function Policywork
################################################
# Join the greedy policy returned by computePolicy() back onto the state
# table `test`, keeping only the (State, Action) rows the policy selected.
#
# policytest: named vector/list from computePolicy(); names are state labels
#             (R prepends "X" via make.names, which is stripped below),
#             values are the chosen actions.
# test:       state-transition table produced by statediagramfunction().
# Returns a data frame of the matched rows with their token attributes.
policywork<-function(policytest,test)
{
# Flatten the policy into a one-column data frame; the state labels end up
# as row names, which we copy into a State column.
policytest<- data.frame(unlist(policytest))
policytest<-cbind(policytest,State=rownames(policytest))
policytest$State<-as.character(policytest$State)
finalpolicy<-NA
# Strip the "X" prefix added by make.names() to the state labels.
# NOTE(review): sub('X','') removes the *first* X anywhere in the string,
# not only a leading one -- confirm state labels never contain an interior X.
for (ii in 1:nrow(policytest))
{
policytest$State[ii]<-sub('X','', policytest$State[ii])
}
# Template row of NAs; the NA seed row is dropped by na.omit() at the end.
finalpolicy<-data.frame("State"=NA,"Action"=NA, "Reward"=NA, "LeftCrim"=NA, "LeftGroup"=NA, "LeftSusp"=NA, "LeftReward"=NA, "CenterCrim"=NA, "CenterGroup"=NA, "CenterSusp"=NA, "CenterReward"=NA,"RightCrim"=NA, "RightGroup"=NA, "RightSusp"=NA, "RightReward"=NA, "NextState"=NA)
finalpolicy2<-finalpolicy
# O(nrow(test) * nrow(policytest)) scan: for every test row whose
# (State, Action) pair matches a policy entry, copy the row's attributes.
for(jj in 1:nrow(test))
{
for (kk in 1:nrow(policytest))
{
# `unlist.policytest.` is the auto-generated name of the action column.
if(policytest$State[kk]==test$State[jj]&&as.character(policytest$unlist.policytest.[kk])==as.character(test$Action[jj]))
{ finalpolicy2$State<-policytest$State[kk]
finalpolicy2$Action<-policytest$unlist.policytest.[kk]
finalpolicy2$Reward<-test$Reward[jj]
finalpolicy2$LeftCrim<-test$LeftCrim[jj]
finalpolicy2$LeftGroup<-test$LeftGroup[jj]
finalpolicy2$LeftSusp<-test$LeftSusp[jj]
finalpolicy2$LeftReward<-test$LeftReward[jj]
finalpolicy2$CenterCrim<-test$CenterCrim[jj]
finalpolicy2$CenterGroup<-test$CenterGroup[jj]
finalpolicy2$CenterSusp<-test$CenterSusp[jj]
finalpolicy2$CenterReward<-test$CenterReward[jj]
finalpolicy2$RightCrim<-test$RightCrim[jj]
finalpolicy2$RightGroup<-test$RightGroup[jj]
finalpolicy2$RightSusp<-test$RightSusp[jj]
finalpolicy2$RightReward<-test$RightReward[jj]
finalpolicy2$NextState<-test$NextState[jj]
finalpolicy<-rbind(finalpolicy, finalpolicy2)
}
}
}
# Drop duplicate rows and the all-NA seed row before returning.
finalpolicy<-unique(finalpolicy)
finalpolicy<-na.omit(finalpolicy)
return(finalpolicy)
}
################################################
## This is a routine to find the best solution
################################################
RLSolution <- function(finalpolicy, parameters = get("parameters", envir = globalenv()))
{
  # Walk the greedy policy starting from the first row of `finalpolicy`,
  # following NextState links for at most `parameters$daylength` steps, and
  # annotate each visited row with the Group of the token chosen by its
  # Action ("Left"/"Center"/"Right"; "None" leaves Group as NA).
  #
  # `parameters` was previously read from the enclosing (global) environment;
  # it is now an explicit argument defaulting to the global value, so all
  # existing one-argument callers keep working.
  RLsolution <- finalpolicy[1, ]
  for (ii in seq_len(parameters$daylength)) {
    nxt <- RLsolution$NextState[ii]
    # Stop when the chain ends ("End") or when no follow-up state exists.
    if (is.na(nxt) || nxt == "End") {
      break
    }
    for (jj in seq_len(nrow(finalpolicy))) {
      if (nxt == finalpolicy$State[jj]) {
        RLsolution[ii + 1, ] <- finalpolicy[jj, ]
      }
    }
  }
  # Map each row's Action onto the Group of the chosen token. This used to
  # run inside the loop above, which repeated the work every iteration and
  # broke row extension for chains longer than two steps: once the Group
  # column was added, `RLsolution[ii + 1, ] <- finalpolicy[jj, ]` failed
  # because the two frames no longer had the same number of columns.
  RLsolution$Group <- NA
  for (kk in seq_len(nrow(RLsolution))) {
    act <- RLsolution$Action[kk]
    if (is.na(act)) {
      next
    }
    if (act == "Left") {
      RLsolution$Group[kk] <- RLsolution$LeftGroup[kk]
    } else if (act == "Center") {
      RLsolution$Group[kk] <- RLsolution$CenterGroup[kk]
    } else if (act == "Right") {
      RLsolution$Group[kk] <- RLsolution$RightGroup[kk]
    }
  }
  return(RLsolution)
}
################################################
## Train an "ideal" benchmark model directly on a
## test sample and return its greedy solution path
################################################
BestPolicy <- function(test, parameters)
{
  # Benchmark ("ideal") policy: fit a brand-new model on `test` itself,
  # extract its greedy policy, and return the corresponding solution path.
  ideal_model <- runRLinit(test, parameters)
  ideal_policy <- computePolicy(ideal_model)
  ideal_policy_rows <- policywork(ideal_policy, test)
  RLSolution(ideal_policy_rows)
}
################################################
## Script entry point: load libraries, build the simulated population,
## split it into train/test, and draw the first sample for each arm.
################################################
library(stringi)
library(dplyr)
library(ReinforcementLearning)
library(binaryLogic)
library(stats)
library(hash)
library(ggplot2)
library(testthat)
library(wesanderson)
#setwd("~/Alethea")
# Experiment configuration (seed, pop, daylength, numtrain, numtest, ...).
# LoadParameters() is defined elsewhere in the project.
parameters<-LoadParameters()
# Simulated population with group / criminality / suspicion attributes.
population<-createpopulation(parameters)
# Random 2000-row training split; the remainder is held out for testing.
# NOTE(review): the RNG is not re-seeded before this draw, so the split
# depends on prior RNG state -- confirm that is intended.
test_rand<-sample.int(n = nrow(population), size = 2000, replace = F)
trainpopulation <- population[test_rand, ]
testpopulation <- population[-test_rand, ]
save(testpopulation, file = paste0("TestPopulation.",parameters$seed,".rda"))
save(trainpopulation, file = paste0("TrainPopulation.",parameters$seed,".rda"))
# One shared sample, copied once per treatment arm (Susp / Direct /
# Indirect / Crim / Random); each copy receives its arm-specific State
# columns in the next section.
createsample<-createsamplefunction(trainpopulation, parameters)
createsampleSusp<-createsample
createsampleDirect<-createsample
createsampleIndirect<-createsample
createsampleCrim<-createsample
createsampleRandom<-createsample
################################################
## Initialise per-arm accumulators and fit the first model of each arm.
################################################
# Accumulators are seeded with NA and cleaned with na.omit() later.
RLsolutionTrainSusp<-NA
RLsolutionTrainDirect<-NA
RLsolutionTrainIndirect<-NA
RLsolutionTrainCrim<-NA
RLsolutionTrainRandom<-NA
RLtableTrainSusp<-NA
RLtableTrainDirect<-NA
RLtableTrainIndirect<-NA
RLtableTrainCrim<-NA
RLtableTrainRandom<-NA
proportionTrainSusp<-NA
proportionTrainDirect<-NA
proportionTrainIndirect<-NA
proportionTrainCrim<-NA
proportionTrainRandom<-NA
# Copy each arm's specific state columns into the generic State/NextState
# columns consumed by statediagramfunction() and the RL fit.
# NOTE(review): this block reads columns named *NextState (e.g.
# SuspNextState), while the training loop below reads Next*State (e.g.
# NextSuspState) -- confirm which naming createsamplefunction() produces.
for(i in 1:nrow(createsample))
{
createsampleSusp$State[i]<-createsample$SuspState[i]
createsampleSusp$NextState[i]<-createsample$SuspNextState[i]
createsampleDirect$State[i]<-createsample$DirectState[i]
createsampleDirect$NextState[i]<-createsample$DirectNextState[i]
createsampleIndirect$State[i]<-createsample$IndirectState[i]
createsampleIndirect$NextState[i]<-createsample$IndirectNextState[i]
createsampleCrim$State[i]<-createsample$CrimState[i]
createsampleCrim$NextState[i]<-createsample$CrimNextState[i]
createsampleRandom$State[i]<-createsample$RandomState[i]
createsampleRandom$NextState[i]<-createsample$RandomNextState[i]
}
# Initial fit per arm: expand the sample into a state diagram, then train
# a fresh model with runRLinit().
trainSusp<-statediagramfunction(createsampleSusp, parameters)
trainmodelSusp<-runRLinit(trainSusp, parameters)
trainDirect<-statediagramfunction(createsampleDirect, parameters)
trainmodelDirect<-runRLinit(trainDirect, parameters)
trainIndirect<-statediagramfunction(createsampleIndirect, parameters)
trainmodelIndirect<-runRLinit(trainIndirect, parameters)
trainCrim<-statediagramfunction(createsampleCrim, parameters)
trainmodelCrim<-runRLinit(trainCrim, parameters)
trainRandom<-statediagramfunction(createsampleRandom, parameters)
trainmodelRandom<-runRLinit(trainRandom, parameters)
# Training loop: for each of numtrain rounds, draw a fresh sample from the
# training population, refresh each arm's State/NextState columns, and update
# that arm's model with runRL(). Per round we also derive the greedy policy,
# extract its solution path, and record the proportion of chosen tokens that
# belong to group 2 (table row 2 over the total).
for (m in 1:parameters$numtrain)
{
RLtrainsample<-createsamplefunction(trainpopulation, parameters)
RLtrainsampleSusp<-RLtrainsample
RLtrainsampleDirect<-RLtrainsample
RLtrainsampleIndirect<-RLtrainsample
RLtrainsampleCrim<-RLtrainsample
RLtrainsampleRandom<-RLtrainsample
# NOTE(review): this loop runs over 1:parameters$daylength and reads
# Next*State columns, while the initial block above loops over
# nrow(createsample) and reads *NextState columns -- confirm both the loop
# bound and the column names.
for(i in 1:parameters$daylength)
{
RLtrainsampleSusp$State[i]<-as.character(RLtrainsample$SuspState[i])
RLtrainsampleSusp$NextState[i]<-as.character(RLtrainsample$NextSuspState[i])
RLtrainsampleDirect$State[i]<-as.character(RLtrainsample$DirectState[i])
RLtrainsampleDirect$NextState[i]<-as.character(RLtrainsample$NextDirectState[i])
RLtrainsampleIndirect$State[i]<-as.character(RLtrainsample$IndirectState[i])
RLtrainsampleIndirect$NextState[i]<-as.character(RLtrainsample$NextIndirectState[i])
RLtrainsampleCrim$State[i]<-as.character(RLtrainsample$CrimState[i])
RLtrainsampleCrim$NextState[i]<-as.character(RLtrainsample$NextCrimState[i])
RLtrainsampleRandom$State[i]<-as.character(RLtrainsample$RandomState[i])
RLtrainsampleRandom$NextState[i]<-as.character(RLtrainsample$NextRandomState[i])
}
# Suspicion arm: update model, derive policy, solution path, group-2 share.
trainSusp<-statediagramfunction(RLtrainsampleSusp, parameters)
trainmodelSusp<-runRL(trainSusp, trainmodelSusp,parameters)
policytrainSusp<-computePolicy(trainmodelSusp)
finalpolicytrainSusp<-policywork(policytrainSusp,trainSusp)
RLsolutionTrainSusp2<-RLSolution(finalpolicytrainSusp)
RLtableTrainSusp2<-as.data.frame(table(RLsolutionTrainSusp2$Group))
proportionTrainSusp2<-RLtableTrainSusp2[2,2]/(RLtableTrainSusp2[1,2]+RLtableTrainSusp2[2,2])
RLsolutionTrainSusp<-rbind(RLsolutionTrainSusp,RLsolutionTrainSusp2)
RLtableTrainSusp<-rbind(RLtableTrainSusp,RLtableTrainSusp2)
proportionTrainSusp<-rbind(proportionTrainSusp,proportionTrainSusp2)
# Direct arm (same pipeline).
trainDirect<-statediagramfunction(RLtrainsampleDirect, parameters)
trainmodelDirect<-runRL(trainDirect, trainmodelDirect,parameters)
policytrainDirect<-computePolicy(trainmodelDirect)
finalpolicytrainDirect<-policywork(policytrainDirect,trainDirect)
RLsolutionTrainDirect2<-RLSolution(finalpolicytrainDirect)
RLtableTrainDirect2<-as.data.frame(table(RLsolutionTrainDirect2$Group))
proportionTrainDirect2<-RLtableTrainDirect2[2,2]/(RLtableTrainDirect2[1,2]+RLtableTrainDirect2[2,2])
RLsolutionTrainDirect<-rbind(RLsolutionTrainDirect,RLsolutionTrainDirect2)
RLtableTrainDirect<-rbind(RLtableTrainDirect,RLtableTrainDirect2)
proportionTrainDirect<-rbind(proportionTrainDirect,proportionTrainDirect2)
# Indirect arm (same pipeline).
trainIndirect<-statediagramfunction(RLtrainsampleIndirect, parameters)
trainmodelIndirect<-runRL(trainIndirect, trainmodelIndirect,parameters)
policytrainIndirect<-computePolicy(trainmodelIndirect)
finalpolicytrainIndirect<-policywork(policytrainIndirect,trainIndirect)
RLsolutionTrainIndirect2<-RLSolution(finalpolicytrainIndirect)
RLtableTrainIndirect2<-as.data.frame(table(RLsolutionTrainIndirect2$Group))
proportionTrainIndirect2<-RLtableTrainIndirect2[2,2]/(RLtableTrainIndirect2[1,2]+RLtableTrainIndirect2[2,2])
RLsolutionTrainIndirect<-rbind(RLsolutionTrainIndirect,RLsolutionTrainIndirect2)
RLtableTrainIndirect<-rbind(RLtableTrainIndirect,RLtableTrainIndirect2)
proportionTrainIndirect<-rbind(proportionTrainIndirect,proportionTrainIndirect2)
# Criminality arm (same pipeline).
trainCrim<-statediagramfunction(RLtrainsampleCrim, parameters)
trainmodelCrim<-runRL(trainCrim, trainmodelCrim,parameters)
policytrainCrim<-computePolicy(trainmodelCrim)
finalpolicytrainCrim<-policywork(policytrainCrim,trainCrim)
RLsolutionTrainCrim2<-RLSolution(finalpolicytrainCrim)
RLtableTrainCrim2<-as.data.frame(table(RLsolutionTrainCrim2$Group))
proportionTrainCrim2<-RLtableTrainCrim2[2,2]/(RLtableTrainCrim2[1,2]+RLtableTrainCrim2[2,2])
RLsolutionTrainCrim<-rbind(RLsolutionTrainCrim,RLsolutionTrainCrim2)
RLtableTrainCrim<-rbind(RLtableTrainCrim,RLtableTrainCrim2)
proportionTrainCrim<-rbind(proportionTrainCrim,proportionTrainCrim2)
# Random arm (same pipeline).
trainRandom<-statediagramfunction(RLtrainsampleRandom, parameters)
trainmodelRandom<-runRL(trainRandom, trainmodelRandom,parameters)
policytrainRandom<-computePolicy(trainmodelRandom)
finalpolicytrainRandom<-policywork(policytrainRandom,trainRandom)
RLsolutionTrainRandom2<-RLSolution(finalpolicytrainRandom)
RLtableTrainRandom2<-as.data.frame(table(RLsolutionTrainRandom2$Group))
proportionTrainRandom2<-RLtableTrainRandom2[2,2]/(RLtableTrainRandom2[1,2]+RLtableTrainRandom2[2,2])
RLsolutionTrainRandom<-rbind(RLsolutionTrainRandom,RLsolutionTrainRandom2)
RLtableTrainRandom<-rbind(RLtableTrainRandom,RLtableTrainRandom2)
proportionTrainRandom<-rbind(proportionTrainRandom,proportionTrainRandom2)
}
# Drop the NA seed rows used to initialise the accumulators, then persist
# the per-round training proportions and the final trained models.
RLtableTrainSusp <- na.omit(RLtableTrainSusp)
RLtableTrainDirect <- na.omit(RLtableTrainDirect)
RLtableTrainIndirect <- na.omit(RLtableTrainIndirect)
RLtableTrainCrim <- na.omit(RLtableTrainCrim)
RLtableTrainRandom <- na.omit(RLtableTrainRandom)
proportionTrainSusp <- na.omit(proportionTrainSusp)
proportionTrainDirect <- na.omit(proportionTrainDirect)
proportionTrainIndirect <- na.omit(proportionTrainIndirect)
proportionTrainCrim <- na.omit(proportionTrainCrim)
proportionTrainRandom <- na.omit(proportionTrainRandom)
save(proportionTrainSusp,proportionTrainDirect,proportionTrainIndirect,
proportionTrainCrim,proportionTrainRandom,
file = paste0("proportionTrain.seed.",parameters$seed,"daylength.",parameters$daylength,".numtrain.",parameters$numtrain,".numtest.",parameters$numtest,".itertrain.",parameters$itertrain,".rda"))
save(trainmodelSusp,trainmodelDirect,trainmodelIndirect,trainmodelCrim,trainmodelRandom,
file = paste0("TrainModels.seed.",parameters$seed,"daylength.",parameters$daylength,".numtrain.",parameters$numtrain,".numtest.",parameters$numtest,".itertrain.",parameters$itertrain,".rda"))
################################################
## Test phase: apply (and continue updating) the trained models on samples
## drawn from the held-out test population.
################################################
# Accumulators are seeded with NA and cleaned with na.omit() afterwards.
RLsolutionSusp<-NA
RLsolutionDirect<-NA
RLsolutionIndirect<-NA
RLsolutionCrim<-NA
RLsolutionRandom<-NA
RLtableSusp<-NA
RLtableDirect<-NA
RLtableIndirect<-NA
RLtableCrim<-NA
RLtableRandom<-NA
proportionSusp<-NA
proportionDirect<-NA
proportionIndirect<-NA
proportionCrim<-NA
proportionRandom<-NA
# Test loop: mirrors the training loop but draws samples from the held-out
# test population. Note the models passed to runRL() keep learning during
# testing (each testmodel* starts from the trained model of the same arm).
for(j in 1:parameters$numtest)
{
RLtestsample<-createsamplefunction(testpopulation, parameters)
RLtestsampleSusp<-RLtestsample
RLtestsampleDirect<-RLtestsample
RLtestsampleIndirect<-RLtestsample
RLtestsampleCrim<-RLtestsample
RLtestsampleRandom<-RLtestsample
# Refresh per-arm State/NextState columns from the shared sample.
for(i in 1:nrow(RLtestsample))
{
RLtestsampleSusp$State[i]<-as.character(RLtestsample$SuspState[i])
RLtestsampleSusp$NextState[i]<-as.character(RLtestsample$NextSuspState[i])
RLtestsampleDirect$State[i]<-as.character(RLtestsample$DirectState[i])
RLtestsampleDirect$NextState[i]<-as.character(RLtestsample$NextDirectState[i])
RLtestsampleIndirect$State[i]<-as.character(RLtestsample$IndirectState[i])
RLtestsampleIndirect$NextState[i]<-as.character(RLtestsample$NextIndirectState[i])
RLtestsampleCrim$State[i]<-as.character(RLtestsample$CrimState[i])
RLtestsampleCrim$NextState[i]<-as.character(RLtestsample$NextCrimState[i])
RLtestsampleRandom$State[i]<-as.character(RLtestsample$RandomState[i])
RLtestsampleRandom$NextState[i]<-as.character(RLtestsample$NextRandomState[i])
}
# Expand each arm's sample into a state diagram and update its model.
testSusp<-statediagramfunction(RLtestsampleSusp, parameters)
testmodelSusp<-runRL(testSusp,trainmodelSusp,parameters)
testDirect<-statediagramfunction(RLtestsampleDirect, parameters)
testmodelDirect<-runRL(testDirect,trainmodelDirect,parameters)
testIndirect<-statediagramfunction(RLtestsampleIndirect, parameters)
testmodelIndirect<-runRL(testIndirect,trainmodelIndirect,parameters)
testCrim<-statediagramfunction(RLtestsampleCrim, parameters)
testmodelCrim<-runRL(testCrim,trainmodelCrim,parameters)
testRandom<-statediagramfunction(RLtestsampleRandom, parameters)
testmodelRandom<-runRL(testRandom,trainmodelRandom,parameters)
# Suspicion arm: policy, solution path, and group-2 proportion.
policytestSusp<-computePolicy(testmodelSusp)
finalpolicySusp<-policywork(policytestSusp,testSusp)
RLsolutionSusp2<-RLSolution(finalpolicySusp)
RLtableSusp2<-as.data.frame(table(RLsolutionSusp2$Group))
proportionSusp2<-RLtableSusp2[2,2]/(RLtableSusp2[1,2]+RLtableSusp2[2,2])
RLsolutionSusp<-rbind(RLsolutionSusp,RLsolutionSusp2)
RLtableSusp<-rbind(RLtableSusp,RLtableSusp2)
proportionSusp<-rbind(proportionSusp,proportionSusp2)
# Direct arm (same pipeline).
policytestDirect<-computePolicy(testmodelDirect)
finalpolicyDirect<-policywork(policytestDirect,testDirect)
RLsolutionDirect2<-RLSolution(finalpolicyDirect)
RLtableDirect2<-as.data.frame(table(RLsolutionDirect2$Group))
proportionDirect2<-RLtableDirect2[2,2]/(RLtableDirect2[1,2]+RLtableDirect2[2,2])
RLsolutionDirect<-rbind(RLsolutionDirect,RLsolutionDirect2)
RLtableDirect<-rbind(RLtableDirect,RLtableDirect2)
proportionDirect<-rbind(proportionDirect,proportionDirect2)
# Indirect arm (same pipeline).
policytestIndirect<-computePolicy(testmodelIndirect)
finalpolicyIndirect<-policywork(policytestIndirect, testIndirect)
RLsolutionIndirect2<-RLSolution(finalpolicyIndirect)
RLtableIndirect2<-as.data.frame(table(RLsolutionIndirect2$Group))
proportionIndirect2<-RLtableIndirect2[2,2]/(RLtableIndirect2[1,2]+RLtableIndirect2[2,2])
RLsolutionIndirect<-rbind(RLsolutionIndirect,RLsolutionIndirect2)
RLtableIndirect<-rbind(RLtableIndirect,RLtableIndirect2)
proportionIndirect<-rbind(proportionIndirect,proportionIndirect2)
# Criminality arm (same pipeline).
policytestCrim<-computePolicy(testmodelCrim)
finalpolicyCrim<-policywork(policytestCrim,testCrim)
RLsolutionCrim2<-RLSolution(finalpolicyCrim)
RLtableCrim2<-as.data.frame(table(RLsolutionCrim2$Group))
proportionCrim2<-RLtableCrim2[2,2]/(RLtableCrim2[1,2]+RLtableCrim2[2,2])
RLsolutionCrim<-rbind(RLsolutionCrim,RLsolutionCrim2)
RLtableCrim<-rbind(RLtableCrim,RLtableCrim2)
proportionCrim<-rbind(proportionCrim,proportionCrim2)
# Random arm (same pipeline).
policytestRandom<-computePolicy(testmodelRandom)
finalpolicyRandom<-policywork(policytestRandom,testRandom)
RLsolutionRandom2<-RLSolution(finalpolicyRandom)
RLtableRandom2<-as.data.frame(table(RLsolutionRandom2$Group))
proportionRandom2<-RLtableRandom2[2,2]/(RLtableRandom2[1,2]+RLtableRandom2[2,2])
RLsolutionRandom<-rbind(RLsolutionRandom,RLsolutionRandom2)
RLtableRandom<-rbind(RLtableRandom,RLtableRandom2)
proportionRandom<-rbind(proportionRandom,proportionRandom2)
}
# Drop the NA seed rows from the accumulators, then persist the per-round
# test solutions for every arm.
RLtableSusp <- na.omit(RLtableSusp)
RLtableDirect <- na.omit(RLtableDirect)
RLtableIndirect <- na.omit(RLtableIndirect)
RLtableCrim <- na.omit(RLtableCrim)
RLtableRandom <- na.omit(RLtableRandom)
proportionSusp <- na.omit(proportionSusp)
proportionDirect <- na.omit(proportionDirect)
proportionIndirect <- na.omit(proportionIndirect)
proportionCrim <- na.omit(proportionCrim)
proportionRandom <- na.omit(proportionRandom)
save(RLsolutionSusp,RLsolutionDirect,RLsolutionIndirect,RLsolutionCrim,RLsolutionRandom, file = paste0("RLsolution.daylength.",parameters$daylength,".numtrain.",parameters$numtrain,".numtest.",parameters$numtest,".itertrain.",parameters$itertrain,".rda"))
################################################
## Benchmark: train a fresh ("ideal") model directly on the final test
## sample of each treatment arm and persist the resulting solutions.
## NOTE(review): only the test sample from the last loop iteration is
## used here -- confirm that is intended.
################################################
RLsolutionSuspIdeal<-BestPolicy(testSusp,parameters)
RLsolutionDirectIdeal<-BestPolicy(testDirect,parameters)
RLsolutionIndirectIdeal<-BestPolicy(testIndirect,parameters)
RLsolutionCrimIdeal<-BestPolicy(testCrim,parameters)
RLsolutionRandomIdeal<-BestPolicy(testRandom,parameters)
# Fixed: the save() below previously listed RLsolutionIndirect instead of
# RLsolutionIndirectIdeal, so the Indirect benchmark was never persisted.
save(RLsolutionSuspIdeal,RLsolutionDirectIdeal,RLsolutionIndirectIdeal,RLsolutionCrimIdeal,RLsolutionRandomIdeal, file = paste0("RLsolutionIdeal.daylength.",parameters$daylength,".numtrain.",parameters$numtrain,".numtest.",parameters$numtest,".itertrain.",parameters$itertrain,".rda"))
save(proportionSusp,proportionDirect,proportionIndirect,proportionCrim,proportionRandom,file =paste0("proportion.",parameters$seed,".",parameters$numtrain,".",parameters$numtest,".",parameters$itertrain,".rda"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DsAcc-class.R
\docType{methods}
\name{getSamples,DsAcc-method}
\alias{getSamples,DsAcc-method}
\alias{getSamples}
\title{getSamples-methods}
\usage{
\S4method{getSamples}{DsAcc}(.object)
}
\arguments{
\item{.object}{\code{\linkS4class{DsAcc}} object}
}
\value{
Character vector of sample IDs in the dataset
}
\description{
Return sample IDs in a dataset
}
\author{
Fabian Mueller
}
| /man/getSamples-DsAcc-method.Rd | no_license | GreenleafLab/ChrAccR | R | false | true | 460 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DsAcc-class.R
\docType{methods}
\name{getSamples,DsAcc-method}
\alias{getSamples,DsAcc-method}
\alias{getSamples}
\title{getSamples-methods}
\usage{
\S4method{getSamples}{DsAcc}(.object)
}
\arguments{
\item{.object}{\code{\linkS4class{DsAcc}} object}
}
\value{
Character vector of sample IDs in the dataset
}
\description{
Return sample IDs in a dataset
}
\author{
Fabian Mueller
}
|
##################################################
## Brief: Identify discontinuities in travel times to adjacent tracts.
## Date: 05/13/2021
## Author: Eric Chandler <echandler@uchicago.edu>
## Details: Measures variance of travel times to adjacent tracts.
## Tracts with large variance may indicate routing has failed.
##################################################
library(r5r)
library(sf)
library(dplyr)
library(glue)
library(ggplot2) # required by the diagnostic plots below (was used but never attached)
# Set target county, geography, and env vars
target_year <- "2010"
target_state <- "24"
target_geography <- "tract"
target_od_path <- glue(
"input/{target_year}/{target_geography}/",
"origin_destination_by_state/{target_state}/"
)
mode <- c("WALK","TRANSIT")
target_mode <- paste(mode, collapse='-')
target_ttm_dir <- glue("output/{target_year}/{target_geography}/travel_time_matrices/{target_state}/{target_mode}")
# Load origins and destinations from resources/ directory
origins <- readr::read_csv(glue(target_od_path, "/origins.csv")) %>% mutate(ID_INT = as.numeric(id))
destinations <- readr::read_csv(glue(target_od_path, "/destinations.csv")) %>% mutate(ID_INT = as.numeric(id))
# Read r5r output
tt_mat <- read.csv(glue(target_ttm_dir, "/ttm.csv")) %>%
left_join(origins, by = c("fromId" = "ID_INT"), suffix = c(".tt", ".org")) %>%
left_join(destinations, by = c("toId" = "ID_INT"), suffix = c(".org", ".dest"))
# Get tracts
tract_geos <- tigris::tracts(as.numeric(target_state)) %>%
mutate(GEOID_INT = as.numeric(GEOID)) %>% st_transform(2163)
# Compute adjacent tracts
adjacent_tracts <- tract_geos %>% st_join(tract_geos, join = st_touches) %>%
select(GEOID_INT.x, GEOID_INT.y, geometry)
# !!REPEAT!! run code from here onwards a few times to spot-check many points!
# Sample one origin tract to reduce memory costs
tt_from_one_origin <- tt_mat %>% select(fromId, toId, travel_time) %>%
filter(fromId == sample(tt_mat$fromId,size=1))
# Compute difference in travel time from origin to adjacent tracts
adjacent_times <- adjacent_tracts %>%
inner_join(tt_from_one_origin, by = c("GEOID_INT.x" = "toId")) %>%
inner_join(tt_from_one_origin, by = c("GEOID_INT.y" = "toId")) %>%
mutate(DIFF = travel_time.x - travel_time.y)
# Compute variance in adjacent tract travel times
# (NOTE: sd() is the standard deviation, not the variance, despite the VAR_* names.)
var_times <- adjacent_times %>% group_by(fromId.x, GEOID_INT.x, travel_time.x) %>%
summarise(VAR_ADJ = sd(travel_time.y),
AVG_ADJ = mean(travel_time.y),
VAR_DIFF = sd(DIFF),
AVG_DIFF = mean(DIFF), .groups='keep') %>%
mutate(STD_DIFF = VAR_DIFF/travel_time.x,
STD_ADJ = VAR_ADJ/travel_time.x,)
# Plot distribution of travel time variance
ggplot(data = var_times) + geom_boxplot(aes(y=STD_ADJ)) +
labs(title="Relative variance of travel time to adjacent tracts",
x="1 Observation = 1 Tract (GEOID)", y="StDev(TTime to Adjacent Tracts)/Mean(TTime to Tract)")
# Spatial plots
ggplot(data = var_times) + geom_sf(aes(fill=travel_time.x)) +
labs(title="Baseline travel time", fill="Minutes")
ggplot(data = var_times) + geom_sf(aes(fill=VAR_ADJ)) +
labs(title="Variance of travel time vs adjacent tracts", fill="Minutes")
ggplot(data = var_times) + geom_sf(aes(fill=STD_ADJ)) +
labs(title="Mean-adjusted variance of travel time vs adjacent tracts", fill="Minutes")
| /routing/r5r/discontinuity_finder_differential.R | no_license | dfsnow/travel-time-matrices | R | false | false | 3,364 | r | ##################################################
## Brief: Identify discontinuities in travel times to adjacent tracts.
## Date: 05/13/2021
## Author: Eric Chandler <echandler@uchicago.edu>
## Details: Measures variance of travel times to adjacent tracts.
## Tracts with large variance may indicate routing has failed.
##################################################
library(r5r)
library(sf)
library(dplyr)
library(glue)
# Set target county, geography, and env vars
target_year <- "2010"
target_state <- "24"
target_geography <- "tract"
target_od_path <- glue(
"input/{target_year}/{target_geography}/",
"origin_destination_by_state/{target_state}/"
)
mode <- c("WALK","TRANSIT")
target_mode <- paste(mode, collapse='-')
target_ttm_dir <- glue("output/{target_year}/{target_geography}/travel_time_matrices/{target_state}/{target_mode}")
# Load origins and destinations from resources/ directory
origins <- readr::read_csv(glue(target_od_path, "/origins.csv")) %>% mutate(ID_INT = as.numeric(id))
destinations <- readr::read_csv(glue(target_od_path, "/destinations.csv")) %>% mutate(ID_INT = as.numeric(id))
# Read r5r output
tt_mat <- read.csv(glue(target_ttm_dir, "/ttm.csv")) %>%
left_join(origins, by = c("fromId" = "ID_INT"), suffix = c(".tt", ".org")) %>%
left_join(destinations, by = c("toId" = "ID_INT"), suffix = c(".org", ".dest"))
# Get tracts
tract_geos <- tigris::tracts(as.numeric(target_state)) %>%
mutate(GEOID_INT = as.numeric(GEOID)) %>% st_transform(2163)
# Compute adjacent tracts
adjacent_tracts <- tract_geos %>% st_join(tract_geos, join = st_touches) %>%
select(GEOID_INT.x, GEOID_INT.y, geometry)
# !!REPEAT!! run code from here onwards a few times to spot-check many points!
# Sample one origin tract to reduce memory costs
tt_from_one_origin <- tt_mat %>% select(fromId, toId, travel_time) %>%
filter(fromId == sample(tt_mat$fromId,size=1))
# Compute difference in travel time from origin to adjacent tracts
adjacent_times <- adjacent_tracts %>%
inner_join(tt_from_one_origin, by = c("GEOID_INT.x" = "toId")) %>%
inner_join(tt_from_one_origin, by = c("GEOID_INT.y" = "toId")) %>%
mutate(DIFF = travel_time.x - travel_time.y)
# Compute variance in adjacent tract travel times
var_times <- adjacent_times %>% group_by(fromId.x, GEOID_INT.x, travel_time.x) %>%
summarise(VAR_ADJ = sd(travel_time.y),
AVG_ADJ = mean(travel_time.y),
VAR_DIFF = sd(DIFF),
AVG_DIFF = mean(DIFF), .groups='keep') %>%
mutate(STD_DIFF = VAR_DIFF/travel_time.x,
STD_ADJ = VAR_ADJ/travel_time.x,)
# Plot distribution of travel time variance
ggplot(data = var_times) + geom_boxplot(aes(y=STD_ADJ)) +
labs(title="Relative variance of travel time to adjacent tracts",
x="1 Observation = 1 Tract (GEOID)", y="StDev(TTime to Adjacent Tracts)/Mean(TTime to Tract)")
# Spatial plots
ggplot(data = var_times) + geom_sf(aes(fill=travel_time.x)) +
labs(title="Baseline travel time", fill="Minutes")
ggplot(data = var_times) + geom_sf(aes(fill=VAR_ADJ)) +
labs(title="Variance of travel time vs adjacent tracts", fill="Minutes")
ggplot(data = var_times) + geom_sf(aes(fill=STD_ADJ)) +
labs(title="Mean-adjusted variance of travel time vs adjacent tracts", fill="Minutes")
|
findfactors <- function(num) {
  # Prime factorisation by trial division.
  # Returns the prime factors of `num` in non-decreasing order
  # (an empty vector for num == 1).
  #
  # Fixed: the original used `1stprime` / `2ndprime` as variable names,
  # which are not legal R identifiers, so the file did not parse.
  stopifnot(num >= 1)
  factors <- c()
  divisor <- 2
  remaining <- num
  while (remaining != 1) {
    while (remaining %% divisor == 0) {
      factors <- c(factors, divisor)
      remaining <- remaining / divisor
    }
    # Composite candidates never divide `remaining` once their prime
    # factors have been stripped, so stepping by 1 stays correct.
    divisor <- divisor + 1
  }
  factors
}
print(findfactors(1027 * 4))
| /Scripts/Task/prime-decomposition/r/prime-decomposition.r | no_license | stefanos1316/Rosetta-Code-Research | R | false | false | 333 | r | findfactors <- function(num) {
x <- c()
1stprime<- 2; 2ndprime <- 3; everyprime <- num
while( everyprime != 1 ) {
while( everyprime%%1stprime == 0 ) {
x <- c(x, 1stprime)
everyprime <- floor(everyprime/ 1stprime)
}
1stprime <- 2ndprime
2ndprime <- 2ndprime + 2
}
x
}
print(findfactors(1027*4))
|
#' @title Animate cases on a process map
#'
#' @description This function animates the cases stored in a `bupaR` event log on top of a process model.
#' Each case is represented by a token that travels through the process model according to the waiting and processing times of activities.
#' Currently, animation is only supported for process models created by \code{\link{process_map}} of the `processmapR` package.
#' The animation will be rendered as SVG animation (SMIL) using the `htmlwidgets` framework. Each token is a SVG shape and customizable.
#'
#' @param eventlog The `bupaR` event log object that should be animated
#' @param processmap A process map created with `processmapR` (\code{\link{process_map}})
#' on which the event log will be animated. If not provided a standard process map will be generated
#' from the supplied event log.
#' @param renderer Whether to use Graphviz (\code{\link{renderer_graphviz}}) to layout and render the process map,
#' or to render the process map using Leaflet (\code{\link{renderer_leaflet}}) on a geographical map.
#' @param mode Whether to animate the cases according to their actual time of occurrence (`absolute`) or to start all cases at once (`relative`).
#' @param duration The overall duration of the animation, all times are scaled according to this overall duration.
#' @param jitter The magnitude of a random coordinate translation, known as jitter in scatterplots, which is added to each token.
#' Adding jitter can help to disambiguate tokens drawn on top of each other.
#' @param timeline Whether to render a timeline slider in supported browsers (Work only on recent versions of Chrome and Firefox).
#' @param initial_state Whether the initial playback state is `playing` or `paused`. The default is `playing`.
#' @param initial_time Sets the initial time of the animation. The default value is `0`.
#' @param legend Whether to show a legend for the `size` or the `color` scale. The default is not to show a legend.
#' @param repeat_count The number of times the process animation is repeated.
#' @param repeat_delay The seconds to wait before one repetition of the animation.
#' @param epsilon_time A (small) time to be added to every animation to ensure that tokens are visible.
#' @param mapping A list of aesthetic mappings from event log attributes to certain visual parameters of the tokens.
#' Use \code{\link{token_aes}} to create a suitable mapping list.
#' @param token_callback_onclick A JavaScript function that is called when a token is clicked.
#' The function is parsed by \code{\link{JS}} and received three parameters: `svg_root`, 'svg_element', and 'case_id'.
#' @param token_callback_select A JavaScript callback function called when token selection changes.
#' @param activity_callback_onclick A JavaScript function that is called when an activity is clicked.
#' The function is parsed by \code{\link{JS}} and received three parameters: 'svg_root', 'svg_element', and 'activity_id'.
#' @param activity_callback_select A JavaScript callback function called when activity selection changes.
#' @param elementId passed through to \code{\link{createWidget}}. A custom elementId is useful to capture the selection events
#' via input$elementId_tokens and input$elementId_activities when used in Shiny.
#' @param preRenderHook passed through to \code{\link{createWidget}}.
#' @param width,height Fixed size for widget (in css units).
#' The default is NULL, which results in intelligent automatic sizing based on the widget's container.
#' @param sizingPolicy Options that govern how the widget is sized in various
#' containers (e.g. a standalone browser, the RStudio Viewer, a knitr figure,
#' or a Shiny output binding). These options can be specified by calling the
#' \code{\link{sizingPolicy}} function.
#' @param ... Options passed on to \code{\link{process_map}}.
#'
#' @examples
#' data(example_log)
#'
#' # Animate the process with default options (absolute time and 60s duration)
#' animate_process(example_log)
#' \donttest{
#' # Animate the process with default options (relative time, with jitter, infinite repeat)
#' animate_process(example_log, mode = "relative", jitter = 10, repeat_count = Inf)
#' }
#'
#' @seealso \code{\link{process_map}}, \code{\link{token_aes}}
#'
#' @import dplyr
#' @importFrom magrittr %>%
#' @importFrom rlang :=
#' @importFrom processmapR process_map
#'
#' @export
animate_process <- function(eventlog,
                            processmap = process_map(eventlog, render = FALSE, ...),
                            renderer = renderer_graphviz(),
                            mode = c("absolute", "relative", "off"),
                            duration = 60,
                            jitter = 0,
                            timeline = TRUE,
                            legend = NULL,
                            initial_state = c("playing", "paused"),
                            initial_time = 0,
                            repeat_count = 1,
                            repeat_delay = 0.5,
                            epsilon_time = duration / 1000,
                            mapping = token_aes(),
                            token_callback_onclick = c("function(svg_root, svg_element, case_id) {", "}"),
                            token_callback_select = token_select_decoration(),
                            activity_callback_onclick = c("function(svg_root, svg_element, activity_id) {", "}"),
                            activity_callback_select = activity_select_decoration(),
                            elementId = NULL,
                            preRenderHook = NULL,
                            width = NULL,
                            height = NULL,
                            sizingPolicy = htmlwidgets::sizingPolicy(
                              browser.fill = TRUE,
                              viewer.fill = TRUE,
                              knitr.figure = FALSE,
                              knitr.defaultWidth = "100%",
                              knitr.defaultHeight = "300"
                            ),
                            ...) {
  # Reject the deprecated pre-1.0 API early with a clear error.
  if (any(startsWith(as.character(names(list(...))), "animation_"))) {
    stop("The old pre 1.0 API using `animation_` parameters is deprecated.")
  }
  # Declare NSE column names as NULL locals so R CMD check does not flag
  # them as undefined globals in the dplyr pipelines below.
  case_start <- log_end <- start_time <- end_time <- next_end_time <- next_start_time <- NULL
  case <- case_end <- log_start <- log_duration <- case_duration <- from_id <- to_id <- NULL
  label <- act <- NULL
  token_start <- token_end <- activity_duration <- token_duration <- NULL
  constraint <- weight <- NULL
  mode <- match.arg(mode)
  initial_state <- match.arg(initial_state)
  if (initial_time > duration) {
    stop("The 'initial_time' parameter should be less or equal than the specified 'duration'.")
  }
  # Derive the activity nodes either from a processmapR process map
  # (`base_precedence` attribute) or from a causal net (`causal_nodes`).
  precedence <- NULL
  if (!is.null(attr(processmap, "base_precedence"))) {
    precedence <- attr(processmap, "base_precedence") %>%
      mutate_at(vars(start_time, end_time, next_start_time, next_end_time), as.numeric, units = "secs")
    activities <- precedence %>%
      select(act, id = from_id) %>%
      stats::na.omit() %>%
      distinct() %>%
      arrange(id)
  } else if (!is.null(attr(processmap, "causal_nodes"))) {
    activities <- attr(processmap, "causal_nodes") %>%
      select(act, id = from_id) %>%
      stats::na.omit() %>%
      distinct() %>%
      arrange(id)
  } else {
    stop("Missing attribute `base_precedence` or `causal_nodes`. Did you supply a process map generated by `process_map` or `render_causal_net`? ")
  }
  start_activity <- processmap$nodes_df %>% filter(label == "Start") %>% pull(id)
  end_activity <- processmap$nodes_df %>% filter(label == "End") %>% pull(id)
  if (mode != "off" && !is.null(precedence)) {
    # Per-case start, end and duration, used to scale the animation times.
    suppressWarnings({
      cases <- precedence %>%
        group_by(case) %>%
        summarise(case_start = min(start_time, na.rm = TRUE),
                  case_end = max(end_time, na.rm = TRUE)) %>%
        mutate(case_duration = case_end - case_start) %>%
        filter(!is.na(case)) %>%
        ungroup()
    })
    # determine animation factor based on requested duration
    if (mode == "absolute") {
      timeline_start <- cases %>% pull(case_start) %>% min(na.rm = TRUE)
      # Consistency fix: also ignore NAs for the timeline end (was plain max()).
      timeline_end <- cases %>% pull(case_end) %>% max(na.rm = TRUE)
      a_factor <- (timeline_end - timeline_start) / duration
    } else {
      timeline_start <- 0
      timeline_end <- cases %>% pull(case_duration) %>% max(na.rm = TRUE)
      a_factor <- timeline_end / duration
    }
    tokens <- generate_tokens(cases, precedence, processmap, mode, a_factor,
                              timeline_start, timeline_end, epsilon_time)
    # Re-normalize so the last token arrival matches `duration` exactly
    # (the epsilons added in generate_tokens stretch the raw times slightly).
    adjust <- max(tokens$token_end) / duration
    tokens <- tokens %>%
      mutate(token_start = token_start / adjust,
             token_duration = token_duration / adjust,
             activity_duration = activity_duration / adjust) %>%
      select(-token_end)
    # Resolve each aesthetic mapping to a per-case (case, time, value) series.
    sizes <- generate_animation_attribute(eventlog, mapping$size$attribute, 6)
    sizes <- transform_time(sizes, cases, mode, a_factor, timeline_start, timeline_end)
    colors <- generate_animation_attribute(eventlog, mapping$color$attribute, "white")
    colors <- transform_time(colors, cases, mode, a_factor, timeline_start, timeline_end)
    images <- generate_animation_attribute(eventlog, mapping$image$attribute, NA)
    images <- transform_time(images, cases, mode, a_factor, timeline_start, timeline_end)
    if (mapping$shape == "image" && nrow(images) == 0) {
      stop("Need to supply image URLs in parameter 'mapping' to use shape 'image'.")
    }
    opacities <- generate_animation_attribute(eventlog, mapping$opacity$attribute, 0.9)
    opacities <- transform_time(opacities, cases, mode, a_factor, timeline_start, timeline_end)
  } else {
    # No animation mode, for using activity selection features only
    sizes <- data.frame()
    colors <- data.frame()
    images <- data.frame()
    opacities <- data.frame()
    tokens <- data.frame()
    timeline_start <- 0
    timeline_end <- 0
    timeline <- FALSE
    a_factor <- 0
  }
  if ("weight" %in% colnames(processmap$edges_df)) {
    # hack to add a 'len' (edge length) attribute to the graph from 'weight'
    processmap$edges_df <- processmap$edges_df %>%
      mutate(len = weight)
  }
  if ("constraint" %in% colnames(processmap$edges_df)) {
    # hack to add a 'decorate' attribute to the graph from 'constraint'
    processmap$edges_df <- processmap$edges_df %>%
      mutate(decorate = constraint)
  }
  # actually render the process map and assemble the htmlwidgets payload
  rendered_process <- renderer(processmap, width, height)
  x <- list(
    rendered_process = rendered_process,
    activities = activities,
    tokens = tokens,
    sizes = sizes,
    sizes_scale = mapping$size,
    colors = colors,
    colors_scale = mapping$color,
    opacities = opacities,
    opacities_scale = mapping$opacity,
    images = images,
    images_scale = mapping$image,
    shape = mapping$shape, #TODO see if this can be a scale too
    attributes = mapping$attributes,
    start_activity = start_activity,
    end_activity = end_activity,
    duration = duration,
    timeline = timeline,
    mode = mode,
    initial_state = initial_state,
    initial_time = initial_time,
    repeat_count = repeat_count,
    repeat_delay = repeat_delay,
    jitter = jitter,
    factor = a_factor * 1000,       # seconds -> milliseconds on the JS side
    legend = legend,
    timeline_start = timeline_start * 1000,
    timeline_end = timeline_end * 1000,
    onclick_token_callback = htmlwidgets::JS(token_callback_onclick),
    onclick_token_select = htmlwidgets::JS(token_callback_select),
    onclick_activity_callback = htmlwidgets::JS(activity_callback_onclick),
    onclick_activity_select = htmlwidgets::JS(activity_callback_select),
    processmap_renderer = attr(renderer, "name")
  )
  x <- c(x, attr(renderer, "config"))
  htmlwidgets::createWidget(elementId = elementId,
                            name = "processanimateR",
                            x = x,
                            width = width, height = height,
                            sizingPolicy = sizingPolicy,
                            preRenderHook = preRenderHook,
                            dependencies = attr(renderer, "dependencies"))
}
#' @title Create a process animation output element
#' @description Renders a processanimateR animation within an application page.
#' @param outputId Output variable to read the animation from
#' @param width,height Must be a valid CSS unit (like 100%, 400px, auto) or a number,
#' which will be coerced to a string and have px appended.
#'
#' @export
processanimaterOutput <- function(outputId, width = "100%", height = "400px") {
  # Standard htmlwidgets Shiny output binding for processanimateR widgets.
  # Fix: use `FALSE` instead of the reassignable shorthand `F`.
  htmlwidgets::shinyWidgetOutput(outputId = outputId,
                                 name = "processanimateR",
                                 inline = FALSE,
                                 width = width, height = height,
                                 package = "processanimateR")
}
#' @title Renders process animation output
#' @description Renders a SVG process animation suitable to be used by processanimaterOutput.
#' @param expr The expression generating a process animation (animate_process).
#' @param env The environment in which to evaluate expr.
#' @param quoted Is expr a quoted expression (with quote())? This is useful if you want to save an expression in a variable.
#'
#' @export
renderProcessanimater <- function(expr, env = parent.frame(), quoted = FALSE) {
  # Capture the animation expression unevaluated unless the caller already
  # quoted it, then delegate to htmlwidgets' standard Shiny render helper.
  quoted_expr <- if (quoted) expr else substitute(expr)
  htmlwidgets::shinyRenderWidget(quoted_expr, processanimaterOutput, env, quoted = TRUE)
}
#
# Private helper functions
#
# Compute, per case, the token travel schedule along the edges of the process
# map: when a token leaves an activity (token_start), how long it travels the
# edge (token_duration), and how long it dwells at the target activity
# (activity_duration). All times are already scaled into animation seconds via
# `a_factor`. Returns one row per traversed edge with columns case, edge_id,
# token_start, token_duration, activity_duration, token_end.
# NOTE(review): `timeline_end` is currently unused by this function.
generate_tokens <- function(cases, precedence, processmap, mode, a_factor,
                            timeline_start, timeline_end, epsilon) {
  # NSE placeholders to silence R CMD check notes about undefined globals.
  case <- end_time <- start_time <- next_end_time <- next_start_time <- case_start <- token_duration <- NULL
  min_order <- token_start <- activity_duration <- token_end <- from_id <- to_id <- case_duration <- NULL
  # Join per-case timing and the edge ids of the rendered graph; rows without
  # a matching edge or case are dropped.
  tokens <- precedence %>%
    left_join(cases, by = c("case")) %>%
    left_join(processmap$edges_df, by = c("from_id" = "from", "to_id" = "to")) %>%
    filter(!is.na(id) & !is.na(case))
  if (mode == "absolute") {
    # Absolute mode: offsets are measured from the global timeline start.
    tokens <- mutate(tokens,
                     token_start = (end_time - timeline_start) / a_factor,
                     token_duration = (next_start_time - end_time) / a_factor,
                     activity_duration = pmax(0, (next_end_time - next_start_time) / a_factor))
  } else {
    # Relative mode: every case is animated from its own start time.
    tokens <- mutate(tokens,
                     token_start = (end_time - case_start) / a_factor,
                     token_duration = (next_start_time - end_time) / a_factor,
                     activity_duration = pmax(0, (next_end_time - next_start_time) / a_factor))
  }
  tokens <- tokens %>%
    # TODO improve handling of parallelism
    # Filter all negative durations caused by parallelism
    filter(token_duration >= 0, activity_duration >= 0) %>%
    # SVG animations seem to not like events starting at the same time caused by 0s durations
    mutate(token_duration = epsilon + token_duration,
           activity_duration = epsilon + activity_duration) %>%
    arrange(case, start_time, min_order) %>%
    group_by(case) %>%
    # Ensure start times are not overlapping SMIL does not fancy this
    mutate(token_start = token_start + ((row_number(token_start) - min_rank(token_start)) * epsilon)) %>%
    # Ensure consecutive start times, this epsilon just needs to be small
    mutate(token_end = min(token_start) + cumsum(token_duration + activity_duration) + 0.000001) %>%
    mutate(token_start = lag(token_end, default = min(token_start))) %>%
    ungroup()
  tokens %>%
    select(case,
           edge_id = id,
           token_start,
           token_duration,
           activity_duration,
           token_end)
}
# Build a per-case (case, time, value) data frame for one token aesthetic
# (size, color, image or opacity) from the `mapping` specification.
# `value` may be:
#  - NULL: emit `default` once per case, at the case's first timestamp;
#  - a data frame: must already contain case/time/value columns, used as-is;
#  - the name of an event-log column: take that column per event;
#  - anything else: treated as a fixed constant per event.
generate_animation_attribute <- function(eventlog, value, default) {
  # The output column is always named `value`; build a symbol for tidy eval.
  attribute <- rlang::sym("value")
  if (is.null(value)) {
    # use fixed default value
    eventlog %>%
      as.data.frame() %>%
      group_by(!!case_id_(eventlog)) %>%
      summarise(time = min(!!timestamp_(eventlog))) %>%
      mutate(!!attribute := default) %>%
      rename(case = !!case_id_(eventlog))
  } else if (is.data.frame(value)) {
    # check data present
    stopifnot(c("case", "time", "value") %in% colnames(value))
    value
  } else if (value %in% colnames(eventlog)) {
    # use existing value from event log
    eventlog %>%
      as.data.frame() %>%
      mutate(!!attribute := !!rlang::sym(value)) %>%
      select(case = !!case_id_(eventlog),
             time = !!timestamp_(eventlog),
             !!attribute)
  } else {
    # set to a fixed value
    eventlog %>%
      as.data.frame() %>%
      mutate(!!attribute := value) %>%
      select(case = !!case_id_(eventlog),
             time = !!timestamp_(eventlog),
             !!attribute)
  }
}
# Scale the `time` column of an aesthetic data frame into animation seconds,
# relative either to the global `timeline_start` (absolute mode) or to each
# case's own start (relative mode).
# NOTE(review): `timeline_end` is currently unused by this function.
transform_time <- function(data, cases, mode, a_factor, timeline_start, timeline_end) {
  # NSE placeholders to silence R CMD check notes about undefined globals.
  .order <- time <- case <- log_start <- case_start <- value <- NULL
  if (nrow(data) != nrow(cases)) {
    # More rows than cases implies a real per-case time series: keep only the
    # first row and rows where the value changes to shrink the payload.
    # NOTE(review): after this branch `data` remains grouped by case through
    # the final select — confirm downstream consumers are unaffected.
    data <- data %>%
      group_by(case) %>%
      filter(row_number() == 1 | lag(value) != value) # only keep changes in value
  }
  data <- data %>%
    left_join(cases, by = "case")
  if (mode == "absolute") {
    data <- mutate(data, time = as.numeric(time - timeline_start, units = "secs"))
  } else {
    data <- mutate(data, time = as.numeric(time - case_start, units = "secs"))
  }
  data %>%
    mutate(time = time / a_factor) %>%
    select(case, time, value)
}
# Utility functions
# https://github.com/gertjanssenswillen/processmapR/blob/master/R/utils.R
# Resolve the event log's case-id column name to a symbol for !! injection.
case_id_ <- function(eventlog) rlang::sym(bupaR::case_id(eventlog))
# Resolve the event log's timestamp column name to a symbol for !! injection.
timestamp_ <- function(eventlog) rlang::sym(bupaR::timestamp(eventlog))
| /R/processanimateR.R | permissive | jennifer-cooper/processanimateR | R | false | false | 17,795 | r | #' @title Animate cases on a process map
#'
#' @description This function animates the cases stored in a `bupaR` event log on top of a process model.
#' Each case is represented by a token that travels through the process model according to the waiting and processing times of activities.
#' Currently, animation is only supported for process models created by \code{\link{process_map}} of the `processmapR` package.
#' The animation will be rendered as SVG animation (SMIL) using the `htmlwidgets` framework. Each token is a SVG shape and customizable.
#'
#' @param eventlog The `bupaR` event log object that should be animated
#' @param processmap A process map created with `processmapR` (\code{\link{process_map}})
#' on which the event log will be animated. If not provided a standard process map will be generated
#' from the supplied event log.
#' @param renderer Whether to use Graphviz (\code{\link{renderer_graphviz}}) to layout and render the process map,
#' or to render the process map using Leaflet (\code{\link{renderer_leaflet}}) on a geographical map.
#' @param mode Whether to animate the cases according to their actual time of occurrence (`absolute`) or to start all cases at once (`relative`).
#' @param duration The overall duration of the animation, all times are scaled according to this overall duration.
#' @param jitter The magnitude of a random coordinate translation, known as jitter in scatterplots, which is added to each token.
#' Adding jitter can help to disambiguate tokens drawn on top of each other.
#' @param timeline Whether to render a timeline slider in supported browsers (Work only on recent versions of Chrome and Firefox).
#' @param initial_state Whether the initial playback state is `playing` or `paused`. The default is `playing`.
#' @param initial_time Sets the initial time of the animation. The default value is `0`.
#' @param legend Whether to show a legend for the `size` or the `color` scale. The default is not to show a legend.
#' @param repeat_count The number of times the process animation is repeated.
#' @param repeat_delay The seconds to wait before one repetition of the animation.
#' @param epsilon_time A (small) time to be added to every animation to ensure that tokens are visible.
#' @param mapping A list of aesthetic mappings from event log attributes to certain visual parameters of the tokens.
#' Use \code{\link{token_aes}} to create a suitable mapping list.
#' @param token_callback_onclick A JavaScript function that is called when a token is clicked.
#' The function is parsed by \code{\link{JS}} and receives three parameters: 'svg_root', 'svg_element', and 'case_id'.
#' @param token_callback_select A JavaScript callback function called when token selection changes.
#' @param activity_callback_onclick A JavaScript function that is called when an activity is clicked.
#' The function is parsed by \code{\link{JS}} and received three parameters: 'svg_root', 'svg_element', and 'activity_id'.
#' @param activity_callback_select A JavaScript callback function called when activity selection changes.
#' @param elementId passed through to \code{\link{createWidget}}. A custom elementId is useful to capture the selection events
#' via input$elementId_tokens and input$elementId_activities when used in Shiny.
#' @param preRenderHook passed through to \code{\link{createWidget}}.
#' @param width,height Fixed size for widget (in css units).
#' The default is NULL, which results in intelligent automatic sizing based on the widget's container.
#' @param sizingPolicy Options that govern how the widget is sized in various
#' containers (e.g. a standalone browser, the RStudio Viewer, a knitr figure,
#' or a Shiny output binding). These options can be specified by calling the
#' \code{\link{sizingPolicy}} function.
#' @param ... Options passed on to \code{\link{process_map}}.
#'
#' @examples
#' data(example_log)
#'
#' # Animate the process with default options (absolute time and 60s duration)
#' animate_process(example_log)
#' \donttest{
#' # Animate the process with default options (relative time, with jitter, infinite repeat)
#' animate_process(example_log, mode = "relative", jitter = 10, repeat_count = Inf)
#' }
#'
#' @seealso \code{\link{process_map}}, \code{\link{token_aes}}
#'
#' @import dplyr
#' @importFrom magrittr %>%
#' @importFrom rlang :=
#' @importFrom processmapR process_map
#'
#' @export
animate_process <- function(eventlog,
                            processmap = process_map(eventlog, render = FALSE, ...),
                            renderer = renderer_graphviz(),
                            mode = c("absolute", "relative", "off"),
                            duration = 60,
                            jitter = 0,
                            timeline = TRUE,
                            legend = NULL,
                            initial_state = c("playing", "paused"),
                            initial_time = 0,
                            repeat_count = 1,
                            repeat_delay = 0.5,
                            epsilon_time = duration / 1000,
                            mapping = token_aes(),
                            token_callback_onclick = c("function(svg_root, svg_element, case_id) {", "}"),
                            token_callback_select = token_select_decoration(),
                            activity_callback_onclick = c("function(svg_root, svg_element, activity_id) {", "}"),
                            activity_callback_select = activity_select_decoration(),
                            elementId = NULL,
                            preRenderHook = NULL,
                            width = NULL,
                            height = NULL,
                            sizingPolicy = htmlwidgets::sizingPolicy(
                              browser.fill = TRUE,
                              viewer.fill = TRUE,
                              knitr.figure = FALSE,
                              knitr.defaultWidth = "100%",
                              knitr.defaultHeight = "300"
                            ),
                            ...) {
  # Reject the deprecated pre-1.0 API early with a clear error.
  if (any(startsWith(as.character(names(list(...))), "animation_"))) {
    stop("The old pre 1.0 API using `animation_` parameters is deprecated.")
  }
  # Declare NSE column names as NULL locals so R CMD check does not flag
  # them as undefined globals in the dplyr pipelines below.
  case_start <- log_end <- start_time <- end_time <- next_end_time <- next_start_time <- NULL
  case <- case_end <- log_start <- log_duration <- case_duration <- from_id <- to_id <- NULL
  label <- act <- NULL
  token_start <- token_end <- activity_duration <- token_duration <- NULL
  constraint <- weight <- NULL
  mode <- match.arg(mode)
  initial_state <- match.arg(initial_state)
  if (initial_time > duration) {
    stop("The 'initial_time' parameter should be less or equal than the specified 'duration'.")
  }
  # Derive the activity nodes either from a processmapR process map
  # (`base_precedence` attribute) or from a causal net (`causal_nodes`).
  precedence <- NULL
  if (!is.null(attr(processmap, "base_precedence"))) {
    precedence <- attr(processmap, "base_precedence") %>%
      mutate_at(vars(start_time, end_time, next_start_time, next_end_time), as.numeric, units = "secs")
    activities <- precedence %>%
      select(act, id = from_id) %>%
      stats::na.omit() %>%
      distinct() %>%
      arrange(id)
  } else if (!is.null(attr(processmap, "causal_nodes"))) {
    activities <- attr(processmap, "causal_nodes") %>%
      select(act, id = from_id) %>%
      stats::na.omit() %>%
      distinct() %>%
      arrange(id)
  } else {
    stop("Missing attribute `base_precedence` or `causal_nodes`. Did you supply a process map generated by `process_map` or `render_causal_net`? ")
  }
  start_activity <- processmap$nodes_df %>% filter(label == "Start") %>% pull(id)
  end_activity <- processmap$nodes_df %>% filter(label == "End") %>% pull(id)
  if (mode != "off" && !is.null(precedence)) {
    # Per-case start, end and duration, used to scale the animation times.
    suppressWarnings({
      cases <- precedence %>%
        group_by(case) %>%
        summarise(case_start = min(start_time, na.rm = TRUE),
                  case_end = max(end_time, na.rm = TRUE)) %>%
        mutate(case_duration = case_end - case_start) %>%
        filter(!is.na(case)) %>%
        ungroup()
    })
    # determine animation factor based on requested duration
    if (mode == "absolute") {
      timeline_start <- cases %>% pull(case_start) %>% min(na.rm = TRUE)
      # Consistency fix: also ignore NAs for the timeline end (was plain max()).
      timeline_end <- cases %>% pull(case_end) %>% max(na.rm = TRUE)
      a_factor <- (timeline_end - timeline_start) / duration
    } else {
      timeline_start <- 0
      timeline_end <- cases %>% pull(case_duration) %>% max(na.rm = TRUE)
      a_factor <- timeline_end / duration
    }
    tokens <- generate_tokens(cases, precedence, processmap, mode, a_factor,
                              timeline_start, timeline_end, epsilon_time)
    # Re-normalize so the last token arrival matches `duration` exactly
    # (the epsilons added in generate_tokens stretch the raw times slightly).
    adjust <- max(tokens$token_end) / duration
    tokens <- tokens %>%
      mutate(token_start = token_start / adjust,
             token_duration = token_duration / adjust,
             activity_duration = activity_duration / adjust) %>%
      select(-token_end)
    # Resolve each aesthetic mapping to a per-case (case, time, value) series.
    sizes <- generate_animation_attribute(eventlog, mapping$size$attribute, 6)
    sizes <- transform_time(sizes, cases, mode, a_factor, timeline_start, timeline_end)
    colors <- generate_animation_attribute(eventlog, mapping$color$attribute, "white")
    colors <- transform_time(colors, cases, mode, a_factor, timeline_start, timeline_end)
    images <- generate_animation_attribute(eventlog, mapping$image$attribute, NA)
    images <- transform_time(images, cases, mode, a_factor, timeline_start, timeline_end)
    if (mapping$shape == "image" && nrow(images) == 0) {
      stop("Need to supply image URLs in parameter 'mapping' to use shape 'image'.")
    }
    opacities <- generate_animation_attribute(eventlog, mapping$opacity$attribute, 0.9)
    opacities <- transform_time(opacities, cases, mode, a_factor, timeline_start, timeline_end)
  } else {
    # No animation mode, for using activity selection features only
    sizes <- data.frame()
    colors <- data.frame()
    images <- data.frame()
    opacities <- data.frame()
    tokens <- data.frame()
    timeline_start <- 0
    timeline_end <- 0
    timeline <- FALSE
    a_factor <- 0
  }
  if ("weight" %in% colnames(processmap$edges_df)) {
    # hack to add a 'len' (edge length) attribute to the graph from 'weight'
    processmap$edges_df <- processmap$edges_df %>%
      mutate(len = weight)
  }
  if ("constraint" %in% colnames(processmap$edges_df)) {
    # hack to add a 'decorate' attribute to the graph from 'constraint'
    processmap$edges_df <- processmap$edges_df %>%
      mutate(decorate = constraint)
  }
  # actually render the process map and assemble the htmlwidgets payload
  rendered_process <- renderer(processmap, width, height)
  x <- list(
    rendered_process = rendered_process,
    activities = activities,
    tokens = tokens,
    sizes = sizes,
    sizes_scale = mapping$size,
    colors = colors,
    colors_scale = mapping$color,
    opacities = opacities,
    opacities_scale = mapping$opacity,
    images = images,
    images_scale = mapping$image,
    shape = mapping$shape, #TODO see if this can be a scale too
    attributes = mapping$attributes,
    start_activity = start_activity,
    end_activity = end_activity,
    duration = duration,
    timeline = timeline,
    mode = mode,
    initial_state = initial_state,
    initial_time = initial_time,
    repeat_count = repeat_count,
    repeat_delay = repeat_delay,
    jitter = jitter,
    factor = a_factor * 1000,       # seconds -> milliseconds on the JS side
    legend = legend,
    timeline_start = timeline_start * 1000,
    timeline_end = timeline_end * 1000,
    onclick_token_callback = htmlwidgets::JS(token_callback_onclick),
    onclick_token_select = htmlwidgets::JS(token_callback_select),
    onclick_activity_callback = htmlwidgets::JS(activity_callback_onclick),
    onclick_activity_select = htmlwidgets::JS(activity_callback_select),
    processmap_renderer = attr(renderer, "name")
  )
  x <- c(x, attr(renderer, "config"))
  htmlwidgets::createWidget(elementId = elementId,
                            name = "processanimateR",
                            x = x,
                            width = width, height = height,
                            sizingPolicy = sizingPolicy,
                            preRenderHook = preRenderHook,
                            dependencies = attr(renderer, "dependencies"))
}
#' @title Create a process animation output element
#' @description Renders a processanimateR animation within an application page.
#' @param outputId Output variable to read the animation from
#' @param width,height Must be a valid CSS unit (like 100%, 400px, auto) or a number,
#' which will be coerced to a string and have px appended.
#'
#' @export
processanimaterOutput <- function(outputId, width = "100%", height = "400px") {
  # Standard htmlwidgets Shiny output binding for processanimateR widgets.
  # Fix: use `FALSE` instead of the reassignable shorthand `F`.
  htmlwidgets::shinyWidgetOutput(outputId = outputId,
                                 name = "processanimateR",
                                 inline = FALSE,
                                 width = width, height = height,
                                 package = "processanimateR")
}
#' @title Renders process animation output
#' @description Renders a SVG process animation suitable to be used by processanimaterOutput.
#' @param expr The expression generating a process animation (animate_process).
#' @param env The environment in which to evaluate expr.
#' @param quoted Is expr a quoted expression (with quote())? This is useful if you want to save an expression in a variable.
#'
#' @export
renderProcessanimater <- function(expr, env = parent.frame(), quoted = FALSE) {
  # Capture the animation expression unevaluated unless the caller already
  # quoted it, then delegate to htmlwidgets' standard Shiny render helper.
  quoted_expr <- if (quoted) expr else substitute(expr)
  htmlwidgets::shinyRenderWidget(quoted_expr, processanimaterOutput, env, quoted = TRUE)
}
#
# Private helper functions
#
# Compute, per case, the token travel schedule along the edges of the process
# map: when a token leaves an activity (token_start), how long it travels the
# edge (token_duration), and how long it dwells at the target activity
# (activity_duration). All times are already scaled into animation seconds via
# `a_factor`. Returns one row per traversed edge with columns case, edge_id,
# token_start, token_duration, activity_duration, token_end.
# NOTE(review): `timeline_end` is currently unused by this function.
generate_tokens <- function(cases, precedence, processmap, mode, a_factor,
                            timeline_start, timeline_end, epsilon) {
  # NSE placeholders to silence R CMD check notes about undefined globals.
  case <- end_time <- start_time <- next_end_time <- next_start_time <- case_start <- token_duration <- NULL
  min_order <- token_start <- activity_duration <- token_end <- from_id <- to_id <- case_duration <- NULL
  # Join per-case timing and the edge ids of the rendered graph; rows without
  # a matching edge or case are dropped.
  tokens <- precedence %>%
    left_join(cases, by = c("case")) %>%
    left_join(processmap$edges_df, by = c("from_id" = "from", "to_id" = "to")) %>%
    filter(!is.na(id) & !is.na(case))
  if (mode == "absolute") {
    # Absolute mode: offsets are measured from the global timeline start.
    tokens <- mutate(tokens,
                     token_start = (end_time - timeline_start) / a_factor,
                     token_duration = (next_start_time - end_time) / a_factor,
                     activity_duration = pmax(0, (next_end_time - next_start_time) / a_factor))
  } else {
    # Relative mode: every case is animated from its own start time.
    tokens <- mutate(tokens,
                     token_start = (end_time - case_start) / a_factor,
                     token_duration = (next_start_time - end_time) / a_factor,
                     activity_duration = pmax(0, (next_end_time - next_start_time) / a_factor))
  }
  tokens <- tokens %>%
    # TODO improve handling of parallelism
    # Filter all negative durations caused by parallelism
    filter(token_duration >= 0, activity_duration >= 0) %>%
    # SVG animations seem to not like events starting at the same time caused by 0s durations
    mutate(token_duration = epsilon + token_duration,
           activity_duration = epsilon + activity_duration) %>%
    arrange(case, start_time, min_order) %>%
    group_by(case) %>%
    # Ensure start times are not overlapping SMIL does not fancy this
    mutate(token_start = token_start + ((row_number(token_start) - min_rank(token_start)) * epsilon)) %>%
    # Ensure consecutive start times, this epsilon just needs to be small
    mutate(token_end = min(token_start) + cumsum(token_duration + activity_duration) + 0.000001) %>%
    mutate(token_start = lag(token_end, default = min(token_start))) %>%
    ungroup()
  tokens %>%
    select(case,
           edge_id = id,
           token_start,
           token_duration,
           activity_duration,
           token_end)
}
# Build a per-case (case, time, value) data frame for one token aesthetic
# (size, color, image or opacity) from the `mapping` specification.
# `value` may be:
#  - NULL: emit `default` once per case, at the case's first timestamp;
#  - a data frame: must already contain case/time/value columns, used as-is;
#  - the name of an event-log column: take that column per event;
#  - anything else: treated as a fixed constant per event.
generate_animation_attribute <- function(eventlog, value, default) {
  # The output column is always named `value`; build a symbol for tidy eval.
  attribute <- rlang::sym("value")
  if (is.null(value)) {
    # use fixed default value
    eventlog %>%
      as.data.frame() %>%
      group_by(!!case_id_(eventlog)) %>%
      summarise(time = min(!!timestamp_(eventlog))) %>%
      mutate(!!attribute := default) %>%
      rename(case = !!case_id_(eventlog))
  } else if (is.data.frame(value)) {
    # check data present
    stopifnot(c("case", "time", "value") %in% colnames(value))
    value
  } else if (value %in% colnames(eventlog)) {
    # use existing value from event log
    eventlog %>%
      as.data.frame() %>%
      mutate(!!attribute := !!rlang::sym(value)) %>%
      select(case = !!case_id_(eventlog),
             time = !!timestamp_(eventlog),
             !!attribute)
  } else {
    # set to a fixed value
    eventlog %>%
      as.data.frame() %>%
      mutate(!!attribute := value) %>%
      select(case = !!case_id_(eventlog),
             time = !!timestamp_(eventlog),
             !!attribute)
  }
}
# Scale the `time` column of an aesthetic data frame into animation seconds,
# relative either to the global `timeline_start` (absolute mode) or to each
# case's own start (relative mode).
# NOTE(review): `timeline_end` is currently unused by this function.
transform_time <- function(data, cases, mode, a_factor, timeline_start, timeline_end) {
  # NSE placeholders to silence R CMD check notes about undefined globals.
  .order <- time <- case <- log_start <- case_start <- value <- NULL
  if (nrow(data) != nrow(cases)) {
    # More rows than cases implies a real per-case time series: keep only the
    # first row and rows where the value changes to shrink the payload.
    # NOTE(review): after this branch `data` remains grouped by case through
    # the final select — confirm downstream consumers are unaffected.
    data <- data %>%
      group_by(case) %>%
      filter(row_number() == 1 | lag(value) != value) # only keep changes in value
  }
  data <- data %>%
    left_join(cases, by = "case")
  if (mode == "absolute") {
    data <- mutate(data, time = as.numeric(time - timeline_start, units = "secs"))
  } else {
    data <- mutate(data, time = as.numeric(time - case_start, units = "secs"))
  }
  data %>%
    mutate(time = time / a_factor) %>%
    select(case, time, value)
}
# Utility functions
# https://github.com/gertjanssenswillen/processmapR/blob/master/R/utils.R
# Resolve the event log's case-id column name to a symbol for !! injection.
case_id_ <- function(eventlog) rlang::sym(bupaR::case_id(eventlog))
# Resolve the event log's timestamp column name to a symbol for !! injection.
timestamp_ <- function(eventlog) rlang::sym(bupaR::timestamp(eventlog))
|
# Input-validation tests for the MCMCiter and Burnin arguments of
# cpbayes_cor() (the correlated version). `ExampleDataCor$cor` supplies a
# valid correlation matrix; `1:10` stand in for the beta and SE vectors.
context("Input MCMCiter and Burnin of function for correlated version")
# MCMCiter must be a scalar integer >= 2200; invalid values should only warn.
test_that("Throws warning if MCMCiter parameter is not a integer greater than a cutoff", {
  skip_on_cran()
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, MCMCiter = 1), "MCMCiter should be at least 2200*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, MCMCiter = "123"), "MCMCiter not provided as integer*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, MCMCiter = 123.5), "MCMCiter not provided as integer*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, MCMCiter = -1), "MCMCiter should be at least 2200*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, MCMCiter = c(10000, 20000)), "MCMCiter is not a scalar*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, MCMCiter = as.matrix(c(10000, 20000))), "MCMCiter is not a scalar*")
})
# Burnin must be a scalar integer >= 200.
# NOTE(review): the description says "lower than a cutoff" while the asserted
# warnings enforce a minimum of 200 — the label looks inverted; confirm intent.
test_that("Throws warning if Burnin parameter is not a integer lower than a cutoff", {
  skip_on_cran()
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = 1), "Burnin should be at least 200*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = "123"), "Burnin not provided as integer*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = 123.5), "Burnin not provided as integer*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = -1), "Burnin should be at least 200*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = c(10000, 20000)), "Burnin is not a scalar*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = as.matrix(c(10000, 20000))), "Burnin is not a scalar*")
})
# The retained sample size (MCMCiter - Burnin) must be large enough.
# NOTE(review): the description says 5000 but the expected warnings say 2000 —
# possibly a stale label; confirm against cpbayes_cor()'s documentation.
test_that("Throws warning if MCMC sample size is less than 5000", {
  skip_on_cran()
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = 10000, MCMCiter = 10000), "*provided less than 2000*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = 12000, MCMCiter = 10000), "*provided less than 2000*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = 10000, MCMCiter = 11000), "*provided less than 2000*")
})
| /tests/testthat/test-InputRestCor.R | no_license | ArunabhaCodes/CPBayes | R | false | false | 2,151 | r | context("Input MCMCiter and Burnin of function for correlated version")
# MCMCiter must be a scalar integer >= 2200; invalid values should only warn.
test_that("Throws warning if MCMCiter parameter is not a integer greater than a cutoff", {
  skip_on_cran()
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, MCMCiter = 1), "MCMCiter should be at least 2200*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, MCMCiter = "123"), "MCMCiter not provided as integer*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, MCMCiter = 123.5), "MCMCiter not provided as integer*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, MCMCiter = -1), "MCMCiter should be at least 2200*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, MCMCiter = c(10000, 20000)), "MCMCiter is not a scalar*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, MCMCiter = as.matrix(c(10000, 20000))), "MCMCiter is not a scalar*")
})
# Burnin must be a scalar integer >= 200.
# NOTE(review): the description says "lower than a cutoff" while the asserted
# warnings enforce a minimum of 200 — the label looks inverted; confirm intent.
test_that("Throws warning if Burnin parameter is not a integer lower than a cutoff", {
  skip_on_cran()
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = 1), "Burnin should be at least 200*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = "123"), "Burnin not provided as integer*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = 123.5), "Burnin not provided as integer*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = -1), "Burnin should be at least 200*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = c(10000, 20000)), "Burnin is not a scalar*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = as.matrix(c(10000, 20000))), "Burnin is not a scalar*")
})
# The retained sample size (MCMCiter - Burnin) must be large enough.
# NOTE(review): the description says 5000 but the expected warnings say 2000 —
# possibly a stale label; confirm against cpbayes_cor()'s documentation.
test_that("Throws warning if MCMC sample size is less than 5000", {
  skip_on_cran()
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = 10000, MCMCiter = 10000), "*provided less than 2000*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = 12000, MCMCiter = 10000), "*provided less than 2000*")
  expect_warning(cpbayes_cor(1:10, 1:10, ExampleDataCor$cor, Burnin = 10000, MCMCiter = 11000), "*provided less than 2000*")
})
|
#' Compute CompCor nuisance components from the input matrix using SVD and return the result.
#'
#' @param fmri input fmri image or matrix
#' @param ncompcor n compcor vectors
#' @param variance_extreme high variance threshold e.g 0.95 for 95 percent
#' @param mask optional mask for image
#' @param randomSamples take this many random samples to speed things up
#' @param returnv return the spatial vectors
#' @param returnhighvarmat bool to return the high variance matrix
#' @param returnhighvarmatinds bool to return the high variance matrix indices
#' @param highvarmatinds index list
#' @param scale scale the matrix of high variance voxels, default FALSE. note
#' that you may get slightly different results by scaling the input matrix
#' before passing into this function.
#' @return dataframe of nuisance predictors is output
#' @author Avants BB
#' @examples
#'
#' mat <- matrix( rnorm(50000) ,ncol=500)
#' compcorrdf<-compcor( mat )
#'
#' @export compcor
compcor <- function(fmri, ncompcor = 4,
variance_extreme = 0.975,
mask = NULL, randomSamples = 1,
returnv = FALSE, returnhighvarmat = FALSE,
returnhighvarmatinds = FALSE,
highvarmatinds = NA,
scale = TRUE ) {
if (nargs() == 0) {
print("Usage: compcorr_df<-compcor( fmri, mask ) ")
return(1)
}
if (is.antsImage(fmri) & is.null(mask)) {
print("Need to input a mask too")
print(args(compcor))
return(NULL)
}
if (is.antsImage(fmri) & !is.null(mask)) {
mask = check_ants(mask)
mat <- timeseries2matrix(fmri, mask)
}
if ( inherits(fmri, "matrix")) {
mat <- fmri
}
if (is.na(highvarmatinds)) {
temporalvar <- apply(mat, 2, var)
tvhist <- hist(temporalvar, breaks = c("FD"), plot = FALSE)
percvar <- variance_extreme # percentage of high variance data to use
# get total counts
totalcounts <- sum(tvhist$counts)
wh <- (cumsum(tvhist$counts) < (totalcounts * percvar))
thresh <- max(tvhist$mids[wh])
highvarmatinds <- which(temporalvar > thresh)
}
highvarmat <- mat[, highvarmatinds]
if ( scale ) highvarmat = scale( highvarmat , scale = FALSE )
if (returnhighvarmatinds)
return(highvarmatinds)
if (!returnv) {
compcorrsvd <- svd(highvarmat, nu = ncompcor, nv = 0)
if (ncompcor > 0) {
compcorr <- (compcorrsvd$u[, 1:ncompcor])
compcorrnames <- paste("compcorr", c(1:ncol(compcorr)), sep = "")
nuis <- compcorr
colnames(nuis) <- c(compcorrnames)
}
return(nuis)
}
if (returnv) {
compcorrsvd <- svd(highvarmat, nu = 0, nv = ncompcor)
if (ncompcor > 0) {
compcorr <- (compcorrsvd$v[, 1:ncompcor])
compcorrnames <- paste("compcorr", c(1:ncol(compcorr)), sep = "")
nuis <- compcorr
colnames(nuis) <- c(compcorrnames)
}
return(nuis)
}
}
| /R/compcor.R | permissive | ANTsX/ANTsR | R | false | false | 2,752 | r | #' Compcors the input matrix using SVD and returns the result.
#' Compute CompCor nuisance components from the input matrix using SVD.
#'
#' @param fmri input fmri image or matrix
#' @param ncompcor n compcor vectors
#' @param variance_extreme high variance threshold e.g 0.95 for 95 percent
#' @param mask optional mask for image
#' @param randomSamples take this many random samples to speed things up
#'   (currently unused by the implementation)
#' @param returnv return the spatial vectors
#' @param returnhighvarmat bool to return the high variance matrix
#' @param returnhighvarmatinds bool to return the high variance matrix indices
#' @param highvarmatinds index list
#' @param scale center the columns of the matrix of high variance voxels
#'   (no unit-variance scaling is applied), default TRUE. note
#'   that you may get slightly different results by scaling the input matrix
#'   before passing into this function.
#' @return dataframe of nuisance predictors is output
#' @author Avants BB
#' @examples
#'
#' mat <- matrix(rnorm(50000), ncol = 500)
#' compcorrdf <- compcor(mat)
#'
#' @export compcor
compcor <- function(fmri, ncompcor = 4,
                    variance_extreme = 0.975,
                    mask = NULL, randomSamples = 1,
                    returnv = FALSE, returnhighvarmat = FALSE,
                    returnhighvarmatinds = FALSE,
                    highvarmatinds = NA,
                    scale = TRUE) {
  if (nargs() == 0) {
    print("Usage: compcorr_df<-compcor( fmri, mask ) ")
    return(1)
  }
  # An antsImage input needs a mask to be converted to a time x voxel matrix.
  if (is.antsImage(fmri) && is.null(mask)) {
    print("Need to input a mask too")
    print(args(compcor))
    return(NULL)
  }
  if (is.antsImage(fmri) && !is.null(mask)) {
    mask <- check_ants(mask)
    mat <- timeseries2matrix(fmri, mask)
  }
  if (inherits(fmri, "matrix")) {
    mat <- fmri
  }
  # Identify high-variance voxels unless indices were supplied by the caller.
  # all(is.na(...)) preserves the default scalar-NA behaviour while tolerating
  # a user-supplied index vector, for which is.na() returns a vector and would
  # break the scalar if() condition (an error in R >= 4.2).
  if (all(is.na(highvarmatinds))) {
    temporalvar <- apply(mat, 2, var)
    tvhist <- hist(temporalvar, breaks = "FD", plot = FALSE)
    percvar <- variance_extreme # fraction of voxels to treat as "ordinary"
    # Threshold at the bin where the cumulative count crosses percvar.
    totalcounts <- sum(tvhist$counts)
    wh <- cumsum(tvhist$counts) < (totalcounts * percvar)
    thresh <- max(tvhist$mids[wh])
    highvarmatinds <- which(temporalvar > thresh)
  }
  highvarmat <- mat[, highvarmatinds]
  if (scale) {
    highvarmat <- scale(highvarmat, scale = FALSE) # center columns only
  }
  if (returnhighvarmatinds) {
    return(highvarmatinds)
  }
  if (returnhighvarmat) {
    # This documented option was previously accepted but silently ignored.
    return(highvarmat)
  }
  # SVD of the high-variance submatrix: the left singular vectors (u) are the
  # temporal nuisance regressors, the right singular vectors (v) the spatial
  # weights. drop = FALSE keeps a one-column matrix when ncompcor == 1, which
  # previously collapsed to a vector and broke ncol()/colnames().
  nuis <- NULL # returned as-is when ncompcor <= 0
  if (!returnv) {
    compcorrsvd <- svd(highvarmat, nu = ncompcor, nv = 0)
    if (ncompcor > 0) {
      nuis <- compcorrsvd$u[, 1:ncompcor, drop = FALSE]
      colnames(nuis) <- paste("compcorr", seq_len(ncol(nuis)), sep = "")
    }
    return(nuis)
  }
  compcorrsvd <- svd(highvarmat, nu = 0, nv = ncompcor)
  if (ncompcor > 0) {
    nuis <- compcorrsvd$v[, 1:ncompcor, drop = FALSE]
    colnames(nuis) <- paste("compcorr", seq_len(ncol(nuis)), sep = "")
  }
  nuis
}
|
#' UI for barplot module
#'
#' @param id string used to namespace module
#' @return an UI snippet
#' @import shiny
#' @import plotly
#' @noRd
barplot_ui <- function(id) {
  # Namespace all output ids so several instances of the module can coexist.
  ns <- shiny::NS(id)
  plot_widget <- plotly::plotlyOutput(ns("barplot"), height = "500px")
  shiny::tagList(plot_widget)
}
| /R/mod-barplot_ui.R | permissive | ikbentimkramer/cdphmd | R | false | false | 276 | r | #' UI for barplot module
#' UI for barplot module
#'
#' @param id string used to namespace module
#' @return an UI snippet
#' @import shiny
#' @import plotly
#' @noRd
barplot_ui <- function(id) {
  make_id <- shiny::NS(id) # namespacing function for this module instance
  widgets <- list(
    # 500px-high interactive barplot placeholder, filled in by the server.
    plotly::plotlyOutput(make_id("barplot"), height = "500px")
  )
  do.call(shiny::tagList, widgets)
}
## Render the votes-per-MP comparison for the year selected in the UI.
## NOTE(review): the CSV path is relative to the Shiny app directory --
## assumes "source/data/votesperMP.csv" exists there; confirm when deploying.
output$MPelect <- renderPlot({
## Load the full votes-per-MP table and keep only the rows for the chosen year.
votesperMP=read.csv("source/data/votesperMP.csv")
dat<-votesperMP[votesperMP$Year == input$year,]
## Named colour map: one entry per party level as it appears in the data.
## Several spellings/abbreviations of the same party map to the same colour.
cols <- c(AP="hotpink3",APNI="hotpink3", Alliance="hotpink3", Alliance..Lib.="orange3", Alliance..SDP.="blue4",BNP="steelblue4", British.National="steelblue4",Con="dodgerblue", Conservative="dodgerblue", Ch.P="darkorchid2", Democratic.Unionist.Party="firebrick3", DUP="firebrick3", Democratic.Unionist="firebrick3",Ecology="green3", ED="tomato4",
Green="green3",Grn="green3",KHHC="violetred2",Kidderminster.Hospital.and.Health.Concern="violetred2",Ind="plum1", Independent="plum1", Ind1="plum1", Ind.="plum1", Ind131="plum1", Ind86="plum1", Independent76="plum1", Ind127="plum1", Independent.Labour="red1",Ind.Labour="red1",Lab="red2", Labour="red2", LDem="orange2", Liberal.Democrat="orange2" , LD="orange2", Lib="orange3", Liberal="orange3", National.Front="lightskyblue", Natural.Law="lightsteelblue2",
Official.Unionist.Party="coral",Oth="grey40",PC="palegreen", Plaid.Cymru="palegreen", People.s.Labour="gold1",Referendum="indianred4",Republican.Clubs="limegreen",Respect="forestgreen", Resp="forestgreen",SSP="orangered3",Scottish.Socialist="orangered3", SDLP="green4",Social.Democratic.and.Labour.Party="green4",Social.Democratic.and.Labour="green4",Sinn.Fein="limegreen",SF="limegreen",Sinn.Fein="limegreen", Scottish.National.Party="yellow",SNP="yellow", Scottish.National="yellow", Speaker="wheat3",The.Speaker="wheat3",Socialist.Alliance="red4",Social.Democratic="blue4",Socialist.Labour="grey35", Trade.Unionist.and.Socialist.Coalition="deeppink4",TUV="royalblue",
UCUNF="cadetblue2", UK.Independence.Party="darkviolet", UKIP="darkviolet", United.Kingdom.Independence="darkviolet",United.Kingdom.Unionist="mediumpurple4", United.Ulster.Unionist="deepskyblue1", Ulster.Unionist.Party="coral", UU="coral",UUP="coral", Official.Unionist="coral", Ulster.Unionist="coral",Ulster.Popular.Unionist="blue1", Ver="darkorchid1", The.Workers...NI.="red4",WRP="tomato2" )
## 'brks' lists the raw party level names and 'labs' the display labels used
## on the x axis and legend; the two vectors are assumed to be parallel
## (same length and order) -- TODO confirm if new parties are added.
brks <- c("AP","APNI", "Alliance", "Alliance..Lib.", "Alliance..SDP.","BNP", "British.National","Con", "Conservative", "Ch.P", "Democratic.Unionist.Party", "DUP", "Democratic.Unionist","Ecology", "ED",
"Green","Grn","KHHC","Kidderminster.Hospital.and.Health.Concern","Ind", "Independent", "Ind1", "Ind.", "Ind131", "Ind86", "Independent76", "Ind127", "Independent.Labour","Ind.Labour","Lab", "Labour", "LDem", "Liberal.Democrat" , "LD", "Lib", "Liberal", "National.Front", "Natural.Law",
"Official.Unionist.Party","Oth","PC", "Plaid.Cymru", "People.s.Labour", "Referendum","Republican.Clubs","Respect","Resp","SSP","Scottish.Socialist", "SDLP","Social.Democratic.and.Labour.Party","Social.Democratic.and.Labour","Sinn.Fein","SF","Sinn.Fein", "Scottish.National.Party","SNP", "Scottish.National", "Speaker","The.Speaker","Socialist.Alliance","Social.Democratic","Socialist.Labour", "Trade.Unionist.and.Socialist.Coalition","TUV",
"UCUNF", "UK.Independence.Party", "UKIP", "United.Kingdom.Independence","United.Kingdom.Unionist", "United.Ulster.Unionist", "Ulster.Unionist.Party", "UU","UUP", "Official.Unionist", "Ulster.Unionist","Ulster.Popular.Unionist", "Ver", "The.Workers...NI.","WRP")
labs <- c("Alliance NI","Alliance NI","Alliance NI", "Alliance (Liberal)", "Alliance (SDP)","British National", "British National","Conservative", "Conservative", "Christian Party", "DUP", "DUP", "DUP","Ecology", "English Democrat",
"Green","Green","Health Concern","Health Concern","Independent", "Independent", "Independent", "Independent", "Independent", "Independent", "Independent", "Independent", "Independent Labour","Independent Labour","Labour", "Labour", "Liberal Democrat", "Liberal Democrat" , "Liberal Democrat", "Liberal", "Liberal", "National Front", "Natural Law",
"Official Unionist","Other","Plaid Cymru", "Plaid Cymru", "People's Labour", "Referendum","Republican Clubs","Respect","Respect","Scottish Socialist","Scottish Socialist", "SDLP","SDLP","SDLP","Sinn Fein","Sinn Fein","Sinn Fein", "SNP","SNP", "SNP", "Speaker","Speaker","Socialist Alliance","Social Democratic","Socialist Labour", "TUSC","TUV",
"UCUNF", "UKIP", "UKIP", "UKIP","UK Unionist", "United Ulster Unionist", "UUP", "UUP","UUP", "Official Unionist", "Ulster Unionist","Ulster Popular Unionist", "Veritas", "The Workers NI","Workers Revolutionary")
## Left panel: votes per MP under First Past the Post for the chosen year.
FPTP=ggplot(data=subset(dat,System=="FPTP"),aes(Party,Votes, fill=Party))+
geom_bar(colour="black",stat="identity",position="dodge") +
ggtitle(paste("First Past the Post Electoral System",input$year))+
scale_x_discrete(breaks=brks,
labels=labs)+
scale_y_continuous(name="Votes per MP", labels = comma)+
theme(axis.text.x=element_text(angle=90,hjust=1)) + #theme_bw()+
scale_fill_manual(values=cols,breaks=brks, labels=labs)+guides(fill=FALSE)+
geom_text(aes(label=comma(round(Votes,0))), colour="black",vjust=-0.2)
## Right panel: same chart for the "Concentrated Vote" electoral system.
CV=ggplot(data=subset(dat,System=="CV"),aes(Party,Votes, fill=Party))+
geom_bar(colour="black",stat="identity",position="dodge") +
ggtitle(paste("Concentrated Vote Electoral System", input$year))+
scale_x_discrete(breaks=brks, labels=labs)+
scale_y_continuous(name="Votes per MP", labels = comma)+
theme(axis.text.x=element_text(angle=90,hjust=1)) + #theme_bw()+
scale_fill_manual(values=cols,breaks=brks, labels=labs)+guides(fill=FALSE)+
geom_text(aes(label=comma(round(Votes,0))), colour="black",vjust=-0.2)
## Draw the two panels side by side.
grid.arrange(FPTP,CV, ncol=2, nrow=1)
}) | /Shiny/source/servermpelect.R | no_license | macarda/Concentrated-Vote | R | false | false | 5,676 | r | output$MPelect <- renderPlot({
votesperMP=read.csv("source/data/votesperMP.csv")
dat<-votesperMP[votesperMP$Year == input$year,]
cols <- c(AP="hotpink3",APNI="hotpink3", Alliance="hotpink3", Alliance..Lib.="orange3", Alliance..SDP.="blue4",BNP="steelblue4", British.National="steelblue4",Con="dodgerblue", Conservative="dodgerblue", Ch.P="darkorchid2", Democratic.Unionist.Party="firebrick3", DUP="firebrick3", Democratic.Unionist="firebrick3",Ecology="green3", ED="tomato4",
Green="green3",Grn="green3",KHHC="violetred2",Kidderminster.Hospital.and.Health.Concern="violetred2",Ind="plum1", Independent="plum1", Ind1="plum1", Ind.="plum1", Ind131="plum1", Ind86="plum1", Independent76="plum1", Ind127="plum1", Independent.Labour="red1",Ind.Labour="red1",Lab="red2", Labour="red2", LDem="orange2", Liberal.Democrat="orange2" , LD="orange2", Lib="orange3", Liberal="orange3", National.Front="lightskyblue", Natural.Law="lightsteelblue2",
Official.Unionist.Party="coral",Oth="grey40",PC="palegreen", Plaid.Cymru="palegreen", People.s.Labour="gold1",Referendum="indianred4",Republican.Clubs="limegreen",Respect="forestgreen", Resp="forestgreen",SSP="orangered3",Scottish.Socialist="orangered3", SDLP="green4",Social.Democratic.and.Labour.Party="green4",Social.Democratic.and.Labour="green4",Sinn.Fein="limegreen",SF="limegreen",Sinn.Fein="limegreen", Scottish.National.Party="yellow",SNP="yellow", Scottish.National="yellow", Speaker="wheat3",The.Speaker="wheat3",Socialist.Alliance="red4",Social.Democratic="blue4",Socialist.Labour="grey35", Trade.Unionist.and.Socialist.Coalition="deeppink4",TUV="royalblue",
UCUNF="cadetblue2", UK.Independence.Party="darkviolet", UKIP="darkviolet", United.Kingdom.Independence="darkviolet",United.Kingdom.Unionist="mediumpurple4", United.Ulster.Unionist="deepskyblue1", Ulster.Unionist.Party="coral", UU="coral",UUP="coral", Official.Unionist="coral", Ulster.Unionist="coral",Ulster.Popular.Unionist="blue1", Ver="darkorchid1", The.Workers...NI.="red4",WRP="tomato2" )
brks <- c("AP","APNI", "Alliance", "Alliance..Lib.", "Alliance..SDP.","BNP", "British.National","Con", "Conservative", "Ch.P", "Democratic.Unionist.Party", "DUP", "Democratic.Unionist","Ecology", "ED",
"Green","Grn","KHHC","Kidderminster.Hospital.and.Health.Concern","Ind", "Independent", "Ind1", "Ind.", "Ind131", "Ind86", "Independent76", "Ind127", "Independent.Labour","Ind.Labour","Lab", "Labour", "LDem", "Liberal.Democrat" , "LD", "Lib", "Liberal", "National.Front", "Natural.Law",
"Official.Unionist.Party","Oth","PC", "Plaid.Cymru", "People.s.Labour", "Referendum","Republican.Clubs","Respect","Resp","SSP","Scottish.Socialist", "SDLP","Social.Democratic.and.Labour.Party","Social.Democratic.and.Labour","Sinn.Fein","SF","Sinn.Fein", "Scottish.National.Party","SNP", "Scottish.National", "Speaker","The.Speaker","Socialist.Alliance","Social.Democratic","Socialist.Labour", "Trade.Unionist.and.Socialist.Coalition","TUV",
"UCUNF", "UK.Independence.Party", "UKIP", "United.Kingdom.Independence","United.Kingdom.Unionist", "United.Ulster.Unionist", "Ulster.Unionist.Party", "UU","UUP", "Official.Unionist", "Ulster.Unionist","Ulster.Popular.Unionist", "Ver", "The.Workers...NI.","WRP")
labs <- c("Alliance NI","Alliance NI","Alliance NI", "Alliance (Liberal)", "Alliance (SDP)","British National", "British National","Conservative", "Conservative", "Christian Party", "DUP", "DUP", "DUP","Ecology", "English Democrat",
"Green","Green","Health Concern","Health Concern","Independent", "Independent", "Independent", "Independent", "Independent", "Independent", "Independent", "Independent", "Independent Labour","Independent Labour","Labour", "Labour", "Liberal Democrat", "Liberal Democrat" , "Liberal Democrat", "Liberal", "Liberal", "National Front", "Natural Law",
"Official Unionist","Other","Plaid Cymru", "Plaid Cymru", "People's Labour", "Referendum","Republican Clubs","Respect","Respect","Scottish Socialist","Scottish Socialist", "SDLP","SDLP","SDLP","Sinn Fein","Sinn Fein","Sinn Fein", "SNP","SNP", "SNP", "Speaker","Speaker","Socialist Alliance","Social Democratic","Socialist Labour", "TUSC","TUV",
"UCUNF", "UKIP", "UKIP", "UKIP","UK Unionist", "United Ulster Unionist", "UUP", "UUP","UUP", "Official Unionist", "Ulster Unionist","Ulster Popular Unionist", "Veritas", "The Workers NI","Workers Revolutionary")
FPTP=ggplot(data=subset(dat,System=="FPTP"),aes(Party,Votes, fill=Party))+
geom_bar(colour="black",stat="identity",position="dodge") +
ggtitle(paste("First Past the Post Electoral System",input$year))+
scale_x_discrete(breaks=brks,
labels=labs)+
scale_y_continuous(name="Votes per MP", labels = comma)+
theme(axis.text.x=element_text(angle=90,hjust=1)) + #theme_bw()+
scale_fill_manual(values=cols,breaks=brks, labels=labs)+guides(fill=FALSE)+
geom_text(aes(label=comma(round(Votes,0))), colour="black",vjust=-0.2)
CV=ggplot(data=subset(dat,System=="CV"),aes(Party,Votes, fill=Party))+
geom_bar(colour="black",stat="identity",position="dodge") +
ggtitle(paste("Concentrated Vote Electoral System", input$year))+
scale_x_discrete(breaks=brks, labels=labs)+
scale_y_continuous(name="Votes per MP", labels = comma)+
theme(axis.text.x=element_text(angle=90,hjust=1)) + #theme_bw()+
scale_fill_manual(values=cols,breaks=brks, labels=labs)+guides(fill=FALSE)+
geom_text(aes(label=comma(round(Votes,0))), colour="black",vjust=-0.2)
grid.arrange(FPTP,CV, ncol=2, nrow=1)
}) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_mailinglists.R
\name{all_mailinglists}
\alias{all_mailinglists}
\title{Retrieve a data frame of all mailing lists from Qualtrics}
\usage{
all_mailinglists()
}
\description{
Retrieve a data frame of all mailing lists from Qualtrics
}
\examples{
\dontrun{
# Register your Qualtrics credentials if you haven't already
qualtrics_api_credentials(
api_key = "<YOUR-API-KEY>",
base_url = "<YOUR-BASE-URL>"
)
# Retrieve a list of all mailing lists
mailinglists <- all_mailinglists()
}
}
| /man/all_mailinglists.Rd | permissive | jntrcs/qualtRics | R | false | true | 567 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_mailinglists.R
\name{all_mailinglists}
\alias{all_mailinglists}
\title{Retrieve a data frame of all mailing lists from Qualtrics}
\usage{
all_mailinglists()
}
\description{
Retrieve a data frame of all mailing lists from Qualtrics
}
\examples{
\dontrun{
# Register your Qualtrics credentials if you haven't already
qualtrics_api_credentials(
api_key = "<YOUR-API-KEY>",
base_url = "<YOUR-BASE-URL>"
)
# Retrieve a list of all mailing lists
mailinglists <- all_mailinglists()
}
}
|
#' Model the per-gene variance
#'
#' Model the variance of the log-expression profiles for each gene,
#' decomposing it into technical and biological components based on a fitted mean-variance trend.
#'
#' @param x A numeric matrix of log-normalized expression values where rows are genes and columns are cells.
#'
#' Alternatively, a \linkS4class{SummarizedExperiment} containing such a matrix.
#' @param design A numeric matrix containing blocking terms for uninteresting factors of variation.
#' @param subset.row See \code{?"\link{scran-gene-selection}"}, specifying the rows for which to model the variance.
#' Defaults to all genes in \code{x}.
#' @param subset.fit An argument similar to \code{subset.row}, specifying the rows to be used for trend fitting.
#' Defaults to \code{subset.row}.
#' @param BPPARAM A \linkS4class{BiocParallelParam} object indicating whether parallelization should be performed across genes.
#' @param ... For the generic, further arguments to pass to each method.
#'
#' For the ANY method, further arguments to pass to \code{\link{fitTrendVar}}.
#'
#' For the \linkS4class{SummarizedExperiment} method, further arguments to pass to the ANY method.
#' @param block A factor specifying the blocking levels for each cell in \code{x}.
#' If specified, variance modelling is performed separately within each block and statistics are combined across blocks.
#' @param equiweight A logical scalar indicating whether statistics from each block should be given equal weight.
#' Otherwise, each block is weighted according to its number of cells.
#' Only used if \code{block} is specified.
#' @param method String specifying how p-values should be combined when \code{block} is specified, see \code{\link{combinePValues}}.
#' @param assay.type String or integer scalar specifying the assay containing the log-expression values.
#'
#' @details
#' For each gene, we compute the variance and mean of the log-expression values.
#' A trend is fitted to the variance against the mean for all genes using \code{\link{fitTrendVar}}.
#' The fitted value for each gene is used as a proxy for the technical component of variation for each gene,
#' under the assumption that most genes exhibit a low baseline level of variation that is not biologically interesting.
#' The biological component of variation for each gene is defined as the residual from the trend.
#'
#' Ranking genes by the biological component enables identification of interesting genes for downstream analyses
#' in a manner that accounts for the mean-variance relationship.
#' We use log-transformed expression values to blunt the impact of large positive outliers and to ensure that large variances are driven by strong log-fold changes between cells rather than differences in counts.
#' Log-expression values are also used in downstream analyses like PCA, so modelling them here avoids inconsistencies with different quantifications of variation across analysis steps.
#'
#' By default, the trend is fitted using all of the genes in \code{x}.
#' If \code{subset.fit} is specified, the trend is fitted using only the specified subset,
#' and the technical components for all other genes are determined by extrapolation or interpolation.
#' This could be used to perform the fit based on genes that are known to have low variance, thus weakening the assumption above.
#' Note that this does not refer to spike-in transcripts, which should be handled via \code{\link{modelGeneVarWithSpikes}}.
#'
#' @section Handling uninteresting factors:
#' Setting \code{block} will estimate the mean and variance of each gene for cells in each level of \code{block} separately.
#' The trend is fitted separately for each level, and the variance decomposition is also performed separately.
#' Per-level statistics are then combined to obtain a single value per gene:
#' \itemize{
#' \item For means and variance components, this is done by averaging values across levels.
#' If \code{equiweight=FALSE}, a weighted average is used where the value for each level is weighted by the number of cells.
#' By default, all levels are equally weighted when combining statistics.
#' \item Per-level p-values are combined using \code{\link{combinePValues}} according to \code{method}.
#' By default, Fisher's method is used to identify genes that are highly variable in any batch.
#' Whether or not this is responsive to \code{equiweight} depends on the chosen method.
#' \item Blocks with fewer than 2 cells are completely ignored and do not contribute to the combined mean, variance component or p-value.
#' }
#'
#' Use of \code{block} is the recommended approach for accounting for any uninteresting categorical factor of variation.
#' In addition to accounting for systematic differences in expression between levels of the blocking factor,
#' it also accommodates differences in the mean-variance relationships.
#'
#' Alternatively, uninteresting factors can be used to construct a design matrix to pass to the function via \code{design}.
#' In this case, a linear model is fitted to the expression profile for each gene and the residual variance is calculated.
#' This approach is useful for covariates or additive models that cannot be expressed as a one-way layout for use in \code{block}.
#' However, it assumes that the error is normally distributed with equal variance for all observations of a given gene.
#'
#' Use of \code{block} and \code{design} together is currently not supported and will lead to an error.
#'
#' @section Computing p-values:
#' The p-value for each gene is computed by assuming that the variance estimates are normally distributed around the trend, and that the standard deviation of the variance distribution is proportional to the value of the trend.
#' This is used to construct a one-sided test for each gene based on its \code{bio}, under the null hypothesis that the biological component is equal to zero.
#' The proportionality constant for the standard deviation is set to the \code{std.dev} returned by \code{\link{fitTrendVar}}.
#' This is estimated from the spread of per-gene variance estimates around the trend, so the null hypothesis effectively becomes \dQuote{is this gene \emph{more} variable than other genes of the same abundance?}
#'
#' @return
#' A \linkS4class{DataFrame} is returned where each row corresponds to a gene in \code{x} (or in \code{subset.row}, if specified).
#' This contains the numeric fields:
#' \describe{
#' \item{\code{mean}:}{Mean normalized log-expression per gene.}
#' \item{\code{total}:}{Variance of the normalized log-expression per gene.}
#' \item{\code{bio}:}{Biological component of the variance.}
#' \item{\code{tech}:}{Technical component of the variance.}
#' \item{\code{p.value, FDR}:}{Raw and adjusted p-values for the test against the null hypothesis that \code{bio<=0}.}
#' }
#'
#' If \code{block} is not specified,
#' the \code{metadata} of the DataFrame contains the output of running \code{\link{fitTrendVar}} on the specified features,
#' along with the \code{mean} and \code{var} used to fit the trend.
#'
#' If \code{block} is specified,
#' the output contains another \code{per.block} field.
#' This field is itself a DataFrame of DataFrames, where each internal DataFrame contains statistics for the variance modelling within each block and has the same format as described above.
#' Each internal DataFrame's \code{metadata} contains the output of \code{\link{fitTrendVar}} for the cells of that block.
#'
#' @author Aaron Lun
#'
#' @examples
#' library(scuttle)
#' sce <- mockSCE()
#' sce <- logNormCounts(sce)
#'
#' # Fitting to all features.
#' allf <- modelGeneVar(sce)
#' allf
#'
#' plot(allf$mean, allf$total)
#' curve(metadata(allf)$trend(x), add=TRUE, col="dodgerblue")
#'
#' # Using a subset of features for fitting.
#' subf <- modelGeneVar(sce, subset.fit=1:100)
#' subf
#'
#' plot(subf$mean, subf$total)
#' curve(metadata(subf)$trend(x), add=TRUE, col="dodgerblue")
#' points(metadata(subf)$mean, metadata(subf)$var, col="red", pch=16)
#'
#' # With blocking.
#' block <- sample(LETTERS[1:2], ncol(sce), replace=TRUE)
#' blk <- modelGeneVar(sce, block=block)
#' blk
#'
#' par(mfrow=c(1,2))
#' for (i in colnames(blk$per.block)) {
#' current <- blk$per.block[[i]]
#' plot(current$mean, current$total)
#' curve(metadata(current)$trend(x), add=TRUE, col="dodgerblue")
#' }
#'
#' @name modelGeneVar
#' @aliases modelGeneVar modelGeneVar,ANY-method modelGeneVar,SingleCellExperiment-method
#' @seealso
#' \code{\link{fitTrendVar}}, for the trend fitting options.
#'
#' \code{\link{modelGeneVarWithSpikes}}, for modelling variance with spike-in controls.
NULL
#############################
# Defining the basic method #
#############################
#' @importFrom BiocParallel SerialParam
#' @importFrom scuttle .subset2index
.model_gene_var <- function(x, block=NULL, design=NULL, subset.row=NULL, subset.fit=NULL,
    ..., equiweight=TRUE, method="fisher", BPPARAM=SerialParam())
{
    # Per-gene means/variances for an arbitrary row subset, honouring any
    # blocking factor or design matrix supplied by the caller.
    compute_stats <- function(rows) {
        .compute_mean_var(x, block=block, design=design, subset.row=rows,
            block.FUN=compute_blocked_stats_none,
            residual.FUN=compute_residual_stats_none,
            BPPARAM=BPPARAM)
    }

    x.stats <- compute_stats(subset.row)

    # The trend may be fitted on a different feature set (subset.fit) than the
    # one reported (subset.row); reuse the stats when they coincide.
    fit.stats <- if (is.null(subset.fit)) x.stats else compute_stats(subset.fit)

    # Decompose each gene's variance into technical/biological components and
    # merge per-block statistics into a single DataFrame.
    collected <- .decompose_log_exprs(x.stats$means, x.stats$vars,
        fit.stats$means, fit.stats$vars, x.stats$ncells, ...)
    output <- .combine_blocked_statistics(collected, method, equiweight, x.stats$ncells)

    rownames(output) <- rownames(x)[.subset2index(subset.row, x)]
    output
}
#########################
# Setting up S4 methods #
#########################

# Generic for modelGeneVar(); dispatch happens on the class of 'x'.
#' @export
setGeneric("modelGeneVar", function(x, ...) standardGeneric("modelGeneVar"))

# Default method: 'x' is treated as a matrix-like object of log-expression
# values and handed straight to the workhorse .model_gene_var().
#' @export
#' @rdname modelGeneVar
setMethod("modelGeneVar", "ANY", .model_gene_var)
#' @export
#' @importFrom SummarizedExperiment assay
#' @rdname modelGeneVar
setMethod("modelGeneVar", "SummarizedExperiment", function(x, ..., assay.type="logcounts") {
    # Extract the requested assay (log-expression values by default) and
    # delegate to the matrix workhorse shared with the ANY method.
    mat <- assay(x, i = assay.type)
    .model_gene_var(x = mat, ...)
})
| /R/modelGeneVar.R | no_license | zzzsssyyy1995/scran | R | false | false | 10,311 | r | #' Model the per-gene variance
#'
#' Model the variance of the log-expression profiles for each gene,
#' decomposing it into technical and biological components based on a fitted mean-variance trend.
#'
#' @param x A numeric matrix of log-normalized expression values where rows are genes and columns are cells.
#'
#' Alternatively, a \linkS4class{SummarizedExperiment} containing such a matrix.
#' @param design A numeric matrix containing blocking terms for uninteresting factors of variation.
#' @param subset.row See \code{?"\link{scran-gene-selection}"}, specifying the rows for which to model the variance.
#' Defaults to all genes in \code{x}.
#' @param subset.fit An argument similar to \code{subset.row}, specifying the rows to be used for trend fitting.
#' Defaults to \code{subset.row}.
#' @param BPPARAM A \linkS4class{BiocParallelParam} object indicating whether parallelization should be performed across genes.
#' @param ... For the generic, further arguments to pass to each method.
#'
#' For the ANY method, further arguments to pass to \code{\link{fitTrendVar}}.
#'
#' For the \linkS4class{SummarizedExperiment} method, further arguments to pass to the ANY method.
#' @param block A factor specifying the blocking levels for each cell in \code{x}.
#' If specified, variance modelling is performed separately within each block and statistics are combined across blocks.
#' @param equiweight A logical scalar indicating whether statistics from each block should be given equal weight.
#' Otherwise, each block is weighted according to its number of cells.
#' Only used if \code{block} is specified.
#' @param method String specifying how p-values should be combined when \code{block} is specified, see \code{\link{combinePValues}}.
#' @param assay.type String or integer scalar specifying the assay containing the log-expression values.
#'
#' @details
#' For each gene, we compute the variance and mean of the log-expression values.
#' A trend is fitted to the variance against the mean for all genes using \code{\link{fitTrendVar}}.
#' The fitted value for each gene is used as a proxy for the technical component of variation for each gene,
#' under the assumption that most genes exhibit a low baseline level of variation that is not biologically interesting.
#' The biological component of variation for each gene is defined as the residual from the trend.
#'
#' Ranking genes by the biological component enables identification of interesting genes for downstream analyses
#' in a manner that accounts for the mean-variance relationship.
#' We use log-transformed expression values to blunt the impact of large positive outliers and to ensure that large variances are driven by strong log-fold changes between cells rather than differences in counts.
#' Log-expression values are also used in downstream analyses like PCA, so modelling them here avoids inconsistencies with different quantifications of variation across analysis steps.
#'
#' By default, the trend is fitted using all of the genes in \code{x}.
#' If \code{subset.fit} is specified, the trend is fitted using only the specified subset,
#' and the technical components for all other genes are determined by extrapolation or interpolation.
#' This could be used to perform the fit based on genes that are known to have low variance, thus weakening the assumption above.
#' Note that this does not refer to spike-in transcripts, which should be handled via \code{\link{modelGeneVarWithSpikes}}.
#'
#' @section Handling uninteresting factors:
#' Setting \code{block} will estimate the mean and variance of each gene for cells in each level of \code{block} separately.
#' The trend is fitted separately for each level, and the variance decomposition is also performed separately.
#' Per-level statistics are then combined to obtain a single value per gene:
#' \itemize{
#' \item For means and variance components, this is done by averaging values across levels.
#' If \code{equiweight=FALSE}, a weighted average is used where the value for each level is weighted by the number of cells.
#' By default, all levels are equally weighted when combining statistics.
#' \item Per-level p-values are combined using \code{\link{combinePValues}} according to \code{method}.
#' By default, Fisher's method is used to identify genes that are highly variable in any batch.
#' Whether or not this is responsive to \code{equiweight} depends on the chosen method.
#' \item Blocks with fewer than 2 cells are completely ignored and do not contribute to the combined mean, variance component or p-value.
#' }
#'
#' Use of \code{block} is the recommended approach for accounting for any uninteresting categorical factor of variation.
#' In addition to accounting for systematic differences in expression between levels of the blocking factor,
#' it also accommodates differences in the mean-variance relationships.
#'
#' Alternatively, uninteresting factors can be used to construct a design matrix to pass to the function via \code{design}.
#' In this case, a linear model is fitted to the expression profile for each gene and the residual variance is calculated.
#' This approach is useful for covariates or additive models that cannot be expressed as a one-way layout for use in \code{block}.
#' However, it assumes that the error is normally distributed with equal variance for all observations of a given gene.
#'
#' Use of \code{block} and \code{design} together is currently not supported and will lead to an error.
#'
#' @section Computing p-values:
#' The p-value for each gene is computed by assuming that the variance estimates are normally distributed around the trend, and that the standard deviation of the variance distribution is proportional to the value of the trend.
#' This is used to construct a one-sided test for each gene based on its \code{bio}, under the null hypothesis that the biological component is equal to zero.
#' The proportionality constant for the standard deviation is set to the \code{std.dev} returned by \code{\link{fitTrendVar}}.
#' This is estimated from the spread of per-gene variance estimates around the trend, so the null hypothesis effectively becomes \dQuote{is this gene \emph{more} variable than other genes of the same abundance?}
#'
#' @return
#' A \linkS4class{DataFrame} is returned where each row corresponds to a gene in \code{x} (or in \code{subset.row}, if specified).
#' This contains the numeric fields:
#' \describe{
#' \item{\code{mean}:}{Mean normalized log-expression per gene.}
#' \item{\code{total}:}{Variance of the normalized log-expression per gene.}
#' \item{\code{bio}:}{Biological component of the variance.}
#' \item{\code{tech}:}{Technical component of the variance.}
#' \item{\code{p.value, FDR}:}{Raw and adjusted p-values for the test against the null hypothesis that \code{bio<=0}.}
#' }
#'
#' If \code{block} is not specified,
#' the \code{metadata} of the DataFrame contains the output of running \code{\link{fitTrendVar}} on the specified features,
#' along with the \code{mean} and \code{var} used to fit the trend.
#'
#' If \code{block} is specified,
#' the output contains another \code{per.block} field.
#' This field is itself a DataFrame of DataFrames, where each internal DataFrame contains statistics for the variance modelling within each block and has the same format as described above.
#' Each internal DataFrame's \code{metadata} contains the output of \code{\link{fitTrendVar}} for the cells of that block.
#'
#' @author Aaron Lun
#'
#' @examples
#' library(scuttle)
#' sce <- mockSCE()
#' sce <- logNormCounts(sce)
#'
#' # Fitting to all features.
#' allf <- modelGeneVar(sce)
#' allf
#'
#' plot(allf$mean, allf$total)
#' curve(metadata(allf)$trend(x), add=TRUE, col="dodgerblue")
#'
#' # Using a subset of features for fitting.
#' subf <- modelGeneVar(sce, subset.fit=1:100)
#' subf
#'
#' plot(subf$mean, subf$total)
#' curve(metadata(subf)$trend(x), add=TRUE, col="dodgerblue")
#' points(metadata(subf)$mean, metadata(subf)$var, col="red", pch=16)
#'
#' # With blocking.
#' block <- sample(LETTERS[1:2], ncol(sce), replace=TRUE)
#' blk <- modelGeneVar(sce, block=block)
#' blk
#'
#' par(mfrow=c(1,2))
#' for (i in colnames(blk$per.block)) {
#' current <- blk$per.block[[i]]
#' plot(current$mean, current$total)
#' curve(metadata(current)$trend(x), add=TRUE, col="dodgerblue")
#' }
#'
#' @name modelGeneVar
#' @aliases modelGeneVar modelGeneVar,ANY-method modelGeneVar,SingleCellExperiment-method
#' @seealso
#' \code{\link{fitTrendVar}}, for the trend fitting options.
#'
#' \code{\link{modelGeneVarWithSpikes}}, for modelling variance with spike-in controls.
NULL
#############################
# Defining the basic method #
#############################
#' @importFrom BiocParallel SerialParam
#' @importFrom scuttle .subset2index
.model_gene_var <- function(x, block=NULL, design=NULL, subset.row=NULL, subset.fit=NULL,
    ..., equiweight=TRUE, method="fisher", BPPARAM=SerialParam())
{
    # Compute per-gene means and variances for an arbitrary row subset,
    # sharing the blocking/design configuration between both calls below.
    compute_stats <- function(rows) {
        .compute_mean_var(x, block=block, design=design, subset.row=rows,
            block.FUN=compute_blocked_stats_none,
            residual.FUN=compute_residual_stats_none,
            BPPARAM=BPPARAM)
    }

    x.stats <- compute_stats(subset.row)

    # The trend may be fitted on a different feature subset (subset.fit);
    # otherwise the same statistics serve both decomposition and fitting.
    # (Rolling subset.fit into subset.row for a single pass would be more
    # efficient, but two passes keep the logic simple.)
    fit.stats <- if (is.null(subset.fit)) x.stats else compute_stats(subset.fit)

    # Decompose total variance into technical/biological components and
    # combine per-block statistics into one DataFrame row per gene.
    collected <- .decompose_log_exprs(x.stats$means, x.stats$vars,
        fit.stats$means, fit.stats$vars, x.stats$ncells, ...)
    output <- .combine_blocked_statistics(collected, method, equiweight, x.stats$ncells)
    rownames(output) <- rownames(x)[.subset2index(subset.row, x)]
    output
}
#########################
# Setting up S4 methods #
#########################
# Generic: dispatches on the class of `x`.
#' @export
setGeneric("modelGeneVar", function(x, ...) standardGeneric("modelGeneVar"))
# Default method: operates directly on a matrix-like object of
# log-expression values.
#' @export
#' @rdname modelGeneVar
setMethod("modelGeneVar", "ANY", .model_gene_var)
# SummarizedExperiment method: extracts the named assay ("logcounts" by
# default) and forwards to the ANY method.
#' @export
#' @importFrom SummarizedExperiment assay
#' @rdname modelGeneVar
setMethod("modelGeneVar", "SummarizedExperiment", function(x, ..., assay.type="logcounts") {
    .model_gene_var(x=assay(x, i=assay.type), ...)
})
|
# Supplementary figure 3:
#   Panel A - posterior correlation between case-strength and punishment effects
#   Panels B/C - PCA variance explained for the fMRI and MTurk samples
library(tidyverse)
library(grid)
library(gridExtra)
library(gtable)
library(gridBase)
library(factoextra)
library(ggpubr)  # ggarrange() lives in ggpubr; there is no "ggarrange" package
source('ggplot_setup.R')
load('data/stan_postprocess_2v_t.rdata')
#effects <- effects %>% filter(group == 'mri')
#dat <- dat %>% filter(group == 'mri')
############### Panel A: Punishment and case strength effect correlations ##################################
load('data/stan_postprocess_2v_t.rdata')
effects <- effects %>% filter(group == 'mri' | group == 'mturk')
# Posterior medians and 95% intervals of the Omega (correlation) parameters
# at baseline, one point per subject group.
panel_A <- ggplot(data=(effects %>% filter(grepl('Omega', variable), evidence=='baseline'))) +
    geom_hline(yintercept=0, colour='grey') +
    geom_pointrange(aes(x=evidence, y=X50., ymin=X2.5., ymax=X97.5., color=group),
                    position=position_dodge(width = 0.5)) +
    xlab('') + ylab('\nCase Strength /\nPunishment Correlation') +  # typo "Punishmnet" fixed
    group_color_scale +
    evidence_plus_baseline_x_axis +
    th +
    theme(legend.position=c(0.8, 0.8)) +
    theme(text=element_text(family="Helvetica"),
          plot.title = element_text(family="Helvetica", face="bold")) +
    theme(axis.title.x=element_blank(),
          axis.text.x=element_blank(),
          axis.ticks.x=element_blank())
############### Panels B & C: PCA ##################################
# Run a two-component PCA on the scenario-level case-strength and punishment
# effect means (columns 2:3 of the CSV) and return a dot plot of the
# percentage of variance explained by each component.
# The PCA summary, loadings and scores are printed for inspection, matching
# the console output of the original interactive workflow.
make_pca_panel <- function(csv_path) {
    effects_data <- read.csv(file=csv_path, header=TRUE, sep=",",
                             na.strings=c("", "NA"))
    effects_pca <- prcomp(effects_data[, c(2:3)], center=TRUE, scale.=TRUE)
    print(summary(effects_pca))   # variance summary
    print(effects_pca$rotation)   # loadings
    print(effects_pca$x)          # component scores
    pct_var_exp <- get_eig(effects_pca)
    pct_var_exp <- cbind(PC=rownames(pct_var_exp), pct_var_exp)
    rownames(pct_var_exp) <- 1:nrow(pct_var_exp)
    # Relabel "Dim.1"/"Dim.2" as "1"/"2" for the x axis.
    pct_var_exp$PC_name[pct_var_exp$PC == "Dim.1"] <- "1"
    pct_var_exp$PC_name[pct_var_exp$PC == "Dim.2"] <- "2"
    pct_var_exp$PC <- pct_var_exp$PC_name
    base_plot <- ggplot(pct_var_exp, aes(x=PC, y=variance.percent)) +
        geom_point(stat="identity", size=5)
    base_plot + th + ylab('Variance Explained (%) ') +
        theme(text=element_text(family="Helvetica"),
              plot.title=element_text(family="Helvetica", face="bold"),
              plot.background=element_rect(fill='white', color='white')) +
        theme(plot.margin=unit(c(5.5, 25.5, 5.5, 25.5), "points")) +
        scale_y_continuous(limits=c(0, 100))
}
# fMRI and MTurk samples share identical processing; only the input differs.
panel_B <- make_pca_panel("data/scenario_effects_fmri_sample.csv")
panel_C <- make_pca_panel("data/scenario_effects_mturk_sample.csv")
############### Combine into a single figure ##################################
combo_plot <- ggarrange(panel_A, panel_B, panel_C,
                        ncol = 3, widths = c(1, 1, 1), align = 'hv',
                        labels = c("A", "B", "C"), font.label = list(size = 20))
# save to disk
ggsave('figs/supp_fig_3.pdf', plot=combo_plot, width=13, height=6,
       units='in', useDingbats=FALSE)
| /behavior/make_supp_fig_3.R | no_license | jcastrel/juror_fmri_bias | R | false | false | 4,763 | r | # supplementary figure showing correlation between case strength & punishment
# Supplementary figure 3:
#   Panel A - posterior correlation between case-strength and punishment effects
#   Panels B/C - PCA variance explained for the fMRI and MTurk samples
library(tidyverse)
library(grid)
library(gridExtra)
library(gtable)
library(gridBase)
library(factoextra)
library(ggpubr)  # ggarrange() lives in ggpubr; there is no "ggarrange" package
source('ggplot_setup.R')
load('data/stan_postprocess_2v_t.rdata')
#effects <- effects %>% filter(group == 'mri')
#dat <- dat %>% filter(group == 'mri')
############### Panel A: Punishment and case strength effect correlations ##################################
load('data/stan_postprocess_2v_t.rdata')
effects <- effects %>% filter(group == 'mri' | group == 'mturk')
# Posterior medians and 95% intervals of the Omega (correlation) parameters
# at baseline, one point per subject group.
panel_A <- ggplot(data=(effects %>% filter(grepl('Omega', variable), evidence=='baseline'))) +
    geom_hline(yintercept=0, colour='grey') +
    geom_pointrange(aes(x=evidence, y=X50., ymin=X2.5., ymax=X97.5., color=group),
                    position=position_dodge(width = 0.5)) +
    xlab('') + ylab('\nCase Strength /\nPunishment Correlation') +  # typo "Punishmnet" fixed
    group_color_scale +
    evidence_plus_baseline_x_axis +
    th +
    theme(legend.position=c(0.8, 0.8)) +
    theme(text=element_text(family="Helvetica"),
          plot.title = element_text(family="Helvetica", face="bold")) +
    theme(axis.title.x=element_blank(),
          axis.text.x=element_blank(),
          axis.ticks.x=element_blank())
############### Panels B & C: PCA ##################################
# Run a two-component PCA on the scenario-level case-strength and punishment
# effect means (columns 2:3 of the CSV) and return a dot plot of the
# percentage of variance explained by each component.
# The PCA summary, loadings and scores are printed for inspection, matching
# the console output of the original interactive workflow.
make_pca_panel <- function(csv_path) {
    effects_data <- read.csv(file=csv_path, header=TRUE, sep=",",
                             na.strings=c("", "NA"))
    effects_pca <- prcomp(effects_data[, c(2:3)], center=TRUE, scale.=TRUE)
    print(summary(effects_pca))   # variance summary
    print(effects_pca$rotation)   # loadings
    print(effects_pca$x)          # component scores
    pct_var_exp <- get_eig(effects_pca)
    pct_var_exp <- cbind(PC=rownames(pct_var_exp), pct_var_exp)
    rownames(pct_var_exp) <- 1:nrow(pct_var_exp)
    # Relabel "Dim.1"/"Dim.2" as "1"/"2" for the x axis.
    pct_var_exp$PC_name[pct_var_exp$PC == "Dim.1"] <- "1"
    pct_var_exp$PC_name[pct_var_exp$PC == "Dim.2"] <- "2"
    pct_var_exp$PC <- pct_var_exp$PC_name
    base_plot <- ggplot(pct_var_exp, aes(x=PC, y=variance.percent)) +
        geom_point(stat="identity", size=5)
    base_plot + th + ylab('Variance Explained (%) ') +
        theme(text=element_text(family="Helvetica"),
              plot.title=element_text(family="Helvetica", face="bold"),
              plot.background=element_rect(fill='white', color='white')) +
        theme(plot.margin=unit(c(5.5, 25.5, 5.5, 25.5), "points")) +
        scale_y_continuous(limits=c(0, 100))
}
# fMRI and MTurk samples share identical processing; only the input differs.
panel_B <- make_pca_panel("data/scenario_effects_fmri_sample.csv")
panel_C <- make_pca_panel("data/scenario_effects_mturk_sample.csv")
############### Combine into a single figure ##################################
combo_plot <- ggarrange(panel_A, panel_B, panel_C,
                        ncol = 3, widths = c(1, 1, 1), align = 'hv',
                        labels = c("A", "B", "C"), font.label = list(size = 20))
# save to disk
ggsave('figs/supp_fig_3.pdf', plot=combo_plot, width=13, height=6,
       units='in', useDingbats=FALSE)
|
#' @title sim_multiple_rows: Simulate tree from multiple rows and apply summary statistics
#' @description Runs multiple simulations, optionally thinned to every
#'   \code{by}-th row of the input, and collects summary statistics such as
#'   total fossil count, fossil count per interval, and origin time.
#'
#' @param data data frame of simulation parameters, one simulation per row
#' @param extant Number of extant taxa
#' @param mus Extinction rate
#' @param lambdas Speciation rate
#' @param psis Fossil sampling rate. Input as a vector
#' @param intervals Vector of time intervals or time bins
#' @param by Selects for specific rows. For example, by = 100 refers to sampling
#' every 100th row. Defaults to FALSE. If FALSE, every row is sampled to produce
#' a simulated phylo object.
#' @param path Optional file path; when supplied, the summary table is also
#' written to this location as a CSV.
#'
#' @return summlist. A data frame of summary statistics (origin time, total
#' fossil count, and fossil count per interval), one row per simulation.
#'
sim_multiple_rows <- function(data, extant, mus, lambdas, psis, intervals, by = FALSE, path = c()){
  # One output column per statistic: origin time, total fossil count,
  # plus one fossil count per time interval.
  num <- length(intervals) + 2
  # Pre-create the list slots. The previous `summlist[i + 1] <- c()` was a
  # no-op on a list (assigning NULL deletes), so reading summlist[[j]] below
  # failed with "subscript out of bounds" on the first iteration.
  summlist <- vector("list", num)

  # Determine which rows of `data` to simulate.
  if (isFALSE(by)) {
    rows <- seq_len(nrow(data))                    # every row
  } else {
    rows <- which(seq_len(nrow(data)) %% by == 0)  # every `by`-th row
  }

  for (i in rows) {
    tempTree <- sim_single_row(n_extant = extant, n_trees = 1, df = data,
                               mu = mus, lambda = lambdas, psi = psis,
                               row = i, mers = intervals)
    tree_stats <- summarize_tree(tempTree, intervals)
    # Use a separate index so the row counter `i` is not clobbered.
    for (j in seq_along(tree_stats)) {
      summlist[[j]] <- append(summlist[[j]], tree_stats[j])
    }
  }

  summlist <- as.data.frame(summlist)
  # Name the columns once, outside any loop.
  names(summlist)[1] <- "OriginTime"
  names(summlist)[2] <- "Total_Foss_Count"
  if (ncol(summlist) > 2) {
    names(summlist)[3:ncol(summlist)] <-
      paste("Int", seq_len(ncol(summlist) - 2), "Fossils", sep = "_")
  }

  if (length(path) != 0) {
    write.csv(summlist, file = path)
  }
  return(summlist)
}
| /R/multiple_simulations.R | no_license | jaishimuku/WrightLab_FBD | R | false | false | 2,352 | r |
#' @title sim_multiple_rows: Simulate tree from multiple rows and apply summary statistics
#' @description Function runs multiple simulations. Can be specified by row.
#' Also provides summary statistics like total fossil count, fossil
#' count per interval, and origin time.
#'
#' @param data data frame
#' @param extant Number of extant taxa
#' @param mus Extinction rate
#' @param lambdas Speciation rate
#' @param psis Fossil sampling rate. Input as a vector
#' @param intervals Vector of time intervals or time bins
#' @param by Selects for specific rows. For example, by = 100 refers to sampling
#' every 100th row. Defaults to FALSE. If FALSE, every row is sampled to produce
#' a simulated phylo object.
#' @param path Specify path to save
#'
#' @return summlist. An object describing useful summary statistics like
#' total fossil count, fossil count per interval, and origin time
#'
#'
sim_multiple_rows <- function(data, extant, mus, lambdas, psis, intervals, by = FALSE, path = c()){
num <- length(intervals) + 2 #length of time bins plus 2 (the mu and lambda)
summlist <- list()
for(i in c(1:num)){summlist[i + 1] <- c()}
if(by == FALSE){
for(i in c(1:nrow(data))){
tempTree <- sim_single_row(n_extant = extant, n_trees = 1, df = data, mu = mus, lambda = lambdas, psi = psis, row = i, mers = intervals)
tree_stats <- summarize_tree(tempTree, intervals)
for(i in c(1:length(tree_stats))){
summlist[[i]] <- append(summlist[[i]], tree_stats[i])
}
}
}
else{
for(i in which(c(1:nrow(data) %% by == 0))){
tempTree <- sim_single_row(n_extant = extant, n_trees = 1, df = data, mu = mus, lambda = lambdas, psi = psis, row = i, mers = intervals)
tree_stats <- summarize_tree(tempTree, intervals)
for(i in c(1:length(tree_stats))){
summlist[[i]] <- append(summlist[[i]], tree_stats[i]) #add to summlist the tree_stats value
}
}
}
summlist <- as.data.frame(summlist)
for(i in c(1:ncol(summlist))){
names(summlist)[1] <- "OriginTime"
names(summlist)[2] <- "Total_Foss_Count"
if(i > 2){
names(summlist)[i] <- paste("Int", i - 2, "Fossils" ,sep = "_")
}
}
if(length(path) != 0){
write.csv(summlist, file = path)
}
return(summlist)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/decom.adaptive.R
\name{decom.adaptive}
\alias{decom.adaptive}
\title{decom: decompose waveform with adaptive Gaussian function.}
\usage{
decom.adaptive(x, smooth = "TRUE", thres = 0.22, width = 3)
}
\arguments{
\item{x}{is a waveform with a index at the begining and followed with intensities.}
\item{smooth}{is tell whether you want to smooth the waveform to remove some obvious outliers. Default is TRUE.}
\item{thres}{is to determine if the detected peak is the real peak whose intensity should be higher than threshold*maximum intensity. Default is 0.22.}
\item{width}{width of moving window.Default is 3, must be odd integer between 1 and n.This parameter ONLY work when the smooth is TRUE.}
}
\value{
A list containing estimates of A, u, sig and ri (the rate parameter in the adaptive Gaussian function) after decomposition.
}
\description{
The function allows you to estimate parameters characterizing waveforms and to pave the way for generating waveform-based point clouds.
}
\examples{
##import return waveform data
data(return)
lr<-nrow(return)
ind<-c(1:lr)
return<-data.frame(ind,return)
x<-return[1,] ###must be a dataset including intensity with index at the beginning.
r1<-decom(x)
r2<-decom.adaptive(x)
# for the whole dataset
dr3<-apply(return,1,decom.adaptive)
dd<-return[10:100,]
dr4<-apply(dd,1,decom.adaptive)
####to collect all data
rfit3<-do.call("rbind",lapply(dr3,"[[",1)) ## waveform is correctly decomposed with index,some are not correct index by NA
ga3<-do.call("rbind",lapply(dr3,"[[",2)) ###the original results, which can help to get more detailed results.
pa3<-do.call("rbind",lapply(dr3,"[[",3)) ###useful decomposition results for next step or geolocation transformation.
colnames(pa3)<-c("index","pi","t","sd","ri","pise","tse","sdse","rise")
####delete some wrong ones
rid<-rfit3[!is.na(rfit3),]
wid<-setdiff(c(1:lr),rid) ###index of waveforms needs to be reprocessed
rpars<-pa3[!is.na(pa3[,1]),] ###useful decomposition parameters
# Generally, using the adaptive Gaussian will give fewer NA results compared to the Gaussian function (decom).
}
| /man/decom.adaptive.Rd | no_license | parvezrana/waveformlidar | R | false | true | 2,153 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/decom.adaptive.R
\name{decom.adaptive}
\alias{decom.adaptive}
\title{decom: decompose waveform with adaptive Gaussian function.}
\usage{
decom.adaptive(x, smooth = "TRUE", thres = 0.22, width = 3)
}
\arguments{
\item{x}{is a waveform with a index at the begining and followed with intensities.}
\item{smooth}{is tell whether you want to smooth the waveform to remove some obvious outliers. Default is TRUE.}
\item{thres}{is to determine if the detected peak is the real peak whose intensity should be higher than threshold*maximum intensity. Default is 0.22.}
\item{width}{width of moving window.Default is 3, must be odd integer between 1 and n.This parameter ONLY work when the smooth is TRUE.}
}
\value{
A list contains estimates of A, u, sig and ri (rate parameter in adaotive Gaussian function) after decomposition.
}
\description{
The function allows you to eatimate parameters charcterizing waveforms and to pave the way for generating waveform-based point cloud.
}
\examples{
##import return waveform data
data(return)
lr<-nrow(return)
ind<-c(1:lr)
return<-data.frame(ind,return)
x<-return[1,] ###must be a dataset including intensity with index at the beginning.
r1<-decom(x)
r2<-decom.adaptive(x)
# for the whole dataset
dr3<-apply(return,1,decom.adaptive)
dd<-return[10:100,]
dr4<-apply(dd,1,decom.adaptive)
####to collect all data
rfit3<-do.call("rbind",lapply(dr3,"[[",1)) ## waveform is correctly decomposed with index,some are not correct index by NA
ga3<-do.call("rbind",lapply(dr3,"[[",2)) ###the original results, which can help to get more detailed results.
pa3<-do.call("rbind",lapply(dr3,"[[",3)) ###useful decompostion results for next step or geolocation transformation.
colnames(pa3)<-c("index","pi","t","sd","ri","pise","tse","sdse","rise")
####delete some wrong ones
rid<-rfit3[!is.na(rfit3),]
wid<-setdiff(c(1:lr),rid) ###index of waveforms needs to be reprocessed
rpars<-pa1[!is.na(pa1[,1]),] ###useful decomposition parameters
Generally using adaptive Guassian will give less NA results comparing to Gaussian function (decom).
}
|
# orcid.R
# Take ORCID ID and make a list of papers
# use rcrossref to get better formatted data
# Version for shiny
# March 2018
# set token as an environmental variable (March 2018)
# WARNING(review): hard-coded API credential committed to source. It should be
# loaded from an external config/environment variable instead of being embedded
# here, and the token should be rotated if this file has been shared.
x <- "07073399-4dcc-47b3-a0a8-925327224519"
Sys.setenv(ORCID_TOKEN=x)
## Test IDs (kept for interactive debugging; all commented out)
# orcid.id = '0000-0003-1602-4544'
# orcid.id = '0000-0001-8369-1238' # Suzanne
# orcid.id = '0000-0003-0152-4394' # Richard
# orcid.id = '0000-0002-7129-0039' # Sue
# orcid.id = '0000-0003-2434-4206' # David Moher
# orcid.id ='0000-0002-2358-2440' # ginny
# orcid.id ='0000-0001-6339-0374' # me
# orcid.id = '0000-0002-5559-3267' # nick
# orcid.id='0000-0001-7733-287X'
# orcid.id = '0000-0002-5808-4249' #Jenny
# orcid.id='0000-0001-7564-073X' # Paul
# orcid.id='0000-0003-3637-2423' # Anisa
# orcid.id='0000-0002-6020-9733' # Lionel
# orcid.id='0000-0002-0630-3825'
my.orcid = function(orcid.id='0000-0002-2358-2440'){ # default here = Ginny
ret = list() # start with blank output
# a) select person
bio = orcid_id(orcid = orcid.id, profile='profile') # get basics
name = paste(bio[[1]]$`name`$`given-names`$value,
bio[[1]]$`name`$`family-name`$value)
name = gsub(' ', ' ', name) # remove double space
name = gsub(' $', '', name) # remove trailing space
# b) select works
d = works(orcid_id(orcid = orcid.id)) # get works as a tibble
# if no papers then end function here
if(nrow(d)==0){
ret$name = name
ret$papers = NULL
ret$authors = NULL
return(ret)
}
# hide all this in a dummy function for now, as it's not used
use.ids = function(){
ids = NULL
for (k in 1:nrow(d)){
this = d[k,]$`external-ids.external-id`[[1]]
if(is.null(this)==F & length(this)>0){
# First get doi
this.id = subset(this, `external-id-type`=='doi')
if(nrow(this.id)==1){
this.frame = data.frame(type='doi', id=this.id$`external-id-value`)
}
if(nrow(this.id)==0){
this.id = subset(this, `external-id-type`=='pmid')
if(nrow(this.id)==1){
this.frame = data.frame(type='pmid', id=this.id$`external-id-value`)
}
}
if(nrow(this.id)==0){
#cat('No doi,',k,'\n')
this.frame = NULL
}
# concatenate
ids = rbind(ids, this.frame)
}
}
} # end of dummy use.ids function
#unlist(plyr::llply(d$`external-ids.external-id`, function(x){`external-id-value`}))
# may need to revert to a loop
#for (k in 1:nrow(d)){
# unlist(plyr::llply(aff, function(x){x$'affilname'})
#}
dois = identifiers(d, type='doi') # get DOIs, not available for all papers
dois = dois[duplicated(tolower(dois))==FALSE] # remove duplicates
#eids = identifiers(d, type='eid') # get Scopus IDs, not available for all papers
# remove F1000 DOIs where there is second version (keep latest version)
not.f1000 = dois[!str_detect(string=dois, pattern='f1000')]
f1000 = dois[str_detect(string=dois, pattern='f1000')]
if(length(f1000)>0){ # only if some F1000 journals
split.f1000 = str_split(f1000, pattern='\\.', n=Inf, simplify = TRUE) # split by .
split.f1000 = data.frame(split.f1000, stringsAsFactors = F)
split.f1000$X3 = as.numeric(split.f1000$X3)
split.f1000$X4 = as.numeric(split.f1000$X4)
split.f1000 = dplyr::group_by(split.f1000, X3) %>%
dplyr::arrange(X3, X4) %>%
filter(row_number()==n()) %>%
mutate(doi = paste(X1, '.', X2, '.', X3, '.', X4, sep=''))
# concatenate back F1000 and not F1000
dois = c(not.f1000, split.f1000$doi)
}
if(length(f1000)==0){dois = not.f1000}
# d) get nicely formatted data for papers with a DOIs using crossref
cdata.nonbibtex = cr_works(dois)$data
# add Open Access status (March 2018)
cdata.nonbibtex$OA = NA
# run with fail
n.match = count = 0
while(n.match != nrow(cdata.nonbibtex)&count < 3){ # run three times max
OAs = purrr::map_df(cdata.nonbibtex$DOI,
plyr::failwith(f = function(x) roadoi::oadoi_fetch(x, email = "a.barnett@qut.edu.au")))
n.match = nrow(OAs)
count = count + 1
#cat(n.match, ', count', count, '\n') # tracking warning
}
if(n.match != nrow(cdata.nonbibtex)){oa.warning = TRUE}
if(n.match == nrow(cdata.nonbibtex)){
oa.warning = FALSE
cdata.nonbibtex$OA = OAs$is_oa # Is there an OA copy? (logical)
}
# e) format papers with separate matrix for authors ###
papers = bib.authors = NULL
# e2) ... now for non bibtex from crossref
authors.crossref = NULL
if(nrow(cdata.nonbibtex) > 0){
authors.crossref = matrix(data='', nrow=nrow(cdata.nonbibtex), ncol=300) # start with huge matrix
for (k in 1:nrow(cdata.nonbibtex)){ # loop needed
# authors, convert from tibble
fauthors = cdata.nonbibtex$author[[k]]
fam.only = FALSE # flag for family only
if(is.null(fauthors)==FALSE){
if('family' %in% names(fauthors) & length(names(fauthors))<=2){ # changed to allow 'sequence' (Sep 2018)
fauthors = fauthors$family
fam.only = TRUE
}
}
if(fam.only==FALSE & ('given' %in% names(fauthors) == FALSE) & is.null(fauthors)==FALSE){
fauthors = dplyr::filter(fauthors, is.na(name)==FALSE) # not missing
fauthors = paste(fauthors$name)
}
if(fam.only==FALSE & 'given' %in% names(fauthors) & is.null(fauthors)==FALSE){
fauthors = filter(fauthors, is.na(family)==FALSE) # not missing
fauthors = select(fauthors, given, family)
fauthors = paste(fauthors$given, fauthors$family) # does include NA - to fix
}
if(is.null(fauthors)==FALSE){
if(length(fauthors)>ncol(authors.crossref)){fauthors = fauthors[1:ncol(authors.crossref)]} # truncate where author numbers are huge (jan 2018)
authors.crossref[k, 1:length(fauthors)] = fauthors
}
# year (was based on created, fixed January 2018)
idates = cdata.nonbibtex$issued[k]
cdates = cdata.nonbibtex$created[k]
if(is.na(idates)){idates = cdates} # if missing use created date
dlengths = nchar(idates)
idates[dlengths==4] = paste(idates[dlengths==4],'-01-01',sep='') # add years and months as needed
idates[dlengths==7] = paste(idates[dlengths==7],'-01',sep='')
year = format(as.Date(idates), '%Y')
## journal
journal = cdata.nonbibtex$container.title[k]
# Identify bioRxiv (couldn't find another way, needs updating)
if(is.na(journal)){
if(cdata.nonbibtex$publisher[k] == "Cold Spring Harbor Laboratory")(journal='bioRxiv')
}
# title
title = as.character(cdata.nonbibtex$title[k])
# volume/issue/pages
volume = cdata.nonbibtex$volume[k]
issue = cdata.nonbibtex$issue[k]
pages = cdata.nonbibtex$page[k]
# doi
DOI = cdata.nonbibtex$DOI[k]
# OA
OA = cdata.nonbibtex$OA[k]
# type
type = cdata.nonbibtex$type[k]
# put it all together
frame = data.frame(Journal=journal, Title=title, Year=year, Volume=volume, Issue=issue, Pages=pages, Type=type, DOI=DOI, OA=OA)
papers = rbind(papers, frame)
}
}
# f) combine authors and remove empty columns
authors = authors.crossref
to.find = which(colSums(authors=='') == nrow(authors))
if(length(to.find)==0){fmin = ncol(authors)+1 } # all columns full
if(length(to.find)>0){fmin = min(to.find)} # find first empty column
authors = authors[, 1:(fmin-1)]
if(nrow(papers)==1){authors=matrix(authors); authors=t(authors)}
# remove duplicates (again, just a safety net, should have been caught earlier)
if(nrow(papers) > 1){
dups = duplicated(tolower(papers$Title))
papers = papers[!dups,]
authors = authors[!dups,]
}
# remove later versions of paper with almost identical DOI _ TO DO
## count first author papers
# make alternative versions of name
reverse = paste(bio[[1]]$name$`family-name`$value, ', ',
substr(bio[[1]]$name$`given-names`$value,1,1), '.', sep='')
simple = paste(substr(bio[[1]]$name$`given-names`$value,1,1), '. ',
bio[[1]]$name$`family-name`$value, sep='')
s0 = paste(substr(bio[[1]]$name$`given-names`$value,1,1), ' ',
bio[[1]]$name$`family-name`$value, sep='')
s1 = paste(substr(bio[[1]]$name$`given-names`$value,1,1), '.[A-Z] ',
bio[[1]]$name$`family-name`$value, sep='')
s2 = paste(substr(bio[[1]]$name$`given-names`$value,1,1), '. [A-Z] ',
bio[[1]]$name$`family-name`$value, sep='')
s3 = paste(substr(bio[[1]]$name$`given-names`$value,1,1), '. [A-Z]. ',
bio[[1]]$name$`family-name`$value, sep='')
s4 = paste(substr(bio[[1]]$name$`given-names`$value,1,1), '.[A-Z]. ',
bio[[1]]$name$`family-name`$value, sep='')
s5 = paste(substr(bio[[1]]$name$`given-names`$value,1,1), ' [A-Z] ',
bio[[1]]$name$`family-name`$value, sep='')
s6 = paste(substr(bio[[1]]$name$`given-names`$value,1,1), '[A-Z] ',
bio[[1]]$name$`family-name`$value, sep='')
middle = paste(bio[[1]]$name$`given-names`$value, ' [A-Z]. ',
bio[[1]]$name$`family-name`$value, sep='')
middle1 = paste(bio[[1]]$name$`given-names`$value, ' [A-Z] ',
bio[[1]]$name$`family-name`$value, sep='')
name.to.search = tolower(c(name, reverse, simple, s0, s1, s2, s3, s4, s5, s6, middle, middle1))
index = grep(paste(name.to.search, sep='', collapse='|'), tolower(authors[,1])) # first column of authors
papers$First.author = 0
papers$First.author[index] = 1
# last author
authors.na = authors
authors.na[authors.na==''] = NA # version with missing authors
last = apply(authors.na, 1, function(x) tail(na.omit(x), 1)) # extract last authors
index = grep(paste(name.to.search, sep='', collapse='|'), tolower(last)) #
papers$Last.author = 0
papers$Last.author[index] = 1
papers$Last.author[papers$First.author == 1] = 0 # Single author papers are only flagged as first author papers
# work out author order - so that it can be bolded in report
matches = str_match(pattern=paste(name.to.search, sep='', collapse='|'), string=tolower(authors))
matches = matrix(matches, nrow=nrow(papers))
author.order = (is.na(matches)==F)%*%1:ncol(matches) # which columns are not zero
# for appearances
papers$Title = as.character(papers$Title)
papers$Journal = as.character(papers$Journal)
if(class(papers$Year)=='factor'){
papers$Year = as.numeric(as.character(papers$Year))
}
if(class(papers$Volume)=='factor'){
papers$Volume = as.character(papers$Volume)
}
if(class(papers$Issue)=='factor'){
papers$Issue = as.character(papers$Issue)
}
if(class(papers$Pages)=='factor'){
papers$Pages = as.character(papers$Pages)
}
if(class(papers$DOI)=='factor'){
papers$DOI = as.character(papers$DOI)
}
## need to remove/change special characters like: … and -- from title
# replace NAs is authors with ''
authors[is.na(authors)==T] = ''
# give a consistent number of columns to author matrix
blank = matrix("", nrow=nrow(authors), ncol=50) # 50 authors max
if(ncol(authors)>50){authors = authors[,1:50]} # truncate at 50 if over 50 authors on a paper
blank[, 1:ncol(authors)] = authors
authors = blank
# return
ret$name = name
ret$papers = papers
ret$oa.warning = oa.warning
ret$authors = authors # separate matrix so that authors can be selected
ret$author.order = author.order
# return
return(ret)
}
| /orcid.R | permissive | edward-burn/helping.funders | R | false | false | 11,449 | r | # orcid.R
# Take ORCID ID and make a list of papers
# use rcrossref to get better formatted data
# Version for shiny
# March 2018
# set token as an environmental variable (March 2018)
# SECURITY NOTE(review): this is a hard-coded API credential committed to
# source control. It should be rotated and read from the environment or a
# local config file (e.g. .Renviron) rather than embedded here.
x <- "07073399-4dcc-47b3-a0a8-925327224519"
Sys.setenv(ORCID_TOKEN=x)
## Test IDs
# orcid.id = '0000-0003-1602-4544'
# orcid.id = '0000-0001-8369-1238' # Suzanne
# orcid.id = '0000-0003-0152-4394' # Richard
# orcid.id = '0000-0002-7129-0039' # Sue
# orcid.id = '0000-0003-2434-4206' # David Moher
# orcid.id ='0000-0002-2358-2440' # ginny
# orcid.id ='0000-0001-6339-0374' # me
# orcid.id = '0000-0002-5559-3267' # nick
# orcid.id='0000-0001-7733-287X'
# orcid.id = '0000-0002-5808-4249' #Jenny
# orcid.id='0000-0001-7564-073X' # Paul
# orcid.id='0000-0003-3637-2423' # Anisa
# orcid.id='0000-0002-6020-9733' # Lionel
# orcid.id='0000-0002-0630-3825'
# main function
# my.orcid: fetch an ORCID profile and build a cleaned publication list.
#
# Argument:
#   orcid.id - ORCID identifier string (default is a test ID, "Ginny").
# Returns a list with:
#   name         - display name built from the ORCID given + family names
#   papers       - data frame (Journal, Title, Year, Volume, Issue, Pages,
#                  Type, DOI, OA, First.author, Last.author); NULL if the
#                  profile has no works
#   oa.warning   - TRUE if the Open Access lookup did not return a row for
#                  every DOI after three attempts
#   authors      - character matrix, one row per paper, padded to 50 columns
#   author.order - column index of the profile owner within each paper's
#                  author list (0 if not matched)
# Depends on rorcid (orcid_id, works, identifiers), rcrossref (cr_works),
# roadoi (oadoi_fetch), dplyr, stringr, purrr, plyr -- assumed attached or
# namespaced by the surrounding script. Requires network access.
my.orcid = function(orcid.id='0000-0002-2358-2440'){ # default here = Ginny
ret = list() # start with blank output
# a) select person
bio = orcid_id(orcid = orcid.id, profile='profile') # get basics
name = paste(bio[[1]]$`name`$`given-names`$value,
bio[[1]]$`name`$`family-name`$value)
name = gsub(' ', ' ', name) # remove double space
# NOTE(review): the gsub pattern and replacement above both appear to be a
# single space, which is a no-op; per its own comment the pattern was
# presumably meant to be a double space ('  ') -- confirm against the
# original file before changing.
name = gsub(' $', '', name) # remove trailing space
# b) select works
d = works(orcid_id(orcid = orcid.id)) # get works as a tibble
# if no papers then end function here
if(nrow(d)==0){
ret$name = name
ret$papers = NULL
ret$authors = NULL
return(ret)
}
# hide all this in a dummy function for now, as it's not used
# (use.ids is defined but never called: it was an alternative approach to
# pulling DOIs/PMIDs from the external-id tables, kept for reference)
use.ids = function(){
ids = NULL
for (k in 1:nrow(d)){
this = d[k,]$`external-ids.external-id`[[1]]
if(is.null(this)==F & length(this)>0){
# First get doi
this.id = subset(this, `external-id-type`=='doi')
if(nrow(this.id)==1){
this.frame = data.frame(type='doi', id=this.id$`external-id-value`)
}
if(nrow(this.id)==0){
this.id = subset(this, `external-id-type`=='pmid')
if(nrow(this.id)==1){
this.frame = data.frame(type='pmid', id=this.id$`external-id-value`)
}
}
if(nrow(this.id)==0){
#cat('No doi,',k,'\n')
this.frame = NULL
}
# concatenate
ids = rbind(ids, this.frame)
}
}
} # end of dummy use.ids function
#unlist(plyr::llply(d$`external-ids.external-id`, function(x){`external-id-value`}))
# may need to revert to a loop
#for (k in 1:nrow(d)){
# unlist(plyr::llply(aff, function(x){x$'affilname'})
#}
dois = identifiers(d, type='doi') # get DOIs, not available for all papers
dois = dois[duplicated(tolower(dois))==FALSE] # remove duplicates
#eids = identifiers(d, type='eid') # get Scopus IDs, not available for all papers
# remove F1000 DOIs where there is second version (keep latest version)
# F1000 DOIs encode the version in their final dot-separated field, so the
# DOI is split on '.' and only the highest version per article (X3) is kept.
not.f1000 = dois[!str_detect(string=dois, pattern='f1000')]
f1000 = dois[str_detect(string=dois, pattern='f1000')]
if(length(f1000)>0){ # only if some F1000 journals
split.f1000 = str_split(f1000, pattern='\\.', n=Inf, simplify = TRUE) # split by .
split.f1000 = data.frame(split.f1000, stringsAsFactors = F)
split.f1000$X3 = as.numeric(split.f1000$X3)
split.f1000$X4 = as.numeric(split.f1000$X4)
split.f1000 = dplyr::group_by(split.f1000, X3) %>%
dplyr::arrange(X3, X4) %>%
filter(row_number()==n()) %>%
mutate(doi = paste(X1, '.', X2, '.', X3, '.', X4, sep=''))
# concatenate back F1000 and not F1000
dois = c(not.f1000, split.f1000$doi)
}
if(length(f1000)==0){dois = not.f1000}
# d) get nicely formatted data for papers with a DOIs using crossref
cdata.nonbibtex = cr_works(dois)$data
# add Open Access status (March 2018)
cdata.nonbibtex$OA = NA
# run with fail
# Retry loop: oadoi_fetch can silently drop rows on transient failures, so
# re-request until one row per DOI comes back, at most three attempts.
n.match = count = 0
while(n.match != nrow(cdata.nonbibtex)&count < 3){ # run three times max
OAs = purrr::map_df(cdata.nonbibtex$DOI,
plyr::failwith(f = function(x) roadoi::oadoi_fetch(x, email = "a.barnett@qut.edu.au")))
n.match = nrow(OAs)
count = count + 1
#cat(n.match, ', count', count, '\n') # tracking warning
}
if(n.match != nrow(cdata.nonbibtex)){oa.warning = TRUE}
if(n.match == nrow(cdata.nonbibtex)){
oa.warning = FALSE
cdata.nonbibtex$OA = OAs$is_oa # Is there an OA copy? (logical)
}
# e) format papers with separate matrix for authors ###
papers = bib.authors = NULL
# e2) ... now for non bibtex from crossref
authors.crossref = NULL
if(nrow(cdata.nonbibtex) > 0){
authors.crossref = matrix(data='', nrow=nrow(cdata.nonbibtex), ncol=300) # start with huge matrix
for (k in 1:nrow(cdata.nonbibtex)){ # loop needed
# authors, convert from tibble
# Crossref author tables vary in shape: family-only, name-only, or
# given+family; the three branches below normalise each case to a
# character vector of author names.
fauthors = cdata.nonbibtex$author[[k]]
fam.only = FALSE # flag for family only
if(is.null(fauthors)==FALSE){
if('family' %in% names(fauthors) & length(names(fauthors))<=2){ # changed to allow 'sequence' (Sep 2018)
fauthors = fauthors$family
fam.only = TRUE
}
}
if(fam.only==FALSE & ('given' %in% names(fauthors) == FALSE) & is.null(fauthors)==FALSE){
fauthors = dplyr::filter(fauthors, is.na(name)==FALSE) # not missing
fauthors = paste(fauthors$name)
}
if(fam.only==FALSE & 'given' %in% names(fauthors) & is.null(fauthors)==FALSE){
fauthors = filter(fauthors, is.na(family)==FALSE) # not missing
fauthors = select(fauthors, given, family)
fauthors = paste(fauthors$given, fauthors$family) # does include NA - to fix
}
if(is.null(fauthors)==FALSE){
if(length(fauthors)>ncol(authors.crossref)){fauthors = fauthors[1:ncol(authors.crossref)]} # truncate where author numbers are huge (jan 2018)
authors.crossref[k, 1:length(fauthors)] = fauthors
}
# year (was based on created, fixed January 2018)
# Issued date preferred; partial dates ('YYYY' or 'YYYY-MM') are padded
# to full ISO dates before extracting the year.
idates = cdata.nonbibtex$issued[k]
cdates = cdata.nonbibtex$created[k]
if(is.na(idates)){idates = cdates} # if missing use created date
dlengths = nchar(idates)
idates[dlengths==4] = paste(idates[dlengths==4],'-01-01',sep='') # add years and months as needed
idates[dlengths==7] = paste(idates[dlengths==7],'-01',sep='')
year = format(as.Date(idates), '%Y')
## journal
journal = cdata.nonbibtex$container.title[k]
# Identify bioRxiv (couldn't find another way, needs updating)
# NOTE(review): the inner condition errors if publisher is also NA --
# assumes publisher is always present when journal is missing; confirm.
# '(journal='bioRxiv')' is a parenthesised assignment (works, but
# unconventional -- braces would be clearer).
if(is.na(journal)){
if(cdata.nonbibtex$publisher[k] == "Cold Spring Harbor Laboratory")(journal='bioRxiv')
}
# title
title = as.character(cdata.nonbibtex$title[k])
# volume/issue/pages
volume = cdata.nonbibtex$volume[k]
issue = cdata.nonbibtex$issue[k]
pages = cdata.nonbibtex$page[k]
# doi
DOI = cdata.nonbibtex$DOI[k]
# OA
OA = cdata.nonbibtex$OA[k]
# type
type = cdata.nonbibtex$type[k]
# put it all together
frame = data.frame(Journal=journal, Title=title, Year=year, Volume=volume, Issue=issue, Pages=pages, Type=type, DOI=DOI, OA=OA)
papers = rbind(papers, frame)
}
}
# f) combine authors and remove empty columns
authors = authors.crossref
to.find = which(colSums(authors=='') == nrow(authors))
if(length(to.find)==0){fmin = ncol(authors)+1 } # all columns full
if(length(to.find)>0){fmin = min(to.find)} # find first empty column
authors = authors[, 1:(fmin-1)]
if(nrow(papers)==1){authors=matrix(authors); authors=t(authors)} # keep matrix shape for a single paper
# remove duplicates (again, just a safety net, should have been caught earlier)
if(nrow(papers) > 1){
dups = duplicated(tolower(papers$Title))
papers = papers[!dups,]
authors = authors[!dups,]
}
# remove later versions of paper with almost identical DOI _ TO DO
## count first author papers
# make alternative versions of name
# Build regex variants of the person's name ("J. Smith", "Smith, J.",
# "J A Smith", "John A. Smith", ...) so first/last authorship can be
# detected despite inconsistent Crossref author formatting. The [A-Z]
# pieces are regex character classes matching a middle initial.
reverse = paste(bio[[1]]$name$`family-name`$value, ', ',
substr(bio[[1]]$name$`given-names`$value,1,1), '.', sep='')
simple = paste(substr(bio[[1]]$name$`given-names`$value,1,1), '. ',
bio[[1]]$name$`family-name`$value, sep='')
s0 = paste(substr(bio[[1]]$name$`given-names`$value,1,1), ' ',
bio[[1]]$name$`family-name`$value, sep='')
s1 = paste(substr(bio[[1]]$name$`given-names`$value,1,1), '.[A-Z] ',
bio[[1]]$name$`family-name`$value, sep='')
s2 = paste(substr(bio[[1]]$name$`given-names`$value,1,1), '. [A-Z] ',
bio[[1]]$name$`family-name`$value, sep='')
s3 = paste(substr(bio[[1]]$name$`given-names`$value,1,1), '. [A-Z]. ',
bio[[1]]$name$`family-name`$value, sep='')
s4 = paste(substr(bio[[1]]$name$`given-names`$value,1,1), '.[A-Z]. ',
bio[[1]]$name$`family-name`$value, sep='')
s5 = paste(substr(bio[[1]]$name$`given-names`$value,1,1), ' [A-Z] ',
bio[[1]]$name$`family-name`$value, sep='')
s6 = paste(substr(bio[[1]]$name$`given-names`$value,1,1), '[A-Z] ',
bio[[1]]$name$`family-name`$value, sep='')
middle = paste(bio[[1]]$name$`given-names`$value, ' [A-Z]. ',
bio[[1]]$name$`family-name`$value, sep='')
middle1 = paste(bio[[1]]$name$`given-names`$value, ' [A-Z] ',
bio[[1]]$name$`family-name`$value, sep='')
name.to.search = tolower(c(name, reverse, simple, s0, s1, s2, s3, s4, s5, s6, middle, middle1))
index = grep(paste(name.to.search, sep='', collapse='|'), tolower(authors[,1])) # first column of authors
papers$First.author = 0
papers$First.author[index] = 1
# last author
authors.na = authors
authors.na[authors.na==''] = NA # version with missing authors
last = apply(authors.na, 1, function(x) tail(na.omit(x), 1)) # extract last authors
index = grep(paste(name.to.search, sep='', collapse='|'), tolower(last)) # rows where the person is last author
papers$Last.author = 0
papers$Last.author[index] = 1
papers$Last.author[papers$First.author == 1] = 0 # Single author papers are only flagged as first author papers
# work out author order - so that it can be bolded in report
# matrix of matched names is converted to 0/1 and multiplied by the column
# index, yielding the position of the person in each paper's author list.
matches = str_match(pattern=paste(name.to.search, sep='', collapse='|'), string=tolower(authors))
matches = matrix(matches, nrow=nrow(papers))
author.order = (is.na(matches)==F)%*%1:ncol(matches) # which columns are not zero
# for appearances
# Coerce any factor columns back to character/numeric for clean display.
papers$Title = as.character(papers$Title)
papers$Journal = as.character(papers$Journal)
if(class(papers$Year)=='factor'){
papers$Year = as.numeric(as.character(papers$Year))
}
if(class(papers$Volume)=='factor'){
papers$Volume = as.character(papers$Volume)
}
if(class(papers$Issue)=='factor'){
papers$Issue = as.character(papers$Issue)
}
if(class(papers$Pages)=='factor'){
papers$Pages = as.character(papers$Pages)
}
if(class(papers$DOI)=='factor'){
papers$DOI = as.character(papers$DOI)
}
## need to remove/change special characters like: … and -- from title
# replace NAs in authors with ''
authors[is.na(authors)==T] = ''
# give a consistent number of columns to author matrix
blank = matrix("", nrow=nrow(authors), ncol=50) # 50 authors max
if(ncol(authors)>50){authors = authors[,1:50]} # truncate at 50 if over 50 authors on a paper
blank[, 1:ncol(authors)] = authors
authors = blank
# return
ret$name = name
ret$papers = papers
ret$oa.warning = oa.warning
ret$authors = authors # separate matrix so that authors can be selected
ret$author.order = author.order
# return
return(ret)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.