content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' Read GEM data from SAV files
#'
#' Reads a Global Entrepreneurship Monitor SPSS (.sav) file and returns the
#' contents as a tibble.
#'
#' @export
#' @param file path to a SAV file
#' @param ... additional arguments passed to \code{\link[foreign]{read.spss}}
#' @return A tibble with one row per record in the SAV file.
#' @examples
#' \dontrun{
#' files <- fs::dir_ls("~/Data/gem", regexp = ".sav$")
#' gem_data <- read_gem_data(files[1])
#' }
read_gem_data <- function(file, ...) {
  # Note that haven is faster but returns fewer columns, so foreign::read.spss
  # is used instead; swap in haven::read_sav(...) if speed matters more.
  # `...` is now forwarded so callers can tune the SPSS reader as documented.
  file %>%
    # haven::read_sav(...)
    foreign::read.spss(to.data.frame = TRUE, ...) %>%
    tibble::as_tibble()
}
| /R/read.R | no_license | beanumber/gem | R | false | false | 461 | r | #' Read GEM data from SAV files
#' @export
#' @param file path to a SAV file
#' @param ... additional arguments passed to \code{\link[foreign]{read.spss}}
#' @return A tibble with one row per record in the SAV file.
#' @examples
#' \dontrun{
#' files <- fs::dir_ls("~/Data/gem", regexp = ".sav$")
#' gem_data <- read_gem_data(files[1])
#' }
read_gem_data <- function(file, ...) {
  # Note that haven is faster but returns fewer columns, so foreign::read.spss
  # is used instead; swap in haven::read_sav(...) if speed matters more.
  # `...` is now forwarded so callers can tune the SPSS reader as documented.
  file %>%
    # haven::read_sav(...)
    foreign::read.spss(to.data.frame = TRUE, ...) %>%
    tibble::as_tibble()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blaster.R
\name{peak_blaster}
\alias{peak_blaster}
\title{MALDI-TOF MS peak "BLAST"er}
\usage{
peak_blaster(
query_peak_list,
subject_peak_list,
lowerMassCutoff = 3000,
upperMassCutoff = 15000,
chunksize = 10,
similarity_cutoff = NA
)
}
\arguments{
\item{query_peak_list}{list of numeric vectors representing query peaks}
\item{subject_peak_list}{list of numeric vectors representing subject peaks}
\item{lowerMassCutoff}{masses below this number will not be considered}
\item{upperMassCutoff}{masses above this number will not be considered}
\item{chunksize}{numeric, bin spectrum into X-sized bins}
\item{similarity_cutoff}{numeric to remove similarities above threshold, NA to return everything (default)}
}
\value{
data.table of pairwise similarities between query and subject peak lists
}
\description{
MALDI-TOF MS peak "BLAST"er
}
| /man/peak_blaster.Rd | no_license | chasemc/maldiblast | R | false | true | 934 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blaster.R
\name{peak_blaster}
\alias{peak_blaster}
\title{MALDI-TOF MS peak "BLAST"er}
\usage{
peak_blaster(
query_peak_list,
subject_peak_list,
lowerMassCutoff = 3000,
upperMassCutoff = 15000,
chunksize = 10,
similarity_cutoff = NA
)
}
\arguments{
\item{query_peak_list}{list of numeric vectors representing query peaks}
\item{subject_peak_list}{list of numeric vectors representing subject peaks}
\item{lowerMassCutoff}{masses below this number will not be considered}
\item{upperMassCutoff}{masses above this number will not be considered}
\item{chunksize}{numeric, bin spectrum into X-sized bins}
\item{similarity_cutoff}{numeric to remove similarities above threshold, NA to return everything (default)}
}
\value{
data.table of pairwise similarities between query and subject peak lists
}
\description{
MALDI-TOF MS peak "BLAST"er
}
|
/para anderson el gil.R | no_license | juandanielmedina/Prograaaaaaaa | R | false | false | 5,392 | r | ||
################################################################################
############## Analiza mjerenja na namotajima 2MVA 12.09.2020. ###############
################################################################################
# Source functions and objects file
source('functions.R')
# Plotting
library(ggplot2)
# Data paste
library(datapasta)
# Sorting
library(gtools)
# Fitting
library(broom)
library(rsm)
# Spatial data modelling
library(gstat)
library(sp)
# Gaussian process models
library(tgp)
library(mlegp)
################################################################################
################################################################################
# Transformator radi u kratkom spoju - 50 A, 1000 V na primaru.
# Mjerenja su provedena na 3 namotaja cilindričnog oblika.
# Mjerene su VN strana, NN strana i dvije bocne strane namotaja (ukupno 5
# povrsina).
# Koordinate točaka su zapisane kao duljina po kružnom luku od centralne
# simetrale.
# Na svakoj mjernoj povrsini je raspoređeno 15 mjernih točaka
# Namotaji su oznaceni s brojevima 1, 2, 3
# Data for loop input - KOORDINATE NEKIH TOČAKA PROMIJENJENE (STVARNE KORDINATE)
# I DODANO MJERENJE BOCNIH STRANA NAMOTAJA
# base.doe <- tibble::tribble(
# ~point.id, ~loc.x, ~loc.y,
# 1L, -151L, 389L,
# 2L, -125L, 216L,
# 3L, -109L, 566L,
# 4L, -80L, 106L,
# 5L, -60L, 335L,
# 6L, -51L, 524L,
# 7L, -25L, 130L,
# 8L, 1L, 20L,
# 9L, 32L, 284L,
# 10L, 35L, 612L,
# 11L, 66L, 440L,
# 12L, 75L, 199L,
# 13L, 96L, 57L,
# 14L, 135L, 320L,
# 15L, 150L, 461L
# )
# flank.doe <- base.doe %>%
# mutate(loc.x = case_when(
# loc.x == -151 ~ as.double(-140),
# loc.x == 150 ~ as.double(140),
# TRUE ~ as.double(loc.x)
# ))
#
# input.df <- base.doe %>% mutate(namotaj.id = 1, side = 'NN') %>%
# full_join(base.doe %>% mutate(namotaj.id = 1, side = 'VN') %>%
# filter(point.id != 11)) %>%
# full_join(flank.doe %>% mutate(namotaj.id = 1, side = 'bok')) %>%
# full_join(base.doe %>% mutate(namotaj.id = 2, side = 'NN')) %>%
# full_join(base.doe %>% mutate(namotaj.id = 2, side = 'VN') %>%
# filter(!point.id %in% c(10, 11))) %>%
# full_join(base.doe %>% mutate(namotaj.id = 3, side = 'NN')) %>%
# full_join(base.doe %>% mutate(namotaj.id = 3, side = 'VN') %>%
# filter(!point.id %in% c(10, 11))) %>%
# full_join(flank.doe %>% mutate(namotaj.id = 3, side = 'bok')) %>%
# select(namotaj.id, side, point.id, loc.x, loc.y)
#
# input.df %>% rename(Namotaj=namotaj.id, Strana=side, Tocka=point.id, x=loc.x,
# y=loc.y) %>%
# write_excel_csv2('2MVA/DoE/Aktivni_dio/namotaji_stvarne_koordinate.csv')
#
# input.df %>% group_by(namotaj.id, side) %>%
# summarise(count = n())
# Data input function
# data.input.f <- function(dat, input.df) {
# # Folder u kojem se nalaze mjerenja
# namotaji.path <- '2MVA/mjerenja/namotaji/'
# for (i in 1:length(input.df$point.id)) {
# # print(input.df$namotaj.id[i])
# # print(input.df$side[i])
# # print(paste0('pt', input.df$point.id[i]))
# # print(input.df$loc.x[i])
# # print(input.df$loc.y[i])
# read.path <- paste0(
# namotaji.path,
# switch (
# input.df$side[i], 'NN' = 'NN_strana/n', 'VN' = 'VN_strana/n',
# 'bok' = 'bok_lijevo_desno/b'
# ),
# input.df$namotaj.id[i], '_', input.df$point.id[i]
# )
# # print(read.path)
# print('#######################')
# dat <- dat %>%
# add_row(
# !!!VibData(
# point.id = input.df$point.id[i],
# loc.x = input.df$loc.x[i],
# loc.y = input.df$loc.y[i],
# rib = input.df$side[i],
# replication = input.df$namotaj.id[i],
# decimal.separator = ",",
# file.loc = read.path
# )
# )
# }
# return(dat)
# }
################################################################################
################################################################################
# Data input namotaj 1 NN pt1
# Input n1_1
# dat <- VibData(
# point.id = input.df$point.id[1],
# loc.x = input.df$loc.x[1],
# loc.y = input.df$loc.y[1],
# rib = input.df$side[1],
# replication = input.df$namotaj.id[1],
# decimal.separator = ',',
# file.loc = paste0('2MVA/mjerenja/namotaji/NN_strana/n1_1')
# )
# Remove input data for first point (n1_1)
# print(slice(input.df, 1))
# input.df <- input.df %>% slice(-1)
# Input the rest of data points for n1, n2 and n3
# dat <- data.input.f(dat, input.df)
# Change replication to winding and rib to side
# dat <- dat %>% rename(wind = replication, side = rib)
# Select only necessary variables
# dat <- dat %>% select(-pacf, -displacement.orig)
# Recalculate FFT resolution because of sample length change
# dat <- dat %>% group_by(wind, side, point.id) %>%
# mutate(d.freq = sample.rate/length(na.omit(velocity.amp))/2) %>% ungroup()
# Sanity data check
# dat %>% glimpse()
# dat %>% select(range, d.time, d.freq, sample.rate, sample.num) %>% distinct() %>% View()
# dat %>% group_by(wind, side, point.id) %>% summarise(
# length = length(velocity),
# min = min(velocity),
# max = max(velocity),
# mean = mean(velocity),
# median = median(velocity),
# ) %>%
# summarise(
# min.min = min(min),
# max.max = max(max)
# ) %>% View()
# dat %>%
# select(wind, side, point.id, peak.freq.orig, peak.vel.orig, peak.frequency, peak.velocity.amp) %>%
# drop_na() %>% group_by(wind, side, point.id) %>% slice(1) %>%
# mutate(
# equal = if_else(
# (peak.freq.orig <= (peak.frequency + 0.2)) & (peak.freq.orig >= (peak.frequency - 0.2)), T, F)) %>% View()
# Remove unnecessary variables
# dat <- dat %>% select(-peak.freq.orig, -peak.vel.orig, -peak.disp.orig,
# -peak.acc.orig, -velocity.orig)
# Create subtitles for plots
# dat <- dat %>% group_by(wind, side, point.id) %>%
# mutate(subtitle =
# paste0('Nam: ', wind, ' ', side, ' pt', point.id, ': ', '(', loc.x, ', ', loc.y, ')')
# ) %>% ungroup()
# Save current workspace
# save.image('2MVA/namotaji.RData')
# Load workspace
# Load the previously saved workspace; provides the measurement tibble `dat`
# (one row per sample: time series + FFT columns per wind/side/point).
load('2MVA/namotaji.RData')
################################################################################
################################################################################
# Time-domain plots: raw velocity vs. time for a hand-picked subset of
# measurement points, faceted by point, one figure per winding (1-3).
# Winding 1 - NN, VN and flank ('bok') sides.
dat %>% filter(wind == 1 & (
  (side == 'NN' & point.id %in% c(2, 6, 14)) | (
    (side == 'VN' & point.id %in% c(3, 6, 9)) | (
      (side == 'bok' & point.id %in% c(1, 10, 11))
    )))
) %>%
  select(time, velocity, point.id, wind, side, subtitle) %>%
  # Drop the "Nam: x " prefix from the facet label text.
  mutate(subtitle = str_sub(subtitle, start = 8)) %>%
  ggplot(aes(x = time, y = velocity)) +
  # mixedsort keeps point labels in natural numeric order (pt2 before pt10).
  facet_wrap(factor(subtitle, levels = mixedsort(unique(subtitle))) ~ .) +
  geom_line(color= 'black', size = 0.4) +
  # coord_fixed(ratio = 0.0012) +
  scale_y_continuous(
    name = expression('Brzina ['*mu*'m/s]'),
    # breaks = seq(-1000, 1000, 1000),
    limits = c(NA, NA)
  ) +
  scale_x_continuous(
    name = "Vrijeme [s]",
    breaks = seq(0, 7, 1),
    limits = c(NA, NA)
  ) +
  ggtitle("Namotaj 1 - Vremenska domena") +
  theme_bw() +
  theme(
    axis.line = element_line(size=0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5),
    panel.spacing = unit(0.2, 'lines')
  )
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/nam_1_brzina_vrijeme.png',
       width = 17, height = 9, units = 'cm', dpi = 320)
# Winding 2 - no flank measurements were taken for this winding.
dat %>% filter(wind == 2 & (
  (side == 'NN' & point.id %in% c(6, 14, 15)) | (
    (side == 'VN' & point.id %in% c(1, 6, 13))
  ))
) %>%
  select(time, velocity, point.id, wind, side, subtitle) %>%
  mutate(subtitle = str_sub(subtitle, start = 8)) %>%
  ggplot(aes(x = time, y = velocity)) +
  facet_wrap(factor(subtitle, levels = mixedsort(unique(subtitle))) ~ .) +
  geom_line(color= 'black', size = 0.4) +
  # coord_fixed(ratio = 0.0012) +
  scale_y_continuous(
    name = expression('Brzina ['*mu*'m/s]'),
    # breaks = seq(-1000, 1000, 1000),
    limits = c(NA, NA)
  ) +
  scale_x_continuous(
    name = "Vrijeme [s]",
    breaks = seq(0, 7, 1),
    limits = c(NA, NA)
  ) +
  ggtitle("Namotaj 2 - Vremenska domena") +
  theme_bw() +
  theme(
    axis.line = element_line(size=0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5),
    panel.spacing = unit(0.2, 'lines')
  )
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/nam_2_brzina_vrijeme.png',
       width = 17, height = 6.5, units = 'cm', dpi = 320)
# Winding 3 - NN, VN and flank sides.
dat %>% filter(wind == 3 & (
  (side == 'NN' & point.id %in% c(10, 13, 14)) | (
    (side == 'VN' & point.id %in% c(6, 9, 15)) | (
      (side == 'bok' & point.id %in% c(2, 10, 11))
    )))
) %>%
  select(time, velocity, point.id, wind, side, subtitle) %>%
  mutate(subtitle = str_sub(subtitle, start = 8)) %>%
  ggplot(aes(x = time, y = velocity)) +
  facet_wrap(factor(subtitle, levels = mixedsort(unique(subtitle))) ~ .) +
  geom_line(color= 'black', size = 0.4) +
  # coord_fixed(ratio = 0.0012) +
  scale_y_continuous(
    name = expression('Brzina ['*mu*'m/s]'),
    # breaks = seq(-1000, 1000, 1000),
    limits = c(NA, NA)
  ) +
  scale_x_continuous(
    name = "Vrijeme [s]",
    breaks = seq(0, 7, 1),
    limits = c(NA, NA)
  ) +
  ggtitle("Namotaj 3 - Vremenska domena") +
  theme_bw() +
  theme(
    axis.line = element_line(size=0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5),
    panel.spacing = unit(0.2, 'lines')
  )
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/nam_3_brzina_vrijeme.png',
       width = 17, height = 9, units = 'cm', dpi = 320)
################################################################################
################################################################################
# Frequency-domain plots: FFT velocity-amplitude spectra (shown for the
# 25-300 Hz band) for the same point subsets, one figure per winding.
# Winding 1.
dat %>% filter(wind == 1 & (
  (side == 'NN' & point.id %in% c(2, 6, 14)) | (
    (side == 'VN' & point.id %in% c(3, 6, 9)) | (
      (side == 'bok' & point.id %in% c(1, 10, 11))
    )))
) %>%
  select(frequency, velocity.amp, point.id, wind, side, subtitle) %>%
  # FFT columns are NA-padded to time-series length; keep spectrum rows only.
  drop_na() %>%
  mutate(subtitle = str_sub(subtitle, start = 8)) %>%
  ggplot(aes(x = frequency, y = velocity.amp)) +
  facet_wrap(factor(subtitle, levels = mixedsort(unique(subtitle))) ~ .) +
  geom_col(color= 'black', fill='black', width=0.6, size = 0.6) +
  scale_y_continuous(
    name = expression('Amplituda brzine ['*mu*'m/s]'),
    # breaks = seq(0, 1000, 500),
    limits = c(NA, NA)
  ) +
  scale_x_continuous(
    name = "Frekvencija [Hz]",
    breaks = seq(0, 300, 50),
    limits = c(24, 301)
    # limits = c(0, 1000)
  ) +
  ggtitle("Namotaj 1 - Frekvencijska domena") +
  theme_bw() +
  theme(
    axis.line = element_line(size=0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5),
    panel.spacing = unit(0.2, 'lines')
  )
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/nam_1_brzina_freq.png',
       width = 17, height = 9, units = 'cm', dpi = 320)
# Winding 2.
dat %>% filter(wind == 2 & (
  (side == 'NN' & point.id %in% c(6, 14, 15)) | (
    (side == 'VN' & point.id %in% c(1, 6, 13))
  ))
) %>%
  select(frequency, velocity.amp, point.id, wind, side, subtitle) %>%
  drop_na() %>%
  mutate(subtitle = str_sub(subtitle, start = 8)) %>%
  ggplot(aes(x = frequency, y = velocity.amp)) +
  facet_wrap(factor(subtitle, levels = mixedsort(unique(subtitle))) ~ .) +
  geom_col(color= 'black', fill='black', width=0.6, size = 0.6) +
  scale_y_continuous(
    name = expression('Amplituda brzine ['*mu*'m/s]'),
    # breaks = seq(0, 1000, 500),
    limits = c(NA, NA)
  ) +
  scale_x_continuous(
    name = "Frekvencija [Hz]",
    breaks = seq(0, 300, 50),
    limits = c(24, 301)
    # limits = c(0, 1000)
  ) +
  ggtitle("Namotaj 2 - Frekvencijska domena") +
  theme_bw() +
  theme(
    axis.line = element_line(size=0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5),
    panel.spacing = unit(0.2, 'lines')
  )
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/nam_2_brzina_freq.png',
       width = 17, height = 6.5, units = 'cm', dpi = 320)
# Winding 3.
dat %>% filter(wind == 3 & (
  (side == 'NN' & point.id %in% c(10, 13, 14)) | (
    (side == 'VN' & point.id %in% c(6, 9, 15)) | (
      (side == 'bok' & point.id %in% c(2, 10, 11))
    )))
) %>%
  select(frequency, velocity.amp, point.id, wind, side, subtitle) %>%
  drop_na() %>%
  mutate(subtitle = str_sub(subtitle, start = 8)) %>%
  ggplot(aes(x = frequency, y = velocity.amp)) +
  facet_wrap(factor(subtitle, levels = mixedsort(unique(subtitle))) ~ .) +
  geom_col(color= 'black', fill='black', width=0.6, size = 0.6) +
  scale_y_continuous(
    name = expression('Amplituda brzine ['*mu*'m/s]'),
    # breaks = seq(0, 1000, 500),
    limits = c(NA, NA)
  ) +
  scale_x_continuous(
    name = "Frekvencija [Hz]",
    breaks = seq(0, 300, 50),
    limits = c(24, 301)
    # limits = c(0, 1000)
  ) +
  ggtitle("Namotaj 3 - Frekvencijska domena") +
  theme_bw() +
  theme(
    axis.line = element_line(size=0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5),
    panel.spacing = unit(0.2, 'lines')
  )
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/nam_3_brzina_freq.png',
       width = 17, height = 9, units = 'cm', dpi = 320)
################################################################################
################################################################################
# Preliminary analysis
# Frequency table of peak.frequency - how often each peak frequency dominates.
dat %>% group_by(wind, side, point.id) %>%
  select(wind, side, point.id, peak.frequency, peak.velocity.amp) %>% drop_na() %>%
  ungroup() %>% count(peak.frequency) %>% View()
# Inspect peak amplitudes (rounded) per point, sorted view.
dat %>% group_by(wind, side, point.id) %>%
  select(wind, side, point.id, peak.frequency, peak.velocity.amp) %>% drop_na() %>%
  mutate(peak.velocity.amp = round(peak.velocity.amp)) %>% View()
# Fit a 100 Hz sine model to each point's velocity signal:
#   velocity ~ a*om*cos(om*t) + b*om*sin(om*t), om = 2*pi*100, no intercept.
# One lm per (wind, side, point.id) group, stored as a list column `fitSine`.
dat_fit <- dat %>%
  mutate(
    om_100 = 2*pi*100,
    A.100.c.term = om_100*cos(om_100*time),
    A.100.s.term = om_100*sin(om_100*time),
  ) %>%
  nest_by(wind, side, point.id) %>%
  mutate(fitSine = list(lm(
    velocity ~ 0 + A.100.c.term + A.100.s.term,
    data = data)))
# Inspect goodness of fit and coefficients of each per-point model.
dat_fit %>% summarise(glance(fitSine)) %>% View()
dat_fit %>% summarise(tidy(fitSine))
# Helper for the sine-fit comparison plot below: extract the fitted values and
# R^2 of the 100 Hz sine model for a single measurement point.
#
# Generalized: the winding and side are now parameters with defaults that
# reproduce the original hard-coded behaviour (winding 1, NN side), so
# existing calls fit_to_data_(dat_fit, point) are unchanged.
#
# @param dat_fit rowwise tibble with one fitted lm per (wind, side, point.id),
#                as built in the "Fit only 100 Hz sine waves" step above
# @param point   point.id whose fit to extract
# @param wind_id winding number to filter on (default 1)
# @param side_id winding side to filter on (default 'NN')
# @return tibble with columns fit.data (fitted velocities) and r.squared
fit_to_data_ <- function(dat_fit, point, wind_id = 1, side_id = 'NN') {
  dat_fit %>% filter(
    wind == wind_id & point.id == point & side == side_id
  ) %>% summarise(
    fit.data = predict(fitSine),
    r.squared = glance(fitSine)$r.squared
  )
}
# Compare measured vs. modelled (100 Hz sine) velocity for winding 1, NN side,
# points 4, 2 and 6, faceted by the per-point R^2 of the sine fit.
dat %>% filter(
  wind == 1 & point.id %in% c(4, 2, 6) & side == 'NN'
) %>%
  group_by(wind, side, point.id) %>%
  # Attach the per-point fitted series and R^2 from dat_fit.
  mutate(
    velocity.fit = fit_to_data_(dat_fit, unique(point.id))$fit.data,
    r.squared = fit_to_data_(dat_fit, unique(point.id))$r.squared
  ) %>%
  select(time, velocity, velocity.fit, point.id, wind, side, subtitle, r.squared) %>%
  # Facet label becomes the rounded R^2 value.
  mutate(
    subtitle = round(r.squared, 2)
  ) %>%
  # Long format so measured and modelled series share one colour aesthetic.
  pivot_longer(
    cols = c(velocity, velocity.fit),
    names_to = 'name_groups',
    values_to = 'velocity'
  ) %>%
  mutate(
    name_groups = case_when(
      name_groups == 'velocity' ~ 'Izmjereni',
      name_groups == 'velocity.fit' ~ 'Modelirani',
      T ~ name_groups
    )
  ) %>%
  ggplot(aes(x = time, y = velocity, color = name_groups)) +
  facet_grid(subtitle ~ .,
             # scales = 'free_y',
             labeller = label_bquote(italic(R)^2 * ' = ' * .(subtitle))) +
  geom_line(size = 0.6) +
  scale_y_continuous(
    name = expression('Brzina ['*mu*'m/s]'),
    # breaks = seq(-1000, 1000, 1000),
    limits = c(NA, NA)
  ) +
  # Zoom to a 0.2 s window so individual oscillations are visible.
  scale_x_continuous(
    name = "Vrijeme [s]",
    breaks = seq(0, 7, 0.025),
    limits = c(2, 2.2)
  ) +
  scale_color_manual(
    name = 'Podaci: ',
    values = c('dodgerblue2', 'orangered1')
  ) +
  ggtitle("Primjer aproksimacije izmjerenih vibracija") +
  theme_bw() + theme(
    # panel.border = element_rect(),
    legend.position = 'top',
    panel.background = element_blank(),
    panel.spacing = unit(2, 'mm'),
    legend.box.spacing = unit(2, 'mm'),
    legend.spacing = unit(2, 'mm'),
    legend.margin = margin(0, 0, 0, 0, 'mm'),
    axis.title = element_text(face="plain", size = 10),
    axis.text.x = element_text(colour="black", size = 8),
    axis.text.y = element_text(colour="black", size = 8),
    axis.line = element_line(size=0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5)
  )
ggsave(filename = 'izvjestaj_final/slike/fit_sinusoida_primjer.png',
       width = 16, height = 8, units = 'cm', dpi = 320, pointsize = 12)
# Model amplitudes and adjusted R^2 per point.
# From the sine-fit coefficients (a = cos term, b = sin term) compute:
#   A.100.v = 2*pi*100 * sqrt(a^2 + b^2)  -- velocity amplitude at 100 Hz
#   A.100.x = sqrt(a^2 + b^2)             -- corresponding displacement
#             amplitude (velocity amplitude divided by omega)
dat_model <- dat_fit %>% summarise(tidy(fitSine)) %>%
  group_by(wind, side, point.id) %>%
  select(wind, side, point.id, term, estimate) %>%
  pivot_wider(names_from = term, values_from = estimate) %>%
  summarise(
    A.100.v = 2*pi*100*sqrt(A.100.c.term^2 + A.100.s.term^2),
    A.100.x = sqrt(A.100.c.term^2 + A.100.s.term^2)
  ) %>% full_join(
    select(dat_fit %>% summarise(glance(fitSine)), wind, side, point.id,
           adj.r.squared),
    by = c('wind', 'side', 'point.id')
  )
dat_model %>% View()
# Per-point summary table: descriptive statistics, top-5 spectral peaks, and
# amplitudes at the harmonic frequencies of interest (25/50/100/200/300 Hz,
# matched within a fifth of the FFT bin width d.freq).
dat_summary <- dat %>%
  select(wind, side, point.id, loc.x, loc.y, velocity, peak.to.peak.vel,
         rms.vel, peak.frequency, peak.velocity.amp, frequency, d.freq,
         velocity.amp) %>%
  group_by(wind, side, point.id) %>%
  summarise(
    Namotaj = unique(wind), Strana=unique(side), Tocka = unique(point.id),
    Sirina = unique(loc.x), Visina = unique(loc.y),
    "Uvjet rada" = "Kratki spoj - 50 A - 1 kV primar",
    "Brzina min" = min(velocity), "Brzina max" = max(velocity),
    "Brzina sd" = sd(velocity),
    RMS = mean(rms.vel), 'Peak-to-peak' = unique(peak.to.peak.vel),
    "Peak freq 1" = peak.frequency[1],
    "Peak amp 1" = peak.velocity.amp[1],
    "Peak freq 2" = peak.frequency[2],
    "Peak amp 2" = peak.velocity.amp[2],
    "Peak freq 3" = peak.frequency[3],
    "Peak amp 3" = peak.velocity.amp[3],
    "Peak freq 4" = peak.frequency[4],
    "Peak amp 4" = peak.velocity.amp[4],
    "Peak freq 5" = peak.frequency[5],
    "Peak amp 5" = peak.velocity.amp[5],
    "25 Hz amp" = na.omit(velocity.amp[
      frequency >= (25.0-d.freq[1]/5) & frequency <= (25.0+d.freq[1]/5)
    ]),
    "50 Hz amp" = na.omit(velocity.amp[
      frequency >= (50.0-d.freq[1]/5) & frequency <= (50.0+d.freq[1]/5)
    ]),
    "100 Hz amp" = na.omit(velocity.amp[
      frequency >= (100.0-d.freq[1]/5) & frequency <= (100.0+d.freq[1]/5)
    ]),
    "200 Hz amp" = na.omit(velocity.amp[
      frequency >= (200.0-d.freq[1]/5) & frequency <= (200.0+d.freq[1]/5)
    ]),
    "300 Hz amp" = na.omit(velocity.amp[
      frequency >= (300.0-d.freq[1]/5) & frequency <= (300.0+d.freq[1]/5)
    ])
  ) %>%
  full_join(dat_model, by = c('wind', 'side', 'point.id')) %>%
  rename(
    'Model 100 Hz amp' = A.100.v,
    'Model 100 Hz disp amp' = A.100.x,
    'Model Adj R Squared' = adj.r.squared
  ) %>%
  ungroup() %>% select(-wind, -side, -point.id)
# Persist the summary to CSV (semicolon-separated, Excel-friendly).
dat_summary %>%
  write_excel_csv2(
    path = '2MVA/preliminarna_obrada/aktivni_dio/namotaji_summary.csv'
  )
# Reload the summary so later steps can start from the file instead of memory.
dat_summary <- read_csv2(
  file = '2MVA/preliminarna_obrada/aktivni_dio/namotaji_summary.csv'
)
################################################################################
################################################################################
# Plots on the windings: 100 Hz velocity amplitude at each measurement point,
# drawn as size/colour-scaled points at the physical (x, y) coordinates,
# faceted by winding side. One figure per winding.
# Velocity amplitude plot - winding 1 - 100 Hz.
dat_summary %>%
  select(wind = Namotaj, side = Strana, point.id = Tocka, loc.x = Sirina,
         loc.y = Visina, amp.100 = `100 Hz amp`) %>%
  filter(wind == 1) %>%
  mutate(
    side = paste0('Strana: ', side)
  ) %>%
  ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
  # Order facets to match the physical layout around the winding.
  facet_wrap(
    factor(
      side, levels = c('Strana: NN', 'Strana: bok', 'Strana: VN')) ~ .
  ) +
  coord_fixed() +
  # Vertical centreline of each measured surface.
  geom_vline(aes(xintercept = 0)) +
  geom_point(aes(fill = amp.100, size = amp.100),
             shape = 21, colour = "black",
             stroke = 1,
  ) +
  geom_text() +
  scale_y_continuous(
    name = expression('Visina '*italic('y')*' [mm]'),
    breaks = seq(0, 700, 50),
    limits = c(NA, NA)
  ) +
  scale_x_continuous(
    name = expression('Širina '*italic('x')*' [mm]'),
    breaks = seq(-150, 150, 50),
    limits = c(NA, NA)
  ) +
  scale_fill_viridis_c(
    name = expression('Amplituda ['*mu*'m/s]'),
    breaks = seq(0, 2000, 250),
    option = 'C',
    alpha = 1,
    begin = 0.3,
  ) +
  scale_size_area(
    max_size = 13,
    # range = c(5, 10),
    guide = "none"
  ) +
  ggtitle("Namotaj: 1 - Amplitude brzine - 100 Hz") +
  theme_bw() +
  theme(
    axis.line = element_line(size=0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5),
    panel.spacing = unit(1, 'lines')
  )
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/amplitude_100_hz_nam_1.png',
       width = 25, height = 15, units = 'cm', dpi = 320)
# Velocity amplitude plot - winding 2 - 100 Hz (no flank side; smaller
# amplitude scale and point size than windings 1 and 3).
dat_summary %>%
  select(wind = Namotaj, side = Strana, point.id = Tocka, loc.x = Sirina,
         loc.y = Visina, amp.100 = `100 Hz amp`) %>%
  filter(wind == 2) %>%
  mutate(
    side = paste0('Strana: ', side)
  ) %>%
  ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
  facet_wrap(
    factor(
      side, levels = c('Strana: NN', 'Strana: bok', 'Strana: VN')) ~ .
  ) +
  coord_fixed() +
  geom_vline(aes(xintercept = 0)) +
  geom_point(aes(fill = amp.100, size = amp.100),
             shape = 21, colour = "black",
             stroke = 1,
  ) +
  geom_text() +
  scale_y_continuous(
    name = expression('Visina '*italic('y')*' [mm]'),
    breaks = seq(0, 700, 50),
    limits = c(NA, NA)
  ) +
  scale_x_continuous(
    name = expression('Širina '*italic('x')*' [mm]'),
    breaks = seq(-150, 150, 50),
    limits = c(NA, NA)
  ) +
  scale_fill_viridis_c(
    name = expression('Amplituda ['*mu*'m/s]'),
    breaks = seq(0, 800, 100),
    option = 'C',
    alpha = 1,
    begin = 0.3,
  ) +
  scale_size_area(
    max_size = 7,
    # range = c(5, 10),
    guide = "none"
  ) +
  ggtitle("Namotaj: 2 - Amplitude brzine - 100 Hz") +
  theme_bw() +
  theme(
    axis.line = element_line(size=0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5),
    panel.spacing = unit(1, 'lines')
  )
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/amplitude_100_hz_nam_2.png',
       width = 17, height = 15, units = 'cm', dpi = 320)
# Velocity amplitude plot - winding 3 - 100 Hz (facet order reversed: VN side
# first, matching this winding's orientation in the transformer).
dat_summary %>%
  select(wind = Namotaj, side = Strana, point.id = Tocka, loc.x = Sirina,
         loc.y = Visina, amp.100 = `100 Hz amp`) %>%
  filter(wind == 3) %>%
  mutate(
    side = paste0('Strana: ', side)
  ) %>%
  ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
  facet_wrap(
    factor(
      side, levels = c('Strana: VN', 'Strana: bok', 'Strana: NN')) ~ .
  ) +
  coord_fixed() +
  geom_vline(aes(xintercept = 0)) +
  geom_point(aes(fill = amp.100, size = amp.100),
             shape = 21, colour = "black",
             stroke = 1,
  ) +
  geom_text() +
  scale_y_continuous(
    name = expression('Visina '*italic('y')*' [mm]'),
    breaks = seq(0, 700, 50),
    limits = c(NA, NA)
  ) +
  scale_x_continuous(
    name = expression('Širina '*italic('x')*' [mm]'),
    breaks = seq(-150, 150, 50),
    limits = c(NA, NA)
  ) +
  scale_fill_viridis_c(
    name = expression('Amplituda ['*mu*'m/s]'),
    breaks = seq(0, 2000, 250),
    option = 'C',
    alpha = 1,
    begin = 0.3,
  ) +
  scale_size_area(
    max_size = 13,
    # range = c(5, 10),
    guide = "none"
  ) +
  ggtitle("Namotaj: 3 - Amplitude brzine - 100 Hz") +
  theme_bw() +
  theme(
    axis.line = element_line(size=0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5),
    panel.spacing = unit(1, 'lines')
  )
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/amplitude_100_hz_nam_3.png',
       width = 25, height = 15, units = 'cm', dpi = 320)
################################################################################
################################################################################
# Surface modelling
# Load the summary and unfold the three measured surfaces of windings 1 and 3
# onto a single x axis (x.full): NN/VN sides are shifted left/right of the
# flank by dist.wind. dist.wind = 202.5*pi/2 + 50 appears to be a quarter of
# the winding circumference plus a 50 mm offset -- presumably the arc length
# from the flank centreline to the side centreline (TODO confirm geometry).
dat_surf <- read_csv2(
  file = '2MVA/preliminarna_obrada/aktivni_dio/namotaji_summary.csv'
) %>%
  select(
    wind = Namotaj, side = Strana, point.id = Tocka, x = Sirina, y = Visina,
    amp.v.100 = `Model 100 Hz amp`, amp.x.100 = `Model 100 Hz disp amp`,
    adj.r.sq = `Model Adj R Squared`
  ) %>%
  mutate(
    dist.wind = 202.5 * pi/ 2 + 50,
    x.full = case_when(
      (wind == 1 & side == 'NN') ~ as.integer(-dist.wind + x),
      (wind == 1 & side == 'VN') ~ as.integer(dist.wind + x),
      (wind == 3 & side == 'NN') ~ as.integer(dist.wind + x),
      (wind == 3 & side == 'VN') ~ as.integer(-dist.wind + x),
      TRUE ~ as.integer(x)
    )
  )
# Sanity check: plot the unfolded coordinates with side+point labels.
dat_surf %>% filter(wind == 3) %>%
  ggplot(aes(x = x.full, y = y, label = paste0(side, point.id))) + geom_text()
# Per-surface centres and extremes, used for coding the coordinates below.
# NOTE(review): this reassignment narrows dat_surf to six columns (dropping
# point.id, amp.x.100, adj.r.sq, dist.wind); later steps that still read those
# columns rely on dat_surf from the load step above.
dat_surf <- dat_surf %>% group_by(wind, side) %>%
  select(wind, side, x, x.full, y, amp.v.100) %>%
  mutate(
    mean.x = mean(x),
    extr.x = max(max(x) - mean.x, abs(min(x) - mean.x)),
    mean.y = mean(y),
    extr.y = max(max(y) - mean.y, abs(min(y) - mean.y))
  ) %>% ungroup()
dat_surf %>% distinct(wind, side, mean.x, extr.x, mean.y, extr.y) %>% View()
# Code winding 3 NN-side coordinates to [-1, 1] for response-surface fitting.
dat_coded <- coded.data(
  data =
    dat_surf %>%
    filter(side == 'NN' & wind %in% c(3)) %>%
    select(wind, x, y, amp.v.100) %>%
    mutate(wind = as.factor(wind)),
  x1 ~ (x + 0) / 159,
  x2 ~ (y - 310.5) / 310.5
)
as.data.frame(dat_coded)
# Second-order response-surface model; R^2 turned out low for these data.
dat_coded.rsm <- rsm(amp.v.100 ~ SO(x1, x2) + PQ(x1, x2), data = dat_coded)
summary(dat_coded.rsm)
# RMSE of the RSM fit.
sqrt(mean(dat_coded.rsm$residuals^2))
# Bayesian treed Gaussian process regression (tgp) on the same points.
X = dat_surf %>% filter(side == 'NN' & wind %in% c(3)) %>% select(x, y)
Z = dat_surf %>% filter(side == 'NN' & wind %in% c(3)) %>% select(amp.v.100)
dat_mod <- btgpllm(X = X, Z=Z)
# dat_mod
# RMSE of the GP fit at the training points.
sqrt(mean((Z$amp.v.100 - dat_mod$Zp.mean)^2))
plot(dat_mod)
# Spatial data modelling - kriging interpolation of the 100 Hz amplitudes.
# Fits a variogram to the measured amplitudes and kriges them onto a regular
# grid covering the unfolded winding surface.
#
# Workflow, controlled by two flags:
#   plt_map = TRUE        -> only plot the experimental variogram map and exit
#   variogram_only = TRUE -> only plot the fitted variogram and exit
#   otherwise             -> run kriging, plot the prediction and return it
#
# Arguments:
#   amp_df    data with columns x.full, y, wind and the amplitude column
#   amp       name (string) of the amplitude column to interpolate
#   vgm_*     variogram estimation/model settings: cutoff distance, number of
#             bins, model type ('Gau', 'Exp', ...), partial sill, range,
#             direction angles (alpha) and anisotropy (angle, ratio);
#             NA psill/range lets fit.variogram estimate them
#   grid_num  number of prediction-grid points per axis
# Returns the kriged object (var1.pred / var1.var), or NULL when only a
# variogram plot was requested.
krige.inter <- function(
  amp_df = dat_surf,
  amp = 'amp.v.100',
  plt_map = T,
  variogram_only = T,
  vgm_cutoff = 900,
  vgm_bin_num = 4,
  vgm_model = 'Gau',
  vgm_psill = NA,
  vgm_range = NA,
  vgm_alpha = seq(0, 135, 45),
  vgm_an_angle = 0,
  vgm_an_ratio = 1,
  grid_num = 300
) {
  # Wrap the data frame as a SpatialPointsDataFrame (sp) for gstat.
  dat_sp <- amp_df %$%
    SpatialPointsDataFrame(
      coords = select(., x.full, y),
      data = select(., !!amp, wind)
    )
  # Experimental variogram; `wind` is used as a covariate in the trend.
  var_exp <- variogram(
    formula(paste0(amp, '~wind')),
    dat_sp, cutoff = vgm_cutoff, width = vgm_cutoff/vgm_bin_num,
    alpha=vgm_alpha, map = plt_map
  )
  # Plot variogram map only, then exit.
  if (plt_map) {
    print(plot(var_exp, threshold = 3))
    return()
  }
  # Variogram model to fit.
  vgm_mod <- vgm(
    psill = vgm_psill, model = vgm_model,
    range = vgm_range, anis = c(vgm_an_angle, vgm_an_ratio)
  )
  # Fit the variogram model to the experimental variogram.
  var_fit <- fit.variogram(
    var_exp,
    model = vgm_mod)
  # Plot the fitted variogram only, then exit.
  if (variogram_only) {
    print(plot(var_exp, var_fit))
    return()
  }
  # Prediction-grid limits, slightly padded beyond the data extent.
  grid_limits <- amp_df %>%
    summarise(
      min.x = min(x.full)*1.03,
      max.x = max(x.full)*1.03,
      min.y = min(y)*0.1,
      max.y = max(y)*1.03
    )
  # Regular prediction grid, converted to SpatialPixels.
  pred.grid <- expand.grid(
    x = seq(grid_limits$min.x, grid_limits$max.x, length.out = grid_num),
    y = seq(grid_limits$min.y, grid_limits$max.y, length.out = grid_num)
  )
  pred.grid <- SpatialPixels(SpatialPoints(pred.grid))
  # Ordinary kriging on the grid.
  # NOTE(review): the variogram above was estimated with trend `amp ~ wind`
  # but kriging uses `amp ~ 1` -- confirm this mismatch is intentional.
  krige.pred <- krige(
    formula(paste0(amp, '~1')),
    locations = dat_sp,
    newdata = pred.grid,
    model = var_fit
  )
  print(spplot(krige.pred['var1.pred'], scales = list(draw = T)))
  # Return the kriging result.
  krige.pred
}
# Determine a sensible variogram cutoff from the data extent: the diagonal of
# the bounding box, a third of it, and sqrt(2)/2 * diagonal as an upper bound.
dat_surf %>% filter(wind == 2 & side == 'VN') %>% select(x.full, y) %>%
  summarise(
    max.x = max(x.full),
    min.x = min(x.full),
    max.y = max(y),
    min.y = min(y),
  ) %>%
  mutate(
    diag = sqrt((max.x - min.x)^2 + (max.y - min.y)^2),
    diag.3 = diag/3,
    cutoff.max = sqrt(2) / 2 * diag
  )
# Interactive trial of interpolation parameters for winding 1 (plots only;
# the chosen settings are reused in the dat_disp_disp assembly below).
krige.inter(
  amp_df = dat_surf %>% filter(wind == 1),
  amp = 'amp.v.100',
  plt_map = F,
  variogram_only = F,
  vgm_cutoff = 800,
  vgm_bin_num = 6,
  vgm_psill = NA,
  vgm_range = NA,
  vgm_model = 'Gau',
  vgm_alpha = seq(0, 135, 45),
  vgm_an_angle = 135,
  vgm_an_ratio = 5/11,
  grid_num = 300
)
# Build one tibble combining the measured displacement amplitudes (amp.x.100)
# with kriged model surfaces for the contour-plus-points plots: rows with
# point.id == 'model' are kriging-grid predictions; other rows are the
# measured points. Windings 1 and 3 are kriged over the whole unfolded
# surface; winding 2 is kriged per side (NN, VN) with its own settings.
# NOTE(review): this reads point.id/amp.x.100/dist.wind from dat_surf, which
# the 'Extremes for coding' step above drops -- re-run the dat_surf load step
# before this block.
dat_disp_disp <- dat_surf %>%
  select(-amp.v.100, -adj.r.sq, -x, dist.wind) %>%
  rename(x = x.full) %>%
  mutate(point.id = as.character(point.id)) %>%
  # Winding 1 model surface.
  full_join(by = c('wind', 'point.id', 'x', 'y', 'amp.x.100'),
            y = tibble(
              wind = 1,
              point.id = 'model',
              !!!as_tibble(krige.inter(
                amp_df = dat_surf %>% filter(wind == 1),
                amp = 'amp.x.100',
                plt_map = F,
                variogram_only = F,
                vgm_cutoff = 800,
                vgm_bin_num = 6,
                vgm_psill = NA,
                vgm_range = NA,
                vgm_model = 'Gau',
                vgm_alpha = seq(0, 135, 45),
                vgm_an_angle = 135,
                vgm_an_ratio = 5/11,
                grid_num = 300
              ))
            ) %>% rename(amp.x.100 = var1.pred) %>% select(-var1.var)
  ) %>%
  # Winding 3 model surface (different anisotropy direction/ratio).
  full_join(by = c('wind', 'point.id', 'x', 'y', 'amp.x.100'),
            y = tibble(
              wind = 3,
              point.id = 'model',
              !!!as_tibble(krige.inter(
                amp_df = dat_surf %>% filter(wind == 3),
                amp = 'amp.x.100',
                plt_map = F,
                variogram_only = F,
                vgm_cutoff = 800,
                vgm_bin_num = 6,
                vgm_psill = NA,
                vgm_range = NA,
                vgm_model = 'Gau',
                vgm_alpha = seq(0, 126, 31.5),
                vgm_an_angle = 63,
                vgm_an_ratio = 8/11,
                grid_num = 300
              ))
            ) %>% rename(amp.x.100 = var1.pred) %>% select(-var1.var)
  ) %>%
  # Winding 2, NN side only (exponential model, shorter cutoff).
  full_join(by = c('wind', 'point.id', 'side', 'x', 'y', 'amp.x.100'),
            y = tibble(
              wind = 2,
              side = 'NN',
              point.id = 'model',
              !!!as_tibble(krige.inter(
                amp_df = dat_surf %>% filter(wind == 2 & side == 'NN'),
                amp = 'amp.x.100',
                plt_map = F,
                variogram_only = F,
                vgm_cutoff = 400,
                vgm_bin_num = 3,
                vgm_psill = NA,
                vgm_range = NA,
                vgm_model = 'Exp',
                vgm_alpha = seq(0, 135, 135),
                vgm_an_angle = 0,
                vgm_an_ratio = 1/2,
                grid_num = 300
              ))
            ) %>% rename(amp.x.100 = var1.pred) %>% select(-var1.var)
  ) %>%
  # Winding 2, VN side only.
  full_join(by = c('wind', 'point.id', 'side', 'x', 'y', 'amp.x.100'),
            y = tibble(
              wind = 2,
              side = 'VN',
              point.id = 'model',
              !!!as_tibble(krige.inter(
                amp_df = dat_surf %>% filter(wind == 2 & side == 'VN'),
                amp = 'amp.x.100',
                plt_map = F,
                variogram_only = F,
                vgm_cutoff = 400,
                vgm_bin_num = 3,
                vgm_psill = NA,
                vgm_range = NA,
                vgm_model = 'Exp',
                vgm_alpha = seq(0, 135, 135),
                vgm_an_angle = 0,
                vgm_an_ratio = 3/8,
                grid_num = 300
              ))
            ) %>% rename(amp.x.100 = var1.pred) %>% select(-var1.var)
  )
# Displacement amplitude plot - winding 1 - 100 Hz: filled contours from the
# kriged model surface, with measured points overlaid as side+point labels.
dat_disp_disp %>%
  filter(wind == 1) %>%
  rename(loc.x = x, loc.y = y) %>%
  mutate(
    # Contour values come only from model rows; negative kriged values are
    # clamped to zero. Measured rows get NA so they are excluded from contours.
    amp.x.100.cnt = case_when(
      point.id == 'model' & amp.x.100 >= 0 ~ amp.x.100,
      point.id == 'model' & amp.x.100 <= 0 ~ 0,
      TRUE ~ NA_real_
    ),
    # Conversely, keep amplitudes only for measured rows.
    amp.x.100 = case_when(
      point.id == 'model' ~ NA_real_,
      TRUE ~ amp.x.100
    ),
    # Label measured points as e.g. "NN4"; model rows get no label.
    point.id = case_when(
      point.id != 'model' ~ paste0(side, point.id),
      TRUE ~ NA_character_
    ),
    wind = paste0('Namotaj: ', wind)
  ) %>%
  ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
  geom_contour_filled(aes(z = amp.x.100.cnt), na.rm = T) +
  # Flank centreline and the NN/VN side centrelines at +/- dist.wind.
  geom_vline(aes(xintercept = 0)) +
  geom_vline(aes(xintercept = dist.wind)) +
  geom_vline(aes(xintercept = -dist.wind)) +
  geom_text(aes(label = point.id), na.rm = T) +
  coord_fixed() +
  scale_y_continuous(
    name = expression('Visina '*italic('y')*' [mm]'),
    breaks = seq(0, 700, 50),
    limits = c(NA, NA),
    expand = c(0.02, 0.02)
  ) +
  scale_x_continuous(
    name = expression('Širina '*italic('x')*' [mm]'),
    breaks = seq(-600, 600, 100),
    limits = c(NA, NA),
    expand = c(0.02, 0.02)
  ) +
  scale_fill_viridis_d(
    name = expression('Amplituda ['*mu*'m]'),
    option = 'C',
    alpha = 1,
    begin = 0.3,
  ) +
  ggtitle("Namotaj: 1 - Amplitude pomaka - 100 Hz") +
  theme_bw() +
  theme(
    axis.line = element_line(size=0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5),
    panel.spacing = unit(1, 'lines')
  )
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/contour_disp_nam_1_100_hz.png',
       width = 25, height = 15, units = 'cm', dpi = 320)
# Plot of displacement amplitudes - Winding 3 - 100 Hz.
# Same layout as the winding-1 plot: kriged contour plus labelled points.
dat_disp_disp %>%
  filter(wind == 3) %>%
  rename(loc.x = x, loc.y = y) %>%
  mutate(
    # Contour input: kriged amplitudes only, negatives clamped to zero.
    amp.x.100.cnt = case_when(
      point.id == 'model' & amp.x.100 >= 0 ~ amp.x.100,
      point.id == 'model' & amp.x.100 <= 0 ~ 0,
      TRUE ~ NA_real_
    ),
    # Blank the model rows so only measured amplitudes remain.
    amp.x.100 = case_when(
      point.id == 'model' ~ NA_real_,
      TRUE ~ amp.x.100
    ),
    # Labels: side + point id for measured points, NA for the kriged grid.
    point.id = case_when(
      point.id != 'model' ~ paste0(side, point.id),
      TRUE ~ NA_character_
    ),
    wind = paste0('Namotaj: ', wind)
  ) %>%
  ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
  geom_contour_filled(aes(z = amp.x.100.cnt), na.rm = TRUE) +  # T -> TRUE
  geom_vline(aes(xintercept = 0)) +
  geom_vline(aes(xintercept = dist.wind)) +
  geom_vline(aes(xintercept = -dist.wind)) +
  geom_text(aes(label = point.id), na.rm = TRUE) +  # T -> TRUE
  coord_fixed() +
  scale_y_continuous(
    name = expression('Visina '*italic('y')*' [mm]'),
    breaks = seq(0, 700, 50),
    limits = c(NA, NA),
    expand = c(0.02, 0.02)
  ) +
  scale_x_continuous(
    name = expression('Širina '*italic('x')*' [mm]'),
    breaks = seq(-600, 600, 100),
    limits = c(NA, NA),
    expand = c(0.02, 0.02)
  ) +
  scale_fill_viridis_d(
    name = expression('Amplituda ['*mu*'m]'),
    option = 'C',
    alpha = 1,
    begin = 0.3  # fragile trailing comma removed
  ) +
  ggtitle("Namotaj: 3 - Amplitude pomaka - 100 Hz") +
  theme_bw() +
  theme(
    axis.line = element_line(size = 0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5),
    panel.spacing = unit(1, 'lines')
  )
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/contour_disp_nam_3_100_hz.png',
       width = 25, height = 15, units = 'cm', dpi = 320)
# Plot of displacement amplitudes - Winding 2 - 100 Hz.
# Winding 2 has the NN and VN surfaces plotted as separate facets.
dat_disp_disp %>%
  filter(wind == 2) %>%
  rename(loc.x = x, loc.y = y) %>%
  mutate(
    # Contour input: kriged amplitudes only, negatives clamped to zero.
    amp.x.100.cnt = case_when(
      point.id == 'model' & amp.x.100 >= 0 ~ amp.x.100,
      point.id == 'model' & amp.x.100 <= 0 ~ 0,
      TRUE ~ NA_real_
    ),
    # Blank the model rows so only measured amplitudes remain.
    amp.x.100 = case_when(
      point.id == 'model' ~ NA_real_,
      TRUE ~ amp.x.100
    ),
    # Labels: point id only (side is shown by the facet strip).
    point.id = case_when(
      point.id != 'model' ~ point.id,
      TRUE ~ NA_character_
    ),
    wind = paste0('Namotaj: ', wind),
    side = paste0('Strana: ', side)
  ) %>%
  ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
  facet_wrap(side ~ ., scales = 'fixed') +
  geom_contour_filled(aes(z = amp.x.100.cnt), na.rm = TRUE) +  # T -> TRUE
  geom_vline(aes(xintercept = 0)) +
  geom_text(aes(label = point.id), na.rm = TRUE) +  # T -> TRUE
  coord_fixed() +
  scale_y_continuous(
    name = expression('Visina '*italic('y')*' [mm]'),
    breaks = seq(0, 700, 50),
    limits = c(NA, NA),
    expand = c(0.02, 0.02)
  ) +
  scale_x_continuous(
    name = expression('Širina '*italic('x')*' [mm]'),
    breaks = seq(-300, 300, 50),
    limits = c(NA, NA),
    expand = c(0.03, 0.03)
  ) +
  scale_fill_viridis_d(
    name = expression('Amplituda ['*mu*'m]'),
    option = 'C',
    alpha = 1,
    begin = 0.3,
    end = 0.8
  ) +
  ggtitle("Namotaj: 2 - Amplitude pomaka - 100 Hz") +
  theme_bw() +
  theme(
    axis.line = element_line(size = 0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5),
    panel.spacing = unit(1, 'lines')
  )
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/contour_disp_nam_2_100_hz.png',
       width = 25, height = 15, units = 'cm', dpi = 320)
## Modeling velocity
# Determining the variogram cutoff for the wind-2 VN surface from its
# bounding-box diagonal (diag/3 and sqrt(2)/2*diag are candidate cutoffs).
dat_surf %>% filter(wind == 2 & side == 'VN') %>% select(x.full, y) %>%
  summarise(
    max.x = max(x.full),
    min.x = min(x.full),
    max.y = max(y),
    min.y = min(y)  # fragile trailing comma removed
  ) %>%
  mutate(
    diag = sqrt((max.x - min.x)^2 + (max.y - min.y)^2),
    diag.3 = diag/3,
    cutoff.max = sqrt(2) / 2 * diag
  )
# Testing parameters for interpolation (interactive check of the variogram
# fit; the same settings are reused when building dat_disp_vel below).
krige.inter(
  amp_df = dat_surf %>% filter(wind == 2 & side == 'VN'),
  amp = 'amp.v.100',
  plt_map = FALSE,          # F -> FALSE
  variogram_only = FALSE,   # F -> FALSE
  vgm_cutoff = 400,
  vgm_bin_num = 3,
  vgm_psill = NA,
  vgm_range = NA,
  vgm_model = 'Exp',
  vgm_alpha = seq(0, 135, 135),
  vgm_an_angle = 0,
  vgm_an_ratio = 3/8,
  grid_num = 300
)
# Creating tibble for contour and points plot (velocity amplitudes).
# Measured points (dat_surf) are combined with kriged 100 Hz velocity-
# amplitude grids (point.id == 'model'), one krige.inter() call per surface,
# each with the variogram settings tuned interactively above.
dat_disp_vel <- dat_surf %>%
  # NOTE(review): mixes negative and positive selection - `dist.wind` is
  # re-listed after the drops; confirm the intended column set/order.
  select(-amp.x.100, -adj.r.sq, -x, dist.wind) %>%
  rename(x = x.full) %>%
  mutate(point.id = as.character(point.id)) %>%
  # Winding 1: anisotropic Gaussian variogram.
  full_join(by = c('wind', 'point.id', 'x', 'y', 'amp.v.100'),
    y = tibble(
      wind = 1,
      point.id = 'model',
      !!!as_tibble(krige.inter(
        amp_df = dat_surf %>% filter(wind == 1),
        amp = 'amp.v.100',
        plt_map = FALSE,          # F -> FALSE
        variogram_only = FALSE,   # F -> FALSE
        vgm_cutoff = 800,
        vgm_bin_num = 6,
        vgm_psill = NA,
        vgm_range = NA,
        vgm_model = 'Gau',
        vgm_alpha = seq(0, 135, 45),
        vgm_an_angle = 135,
        vgm_an_ratio = 5/11,
        grid_num = 300
      ))
    ) %>% rename(amp.v.100 = var1.pred) %>% select(-var1.var)
  ) %>%
  # Winding 3: Gaussian variogram with different anisotropy.
  full_join(by = c('wind', 'point.id', 'x', 'y', 'amp.v.100'),
    y = tibble(
      wind = 3,
      point.id = 'model',
      !!!as_tibble(krige.inter(
        amp_df = dat_surf %>% filter(wind == 3),
        amp = 'amp.v.100',
        plt_map = FALSE,
        variogram_only = FALSE,
        vgm_cutoff = 800,
        vgm_bin_num = 6,
        vgm_psill = NA,
        vgm_range = NA,
        vgm_model = 'Gau',
        vgm_alpha = seq(0, 126, 31.5),
        vgm_an_angle = 63,
        vgm_an_ratio = 8/11,
        grid_num = 300
      ))
    ) %>% rename(amp.v.100 = var1.pred) %>% select(-var1.var)
  ) %>%
  # Winding 2, NN side: exponential variogram (join also keys on `side`).
  full_join(by = c('wind', 'point.id', 'side', 'x', 'y', 'amp.v.100'),
    y = tibble(
      wind = 2,
      side = 'NN',
      point.id = 'model',
      !!!as_tibble(krige.inter(
        amp_df = dat_surf %>% filter(wind == 2 & side == 'NN'),
        amp = 'amp.v.100',
        plt_map = FALSE,
        variogram_only = FALSE,
        vgm_cutoff = 400,
        vgm_bin_num = 3,
        vgm_psill = NA,
        vgm_range = NA,
        vgm_model = 'Exp',
        vgm_alpha = seq(0, 135, 135),
        vgm_an_angle = 0,
        vgm_an_ratio = 1/2,
        grid_num = 300
      ))
    ) %>% rename(amp.v.100 = var1.pred) %>% select(-var1.var)
  ) %>%
  # Winding 2, VN side: exponential variogram, tighter anisotropy ratio.
  full_join(by = c('wind', 'point.id', 'side', 'x', 'y', 'amp.v.100'),
    y = tibble(
      wind = 2,
      side = 'VN',
      point.id = 'model',
      !!!as_tibble(krige.inter(
        amp_df = dat_surf %>% filter(wind == 2 & side == 'VN'),
        amp = 'amp.v.100',
        plt_map = FALSE,
        variogram_only = FALSE,
        vgm_cutoff = 400,
        vgm_bin_num = 3,
        vgm_psill = NA,
        vgm_range = NA,
        vgm_model = 'Exp',
        vgm_alpha = seq(0, 135, 135),
        vgm_an_angle = 0,
        vgm_an_ratio = 3/8,
        grid_num = 300
      ))
    ) %>% rename(amp.v.100 = var1.pred) %>% select(-var1.var)
  )
# Plot of velocity amplitudes - Winding 1 - 100 Hz.
# Kriged contour (point.id == 'model') with labelled measured points on top.
dat_disp_vel %>%
  filter(wind == 1) %>%
  rename(loc.x = x, loc.y = y) %>%
  mutate(
    # Contour input: kriged amplitudes only, negatives clamped to zero.
    amp.v.100.cnt = case_when(
      point.id == 'model' & amp.v.100 >= 0 ~ amp.v.100,
      point.id == 'model' & amp.v.100 <= 0 ~ 0,
      TRUE ~ NA_real_
    ),
    # Blank the model rows so only measured amplitudes remain.
    amp.v.100 = case_when(
      point.id == 'model' ~ NA_real_,
      TRUE ~ amp.v.100
    ),
    # Labels: side + point id for measured points, NA for the kriged grid.
    point.id = case_when(
      point.id != 'model' ~ paste0(side, point.id),
      TRUE ~ NA_character_
    ),
    wind = paste0('Namotaj: ', wind)
  ) %>%
  ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
  geom_contour_filled(aes(z = amp.v.100.cnt), na.rm = TRUE) +  # T -> TRUE
  geom_vline(aes(xintercept = 0)) +
  geom_vline(aes(xintercept = dist.wind)) +
  geom_vline(aes(xintercept = -dist.wind)) +
  geom_text(aes(label = point.id), na.rm = TRUE) +  # T -> TRUE
  coord_fixed() +
  scale_y_continuous(
    name = expression('Visina '*italic('y')*' [mm]'),
    breaks = seq(0, 700, 50),
    limits = c(NA, NA),
    expand = c(0.02, 0.02)
  ) +
  scale_x_continuous(
    name = expression('Širina '*italic('x')*' [mm]'),
    breaks = seq(-600, 600, 100),
    limits = c(NA, NA),
    expand = c(0.02, 0.02)
  ) +
  scale_fill_viridis_d(
    name = expression('Amplituda ['*mu*'m/s]'),
    option = 'C',
    alpha = 1,
    begin = 0.3  # fragile trailing comma removed
  ) +
  ggtitle("Namotaj: 1 - Amplitude brzina - 100 Hz") +
  theme_bw() +
  theme(
    axis.line = element_line(size = 0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5),
    panel.spacing = unit(1, 'lines')
  )
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/contour_vel_nam_1_100_hz.png',
       width = 25, height = 15, units = 'cm', dpi = 320)
# Plot of velocity amplitudes - Winding 3 - 100 Hz.
dat_disp_vel %>%
  filter(wind == 3) %>%
  rename(loc.x = x, loc.y = y) %>%
  mutate(
    # Contour input: kriged amplitudes only, negatives clamped to zero.
    amp.v.100.cnt = case_when(
      point.id == 'model' & amp.v.100 >= 0 ~ amp.v.100,
      point.id == 'model' & amp.v.100 <= 0 ~ 0,
      TRUE ~ NA_real_
    ),
    # Blank the model rows so only measured amplitudes remain.
    amp.v.100 = case_when(
      point.id == 'model' ~ NA_real_,
      TRUE ~ amp.v.100
    ),
    # Labels: side + point id for measured points, NA for the kriged grid.
    point.id = case_when(
      point.id != 'model' ~ paste0(side, point.id),
      TRUE ~ NA_character_
    ),
    wind = paste0('Namotaj: ', wind)
  ) %>%
  ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
  geom_contour_filled(aes(z = amp.v.100.cnt), na.rm = TRUE) +  # T -> TRUE
  geom_vline(aes(xintercept = 0)) +
  geom_vline(aes(xintercept = dist.wind)) +
  geom_vline(aes(xintercept = -dist.wind)) +
  geom_text(aes(label = point.id), na.rm = TRUE) +  # T -> TRUE
  coord_fixed() +
  scale_y_continuous(
    name = expression('Visina '*italic('y')*' [mm]'),
    breaks = seq(0, 700, 50),
    limits = c(NA, NA),
    expand = c(0.02, 0.02)
  ) +
  scale_x_continuous(
    name = expression('Širina '*italic('x')*' [mm]'),
    breaks = seq(-600, 600, 100),
    limits = c(NA, NA),
    expand = c(0.02, 0.02)
  ) +
  scale_fill_viridis_d(
    name = expression('Amplituda ['*mu*'m/s]'),
    option = 'C',
    alpha = 1,
    begin = 0.3  # fragile trailing comma removed
  ) +
  ggtitle("Namotaj: 3 - Amplitude brzina - 100 Hz") +
  theme_bw() +
  theme(
    axis.line = element_line(size = 0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5),
    panel.spacing = unit(1, 'lines')
  )
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/contour_vel_nam_3_100_hz.png',
       width = 25, height = 15, units = 'cm', dpi = 320)
# Plot of velocity amplitudes - Winding 2 - 100 Hz.
# (The original comment said "pomaka"/displacement, but this block plots
# velocity amplitudes, amp.v.100.) NN and VN surfaces as separate facets.
dat_disp_vel %>%
  filter(wind == 2) %>%
  rename(loc.x = x, loc.y = y) %>%
  mutate(
    # Contour input: kriged amplitudes only, negatives clamped to zero.
    amp.v.100.cnt = case_when(
      point.id == 'model' & amp.v.100 >= 0 ~ amp.v.100,
      point.id == 'model' & amp.v.100 <= 0 ~ 0,
      TRUE ~ NA_real_
    ),
    # Blank the model rows so only measured amplitudes remain.
    amp.v.100 = case_when(
      point.id == 'model' ~ NA_real_,
      TRUE ~ amp.v.100
    ),
    # Labels: point id only (side is shown by the facet strip).
    point.id = case_when(
      point.id != 'model' ~ point.id,
      TRUE ~ NA_character_
    ),
    wind = paste0('Namotaj: ', wind),
    side = paste0('Strana: ', side)
  ) %>%
  ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
  facet_wrap(side ~ ., scales = 'fixed') +
  geom_contour_filled(aes(z = amp.v.100.cnt), na.rm = TRUE) +  # T -> TRUE
  geom_vline(aes(xintercept = 0)) +
  geom_text(aes(label = point.id), na.rm = TRUE) +  # T -> TRUE
  coord_fixed() +
  scale_y_continuous(
    name = expression('Visina '*italic('y')*' [mm]'),
    breaks = seq(0, 700, 50),
    limits = c(NA, NA),
    expand = c(0.02, 0.02)
  ) +
  scale_x_continuous(
    name = expression('Širina '*italic('x')*' [mm]'),
    breaks = seq(-300, 300, 50),
    limits = c(NA, NA),
    expand = c(0.03, 0.03)
  ) +
  scale_fill_viridis_d(
    name = expression('Amplituda ['*mu*'m/s]'),
    option = 'C',
    alpha = 1,
    begin = 0.3,
    end = 0.8
  ) +
  ggtitle("Namotaj: 2 - Amplitude brzina - 100 Hz") +
  theme_bw() +
  theme(
    axis.line = element_line(size = 0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5),
    panel.spacing = unit(1, 'lines')
  )
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/contour_vel_nam_2_100_hz.png',
       width = 25, height = 15, units = 'cm', dpi = 320)
################################################################################
################################################################################
# Reference measurement taken while only the generator is running
# (transformer idle): two replications of the same reference point, read
# with the project-level VibData() loader.
dat_ref <- VibData(
  point.id = 'ref',
  loc.x = 0,
  loc.y = 0,
  rib = FALSE,  # F -> FALSE
  replication = 1,
  decimal.separator = ',',
  # redundant single-argument paste0() removed
  file.loc = '2MVA/mjerenja/namotaji/NN_strana/referenca_25Hz'
)
# Append the second replication as rows of the same tibble.
dat_ref <- dat_ref %>%
  add_row(
    !!!VibData(
      point.id = 'ref',
      loc.x = 0,
      loc.y = 0,
      rib = FALSE,  # F -> FALSE
      replication = 2,
      decimal.separator = ',',
      file.loc = '2MVA/mjerenja/namotaji/NN_strana/referenca_25Hz2'
    )
  )
# Frequency-domain spectrum of the reference point (generator only,
# transformer off), one facet per replication.
dat_ref %>%
  select(frequency, velocity.amp, point.id, replication) %>%
  drop_na() %>%
  mutate(subtitle = paste0('Referentno mjerenje: ', replication)) %>%
  ggplot(aes(x = frequency, y = velocity.amp)) +
  facet_wrap(factor(subtitle, levels = mixedsort(unique(subtitle))) ~ .) +
  geom_col(color = 'black', fill = 'black', width = 0.6, size = 0.6) +
  scale_x_continuous(
    name = "Frekvencija [Hz]",
    breaks = seq(0, 200, 25),
    limits = c(0, 201)
  ) +
  scale_y_continuous(
    name = expression('Amplituda brzine ['*mu*'m/s]'),
    limits = c(NA, NA)
  ) +
  labs(title = "Utjecaj generatora - Transformator ne radi") +
  theme_bw() +
  theme(
    plot.title = element_text(hjust = 0.5),
    axis.line = element_line(size = 0.5, colour = "black"),
    panel.spacing = unit(0.2, 'lines')
  )
ggsave(
  filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/ref_vibracije.png',
  width = 17, height = 4.5, units = 'cm', dpi = 320
)
# /2MVA/analysis_namotaji_2MVA.R | permissive | tbazina/transformer-measurement-analysis
################################################################################
############## Analiza mjerenja na namotajima 2MVA 12.09.2020. ###############
################################################################################
# Source functions and objects file
source('functions.R')
# Plotting
library(ggplot2)
# Data paste
library(datapasta)
# Sorting
library(gtools)
# Fitting
library(broom)
library(rsm)
# Spatial data modelling
library(gstat)
library(sp)
# Gaussian process models
library(tgp)
library(mlegp)
################################################################################
################################################################################
# Transformator radi u kratkom spoju - 50 A, 1000 V na primaru.
# Mjerenja su provedena na 3 namotaja cilindričnog oblika.
# Mjerene su VN strana, NN strana i dvije bocne strane namotaja (ukupno 5
# povrsina).
# Koordinate točaka su zapisane kao duljina po kružnom luku od centralne
# simetrale.
# Na svakoj mjernoj povrsini je raspoređeno 15 mjernih točaka
# Namotaji su oznaceni s brojevima 1, 2, 3
# Data for loop input - KOORDINATE NEKIH TOČAKA PROMIJENJENE (STVARNE KORDINATE)
# I DODANO MJERENJE BOCNIH STRANA NAMOTAJA
# base.doe <- tibble::tribble(
# ~point.id, ~loc.x, ~loc.y,
# 1L, -151L, 389L,
# 2L, -125L, 216L,
# 3L, -109L, 566L,
# 4L, -80L, 106L,
# 5L, -60L, 335L,
# 6L, -51L, 524L,
# 7L, -25L, 130L,
# 8L, 1L, 20L,
# 9L, 32L, 284L,
# 10L, 35L, 612L,
# 11L, 66L, 440L,
# 12L, 75L, 199L,
# 13L, 96L, 57L,
# 14L, 135L, 320L,
# 15L, 150L, 461L
# )
# flank.doe <- base.doe %>%
# mutate(loc.x = case_when(
# loc.x == -151 ~ as.double(-140),
# loc.x == 150 ~ as.double(140),
# TRUE ~ as.double(loc.x)
# ))
#
# input.df <- base.doe %>% mutate(namotaj.id = 1, side = 'NN') %>%
# full_join(base.doe %>% mutate(namotaj.id = 1, side = 'VN') %>%
# filter(point.id != 11)) %>%
# full_join(flank.doe %>% mutate(namotaj.id = 1, side = 'bok')) %>%
# full_join(base.doe %>% mutate(namotaj.id = 2, side = 'NN')) %>%
# full_join(base.doe %>% mutate(namotaj.id = 2, side = 'VN') %>%
# filter(!point.id %in% c(10, 11))) %>%
# full_join(base.doe %>% mutate(namotaj.id = 3, side = 'NN')) %>%
# full_join(base.doe %>% mutate(namotaj.id = 3, side = 'VN') %>%
# filter(!point.id %in% c(10, 11))) %>%
# full_join(flank.doe %>% mutate(namotaj.id = 3, side = 'bok')) %>%
# select(namotaj.id, side, point.id, loc.x, loc.y)
#
# input.df %>% rename(Namotaj=namotaj.id, Strana=side, Tocka=point.id, x=loc.x,
# y=loc.y) %>%
# write_excel_csv2('2MVA/DoE/Aktivni_dio/namotaji_stvarne_koordinate.csv')
#
# input.df %>% group_by(namotaj.id, side) %>%
# summarise(count = n())
# Data input function
# data.input.f <- function(dat, input.df) {
# # Folder u kojem se nalaze mjerenja
# namotaji.path <- '2MVA/mjerenja/namotaji/'
# for (i in 1:length(input.df$point.id)) {
# # print(input.df$namotaj.id[i])
# # print(input.df$side[i])
# # print(paste0('pt', input.df$point.id[i]))
# # print(input.df$loc.x[i])
# # print(input.df$loc.y[i])
# read.path <- paste0(
# namotaji.path,
# switch (
# input.df$side[i], 'NN' = 'NN_strana/n', 'VN' = 'VN_strana/n',
# 'bok' = 'bok_lijevo_desno/b'
# ),
# input.df$namotaj.id[i], '_', input.df$point.id[i]
# )
# # print(read.path)
# print('#######################')
# dat <- dat %>%
# add_row(
# !!!VibData(
# point.id = input.df$point.id[i],
# loc.x = input.df$loc.x[i],
# loc.y = input.df$loc.y[i],
# rib = input.df$side[i],
# replication = input.df$namotaj.id[i],
# decimal.separator = ",",
# file.loc = read.path
# )
# )
# }
# return(dat)
# }
################################################################################
################################################################################
# Data input namotaj 1 NN pt1
# Input n1_1
# dat <- VibData(
# point.id = input.df$point.id[1],
# loc.x = input.df$loc.x[1],
# loc.y = input.df$loc.y[1],
# rib = input.df$side[1],
# replication = input.df$namotaj.id[1],
# decimal.separator = ',',
# file.loc = paste0('2MVA/mjerenja/namotaji/NN_strana/n1_1')
# )
# Remove input data for first point (n1_1)
# print(slice(input.df, 1))
# input.df <- input.df %>% slice(-1)
# Input the rest of data points for n1, n2 and n3
# dat <- data.input.f(dat, input.df)
# Change replication to winding and rib to side
# dat <- dat %>% rename(wind = replication, side = rib)
# Select only necessary variables
# dat <- dat %>% select(-pacf, -displacement.orig)
# Recalculate FFT resolution because of sample length change
# dat <- dat %>% group_by(wind, side, point.id) %>%
# mutate(d.freq = sample.rate/length(na.omit(velocity.amp))/2) %>% ungroup()
# Sanity data check
# dat %>% glimpse()
# dat %>% select(range, d.time, d.freq, sample.rate, sample.num) %>% distinct() %>% View()
# dat %>% group_by(wind, side, point.id) %>% summarise(
# length = length(velocity),
# min = min(velocity),
# max = max(velocity),
# mean = mean(velocity),
# median = median(velocity),
# ) %>%
# summarise(
# min.min = min(min),
# max.max = max(max)
# ) %>% View()
# dat %>%
# select(wind, side, point.id, peak.freq.orig, peak.vel.orig, peak.frequency, peak.velocity.amp) %>%
# drop_na() %>% group_by(wind, side, point.id) %>% slice(1) %>%
# mutate(
# equal = if_else(
# (peak.freq.orig <= (peak.frequency + 0.2)) & (peak.freq.orig >= (peak.frequency - 0.2)), T, F)) %>% View()
# Remove unnecessary variables
# dat <- dat %>% select(-peak.freq.orig, -peak.vel.orig, -peak.disp.orig,
# -peak.acc.orig, -velocity.orig)
# Create subtitles for plots
# dat <- dat %>% group_by(wind, side, point.id) %>%
# mutate(subtitle =
# paste0('Nam: ', wind, ' ', side, ' pt', point.id, ': ', '(', loc.x, ', ', loc.y, ')')
# ) %>% ungroup()
# Save current workspace
# save.image('2MVA/namotaji.RData')
# Load the previously saved workspace (measurement tibble `dat` and related
# objects) instead of re-reading every raw measurement file above.
load('2MVA/namotaji.RData')
################################################################################
################################################################################
# Time-domain plot: velocity traces for selected points on winding 1.
dat %>%
  filter(
    wind == 1,
    (side == 'NN' & point.id %in% c(2, 6, 14)) |
      (side == 'VN' & point.id %in% c(3, 6, 9)) |
      (side == 'bok' & point.id %in% c(1, 10, 11))
  ) %>%
  select(time, velocity, point.id, wind, side, subtitle) %>%
  # Drop the leading winding prefix so facet strips show side/point/coords.
  mutate(subtitle = str_sub(subtitle, start = 8)) %>%
  ggplot(aes(x = time, y = velocity)) +
  facet_wrap(factor(subtitle, levels = mixedsort(unique(subtitle))) ~ .) +
  geom_line(color = 'black', size = 0.4) +
  scale_x_continuous(
    name = "Vrijeme [s]",
    breaks = seq(0, 7, 1),
    limits = c(NA, NA)
  ) +
  scale_y_continuous(
    name = expression('Brzina ['*mu*'m/s]'),
    limits = c(NA, NA)
  ) +
  labs(title = "Namotaj 1 - Vremenska domena") +
  theme_bw() +
  theme(
    plot.title = element_text(hjust = 0.5),
    axis.line = element_line(size = 0.5, colour = "black"),
    panel.spacing = unit(0.2, 'lines')
  )
ggsave(
  filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/nam_1_brzina_vrijeme.png',
  width = 17, height = 9, units = 'cm', dpi = 320
)
# Time-domain plot: velocity traces for selected points on winding 2
# (no flank surface was measured for this winding).
dat %>%
  filter(
    wind == 2,
    (side == 'NN' & point.id %in% c(6, 14, 15)) |
      (side == 'VN' & point.id %in% c(1, 6, 13))
  ) %>%
  select(time, velocity, point.id, wind, side, subtitle) %>%
  # Drop the leading winding prefix so facet strips show side/point/coords.
  mutate(subtitle = str_sub(subtitle, start = 8)) %>%
  ggplot(aes(x = time, y = velocity)) +
  facet_wrap(factor(subtitle, levels = mixedsort(unique(subtitle))) ~ .) +
  geom_line(color = 'black', size = 0.4) +
  scale_x_continuous(
    name = "Vrijeme [s]",
    breaks = seq(0, 7, 1),
    limits = c(NA, NA)
  ) +
  scale_y_continuous(
    name = expression('Brzina ['*mu*'m/s]'),
    limits = c(NA, NA)
  ) +
  labs(title = "Namotaj 2 - Vremenska domena") +
  theme_bw() +
  theme(
    plot.title = element_text(hjust = 0.5),
    axis.line = element_line(size = 0.5, colour = "black"),
    panel.spacing = unit(0.2, 'lines')
  )
ggsave(
  filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/nam_2_brzina_vrijeme.png',
  width = 17, height = 6.5, units = 'cm', dpi = 320
)
# Time-domain plot: velocity traces for selected points on winding 3.
dat %>%
  filter(
    wind == 3,
    (side == 'NN' & point.id %in% c(10, 13, 14)) |
      (side == 'VN' & point.id %in% c(6, 9, 15)) |
      (side == 'bok' & point.id %in% c(2, 10, 11))
  ) %>%
  select(time, velocity, point.id, wind, side, subtitle) %>%
  # Drop the leading winding prefix so facet strips show side/point/coords.
  mutate(subtitle = str_sub(subtitle, start = 8)) %>%
  ggplot(aes(x = time, y = velocity)) +
  facet_wrap(factor(subtitle, levels = mixedsort(unique(subtitle))) ~ .) +
  geom_line(color = 'black', size = 0.4) +
  scale_x_continuous(
    name = "Vrijeme [s]",
    breaks = seq(0, 7, 1),
    limits = c(NA, NA)
  ) +
  scale_y_continuous(
    name = expression('Brzina ['*mu*'m/s]'),
    limits = c(NA, NA)
  ) +
  labs(title = "Namotaj 3 - Vremenska domena") +
  theme_bw() +
  theme(
    plot.title = element_text(hjust = 0.5),
    axis.line = element_line(size = 0.5, colour = "black"),
    panel.spacing = unit(0.2, 'lines')
  )
ggsave(
  filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/nam_3_brzina_vrijeme.png',
  width = 17, height = 9, units = 'cm', dpi = 320
)
################################################################################
################################################################################
# Frequency-domain plot: FFT velocity-amplitude spectra for the same
# selected points on winding 1.
dat %>%
  filter(
    wind == 1,
    (side == 'NN' & point.id %in% c(2, 6, 14)) |
      (side == 'VN' & point.id %in% c(3, 6, 9)) |
      (side == 'bok' & point.id %in% c(1, 10, 11))
  ) %>%
  select(frequency, velocity.amp, point.id, wind, side, subtitle) %>%
  drop_na() %>%
  # Drop the leading winding prefix so facet strips show side/point/coords.
  mutate(subtitle = str_sub(subtitle, start = 8)) %>%
  ggplot(aes(x = frequency, y = velocity.amp)) +
  facet_wrap(factor(subtitle, levels = mixedsort(unique(subtitle))) ~ .) +
  geom_col(color = 'black', fill = 'black', width = 0.6, size = 0.6) +
  scale_x_continuous(
    name = "Frekvencija [Hz]",
    breaks = seq(0, 300, 50),
    limits = c(24, 301)
  ) +
  scale_y_continuous(
    name = expression('Amplituda brzine ['*mu*'m/s]'),
    limits = c(NA, NA)
  ) +
  labs(title = "Namotaj 1 - Frekvencijska domena") +
  theme_bw() +
  theme(
    plot.title = element_text(hjust = 0.5),
    axis.line = element_line(size = 0.5, colour = "black"),
    panel.spacing = unit(0.2, 'lines')
  )
ggsave(
  filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/nam_1_brzina_freq.png',
  width = 17, height = 9, units = 'cm', dpi = 320
)
# Frequency-domain plot: FFT velocity-amplitude spectra for winding 2.
dat %>%
  filter(
    wind == 2,
    (side == 'NN' & point.id %in% c(6, 14, 15)) |
      (side == 'VN' & point.id %in% c(1, 6, 13))
  ) %>%
  select(frequency, velocity.amp, point.id, wind, side, subtitle) %>%
  drop_na() %>%
  # Drop the leading winding prefix so facet strips show side/point/coords.
  mutate(subtitle = str_sub(subtitle, start = 8)) %>%
  ggplot(aes(x = frequency, y = velocity.amp)) +
  facet_wrap(factor(subtitle, levels = mixedsort(unique(subtitle))) ~ .) +
  geom_col(color = 'black', fill = 'black', width = 0.6, size = 0.6) +
  scale_x_continuous(
    name = "Frekvencija [Hz]",
    breaks = seq(0, 300, 50),
    limits = c(24, 301)
  ) +
  scale_y_continuous(
    name = expression('Amplituda brzine ['*mu*'m/s]'),
    limits = c(NA, NA)
  ) +
  labs(title = "Namotaj 2 - Frekvencijska domena") +
  theme_bw() +
  theme(
    plot.title = element_text(hjust = 0.5),
    axis.line = element_line(size = 0.5, colour = "black"),
    panel.spacing = unit(0.2, 'lines')
  )
ggsave(
  filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/nam_2_brzina_freq.png',
  width = 17, height = 6.5, units = 'cm', dpi = 320
)
# Frequency-domain plot: FFT velocity-amplitude spectra for winding 3.
dat %>%
  filter(
    wind == 3,
    (side == 'NN' & point.id %in% c(10, 13, 14)) |
      (side == 'VN' & point.id %in% c(6, 9, 15)) |
      (side == 'bok' & point.id %in% c(2, 10, 11))
  ) %>%
  select(frequency, velocity.amp, point.id, wind, side, subtitle) %>%
  drop_na() %>%
  # Drop the leading winding prefix so facet strips show side/point/coords.
  mutate(subtitle = str_sub(subtitle, start = 8)) %>%
  ggplot(aes(x = frequency, y = velocity.amp)) +
  facet_wrap(factor(subtitle, levels = mixedsort(unique(subtitle))) ~ .) +
  geom_col(color = 'black', fill = 'black', width = 0.6, size = 0.6) +
  scale_x_continuous(
    name = "Frekvencija [Hz]",
    breaks = seq(0, 300, 50),
    limits = c(24, 301)
  ) +
  scale_y_continuous(
    name = expression('Amplituda brzine ['*mu*'m/s]'),
    limits = c(NA, NA)
  ) +
  labs(title = "Namotaj 3 - Frekvencijska domena") +
  theme_bw() +
  theme(
    plot.title = element_text(hjust = 0.5),
    axis.line = element_line(size = 0.5, colour = "black"),
    panel.spacing = unit(0.2, 'lines')
  )
ggsave(
  filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/nam_3_brzina_freq.png',
  width = 17, height = 9, units = 'cm', dpi = 320
)
################################################################################
################################################################################
# Preliminary analysis (interactive: results are opened with View()).
# Frequency table of peak.frequency - how often each peak frequency occurs
# across all winding/side/point combinations.
dat %>% group_by(wind, side, point.id) %>%
  select(wind, side, point.id, peak.frequency, peak.velocity.amp) %>% drop_na() %>%
  ungroup() %>% count(peak.frequency) %>% View()
# Peak table with amplitudes rounded to whole units for easier visual
# comparison between points.
dat %>% group_by(wind, side, point.id) %>%
  select(wind, side, point.id, peak.frequency, peak.velocity.amp) %>% drop_na() %>%
  mutate(peak.velocity.amp = round(peak.velocity.amp)) %>% View()
# Fit only 100 Hz sine waves.
# Velocity is modelled as v(t) = om*(Ac*cos(om*t) + As*sin(om*t)) with
# om = 2*pi*100 and no intercept, i.e. the time derivative of a 100 Hz
# displacement sine - so the fitted coefficients Ac, As are displacement
# amplitudes. One lm() per winding/side/point via nest_by() + rowwise mutate.
dat_fit <- dat %>%
  mutate(
    om_100 = 2*pi*100,
    A.100.c.term = om_100*cos(om_100*time),
    A.100.s.term = om_100*sin(om_100*time),
  ) %>%
  nest_by(wind, side, point.id) %>%
  mutate(fitSine = list(lm(
    velocity ~ 0 + A.100.c.term + A.100.s.term,
    data = data)))
# Interactive inspection: overall fit quality (R^2 etc.) and coefficients.
dat_fit %>% summarise(glance(fitSine)) %>% View()
dat_fit %>% summarise(tidy(fitSine))
# Extract the fitted 100 Hz sine (predicted velocity trace) and R^2 for a
# single measurement point from dat_fit; used by the sine-fit example plot.
# Generalized: winding and side are now parameters whose defaults reproduce
# the original hard-coded behaviour (wind == 1, side == 'NN'), so existing
# calls are unaffected.
fit_to_data_ <- function(dat_fit, point, wind_id = 1, side_id = 'NN') {
  dat_fit %>% filter(
    wind == wind_id & point.id == point & side == side_id
  ) %>% summarise(
    fit.data = predict(fitSine),     # fitted values on the original grid
    r.squared = glance(fitSine)$r.squared
  )
}
# Example figure: measured vs. fitted 100 Hz sine for three NN points on
# winding 1 (points 4, 2, 6); facets are labelled with each fit's R^2, and
# the x-axis is zoomed to a 0.2 s window.
dat %>% filter(
  wind == 1 & point.id %in% c(4, 2, 6) & side == 'NN'
) %>%
  group_by(wind, side, point.id) %>%
  mutate(
    velocity.fit = fit_to_data_(dat_fit, unique(point.id))$fit.data,
    r.squared = fit_to_data_(dat_fit, unique(point.id))$r.squared
  ) %>%
  select(time, velocity, velocity.fit, point.id, wind, side, subtitle, r.squared) %>%
  mutate(
    # Reuse `subtitle` to carry the rounded R^2 used in the facet labeller.
    subtitle = round(r.squared, 2)
  ) %>%
  # Long format so measured and fitted traces become one colour-mapped column.
  pivot_longer(
    cols = c(velocity, velocity.fit),
    names_to = 'name_groups',
    values_to = 'velocity'
  ) %>%
  mutate(
    name_groups = case_when(
      name_groups == 'velocity' ~ 'Izmjereni',
      name_groups == 'velocity.fit' ~ 'Modelirani',
      TRUE ~ name_groups  # T -> TRUE
    )
  ) %>%
  ggplot(aes(x = time, y = velocity, color = name_groups)) +
  facet_grid(subtitle ~ .,
             labeller = label_bquote(italic(R)^2 * ' = ' * .(subtitle))) +
  geom_line(size = 0.6) +
  scale_y_continuous(
    name = expression('Brzina ['*mu*'m/s]'),
    limits = c(NA, NA)
  ) +
  scale_x_continuous(
    name = "Vrijeme [s]",
    breaks = seq(0, 7, 0.025),
    limits = c(2, 2.2)
  ) +
  scale_color_manual(
    name = 'Podaci: ',
    values = c('dodgerblue2', 'orangered1')
  ) +
  ggtitle("Primjer aproksimacije izmjerenih vibracija") +
  theme_bw() + theme(
    legend.position = 'top',
    panel.background = element_blank(),
    panel.spacing = unit(2, 'mm'),
    legend.box.spacing = unit(2, 'mm'),
    legend.spacing = unit(2, 'mm'),
    legend.margin = margin(0, 0, 0, 0, 'mm'),
    axis.title = element_text(face = "plain", size = 10),
    axis.text.x = element_text(colour = "black", size = 8),
    axis.text.y = element_text(colour = "black", size = 8),
    axis.line = element_line(size = 0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5)
  )
# NOTE(review): `pointsize` is forwarded to the graphics device via `...`;
# confirm it has an effect for the PNG device in use.
ggsave(filename = 'izvjestaj_final/slike/fit_sinusoida_primjer.png',
       width = 16, height = 8, units = 'cm', dpi = 320, pointsize = 12)
# Amplitudes and adj_r_squared.
# Convert the fitted sine coefficients into amplitudes per point:
#   A.100.x - displacement amplitude sqrt(Ac^2 + As^2)
#   A.100.v - velocity amplitude = 2*pi*100 * A.100.x
# then attach each fit's adjusted R^2. Column order (A.100.v before A.100.x)
# is deliberate - it propagates into the summary CSV written below.
dat_model <- dat_fit %>% summarise(tidy(fitSine)) %>%
  group_by(wind, side, point.id) %>%
  select(wind, side, point.id, term, estimate) %>%
  # One row per point with A.100.c.term / A.100.s.term as columns.
  pivot_wider(names_from = term, values_from = estimate) %>%
  summarise(
    A.100.v = 2*pi*100*sqrt(A.100.c.term^2 + A.100.s.term^2),
    A.100.x = sqrt(A.100.c.term^2 + A.100.s.term^2)
  ) %>% full_join(
    select(dat_fit %>% summarise(glance(fitSine)), wind, side, point.id,
           adj.r.squared),
    by = c('wind', 'side', 'point.id')
  )
dat_model %>% View()
# Summary of data with top 5 peaks and distinct frequency peaks.
# Builds a per-point report table (Croatian column names for the report):
# location, operating condition, time-domain velocity statistics, the first
# five FFT peaks, the amplitudes at the 25/50/100/200/300 Hz harmonics, and
# the modelled 100 Hz amplitudes from dat_model. Written to CSV and read
# back so later code can run from the file alone.
dat_summary <- dat %>%
  select(wind, side, point.id, loc.x, loc.y, velocity, peak.to.peak.vel,
         rms.vel, peak.frequency, peak.velocity.amp, frequency, d.freq,
         velocity.amp) %>%
  group_by(wind, side, point.id) %>%
  summarise(
    Namotaj = unique(wind), Strana=unique(side), Tocka = unique(point.id),
    Sirina = unique(loc.x), Visina = unique(loc.y),
    "Uvjet rada" = "Kratki spoj - 50 A - 1 kV primar",
    "Brzina min" = min(velocity), "Brzina max" = max(velocity),
    "Brzina sd" = sd(velocity),
    RMS = mean(rms.vel), 'Peak-to-peak' = unique(peak.to.peak.vel),
    # peak.frequency/peak.velocity.amp are assumed pre-sorted so that
    # indices 1..5 are the five largest peaks - TODO confirm upstream.
    "Peak freq 1" = peak.frequency[1],
    "Peak amp 1" = peak.velocity.amp[1],
    "Peak freq 2" = peak.frequency[2],
    "Peak amp 2" = peak.velocity.amp[2],
    "Peak freq 3" = peak.frequency[3],
    "Peak amp 3" = peak.velocity.amp[3],
    "Peak freq 4" = peak.frequency[4],
    "Peak amp 4" = peak.velocity.amp[4],
    "Peak freq 5" = peak.frequency[5],
    "Peak amp 5" = peak.velocity.amp[5],
    # Harmonic amplitudes: pick the FFT bin within d.freq/5 of the target
    # frequency. NOTE(review): assumes exactly one bin matches per point;
    # zero or multiple matches would break the one-row-per-group summarise.
    "25 Hz amp" = na.omit(velocity.amp[
      frequency >= (25.0-d.freq[1]/5) & frequency <= (25.0+d.freq[1]/5)
    ]),
    "50 Hz amp" = na.omit(velocity.amp[
      frequency >= (50.0-d.freq[1]/5) & frequency <= (50.0+d.freq[1]/5)
    ]),
    "100 Hz amp" = na.omit(velocity.amp[
      frequency >= (100.0-d.freq[1]/5) & frequency <= (100.0+d.freq[1]/5)
    ]),
    "200 Hz amp" = na.omit(velocity.amp[
      frequency >= (200.0-d.freq[1]/5) & frequency <= (200.0+d.freq[1]/5)
    ]),
    "300 Hz amp" = na.omit(velocity.amp[
      frequency >= (300.0-d.freq[1]/5) & frequency <= (300.0+d.freq[1]/5)
    ])
  ) %>%
  full_join(dat_model, by = c('wind', 'side', 'point.id')) %>%
  rename(
    'Model 100 Hz amp' = A.100.v,
    'Model 100 Hz disp amp' = A.100.x,
    'Model Adj R Squared' = adj.r.squared
  ) %>%
  ungroup() %>% select(-wind, -side, -point.id)
dat_summary %>%
  write_excel_csv2(
    path = '2MVA/preliminarna_obrada/aktivni_dio/namotaji_summary.csv'
  )
# Load data summary back from disk (types are then as parsed by read_csv2).
dat_summary <- read_csv2(
  file = '2MVA/preliminarna_obrada/aktivni_dio/namotaji_summary.csv'
)
################################################################################
################################################################################
# Point plots on the windings.
# Velocity amplitudes at 100 Hz, winding 1: one facet per surface, point
# size and fill both encode the amplitude, point id as label.
dat_summary %>%
  select(wind = Namotaj, side = Strana, point.id = Tocka, loc.x = Sirina,
         loc.y = Visina, amp.100 = `100 Hz amp`) %>%
  filter(wind == 1) %>%
  mutate(
    side = paste0('Strana: ', side)
  ) %>%
  ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
  facet_wrap(
    factor(
      side, levels = c('Strana: NN', 'Strana: bok', 'Strana: VN')) ~ .
  ) +
  coord_fixed() +
  geom_vline(aes(xintercept = 0)) +
  geom_point(aes(fill = amp.100, size = amp.100),
             shape = 21, colour = "black",
             stroke = 1  # fragile trailing comma removed
  ) +
  geom_text() +
  scale_y_continuous(
    name = expression('Visina '*italic('y')*' [mm]'),
    breaks = seq(0, 700, 50),
    limits = c(NA, NA)
  ) +
  scale_x_continuous(
    name = expression('Širina '*italic('x')*' [mm]'),
    breaks = seq(-150, 150, 50),
    limits = c(NA, NA)
  ) +
  scale_fill_viridis_c(
    name = expression('Amplituda ['*mu*'m/s]'),
    breaks = seq(0, 2000, 250),
    option = 'C',
    alpha = 1,
    begin = 0.3  # fragile trailing comma removed
  ) +
  scale_size_area(
    max_size = 13,
    guide = "none"
  ) +
  ggtitle("Namotaj: 1 - Amplitude brzine - 100 Hz") +
  theme_bw() +
  theme(
    axis.line = element_line(size = 0.5, colour = "black"),
    plot.title = element_text(hjust = 0.5),
    panel.spacing = unit(1, 'lines')
  )
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/amplitude_100_hz_nam_1.png',
       width = 25, height = 15, units = 'cm', dpi = 320)
# Plot amplituda brzine Namotaj 2 - 100 Hz
dat_summary %>%
select(wind = Namotaj, side = Strana, point.id = Tocka, loc.x = Sirina,
loc.y = Visina, amp.100 = `100 Hz amp`) %>%
filter(wind == 2) %>%
mutate(
side = paste0('Strana: ', side)
) %>%
ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
facet_wrap(
factor(
side, levels = c('Strana: NN', 'Strana: bok', 'Strana: VN')) ~ .
) +
coord_fixed() +
geom_vline(aes(xintercept = 0)) +
geom_point(aes(fill = amp.100, size = amp.100),
shape = 21, colour = "black",
stroke = 1,
) +
geom_text() +
scale_y_continuous(
name = expression('Visina '*italic('y')*' [mm]'),
breaks = seq(0, 700, 50),
limits = c(NA, NA)
) +
scale_x_continuous(
name = expression('Širina '*italic('x')*' [mm]'),
breaks = seq(-150, 150, 50),
limits = c(NA, NA)
) +
scale_fill_viridis_c(
name = expression('Amplituda ['*mu*'m/s]'),
breaks = seq(0, 800, 100),
option = 'C',
alpha = 1,
begin = 0.3,
) +
scale_size_area(
max_size = 7,
# range = c(5, 10),
guide = "none"
) +
ggtitle("Namotaj: 2 - Amplitude brzine - 100 Hz") +
theme_bw() +
theme(
axis.line = element_line(size=0.5, colour = "black"),
plot.title = element_text(hjust = 0.5),
panel.spacing = unit(1, 'lines')
)
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/amplitude_100_hz_nam_2.png',
width = 17, height = 15, units = 'cm', dpi = 320)
# Plot amplituda brzine Namotaj 3 - 100 Hz
dat_summary %>%
select(wind = Namotaj, side = Strana, point.id = Tocka, loc.x = Sirina,
loc.y = Visina, amp.100 = `100 Hz amp`) %>%
filter(wind == 3) %>%
mutate(
side = paste0('Strana: ', side)
) %>%
ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
facet_wrap(
factor(
side, levels = c('Strana: VN', 'Strana: bok', 'Strana: NN')) ~ .
) +
coord_fixed() +
geom_vline(aes(xintercept = 0)) +
geom_point(aes(fill = amp.100, size = amp.100),
shape = 21, colour = "black",
stroke = 1,
) +
geom_text() +
scale_y_continuous(
name = expression('Visina '*italic('y')*' [mm]'),
breaks = seq(0, 700, 50),
limits = c(NA, NA)
) +
scale_x_continuous(
name = expression('Širina '*italic('x')*' [mm]'),
breaks = seq(-150, 150, 50),
limits = c(NA, NA)
) +
scale_fill_viridis_c(
name = expression('Amplituda ['*mu*'m/s]'),
breaks = seq(0, 2000, 250),
option = 'C',
alpha = 1,
begin = 0.3,
) +
scale_size_area(
max_size = 13,
# range = c(5, 10),
guide = "none"
) +
ggtitle("Namotaj: 3 - Amplitude brzine - 100 Hz") +
theme_bw() +
theme(
axis.line = element_line(size=0.5, colour = "black"),
plot.title = element_text(hjust = 0.5),
panel.spacing = unit(1, 'lines')
)
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/amplitude_100_hz_nam_3.png',
width = 25, height = 15, units = 'cm', dpi = 320)
################################################################################
################################################################################
# Surface modelling
# Loading summary and adding coordinates for wind 1 and 3 from sides
dat_surf <- read_csv2(
file = '2MVA/preliminarna_obrada/aktivni_dio/namotaji_summary.csv'
) %>%
select(
wind = Namotaj, side = Strana, point.id = Tocka, x = Sirina, y = Visina,
amp.v.100 = `Model 100 Hz amp`, amp.x.100 = `Model 100 Hz disp amp`,
adj.r.sq = `Model Adj R Squared`
) %>%
mutate(
dist.wind = 202.5 * pi/ 2 + 50,
x.full = case_when(
(wind == 1 & side == 'NN') ~ as.integer(-dist.wind + x),
(wind == 1 & side == 'VN') ~ as.integer(dist.wind + x),
(wind == 3 & side == 'NN') ~ as.integer(dist.wind + x),
(wind == 3 & side == 'VN') ~ as.integer(-dist.wind + x),
TRUE ~ as.integer(x)
)
)
# Plotting coordinates for sanity check
dat_surf %>% filter(wind == 3) %>%
ggplot(aes(x = x.full, y = y, label = paste0(side, point.id))) + geom_text()
# Extremes for coding
dat_surf <- dat_surf %>% group_by(wind, side) %>%
select(wind, side, x, x.full, y, amp.v.100) %>%
mutate(
mean.x = mean(x),
extr.x = max(max(x) - mean.x, abs(min(x) - mean.x)),
mean.y = mean(y),
extr.y = max(max(y) - mean.y, abs(min(y) - mean.y))
) %>% ungroup()
dat_surf %>% distinct(wind, side, mean.x, extr.x, mean.y, extr.y) %>% View()
# Coding data
dat_coded <- coded.data(
data =
dat_surf %>%
filter(side == 'NN' & wind %in% c(3)) %>%
select(wind, x, y, amp.v.100) %>%
mutate(wind = as.factor(wind)),
x1 ~ (x + 0) / 159,
x2 ~ (y - 310.5) / 310.5
)
as.data.frame(dat_coded)
# RSM low r squared value
dat_coded.rsm <- rsm(amp.v.100 ~ SO(x1, x2) + PQ(x1, x2), data = dat_coded)
summary(dat_coded.rsm)
sqrt(mean(dat_coded.rsm$residuals^2))
# Gaussian process regression
X = dat_surf %>% filter(side == 'NN' & wind %in% c(3)) %>% select(x, y)
Z = dat_surf %>% filter(side == 'NN' & wind %in% c(3)) %>% select(amp.v.100)
dat_mod <- btgpllm(X = X, Z=Z)
# dat_mod
sqrt(mean((Z$amp.v.100 - dat_mod$Zp.mean)^2))
plot(dat_mod)
# Spatial data modelling - Kriging interpolation 100 Hz
# Kriging interpolation of a vibration-amplitude surface.
#
# Three-stage workflow driven by two flags:
#   1. plt_map = TRUE       -> only plot the experimental variogram map and stop.
#   2. variogram_only = TRUE -> fit the variogram model, plot the fit and stop.
#   3. otherwise            -> krige onto a regular grid, plot and return the
#                              prediction object.
#
# Arguments:
#   amp_df       data frame with columns x.full, y, wind and the amplitude
#                column named by `amp` (defaults to the global dat_surf).
#   amp          character name of the amplitude column to interpolate.
#   plt_map      if TRUE, show the variogram map only (early return).
#   variogram_only if TRUE, show the fitted variogram only (early return).
#   vgm_cutoff   maximum lag distance for the experimental variogram.
#   vgm_bin_num  number of lag bins (bin width = vgm_cutoff / vgm_bin_num).
#   vgm_model    gstat model code (e.g. 'Gau', 'Exp').
#   vgm_psill, vgm_range  partial sill / range; NA lets fit.variogram estimate.
#   vgm_alpha    directions (degrees) for the directional variogram.
#   vgm_an_angle, vgm_an_ratio  anisotropy direction and ratio (gstat `anis`).
#   grid_num     number of grid points per axis for the prediction grid.
#
# Returns: the krige() prediction object (SpatialPixelsDataFrame with
# var1.pred / var1.var), or NULL on the two early-return plotting paths.
#
# NOTE(review): relies on gstat, sp, dplyr and magrittr's %$% being attached.
krige.inter <- function(
  amp_df = dat_surf,
  amp = 'amp.v.100',
  plt_map = T,
  variogram_only = T,
  vgm_cutoff = 900,
  vgm_bin_num = 4,
  vgm_model = 'Gau',
  vgm_psill = NA,
  vgm_range = NA,
  vgm_alpha = seq(0, 135, 45),
  vgm_an_angle = 0,
  vgm_an_ratio = 1,
  grid_num = 300
) {
  # Build a SpatialPointsDataFrame: coordinates from (x.full, y), attribute
  # table holding the amplitude column (unquoted via !!) plus the winding id.
  dat_sp <- amp_df %$%
    SpatialPointsDataFrame(
      coords = select(., x.full, y),
      data = select(., !!amp, wind)
    )
  # Experimental (directional) variogram; trend formula "<amp> ~ wind"
  # removes the per-winding mean before computing semivariances.
  var_exp <- variogram(
    formula(paste0(amp, '~wind')),
    dat_sp, cutoff = vgm_cutoff, width = vgm_cutoff/vgm_bin_num,
    alpha=vgm_alpha, map = plt_map
  )
  # Early exit: only inspect the variogram map (used to pick anisotropy).
  if (plt_map) {
    print(plot(var_exp, threshold = 3))
    return()
  }
  # Theoretical variogram model to be fitted (NA psill/range are estimated).
  vgm_mod <- vgm(
    psill = vgm_psill, model = vgm_model,
    range = vgm_range, anis = c(vgm_an_angle, vgm_an_ratio)
  )
  # Fit the variogram model to the experimental data.
  var_fit <- fit.variogram(
    var_exp,
    model = vgm_mod)
  # Early exit: only inspect the fitted variogram (model-selection step).
  if (variogram_only) {
    print(plot(var_exp, var_fit))
    return()
  }
  # Grid extent from the data, padded by 3% on x and top-y; the y lower
  # bound is shrunk to 10% of min(y) so the grid reaches near the base.
  grid_limits <- amp_df %>%
    summarise(
      min.x = min(x.full)*1.03,
      max.x = max(x.full)*1.03,
      min.y = min(y)*0.1,
      max.y = max(y)*1.03
    )
  # Regular grid_num x grid_num prediction grid, converted to SpatialPixels.
  pred.grid <- expand.grid(
    x = seq(grid_limits$min.x, grid_limits$max.x, length.out = grid_num),
    y = seq(grid_limits$min.y, grid_limits$max.y, length.out = grid_num)
  )
  pred.grid <- SpatialPixels(SpatialPoints(pred.grid))
  # Ordinary kriging ("<amp> ~ 1": constant unknown mean) onto the grid.
  krige.pred <- krige(
    formula(paste0(amp, '~1')),
    locations = dat_sp,
    newdata = pred.grid,
    model = var_fit
  )
  print(spplot(krige.pred['var1.pred'], scales = list(draw = T)))
  # Return the prediction object so callers can post-process it.
  krige.pred
}
# Determining cutoff
dat_surf %>% filter(wind == 2 & side == 'VN') %>% select(x.full, y) %>%
summarise(
max.x = max(x.full),
min.x = min(x.full),
max.y = max(y),
min.y = min(y),
) %>%
mutate(
diag = sqrt((max.x - min.x)^2 + (max.y - min.y)^2),
diag.3 = diag/3,
cutoff.max = sqrt(2) / 2 * diag
)
# Testing parameters for interpolation
krige.inter(
amp_df = dat_surf %>% filter(wind == 1),
amp = 'amp.v.100',
plt_map = F,
variogram_only = F,
vgm_cutoff = 800,
vgm_bin_num = 6,
vgm_psill = NA,
vgm_range = NA,
vgm_model = 'Gau',
vgm_alpha = seq(0, 135, 45),
vgm_an_angle = 135,
vgm_an_ratio = 5/11,
grid_num = 300
)
# Creating tibble for contour and points plot
dat_disp_disp <- dat_surf %>%
select(-amp.v.100, -adj.r.sq, -x, dist.wind) %>%
rename(x = x.full) %>%
mutate(point.id = as.character(point.id)) %>%
full_join(by = c('wind', 'point.id', 'x', 'y', 'amp.x.100'),
y = tibble(
wind = 1,
point.id = 'model',
!!!as_tibble(krige.inter(
amp_df = dat_surf %>% filter(wind == 1),
amp = 'amp.x.100',
plt_map = F,
variogram_only = F,
vgm_cutoff = 800,
vgm_bin_num = 6,
vgm_psill = NA,
vgm_range = NA,
vgm_model = 'Gau',
vgm_alpha = seq(0, 135, 45),
vgm_an_angle = 135,
vgm_an_ratio = 5/11,
grid_num = 300
))
) %>% rename(amp.x.100 = var1.pred) %>% select(-var1.var)
) %>%
full_join(by = c('wind', 'point.id', 'x', 'y', 'amp.x.100'),
y = tibble(
wind = 3,
point.id = 'model',
!!!as_tibble(krige.inter(
amp_df = dat_surf %>% filter(wind == 3),
amp = 'amp.x.100',
plt_map = F,
variogram_only = F,
vgm_cutoff = 800,
vgm_bin_num = 6,
vgm_psill = NA,
vgm_range = NA,
vgm_model = 'Gau',
vgm_alpha = seq(0, 126, 31.5),
vgm_an_angle = 63,
vgm_an_ratio = 8/11,
grid_num = 300
))
) %>% rename(amp.x.100 = var1.pred) %>% select(-var1.var)
) %>%
full_join(by = c('wind', 'point.id', 'side', 'x', 'y', 'amp.x.100'),
y = tibble(
wind = 2,
side = 'NN',
point.id = 'model',
!!!as_tibble(krige.inter(
amp_df = dat_surf %>% filter(wind == 2 & side == 'NN'),
amp = 'amp.x.100',
plt_map = F,
variogram_only = F,
vgm_cutoff = 400,
vgm_bin_num = 3,
vgm_psill = NA,
vgm_range = NA,
vgm_model = 'Exp',
vgm_alpha = seq(0, 135, 135),
vgm_an_angle = 0,
vgm_an_ratio = 1/2,
grid_num = 300
))
) %>% rename(amp.x.100 = var1.pred) %>% select(-var1.var)
) %>%
full_join(by = c('wind', 'point.id', 'side', 'x', 'y', 'amp.x.100'),
y = tibble(
wind = 2,
side = 'VN',
point.id = 'model',
!!!as_tibble(krige.inter(
amp_df = dat_surf %>% filter(wind == 2 & side == 'VN'),
amp = 'amp.x.100',
plt_map = F,
variogram_only = F,
vgm_cutoff = 400,
vgm_bin_num = 3,
vgm_psill = NA,
vgm_range = NA,
vgm_model = 'Exp',
vgm_alpha = seq(0, 135, 135),
vgm_an_angle = 0,
vgm_an_ratio = 3/8,
grid_num = 300
))
) %>% rename(amp.x.100 = var1.pred) %>% select(-var1.var)
)
# Plot amplituda pomaka - Namotaj 1 - 100 Hz
dat_disp_disp %>%
filter(wind == 1) %>%
rename(loc.x = x, loc.y = y) %>%
mutate(
amp.x.100.cnt = case_when(
point.id == 'model' & amp.x.100 >= 0 ~ amp.x.100,
point.id == 'model' & amp.x.100 <= 0 ~ 0,
TRUE ~ NA_real_
),
amp.x.100 = case_when(
point.id == 'model' ~ NA_real_,
TRUE ~ amp.x.100
),
point.id = case_when(
point.id != 'model' ~ paste0(side, point.id),
TRUE ~ NA_character_
),
wind = paste0('Namotaj: ', wind)
) %>%
ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
geom_contour_filled(aes(z = amp.x.100.cnt), na.rm = T) +
geom_vline(aes(xintercept = 0)) +
geom_vline(aes(xintercept = dist.wind)) +
geom_vline(aes(xintercept = -dist.wind)) +
geom_text(aes(label = point.id), na.rm = T) +
coord_fixed() +
scale_y_continuous(
name = expression('Visina '*italic('y')*' [mm]'),
breaks = seq(0, 700, 50),
limits = c(NA, NA),
expand = c(0.02, 0.02)
) +
scale_x_continuous(
name = expression('Širina '*italic('x')*' [mm]'),
breaks = seq(-600, 600, 100),
limits = c(NA, NA),
expand = c(0.02, 0.02)
) +
scale_fill_viridis_d(
name = expression('Amplituda ['*mu*'m]'),
option = 'C',
alpha = 1,
begin = 0.3,
) +
ggtitle("Namotaj: 1 - Amplitude pomaka - 100 Hz") +
theme_bw() +
theme(
axis.line = element_line(size=0.5, colour = "black"),
plot.title = element_text(hjust = 0.5),
panel.spacing = unit(1, 'lines')
)
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/contour_disp_nam_1_100_hz.png',
width = 25, height = 15, units = 'cm', dpi = 320)
# Plot amplituda pomaka - Namotaj 3 - 100 Hz
dat_disp_disp %>%
filter(wind == 3) %>%
rename(loc.x = x, loc.y = y) %>%
mutate(
amp.x.100.cnt = case_when(
point.id == 'model' & amp.x.100 >= 0 ~ amp.x.100,
point.id == 'model' & amp.x.100 <= 0 ~ 0,
TRUE ~ NA_real_
),
amp.x.100 = case_when(
point.id == 'model' ~ NA_real_,
TRUE ~ amp.x.100
),
point.id = case_when(
point.id != 'model' ~ paste0(side, point.id),
TRUE ~ NA_character_
),
wind = paste0('Namotaj: ', wind)
) %>%
ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
geom_contour_filled(aes(z = amp.x.100.cnt), na.rm = T) +
geom_vline(aes(xintercept = 0)) +
geom_vline(aes(xintercept = dist.wind)) +
geom_vline(aes(xintercept = -dist.wind)) +
geom_text(aes(label = point.id), na.rm = T) +
coord_fixed() +
scale_y_continuous(
name = expression('Visina '*italic('y')*' [mm]'),
breaks = seq(0, 700, 50),
limits = c(NA, NA),
expand = c(0.02, 0.02)
) +
scale_x_continuous(
name = expression('Širina '*italic('x')*' [mm]'),
breaks = seq(-600, 600, 100),
limits = c(NA, NA),
expand = c(0.02, 0.02)
) +
scale_fill_viridis_d(
name = expression('Amplituda ['*mu*'m]'),
option = 'C',
alpha = 1,
begin = 0.3,
) +
ggtitle("Namotaj: 3 - Amplitude pomaka - 100 Hz") +
theme_bw() +
theme(
axis.line = element_line(size=0.5, colour = "black"),
plot.title = element_text(hjust = 0.5),
panel.spacing = unit(1, 'lines')
)
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/contour_disp_nam_3_100_hz.png',
width = 25, height = 15, units = 'cm', dpi = 320)
# Plot amplituda pomaka - Namotaj 2 - 100 Hz
dat_disp_disp %>%
filter(wind == 2) %>%
rename(loc.x = x, loc.y = y) %>%
mutate(
amp.x.100.cnt = case_when(
point.id == 'model' & amp.x.100 >= 0 ~ amp.x.100,
point.id == 'model' & amp.x.100 <= 0 ~ 0,
TRUE ~ NA_real_
),
amp.x.100 = case_when(
point.id == 'model' ~ NA_real_,
TRUE ~ amp.x.100
),
point.id = case_when(
point.id != 'model' ~ point.id,
TRUE ~ NA_character_
),
wind = paste0('Namotaj: ', wind),
side = paste0('Strana: ', side)
) %>%
ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
facet_wrap(side ~ ., scales = 'fixed') +
geom_contour_filled(aes(z = amp.x.100.cnt), na.rm = T) +
geom_vline(aes(xintercept = 0)) +
geom_text(aes(label = point.id), na.rm = T) +
coord_fixed() +
scale_y_continuous(
name = expression('Visina '*italic('y')*' [mm]'),
breaks = seq(0, 700, 50),
limits = c(NA, NA),
expand = c(0.02, 0.02)
) +
scale_x_continuous(
name = expression('Širina '*italic('x')*' [mm]'),
breaks = seq(-300, 300, 50),
limits = c(NA, NA),
expand = c(0.03, 0.03)
) +
scale_fill_viridis_d(
name = expression('Amplituda ['*mu*'m]'),
option = 'C',
alpha = 1,
begin = 0.3,
end = 0.8
) +
ggtitle("Namotaj: 2 - Amplitude pomaka - 100 Hz") +
theme_bw() +
theme(
axis.line = element_line(size=0.5, colour = "black"),
plot.title = element_text(hjust = 0.5),
panel.spacing = unit(1, 'lines')
)
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/contour_disp_nam_2_100_hz.png',
width = 25, height = 15, units = 'cm', dpi = 320)
## Modeling velocity
# Determining cutoff
dat_surf %>% filter(wind == 2 & side == 'VN') %>% select(x.full, y) %>%
summarise(
max.x = max(x.full),
min.x = min(x.full),
max.y = max(y),
min.y = min(y),
) %>%
mutate(
diag = sqrt((max.x - min.x)^2 + (max.y - min.y)^2),
diag.3 = diag/3,
cutoff.max = sqrt(2) / 2 * diag
)
# Testing parameters for interpolation
krige.inter(
amp_df = dat_surf %>% filter(wind == 2 & side == 'VN'),
amp = 'amp.v.100',
plt_map = F,
variogram_only = F,
vgm_cutoff = 400,
vgm_bin_num = 3,
vgm_psill = NA,
vgm_range = NA,
vgm_model = 'Exp',
vgm_alpha = seq(0, 135, 135),
vgm_an_angle = 0,
vgm_an_ratio = 3/8,
grid_num = 300
)
# Creating tibble for contour and points plot
dat_disp_vel <- dat_surf %>%
select(-amp.x.100, -adj.r.sq, -x, dist.wind) %>%
rename(x = x.full) %>%
mutate(point.id = as.character(point.id)) %>%
full_join(by = c('wind', 'point.id', 'x', 'y', 'amp.v.100'),
y = tibble(
wind = 1,
point.id = 'model',
!!!as_tibble(krige.inter(
amp_df = dat_surf %>% filter(wind == 1),
amp = 'amp.v.100',
plt_map = F,
variogram_only = F,
vgm_cutoff = 800,
vgm_bin_num = 6,
vgm_psill = NA,
vgm_range = NA,
vgm_model = 'Gau',
vgm_alpha = seq(0, 135, 45),
vgm_an_angle = 135,
vgm_an_ratio = 5/11,
grid_num = 300
))
) %>% rename(amp.v.100 = var1.pred) %>% select(-var1.var)
) %>%
full_join(by = c('wind', 'point.id', 'x', 'y', 'amp.v.100'),
y = tibble(
wind = 3,
point.id = 'model',
!!!as_tibble(krige.inter(
amp_df = dat_surf %>% filter(wind == 3),
amp = 'amp.v.100',
plt_map = F,
variogram_only = F,
vgm_cutoff = 800,
vgm_bin_num = 6,
vgm_psill = NA,
vgm_range = NA,
vgm_model = 'Gau',
vgm_alpha = seq(0, 126, 31.5),
vgm_an_angle = 63,
vgm_an_ratio = 8/11,
grid_num = 300
))
) %>% rename(amp.v.100 = var1.pred) %>% select(-var1.var)
) %>%
full_join(by = c('wind', 'point.id', 'side', 'x', 'y', 'amp.v.100'),
y = tibble(
wind = 2,
side = 'NN',
point.id = 'model',
!!!as_tibble(krige.inter(
amp_df = dat_surf %>% filter(wind == 2 & side == 'NN'),
amp = 'amp.v.100',
plt_map = F,
variogram_only = F,
vgm_cutoff = 400,
vgm_bin_num = 3,
vgm_psill = NA,
vgm_range = NA,
vgm_model = 'Exp',
vgm_alpha = seq(0, 135, 135),
vgm_an_angle = 0,
vgm_an_ratio = 1/2,
grid_num = 300
))
) %>% rename(amp.v.100 = var1.pred) %>% select(-var1.var)
) %>%
full_join(by = c('wind', 'point.id', 'side', 'x', 'y', 'amp.v.100'),
y = tibble(
wind = 2,
side = 'VN',
point.id = 'model',
!!!as_tibble(krige.inter(
amp_df = dat_surf %>% filter(wind == 2 & side == 'VN'),
amp = 'amp.v.100',
plt_map = F,
variogram_only = F,
vgm_cutoff = 400,
vgm_bin_num = 3,
vgm_psill = NA,
vgm_range = NA,
vgm_model = 'Exp',
vgm_alpha = seq(0, 135, 135),
vgm_an_angle = 0,
vgm_an_ratio = 3/8,
grid_num = 300
))
) %>% rename(amp.v.100 = var1.pred) %>% select(-var1.var)
)
# Plot amplituda brzine - Namotaj 1 - 100 Hz
dat_disp_vel %>%
filter(wind == 1) %>%
rename(loc.x = x, loc.y = y) %>%
mutate(
amp.v.100.cnt = case_when(
point.id == 'model' & amp.v.100 >= 0 ~ amp.v.100,
point.id == 'model' & amp.v.100 <= 0 ~ 0,
TRUE ~ NA_real_
),
amp.v.100 = case_when(
point.id == 'model' ~ NA_real_,
TRUE ~ amp.v.100
),
point.id = case_when(
point.id != 'model' ~ paste0(side, point.id),
TRUE ~ NA_character_
),
wind = paste0('Namotaj: ', wind)
) %>%
ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
geom_contour_filled(aes(z = amp.v.100.cnt), na.rm = T) +
geom_vline(aes(xintercept = 0)) +
geom_vline(aes(xintercept = dist.wind)) +
geom_vline(aes(xintercept = -dist.wind)) +
geom_text(aes(label = point.id), na.rm = T) +
coord_fixed() +
scale_y_continuous(
name = expression('Visina '*italic('y')*' [mm]'),
breaks = seq(0, 700, 50),
limits = c(NA, NA),
expand = c(0.02, 0.02)
) +
scale_x_continuous(
name = expression('Širina '*italic('x')*' [mm]'),
breaks = seq(-600, 600, 100),
limits = c(NA, NA),
expand = c(0.02, 0.02)
) +
scale_fill_viridis_d(
name = expression('Amplituda ['*mu*'m/s]'),
option = 'C',
alpha = 1,
begin = 0.3,
) +
ggtitle("Namotaj: 1 - Amplitude brzina - 100 Hz") +
theme_bw() +
theme(
axis.line = element_line(size=0.5, colour = "black"),
plot.title = element_text(hjust = 0.5),
panel.spacing = unit(1, 'lines')
)
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/contour_vel_nam_1_100_hz.png',
width = 25, height = 15, units = 'cm', dpi = 320)
# Plot amplituda brzina - Namotaj 3 - 100 Hz
dat_disp_vel %>%
filter(wind == 3) %>%
rename(loc.x = x, loc.y = y) %>%
mutate(
amp.v.100.cnt = case_when(
point.id == 'model' & amp.v.100 >= 0 ~ amp.v.100,
point.id == 'model' & amp.v.100 <= 0 ~ 0,
TRUE ~ NA_real_
),
amp.v.100 = case_when(
point.id == 'model' ~ NA_real_,
TRUE ~ amp.v.100
),
point.id = case_when(
point.id != 'model' ~ paste0(side, point.id),
TRUE ~ NA_character_
),
wind = paste0('Namotaj: ', wind)
) %>%
ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
geom_contour_filled(aes(z = amp.v.100.cnt), na.rm = T) +
geom_vline(aes(xintercept = 0)) +
geom_vline(aes(xintercept = dist.wind)) +
geom_vline(aes(xintercept = -dist.wind)) +
geom_text(aes(label = point.id), na.rm = T) +
coord_fixed() +
scale_y_continuous(
name = expression('Visina '*italic('y')*' [mm]'),
breaks = seq(0, 700, 50),
limits = c(NA, NA),
expand = c(0.02, 0.02)
) +
scale_x_continuous(
name = expression('Širina '*italic('x')*' [mm]'),
breaks = seq(-600, 600, 100),
limits = c(NA, NA),
expand = c(0.02, 0.02)
) +
scale_fill_viridis_d(
name = expression('Amplituda ['*mu*'m/s]'),
option = 'C',
alpha = 1,
begin = 0.3,
) +
ggtitle("Namotaj: 3 - Amplitude brzina - 100 Hz") +
theme_bw() +
theme(
axis.line = element_line(size=0.5, colour = "black"),
plot.title = element_text(hjust = 0.5),
panel.spacing = unit(1, 'lines')
)
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/contour_vel_nam_3_100_hz.png',
width = 25, height = 15, units = 'cm', dpi = 320)
# Plot amplituda pomaka - Namotaj 2 - 100 Hz
dat_disp_vel %>%
filter(wind == 2) %>%
rename(loc.x = x, loc.y = y) %>%
mutate(
amp.v.100.cnt = case_when(
point.id == 'model' & amp.v.100 >= 0 ~ amp.v.100,
point.id == 'model' & amp.v.100 <= 0 ~ 0,
TRUE ~ NA_real_
),
amp.v.100 = case_when(
point.id == 'model' ~ NA_real_,
TRUE ~ amp.v.100
),
point.id = case_when(
point.id != 'model' ~ point.id,
TRUE ~ NA_character_
),
wind = paste0('Namotaj: ', wind),
side = paste0('Strana: ', side)
) %>%
ggplot(aes(x = loc.x, y = loc.y, label = point.id)) +
facet_wrap(side ~ ., scales = 'fixed') +
geom_contour_filled(aes(z = amp.v.100.cnt), na.rm = T) +
geom_vline(aes(xintercept = 0)) +
geom_text(aes(label = point.id), na.rm = T) +
coord_fixed() +
scale_y_continuous(
name = expression('Visina '*italic('y')*' [mm]'),
breaks = seq(0, 700, 50),
limits = c(NA, NA),
expand = c(0.02, 0.02)
) +
scale_x_continuous(
name = expression('Širina '*italic('x')*' [mm]'),
breaks = seq(-300, 300, 50),
limits = c(NA, NA),
expand = c(0.03, 0.03)
) +
scale_fill_viridis_d(
name = expression('Amplituda ['*mu*'m/s]'),
option = 'C',
alpha = 1,
begin = 0.3,
end = 0.8
) +
ggtitle("Namotaj: 2 - Amplitude brzina - 100 Hz") +
theme_bw() +
theme(
axis.line = element_line(size=0.5, colour = "black"),
plot.title = element_text(hjust = 0.5),
panel.spacing = unit(1, 'lines')
)
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/contour_vel_nam_2_100_hz.png',
width = 25, height = 15, units = 'cm', dpi = 320)
################################################################################
################################################################################
# Data input referenca dok radi samo generator
# Input referenca_25Hz
dat_ref <- VibData(
point.id = 'ref',
loc.x = 0,
loc.y = 0,
rib = F,
replication = 1,
decimal.separator = ',',
file.loc = paste0('2MVA/mjerenja/namotaji/NN_strana/referenca_25Hz')
)
dat_ref <- dat_ref %>%
add_row(
!!!VibData(
point.id = 'ref',
loc.x = 0,
loc.y = 0,
rib = F,
replication = 2,
decimal.separator = ",",
file.loc = paste0('2MVA/mjerenja/namotaji/NN_strana/referenca_25Hz2')
)
)
# Frekvencijska domena plot referentne tocke
dat_ref %>%
select(frequency, velocity.amp, point.id, replication) %>%
drop_na() %>%
mutate(subtitle = paste0('Referentno mjerenje: ', replication)) %>%
ggplot(aes(x = frequency, y = velocity.amp)) +
facet_wrap(factor(subtitle, levels = mixedsort(unique(subtitle))) ~ .) +
geom_col(color= 'black', fill='black', width=0.6, size = 0.6) +
scale_y_continuous(
name = expression('Amplituda brzine ['*mu*'m/s]'),
# breaks = seq(0, 1000, 500),
limits = c(NA, NA)
) +
scale_x_continuous(
name = "Frekvencija [Hz]",
breaks = seq(0, 200, 25),
limits = c(0, 201)
# limits = c(0, 1000)
) +
ggtitle("Utjecaj generatora - Transformator ne radi") +
theme_bw() +
theme(
axis.line = element_line(size=0.5, colour = "black"),
plot.title = element_text(hjust = 0.5),
panel.spacing = unit(0.2, 'lines')
)
ggsave(filename = '2MVA/preliminarna_obrada/aktivni_dio/slike/ref_vibracije.png',
width = 17, height = 4.5, units = 'cm', dpi = 320)
|
## functions in R
"
function_name <- function(arg_1, arg_2, ...) {
Function body
}
"
#Built-in
#To calculate sum of numbers in R
v<-c(1,2,3,4,5)
#return the sum of the numbers in v
sum(v)
#returns the average value of the numbers in v
mean(v)
##Custom functions
#To write a customized function for squaring
# Raise every element of `x` to the power 2. Because `^` is vectorized,
# the result keeps the shape of the input (scalar, vector or data frame).
square <- function(x) {
  x^2
}
#Try out the following and observe the output
square(FALSE)
v<-c(1,2,3,4,5)
square(v)
y<-data.frame(A=c(1,2,3,4),B=c(5,6,7,8))
square(y)
# some of the useful functions in data manipulations ##
# Apply:
# Lapply: to use apply on a vector and return a list
# Tapply: gives a table wrt to a categorical attribute
# Sapply
#1. Apply function
m <- matrix(c(1:10, 11:20), nrow = 10, ncol = 2)
dim(m)
m
# find mean of the row
apply(m,1,mean)
# find mean of the column
apply(m,2, mean)
# divide entire matrix by 2
apply(m, 1:2, function(x) x/2)
#2. lapply
# Create a list of 2 elements
l <- list(a= (1:10),b= (21:30))
#mean of values in each element
lapply(l, mean)
# sum of values in each element using lapply function
lapply(l,sum)
# 3. Sapply
x <- list(a = 1, b = 1:3, c = 10:100)
#Compare with above; a named vector, not a list
sapply(x, FUN = length)
sapply(x, FUN = sum)
# 4. mapply
l1 <- list(a = c(1:10), b = c(11:20))
l2 <- list(c = c(21:30), d = c(31:40))
# sum the corresponding elements of l1 and l2
mapply(sum, l1$a, l1$b, l2$c, l2$d)
## sum of a[1]+b[1]+c[1]+d[1] = 1+11+21+31 = 64
# 5. tapply
attach(iris)
str(iris)
# Let us calculate the mean of the Sepal Length
mean(iris$Sepal.Length)
# calculate the mean of the Sepal Length but broken by the Species
tapply(iris$Sepal.Length,iris$Species, mean)
# Example with MtCars
attach(mtcars)
data <- mtcars
##Want to find max value for each column
apply(data,2,min) #This generates the min values for each numeric attribute
apply(data,1,max)
##writing this to a data frame
A<-apply(data[,2:11],2,min)
A<-data.frame(min=apply(data[,2:11],2,min))
B<-apply(data[,2:11],2,max)
##We can find the stats for each of the variable separately
##If we want to have all the stats in a data frame we can write a customized function for this
# Summarise a numeric vector as a one-row data frame with the columns
# Min, Mean and Max (in that order). Intended for use with apply() over
# the columns of a data frame, e.g. apply(df, 2, FUN = stat).
#
# The original version assigned to quoted names ("Mean"=mean(x), ...) and
# then relied on those implicit local variables inside data.frame(); the
# direct named-argument construction below is equivalent but idiomatic.
stat <- function(x) {
  data.frame(Min = min(x), Mean = mean(x), Max = max(x))
}
stats<-apply(data[,2:11],2,FUN=stat) ##Observe the ouptput of apply.. it is a list
result<-do.call(rbind,stats)
##Subsetting##
##This might form an important aspect in Data analysis where we might want to work on a subset of data
##Subset on vectors
v<-c(1,2,3,4,5)
v[v>3] #Output all elements greater than 3
##Subset on matrices and data frames
#a. Calling by cell positions
library(dplyr)
data1<-data[,2:11]
data1<-data[1:10,2:11]
#b. By using column names
data1<-data[,c("mpg","cyl")]
name<-c("mpg","cyl","disp","hp")
data1<-data[names(data)%in% name] ## %in% comes in handy for subsetting
select(data,mpg,cyl,disp,hp)
#c. Using a subset function ##from help identify the argument to be given
data1<-subset(data,mpg>25,select=mpg:carb) #From data extracts all the records whose mpg>25 and all columns
#d. The same dataframe can be obtained in another way
data1<-data[mpg>25,]
#e. The same dataframe can be obtained in another way using dplyr
x<-filter(data,mpg>25)
x
x <- 1:100
filter(x, rep(1, 3))
# check filter
##Multiple conditions can be given using "&" or "|"
data2<-data[mpg>25 & hp>75,]
data2<-subset(data,mpg>25 | gear==5,select=mpg:carb)
filter(data,mpg>25 | gear==5)
##Using which.max
data[which.max(mpg),]
##Using which.min
data[which.min(mpg),]
##Using which
data[which(data$mpg==max(data$mpg)),]
data[which(row.names(data) %in% c("Mazda RX4","Datsun 710")),]
| /Day 4 R DataTypes, Data Structure, Apply functions/Basics 2 Functions.r | no_license | AyubQuadri/Data-Science-Sessions | R | false | false | 4,095 | r | ## functions in R
"
function_name <- function(arg_1, arg_2, ...) {
Function body
}
"
#Built-in
#To calculate sum of numbers in R
v<-c(1,2,3,4,5)
#return the sum of the numbers in v
sum(v)
#returns the average value of the numbers in v
mean(v)
##Custom functions
#To write a customized function for squaring
# Raise every element of `x` to the power 2. Because `^` is vectorized,
# the result keeps the shape of the input (scalar, vector or data frame).
square <- function(x) {
  x^2
}
#Try out the following and observe the output
square(FALSE)
v<-c(1,2,3,4,5)
square(v)
y<-data.frame(A=c(1,2,3,4),B=c(5,6,7,8))
square(y)
# some of the useful functions in data manipulations ##
# Apply:
# Lapply: to use apply on a vector and return a list
# Tapply: gives a table wrt to a categorical attribute
# Sapply
#1. Apply function
m <- matrix(c(1:10, 11:20), nrow = 10, ncol = 2)
dim(m)
m
# find mean of the row
apply(m,1,mean)
# find mean of the column
apply(m,2, mean)
# divide entire matrix by 2
apply(m, 1:2, function(x) x/2)
#2. lapply
# Crate a list of 2 elements
l <- list(a= (1:10),b= (21:30))
#mean of values in each element
lapply(l, mean)
# sum of values in each element using lapply function
lapply(l,sum)
# 3. Sapply
x <- list(a = 1, b = 1:3, c = 10:100)
#Compare with above; a named vector, not a list
sapply(x, FUN = length)
sapply(x, FUN = sum)
# 4. mapply
l1 <- list(a = c(1:10), b = c(11:20))
l2 <- list(c = c(21:30), d = c(31:40))
# sum the corresponding elements of l1 and l2
mapply(sum, l1$a, l1$b, l2$c, l2$d)
## sum of a[1]+b[1]+c[1]+d[1] = 1+11+21+31 = 64
# 5. tapply
attach(iris)
str(iris)
# Let us calculate the mean of the Sepal Length
mean(iris$Sepal.Length)
# calculate the mean of the Sepal Length but broken by the Species
tapply(iris$Sepal.Length,iris$Species, mean)
# Example with MtCars
attach(mtcars)
data <- mtcars
##Want to find max value for each column
apply(data,2,min) #This generates the min values for each numeric attribute
apply(data,1,max)
##writing this to a data frame
A<-apply(data[,2:11],2,min)
A<-data.frame(min=apply(data[,2:11],2,min))
B<-apply(data[,2:11],2,max)
##We can find the stats for each of the variable separately
##If we want to have all the stats in a data frame we can write a customized function for this
# Summarise a numeric vector as a one-row data frame with the columns
# Min, Mean and Max (in that order). Intended for use with apply() over
# the columns of a data frame, e.g. apply(df, 2, FUN = stat).
#
# The original version assigned to quoted names ("Mean"=mean(x), ...) and
# then relied on those implicit local variables inside data.frame(); the
# direct named-argument construction below is equivalent but idiomatic.
stat <- function(x) {
  data.frame(Min = min(x), Mean = mean(x), Max = max(x))
}
stats<-apply(data[,2:11],2,FUN=stat) ##Observe the ouptput of apply.. it is a list
result<-do.call(rbind,stats)
##Subsetting##
##This might form an important aspect in Data analysis where we might want to work on a subset of data
##Subset on vectors
v<-c(1,2,3,4,5)
v[v>3] #Output all elements greater than 3
##Subset on matrices and data frames
#a. Calling by cell positions
library(dplyr)
data1<-data[,2:11]
data1<-data[1:10,2:11]
#b. By using column names
data1<-data[,c("mpg","cyl")]
name<-c("mpg","cyl","disp","hp")
data1<-data[names(data)%in% name] ## %in% comes in handy for subsetting
select(data,mpg,cyl,disp,hp)
#c. Using a subset function ##from help identify the argument to be given
data1<-subset(data,mpg>25,select=mpg:carb) #From data extracts all the records whose mpg>25 and all columns
#d. The same dataframe can be obtained in another way
data1<-data[mpg>25,]
#e. The same dataframe can be obtained in another way using dplyr
x<-filter(data,mpg>25)
x
x <- 1:100
filter(x, rep(1, 3))
# check filter
##Multiple conditions can be given using "&" or "|"
data2<-data[mpg>25 & hp>75,]
data2<-subset(data,mpg>25 | gear==5,select=mpg:carb)
filter(data,mpg>25 | gear==5)
##Using which.max
data[which.max(mpg),]
##Using which.min
data[which.min(mpg),]
##Using which
data[which(data$mpg==max(data$mpg)),]
data[which(row.names(data) %in% c("Mazda RX4","Datsun 710")),]
|
#### This script downloads the latest measurement tables and refreshes the
#### merged data set cached for the shiny app.
library(lubridate);library(shiny)
library(data.table)
library(ggplot2)
library(dplyr)
library(curl)
#### Update data
# Fetch the full measurement table from Dropbox (?dl=1 forces a direct
# download instead of the Dropbox preview page).
allData <- fread( "https://www.dropbox.com/s/ngaexvxlazshb0j/allData.csv?dl=1")
# Convert the dates column to POSIXct (fread leaves it as text).
allData$dates <- as.POSIXct(allData$dates)
# allData$longName[which(allData$longName=="07AA SS58")] <- "07AA SS58 GRID"
# nameIDs <- matrix(unlist(strsplit(allData$longName," ")),nrow(allData),3,byrow = T)
# reordNames <- paste(nameIDs[,2],nameIDs[,3],nameIDs[,1])
# allData$longName <- reordNames
# Fetch the quality-check table and parse its soil-measurement timestamp.
qualityCheck <- fread( "https://www.dropbox.com/s/z38ffeqv7akraov/qualCheck.csv?dl=1")
qualityCheck$last_soilMes <- as.POSIXct(qualityCheck$last_soilMes)
# Attach columns 1 and 3 of the quality-check table to the measurements;
# merge() joins on shared column names — presumably a sensor id (TODO confirm).
allData <- merge(allData,qualityCheck[,c(1,3)])
save(allData,file="allData.rdata") | /shinyApp/updateData.r | no_license | ForModLabUHel/valeDaLama | R | false | false | 796 | r | ####this script is to read new file and update the data
# Download the latest measurement tables and refresh the merged data set
# cached for the shiny app.
library(lubridate);library(shiny)
library(data.table)
library(ggplot2)
library(dplyr)
library(curl)
#### Update data
# Fetch the full measurement table from Dropbox (?dl=1 forces a direct
# download instead of the Dropbox preview page).
allData <- fread( "https://www.dropbox.com/s/ngaexvxlazshb0j/allData.csv?dl=1")
# Convert the dates column to POSIXct (fread leaves it as text).
allData$dates <- as.POSIXct(allData$dates)
# allData$longName[which(allData$longName=="07AA SS58")] <- "07AA SS58 GRID"
# nameIDs <- matrix(unlist(strsplit(allData$longName," ")),nrow(allData),3,byrow = T)
# reordNames <- paste(nameIDs[,2],nameIDs[,3],nameIDs[,1])
# allData$longName <- reordNames
# Fetch the quality-check table and parse its soil-measurement timestamp.
qualityCheck <- fread( "https://www.dropbox.com/s/z38ffeqv7akraov/qualCheck.csv?dl=1")
qualityCheck$last_soilMes <- as.POSIXct(qualityCheck$last_soilMes)
# Attach columns 1 and 3 of the quality-check table to the measurements;
# merge() joins on shared column names — presumably a sensor id (TODO confirm).
allData <- merge(allData,qualityCheck[,c(1,3)])
save(allData,file="allData.rdata") |
#ui.R
ui <- fluidPage(
titlePanel("Wine Quality"),
sidebarPanel(
helpText("Show the information of chemical properties of wine."),
selectInput("Var",
label = "Choose a variable to display :",
choices = c("Distribution of All Variables","Correlation","Outliers"),
selected = "Distribution of All Variables")
),
mainPanel(plotOutput("barChart"))) | /Data Mining Part 1/ui.R | no_license | playnice/Data-Mining-Project | R | false | false | 427 | r | #ui.R
ui <- fluidPage(
titlePanel("Wine Quality"),
sidebarPanel(
helpText("Show the information of chemical properties of wine."),
selectInput("Var",
label = "Choose a variable to display :",
choices = c("Distribution of All Variables","Correlation","Outliers"),
selected = "Distribution of All Variables")
),
mainPanel(plotOutput("barChart"))) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{deleteAnalysis}
\alias{deleteAnalysis}
\title{Delete Analysis}
\usage{
deleteAnalysis(object, which.assay, which.analysis, verbose = TRUE)
}
\arguments{
\item{object}{calibration object}
\item{which.assay}{specifies assay}
\item{which.analysis}{specifies analysis}
\item{verbose}{print progress}
}
\value{
character
}
\description{
Delete analysis from specified assay of calibration object
}
| /man/deleteAnalysis.Rd | permissive | BWillieLab/XcalRep | R | false | true | 500 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{deleteAnalysis}
\alias{deleteAnalysis}
\title{Delete Analysis}
\usage{
deleteAnalysis(object, which.assay, which.analysis, verbose = TRUE)
}
\arguments{
\item{object}{calibration object}
\item{which.assay}{specifies assay}
\item{which.analysis}{specifies analysis}
\item{verbose}{print progress}
}
\value{
character
}
\description{
Delete analysis from specified assay of calibration object
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/knutils.R
\name{slimknclusters}
\alias{slimknclusters}
\title{Remove overlapping clusters}
\usage{
slimknclusters(d, knresults, minsize = 1)
}
\arguments{
\item{d}{Data.frame with data used in the detection of clusters.}
\item{knresults}{Object returned by function opgam() with the clusters detected.}
\item{minsize}{Minimum size of cluster (default to 1).}
}
\value{
A subset of knresults with non-overlaping clusters of at least
minsize size.
}
\description{
This function slims the number of clusters down.
The spatial scan statistic is known to detect duplicated
clusters. This function aims to reduce the number of clusters
by removing duplicated and overlapping clusters.
}
\examples{
data("brainNM_clusters")
nm.cl1.s <- slimknclusters(brainst, nm.cl1)
nm.cl1.s
}
| /man/slimknclusters.Rd | no_license | cran/DClusterm | R | false | true | 853 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/knutils.R
\name{slimknclusters}
\alias{slimknclusters}
\title{Remove overlapping clusters}
\usage{
slimknclusters(d, knresults, minsize = 1)
}
\arguments{
\item{d}{Data.frame with data used in the detection of clusters.}
\item{knresults}{Object returned by function opgam() with the clusters detected.}
\item{minsize}{Minimum size of cluster (default to 1).}
}
\value{
A subset of knresults with non-overlaping clusters of at least
minsize size.
}
\description{
This function slims the number of clusters down.
The spatial scan statistic is known to detect duplicated
clusters. This function aims to reduce the number of clusters
by removing duplicated and overlapping clusters.
}
\examples{
data("brainNM_clusters")
nm.cl1.s <- slimknclusters(brainst, nm.cl1)
nm.cl1.s
}
|
testlist <- list(data = structure(c(3.94108708470682e-312, 1.72759795870984e-260, 4.94078866277842e+131, 7.00072806654748e-304, 3.52953696509973e+30, 3.52959258041992e+30, 4.46014903970612e+43, 0), .Dim = c(1L, 8L)), q = 0)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) | /biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610557035-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 296 | r | testlist <- list(data = structure(c(3.94108708470682e-312, 1.72759795870984e-260, 4.94078866277842e+131, 7.00072806654748e-304, 3.52953696509973e+30, 3.52959258041992e+30, 4.46014903970612e+43, 0), .Dim = c(1L, 8L)), q = 0)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) |
x<-1:10
x
| /code.R | no_license | SabrinaGuy/productivitytools-0 | R | false | false | 10 | r | x<-1:10
x
|
library(tidyverse)
setwd('/Users/lancecundy/Documents/Research/Nielsen/EIS')
mydata <- read.csv('UseData/SampleData.csv')
# mydata %>%
# group_by(household_code) %>%
# count() %>%
# ungroup()
#
# # For each household i, construct a gride of quantiles, /tau
#
# mydata.i <- mydata %>%
# filter(household_code == 2000000)
mydata.i <- mydata %>%
group_by(household_code) %>%
mutate(household_income = rand(1)*100) %>%
ungroup()
source("Code/dCGKL_2018_code/gmmq.R")
## Setup Data
Y <- as.matrix(mydata.i$Y)
n <- nrow(Y)
#X.excl <- matrix(data=1, nrow=n, ncol=1)
X.excl <- cbind(matrix(data=1, nrow=n, ncol=1), mydata.i$household_income)
D <- as.matrix(mydata.i$LogR)
Z.excl <- as.matrix(cbind(mydata.i$YInst, mydata.i$Lag2LogNomR, mydata.i$Lag2Inf))
Z <- cbind(Z.excl, X.excl)
X <- cbind(D, X.excl)
## Yogo (2004) 2SLS log-linear estimator as a starting point
PZ <- Z %*% solve(t(Z)%*%Z) %*% t(Z)
StartingPointReg <- solve(t(X)%*%PZ%*%X) %*% (t(X)%*%PZ%*%Y)
####################################################################
##### Setup Functions #####
####################################################################
# conv.fn convert log-linear fn's (b[1],b[2])=(slope, constant) to (beta, gamma)
conv.fn <- function(b) c(exp(b[2]/b[1]), 1/b[1]) #convert log-linear parameters to (beta,gamma)
conv.inv.fn <- function(b) c(log(b[2])/b[1], 1/b[1])
# conv.fn convert log-linear fn's (b[1],b[2])=(slope, constant) to (beta, EIS)
conv2.fn <- function(b) c(exp(b[2]/b[1]), b[1]) #convert log-linear parameters to (beta,EIS)
conv2.inv.fn <- function(b) c(log(b[2])*b[1], b[1])
conv3.fn <- function(b) c(b[1],1/b[2]) # convert (beta,gamma) to (beta,EIS)
conv3.inv.fn <- function(b) c(b[1],1/b[2])
# Residual/Lambda functions (and derivatives) for smoothed MM estimation
Lfn.gmmq <- function(y,x,b) y[,1]-cbind(y[,-1],x)%*%b #log-linear
Ldfn.gmmq <- function(y,x,b) -cbind(y[,-1],x)
Lfn2b.gmmq <- function(y,x,b) -Lfn.gmmq(y,x,b) #-y[,1]+cbind(x,y[,-1])%*%b #log-linear, 1-tau
Ldfn2b.gmmq <- function(y,x,b) -Ldfn.gmmq(y,x,b) #cbind(x,y[,-1])
Lfn2.gmmq <- function(y,x,b) b[1]*exp(y[,1])^(-b[2])*exp(y[,2]) - 1 #nonlinear (beta,gamma)
Ldfn2.gmmq <- function(y,x,b) cbind((Lfn2.gmmq(y=y,x=x,b=b)+1) / b[1],
-y[,1]*(Lfn2.gmmq(y=y,x=x,b=b)+1))
Lfn22.gmmq <- function(y,x,b) b[1]*exp(y[,1])^(-1/b[2])*exp(y[,2]) - 1 #nonlinear (beta,EIS)
Ldfn22.gmmq <- function(y,x,b) cbind((Lfn2.gmmq(y=y,x=x,b=b)+1) / b[1],
y[,1]*(Lfn2.gmmq(y=y,x=x,b=b)+1)/b[2]^2)
# Residual/Lambda functions (and derivatives) for smoothed GMM estimation
Lfn <- function(y,x,b) y-x%*%b #log-linear
Ldfn <- function(y,x,b) -x
Lfn2b <- function(y,x,b) -Lfn(y,x,b) #-y+x%*%b #log-linear, 1-tau
Ldfn2b <- function(y,x,b) -Ldfn(y,x,b) #x
Lfn2 <- function(y,x,b) b[1]*exp(y)^(-b[2])*exp(x[,1]) - 1 #nonlinear (beta,gamma)
Ldfn2 <- function(y,x,b) cbind((Lfn2(y=y,x=x,b=b)+1) / b[1],
-y*(Lfn2(y=y,x=x,b=b)+1))
Lfn22 <- function(y,x,b) b[1]*exp(y)^(-1/b[2])*exp(x[,1]) - 1 #nonlinear (beta,gamma)
Ldfn22 <- function(y,x,b) cbind((Lfn2(y=y,x=x,b=b)+1) / b[1],
y*(Lfn2(y=y,x=x,b=b)+1)/b[2]^2)
####################################################################
## Initialize Variables
dimX <- 3
H.HUGE <- 0.001
tau<-seq(0.1,0.9,0.1)
nt<-length(tau)
coef.beta<-array(0,dim=c(nt,1))
coef.eis<-array(0,dim=c(nt,1))
se.beta<-array(0,dim=c(nt,1))
se.eis<-array(0,dim=c(nt,1))
band.eis<-array(0,dim=c(nt,1))
#band <- 1
band<-seq(0.05,0.95,0.45)
nb<-length(band)
for (i in 1:nt){
print(tau[i])
# GMMQ Function
ret2b <- tryCatch(gmmq(tau=tau[i], dB=dimX, Y=cbind(Y,D), X=X.excl, Z.excl=Z.excl,
Lambda=Lfn2b.gmmq, Lambda.derivative=Ldfn2b.gmmq,
h=H.HUGE, VERBOSE=FALSE, RETURN.Z=FALSE, b.init=StartingPointReg),
error=function(w)list(b=c(NA,NA),h=NA))
# Get Coefficients
coef.beta[i]<-conv2.fn(ret2b$b)[1]
coef.eis[i]<-conv2.fn(ret2b$b)[2]
# Get G
g.theta1<-1/(coef.beta[i]*coef.eis[i])
g.theta2<--log(coef.beta[i])*(1/coef.eis[i]^2)
g.theta<-c(g.theta1,g.theta2)
# Create empty SE matrix
se.beta.t<-array(0,dim=c(nb,1))
se.eis.t<-array(0,dim=c(nb,1))
for (j in 1:nb){
print(band[j])
# Get Covariance
cov.est <- cov.est.fn(tau=tau,Y=cbind(Y,D),X=X.excl,Z=Z.excl,Lambda=Lfn2b.gmmq,Lambda.derivative=Ldfn2b.gmmq,beta.hat=ret2b$b,Itilde=Itilde.KS17,Itilde.deriv=Itilde.deriv.KS17,h=H.HUGE,structure=c('ts'),cluster.X.col=0,LRV.kernel=c('Bartlett'),LRV.ST=NA,VERBOSE=FALSE,h.adj=band[j])
# Y=cbind(Y,D)
# X=X.excl
# Z=Z.excl
# Lambda=Lfn2b.gmmq
# Lambda.derivative=Ldfn2b.gmmq
# beta.hat=ret2b$b
# Itilde.deriv=Itilde.deriv.KS17
# h=H.HUGE
# VERBOSE=FALSE
# n <- dim(Z)[1]
# L <- Lfn2b.gmmq(Y,X,beta.hat)
# Ld <- Ldfn2b.gmmq(Y,X,beta.hat)
# # tmpsum <- array(0,dim=c(dim(Z)[2],length(beta.hat)))
# # for (i in 1:n) {
# # tmp <- Itilde.deriv(-L[i]/h) *
# # matrix(Z[i,],ncol=1) %*% matrix(Ld[i,], nrow=1)
# # tmpsum <- tmpsum + tmp
# # }
# tmpsum2 <- t(array(data=Itilde.deriv(-L/h),dim=dim(Z)) * Z) %*% Ld
# G.hat <- (-tmpsum2/(n*h))
#
# Ginv <- tryCatch(solve(G.hat))
print("cov")
print(cov.est)
# Get SE when the cov matrix comes out
if (all(is.na(cov.est))) {
se.beta.t[j] <- NA
se.eis.t[j] <- NA
} else {
cov <- cov.est
cov_beta<-g.theta%*%cov%*%g.theta
cov_eis<-cov[2,2]
se.beta.t[j]<-sqrt(cov_beta/n)
se.eis.t[j]<-sqrt(cov_eis/n)
}
print("se.eis.t")
print(se.eis.t[j])
}
print("Made it through")
print(se.eis.t)
print(which.min(se.eis.t))
print(band[which.min(se.eis.t)])
print(se.beta.t[which.min(se.eis.t)])
print(se.eis.t[which.min(se.eis.t)])
# Get minimum SE
MinLoc <- which.min(se.eis.t)
finalband <- band[MinLoc]
min.se.beta <- se.beta.t[MinLoc]
min.se.eis <- se.eis.t[MinLoc]
# Keep minimum SE
se.beta[i]<-min.se.beta
se.eis[i]<-min.se.eis
band.eis[i] <- finalband
}
QGMMResults <- cbind(tau,coef.beta,se.beta,coef.eis,se.eis,band.eis)
colnames(QGMMResults) <- c("tau", "Beta", "Beta.SE", "EIS", "EIS.SE", "EIS.Band")
print(QGMMResults)
#write.csv(QGMMResults, "EIS/Output/QGMM_PooledResultsP1_Adjusted3.csv", row.names=FALSE)
| /Code/identification_covariates.R | no_license | lancecundy/EIS | R | false | false | 6,511 | r |
library(tidyverse)
setwd('/Users/lancecundy/Documents/Research/Nielsen/EIS')
mydata <- read.csv('UseData/SampleData.csv')
# mydata %>%
# group_by(household_code) %>%
# count() %>%
# ungroup()
#
# # For each household i, construct a gride of quantiles, /tau
#
# mydata.i <- mydata %>%
# filter(household_code == 2000000)
mydata.i <- mydata %>%
group_by(household_code) %>%
mutate(household_income = rand(1)*100) %>%
ungroup()
source("Code/dCGKL_2018_code/gmmq.R")
## Setup Data
Y <- as.matrix(mydata.i$Y)
n <- nrow(Y)
#X.excl <- matrix(data=1, nrow=n, ncol=1)
X.excl <- cbind(matrix(data=1, nrow=n, ncol=1), mydata.i$household_income)
D <- as.matrix(mydata.i$LogR)
Z.excl <- as.matrix(cbind(mydata.i$YInst, mydata.i$Lag2LogNomR, mydata.i$Lag2Inf))
Z <- cbind(Z.excl, X.excl)
X <- cbind(D, X.excl)
## Yogo (2004) 2SLS log-linear estimator as a starting point
PZ <- Z %*% solve(t(Z)%*%Z) %*% t(Z)
StartingPointReg <- solve(t(X)%*%PZ%*%X) %*% (t(X)%*%PZ%*%Y)
####################################################################
##### Setup Functions #####
####################################################################
# conv.fn convert log-linear fn's (b[1],b[2])=(slope, constant) to (beta, gamma)
conv.fn <- function(b) c(exp(b[2]/b[1]), 1/b[1]) #convert log-linear parameters to (beta,gamma)
conv.inv.fn <- function(b) c(log(b[2])/b[1], 1/b[1])
# conv.fn convert log-linear fn's (b[1],b[2])=(slope, constant) to (beta, EIS)
conv2.fn <- function(b) c(exp(b[2]/b[1]), b[1]) #convert log-linear parameters to (beta,EIS)
conv2.inv.fn <- function(b) c(log(b[2])*b[1], b[1])
conv3.fn <- function(b) c(b[1],1/b[2]) # convert (beta,gamma) to (beta,EIS)
conv3.inv.fn <- function(b) c(b[1],1/b[2])
# Residual/Lambda functions (and derivatives) for smoothed MM estimation
Lfn.gmmq <- function(y,x,b) y[,1]-cbind(y[,-1],x)%*%b #log-linear
Ldfn.gmmq <- function(y,x,b) -cbind(y[,-1],x)
Lfn2b.gmmq <- function(y,x,b) -Lfn.gmmq(y,x,b) #-y[,1]+cbind(x,y[,-1])%*%b #log-linear, 1-tau
Ldfn2b.gmmq <- function(y,x,b) -Ldfn.gmmq(y,x,b) #cbind(x,y[,-1])
Lfn2.gmmq <- function(y,x,b) b[1]*exp(y[,1])^(-b[2])*exp(y[,2]) - 1 #nonlinear (beta,gamma)
Ldfn2.gmmq <- function(y,x,b) cbind((Lfn2.gmmq(y=y,x=x,b=b)+1) / b[1],
-y[,1]*(Lfn2.gmmq(y=y,x=x,b=b)+1))
Lfn22.gmmq <- function(y,x,b) b[1]*exp(y[,1])^(-1/b[2])*exp(y[,2]) - 1 #nonlinear (beta,EIS)
Ldfn22.gmmq <- function(y,x,b) cbind((Lfn2.gmmq(y=y,x=x,b=b)+1) / b[1],
y[,1]*(Lfn2.gmmq(y=y,x=x,b=b)+1)/b[2]^2)
# Residual/Lambda functions (and derivatives) for smoothed GMM estimation
Lfn <- function(y,x,b) y-x%*%b #log-linear
Ldfn <- function(y,x,b) -x
Lfn2b <- function(y,x,b) -Lfn(y,x,b) #-y+x%*%b #log-linear, 1-tau
Ldfn2b <- function(y,x,b) -Ldfn(y,x,b) #x
Lfn2 <- function(y,x,b) b[1]*exp(y)^(-b[2])*exp(x[,1]) - 1 #nonlinear (beta,gamma)
Ldfn2 <- function(y,x,b) cbind((Lfn2(y=y,x=x,b=b)+1) / b[1],
-y*(Lfn2(y=y,x=x,b=b)+1))
Lfn22 <- function(y,x,b) b[1]*exp(y)^(-1/b[2])*exp(x[,1]) - 1 #nonlinear (beta,gamma)
Ldfn22 <- function(y,x,b) cbind((Lfn2(y=y,x=x,b=b)+1) / b[1],
y*(Lfn2(y=y,x=x,b=b)+1)/b[2]^2)
####################################################################
## Initialize Variables
dimX <- 3
H.HUGE <- 0.001
tau<-seq(0.1,0.9,0.1)
nt<-length(tau)
coef.beta<-array(0,dim=c(nt,1))
coef.eis<-array(0,dim=c(nt,1))
se.beta<-array(0,dim=c(nt,1))
se.eis<-array(0,dim=c(nt,1))
band.eis<-array(0,dim=c(nt,1))
#band <- 1
band<-seq(0.05,0.95,0.45)
nb<-length(band)
for (i in 1:nt){
print(tau[i])
# GMMQ Function
ret2b <- tryCatch(gmmq(tau=tau[i], dB=dimX, Y=cbind(Y,D), X=X.excl, Z.excl=Z.excl,
Lambda=Lfn2b.gmmq, Lambda.derivative=Ldfn2b.gmmq,
h=H.HUGE, VERBOSE=FALSE, RETURN.Z=FALSE, b.init=StartingPointReg),
error=function(w)list(b=c(NA,NA),h=NA))
# Get Coefficients
coef.beta[i]<-conv2.fn(ret2b$b)[1]
coef.eis[i]<-conv2.fn(ret2b$b)[2]
# Get G
g.theta1<-1/(coef.beta[i]*coef.eis[i])
g.theta2<--log(coef.beta[i])*(1/coef.eis[i]^2)
g.theta<-c(g.theta1,g.theta2)
# Create empty SE matrix
se.beta.t<-array(0,dim=c(nb,1))
se.eis.t<-array(0,dim=c(nb,1))
for (j in 1:nb){
print(band[j])
# Get Covariance
cov.est <- cov.est.fn(tau=tau,Y=cbind(Y,D),X=X.excl,Z=Z.excl,Lambda=Lfn2b.gmmq,Lambda.derivative=Ldfn2b.gmmq,beta.hat=ret2b$b,Itilde=Itilde.KS17,Itilde.deriv=Itilde.deriv.KS17,h=H.HUGE,structure=c('ts'),cluster.X.col=0,LRV.kernel=c('Bartlett'),LRV.ST=NA,VERBOSE=FALSE,h.adj=band[j])
# Y=cbind(Y,D)
# X=X.excl
# Z=Z.excl
# Lambda=Lfn2b.gmmq
# Lambda.derivative=Ldfn2b.gmmq
# beta.hat=ret2b$b
# Itilde.deriv=Itilde.deriv.KS17
# h=H.HUGE
# VERBOSE=FALSE
# n <- dim(Z)[1]
# L <- Lfn2b.gmmq(Y,X,beta.hat)
# Ld <- Ldfn2b.gmmq(Y,X,beta.hat)
# # tmpsum <- array(0,dim=c(dim(Z)[2],length(beta.hat)))
# # for (i in 1:n) {
# # tmp <- Itilde.deriv(-L[i]/h) *
# # matrix(Z[i,],ncol=1) %*% matrix(Ld[i,], nrow=1)
# # tmpsum <- tmpsum + tmp
# # }
# tmpsum2 <- t(array(data=Itilde.deriv(-L/h),dim=dim(Z)) * Z) %*% Ld
# G.hat <- (-tmpsum2/(n*h))
#
# Ginv <- tryCatch(solve(G.hat))
print("cov")
print(cov.est)
# Get SE when the cov matrix comes out
if (all(is.na(cov.est))) {
se.beta.t[j] <- NA
se.eis.t[j] <- NA
} else {
cov <- cov.est
cov_beta<-g.theta%*%cov%*%g.theta
cov_eis<-cov[2,2]
se.beta.t[j]<-sqrt(cov_beta/n)
se.eis.t[j]<-sqrt(cov_eis/n)
}
print("se.eis.t")
print(se.eis.t[j])
}
print("Made it through")
print(se.eis.t)
print(which.min(se.eis.t))
print(band[which.min(se.eis.t)])
print(se.beta.t[which.min(se.eis.t)])
print(se.eis.t[which.min(se.eis.t)])
# Get minimum SE
MinLoc <- which.min(se.eis.t)
finalband <- band[MinLoc]
min.se.beta <- se.beta.t[MinLoc]
min.se.eis <- se.eis.t[MinLoc]
# Keep minimum SE
se.beta[i]<-min.se.beta
se.eis[i]<-min.se.eis
band.eis[i] <- finalband
}
QGMMResults <- cbind(tau,coef.beta,se.beta,coef.eis,se.eis,band.eis)
colnames(QGMMResults) <- c("tau", "Beta", "Beta.SE", "EIS", "EIS.SE", "EIS.Band")
print(QGMMResults)
#write.csv(QGMMResults, "EIS/Output/QGMM_PooledResultsP1_Adjusted3.csv", row.names=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataHelp.R
\docType{data}
\name{colorectalLongi}
\alias{colorectalLongi}
\title{Follow-up of metastatic colorectal cancer patients : longitudinal
measurements of tumor size}
\format{
This data frame contains the following columns: \describe{
\item{id}{identification of each subject. Repeated for each recurrence}
\item{year}{time of visit counted in years from baseline}
\item{tumor.size}{Individual longitudinal measurement of transformed
(Box-Cox with parameter 0.3) sums of the longest diameters, left-censored
due to a detection limit (threshold \eqn{s=-3.33}). } \item{treatment}{To
which treatment arm a patient was allocated? 1: sequential (S); 2:
combination (C)} \item{age}{Age at baseline: 1: <50 years, 2: 50-69 years,
3: >69 years} \item{who.PS}{WHO performance status at baseline: 1: status 0,
2: status 1, 3: status 2} \item{prev.resection}{Previous resection of the
primate tumor? 0: No, 1: Yes} }
}
\usage{
data(colorectalLongi)
}
\description{
Randomly chosen 150 patients from the follow-up of the FFCD 2000-05
multicenter phase III clinical trial originally including 410 patients with
metastatic colorectal cancer randomized into two therapeutic strategies:
combination and sequential. The dataset contains measurements of tumor size
(left-censored sums of the longest diameters of target lesions; transformed
using Box-Cox) with baseline characteristics(treatment arm, age, WHO
performance status and previous resection).
}
\note{
We thank the Federation Francophone de Cancerologie Digestive and
Gustave Roussy for sharing the data of the FFCD 2000-05 trial supported by
an unrestricted Grant from Sanofi.
}
\references{
Ducreux, M., Malka, D., Mendiboure, J., Etienne, P.-L.,
Texereau, P., Auby, D., Rougier, P., Gasmi, M., Castaing, M., Abbas, M.,
Michel, P., Gargot, D., Azzedine, A., Lombard- Bohas, C., Geoffroy, P.,
Denis, B., Pignon, J.-P., Bedenne, L., and Bouche, O. (2011). Sequential
versus combination chemotherapy for the treatment of advanced colorectal
cancer (FFCD 2000-05): an open-label, randomised, phase 3 trial. \emph{The
Lancet Oncology} \bold{12}, 1032-44.
}
\keyword{datasets}
| /man/colorectalLongi.Rd | no_license | cran/frailtypack | R | false | true | 2,251 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataHelp.R
\docType{data}
\name{colorectalLongi}
\alias{colorectalLongi}
\title{Follow-up of metastatic colorectal cancer patients : longitudinal
measurements of tumor size}
\format{
This data frame contains the following columns: \describe{
\item{id}{identification of each subject. Repeated for each recurrence}
\item{year}{time of visit counted in years from baseline}
\item{tumor.size}{Individual longitudinal measurement of transformed
(Box-Cox with parameter 0.3) sums of the longest diameters, left-censored
due to a detection limit (threshold \eqn{s=-3.33}). } \item{treatment}{To
which treatment arm a patient was allocated? 1: sequential (S); 2:
combination (C)} \item{age}{Age at baseline: 1: <50 years, 2: 50-69 years,
3: >69 years} \item{who.PS}{WHO performance status at baseline: 1: status 0,
2: status 1, 3: status 2} \item{prev.resection}{Previous resection of the
primate tumor? 0: No, 1: Yes} }
}
\usage{
data(colorectalLongi)
}
\description{
Randomly chosen 150 patients from the follow-up of the FFCD 2000-05
multicenter phase III clinical trial originally including 410 patients with
metastatic colorectal cancer randomized into two therapeutic strategies:
combination and sequential. The dataset contains measurements of tumor size
(left-censored sums of the longest diameters of target lesions; transformed
using Box-Cox) with baseline characteristics(treatment arm, age, WHO
performance status and previous resection).
}
\note{
We thank the Federation Francophone de Cancerologie Digestive and
Gustave Roussy for sharing the data of the FFCD 2000-05 trial supported by
an unrestricted Grant from Sanofi.
}
\references{
Ducreux, M., Malka, D., Mendiboure, J., Etienne, P.-L.,
Texereau, P., Auby, D., Rougier, P., Gasmi, M., Castaing, M., Abbas, M.,
Michel, P., Gargot, D., Azzedine, A., Lombard- Bohas, C., Geoffroy, P.,
Denis, B., Pignon, J.-P., Bedenne, L., and Bouche, O. (2011). Sequential
versus combination chemotherapy for the treatment of advanced colorectal
cancer (FFCD 2000-05): an open-label, randomised, phase 3 trial. \emph{The
Lancet Oncology} \bold{12}, 1032-44.
}
\keyword{datasets}
|
#' Chainable View
#'
#' A View that doesn't break a pipeline. Same thing as \code{utils::View()}, but returns the original data object invisibly to allow putting view commands anywhere in a pipeline. This also allows for adding a view command at the end of pipelines while also assigning the data object the pipeline returns.
#'
#' @param x an \R object which can be coerced to a data frame with non-zero numbers of rows and columns
#' @param title the title for the viewer window; defaults to name of x if not given
#'
#' @seealso View
#' @keywords View pipeline
#'
#' @examples
#' \dontrun{
#' # 1
#' view(iris)
#'
#' # 2
#' iris_test <- iris %>%
#' group_by(
#' Species
#' ) %>%
#' summarise(
#' sum = sum(Petal.Width)
#' ) %>%
#' view('iris:sum') %>%
#' mutate(
#' sum_squared = sum^2
#' ) %>%
#' view('iris:sum_squared')
#' }
#' #
#'
#' @rdname view
#' @export
view <- function(x, title) {
# the following gets the data to use the Rstudio Viewer, and not an X11 window
# https://stackoverflow.com/questions/46953507/how-to-access-copy-the-view-function
RStudioView <- as.environment("package:utils")$View
if (missing(title)) {
title <- deparse(substitute(x))[1]
}
RStudioView(x, title)
#
invisible(x)
}
| /R/view.R | permissive | Paul-James/pjames | R | false | false | 1,266 | r | #' Chainable View
#'
#' A View that doesn't break a pipeline. Same thing as \code{utils::View()}, but returns the original data object invisibly to allow putting view commands anywhere in a pipeline. This also allows for adding a view command at the end of pipelines while also assigning the data object the pipeline returns.
#'
#' @param x an \R object which can be coerced to a data frame with non-zero numbers of rows and columns
#' @param title the title for the viewer window; defaults to name of x if not given
#'
#' @seealso View
#' @keywords View pipeline
#'
#' @examples
#' \dontrun{
#' # 1
#' view(iris)
#'
#' # 2
#' iris_test <- iris %>%
#' group_by(
#' Species
#' ) %>%
#' summarise(
#' sum = sum(Petal.Width)
#' ) %>%
#' view('iris:sum') %>%
#' mutate(
#' sum_squared = sum^2
#' ) %>%
#' view('iris:sum_squared')
#' }
#' #
#'
#' @rdname view
#' @export
view <- function(x, title) {
# the following gets the data to use the Rstudio Viewer, and not an X11 window
# https://stackoverflow.com/questions/46953507/how-to-access-copy-the-view-function
RStudioView <- as.environment("package:utils")$View
if (missing(title)) {
title <- deparse(substitute(x))[1]
}
RStudioView(x, title)
#
invisible(x)
}
|
# Here starts my R excercise
## H1
| /SRC/analysis.R | permissive | FrankINBO/testfile | R | false | false | 36 | r | # Here starts my R excercise
## H1
|
#The deforestation in the dry tropical forests of Mato Grosso (NT0140) and the correlation with the increase in coverage to an agricultural one in Brazil 🏞️↪️🌾
#install.packages() is a function which download and install packages from CRAN-like repositories or from local files
#The first package to install is raster through which is possible reading, writing, manipulating, analyzing and modeling of spatial data. The package implements basic and high-level functions for raster data and for vector data operations such as intersections:
install.packages("raster")
#The second package to install is RStoolbox, a toolbox for remote sensing image processing and analysis such as calculating spectral indices, principal component transformation, unsupervised and supervised classification or fractional cover analyses:
install.packages("RStoolbox")
#The third package to install is ggplot2, which is a system for declaratively creating graphics, based on "The Grammar of Graphics". The user provides the data, tell 'ggplot2' how to map variables to aesthetics, what graphical primitives to use, and it takes care of the details:
install.packages("ggplot2")
#The fourth package to install is gridExtra, which provides a number of user-level functions to work with "grid" graphics, notably to arrange multiple grid-based plots on a page, and draw tables:
install.packages("gridExtra")
#library() is a function which load and attach add-on packages, in this case the previous raster, RStoolbox, ggplot2 and gridExtra packages:
library(raster)
library(RStoolbox)
library(ggplot2)
library(gridExtra)
#In my computer R must use the folder named simply lab and I exploit a function (setwd) for the association between R and lab folder:
setwd("C:/lab/")
#Identically to the R_remote_code_sensing_first.r, I still exploit a function to import data from lab folder - external - to R - internal¹ - and this will be brick and not raster because our data does not represent raster layer objects but multi-layer raster objects!
defor1 <- brick("defor1.jpg")
#Important: the brick function creates a RasterBrick object that is a multi-layer raster object typically from a multi-layer (band) file. Instead the raster function creates a RasterLayer object from scratch, a file, an Extent object, a matrix, an 'image' object, or from a Raster *, Spatial *, im (spatstat) asc, kasc (adehabitat *), grf (geoR) or kde object!
#defor1.jpg is an image of the deforestation in the dry tropical forests of Mato Grosso (NT0140) captured by NASA's Terra satellite and downloaded from the website https://www.jpl.nasa.gov/images/mato-grosso-brazil
#plotRGB() is a Red-Green-Blue plot based on three layers (in a RasterBrick or RasterStack) combined such that they represent the red, green and blue channel. Through plotRGB () it is possible to graphically visualize defor1 in the colors of the visible spectrum:
plotRGB(defor1, r=1, g=2, b=3, stretch="lin")
#ggRGB() is a function that calculates RGB color composite RasterLayers for plotting with ggplot2. Optional values for clipping and and stretching can be used to enhance the imagery
ggRGB(defor1, r=1, g=2, b=3, stretch="lin")
#The same iterative cycle of brick (), plotRGB () and ggRGB () functions must be applied to the defor2.jpg image so that an initial comparison can be made in the state of deforestation in the dry tropical forests of Mato Grosso (NT0140)
#Identically to the R_remote_code_sensing_first.r, I still exploit a function to import data from lab folder - external - to R - internal¹ - and this will be brick and not raster because our data does not represent raster layer objects but multi-layer raster objects!
defor2 <- brick("defor2.jpg")
#Important: the brick function creates a RasterBrick object that is a multi-layer raster object typically from a multi-layer (band) file. Instead the raster function creates a RasterLayer object from scratch, a file, an Extent object, a matrix, an 'image' object, or from a Raster *, Spatial *, im (spatstat) asc, kasc (adehabitat *), grf (geoR) or kde object!
#defor2.jpg is an image of the deforestation in the dry tropical forests of Mato Grosso (NT0140) captured by NASA's Terra satellite and downloaded from the website https://www.jpl.nasa.gov/images/mato-grosso-brazil
#plotRGB() is a Red-Green-Blue plot based on three layers (in a RasterBrick or RasterStack) combined such that they represent the red, green and blue channel. Through plotRGB () it is possible to graphically visualize defor1 in the colors of the visible spectrum:
plotRGB(defor2, r=1, g=2, b=3, stretch="lin")
#ggRGB() is a function that calculates RGB color composite RasterLayers for plotting with ggplot2. Optional values for clipping and and stretching can be used to enhance the imagery
ggRGB(defor2, r=1, g=2, b=3, stretch="lin")
#The same iterative cycle of brick (), plotRGB () and ggRGB () functions must be applied to the defor2.jpg image so that an initial comparison can be made in the state of deforestation in the dry tropical forests of Mato Grosso (NT0140) through the function par()
#The initial comparison between defor1 and defor2 for the deforestation in the dry tropical forests of Mato Grosso (NT0140) can be graphically visualized through the plotRGB() function and in particular by organizing the previous ones in a multiframe which r̲o̲w̲s or c̲o̲l̲umns are at the user's discretion
#With the function par() there is the possibility of combining multiple object's level of "interest" into one graphical visualization of their called multiframe:
par(mfrow=c(1,2))
plotRGB(defor1, r=1, g=2, b=3, stretch="lin")
plotRGB(defor2, r=1, g=2, b=3, stretch="lin")
#The initial comparison in the deforestation in the dry tropical forests of Mato Grosso (NT0140) between defor1 and defor2 can also be graphically displayed through the ggRGB() function; unlike the previous function, images can be organized in a multiframe through the function grid.arrange () not before giving them a name!
D1 <- ggRGB(defor1, r=1, g=2, b=3, stretch="lin")
D2 <- ggRGB(defor2, r=1, g=2, b=3, stretch="lin")
#arrangeGrob set up a gtable layout to place multiple grobs on a page. In particular grid.arrange() draw on the current device and is useful to organize ggRGB elements after simply renamed them:
grid.arrange(D1, D2, nrow=2)
#The classification of a satellite image depends on the reflectance values attributed to each pixel
#The values for reflectance vary in the red, green and blue (RGB) bands and two or more pixels will be "merged" to form a class if and only if they have a superimposable value
#These pixels will be placed in a multispectral region as a point cloud which, together with others in a three-dimensional reference system, define the spectrum through which the satellite image is displayed
#The above classification can be effectuated with a training set consisting of: 1) a reference satellite image and 2) the model with which there should be a classification of the pixels depending on their reflectance values or with the unsuperClass () function, where the classification of pixels depending on their reflectance values, on the other hand, occurs randomly!
#The unsupervised Classification () function randomly classifies the sampled pixels (whose number can be indicated in the nSamples argument) depending on their reflectance values, according to the number of classes selected by the user via the nClasses argument:
D1c <- unsuperClass(defor1, nClass=2)
#In the above classification, being interested in the phenomenon of deforestation, I created two classes using the nClasses argument: the first for the Amazon forest (near-infrared or NIR band displayed through the red R channel) and the second for agricultural land ( other bands displayed through the G and B channels)
#In R, to visualize information of D1c, name of it followed by Enter ↵ as physical command by keyboard:
D1c
#The relatively D1c information is contained within the table:
unsuperClass results
*************** Map ******************
$map
class : RasterLayer
dimensions : 478, 714, 341292 (nrow, ncol, ncell)
resolution : 1, 1 (x, y)
extent : 0, 714, 0, 478 (xmin, xmax, ymin, ymax)
crs : NA
source : memory
names : layer
values : 1, 2 (min, max)
#plot() dispatches on the class of its argument; here the $ operator extracts the "map" element (a RasterLayer) from the unsuperClass result, so plot(D1c$map) draws the classified raster produced above.
#NOTE(review): $map is list-element extraction, not a call to a map() function — the original comment described purrr-style mapping, which does not apply here:
plot(D1c$map)
#set.seed() fixes the state of R's random number generator, so the random sampling performed by unsuperClass() is reproducible: with the same seed, re-running the script reproduces the same classification.
set.seed(1)
#rnorm(1) draws one standard-normal random number, advancing the generator state.
#Fixed: the function is rnorm(), not r.norm() — r.norm() does not exist and would stop the script with "could not find function":
rnorm(1)
#unsuperClass() samples pixels at random (the nSamples argument) and clusters them by reflectance value into the number of classes requested through the nClasses argument.
#The argument is spelled out in full (nClasses) rather than abbreviated (nClass), so the call does not depend on R's partial argument matching:
D2c <- unsuperClass(defor2, nClasses = 2)
#In the above classification, being interested in the phenomenon of deforestation, I created the two same classes of defor1 in the unsuperClass () function using the nClasses argument: the first for the 𝘈𝘮𝘢𝘻𝘰𝘯 𝘍𝘰𝘳𝘦𝘴𝘵 (near-infrared or NIR band displayed through the red channel R) and the second for agricultural land (other bands displayed through the G and B channels)
#In R, to print the information stored in D2c, type its name followed by Enter:
D2c
#Console output of D2c (the original comment said "D1c" by mistake), kept as a comment so the script can be source()'d without syntax errors:
# unsuperClass results
# *************** Map ******************
# $map
# class      : RasterLayer
# dimensions : 478, 717, 342726  (nrow, ncol, ncell)
# resolution : 1, 1  (x, y)
# extent     : 0, 717, 0, 478  (xmin, xmax, ymin, ymax)
# crs        : NA
# source     : memory
# names      : layer
# values     : 1, 2  (min, max)
#plot() draws the classified raster; the $ operator extracts the "map" element (a RasterLayer) from the unsuperClass result produced above — this is list-element extraction, not a map() function:
plot(D2c$map)
#The plot of D2c$map shows that, for defor2, two classes are not enough: with nClasses=2 agricultural land cannot be separated from other areas or watersheds whose reflectance values are similar.
#The classification of defor2 is therefore repeated below, raising the number of classes from 2 to 3 in the nClasses argument of unsuperClass().
#The classification of defor2 is repeated with three classes instead of two, because two classes could not separate agricultural land from areas with similar reflectance values.
#The argument is spelled out in full (nClasses) rather than abbreviated (nClass), so the call does not depend on R's partial argument matching:
D2c3 <- unsuperClass(defor2, nClasses = 3)
#plot() draws the classified raster; the $ operator extracts the "map" element (a RasterLayer) from the three-class unsuperClass result:
plot(D2c3$map)
#To estimate the cover of dry forest versus agricultural land in Mato Grosso, freq() counts the pixels belonging to each of the classes produced by the two unsuperClass() calls above (D1c and D2c).
#freq() tabulates the cell values of a raster, returning a value/count matrix:
freq(D1c$map)
#Console output of freq(D1c$map), kept as a comment so the script can be source()'d without syntax errors:
#      value  count
# [1,]     1 306374
# [2,]     2  34918
#freq() tabulates the cell values of a raster, returning a value/count matrix:
freq(D2c$map)
#Console output of freq(D2c$map), kept as a comment so the script can be source()'d without syntax errors:
#      value  count
# [1,]     1 178490
# [2,]     2 164236
#The total number of pixels in D1c$map is the sum of the per-class pixel counts (Amazon Forest + Agriculture/Other):
s1 <- 306374 + 34918
#Type s1 followed by Enter to print it:
s1
#Expected console output, kept as a comment so the script stays parseable:
# [1] 341292
#The total number of pixels in D2c$map is the sum of the per-class pixel counts (Amazon Forest + Agriculture/Other):
s2 <- 178490 + 164236
#Type s2 followed by Enter to print it:
s2
#Expected console output, kept as a comment so the script stays parseable:
# [1] 342726
#From the number of pixels that belong to the 𝘈𝘮𝘢𝘻𝘰𝘯 𝘍𝘰𝘳𝘦𝘴𝘵 and 𝘈𝘨𝘳𝘪𝘤𝘶𝘭𝘵𝘶𝘳𝘦/𝘖𝘵𝘩𝘦𝘳 classes respectively, how is it possible to have a percentage of the actual coverage in the state of Mato Grosso?
#The per-class proportion of the image is the freq() count of each class divided by the total pixel number s1:
prop1 <- freq(D1c$map) / s1
#Type prop1 followed by Enter to print it:
prop1
#Expected console output (the count column holds the class proportions), kept as a comment so the script stays parseable:
#             value     count
# [1,] 5.860085e-06 0.8965314   <- Amazon Forest
# [2,] 2.930042e-06 0.1034686   <- Agriculture/Other
#The per-class proportion is obtained the same way for the 2016 image, dividing freq(D2c$map) by s2 (the original comment repeated "D1c and s1" by mistake):
prop2 <- freq(D2c$map) / s2
#Type prop2 followed by Enter to print it:
prop2
#Expected console output (the count column holds the class proportions), kept as a comment so the script stays parseable:
#             value    count
# [1,] 2.917783e-06 0.520795   <- Amazon Forest
# [2,] 5.835565e-06 0.479205   <- Agriculture/Other
#Multiplying the proportions above by 100 gives the percentage cover of Amazon Forest and Agriculture/Other in each year:
percent_1992 <- c(89.65, 10.34)
percent_2016 <- c(52.07, 47.92)
#The class labels are stored as a character vector; they become the categorical column of the data frame built below:
cover <- c("Forest", "Agriculture")
#data.frame() binds the label column and the two percentage columns into one table (strings are not converted to factors since R 4.0):
losspercentages <- data.frame(cover, percent_1992, percent_2016)
#Type losspercentages followed by Enter to print it:
losspercentages
#Expected console output, kept as a comment so the script stays parseable:
#         cover percent_1992 percent_2016
# 1      Forest        89.65        52.07
# 2 Agriculture        10.34        47.92
#Which is the basic syntax of ggplot() function?
#ggplot(data, aes(x = ..., y = ...)) declares the data frame and the variable-to-aesthetic mapping; a geom_* layer added with + declares what to draw.
#geom_bar(stat = "identity") draws bars whose heights are the y values themselves (rather than counts), here the percentage cover per class:
ggplot(losspercentages, aes(x=cover, y=percent_1992, color=cover)) + geom_bar(stat="identity", fill="pink")
ggplot(losspercentages, aes(x=cover, y=percent_2016 , color=cover)) + geom_bar(stat="identity", fill="pink")
#grid.arrange() (gridExtra) lays out several ggplot objects on one page; the two bar charts are first stored in named objects, then arranged side by side in a single row:
ggD1 <- ggplot(losspercentages, aes(x=cover, y=percent_1992, color=cover)) + geom_bar(stat="identity", fill="pink")
ggD2 <- ggplot(losspercentages, aes(x=cover, y=percent_2016 , color=cover)) + geom_bar(stat="identity", fill="pink")
grid.arrange(ggD1, ggD2, nrow=1)
#Sequence of informatic commands for R_code_knitr.r
#Recap of the full analysis. Two fixes with respect to the original recap: rnorm() replaces the
#non-existent r.norm() (which would stop the script with "could not find function"), and the
#nClasses argument of unsuperClass() is spelled out instead of relying on partial matching.
# --- setup: install and load the packages, set the working directory ---
install.packages("raster")
install.packages("RStoolbox")
install.packages("ggplot2")
install.packages("gridExtra")
library(raster)
library(RStoolbox)
library(ggplot2)
library(gridExtra)
setwd("C:/lab/")
# --- import and display the two satellite images ---
defor1 <- brick("defor1.jpg")
plotRGB(defor1, r=1, g=2, b=3, stretch="lin")
ggRGB(defor1, r=1, g=2, b=3, stretch="lin")
defor2 <- brick("defor2.jpg")
plotRGB(defor2, r=1, g=2, b=3, stretch="lin")
ggRGB(defor2, r=1, g=2, b=3, stretch="lin")
par(mfrow=c(1,2))
plotRGB(defor1, r=1, g=2, b=3, stretch="lin")
plotRGB(defor2, r=1, g=2, b=3, stretch="lin")
par(mfrow=c(1,2))
plotRGB(defor1, r=1, g=2, b=3, stretch="lin")
plotRGB(defor2, r=1, g=2, b=3, stretch="lin")
D1 <- ggRGB(defor1, r=1, g=2, b=3, stretch="lin")
D2 <- ggRGB(defor2, r=1, g=2, b=3, stretch="lin")
grid.arrange(D1, D2, nrow=2)
# --- unsupervised classification ---
D1c <- unsuperClass(defor1, nClasses=2)
D1c
set.seed(1)
rnorm(1)
D2c <- unsuperClass(defor2, nClasses=2)
D2c
plot(D2c$map)
D2c3 <- unsuperClass(defor2, nClasses=3)
plot(D2c3$map)
# --- pixel counts, proportions and percentage cover ---
freq(D1c$map)
freq(D2c$map)
s1 <- 306374 + 34918
s1
s2 <- 178490 + 164236
s2
prop1 <- freq(D1c$map) / s1
prop1
prop2 <- freq(D2c$map) / s2
prop2
percent_1992 <- c(89.65, 10.34)
percent_2016 <- c(52.07, 47.92)
cover <- c("Forest", "Agriculture")
losspercentages <- data.frame(cover, percent_1992, percent_2016)
losspercentages
# --- bar charts of the percentage cover in 1992 and 2016 ---
ggplot(losspercentages, aes(x=cover, y=percent_1992, color=cover)) + geom_bar(stat="identity", fill="pink")
ggplot(losspercentages, aes(x=cover, y=percent_2016 , color=cover)) + geom_bar(stat="identity", fill="pink")
ggD1 <- ggplot(losspercentages, aes(x=cover, y=percent_1992, color=cover)) + geom_bar(stat="identity", fill="pink")
ggD2 <- ggplot(losspercentages, aes(x=cover, y=percent_2016 , color=cover)) + geom_bar(stat="identity", fill="pink")
grid.arrange(ggD1, ggD2, nrow=1)
| /R_code_land_cover.r | no_license | AndreaCapponi/telerilevamento_2021 | R | false | false | 20,262 | r | #The deforestation in the dry tropical forests of Mato Grosso (NT0140) and the correlation with the increase in coverage to an agricultural one in Brazil 🏞️↪️🌾
#install.packages() is a function which download and install packages from CRAN-like repositories or from local files
#The first package to install is raster through which is possible reading, writing, manipulating, analyzing and modeling of spatial data. The package implements basic and high-level functions for raster data and for vector data operations such as intersections:
install.packages("raster")
#The second package to install is RStoolbox, a toolbox for remote sensing image processing and analysis such as calculating spectral indices, principal component transformation, unsupervised and supervised classification or fractional cover analyses:
install.packages("RStoolbox")
#The third package to install is ggplot2, which is a system for declaratively creating graphics, based on "The Grammar of Graphics". The user provides the data, tell 'ggplot2' how to map variables to aesthetics, what graphical primitives to use, and it takes care of the details:
install.packages("ggplot2")
#The fourth package to install is gridExtra, which provides a number of user-level functions to work with "grid" graphics, notably to arrange multiple grid-based plots on a page, and draw tables:
install.packages("gridExtra")
#library() is a function which load and attach add-on packages, in this case the previous raster, RStoolbox, ggplot2 and gridExtra packages:
library(raster)
library(RStoolbox)
library(ggplot2)
library(gridExtra)
#In my computer R must use the folder named simply lab and I exploit a function (setwd) for the association between R and lab folder:
setwd("C:/lab/")
#Identically to the R_remote_code_sensing_first.r, I still exploit a function to import data from lab folder - external - to R - internal¹ - and this will be brick and not raster because our data does not represent raster layer objects but multi-layer raster objects!
defor1 <- brick("defor1.jpg")
#Important: the brick function creates a RasterBrick object that is a multi-layer raster object typically from a multi-layer (band) file. Instead the raster function creates a RasterLayer object from scratch, a file, an Extent object, a matrix, an 'image' object, or from a Raster *, Spatial *, im (spatstat) asc, kasc (adehabitat *), grf (geoR) or kde object!
#defor1.jpg is an image of the deforestation in the dry tropical forests of Mato Grosso (NT0140) captured by NASA's Terra satellite and downloaded from the website https://www.jpl.nasa.gov/images/mato-grosso-brazil
#plotRGB() is a Red-Green-Blue plot based on three layers (in a RasterBrick or RasterStack) combined such that they represent the red, green and blue channel. Through plotRGB () it is possible to graphically visualize defor1 in the colors of the visible spectrum:
plotRGB(defor1, r=1, g=2, b=3, stretch="lin")
#ggRGB() is a function that calculates RGB color composite RasterLayers for plotting with ggplot2. Optional values for clipping and and stretching can be used to enhance the imagery
ggRGB(defor1, r=1, g=2, b=3, stretch="lin")
#The same iterative cycle of brick (), plotRGB () and ggRGB () functions must be applied to the defor2.jpg image so that an initial comparison can be made in the state of deforestation in the dry tropical forests of Mato Grosso (NT0140)
#Identically to the R_remote_code_sensing_first.r, I still exploit a function to import data from lab folder - external - to R - internal¹ - and this will be brick and not raster because our data does not represent raster layer objects but multi-layer raster objects!
defor2 <- brick("defor2.jpg")
#Important: the brick function creates a RasterBrick object that is a multi-layer raster object typically from a multi-layer (band) file. Instead the raster function creates a RasterLayer object from scratch, a file, an Extent object, a matrix, an 'image' object, or from a Raster *, Spatial *, im (spatstat) asc, kasc (adehabitat *), grf (geoR) or kde object!
#defor2.jpg is an image of the deforestation in the dry tropical forests of Mato Grosso (NT0140) captured by NASA's Terra satellite and downloaded from the website https://www.jpl.nasa.gov/images/mato-grosso-brazil
#plotRGB() is a Red-Green-Blue plot based on three layers (in a RasterBrick or RasterStack) combined such that they represent the red, green and blue channel. Through plotRGB () it is possible to graphically visualize defor1 in the colors of the visible spectrum:
plotRGB(defor2, r=1, g=2, b=3, stretch="lin")
#ggRGB() is a function that calculates RGB color composite RasterLayers for plotting with ggplot2. Optional values for clipping and and stretching can be used to enhance the imagery
ggRGB(defor2, r=1, g=2, b=3, stretch="lin")
#The same iterative cycle of brick (), plotRGB () and ggRGB () functions must be applied to the defor2.jpg image so that an initial comparison can be made in the state of deforestation in the dry tropical forests of Mato Grosso (NT0140) through the function par()
#The initial comparison between defor1 and defor2 for the deforestation in the dry tropical forests of Mato Grosso (NT0140) can be graphically visualized through the plotRGB() function and in particular by organizing the previous ones in a multiframe which r̲o̲w̲s or c̲o̲l̲umns are at the user's discretion
#With the function par() there is the possibility of combining multiple object's level of "interest" into one graphical visualization of their called multiframe:
par(mfrow=c(1,2))
plotRGB(defor1, r=1, g=2, b=3, stretch="lin")
plotRGB(defor2, r=1, g=2, b=3, stretch="lin")
#The initial comparison in the deforestation in the dry tropical forests of Mato Grosso (NT0140) between defor1 and defor2 can also be graphically displayed through the ggRGB() function; unlike the previous function, images can be organized in a multiframe through the function grid.arrange () not before giving them a name!
D1 <- ggRGB(defor1, r=1, g=2, b=3, stretch="lin")
D2 <- ggRGB(defor2, r=1, g=2, b=3, stretch="lin")
#arrangeGrob set up a gtable layout to place multiple grobs on a page. In particular grid.arrange() draw on the current device and is useful to organize ggRGB elements after simply renamed them:
grid.arrange(D1, D2, nrow=2)
#The classification of a satellite image depends on the reflectance values attributed to each pixel
#The values for reflectance vary in the red, green and blue (RGB) bands and two or more pixels will be "merged" to form a class if and only if they have a superimposable value
#These pixels will be placed in a multispectral region as a point cloud which, together with others in a three-dimensional reference system, define the spectrum through which the satellite image is displayed
#The above classification can be performed in two ways: with a training set — consisting of 1) a reference satellite image and 2) a model that assigns pixels to classes according to their reflectance values — or with the unsuperClass() function, in which pixels are instead grouped by their reflectance values starting from a random sample of cells
#unsuperClass() samples pixels at random (the nSamples argument) and clusters them by reflectance value into the number of classes requested through the nClasses argument.
#The argument is spelled out in full (nClasses) rather than abbreviated (nClass), so the call does not depend on R's partial argument matching:
D1c <- unsuperClass(defor1, nClasses = 2)
#In the above classification, being interested in the phenomenon of deforestation, I created two classes using the nClasses argument: the first for the Amazon forest (near-infrared or NIR band displayed through the red R channel) and the second for agricultural land ( other bands displayed through the G and B channels)
#In R, to print the information stored in D1c, type its name followed by Enter:
D1c
#Console output of D1c, kept as a comment so the script can be source()'d without syntax errors:
# unsuperClass results
# *************** Map ******************
# $map
# class      : RasterLayer
# dimensions : 478, 714, 341292  (nrow, ncol, ncell)
# resolution : 1, 1  (x, y)
# extent     : 0, 714, 0, 478  (xmin, xmax, ymin, ymax)
# crs        : NA
# source     : memory
# names      : layer
# values     : 1, 2  (min, max)
#plot() dispatches on the class of its argument; here the $ operator extracts the "map" element (a RasterLayer) from the unsuperClass result, so plot(D1c$map) draws the classified raster produced above.
#NOTE(review): $map is list-element extraction, not a call to a map() function — the original comment described purrr-style mapping, which does not apply here:
plot(D1c$map)
#set.seed() fixes the state of R's random number generator, so the random sampling performed by unsuperClass() is reproducible: with the same seed, re-running the script reproduces the same classification.
set.seed(1)
#rnorm(1) draws one standard-normal random number, advancing the generator state.
#Fixed: the function is rnorm(), not r.norm() — r.norm() does not exist and would stop the script with "could not find function":
rnorm(1)
#unsuperClass() samples pixels at random (the nSamples argument) and clusters them by reflectance value into the number of classes requested through the nClasses argument.
#The argument is spelled out in full (nClasses) rather than abbreviated (nClass), so the call does not depend on R's partial argument matching:
D2c <- unsuperClass(defor2, nClasses = 2)
#In the above classification, being interested in the phenomenon of deforestation, I created the two same classes of defor1 in the unsuperClass () function using the nClasses argument: the first for the 𝘈𝘮𝘢𝘻𝘰𝘯 𝘍𝘰𝘳𝘦𝘴𝘵 (near-infrared or NIR band displayed through the red channel R) and the second for agricultural land (other bands displayed through the G and B channels)
#In R, to print the information stored in D2c, type its name followed by Enter:
D2c
#Console output of D2c (the original comment said "D1c" by mistake), kept as a comment so the script can be source()'d without syntax errors:
# unsuperClass results
# *************** Map ******************
# $map
# class      : RasterLayer
# dimensions : 478, 717, 342726  (nrow, ncol, ncell)
# resolution : 1, 1  (x, y)
# extent     : 0, 717, 0, 478  (xmin, xmax, ymin, ymax)
# crs        : NA
# source     : memory
# names      : layer
# values     : 1, 2  (min, max)
#plot() draws the classified raster; the $ operator extracts the "map" element (a RasterLayer) from the unsuperClass result produced above — this is list-element extraction, not a map() function:
plot(D2c$map)
#The plot of D2c$map shows that, for defor2, two classes are not enough: with nClasses=2 agricultural land cannot be separated from other areas or watersheds whose reflectance values are similar.
#The classification of defor2 is therefore repeated below, raising the number of classes from 2 to 3 in the nClasses argument of unsuperClass().
#The classification of defor2 is repeated with three classes instead of two, because two classes could not separate agricultural land from areas with similar reflectance values.
#The argument is spelled out in full (nClasses) rather than abbreviated (nClass), so the call does not depend on R's partial argument matching:
D2c3 <- unsuperClass(defor2, nClasses = 3)
#plot() draws the classified raster; the $ operator extracts the "map" element (a RasterLayer) from the three-class unsuperClass result:
plot(D2c3$map)
#To estimate the cover of dry forest versus agricultural land in Mato Grosso, freq() counts the pixels belonging to each of the classes produced by the two unsuperClass() calls above (D1c and D2c).
#freq() tabulates the cell values of a raster, returning a value/count matrix:
freq(D1c$map)
#Console output of freq(D1c$map), kept as a comment so the script can be source()'d without syntax errors:
#      value  count
# [1,]     1 306374
# [2,]     2  34918
#freq() tabulates the cell values of a raster, returning a value/count matrix:
freq(D2c$map)
#Console output of freq(D2c$map), kept as a comment so the script can be source()'d without syntax errors:
#      value  count
# [1,]     1 178490
# [2,]     2 164236
#The total number of pixels in D1c$map is the sum of the per-class pixel counts (Amazon Forest + Agriculture/Other):
s1 <- 306374 + 34918
#Type s1 followed by Enter to print it:
s1
#Expected console output, kept as a comment so the script stays parseable:
# [1] 341292
#The total number of pixels in D2c$map is the sum of the per-class pixel counts (Amazon Forest + Agriculture/Other):
s2 <- 178490 + 164236
#Type s2 followed by Enter to print it:
s2
#Expected console output, kept as a comment so the script stays parseable:
# [1] 342726
#From the number of pixels that belong to the 𝘈𝘮𝘢𝘻𝘰𝘯 𝘍𝘰𝘳𝘦𝘴𝘵 and 𝘈𝘨𝘳𝘪𝘤𝘶𝘭𝘵𝘶𝘳𝘦/𝘖𝘵𝘩𝘦𝘳 classes respectively, how is it possible to have a percentage of the actual coverage in the state of Mato Grosso?
#The per-class proportion of the image is the freq() count of each class divided by the total pixel number s1:
prop1 <- freq(D1c$map) / s1
#Type prop1 followed by Enter to print it:
prop1
#Expected console output (the count column holds the class proportions), kept as a comment so the script stays parseable:
#             value     count
# [1,] 5.860085e-06 0.8965314   <- Amazon Forest
# [2,] 2.930042e-06 0.1034686   <- Agriculture/Other
#The per-class proportion is obtained the same way for the 2016 image, dividing freq(D2c$map) by s2 (the original comment repeated "D1c and s1" by mistake):
prop2 <- freq(D2c$map) / s2
#Type prop2 followed by Enter to print it:
prop2
#Expected console output (the count column holds the class proportions), kept as a comment so the script stays parseable:
#             value    count
# [1,] 2.917783e-06 0.520795   <- Amazon Forest
# [2,] 5.835565e-06 0.479205   <- Agriculture/Other
#Multiplying the proportions above by 100 gives the percentage cover of Amazon Forest and Agriculture/Other in each year:
percent_1992 <- c(89.65, 10.34)
percent_2016 <- c(52.07, 47.92)
#The class labels are stored as a character vector; they become the categorical column of the data frame built below:
cover <- c("Forest", "Agriculture")
#data.frame() binds the label column and the two percentage columns into one table (strings are not converted to factors since R 4.0):
losspercentages <- data.frame(cover, percent_1992, percent_2016)
#Type losspercentages followed by Enter to print it:
losspercentages
#Expected console output, kept as a comment so the script stays parseable:
#         cover percent_1992 percent_2016
# 1      Forest        89.65        52.07
# 2 Agriculture        10.34        47.92
#Which is the basic syntax of ggplot() function?
#ggplot(data, aes(x = ..., y = ...)) declares the data frame and the variable-to-aesthetic mapping; a geom_* layer added with + declares what to draw.
#geom_bar(stat = "identity") draws bars whose heights are the y values themselves (rather than counts), here the percentage cover per class:
ggplot(losspercentages, aes(x=cover, y=percent_1992, color=cover)) + geom_bar(stat="identity", fill="pink")
ggplot(losspercentages, aes(x=cover, y=percent_2016 , color=cover)) + geom_bar(stat="identity", fill="pink")
#grid.arrange() (gridExtra) lays out several ggplot objects on one page; the two bar charts are first stored in named objects, then arranged side by side in a single row:
ggD1 <- ggplot(losspercentages, aes(x=cover, y=percent_1992, color=cover)) + geom_bar(stat="identity", fill="pink")
ggD2 <- ggplot(losspercentages, aes(x=cover, y=percent_2016 , color=cover)) + geom_bar(stat="identity", fill="pink")
grid.arrange(ggD1, ggD2, nrow=1)
#Sequence of informatic commands for R_code_knitr.r
install.packages("raster")
install.packages("RStoolbox")
install.packages("ggplot2")
install.packages("gridExtra")
library(raster)
library(RStoolbox)
library(ggplot2)
library(gridExtra)
setwd("C:/lab/")
defor1 <- brick("defor1.jpg")
plotRGB(defor1, r=1, g=2, b=3, stretch="lin")
ggRGB(defor1, r=1, g=2, b=3, stretch="lin")
defor2 <- brick("defor2.jpg")
plotRGB(defor2, r=1, g=2, b=3, stretch="lin")
ggRGB(defor2, r=1, g=2, b=3, stretch="lin")
par(mfrow=c(1,2))
plotRGB(defor1, r=1, g=2, b=3, stretch="lin")
plotRGB(defor2, r=1, g=2, b=3, stretch="lin")
par(mfrow=c(1,2))
plotRGB(defor1, r=1, g=2, b=3, stretch="lin")
plotRGB(defor2, r=1, g=2, b=3, stretch="lin")
D1 <- ggRGB(defor1, r=1, g=2, b=3, stretch="lin")
D2 <- ggRGB(defor2, r=1, g=2, b=3, stretch="lin")
grid.arrange(D1, D2, nrow=2)
D1c <- unsuperClass(defor1, nClass=2)
D1c
set.seed(1)
# Fixed typo in the command recap: `r.norm(1)` is not an R function;
# the call is `rnorm(1)` (one draw from the standard normal, made
# reproducible by the seed above).
rnorm(1)
D2c <- unsuperClass(defor2, nClass=2)
D2c
plot(D2c$map)
D2c3 <- unsuperClass(defor2, nClass=3)
plot(D2c3$map)
freq(D1c$map)
freq(D2c$map)
s1 <- 306374 + 34918
s1
s2 <- 178490 + 164236
s2
prop1 <-freq(D1c$map)/ s1
prop1
prop2 <-freq(D2c$map)/ s2
prop2
percent_1992 <- c(89.65, 10.34)
percent_2016 <- c(52.07, 47.92)
cover <- c("Forest", "Agriculture")
losspercentages <- data.frame(cover, percent_1992, percent_2016)
losspercentages
ggplot(losspercentages, aes(x=cover, y=percent_1992, color=cover)) + geom_bar(stat="identity", fill="pink")
ggplot(losspercentages, aes(x=cover, y=percent_2016 , color=cover)) + geom_bar(stat="identity", fill="pink")
ggD1 <- ggplot(losspercentages, aes(x=cover, y=percent_1992, color=cover)) + geom_bar(stat="identity", fill="pink")
ggD2 <- ggplot(losspercentages, aes(x=cover, y=percent_2016 , color=cover)) + geom_bar(stat="identity", fill="pink")
grid.arrange(ggD1, ggD2, nrow=1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genomation-classes.R
\docType{class}
\name{ScoreMatrixList-class}
\alias{ScoreMatrixList-class}
\title{An S4 class for storing a set of \code{ScoreMatrix} objects}
\description{
The resulting object is an extension of a \code{list} object, where each element corresponds to a score matrix object
}
\section{Constructors}{
see \code{\link{ScoreMatrixList}}
}
\section{Coercion}{
as(from, "ScoreMatrixList"): Creates a \code{ScoreMatrixList} object from a list containing \code{\link{ScoreMatrix}} or \code{\link{ScoreMatrixBin}} objects.
}
\section{Subsetting}{
In the code snippets below, x is a ScoreMatrixList object.
\code{x[[i]]}, \code{x[[i]] <- value}: Get or set element \code{i}, where \code{i} is a numeric or character vector of length 1.
\code{x$name}, \code{x$name <- value}: Get or set element \code{name}, where \code{name} is a name or character vector of length 1.
}
\seealso{
\code{\link{ScoreMatrixList}}
}
| /man/ScoreMatrixList-class.Rd | no_license | katwre/genomation | R | false | true | 996 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genomation-classes.R
\docType{class}
\name{ScoreMatrixList-class}
\alias{ScoreMatrixList-class}
\title{An S4 class for storing a set of \code{ScoreMatrixList}}
\description{
The resulting object is an extension of a \code{list} object, where each element corresponds to a score matrix object
}
\section{Constructors}{
see \code{\link{ScoreMatrixList}}
}
\section{Coercion}{
as(from, "ScoreMatrixList"): Creates a \code{ScoreMatrixList} object from a list containing \code{\link{ScoreMatrix}} or \code{\link{ScoreMatrixBin}} objects.
}
\section{Subsetting}{
In the code snippets below, x is a ScoreMatrixList object.
\code{x[[i]]},\code{x[[i]]}: Get or set elements \code{i}, where \code{i} is a numeric or character vector of length 1.
\code{x$name}, \code{x$name}: value: Get or set element \code{name}, where \code{name} is a name or character vector of length 1.
}
\seealso{
\code{\link{ScoreMatrixList}}
}
|
## Section: 3.1.4, Page No.: 133
## pnorm(): cumulative distribution function of the normal distribution.
## Edward scored 1400 on a test distributed N(mean = 1500, sd = 300);
## his percentile is the CDF evaluated at his score, expressed in percent.
EdwardScore <- 1400
percentile <- pnorm(EdwardScore, mean = 1500, sd = 300) * 100
cat("Edward's percentile is", percentile)
##pnorm: Cumulative distribution function
EdwardScore=1400
percentile=pnorm(1400,mean=1500,sd=300)*100
cat("Edward's percentile is",percentile) |
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584303941804e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615768575-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 362 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584303941804e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wafregional_operations.R
\name{wafregional_delete_size_constraint_set}
\alias{wafregional_delete_size_constraint_set}
\title{Permanently deletes a SizeConstraintSet}
\usage{
wafregional_delete_size_constraint_set(SizeConstraintSetId, ChangeToken)
}
\arguments{
\item{SizeConstraintSetId}{[required] The \code{SizeConstraintSetId} of the SizeConstraintSet that you want to
delete. \code{SizeConstraintSetId} is returned by CreateSizeConstraintSet and
by ListSizeConstraintSets.}
\item{ChangeToken}{[required] The value returned by the most recent call to GetChangeToken.}
}
\description{
Permanently deletes a SizeConstraintSet. You can't delete a
\code{SizeConstraintSet} if it's still used in any \code{Rules} or if it still
includes any SizeConstraint objects (any filters).
}
\details{
If you just want to remove a \code{SizeConstraintSet} from a \code{Rule}, use
UpdateRule.
To permanently delete a \code{SizeConstraintSet}, perform the following
steps:
\enumerate{
\item Update the \code{SizeConstraintSet} to remove filters, if any. For more
information, see UpdateSizeConstraintSet.
\item Use GetChangeToken to get the change token that you provide in the
\code{ChangeToken} parameter of a \code{DeleteSizeConstraintSet} request.
\item Submit a \code{DeleteSizeConstraintSet} request.
}
}
\section{Request syntax}{
\preformatted{svc$delete_size_constraint_set(
SizeConstraintSetId = "string",
ChangeToken = "string"
)
}
}
\examples{
# The following example deletes a size constraint set with the ID
# example1ds3t-46da-4fdb-b8d5-abc321j569j5.
\donttest{svc$delete_size_constraint_set(
ChangeToken = "abcd12f2-46da-4fdb-b8d5-fbd4c466928f",
SizeConstraintSetId = "example1ds3t-46da-4fdb-b8d5-abc321j569j5"
)}
}
\keyword{internal}
| /cran/paws.security.identity/man/wafregional_delete_size_constraint_set.Rd | permissive | peoplecure/paws | R | false | true | 1,826 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wafregional_operations.R
\name{wafregional_delete_size_constraint_set}
\alias{wafregional_delete_size_constraint_set}
\title{Permanently deletes a SizeConstraintSet}
\usage{
wafregional_delete_size_constraint_set(SizeConstraintSetId, ChangeToken)
}
\arguments{
\item{SizeConstraintSetId}{[required] The \code{SizeConstraintSetId} of the SizeConstraintSet that you want to
delete. \code{SizeConstraintSetId} is returned by CreateSizeConstraintSet and
by ListSizeConstraintSets.}
\item{ChangeToken}{[required] The value returned by the most recent call to GetChangeToken.}
}
\description{
Permanently deletes a SizeConstraintSet. You can't delete a
\code{SizeConstraintSet} if it's still used in any \code{Rules} or if it still
includes any SizeConstraint objects (any filters).
}
\details{
If you just want to remove a \code{SizeConstraintSet} from a \code{Rule}, use
UpdateRule.
To permanently delete a \code{SizeConstraintSet}, perform the following
steps:
\enumerate{
\item Update the \code{SizeConstraintSet} to remove filters, if any. For more
information, see UpdateSizeConstraintSet.
\item Use GetChangeToken to get the change token that you provide in the
\code{ChangeToken} parameter of a \code{DeleteSizeConstraintSet} request.
\item Submit a \code{DeleteSizeConstraintSet} request.
}
}
\section{Request syntax}{
\preformatted{svc$delete_size_constraint_set(
SizeConstraintSetId = "string",
ChangeToken = "string"
)
}
}
\examples{
# The following example deletes a size constraint set with the ID
# example1ds3t-46da-4fdb-b8d5-abc321j569j5.
\donttest{svc$delete_size_constraint_set(
ChangeToken = "abcd12f2-46da-4fdb-b8d5-fbd4c466928f",
SizeConstraintSetId = "example1ds3t-46da-4fdb-b8d5-abc321j569j5"
)}
}
\keyword{internal}
|
#!/usr/bin/env Rscript
DATAPATH <- Sys.getenv("DATAPATH")
#R commands and output:
## Input data.
x = c(-1.40, -0.44, -0.30, -0.24, -0.22, -0.13, -0.05,
0.06, 0.10, 0.18, 0.20, 0.39, 0.48, 0.63, 1.01)
## Specify k, the number of outliers being tested.
k = 2
## Generate normal probability plot.
qqnorm(x)
## Create a function to compute statistic to
## test for outliers in both tails.
## Tietjen-Moore test statistic E_k for the k most extreme values in both
## tails (the k observations with the largest absolute residuals from the
## mean).  Small values of E_k indicate the presence of outliers.
##
## Args:
##   x: numeric data vector.
##   k: number of suspected outliers, 1 <= k < length(x).
## Returns: E_k = SS(trimmed data) / SS(full data), in (0, 1].
tm <- function(x, k) {
  n <- length(x)
  stopifnot(is.numeric(x), k >= 1, k < n)
  ## Absolute residuals from the mean; the k largest identify the
  ## suspected outliers.
  r <- abs(x - mean(x))
  ## Order the data by residual size and drop the k most extreme values
  ## (equivalent to the original data.frame sort, without shadowing
  ## stats::df and base::all).
  x_sorted <- x[order(r)]
  x_sub <- x_sorted[seq_len(n - k)]
  ## Return the statistic visibly (the original ended with `ek = ...`,
  ## which returned the value invisibly).
  sum((x_sub - mean(x_sub))^2) / sum((x - mean(x))^2)
}
## Call the function and compute value of test statistic for data.
ekstat = tm(x,k)
ekstat
#> [1] 0.2919994
## Compute critical value based on simulation.
## Preallocate a double vector for the simulated statistics.  The original
## `test = c(1:10000)` allocated an integer vector that was coerced (and
## fully copied) on the first assignment.  The loop structure is unchanged,
## so the RNG call sequence — and therefore the simulated critical value —
## is identical.
nsim <- 10000
test <- numeric(nsim)
for (i in seq_len(nsim)) {
  xx <- rnorm(length(x))
  test[i] <- tm(xx, k)
}
quantile(test,0.05)
#> 5%
#> 0.3150342
| /03/xx/01-r/1_eda_explore/eda35h2_Tietjen-Moore_Test_for_Outliers.r | no_license | microgenios/cod | R | false | false | 1,172 | r | #!/usr/bin/env Rscript
DATAPATH <- Sys.getenv("DATAPATH")
#R commands and output:
## Input data.
x = c(-1.40, -0.44, -0.30, -0.24, -0.22, -0.13, -0.05,
0.06, 0.10, 0.18, 0.20, 0.39, 0.48, 0.63, 1.01)
## Specify k, the number of outliers being tested.
k = 2
## Generate normal probability plot.
qqnorm(x)
## Create a function to compute statistic to
## test for outliers in both tails.
tm = function(x,k){
n = length(x)
## Compute the absolute residuals.
r = abs(x - mean(x))
## Sort data according to size of residual.
df = data.frame(x,r)
dfs = df[order(df$r),]
## Create a subset of the data without the largest k values.
klarge = c((n-k+1):n)
subx = dfs$x[-klarge]
## Compute the sums of squares.
ksub = (subx - mean(subx))**2
all = (df$x - mean(df$x))**2
## Compute the test statistic.
ek = sum(ksub)/sum(all)
}
## Call the function and compute value of test statistic for data.
ekstat = tm(x,k)
ekstat
#> [1] 0.2919994
## Compute critical value based on simulation.
test = c(1:10000)
for (i in 1:10000){
xx = rnorm(length(x))
test[i] = tm(xx,k)}
quantile(test,0.05)
#> 5%
#> 0.3150342
|
library(tolerance)
### Name: normtol.int
### Title: Normal (or Log-Normal) Tolerance Intervals
### Aliases: normtol.int
### Keywords: file
### ** Examples
## 95%/95% 2-sided normal tolerance intervals for a sample
## of size 100.
set.seed(100)              # fixed seed so the example is reproducible
x <- rnorm(100, 0, 0.2)    # simulated data: N(mean = 0, sd = 0.2)
## alpha = 0.05 -> 95% confidence level; P = 0.95 -> interval covers 95%
## of the population; side = 2 -> two-sided; method = "HE" selects the
## Howe approximation (per the tolerance package docs — confirm there).
out <- normtol.int(x = x, alpha = 0.05, P = 0.95, side = 2,
method = "HE", log.norm = FALSE)
out
## Plot the data together with the computed tolerance limits.
plottol(out, x, plot.type = "both", side = "two",
x.lab = "Normal Data")
| /data/genthat_extracted_code/tolerance/examples/normtolint.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 477 | r | library(tolerance)
### Name: normtol.int
### Title: Normal (or Log-Normal) Tolerance Intervals
### Aliases: normtol.int
### Keywords: file
### ** Examples
## 95%/95% 2-sided normal tolerance intervals for a sample
## of size 100.
set.seed(100)
x <- rnorm(100, 0, 0.2)
out <- normtol.int(x = x, alpha = 0.05, P = 0.95, side = 2,
method = "HE", log.norm = FALSE)
out
plottol(out, x, plot.type = "both", side = "two",
x.lab = "Normal Data")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distFuncs.R
\name{binPDF}
\alias{binPDF}
\title{Probability mass function for binomial distribution}
\usage{
binPDF(x, mu, var)
}
\arguments{
\item{x}{numeric, quantile}
\item{mu}{numeric, mean}
\item{var}{numeric, variance}
}
\value{
numeric
}
\description{
Binomial PMF, supports fill-rate calculator of \code{\link[=estimateFR]{estimateFR()}}
}
\keyword{internal}
| /man/binPDF.Rd | no_license | duncanrellis/SawTooth | R | false | true | 447 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distFuncs.R
\name{binPDF}
\alias{binPDF}
\title{Probability mass function for binomial distribution}
\usage{
binPDF(x, mu, var)
}
\arguments{
\item{x}{numeric, quantile}
\item{mu}{numeric, mean}
\item{var}{numeric, variance}
}
\value{
numeric
}
\description{
Binomial PMF, supports fill-rate calculator of \code{\link[=estimateFR]{estimateFR()}}
}
\keyword{internal}
|
# Copyright 2014 Revolution Analytics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#!/bin/bash
## List Transpose
##
## Transpose a list of lists
##
## @param l list
##
## @examples
##
## ravro:::t.list(mtcars)[[1]]
#' @import Rcpp
#' @useDynLib ravro
t.list <- function(x) {
  # An empty list has nothing to transpose; hand it straight back.
  # Otherwise delegate to the compiled C routine registered by the
  # ravro package.
  if (length(x) > 0) {
    return(.Call("t_list", x, PACKAGE = "ravro"))
  }
  x
}
## Takes a data.table with potentially "nested" data.table columns
## and flattens it
flatten <- function(x){
structure(do.call(c,mapply(function(xj,xjname)
# If it's a datatable/Avro "record"
# "flatten" it
if(is.data.table(xj)){
xj <- flatten(xj)
names(xj) <-
paste0(xjname,".",names(xj))
# Return it as a list, otherwise it's created
# as a single column containing a dataframe,
# which is cool, but not desired
unclass(xj)
}else {
# Make sure it's named correctly
lst <- list(xj)
names(lst) <- xjname
lst
},
x,
names(x),
SIMPLIFY=F,
USE.NAMES=F)),
class="data.table",row.names=attr(x,"row.names"))
}
## Inverse of flatten(): columns whose names contain "." are regrouped
## into nested data.table columns, recursively (e.g. columns "a.b" and
## "a.c" become a single data.table column "a" with fields "b" and "c").
unflatten <- function(x){
  xnames <- names(x)
  # Positions of columns that belong to some nested record ("rec.field").
  unflat_index <- which(grepl(".",xnames,fixed=T))
  if (length(unflat_index)>0){
    unflat_names <- xnames[unflat_index]
    # Record name = text before the first "."; group column positions by it.
    nested_record <- sapply(strsplit(unflat_names,".",fixed=T),`[[`,1L)
    record_splits <- split(unflat_index,nested_record)
    record_index <- sapply(record_splits,`[[`,1L) # first occurrence of each record: reused as the slot for the new nested column
    other_index <- unlist(lapply(record_splits,`[`,-1L)) # remaining columns of each record, dropped at the end
    x[record_index] <- lapply(names(record_splits),function(record){
      original_names <- record_splits[[record]]
      # Strip the "record." prefix (+2L also skips the dot separator).
      nested_names <- substring(xnames[original_names],nchar(record)+2L)
      xi <- structure(x[original_names],
        names=nested_names,
        class="data.table",
        row.names=attr(x,"row.names"))
      # Fields may themselves contain ".", so recurse.
      unflatten(xi)
    })
    names(x)[record_index] <- names(record_splits)
    x <- x[-other_index]
  }
  x
}
# Wrap each element of `x` in its own single-element list, using the
# corresponding entry of `names` as that element's name.  The outer list
# itself stays unnamed, exactly as before (Map would propagate `names`
# onto the result, so they are stripped with unname()).
enlist <- function(x,names){
  unname(Map(function(item, label) setNames(list(item), label), x, names))
}
} | /R/utils.R | no_license | orenov/ravro | R | false | false | 2,965 | r | # Copyright 2014 Revolution Analytics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#!/bin/bash
## List Transpose
##
## Transpose a list of lists
##
## @param l list
##
## @examples
##
## ravro:::t.list(mtcars)[[1]]
#' @import Rcpp
#' @useDynLib ravro
t.list <- function(x) {
if(length(x) == 0){
x
} else {
.Call("t_list", x, PACKAGE = "ravro")
}
}
## Takes a data.table with potentially "nested" data.table columns
## and flattens it
flatten <- function(x){
structure(do.call(c,mapply(function(xj,xjname)
# If it's a datatable/Avro "record"
# "flatten" it
if(is.data.table(xj)){
xj <- flatten(xj)
names(xj) <-
paste0(xjname,".",names(xj))
# Return it as a list, otherwise it's created
# as a single column containing a dataframe,
# which is cool, but not desired
unclass(xj)
}else {
# Make sure it's named correctly
lst <- list(xj)
names(lst) <- xjname
lst
},
x,
names(x),
SIMPLIFY=F,
USE.NAMES=F)),
class="data.table",row.names=attr(x,"row.names"))
}
## Takes a data.table with "." names and unflattens it
## so that it has nested data.table columns
unflatten <- function(x){
xnames <- names(x)
unflat_index <- which(grepl(".",xnames,fixed=T))
if (length(unflat_index)>0){
unflat_names <- xnames[unflat_index]
nested_record <- sapply(strsplit(unflat_names,".",fixed=T),`[[`,1L)
record_splits <- split(unflat_index,nested_record)
record_index <- sapply(record_splits,`[[`,1L) # grab the first occurence of each record
other_index <- unlist(lapply(record_splits,`[`,-1L)) # grab the rest
x[record_index] <- lapply(names(record_splits),function(record){
original_names <- record_splits[[record]]
nested_names <- substring(xnames[original_names],nchar(record)+2L)
xi <- structure(x[original_names],
names=nested_names,
class="data.table",
row.names=attr(x,"row.names"))
unflatten(xi)
})
names(x)[record_index] <- names(record_splits)
x <- x[-other_index]
}
x
}
# wrap each element of x as a single-element list,
# using the corresponding element from names as the name for the single element
enlist <- function(x,names){
mapply(
function(xi,name){
xlst=list(xi)
names(xlst) = name
xlst
},x,names,
SIMPLIFY=F,
USE.NAMES=F)
} |
#R-script to analyze DNA-rmsd evolution
library(ggplot2)
library(fitdistrplus)
library(gtools)
library(png)
library(grid)
library(reshape2)
library(gridExtra)
library(plyr)
#dfcryst$X<-as.factor(dfcryst$X)
dna_prot<-read.csv('../analysis_data/dna_prot_raw_df.csv')
rr<-read.csv('../analysis_data/resid_resname_df.csv',stringsAsFactors=FALSE)
colnames(rr)<-c('DNA_chain','DNA_resid','DNA_resname')
dna_prot=merge(dna_prot,rr)
# Classify a DNA base atom as facing the major or minor groove.
#
# `df` is a named character vector (one row passed in by apply()) with
# elements 'DNA_atom' and 'DNA_resname'.  Returns the string 'major',
# 'minor', or the literal string 'NA' for atoms that are not
# groove-facing base atoms.
#
# Fix: the original if-chain had no fallthrough, so an unrecognized
# residue name silently returned NULL, which breaks apply()'s
# simplification of the result downstream; such rows now also yield 'NA'.
m_m <- function(df){
  atom <- df[['DNA_atom']]
  resname <- df[['DNA_resname']]
  # Groove-facing atom tables per residue (same sets as the original
  # per-residue branches).
  major <- switch(resname,
    ADE = c('N6','C6','C5','N7','C8'),
    GUA = c('O6','C6','C5','N7','C8'),
    THY = c('C6','C5','C4','C5M','O4'),
    CYT = c('C6','C5','C4','N4'),
    character(0))
  minor <- switch(resname,
    ADE = c('C2','N3','C4'),
    GUA = c('N2','C2','N3','C4'),
    THY = c('N3','C2','O2'),
    CYT = c('N3','C2','O2'),
    character(0))
  if (atom %in% major) return('major')
  if (atom %in% minor) return('minor')
  'NA'
}
dna_prot$groove<-as.factor(apply(dna_prot[,c('DNA_atom','DNA_resname')],1,m_m))
dna_prot_h3<-subset(dna_prot,type=='SC' & (PROT_chain%in%c('CHE','CHA')) & (PROT_resid%in%c(5,8,26)) & groove=='minor')
# dfm=melt(df,id.var=c("Time"))
dna_prot_h3=ddply(dna_prot_h3,c('DNA_chain', 'DNA_resid', 'PROT_chain', 'PROT_resid', 'Time'),summarize,num=length(param1))
dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']=-dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']
head(dna_prot_h3)
dna_prot_h3$PROT_resid=factor(dna_prot_h3$PROT_resid)
theme_set(theme_bw()+theme(panel.border =element_rect(linetype = "dashed", colour = "white")))
dna_prot_h3$PROT_chain=revalue(dna_prot_h3$PROT_chain,c('CHA'='Chain A','CHB'='Chain B','CHC'='Chain C','CHD'='Chain D','CHE'='Chain E','CHF'='Chain F','CHG'='Chain G','CHH'='Chain H'))
a<-ggplot(data=dna_prot_h3,aes(x=Time,y=(DNA_resid),color=PROT_resid))+
# geom_tile(aes(fill=RMSD)) +
geom_point()+
xlab('Time')+guides(colour = guide_legend(title = "Residue #"))+
# theme(plot.margin = unit(c(1,1,1,1), "cm"))+
ylab("DNA position")+ggtitle("H3, contacts of anchor residues with bases in minor grooves")+facet_grid(PROT_chain~.,scales='free')
ggsave("../analysis_data/pub_int_dna_anch_dyn_h3.png",plot=a,height=3,width=12)
####H4
dna_prot_h3<-subset(dna_prot,type=='SC' & (PROT_chain%in%c('CHF','CHB')) & (PROT_resid%in%c(8,16,17)) & groove=='minor')
# dfm=melt(df,id.var=c("Time"))
dna_prot_h3=ddply(dna_prot_h3,c('DNA_chain', 'DNA_resid', 'PROT_chain', 'PROT_resid', 'Time'),summarize,num=length(param1))
dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']=-dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']
head(dna_prot_h3)
dna_prot_h3$PROT_resid=factor(dna_prot_h3$PROT_resid)
theme_set(theme_bw()+theme(panel.border =element_rect(linetype = "dashed", colour = "white")))
dna_prot_h3$PROT_chain=revalue(dna_prot_h3$PROT_chain,c('CHA'='Chain A','CHB'='Chain B','CHC'='Chain C','CHD'='Chain D','CHE'='Chain E','CHF'='Chain F','CHG'='Chain G','CHH'='Chain H'))
a<-ggplot(data=dna_prot_h3,aes(x=Time,y=(DNA_resid),color=PROT_resid))+
# geom_tile(aes(fill=RMSD)) +
geom_point()+
xlab('Time')+guides(colour = guide_legend(title = "Residue #"))+
# theme(plot.margin = unit(c(1,1,1,1), "cm"))+
ylab("DNA position")+ggtitle("H4, contacts of anchor residues with bases in minor grooves")+facet_grid(PROT_chain~.,scales='free')
ggsave("../analysis_data/pub_int_dna_anch_dyn_h4.png",plot=a,height=3,width=12)
dna_prot_h3<-subset(dna_prot,type=='SC' & (PROT_chain%in%c('CHG','CHC')) & (PROT_resid%in%c(3,9,11,13)) & groove=='minor')
# dfm=melt(df,id.var=c("Time"))
dna_prot_h3=ddply(dna_prot_h3,c('DNA_chain', 'DNA_resid', 'PROT_chain', 'PROT_resid', 'Time'),summarize,num=length(param1))
dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']=-dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']
head(dna_prot_h3)
dna_prot_h3$PROT_resid=factor(dna_prot_h3$PROT_resid)
theme_set(theme_bw()+theme(panel.border =element_rect(linetype = "dashed", colour = "white")))
dna_prot_h3$PROT_chain=revalue(dna_prot_h3$PROT_chain,c('CHA'='Chain A','CHB'='Chain B','CHC'='Chain C','CHD'='Chain D','CHE'='Chain E','CHF'='Chain F','CHG'='Chain G','CHH'='Chain H'))
a<-ggplot(data=dna_prot_h3,aes(x=Time,y=(DNA_resid),color=PROT_resid))+
# geom_tile(aes(fill=RMSD)) +
geom_point()+
xlab('Time')+guides(colour = guide_legend(title = "Residue #"))+
# theme(plot.margin = unit(c(1,1,1,1), "cm"))+
ylab("DNA position")+ggtitle("H2A N-term, contacts of anchor residues with bases in minor grooves")+facet_grid(PROT_chain~.,scales='free')
ggsave("../analysis_data/pub_int_dna_anch_dyn_h2a_n.png",plot=a,height=3,width=12)
dna_prot_h3<-subset(dna_prot,type=='SC' & (PROT_chain%in%c('CHG','CHC')) & (PROT_resid%in%c(124,126,128)) & groove=='minor')
# dfm=melt(df,id.var=c("Time"))
dna_prot_h3=ddply(dna_prot_h3,c('DNA_chain', 'DNA_resid', 'PROT_chain', 'PROT_resid', 'Time'),summarize,num=length(param1))
dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']=-dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']
head(dna_prot_h3)
dna_prot_h3$PROT_resid=factor(dna_prot_h3$PROT_resid)
theme_set(theme_bw()+theme(panel.border =element_rect(linetype = "dashed", colour = "white")))
dna_prot_h3$PROT_chain=revalue(dna_prot_h3$PROT_chain,c('CHA'='Chain A','CHB'='Chain B','CHC'='Chain C','CHD'='Chain D','CHE'='Chain E','CHF'='Chain F','CHG'='Chain G','CHH'='Chain H'))
a<-ggplot(data=dna_prot_h3,aes(x=Time,y=(DNA_resid),color=PROT_resid))+
# geom_tile(aes(fill=RMSD)) +
geom_point()+
xlab('Time')+guides(colour = guide_legend(title = "Residue #"))+
# theme(plot.margin = unit(c(1,1,1,1), "cm"))+
ylab("DNA position")+ggtitle("H2A C-term, contacts of anchor residues with bases in minor grooves")+facet_grid(PROT_chain~.,scales='free')
ggsave("../analysis_data/pub_int_dna_anch_dyn_h2a_c.png",plot=a,height=3,width=12)
dna_prot_h3<-subset(dna_prot,type=='SC' & (PROT_chain%in%c('CHH','CHD')) & (PROT_resid%in%c(5-3,29-3,30-3,33-3)) & groove=='minor')
# dfm=melt(df,id.var=c("Time"))
dna_prot_h3$PROT_resid=dna_prot_h3$PROT_resid+3
dna_prot_h3=ddply(dna_prot_h3,c('DNA_chain', 'DNA_resid', 'PROT_chain', 'PROT_resid', 'Time'),summarize,num=length(param1))
dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']=-dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']
head(dna_prot_h3)
dna_prot_h3$PROT_resid=factor(dna_prot_h3$PROT_resid)
theme_set(theme_bw()+theme(panel.border =element_rect(linetype = "dashed", colour = "white")))
dna_prot_h3$PROT_chain=revalue(dna_prot_h3$PROT_chain,c('CHA'='Chain A','CHB'='Chain B','CHC'='Chain C','CHD'='Chain D','CHE'='Chain E','CHF'='Chain F','CHG'='Chain G','CHH'='Chain H'))
a<-ggplot(data=dna_prot_h3,aes(x=Time,y=(DNA_resid),color=PROT_resid))+
# geom_tile(aes(fill=RMSD)) +
geom_point()+
xlab('Time')+guides(colour = guide_legend(title = "Residue #"))+
# theme(plot.margin = unit(c(1,1,1,1), "cm"))+
ylab("DNA position")+ggtitle("H2B, contacts of anchor residues with bases in minor grooves")+facet_grid(PROT_chain~.,scales='free')
ggsave("../analysis_data/pub_int_dna_anch_dyn_h2b.png",plot=a,height=3,width=12)
q()
####H2B
h2bdata=df[df$Chain=='D' | df$Chain=='H',]
h2bdata=arrange(h2bdata,Chain,Resid,desc(sasa_fract))
d<-ggplot(data=h2bdata,aes(x=Resid,y=(sasa_fract)*(-100),fill=Chain,color=Chain))+
# geom_tile(aes(fill=RMSD)) +
geom_bar(data=subset(h2bdata,Chain=='D'),stat='identity',position='dodge',width=0.7)+#scale_y_continuous(limits=c(-101,50),breaks=seq(-100,0,by=20),labels=seq(100,0,by=-20),expand=c(0,0))+
geom_point()+scale_y_continuous(limits=c(-101,60),breaks=seq(-100,0,by=20),labels=seq(100,0,by=-20),expand=c(0,0))+
scale_x_continuous(limits=c(0,123),labels=c(),breaks=c(),expand=c(0,0))+
# scale_fill_gradient2(limits=c(0,6),low="blue",mid="green", high="red",midpoint=3.0,guide_legend(title="RMSD, A"))+
# scale_y_continuous(limits=c(0,1210),breaks = round(seq(0,1000, by = 100),1))+
xlab('')+
# theme(plot.margin = unit(c(1,1,1,1), "cm"))+
ylab("Relative SASA, %")+#ggtitle("RMSD, C-alpha, nucleosome core. Histone H3, A")+
annotation_custom(h2b, ymin=0, ymax=60, xmin=0.5,xmax=122.5)
ggsave("../analysis_data/pub_prot_sasa_h2b.png",plot=d,height=2,width=12)
q<-arrangeGrob(a,b,c,d,ncol=1)
ggsave("../analysis_data/pub_prot_sasa.png",plot=q,height=8,width=12)
e<-ggplot(data=df[df$Chain=='CHE',],aes(x=Resid,y=Time))+
geom_tile(aes(fill=RMSD)) +
scale_fill_gradient2(limits=c(0,6),low="blue",mid="green", high="red",midpoint=3.0,guide_legend(title="RMSD, A"))+
scale_y_continuous(limits=c(0,1210),breaks = round(seq(0,1000, by = 100),1))+
xlab("Residue number")+ylab("Time, ns")+ggtitle("RMSD, C-alpha, nucleosome core. Histone H3, E")+
annotation_custom(h3, ymin=1000, ymax=1270, xmin=43.5,xmax=131.5)
b<-ggplot(data=df[df$Chain=='CHB',],aes(x=Resid,y=Time))+
geom_tile(aes(fill=RMSD)) +
scale_fill_gradient2(limits=c(0,6),low="blue",mid="green", high="red",midpoint=3.0,guide_legend(title="RMSD, A"))+
scale_y_continuous(limits=c(0,1260),breaks = round(seq(0,1000, by = 100),1))+
xlab("Residue number")+ylab("Time, ns")+ggtitle("RMSD, C-alpha, nucleosome core. Histone H4, B")+
annotation_custom(h4, ymin=1000, ymax=1335, xmin=23.5,xmax=98.5)
f<-ggplot(data=df[df$Chain=='CHF',],aes(x=Resid,y=Time))+
geom_tile(aes(fill=RMSD)) +
scale_fill_gradient2(limits=c(0,6),low="blue",mid="green", high="red",midpoint=3.0,guide_legend(title="RMSD, A"))+
scale_y_continuous(limits=c(0,1260),breaks = round(seq(0,1000, by = 100),1))+
xlab("Residue number")+ylab("Time, ns")+ggtitle("RMSD, C-alpha, nucleosome core. Histone H4, F")+
annotation_custom(h4, ymin=1000, ymax=1335, xmin=23.5,xmax=98.5)
c<-ggplot(data=df[df$Chain=='CHC',],aes(x=Resid,y=Time))+
geom_tile(aes(fill=RMSD)) +
scale_fill_gradient2(limits=c(0,6),low="blue",mid="green", high="red",midpoint=3.0,guide_legend(title="RMSD, A"))+
scale_y_continuous(limits=c(0,1200),breaks = round(seq(0,1000, by = 100),1))+
xlab("Residue number")+ylab("Time, ns")+ggtitle("RMSD, C-alpha, nucleosome core. Histone H2A, C")+
annotation_custom(h2a, ymin=1000, ymax=1270, xmin=15.5,xmax=117.5)
g<-ggplot(data=df[df$Chain=='CHG',],aes(x=Resid,y=Time))+
geom_tile(aes(fill=RMSD)) +
scale_fill_gradient2(limits=c(0,6),low="blue",mid="green", high="red",midpoint=3.0,guide_legend(title="RMSD, A"))+
scale_y_continuous(limits=c(0,1200),breaks = round(seq(0,1000, by = 100),1))+
xlab("Residue number")+ylab("Time, ns")+ggtitle("RMSD, C-alpha, nucleosome core. Histone H2A, G")+
annotation_custom(h2a, ymin=1000, ymax=1270, xmin=15.5,xmax=117.5)
d<-ggplot(data=df[df$Chain=='CHD',],aes(x=Resid+3,y=Time))+
geom_tile(aes(fill=RMSD)) +
scale_fill_gradient2(limits=c(0,6),low="blue",mid="green", high="red",midpoint=3.0,guide_legend(title="RMSD, A"))+
scale_y_continuous(limits=c(0,1220),breaks = round(seq(0,1000, by = 100),1))+
xlab("Residue number")+ylab("Time, ns")+ggtitle("RMSD, C-alpha, nucleosome core. Histone H2B, D")+
annotation_custom(h2b, ymin=1000, ymax=1290, xmin=32.5,xmax=123.5)
h<-ggplot(data=df[df$Chain=='CHH',],aes(x=Resid+3,y=Time))+
geom_tile(aes(fill=RMSD)) +
scale_fill_gradient2(limits=c(0,6),low="blue",mid="green", high="red",midpoint=3.0,guide_legend(title="RMSD, A"))+
scale_y_continuous(limits=c(0,1220),breaks = round(seq(0,1000, by = 100),1))+
xlab("Residue number")+ylab("Time, ns")+ggtitle("RMSD, C-alpha, nucleosome core. Histone H2B, H")+
annotation_custom(h2b, ymin=1000, ymax=1290, xmin=32.5,xmax=123.5)
#facet_grid(Chain~.)
q<-arrangeGrob(a,b,e,f,c,d,g,h,ncol=2)
ggsave("../analysis_data/pub_prot_rmsd.png",plot=q,height=12,width=12)
#plot.new()
# z <- locator(1)
quit()
q<-arrangeGrob(t,s,ncol=1)
img <- readPNG(paste("dna_par_labels/",i,".png",sep=''))
g <- rasterGrob(img, interpolate=TRUE)
+ annotation_custom(g, ymin=0, xmax=-stdev+meand)
| /MD/NAMD/nucl/nucleosome_CHARMM/simul/analysis_scripts_link_zw/pub_int_dna_anch_dyn.r | no_license | TienMPhan/MolModEdu | R | false | false | 11,986 | r | #R-script to analyze DNA-rmsd evolution
# Load plotting / data-wrangling dependencies for the anchor-residue analysis.
library(ggplot2)
library(fitdistrplus)
library(gtools)
library(png)
library(grid)
library(reshape2)
library(gridExtra)
library(plyr)
#dfcryst$X<-as.factor(dfcryst$X)
# Raw DNA-protein contact table (presumably one row per atom-atom contact
# per frame -- confirm against the upstream contact-extraction script).
dna_prot<-read.csv('../analysis_data/dna_prot_raw_df.csv')
# Lookup table mapping (DNA chain, resid) to residue name; merged onto the
# contacts so each contact row knows its DNA base identity.
rr<-read.csv('../analysis_data/resid_resname_df.csv',stringsAsFactors=FALSE)
colnames(rr)<-c('DNA_chain','DNA_resid','DNA_resname')
dna_prot=merge(dna_prot,rr)
# Classify a DNA base atom as facing the major or minor groove.
# `df` is a named character vector (one row from apply) carrying the
# elements 'DNA_atom' and 'DNA_resname'. Returns 'major', 'minor', or the
# literal string 'NA' for other atoms of a known base; an unrecognised
# residue name falls through to an invisible NULL, as in the original
# if-chain version.
m_m <- function(df){
  atom <- df[['DNA_atom']]
  switch(df[['DNA_resname']],
    ADE = if (atom %in% c('N6','C6','C5','N7','C8')) 'major'
          else if (atom %in% c('C2','N3','C4')) 'minor'
          else 'NA',
    GUA = if (atom %in% c('O6','C6','C5','N7','C8')) 'major'
          else if (atom %in% c('N2','C2','N3','C4')) 'minor'
          else 'NA',
    THY = if (atom %in% c('C6','C5','C4','C5M','O4')) 'major'
          else if (atom %in% c('N3','C2','O2')) 'minor'
          else 'NA',
    CYT = if (atom %in% c('C6','C5','C4','N4')) 'major'
          else if (atom %in% c('N3','C2','O2')) 'minor'
          else 'NA')
}
# Tag every contact atom with the groove it faces ('major'/'minor'/'NA').
dna_prot$groove<-as.factor(apply(dna_prot[,c('DNA_atom','DNA_resname')],1,m_m))
# --- H3 (chains A/E): side-chain contacts of anchor residues 5, 8, 26
# --- with bases in the minor groove, tracked over simulation time.
dna_prot_h3<-subset(dna_prot,type=='SC' & (PROT_chain%in%c('CHE','CHA')) & (PROT_resid%in%c(5,8,26)) & groove=='minor')
# dfm=melt(df,id.var=c("Time"))
# Number of contacts per (DNA residue, protein residue, frame).
dna_prot_h3=ddply(dna_prot_h3,c('DNA_chain', 'DNA_resid', 'PROT_chain', 'PROT_resid', 'Time'),summarize,num=length(param1))
# Mirror chain-J positions to negative numbers so both DNA strands share
# one symmetric axis around the dyad.
dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']=-dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']
head(dna_prot_h3)
dna_prot_h3$PROT_resid=factor(dna_prot_h3$PROT_resid)
theme_set(theme_bw()+theme(panel.border =element_rect(linetype = "dashed", colour = "white")))
dna_prot_h3$PROT_chain=revalue(dna_prot_h3$PROT_chain,c('CHA'='Chain A','CHB'='Chain B','CHC'='Chain C','CHD'='Chain D','CHE'='Chain E','CHF'='Chain F','CHG'='Chain G','CHH'='Chain H'))
a<-ggplot(data=dna_prot_h3,aes(x=Time,y=(DNA_resid),color=PROT_resid))+
# geom_tile(aes(fill=RMSD)) +
geom_point()+
xlab('Time')+guides(colour = guide_legend(title = "Residue #"))+
# theme(plot.margin = unit(c(1,1,1,1), "cm"))+
ylab("DNA position")+ggtitle("H3, contacts of anchor residues with bases in minor grooves")+facet_grid(PROT_chain~.,scales='free')
ggsave("../analysis_data/pub_int_dna_anch_dyn_h3.png",plot=a,height=3,width=12)
####H4
# --- H4 (chains B/F): anchor residues 8, 16, 17; same pipeline as H3. ---
dna_prot_h3<-subset(dna_prot,type=='SC' & (PROT_chain%in%c('CHF','CHB')) & (PROT_resid%in%c(8,16,17)) & groove=='minor')
# dfm=melt(df,id.var=c("Time"))
dna_prot_h3=ddply(dna_prot_h3,c('DNA_chain', 'DNA_resid', 'PROT_chain', 'PROT_resid', 'Time'),summarize,num=length(param1))
dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']=-dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']
head(dna_prot_h3)
dna_prot_h3$PROT_resid=factor(dna_prot_h3$PROT_resid)
theme_set(theme_bw()+theme(panel.border =element_rect(linetype = "dashed", colour = "white")))
dna_prot_h3$PROT_chain=revalue(dna_prot_h3$PROT_chain,c('CHA'='Chain A','CHB'='Chain B','CHC'='Chain C','CHD'='Chain D','CHE'='Chain E','CHF'='Chain F','CHG'='Chain G','CHH'='Chain H'))
a<-ggplot(data=dna_prot_h3,aes(x=Time,y=(DNA_resid),color=PROT_resid))+
# geom_tile(aes(fill=RMSD)) +
geom_point()+
xlab('Time')+guides(colour = guide_legend(title = "Residue #"))+
# theme(plot.margin = unit(c(1,1,1,1), "cm"))+
ylab("DNA position")+ggtitle("H4, contacts of anchor residues with bases in minor grooves")+facet_grid(PROT_chain~.,scales='free')
ggsave("../analysis_data/pub_int_dna_anch_dyn_h4.png",plot=a,height=3,width=12)
# --- H2A N-terminus (chains C/G): anchor residues 3, 9, 11, 13. ---
dna_prot_h3<-subset(dna_prot,type=='SC' & (PROT_chain%in%c('CHG','CHC')) & (PROT_resid%in%c(3,9,11,13)) & groove=='minor')
# dfm=melt(df,id.var=c("Time"))
dna_prot_h3=ddply(dna_prot_h3,c('DNA_chain', 'DNA_resid', 'PROT_chain', 'PROT_resid', 'Time'),summarize,num=length(param1))
dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']=-dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']
head(dna_prot_h3)
dna_prot_h3$PROT_resid=factor(dna_prot_h3$PROT_resid)
theme_set(theme_bw()+theme(panel.border =element_rect(linetype = "dashed", colour = "white")))
dna_prot_h3$PROT_chain=revalue(dna_prot_h3$PROT_chain,c('CHA'='Chain A','CHB'='Chain B','CHC'='Chain C','CHD'='Chain D','CHE'='Chain E','CHF'='Chain F','CHG'='Chain G','CHH'='Chain H'))
a<-ggplot(data=dna_prot_h3,aes(x=Time,y=(DNA_resid),color=PROT_resid))+
# geom_tile(aes(fill=RMSD)) +
geom_point()+
xlab('Time')+guides(colour = guide_legend(title = "Residue #"))+
# theme(plot.margin = unit(c(1,1,1,1), "cm"))+
ylab("DNA position")+ggtitle("H2A N-term, contacts of anchor residues with bases in minor grooves")+facet_grid(PROT_chain~.,scales='free')
ggsave("../analysis_data/pub_int_dna_anch_dyn_h2a_n.png",plot=a,height=3,width=12)
# --- H2A C-terminus (chains C/G): anchor residues 124, 126, 128. ---
dna_prot_h3<-subset(dna_prot,type=='SC' & (PROT_chain%in%c('CHG','CHC')) & (PROT_resid%in%c(124,126,128)) & groove=='minor')
# dfm=melt(df,id.var=c("Time"))
dna_prot_h3=ddply(dna_prot_h3,c('DNA_chain', 'DNA_resid', 'PROT_chain', 'PROT_resid', 'Time'),summarize,num=length(param1))
dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']=-dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']
head(dna_prot_h3)
dna_prot_h3$PROT_resid=factor(dna_prot_h3$PROT_resid)
theme_set(theme_bw()+theme(panel.border =element_rect(linetype = "dashed", colour = "white")))
dna_prot_h3$PROT_chain=revalue(dna_prot_h3$PROT_chain,c('CHA'='Chain A','CHB'='Chain B','CHC'='Chain C','CHD'='Chain D','CHE'='Chain E','CHF'='Chain F','CHG'='Chain G','CHH'='Chain H'))
a<-ggplot(data=dna_prot_h3,aes(x=Time,y=(DNA_resid),color=PROT_resid))+
# geom_tile(aes(fill=RMSD)) +
geom_point()+
xlab('Time')+guides(colour = guide_legend(title = "Residue #"))+
# theme(plot.margin = unit(c(1,1,1,1), "cm"))+
ylab("DNA position")+ggtitle("H2A C-term, contacts of anchor residues with bases in minor grooves")+facet_grid(PROT_chain~.,scales='free')
ggsave("../analysis_data/pub_int_dna_anch_dyn_h2a_c.png",plot=a,height=3,width=12)
# --- H2B (chains D/H): anchors given in shifted numbering (5-3, 29-3, ...);
# --- +3 below restores the conventional residue numbers for plotting.
dna_prot_h3<-subset(dna_prot,type=='SC' & (PROT_chain%in%c('CHH','CHD')) & (PROT_resid%in%c(5-3,29-3,30-3,33-3)) & groove=='minor')
# dfm=melt(df,id.var=c("Time"))
dna_prot_h3$PROT_resid=dna_prot_h3$PROT_resid+3
dna_prot_h3=ddply(dna_prot_h3,c('DNA_chain', 'DNA_resid', 'PROT_chain', 'PROT_resid', 'Time'),summarize,num=length(param1))
dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']=-dna_prot_h3[dna_prot_h3$DNA_chain=='CHJ','DNA_resid']
head(dna_prot_h3)
dna_prot_h3$PROT_resid=factor(dna_prot_h3$PROT_resid)
theme_set(theme_bw()+theme(panel.border =element_rect(linetype = "dashed", colour = "white")))
dna_prot_h3$PROT_chain=revalue(dna_prot_h3$PROT_chain,c('CHA'='Chain A','CHB'='Chain B','CHC'='Chain C','CHD'='Chain D','CHE'='Chain E','CHF'='Chain F','CHG'='Chain G','CHH'='Chain H'))
a<-ggplot(data=dna_prot_h3,aes(x=Time,y=(DNA_resid),color=PROT_resid))+
# geom_tile(aes(fill=RMSD)) +
geom_point()+
xlab('Time')+guides(colour = guide_legend(title = "Residue #"))+
# theme(plot.margin = unit(c(1,1,1,1), "cm"))+
ylab("DNA position")+ggtitle("H2B, contacts of anchor residues with bases in minor grooves")+facet_grid(PROT_chain~.,scales='free')
ggsave("../analysis_data/pub_int_dna_anch_dyn_h2b.png",plot=a,height=3,width=12)
# End of the live script; everything below this point never executes.
q()
####H2B
h2bdata=df[df$Chain=='D' | df$Chain=='H',]
h2bdata=arrange(h2bdata,Chain,Resid,desc(sasa_fract))
d<-ggplot(data=h2bdata,aes(x=Resid,y=(sasa_fract)*(-100),fill=Chain,color=Chain))+
# geom_tile(aes(fill=RMSD)) +
geom_bar(data=subset(h2bdata,Chain=='D'),stat='identity',position='dodge',width=0.7)+#scale_y_continuous(limits=c(-101,50),breaks=seq(-100,0,by=20),labels=seq(100,0,by=-20),expand=c(0,0))+
geom_point()+scale_y_continuous(limits=c(-101,60),breaks=seq(-100,0,by=20),labels=seq(100,0,by=-20),expand=c(0,0))+
scale_x_continuous(limits=c(0,123),labels=c(),breaks=c(),expand=c(0,0))+
# scale_fill_gradient2(limits=c(0,6),low="blue",mid="green", high="red",midpoint=3.0,guide_legend(title="RMSD, A"))+
# scale_y_continuous(limits=c(0,1210),breaks = round(seq(0,1000, by = 100),1))+
xlab('')+
# theme(plot.margin = unit(c(1,1,1,1), "cm"))+
ylab("Relative SASA, %")+#ggtitle("RMSD, C-alpha, nucleosome core. Histone H3, A")+
annotation_custom(h2b, ymin=0, ymax=60, xmin=0.5,xmax=122.5)
ggsave("../analysis_data/pub_prot_sasa_h2b.png",plot=d,height=2,width=12)
q<-arrangeGrob(a,b,c,d,ncol=1)
ggsave("../analysis_data/pub_prot_sasa.png",plot=q,height=8,width=12)
e<-ggplot(data=df[df$Chain=='CHE',],aes(x=Resid,y=Time))+
geom_tile(aes(fill=RMSD)) +
scale_fill_gradient2(limits=c(0,6),low="blue",mid="green", high="red",midpoint=3.0,guide_legend(title="RMSD, A"))+
scale_y_continuous(limits=c(0,1210),breaks = round(seq(0,1000, by = 100),1))+
xlab("Residue number")+ylab("Time, ns")+ggtitle("RMSD, C-alpha, nucleosome core. Histone H3, E")+
annotation_custom(h3, ymin=1000, ymax=1270, xmin=43.5,xmax=131.5)
b<-ggplot(data=df[df$Chain=='CHB',],aes(x=Resid,y=Time))+
geom_tile(aes(fill=RMSD)) +
scale_fill_gradient2(limits=c(0,6),low="blue",mid="green", high="red",midpoint=3.0,guide_legend(title="RMSD, A"))+
scale_y_continuous(limits=c(0,1260),breaks = round(seq(0,1000, by = 100),1))+
xlab("Residue number")+ylab("Time, ns")+ggtitle("RMSD, C-alpha, nucleosome core. Histone H4, B")+
annotation_custom(h4, ymin=1000, ymax=1335, xmin=23.5,xmax=98.5)
f<-ggplot(data=df[df$Chain=='CHF',],aes(x=Resid,y=Time))+
geom_tile(aes(fill=RMSD)) +
scale_fill_gradient2(limits=c(0,6),low="blue",mid="green", high="red",midpoint=3.0,guide_legend(title="RMSD, A"))+
scale_y_continuous(limits=c(0,1260),breaks = round(seq(0,1000, by = 100),1))+
xlab("Residue number")+ylab("Time, ns")+ggtitle("RMSD, C-alpha, nucleosome core. Histone H4, F")+
annotation_custom(h4, ymin=1000, ymax=1335, xmin=23.5,xmax=98.5)
c<-ggplot(data=df[df$Chain=='CHC',],aes(x=Resid,y=Time))+
geom_tile(aes(fill=RMSD)) +
scale_fill_gradient2(limits=c(0,6),low="blue",mid="green", high="red",midpoint=3.0,guide_legend(title="RMSD, A"))+
scale_y_continuous(limits=c(0,1200),breaks = round(seq(0,1000, by = 100),1))+
xlab("Residue number")+ylab("Time, ns")+ggtitle("RMSD, C-alpha, nucleosome core. Histone H2A, C")+
annotation_custom(h2a, ymin=1000, ymax=1270, xmin=15.5,xmax=117.5)
g<-ggplot(data=df[df$Chain=='CHG',],aes(x=Resid,y=Time))+
geom_tile(aes(fill=RMSD)) +
scale_fill_gradient2(limits=c(0,6),low="blue",mid="green", high="red",midpoint=3.0,guide_legend(title="RMSD, A"))+
scale_y_continuous(limits=c(0,1200),breaks = round(seq(0,1000, by = 100),1))+
xlab("Residue number")+ylab("Time, ns")+ggtitle("RMSD, C-alpha, nucleosome core. Histone H2A, G")+
annotation_custom(h2a, ymin=1000, ymax=1270, xmin=15.5,xmax=117.5)
d<-ggplot(data=df[df$Chain=='CHD',],aes(x=Resid+3,y=Time))+
geom_tile(aes(fill=RMSD)) +
scale_fill_gradient2(limits=c(0,6),low="blue",mid="green", high="red",midpoint=3.0,guide_legend(title="RMSD, A"))+
scale_y_continuous(limits=c(0,1220),breaks = round(seq(0,1000, by = 100),1))+
xlab("Residue number")+ylab("Time, ns")+ggtitle("RMSD, C-alpha, nucleosome core. Histone H2B, D")+
annotation_custom(h2b, ymin=1000, ymax=1290, xmin=32.5,xmax=123.5)
h<-ggplot(data=df[df$Chain=='CHH',],aes(x=Resid+3,y=Time))+
geom_tile(aes(fill=RMSD)) +
scale_fill_gradient2(limits=c(0,6),low="blue",mid="green", high="red",midpoint=3.0,guide_legend(title="RMSD, A"))+
scale_y_continuous(limits=c(0,1220),breaks = round(seq(0,1000, by = 100),1))+
xlab("Residue number")+ylab("Time, ns")+ggtitle("RMSD, C-alpha, nucleosome core. Histone H2B, H")+
annotation_custom(h2b, ymin=1000, ymax=1290, xmin=32.5,xmax=123.5)
#facet_grid(Chain~.)
q<-arrangeGrob(a,b,e,f,c,d,g,h,ncol=2)
ggsave("../analysis_data/pub_prot_rmsd.png",plot=q,height=12,width=12)
#plot.new()
# z <- locator(1)
quit()
q<-arrangeGrob(t,s,ncol=1)
img <- readPNG(paste("dna_par_labels/",i,".png",sep=''))
g <- rasterGrob(img, interpolate=TRUE)
+ annotation_custom(g, ymin=0, xmax=-stdev+meand)
|
# Elastic-net fit (alpha = 0.3, Gaussian) on the large-intestine AvgRank
# training set; the fitted path summary is appended to a results log.
library(glmnet)
# Column 1 holds the response; predictors start at column 4
# (columns 2-3 are presumably identifiers -- confirm against the CSV header).
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/large_intestine.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the 10-fold CV split is reproducible.
set.seed(123)
# 10-fold cross-validation over lambda, minimising MSE; alpha = 0.3 mixes
# 30% lasso / 70% ridge penalties.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.3,family="gaussian",standardize=TRUE)
# Append the glmnet coefficient-path summary to the log file.
sink('./large_intestine_041.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/AvgRank/large_intestine/large_intestine_041.R | no_license | esbgkannan/QSMART | R | false | false | 363 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/large_intestine.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.3,family="gaussian",standardize=TRUE)
sink('./large_intestine_041.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{pval_plot}
\alias{pval_plot}
\title{Plot one-tailed p-values}
\usage{
pval_plot(yi, vi, sei, alpha_select = 0.05)
}
\arguments{
\item{yi}{A vector of point estimates to be meta-analyzed. The signs of the
estimates should be chosen such that publication bias is assumed to operate
in favor of positive estimates.}
\item{vi}{A vector of estimated variances (i.e., squared standard errors) for
the point estimates.}
\item{sei}{A vector of estimated standard errors for the point estimates.
(Only one of \code{vi} or \code{sei} needs to be specified).}
\item{alpha_select}{Alpha level at which an estimate's probability of being
favored by publication bias is assumed to change (i.e.,
the threshold at which study investigators, journal editors, etc., consider
an estimate to be significant).}
}
\description{
Plots the one-tailed p-values. The leftmost red line indicates the cutoff for
one-tailed p-values less than 0.025 (corresponding to "affirmative" studies;
i.e., those with a positive point estimate and a two-tailed p-value less than
0.05). The rightmost red line indicates one-tailed p-values greater than
0.975 (i.e., studies with a negative point estimate and a two-tailed p-value
less than 0.05). If there is a substantial point mass of p-values to the
right of the rightmost red line, this suggests that selection may be
two-tailed rather than one-tailed.
}
\examples{
# compute meta-analytic effect sizes
require(metafor)
dat <- metafor::escalc(measure = "RR", ai = tpos, bi = tneg, ci = cpos,
di = cneg, data = dat.bcg)
# flip signs since we think publication bias favors negative effects
dat$yi <- -dat$yi
pval_plot(yi = dat$yi, vi = dat$vi)
}
\references{
\insertRef{mathur2020}{metabias}
}
| /man/pval_plot.Rd | no_license | cran/PublicationBias | R | false | true | 1,825 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{pval_plot}
\alias{pval_plot}
\title{Plot one-tailed p-values}
\usage{
pval_plot(yi, vi, sei, alpha_select = 0.05)
}
\arguments{
\item{yi}{A vector of point estimates to be meta-analyzed. The signs of the
estimates should be chosen such that publication bias is assumed to operate
in favor of positive estimates.}
\item{vi}{A vector of estimated variances (i.e., squared standard errors) for
the point estimates.}
\item{sei}{A vector of estimated standard errors for the point estimates.
(Only one of \code{vi} or \code{sei} needs to be specified).}
\item{alpha_select}{Alpha level at which an estimate's probability of being
favored by publication bias is assumed to change (i.e.,
the threshold at which study investigators, journal editors, etc., consider
an estimate to be significant).}
}
\description{
Plots the one-tailed p-values. The leftmost red line indicates the cutoff for
one-tailed p-values less than 0.025 (corresponding to "affirmative" studies;
i.e., those with a positive point estimate and a two-tailed p-value less than
0.05). The rightmost red line indicates one-tailed p-values greater than
0.975 (i.e., studies with a negative point estimate and a two-tailed p-value
less than 0.05). If there is a substantial point mass of p-values to the
right of the rightmost red line, this suggests that selection may be
two-tailed rather than one-tailed.
}
\examples{
# compute meta-analytic effect sizes
require(metafor)
dat <- metafor::escalc(measure = "RR", ai = tpos, bi = tneg, ci = cpos,
di = cneg, data = dat.bcg)
# flip signs since we think publication bias favors negative effects
dat$yi <- -dat$yi
pval_plot(yi = dat$yi, vi = dat$vi)
}
\references{
\insertRef{mathur2020}{metabias}
}
|
# Plot 1: red histogram of a household power variable for 2007-02-01..02,
# saved as plot1.png (480x480).
source("loadData.R")
# loadData() is defined in loadData.R; arguments are the start/end dates.
data <- loadData("1/2/2007", "2/2/2007")
### plot1
# Single panel, near-default margins.
par(mfrow = c(1, 1), mar = c(5, 4, 4, 2))
# NOTE(review): column 3 is plotted but the labels say "reactive" -- in the
# canonical UCI power-consumption subset column 3 is Global_active_power;
# confirm which column loadData() puts third.
hist(data[,3], col = "red", main ="Global reactive power"
, xlab = "Global reactive power (kilowatts)")
# Copy the on-screen plot to a PNG device, then close that device.
dev.copy(png, file = "plot1.png", width=480, height=480)
dev.off()
| /CourseProject1/plot1.R | no_license | togop/ExData_Plotting1 | R | false | false | 295 | r | source("loadData.R")
data <- loadData("1/2/2007", "2/2/2007")
### plot1
par(mfrow = c(1, 1), mar = c(5, 4, 4, 2))
hist(data[,3], col = "red", main ="Global reactive power"
, xlab = "Global reactive power (kilowatts)")
dev.copy(png, file = "plot1.png", width=480, height=480)
dev.off()
|
# Making predictions
# ***** This is version 2.0 of this script, updated on 23/05/2017 *****
# ***** This script runs on R versions 3.3.1, 3.3.2, 3.3.3 and 3.4.0 *****
# ***** R version 3.4.0 is recommended *****
# Setting the working directory
# Put (in quotes) the working directory you use on your computer
# setwd("~/Dropbox/DSA/BigDataAnalytics-R-Azure/Cap11")
# getwd()
## Predictions with a randomForest-based classification model
# NOTE(review): library() is usually preferable to require() for a hard
# dependency -- require() only warns (returns FALSE) when the package is missing.
require(randomForest)
# Generate predictions on the test data.
# NOTE(review): `modelo`, `Credit` and `dados_teste` must already exist in
# the session; presumably they are created by earlier scripts of this module.
result_previsto <- data.frame( actual = Credit$CreditStatus,
                               previsto = predict(modelo, newdata = dados_teste))
# Inspect the first predicted-vs-actual rows.
head(result_previsto)
| /RFundamentos/Part 11/05-ScoreModel.R | no_license | DaniloLFaria/DataScienceAcademy | R | false | false | 792 | r | # Fazendo Previsoes
# ***** Esta é a versão 2.0 deste script, atualizado em 23/05/2017 *****
# ***** Esse script pode ser executado nas versões 3.3.1, 3.3.2, 3.3.3 e 3.4.0 da linguagem R *****
# ***** Recomendamos a utilização da versão 3.4.0 da linguagem R *****
# Configurando o diretório de trabalho
# Coloque entre aspas o diretório de trabalho que você está usando no seu computador
# setwd("~/Dropbox/DSA/BigDataAnalytics-R-Azure/Cap11")
# getwd()
## Previsoes com um modelo de classificacao baseado em randomForest
require(randomForest)
# Gerando previsoes nos dados de teste
result_previsto <- data.frame( actual = Credit$CreditStatus,
previsto = predict(modelo, newdata = dados_teste))
# Visualizando o resultado
head(result_previsto)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/calcWatZ.R
\name{calcWatZ.gmacs}
\alias{calcWatZ.gmacs}
\title{Calculate weight-at-size by year, sex}
\usage{
calcWatZ.gmacs(mc, showPlot = TRUE)
}
\arguments{
\item{mc}{- model configuration object}
}
\value{
W_yxmsz: 5d array with weight-at-size by year/sex/maturity state/shell condition
}
\description{
Calculates weight-at-size by year and sex for the given model configuration,
expanded over maturity state and shell condition (see the returned
\code{W_yxmsz} array).
}
| /man/calcWatZ.gmacs.Rd | permissive | seacode/rsimGmacs | R | false | false | 397 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/calcWatZ.R
\name{calcWatZ.gmacs}
\alias{calcWatZ.gmacs}
\title{Calculate weight-at-size by year, sex}
\usage{
calcWatZ.gmacs(mc, showPlot = TRUE)
}
\arguments{
\item{mc}{- model configuration object}
}
\value{
W_yxmsz: 5d array with weight-at-size by year/sex/maturity state/shell condition
}
\description{
}
|
library(gMCP)
### Name: weighted.test.functions
### Title: Weighted Test Functions for use with gMCP
### Aliases: weighted.test.functions
### ** Examples
# The test function 'bonferroni.test' is used by gMCP in the following call:
# (BonferroniHolm(4) builds the Bonferroni-Holm graph over four hypotheses.)
graph <- BonferroniHolm(4)
pvalues <- c(0.01, 0.05, 0.03, 0.02)
alpha <- 0.05
r <- gMCP.extended(graph=graph, pvalues=pvalues, test=bonferroni.test, verbose=TRUE)
# For the intersection of all four elementary hypotheses this results in a call
# returning the adjusted p-value (default) ...
bonferroni.test(pvalues=pvalues, weights=getWeights(graph))
# ... or, with adjPValues=FALSE, the plain reject/accept decision at alpha.
bonferroni.test(pvalues=pvalues, weights=getWeights(graph), adjPValues=FALSE)
# Weighted Bonferroni test for one intersection hypothesis.
# pvalues: raw p-values; weights: hypothesis weights (same length as pvalues).
# With adjPValues = TRUE the minimum weight-adjusted p-value min(p/w) is
# returned; otherwise a single logical saying whether any p-value falls
# below its weighted share alpha*w. Extra arguments in `...` (e.g. those
# forwarded by gMCP.extended) are accepted and ignored.
bonferroni.test <- function(pvalues, weights, alpha=0.05, adjPValues=TRUE, verbose=FALSE, ...) {
  if (adjPValues) min(pvalues / weights) else any(pvalues <= alpha * weights)
}
| /data/genthat_extracted_code/gMCP/examples/weighted.test.functions.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 862 | r | library(gMCP)
### Name: weighted.test.functions
### Title: Weighted Test Functions for use with gMCP
### Aliases: weighted.test.functions
### ** Examples
# The test function 'bonferroni.test' is used in by gMCP in the following call:
graph <- BonferroniHolm(4)
pvalues <- c(0.01, 0.05, 0.03, 0.02)
alpha <- 0.05
r <- gMCP.extended(graph=graph, pvalues=pvalues, test=bonferroni.test, verbose=TRUE)
# For the intersection of all four elementary hypotheses this results in a call
bonferroni.test(pvalues=pvalues, weights=getWeights(graph))
bonferroni.test(pvalues=pvalues, weights=getWeights(graph), adjPValues=FALSE)
# bonferroni.test function:
bonferroni.test <- function(pvalues, weights, alpha=0.05, adjPValues=TRUE, verbose=FALSE, ...) {
if (adjPValues) {
return(min(pvalues/weights))
} else {
return(any(pvalues<=alpha*weights))
}
}
|
# Visualize wine/food association-rule pairings plus raw order counts.
library(ggplot2)
library(data.table)
library(stringr)
library(dplyr)
#----------------------------- Wine Pairings: R code and Data Visualizations --------------------------#
#use file.choose to find the file (interactive prompt; expects the rules CSV)
association_rules <- fread(file.choose())
#Pulls the food and wine names out, drops the unformatted columns
# word(..., 2, sep = "'") keeps the text between the first pair of single
# quotes, i.e. the item name inside the raw antecedent/consequent strings.
rules <- association_rules %>%
  mutate(Meats = word(antecedents,2,sep = "'"),
         Wines = word(consequents,2,sep = "'")) %>%
  select(-c(antecedents, consequents))
#Get a list of the distinct wines and pair with Red or white
# NOTE(review): colours are hand-labelled in the order distinct() returns
# the 13 wines -- fragile if the input data changes.
wines = distinct(rules,Wines)
WineColor <- c("Red", "Red","White","Red", "White" , "Red",
               "White" , "Red", "Red", "White" ,"White" ,"White" )
wines = cbind(wines, WineColor)
#Inner join to add the colors
rules <- rules %>%
  inner_join(wines, by = "Wines")
#Create the plot: confidence of each wine, faceted by the paired meat.
ggPairs <- rules %>% ggplot(aes(x = confidence, y = Wines, fill = WineColor)) +
  geom_col() +
  facet_wrap(~Meats, nrow = 2) +
  theme_minimal()+
  theme(plot.title = element_text(hjust = 0.5)) +
  scale_fill_manual(values = c('#B11226', '#f3e5ab')) +
  scale_x_continuous(labels = scales::percent) +
  labs(x = "Percentage of Orders", #could change to confidence
       y = "",
       fill = "Wine Color",
       title = "Popular Wine Pairings")
# Orders ####
#NOTE: the factor mutation is meant to specify the order of the levels in the charts
#choose order data csv with line numbers
orders <- fread(file.choose())
#filters the wines (line_number == 2), groups by item, gets count and sorts
wineOrder <- orders %>%
  filter(line_number == 2) %>%
  group_by(item) %>%
  tally(sort = T) %>%
  #mutate(item = factor(item, levels = item[order(n)])) %>%
  rename(Wines = item)
# Attach colours, then order the factor levels by count for the flipped bars.
wineOrder <- wineOrder %>%
  inner_join( wines, by = 'Wines') %>%
  mutate(Wines = factor(Wines, levels = Wines[order(n)]))
#same process for mains (line_number == 1)
mainOrder <- orders %>%
  filter(line_number == 1) %>%
  group_by(item) %>%
  tally(sort = T) %>%
  mutate(item = factor(item, levels = item[order(n)])) %>%
  rename(Mains = item)
#adding colors (hand-labelled per main, in distinct() order)
mains <- cbind(distinct(mainOrder, Mains),
               'Meat Type' = c("Beef", "Fish", 'Pork', 'Pork', 'Fish', 'Poultry', 'Fish','Poultry'))
mainOrder <- inner_join(mainOrder, mains, by = 'Mains')
#same process for sides (line_number == 3)
sideOrder <- orders %>%
  filter(line_number == 3) %>%
  group_by(item) %>%
  tally(sort = T) %>%
  mutate(item = factor(item, levels = item[order(n)])) %>%
  rename(Sides = item)
#adding colors (hand-labelled per side, in distinct() order)
sides <- cbind(distinct(sideOrder, Sides),
               'Side Type' = c('Vegetables', 'Beans', 'Vegetables', 'Salad', 'Potatoes', 'Salad', 'Potatoes'))
sideOrder <- inner_join(sideOrder, sides, by = 'Sides')
#simple bar charts (flipped so item names read horizontally)
ggWine <- wineOrder %>% ggplot(aes(x = Wines, y = n, fill = WineColor)) +
  geom_col() +
  theme_minimal() +
  scale_fill_manual(values = c('#B11226', '#f3e5ab')) + #handpicked colors
  labs(x = "Wine", y = "Frequency", title = "Count of Wine Orders") +
  coord_flip()
ggWine
#For sides
ggSide <- sideOrder %>% ggplot(aes(x = Sides, y = n, fill = `Side Type`)) +
  geom_col() +
  theme_minimal() +
  scale_fill_manual(values = c('#672422', '#b79268', '#97be11', '#28590c')) +
  labs(x = "Side Item", y = "Frequency", title = "Count of Side Item Orders") +
  coord_flip()
ggSide
#For mains
ggMain <- mainOrder %>% ggplot(aes(x = Mains, y = n, fill = `Meat Type`)) +
  geom_col() +
  theme_minimal() +
  scale_fill_manual(values = c('#B11226', '#add8e6', '#fcd7de', '#f4e8a4')) +
  labs(x = "Main Item", y = "Frequency", title = "Count of Main Item Orders") +
  coord_flip()
ggMain
| /Data_Visualizations_Code.R | no_license | chelynl/Association_Analysis | R | false | false | 3,753 | r | library(ggplot2)
library(data.table)
library(stringr)
library(dplyr)
#----------------------------- Wine Pairings: R code and Data Visualizations --------------------------#
#use file.choose to find the file
association_rules <- fread(file.choose())
#Pulls the food and wine names out, drops the unformatted columns
rules <- association_rules %>%
mutate(Meats = word(antecedents,2,sep = "'"),
Wines = word(consequents,2,sep = "'")) %>%
select(-c(antecedents, consequents))
#Get a list of the distinct wines and pair with Red or white
wines = distinct(rules,Wines)
WineColor <- c("Red", "Red","White","Red", "White" , "Red",
"White" , "Red", "Red", "White" ,"White" ,"White" ,"White" )
wines = cbind(wines, WineColor)
#Inner join to add the colors
rules <- rules %>%
inner_join(wines, by = "Wines")
#Create the plot
ggPairs <- rules %>% ggplot(aes(x = confidence, y = Wines, fill = WineColor)) +
geom_col() +
facet_wrap(~Meats, nrow = 2) +
theme_minimal()+
theme(plot.title = element_text(hjust = 0.5)) +
scale_fill_manual(values = c('#B11226', '#f3e5ab')) +
scale_x_continuous(labels = scales::percent) +
labs(x = "Percentage of Orders", #could change to confidence
y = "",
fill = "Wine Color",
title = "Popular Wine Pairings")
# Orders ####
#NOTE: the factor mutation is meant to specify the order of the levels in the charts
#choose order data csv with line numbers
orders <- fread(file.choose())
#filters the wines, groups by item, gets count and sorts
wineOrder <- orders %>%
filter(line_number == 2) %>%
group_by(item) %>%
tally(sort = T) %>%
#mutate(item = factor(item, levels = item[order(n)])) %>%
rename(Wines = item)
wineOrder <- wineOrder %>%
inner_join( wines, by = 'Wines') %>%
mutate(Wines = factor(Wines, levels = Wines[order(n)]))
#same process for mains
mainOrder <- orders %>%
filter(line_number == 1) %>%
group_by(item) %>%
tally(sort = T) %>%
mutate(item = factor(item, levels = item[order(n)])) %>%
rename(Mains = item)
#adding colors
mains <- cbind(distinct(mainOrder, Mains),
'Meat Type' = c("Beef", "Fish", 'Pork', 'Pork', 'Fish', 'Poultry', 'Fish','Poultry'))
mainOrder <- inner_join(mainOrder, mains, by = 'Mains')
#same process for sides
sideOrder <- orders %>%
filter(line_number == 3) %>%
group_by(item) %>%
tally(sort = T) %>%
mutate(item = factor(item, levels = item[order(n)])) %>%
rename(Sides = item)
#adding colors
sides <- cbind(distinct(sideOrder, Sides),
'Side Type' = c('Vegetables', 'Beans', 'Vegetables', 'Salad', 'Potatoes', 'Salad', 'Potatoes'))
sideOrder <- inner_join(sideOrder, sides, by = 'Sides')
#simple bar charts
ggWine <- wineOrder %>% ggplot(aes(x = Wines, y = n, fill = WineColor)) +
geom_col() +
theme_minimal() +
scale_fill_manual(values = c('#B11226', '#f3e5ab')) + #handpicked colors
labs(x = "Wine", y = "Frequency", title = "Count of Wine Orders") +
coord_flip()
ggWine
#For sides
ggSide <- sideOrder %>% ggplot(aes(x = Sides, y = n, fill = `Side Type`)) +
geom_col() +
theme_minimal() +
scale_fill_manual(values = c('#672422', '#b79268', '#97be11', '#28590c')) +
labs(x = "Side Item", y = "Frequency", title = "Count of Side Item Orders") +
coord_flip()
ggSide
#For mains
ggMain <- mainOrder %>% ggplot(aes(x = Mains, y = n, fill = `Meat Type`)) +
geom_col() +
theme_minimal() +
scale_fill_manual(values = c('#B11226', '#add8e6', '#fcd7de', '#f4e8a4')) +
labs(x = "Main Item", y = "Frequency", title = "Count of Main Item Orders") +
coord_flip()
ggMain
|
#' German credit dataset dictionary.
#'
#' This dataset can be used to build credit risk models.
#' There are enough observations and variables for meaningful analysis.
#'
#' @format A data frame with 1000 rows and 21 variables:
#' \describe{
#' \item{Creditability}{Response variable 0=Bad, 1=Good}
#' ...
#' }
#' All other variables are quantitative and self-explanatory.
#' @source \url{https://onlinecourses.science.psu.edu/stat857/node/215}
"german_credit"
"german_credit" | /unobankproject/R/data.R | no_license | arunkumar4all/unobankproject | R | false | false | 448 | r | #' German credit dataset dictionary.
#'
#' This dataset can be used to build credit risk models
#' There is enough observations and variables to analyze
#'
#' @format A data frame with 1000 rows and 21 variables:
#' \describe{
#' \item{Creditability}{Response variable 0=Bad, 1=Good}
#' ...
#' }
#' All other variables are quantitative and self explanatoru
#' @source \url{https://onlinecourses.science.psu.edu/stat857/node/215}
"german_credit" |
#=============================================================================#
# Name : cropZone
# Author : Jorge Flores
# Date :
# Version:
# Aim :
# URL :
#=============================================================================#
#' Crop every raster file under `input_path` to a rectangular zone.
#'
#' Files whose names end in `pattern` are searched recursively; each one is
#' read as a single-layer raster (`varname`), cropped to the window
#' [xmn, xmx] x [ymn, ymx] and written to `output_path` with `pre_name`
#' prepended to the original (relative) file name. Each output path is
#' printed as it is written.
#'
#' @param input_path  Directory scanned (recursively) for raster files.
#' @param output_path Directory the cropped rasters are written to;
#'   defaults to `input_path`.
#' @param pattern     File-name suffix to match, e.g. '.nc'.
#' @param xmn,xmx,ymn,ymx Longitude/latitude bounds of the crop window.
#' @param varname     Variable to read from each file (e.g. 'chlor_a').
#' @param pre_name    Prefix for the output file names.
#' @return Invisibly, the vector of relative file names that were processed.
cropZone <- function(
  input_path,
  output_path = input_path,
  pattern = '.nc',
  xmn=-100, xmx=-70, ymn=-20, ymx=0,
  varname = 'chlor_a',
  pre_name = 'peru')
{
  library(raster)
  rasterFiles <- list.files(path = input_path, pattern = paste0('.*\\', pattern),
                            full.names = FALSE, recursive = TRUE)
  # The crop window is loop-invariant, so build it once.
  zone_crop <- extent(xmn, xmx, ymn, ymx)
  # seq_along() (not 1:length(...)) so an empty match yields zero iterations
  # instead of the i = 1, 0 trap that made the old loop fail on empty dirs.
  for (i in seq_along(rasterFiles)) {
    ras <- raster(paste0(input_path, rasterFiles[i]), varname = varname)
    names(ras) <- varname
    ras <- crop(ras, zone_crop)
    crop_name <- paste0(output_path, pre_name, rasterFiles[i])
    writeRaster(x = ras, filename = crop_name, overwrite = TRUE)
    print(crop_name)
  }
  invisible(rasterFiles)
}
#=============================================================================#
# END OF FUNCTION
#=============================================================================#
dirpath <- 'G:/Clorofila/'
library(maps)
library(mapdata)
# #-----PERU DOMAIN-----#
pre_name <- 'Peru'
xmn <- -90
xmx <- -70
ymn <- -20
ymx <- 0
# #-----PERU DOMAIN-----#
# pre_name <- 'Peru4'
# xmn <- -90
# xmx <- -70
# ymn <- -20
# ymx <- -15
# #-----SECHURA DOMAIN-----#
# pre_name <- 'Sechura'
# xmn <- -82
# xmx <- -80
# ymn <- -7
# ymx <- -4
# #-----MIRAFLORES DOMAIN-----#
# pre_name <- 'Miraflores'
# xmn <- -77.3
# xmx <- -77
# ymn <- -12.25
# ymx <- -11.9
# #-----CHIMBOTE DOMAIN-----#
# pre_name <- 'Chimbote'
# xmn <- -78.8
# xmx <- -78.4
# ymn <- -9.4
# ymx <- -8.9
# DO NOT CHANGE ANYTHIG AFTER HERE #
new_folder <- paste0(dirpath, 'crop_', pre_name)
dir.create(path = new_folder, showWarnings = F)
png(filename = paste0(new_folder, '/cropDomain.png'), width = 850, height = 850, res = 120)
map('worldHires', add=F, fill=T, col='gray', ylim = c(ymn, ymx), xlim = c(xmn, xmx))
axis(1)
axis(2, las = 2)
box()
dev.off()
for(year in 2002:2018){
input_path <- paste0(dirpath, year, '/')
output_path <- paste0(new_folder, '/',year, '/')
dir.create(path = output_path, showWarnings = F)
cropZone(input_path = input_path,
output_path = output_path,
pre_name = tolower(pre_name),
xmn = xmn, xmx = xmx, ymn = ymn, ymx = ymx)
}
rm(list = ls())
#=============================================================================#
# END OF PROGRAM
#=============================================================================# | /cropZone.R | no_license | NilsTeilher/chl_sat | R | false | false | 2,570 | r | #=============================================================================#
# Name : cropZone
# Author : Jorge Flores
# Date :
# Version:
# Aim :
# URL :
#=============================================================================#
cropZone <- function(
input_path,
output_path = input_path,
pattern = '.nc',
xmn=-100, xmx=-70, ymn=-20, ymx=0,
varname = 'chlor_a',
pre_name = 'peru')
{
library(raster)
rasterFiles <- list.files(path = input_path, pattern = paste0('.*\\',pattern), full.names = F, recursive = T)
for(i in 1:length(rasterFiles)){
ras <- raster(paste0(input_path, rasterFiles[i]), varname = varname)
names(ras) <- varname
zone_crop <- extent(xmn, xmx, ymn, ymx)
ras <- crop(ras, zone_crop)
crop_name <- paste0(output_path, pre_name, rasterFiles[i])
writeRaster(x = ras, filename = crop_name, overwrite = TRUE)
print(crop_name)
}
}
#=============================================================================#
# END OF FUNCTION
#=============================================================================#
dirpath <- 'G:/Clorofila/'
library(maps)
library(mapdata)
# #-----PERU DOMAIN-----#
pre_name <- 'Peru'
xmn <- -90
xmx <- -70
ymn <- -20
ymx <- 0
# #-----PERU DOMAIN-----#
# pre_name <- 'Peru4'
# xmn <- -90
# xmx <- -70
# ymn <- -20
# ymx <- -15
# #-----SECHURA DOMAIN-----#
# pre_name <- 'Sechura'
# xmn <- -82
# xmx <- -80
# ymn <- -7
# ymx <- -4
# #-----MIRAFLORES DOMAIN-----#
# pre_name <- 'Miraflores'
# xmn <- -77.3
# xmx <- -77
# ymn <- -12.25
# ymx <- -11.9
# #-----CHIMBOTE DOMAIN-----#
# pre_name <- 'Chimbote'
# xmn <- -78.8
# xmx <- -78.4
# ymn <- -9.4
# ymx <- -8.9
# DO NOT CHANGE ANYTHIG AFTER HERE #
new_folder <- paste0(dirpath, 'crop_', pre_name)
dir.create(path = new_folder, showWarnings = F)
png(filename = paste0(new_folder, '/cropDomain.png'), width = 850, height = 850, res = 120)
map('worldHires', add=F, fill=T, col='gray', ylim = c(ymn, ymx), xlim = c(xmn, xmx))
axis(1)
axis(2, las = 2)
box()
dev.off()
for(year in 2002:2018){
input_path <- paste0(dirpath, year, '/')
output_path <- paste0(new_folder, '/',year, '/')
dir.create(path = output_path, showWarnings = F)
cropZone(input_path = input_path,
output_path = output_path,
pre_name = tolower(pre_name),
xmn = xmn, xmx = xmx, ymn = ymn, ymx = ymx)
}
rm(list = ls())
#=============================================================================#
# END OF PROGRAM
#=============================================================================# |
#用来获取alluvial作图的数据
#' Build the input table for an alluvial (flow) plot of module membership.
#'
#' Queries the Neo4j graph (global connection \code{graph}, via
#' \code{cypher()}) for the module assigned to every gene at the four
#' timepoints (M0, M3, M6, M12) of the given group, then stacks the
#' assignments into long format, one row per gene/timepoint.  Module
#' labels are renamed to "<timepoint>_module<k>" so they are unique
#' across timepoints.
#'
#' @param group character scalar, e.g. "COPD_smoker"; used to build the
#'   node property names queried from the graph.
#' @return data frame with columns gene, module, timepoint.
get_alluvial.data <- function(group){
  # group = "COPD_smoker"
  query <- paste0("
                 MATCH (A:Genes)
                 return A.Symbols AS gene,
                 A.",group,"_M0_module,
                 A.",group,"_M3_module,
                 A.",group,"_M6_module,
                 A.",group,"_M12_module
                 ")
  mod_tab <- cypher(graph, query)
  # Zero-row accumulator: starting the rbind() chain from this frame
  # fixes the output column order to gene / module / timepoint.
  alluvial.data <- data.frame(gene = numeric(0),
                              module = numeric(0),
                              timepoint = numeric(0))
  timepoints <- c("M0", "M3", "M6", "M12")
  for (i in 2:5){
    # Keep only genes with a module assignment at this timepoint.
    mod_mx <- mod_tab[!is.na(mod_tab[i]), c(1, i)]
    names(mod_mx) <- c("gene", "module")
    mod_mx[, "timepoint"] <- timepoints[i - 1]
    # Rename modules so labels are unique per timepoint, e.g.
    # "M0_module1".  seq_along() is safe when no module is present
    # (the previous 01:length() gave c(1, 0) for an empty set and
    # made the data.frame() call below fail).
    module <- unique(mod_mx$module)
    mod_name <- paste0(timepoints[i - 1], "_module", seq_along(module))
    mod_name <- data.frame(module, mod_name)
    mod_mx <- merge(mod_mx,
                    mod_name,
                    by = "module",
                    all.x = TRUE)
    mod_mx <- mod_mx[, c("gene", "timepoint", "mod_name")]
    names(mod_mx)[3] <- "module"
    # Append this timepoint's rows to the long-format result.
    alluvial.data <- rbind(alluvial.data, mod_mx)
  }
  return(alluvial.data)
} | /copd-shiny-docker/copd_shiny/FUNCTION/get_alluvial.data.R | no_license | xiaowei3223/docker_build | R | false | false | 1,254 | r | #用来获取alluvial作图的数据
# Build the long-format table behind an alluvial plot of module
# membership: one row per gene/timepoint, with module labels made
# unique per timepoint (e.g. "M3_module2").
get_alluvial.data <- function(group){
  # group = "COPD_smoker", for example.
  query <- paste0("
                 MATCH (A:Genes)
                 return A.Symbols AS gene,
                 A.", group, "_M0_module,
                 A.", group, "_M3_module,
                 A.", group, "_M6_module,
                 A.", group, "_M12_module
                 ")
  tab <- cypher(graph, query)
  tps <- c("M0", "M3", "M6", "M12")
  # Zero-row seed frame: rbind() keeps its gene/module/timepoint
  # column order for the final result.
  out <- data.frame(gene = numeric(0),
                    module = numeric(0),
                    timepoint = numeric(0))
  for (col in 2:5) {
    tp <- tps[col - 1]
    # Genes with a module assignment at this timepoint only.
    chunk <- tab[!is.na(tab[col]), c(1, col)]
    names(chunk) <- c("gene", "module")
    chunk[, "timepoint"] <- tp
    # Map each raw module id to a timepoint-qualified label.
    raw <- unique(chunk$module)
    lookup <- data.frame(module = raw,
                         mod_name = paste0(tp, "_module", 1:length(raw)))
    chunk <- merge(chunk, lookup, by = "module", all.x = TRUE)
    chunk <- chunk[, c("gene", "timepoint", "mod_name")]
    names(chunk)[3] <- "module"
    out <- rbind(out, chunk)
  }
  out
}
## rankall.R
##
## Bob George https://github.com/rwgeorge
###############################################################################
## Coursera R Programming (rprog-011)
## Assignment 3: Hospital Quality
## https://github.com/rwgeorge/ProgrammingAssignment3
###############################################################################
## This function reads the outcome-of-care-measures.csv file and returns a
## 2-column data frame containing the hospital in each state that has the
## ranking specified in num. For example the function call
## rankall("heart attack", "best") would return a data frame containing the
## names of the hospitals that are the best in their respective states for
## 30-day heart attack death rates. The function should return a value for
## every state (some may be NA). The first column in the data frame is named
## hospital, which contains the hospital name, and the second column is named
## state, which contains the 2-character abbreviation for the state name.
## Hospitals that do not have data on a particular outcome should be excluded
## from the set of hospitals when deciding the rankings.
## Return, for every state, the hospital with the requested 30-day
## mortality ranking for the given outcome (see header comment above
## for the full contract).  num is "best", "worst", or a numeric rank.
rankall <- function(outcome, num = "best") {
        ## Validate the requested outcome.
        outcomes <- c("heart attack", "heart failure", "pneumonia")
        if (!(outcome %in% outcomes)) {
                stop("invalid outcome")
        }
        ## Read everything as character; rates are converted per state.
        data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
        ## Pre-sort by hospital name so rate ties break alphabetically.
        dataByHospital <- data[order(data$Hospital.Name), ]
        ## Column holding the 30-day death rate for the chosen outcome.
        outcomeColumn <- switch(outcome,
                                "heart attack" = 11,
                                "heart failure" = 17,
                                23)
        findRow <- function(data) {
                column <- suppressWarnings(as.numeric(data[, outcomeColumn]))
                ## Hospitals with no data for this outcome are excluded
                ## from the ranking (previously they were kept, so a
                ## large numeric rank could select an NA-rate hospital).
                data <- data[!is.na(column), , drop = FALSE]
                column <- column[!is.na(column)]
                if (nrow(data) == 0) {
                        ## No usable data for this state; keep the
                        ## 2-column shape expected by the reshape below.
                        return(c(NA, NA))
                }
                if (num == "best") {
                        row <- which.min(column)
                } else if (num == "worst") {
                        row <- which.max(column)
                } else {
                        data <- data[order(column), ]
                        row <- num
                }
                ## Column 7 is the 2-letter state abbreviation; a rank
                ## beyond the number of ranked hospitals yields NA.
                c(data$Hospital.Name[row], data[, 7][row])
        }
        appliedRows <- lapply(split(dataByHospital, dataByHospital$State), findRow)
        result <- data.frame(matrix(unlist(appliedRows), ncol = 2, byrow = TRUE))
        colnames(result) <- c("hospital", "state")
        result[order(result$state), ]
} | /rankall.R | no_license | rwgeorge/ProgrammingAssignment3 | R | false | false | 2,510 | r | ## rankall.R
##
## Bob George https://github.com/rwgeorge
###############################################################################
## Coursera R Programming (rprog-011)
## Assignment 3: Hospital Quality
## https://github.com/rwgeorge/ProgrammingAssignment3
###############################################################################
## This function reads the outcome-of-care-measures.csv file and returns a
## 2-column data frame containing the hospital in each state that has the
## ranking specified in num. For example the function call
## rankall("heart attack", "best") would return a data frame containing the
## names of the hospitals that are the best in their respective states for
## 30-day heart attack death rates. The function should return a value for
## every state (some may be NA). The first column in the data frame is named
## hospital, which contains the hospital name, and the second column is named
## state, which contains the 2-character abbreviation for the state name.
## Hospitals that do not have data on a particular outcome should be excluded
## from the set of hospitals when deciding the rankings.
## Rank hospitals within every state for a given outcome; returns a
## data frame with columns hospital and state, one row per state.
rankall <- function(outcome, num = "best") {
        ## Validate the requested outcome.
        valid <- c("heart attack", "heart failure", "pneumonia")
        if (!(outcome %in% valid)) {
                stop("invalid outcome")
        }
        ## Read the raw file; pre-sort by hospital name so that rate
        ## ties are broken alphabetically.
        raw <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
        byName <- raw[order(raw$Hospital.Name), ]
        ## Column holding the 30-day death rate for this outcome.
        rateCol <- switch(outcome,
                          "heart attack" = 11,
                          "heart failure" = 17,
                          23)
        ## Select one (hospital, state) pair from one state's rows.
        pickRow <- function(stateData) {
                rates <- as.numeric(stateData[, rateCol])
                if (num == "best") {
                        idx <- which.min(rates)
                } else if (num == "worst") {
                        idx <- which.max(rates)
                } else {
                        stateData <- stateData[order(rates), ]
                        idx <- num
                }
                ## Column 7 is the 2-letter state abbreviation.
                c(stateData$Hospital.Name[idx], stateData[, 7][idx])
        }
        picked <- lapply(split(byName, byName$State), pickRow)
        out <- data.frame(matrix(unlist(picked), ncol = 2, byrow = TRUE))
        colnames(out) <- c("hospital", "state")
        out[order(out$state), ]
}
#' All dependencies for the material design packages
#'
#' \code{rt.setup}
#'
#' Reads all of the dependency index tables and external resources into the
#' package data folder, as required for the package build and for function
#' dependency resolution.
#'
#' @param add_res Optional named list of additional \code{.rds} resource
#'   files to download alongside the defaults.
#'
#' @aliases mtrl.setup
#'
rt.setup <- function(add_res = NULL){
  ## Base URL of the Rtbase data folder; '?raw=true' fetches raw bytes.
  base <- 'https://github.com/Rterial/Rtbase/blob/master/inst/data/'
  tail <- '?raw=true'
  ## Default resources; the list names become the global variable names.
  file_list <- list(mtrl_icons = 'mtrl_icons.rds',
                    mtrl_icons_svg = 'mtrl_icons_svg.rds',
                    mtrl_colors = 'mtrl_colors.rds',
                    html_tags = 'tag_refs.rds')
  if (!is.null(add_res)) {
    ## Caller-supplied extra .rds resources (named list).
    file_list <- append(file_list, add_res)
  }
  ## recursive = TRUE creates 'inst' AND 'inst/data' as needed; the
  ## previous code skipped creating 'inst/data' whenever 'inst'
  ## already existed, making the downloads below fail.
  dir.create('inst/data', recursive = TRUE, showWarnings = FALSE)
  rel_paths <- paste0(base, file_list, tail)
  to_paths <- paste0('inst/data/', file_list)
  ## stringsAsFactors = FALSE keeps the URL/path columns character on
  ## R < 4.0, where the old default turned them into factors.
  get_df <- data.frame(froms = rel_paths, tos = to_paths,
                       stringsAsFactors = FALSE)
  ## Download every resource (plain loop: this is a side effect, not a
  ## mapping, so lapply() added nothing but a discarded result).
  for (i in seq_len(nrow(get_df))) {
    curl::curl_download(get_df$froms[i], get_df$tos[i])
  }
  ## If the last file landed, assume all did and load each table into
  ## the global environment under its file_list name.
  if (file.exists(get_df$tos[nrow(get_df)])) {
    for (i in seq_len(nrow(get_df))) {
      assign(names(file_list)[i], readRDS(get_df$tos[i]), envir = .GlobalEnv)
    }
  }
  ## First run only: cache the MTRL dependency script under revdep/.
  if (!file.exists('revdep')) {
    dir.create('revdep')
    if (!exists('pure.html')) {
      ## Minimal stand-in for htmltools::HTML() when it is not loaded.
      assign('pure.html',
             function(text, ...) {
               htmlText <- c(text, as.character(list(...)))
               htmlText <- paste(htmlText, collapse = " ")
               attr(htmlText, "html") <- TRUE
               class(htmlText) <- c("html", "character")
               htmlText
             }, envir = .GlobalEnv)
    }
    fi_src <- "https://raw.githubusercontent.com/CarlBoneri/mtace/master/R/MTRL_DEPS.R"
    ## Plain calls instead of the magrittr pipe, so this function does
    ## not silently depend on magrittr being attached.
    src_txt <- paste0(readLines(fi_src), collapse = "\n")
    write(pure.html(src_txt), paste0('revdep/', basename(fi_src)))
  }
  invisible()
}
| /R/rt_depends.R | no_license | Rterial/rtIcons | R | false | false | 1,893 | r | #' All dependencies for the material design packages
#'
#' \code{rt.setup}
#'
#' Read all of the dependency index tables and externals into the package
#' data folder for package build and function dependency requirements.
#'
#'
#'
#' @aliases \code{mtrl.setup}
#'
#'
# Download the shared Rterial resource tables from GitHub into
# inst/data/, load them into the global environment, and cache the
# MTRL dependency script under revdep/.  Called for its side effects.
rt.setup <- function(add_res = NULL){
  # Base URL of the Rtbase data folder; '?raw=true' fetches raw bytes.
  base <- 'https://github.com/Rterial/Rtbase/blob/master/inst/data/'
  tail <- '?raw=true'
  # Default resources; the list names become the global variable names.
  file_list <- list(mtrl_icons = 'mtrl_icons.rds',
                    mtrl_icons_svg = 'mtrl_icons_svg.rds',
                    mtrl_colors = 'mtrl_colors.rds',
                    html_tags = 'tag_refs.rds')
  # Optional extra .rds resources supplied by the caller (named list).
  if(!is.null(add_res)){
    file_list <- append(file_list,add_res)
  }
  # NOTE(review): 'inst/data' is only created when 'inst' itself is
  # missing; if 'inst' exists without 'data' the downloads below fail.
  if(!file.exists('inst')){
    dir.create('inst')
    dir.create('inst/data')
  }
  # Source URLs and matching local destination paths, pairwise.
  rel_paths <- paste0(base,file_list,tail)
  to_paths <- paste0('inst/data/',file_list)
  get_df <- data.frame(froms = rel_paths, tos = to_paths)
  # Download every resource (column 1 = URL, column 2 = destination).
  lapply(1:nrow(get_df),function(i)
    curl::curl_download(get_df[[1]][[i]],
                        get_df[[2]][[i]])
  )
  # If the last file landed, assume all did and load each table into
  # the global environment under its file_list name.
  if(file.exists(get_df[nrow(get_df),2])){
    sapply(1:nrow(get_df),function(i)
      assign(names(file_list[i]),readRDS(get_df[i,2]),envir = .GlobalEnv)
    )
    invisible()
  }
  # First run only: cache the MTRL dependency script under revdep/.
  if(!file.exists('revdep')){
    dir.create('revdep')
    # Fallback stand-in for htmltools::HTML() when it is not loaded.
    if(!exists('pure.html')){
      assign('pure.html',
             function (text, ...) {
               htmlText <- c(text, as.character(list(...)))
               htmlText <- paste(htmlText, collapse = " ")
               attr(htmlText, "html") <- TRUE
               class(htmlText) <- c("html", "character")
               htmlText
             },envir = .GlobalEnv)
    }
    fi_src <- "https://raw.githubusercontent.com/CarlBoneri/mtace/master/R/MTRL_DEPS.R"
    # Fetch the script, collapse to one string, tag as HTML, write out.
    readLines(fi_src) %>%
      paste0(collapse = "\n") %>% pure.html %>%
      write(paste0('revdep/',basename(fi_src)))
  }
  invisible()
}
|
source("utils.R")
# Remove Expired Files ----------------------------------------------------
idx_old <- list.files("messages/", full.names = TRUE) %>%
lapply(function (x) file.mtime(x) < (Sys.Date()-5)) %>%
do.call(rbind, .)
list.files("messages/", full.names = TRUE)[idx_old] %>%
file.remove()
idx_old <- list.files("data/weights/", full.names = TRUE, recursive = TRUE) %>%
.[!grepl("@history_master|@log_fav_weights", .)] %>%
lapply(function (x) file.mtime(x) < (Sys.Date()-5)) %>%
do.call(rbind, .)
list.files("data/weights/", full.names = TRUE, recursive = TRUE) %>%
.[!grepl("@history_master|@log_fav_weights", .)] %>%
.[idx_old] %>%
file.remove()
list.files("~/Downloads/",
pattern = "WF........-daily-",
full.names = TRUE) %>%
file.remove()
# Copy Portfolio Report ---------------------------------------------------
file.copy("./src/32_create_report.html", cloud_dir)
file.rename(paste0(cloud_dir, "/32_create_report.html"),
paste0(cloud_dir, "/", save_date, "_portfolio_report.html"))
file.remove("./src/32_create_report.html")
| /src/32_cleanup.R | no_license | re-tradr/mom_portfolio_builder | R | false | false | 1,115 | r |
source("utils.R")
# Remove Expired Files ----------------------------------------------------
idx_old <- list.files("messages/", full.names = TRUE) %>%
lapply(function (x) file.mtime(x) < (Sys.Date()-5)) %>%
do.call(rbind, .)
list.files("messages/", full.names = TRUE)[idx_old] %>%
file.remove()
idx_old <- list.files("data/weights/", full.names = TRUE, recursive = TRUE) %>%
.[!grepl("@history_master|@log_fav_weights", .)] %>%
lapply(function (x) file.mtime(x) < (Sys.Date()-5)) %>%
do.call(rbind, .)
list.files("data/weights/", full.names = TRUE, recursive = TRUE) %>%
.[!grepl("@history_master|@log_fav_weights", .)] %>%
.[idx_old] %>%
file.remove()
list.files("~/Downloads/",
pattern = "WF........-daily-",
full.names = TRUE) %>%
file.remove()
# Copy Portfolio Report ---------------------------------------------------
file.copy("./src/32_create_report.html", cloud_dir)
file.rename(paste0(cloud_dir, "/32_create_report.html"),
paste0(cloud_dir, "/", save_date, "_portfolio_report.html"))
file.remove("./src/32_create_report.html")
|
rm(list = ls())
library("dplyr")
library("readr")
library("httr")
library("rvest")
library("geojsonio")
map <- "https://raw.githubusercontent.com/johan/world.geo.json/master/countries/AUS.geo.json" %>%
GET() %>%
content() %>%
jsonlite::fromJSON(simplifyVector = FALSE)
# http://openflights.org/data.html
airports <- read_csv("https://raw.githubusercontent.com/jpatokal/openflights/master/data/airports.dat",
col_names = FALSE)
tblnames <- read_html("http://openflights.org/data.html") %>%
html_node("table") %>%
html_table(fill = TRUE)
airports <- setNames(airports, str_to_id(tblnames$X1))
colalt <- function(x){
colorRamp(viridisLite::plasma(10), alpha = FALSE)(x) %>%
{./255} %>%
rgb()
}
airportsmin <- airports %>%
filter(country == "Australia", tz_database_time_zone != "\\N") %>%
select(name, latitude, longitude, altitude)
airpjson <- geojson_json(airportsmin, lat = "latitude", lon = "longitude")
highchart(type = "map") %>%
hc_title(text = "Airports in Australia") %>%
hc_chart(backgroundColor = "#D9E9FF") %>%
hc_add_series(mapData = map, showInLegend = FALSE,
nullColor = "#C0D890") %>%
hc_add_series(data = airpjson, type = "mappoint", dataLabels = list(enabled = FALSE),
name = "Airports", color = 'rgba(57, 86, 139, 0.5)',
tooltip = list(pointFormat = "{point.properties.name}: {point.properties.altitude} fts")) %>%
hc_mapNavigation(enabled = TRUE)
| /devscripts/highmaps2.R | no_license | APKBridget/highcharter | R | false | false | 1,539 | r | rm(list = ls())
library("dplyr")
library("readr")
library("httr")
library("rvest")
library("geojsonio")
map <- "https://raw.githubusercontent.com/johan/world.geo.json/master/countries/AUS.geo.json" %>%
GET() %>%
content() %>%
jsonlite::fromJSON(simplifyVector = FALSE)
# http://openflights.org/data.html
airports <- read_csv("https://raw.githubusercontent.com/jpatokal/openflights/master/data/airports.dat",
col_names = FALSE)
tblnames <- read_html("http://openflights.org/data.html") %>%
html_node("table") %>%
html_table(fill = TRUE)
airports <- setNames(airports, str_to_id(tblnames$X1))
colalt <- function(x){
colorRamp(viridisLite::plasma(10), alpha = FALSE)(x) %>%
{./255} %>%
rgb()
}
airportsmin <- airports %>%
filter(country == "Australia", tz_database_time_zone != "\\N") %>%
select(name, latitude, longitude, altitude)
airpjson <- geojson_json(airportsmin, lat = "latitude", lon = "longitude")
highchart(type = "map") %>%
hc_title(text = "Airports in Australia") %>%
hc_chart(backgroundColor = "#D9E9FF") %>%
hc_add_series(mapData = map, showInLegend = FALSE,
nullColor = "#C0D890") %>%
hc_add_series(data = airpjson, type = "mappoint", dataLabels = list(enabled = FALSE),
name = "Airports", color = 'rgba(57, 86, 139, 0.5)',
tooltip = list(pointFormat = "{point.properties.name}: {point.properties.altitude} fts")) %>%
hc_mapNavigation(enabled = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metaPvalues.R
\name{metaPvalues}
\alias{metaPvalues}
\title{Meta-analysis of p-values}
\usage{
metaPvalues(x)
}
\arguments{
\item{x}{a \code{dsOmics} object obtained from \code{ds.limma}, \code{ds.GWAS} or \code{ds.PLINK} functions applied o 2 or more studies}
}
\value{
a matrix with the feature p-values of each study and their combination
}
\description{
Performs a meta-analysis of p-values using the sum of logs method (Fisher's method)
}
\author{
Gonzalez, JR.
}
| /man/metaPvalues.Rd | permissive | isglobal-brge/dsOmicsClient | R | false | true | 537 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metaPvalues.R
\name{metaPvalues}
\alias{metaPvalues}
\title{Meta-analysis of p-values}
\usage{
metaPvalues(x)
}
\arguments{
\item{x}{a \code{dsOmics} object obtained from \code{ds.limma}, \code{ds.GWAS} or \code{ds.PLINK} functions applied o 2 or more studies}
}
\value{
a matrix with the feature p-values of each study and their combination
}
\description{
Performs a meta-analysis of p-values using the sum of logs method (Fisher's method)
}
\author{
Gonzalez, JR.
}
|
\name{kernelkc}
\alias{kernelkc}
\alias{kernelkcbase}
\alias{getvolumeUDk}
\alias{getvolumeUDs}
\alias{getverticeshrk}
\alias{getverticeshrs}
\alias{exwc}
\title{Kernel Smoothing in Space and Time of the Animals' Use of Space}
\description{
These functions estimate the utilization distribution (UD) in space
and time of animals monitored using radio-telemetry, using the product
kernel estimator advocated by Keating and Cherry (2009).
Note that this approach has also been useful for the analysis of
recoveries in programs involving ringed birds (Calenge et al. 2010,
see section examples below).
  \code{kernelkc} estimates the UD of several animals from an object of
  class \code{ltraj}.
  \code{kernelkcbase} estimates one UD from a data frame with three
  columns indicating the spatial coordinates and associated timing.
\code{exwc} allows to search for the best value of the
time smoothing parameter in the case where the time is considered as a
circular variable (see details).
}
\usage{
kernelkc(tr, h, tcalc, t0, grid = 40, circular = FALSE,
cycle = 24 * 3600, same4all = FALSE,
byburst = FALSE, extent = 0.5)
kernelkcbase(xyt, h, tcalc, t0, grid=40, circular=FALSE,
cycle=24*3600, extent=0.5)
exwc(hv)
}
\arguments{
\item{tr}{an object of class \code{ltraj}}
\item{xyt}{a data frame with three columns indicating the x and y
coordinates, as well as the timing of the relocations. }
\item{h}{a numeric vector with three elements indicating the value of
the smoothing parameters: the first and second elements are
the smoothing parameters of the X and Y coordinates respectively,
the third element is the smoothing parameter for the time
dimension. If \code{circular=TRUE} it should be a smoothing
parameter in the interval 0-1 (see details). If
\code{circular=FALSE} this smoothing parameter should be given in
seconds.}
\item{tcalc}{the time at which the UD is to be estimated}
\item{t0}{if \code{circular=TRUE}, this parameter indicates the time
at which the time cycle begins (see examples).}
\item{grid}{a number giving the size of the grid on which the UD should
be estimated. Alternatively, this parameter may be an object
of class \code{SpatialPixels}. In addition, for the function
\code{kernelkc} this parameter can be a list of objects of class
\code{SpatialPixels}, with named elements corresponding to each
level of the burst/id}
\item{circular}{logical. Indicates whether the time should be
considered as a circular variable (e.g., the 31th december 2007 is
considered to be one day before the 1st january 2007) or not (e.g.,
the 31th december 2007 is considered to be one year after the 1st
january 2007).}
\item{cycle}{if \code{circular=TRUE}, the duration of the time cycle.
for \code{kernelkc}, it should be given in seconds, and for
\code{kernelkcbase}, in the units of the data (the units of the
third column of \code{xyt}).}
\item{same4all}{logical. If \code{TRUE}, the same grid is used for all
levels of id/burst. If \code{FALSE}, one grid per id/burst is used.}
\item{byburst}{logical. Indicates whether one UD should be estimated
by burst of \code{tr}, or whether the data should be pooled across
all bursts of each value of id in \code{tr}}
\item{extent}{a value indicating the extent of the grid used for the
estimation (the extent of the grid on the abscissa is equal
to \code{(min(xy[,1]) + extent * diff(range(xy[,1])))}). }
\item{hv}{a value of smoothing parameter for the time dimension.}
\item{\dots}{additional arguments to be passed to the function
\code{contour}.}
}
\details{
Keating and Cherry (2009) advocated the estimation of the UD in
time and space using the product kernel estimator. These functions
implement exactly this methodology.\
For the spatial coordinates, the implemented kernel function is the
biweight kernel.
Two possible
approaches are possible to manage the time in the estimation process:
(i) the time may be considered as a linear variable (e.g., the 31th
december 2007 is considered to be one day before the 1st january
2007), or (ii) the time may be considered as a circular variable
(e.g., the 31th december 2007 is considered to be one year after the
1st january 2007).
If the time is considered as a linear variable, the kernel function
used in the estimation process is the biweight kernel. If the time is
considered as a circular variable, the implemented kernel is the
wrapped Cauchy distribution (as in the article of Keating and Cherry).
In this latter case, the smoothing parameter should be chosen in the
interval 0-1, with a value of 1 corresponding to a stronger
smoothing.
These functions can only be used on objects of class "ltraj", but
the estimation of the UD in time and space is also possible with other
types of data (see the help page of \code{kernelkcbase}). Note that
both \code{kernelkc} and \code{kernelkcbase} return conditional
probability density function (pdf), i.e. the pdf to relocate an animal
at a place, given that it has been relocated at time \code{tcalc}
(i.e. the volume under the UD estimated at time \code{tcalc} is equal
to 1 whatever \code{tcalc}).
The function \code{exwc} draws a graph of the wrapped
Cauchy distribution for the chosen \code{h} parameter (for circular
  time), so that it is possible to make up one's mind concerning the weight
that can be given to the neighbouring points of a given time point.
Note that although Keating and Cherry (2009) advocated the use of
an automatic algorithm to select "optimal" values for the smoothing
parameter, it is not implemented in adehabitatHR. Indeed, different
smoothing parameters may allow to identify patterns at different
scales, and we encourage the user to try several values before
subjectively choosing the value which allows to more clearly identify
the patterns of the UD.
}
\value{
\code{kernelkc} returns a list of class "\code{estUDm}" containing
objects of class \code{estUD}, mapping one estimate of the UD per burst
or id (depending on the value of the parameter \code{byburst}).
\code{kernelkcbase} returns an object of class "\code{estUD}" mapping
the estimated UD.
}
\references{
Keating, K. and Cherry, S. (2009) Modeling utilization
distributions in space and time. \emph{Ecology}, \bold{90}:
1971--1980.
Calenge, C., Guillemain, M., Gauthier-Clerc, M. and Simon, G. 2010. A
new exploratory approach to the study of the spatio-temporal
distribution of ring recoveries - the example of Teal (Anas crecca)
ringed in Camargue, Southern France. \emph{Journal of Ornithology},
\bold{151}, 945--950.
}
\author{Clement Calenge \email{clement.calenge@oncfs.gouv.fr}}
\seealso{\code{\link{as.ltraj}} for additional information on objects of
class \code{ltraj}, \code{\link{kernelUD}} for the "classical" kernel
home range estimates. }
\examples{
\dontrun{
################################################
##
## Illustrates the analysis of recoveries of
## ringed data
data(teal)
head(teal)
## compute the sequence of dates at which the
## probability density function (pdf) of recoveries is to be estimated
vv <- seq(min(teal$date), max(teal$date), length=50)
head(vv)
## The package "maps" should be installed for the example below
library(maps)
re <- lapply(1:length(vv), function(i) {
## Estimate the pdf. We choose a smoothing parameter of
## 2 degrees of lat-long for X and Y coordinates,
## and of 2 months for the time
uu <- kernelkcbase(teal, c(2.5,2.5,2*30*24*3600), tcalc =
vv[i], grid=100, extent=0.1)
## now, we show the result
## potentially, we could type
##
## jpeg(paste("prdefu", i, ".jpg", sep=""))
##
## to store the figures in a file, and then to build a
## movie with the resulting files:
##
image(uu, col=grey(seq(1,0, length=8)))
title(main=vv[i])
## highlight the area on which there is a probability
## equal to 0.95 to recover a bird
## ****warning! The argument standardize=TRUE should
## be passed, because the UD is defined in space and
## time, and because we estimate the UD just in space
plot(getverticeshr(uu, 95, standardize=TRUE), add=TRUE,
border="red", lwd=2)
## The map:
map(xlim=c(-20,70), ylim=c(30,80), add=TRUE)
## and if we had typed jpeg(...) before, we have to type
## dev.off()
## to close the device. When we have finished this loop
## We could combine the resulting files with imagemagick
## (windows) or mencoder (linux)
})
################################################
##
## Illustrates how to explore the UD in time and
## space with the bear data
data(bear)
## compute the sequence of dates at which the UD is to be
## estimated
vv <- seq(min(bear[[1]]$date), max(bear[[1]]$date), length=50)
head(vv)
## estimates the UD at each time point
re <- lapply(1:length(vv), function(i) {
## estimate the UD. We choose a smoothing parameter of
## 1000 meters for X and Y coordinates, and of 72 hours
## for the time (after a visual exploration)
uu <- kernelkc(bear, h = c(1000,1000,72*3600),
tcalc= vv[i], grid=100)
## now, we show the result
## potentially, we could type
##
## jpeg(paste("UD", i, ".jpg", sep=""))
##
## to store the figures in a file, and then to build a
## movie with the resulting files:
##
image(uu, col=grey(seq(1,0,length=10)))
title(main=vv[i])
## highlight the 95 percent home range
## we set standardize = TRUE because we want to estimate
## the home range in space from a UD estimated in space and
## time
plot(getverticeshr(uu, 95, standardize=TRUE), lwd=2,
border="red", add=TRUE)
## and if we had typed jpeg(...) before, we have to type
## dev.off()
## to close the device. When we have finished this loop
## We could combine the resulting files with imagemagick
## (windows) or mencoder (linux)
})
## Or, just show the home range:
re <- lapply(1:length(vv), function(i) {
uu <- kernelkc(bear, h = c(1000,1000,72*3600),
tcalc= vv[i])
pc <- getverticeshr(uu, 95, standardize=TRUE)
plot(pc, xlim=c(510000, 530000),
ylim=c(6810000, 6825000))
title(main=vv[i])
})
##################################################
##
## Example with several wild boars (linear time)
## load wild boar data
data(puechcirc)
## keep only the first two circuits:
puechc <- puechcirc[1:2]
## Now load the map of the elevation
data(puechabonsp)
## compute the time point at which the UD is to be estimated
vv <- seq(min(puechcirc[[2]]$date), max(puechcirc[[2]]$date),
length=50)
## The estimate the UD
re <- lapply(1:length(vv),
function(i) {
## We choose a smoothing parameter of 300 meters for
## the x and y coordinates and of one hour for the time
## (but try to play with these smoothing parameters)
uu <- kernelkc(puechcirc, h=c(300,300,3600),
tcalc = vv[i], same4all=TRUE,
extent=0.1)
## show the elevation
image(puechabonsp$map,
xlim=c(698000,704000),
ylim=c(3156000,3160000))
title(main=vv[i])
## and the UD, with contour lines
colo <- c("green","blue")
lapply(1:length(uu), function(i) {
contour(as(uu[[i]],"SpatialPixelsDataFrame"),
add=TRUE, col=colo[i])
})
## the blue contour lines show the UD of the mother and
## the red ones correspond to her son. Adult wild boars
         ## are known to be more "shy" than the younger ones.
## Here, the low elevation corresponds to crop area
## (vineyards). The young boar is the first and the
## last in the crops
})
##################################################
##
## Example with the bear, to illustrate (circular time)
data(bear)
## We consider a time cycle of 24 hours.
## the following vector contains the time points on the
## time circle at which the UD is to be estimated (note that
## the time is given in seconds)
vv <- seq(0, 24*3600-1, length=40)
## for each time point:
re <- lapply(1:length(vv),
function(i) {
## Estimation of the UD for the bear. We choose
## a smoothing parameter of 1000 meters for the spatial
## coordinates and a smoothing parameter equal to 0.2
## for the time. We set the beginning of the time
## cycle at midnight (no particular reason, just to
## illustrate the function). So we pass, as t0, any
         ## object of class POSIXct corresponding to a date at
## this hour, for example the 12/25/2012 at 00H00
t0 <- as.POSIXct("2012-12-25 00:00")
uu <- kernelkc(bear, h=c(1000,1000,0.2), cycle=24*3600,
tcalc=vv[i], t0=t0, circular=TRUE)
## shows the results
## first compute the hour for the title
hour <- paste(floor(vv[i]/3600), "hours",
floor((vv[i]\%\%3600)/60), "min")
## compute the 95\% home range
pc <- getverticeshr(uu, 95, standardize=TRUE)
plot(pc, xlim=c(510000, 530000),
ylim=c(6810000, 6825000))
title(main=hour)
## compute the 50\% home range
pc <- getverticeshr(uu, 50, standardize=TRUE)
plot(pc, add=TRUE, col="blue")
})
## Now, each home range computed at a given time point corresponds to
## the area used by the animal at this time period. We may for example
## try to identify the main difference in habitat composition of the
## home-range between different time, to identify differences in
## habitat use between different time of the day. We do not do it here
## (lack of example data)
##################################################
##
## Example of the use of the function kernelkcbase and
## related functions
## load the data
data(puechabonsp)
locs <- puechabonsp$relocs
## keeps only the wild boar Jean
locs <- locs[slot(locs, "data")[,1]=="Jean",]
## compute the number of days since the beginning
## of the monitoring
dd <- cumsum(c(0, diff(strptime(slot(locs, "data")[,4], "\%y\%m\%d"))))
dd
## compute xyt. Note that t is here the number of
## days since the beginning of the monitoring (it
## is not an object of class POSIXt, but it may be)
xyt <- data.frame(as.data.frame(coordinates(locs)), dd)
## Now compute the time points at which the UD is to be estimated:
vv <- 1:61
## and finally, show the UD changed with time:
re <- lapply(1:length(vv),
function(i) {
ud <- kernelkcbase(xyt, h=c(300,300,20),
tcalc=vv[i], grid=100)
image(ud, main=vv[i])
plot(getverticeshr(ud, 95, standardize=TRUE),
border="red", lwd=2, add=TRUE)
## Just to slow down the process
Sys.sleep(0.2)
})
}
}
\keyword{spatial}
| /adehabitatHR/man/kernelkc.Rd | no_license | radfordneal/R-package-mods | R | false | false | 16,304 | rd | \name{kernelkc}
\alias{kernelkc}
\alias{kernelkcbase}
\alias{getvolumeUDk}
\alias{getvolumeUDs}
\alias{getverticeshrk}
\alias{getverticeshrs}
\alias{exwc}
\title{Kernel Smoothing in Space and Time of the Animals' Use of Space}
\description{
These functions estimate the utilization distribution (UD) in space
and time of animals monitored using radio-telemetry, using the product
kernel estimator advocated by Keating and Cherry (2009).
Note that this approach has also been useful for the analysis of
recoveries in programs involving ringed birds (Calenge et al. 2010,
see section examples below).
  \code{kernelkc} estimates the UD of several animals from an object of
  class \code{ltraj}.
  \code{kernelkcbase} estimates one UD from a data frame with three
  columns indicating the spatial coordinates and associated timing.
\code{exwc} allows to search for the best value of the
time smoothing parameter in the case where the time is considered as a
circular variable (see details).
}
\usage{
kernelkc(tr, h, tcalc, t0, grid = 40, circular = FALSE,
cycle = 24 * 3600, same4all = FALSE,
byburst = FALSE, extent = 0.5)
kernelkcbase(xyt, h, tcalc, t0, grid=40, circular=FALSE,
cycle=24*3600, extent=0.5)
exwc(hv)
}
\arguments{
\item{tr}{an object of class \code{ltraj}}
\item{xyt}{a data frame with three columns indicating the x and y
coordinates, as well as the timing of the relocations. }
\item{h}{a numeric vector with three elements indicating the value of
the smoothing parameters: the first and second elements are
the smoothing parameters of the X and Y coordinates respectively,
the third element is the smoothing parameter for the time
dimension. If \code{circular=TRUE} it should be a smoothing
parameter in the interval 0-1 (see details). If
\code{circular=FALSE} this smoothing parameter should be given in
seconds.}
\item{tcalc}{the time at which the UD is to be estimated}
\item{t0}{if \code{circular=TRUE}, this parameter indicates the time
at which the time cycle begins (see examples).}
\item{grid}{a number giving the size of the grid on which the UD should
be estimated. Alternatively, this parameter may be an object
of class \code{SpatialPixels}. In addition, for the function
\code{kernelkc} this parameter can be a list of objects of class
\code{SpatialPixels}, with named elements corresponding to each
level of the burst/id}
\item{circular}{logical. Indicates whether the time should be
considered as a circular variable (e.g., the 31th december 2007 is
considered to be one day before the 1st january 2007) or not (e.g.,
the 31th december 2007 is considered to be one year after the 1st
january 2007).}
\item{cycle}{if \code{circular=TRUE}, the duration of the time cycle.
for \code{kernelkc}, it should be given in seconds, and for
\code{kernelkcbase}, in the units of the data (the units of the
third column of \code{xyt}).}
\item{same4all}{logical. If \code{TRUE}, the same grid is used for all
levels of id/burst. If \code{FALSE}, one grid per id/burst is used.}
\item{byburst}{logical. Indicates whether one UD should be estimated
by burst of \code{tr}, or whether the data should be pooled across
all bursts of each value of id in \code{tr}}
\item{extent}{a value indicating the extent of the grid used for the
estimation (the extent of the grid on the abscissa is equal
to \code{(min(xy[,1]) + extent * diff(range(xy[,1])))}). }
\item{hv}{a value of smoothing parameter for the time dimension.}
\item{\dots}{additional arguments to be passed to the function
\code{contour}.}
}
\details{
Keating and Cherry (2009) advocated the estimation of the UD in
time and space using the product kernel estimator. These functions
  implement exactly this methodology.\cr
For the spatial coordinates, the implemented kernel function is the
biweight kernel.
Two possible
approaches are possible to manage the time in the estimation process:
(i) the time may be considered as a linear variable (e.g., the 31th
december 2007 is considered to be one day before the 1st january
2007), or (ii) the time may be considered as a circular variable
(e.g., the 31th december 2007 is considered to be one year after the
1st january 2007).
If the time is considered as a linear variable, the kernel function
used in the estimation process is the biweight kernel. If the time is
considered as a circular variable, the implemented kernel is the
wrapped Cauchy distribution (as in the article of Keating and Cherry).
In this latter case, the smoothing parameter should be chosen in the
interval 0-1, with a value of 1 corresponding to a stronger
smoothing.
These functions can only be used on objects of class "ltraj", but
the estimation of the UD in time and space is also possible with other
types of data (see the help page of \code{kernelkcbase}). Note that
both \code{kernelkc} and \code{kernelkcbase} return conditional
probability density function (pdf), i.e. the pdf to relocate an animal
at a place, given that it has been relocated at time \code{tcalc}
(i.e. the volume under the UD estimated at time \code{tcalc} is equal
to 1 whatever \code{tcalc}).
The function \code{exwc} draws a graph of the wrapped
Cauchy distribution for the chosen \code{h} parameter (for circular
  time), so that it is possible to make up one's mind concerning the weight
that can be given to the neighbouring points of a given time point.
Note that although Keating and Cherry (2009) advocated the use of
an automatic algorithm to select "optimal" values for the smoothing
parameter, it is not implemented in adehabitatHR. Indeed, different
smoothing parameters may allow to identify patterns at different
scales, and we encourage the user to try several values before
subjectively choosing the value which allows to more clearly identify
the patterns of the UD.
}
\value{
\code{kernelkc} returns a list of class "\code{estUDm}" containing
objects of class \code{estUD}, mapping one estimate of the UD per burst
or id (depending on the value of the parameter \code{byburst}).
\code{kernelkcbase} returns an object of class "\code{estUD}" mapping
the estimated UD.
}
\references{
Keating, K. and Cherry, S. (2009) Modeling utilization
distributions in space and time. \emph{Ecology}, \bold{90}:
1971--1980.
Calenge, C., Guillemain, M., Gauthier-Clerc, M. and Simon, G. 2010. A
new exploratory approach to the study of the spatio-temporal
distribution of ring recoveries - the example of Teal (Anas crecca)
ringed in Camargue, Southern France. \emph{Journal of Ornithology},
\bold{151}, 945--950.
}
\author{Clement Calenge \email{clement.calenge@oncfs.gouv.fr}}
\seealso{\code{\link{as.ltraj}} for additional information on objects of
class \code{ltraj}, \code{\link{kernelUD}} for the "classical" kernel
home range estimates. }
\examples{
\dontrun{
################################################
##
## Illustrates the analysis of recoveries of
## ringed data
data(teal)
head(teal)
## compute the sequence of dates at which the
## probability density function (pdf) of recoveries is to be estimated
vv <- seq(min(teal$date), max(teal$date), length=50)
head(vv)
## The package "maps" should be installed for the example below
library(maps)
re <- lapply(1:length(vv), function(i) {
## Estimate the pdf. We choose a smoothing parameter of
## 2 degrees of lat-long for X and Y coordinates,
## and of 2 months for the time
uu <- kernelkcbase(teal, c(2.5,2.5,2*30*24*3600), tcalc =
vv[i], grid=100, extent=0.1)
## now, we show the result
## potentially, we could type
##
## jpeg(paste("prdefu", i, ".jpg", sep=""))
##
## to store the figures in a file, and then to build a
## movie with the resulting files:
##
image(uu, col=grey(seq(1,0, length=8)))
title(main=vv[i])
## highlight the area on which there is a probability
## equal to 0.95 to recover a bird
## ****warning! The argument standardize=TRUE should
## be passed, because the UD is defined in space and
## time, and because we estimate the UD just in space
plot(getverticeshr(uu, 95, standardize=TRUE), add=TRUE,
border="red", lwd=2)
## The map:
map(xlim=c(-20,70), ylim=c(30,80), add=TRUE)
## and if we had typed jpeg(...) before, we have to type
## dev.off()
## to close the device. When we have finished this loop
## We could combine the resulting files with imagemagick
## (windows) or mencoder (linux)
})
################################################
##
## Illustrates how to explore the UD in time and
## space with the bear data
data(bear)
## compute the sequence of dates at which the UD is to be
## estimated
vv <- seq(min(bear[[1]]$date), max(bear[[1]]$date), length=50)
head(vv)
## estimates the UD at each time point
re <- lapply(1:length(vv), function(i) {
## estimate the UD. We choose a smoothing parameter of
## 1000 meters for X and Y coordinates, and of 72 hours
## for the time (after a visual exploration)
uu <- kernelkc(bear, h = c(1000,1000,72*3600),
tcalc= vv[i], grid=100)
## now, we show the result
## potentially, we could type
##
## jpeg(paste("UD", i, ".jpg", sep=""))
##
## to store the figures in a file, and then to build a
## movie with the resulting files:
##
image(uu, col=grey(seq(1,0,length=10)))
title(main=vv[i])
## highlight the 95 percent home range
## we set standardize = TRUE because we want to estimate
## the home range in space from a UD estimated in space and
## time
plot(getverticeshr(uu, 95, standardize=TRUE), lwd=2,
border="red", add=TRUE)
## and if we had typed jpeg(...) before, we have to type
## dev.off()
## to close the device. When we have finished this loop
## We could combine the resulting files with imagemagick
## (windows) or mencoder (linux)
})
## Or, just show the home range:
re <- lapply(1:length(vv), function(i) {
uu <- kernelkc(bear, h = c(1000,1000,72*3600),
tcalc= vv[i])
pc <- getverticeshr(uu, 95, standardize=TRUE)
plot(pc, xlim=c(510000, 530000),
ylim=c(6810000, 6825000))
title(main=vv[i])
})
##################################################
##
## Example with several wild boars (linear time)
## load wild boar data
data(puechcirc)
## keep only the first two circuits:
puechc <- puechcirc[1:2]
## Now load the map of the elevation
data(puechabonsp)
## compute the time point at which the UD is to be estimated
vv <- seq(min(puechcirc[[2]]$date), max(puechcirc[[2]]$date),
length=50)
## The estimate the UD
re <- lapply(1:length(vv),
function(i) {
## We choose a smoothing parameter of 300 meters for
## the x and y coordinates and of one hour for the time
## (but try to play with these smoothing parameters)
uu <- kernelkc(puechcirc, h=c(300,300,3600),
tcalc = vv[i], same4all=TRUE,
extent=0.1)
## show the elevation
image(puechabonsp$map,
xlim=c(698000,704000),
ylim=c(3156000,3160000))
title(main=vv[i])
## and the UD, with contour lines
colo <- c("green","blue")
lapply(1:length(uu), function(i) {
contour(as(uu[[i]],"SpatialPixelsDataFrame"),
add=TRUE, col=colo[i])
})
## the blue contour lines show the UD of the mother and
                 ## the green ones correspond to her son. Adult wild boars
                 ## are known to be more "shy" than the younger ones.
## Here, the low elevation corresponds to crop area
## (vineyards). The young boar is the first and the
## last in the crops
})
##################################################
##
## Example with the bear, to illustrate (circular time)
data(bear)
## We consider a time cycle of 24 hours.
## the following vector contains the time points on the
## time circle at which the UD is to be estimated (note that
## the time is given in seconds)
vv <- seq(0, 24*3600-1, length=40)
## for each time point:
re <- lapply(1:length(vv),
function(i) {
## Estimation of the UD for the bear. We choose
## a smoothing parameter of 1000 meters for the spatial
## coordinates and a smoothing parameter equal to 0.2
## for the time. We set the beginning of the time
## cycle at midnight (no particular reason, just to
## illustrate the function). So we pass, as t0, any
                 ## object of class POSIXct corresponding to a date at
## this hour, for example the 12/25/2012 at 00H00
t0 <- as.POSIXct("2012-12-25 00:00")
uu <- kernelkc(bear, h=c(1000,1000,0.2), cycle=24*3600,
tcalc=vv[i], t0=t0, circular=TRUE)
## shows the results
## first compute the hour for the title
hour <- paste(floor(vv[i]/3600), "hours",
floor((vv[i]\%\%3600)/60), "min")
## compute the 95\% home range
pc <- getverticeshr(uu, 95, standardize=TRUE)
plot(pc, xlim=c(510000, 530000),
ylim=c(6810000, 6825000))
title(main=hour)
## compute the 50\% home range
pc <- getverticeshr(uu, 50, standardize=TRUE)
plot(pc, add=TRUE, col="blue")
})
## Now, each home range computed at a given time point corresponds to
## the area used by the animal at this time period. We may for example
## try to identify the main difference in habitat composition of the
## home-range between different time, to identify differences in
## habitat use between different time of the day. We do not do it here
## (lack of example data)
##################################################
##
## Example of the use of the function kernelkcbase and
## related functions
## load the data
data(puechabonsp)
locs <- puechabonsp$relocs
## keeps only the wild boar Jean
locs <- locs[slot(locs, "data")[,1]=="Jean",]
## compute the number of days since the beginning
## of the monitoring
dd <- cumsum(c(0, diff(strptime(slot(locs, "data")[,4], "\%y\%m\%d"))))
dd
## compute xyt. Note that t is here the number of
## days since the beginning of the monitoring (it
## is not an object of class POSIXt, but it may be)
xyt <- data.frame(as.data.frame(coordinates(locs)), dd)
## Now compute the time points at which the UD is to be estimated:
vv <- 1:61
## and finally, show the UD changed with time:
re <- lapply(1:length(vv),
function(i) {
ud <- kernelkcbase(xyt, h=c(300,300,20),
tcalc=vv[i], grid=100)
image(ud, main=vv[i])
plot(getverticeshr(ud, 95, standardize=TRUE),
border="red", lwd=2, add=TRUE)
## Just to slow down the process
Sys.sleep(0.2)
})
}
}
\keyword{spatial}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TimeScale.r
\name{TimeScale}
\alias{TimeScale}
\title{Time scale vector from 00:00:00 to 23:59:59.}
\format{
A vector of characters with 86400 entries.
}
\description{
A vector of length 86400 containing the time scale characters from 00:00:00
to 23:59:59.
}
| /man/TimeScale.Rd | no_license | cran/ActivityIndex | R | false | true | 350 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TimeScale.r
\name{TimeScale}
\alias{TimeScale}
\title{Time scale vector from 00:00:00 to 23:59:59.}
\format{
A vector of characters with 86400 entries.
}
\description{
A vector of length 86400 containing the time scale characters from 00:00:00
to 23:59:59.
}
|
/*
Hacks to match MacOS (most recent first):
<Sys7.1> 8/3/92 Reverted <23>: restored '…' to About Keyboards strings.
9/2/94 SuperMario ROM source dump (header preserved below)
*/
/*
File: International.r
Contains: Rez-format international resources for the System file
Written by: PKE Peter Edberg
SMB Sue Bartalo
Copyright: © 1983-1992 by Apple Computer, Inc., all rights reserved.
Change History (most recent first):
<26> 6/23/92 SMB #1029263,<pke>: Set the 'smsfSynchUnstyledTE' bit in the Roman
'itlb' script flags field to indicate that unstyled TE should
synchronize the keyboard and font.
<25> 6/17/92 SMB #1024950,<cv>: Since now using ParamText for About Keyboards
dialog items, I added new STR# resources for each of the DITLs.
Added a new DITL for the About Keyboards dialog to mention input
methods (as well as keyboard layouts) when a 2-byte script is
installed. Updated all About Keyboards dialog items with
Instructional Products suggestions.
Added different default input method icons (again), a different
default keyboard layout icon that is smaller and more rounded in
appearance than the previous icon, and updated the US-System 6
keyboard icons as well.
<24> 5/12/92 SMB #1026017,<pke>: Added different default input method icon.
<23> 5/12/92 SMB #1026017,<pke>: Added default input method icon. Updated System
6 U.S. keyboard icons. Removed '…' from About Keyboards
strings.
<22> 4/16/92 JSM Get rid of hasBalloonHelp (which was never used consistently
anyway) and SixPack (which was never used at all) conditionals.
<21> 4/15/92 PKE #1026910,<jh>: Delete unnecessary kcs8 resources, save 512
bytes.
<20> 4/8/92 PKE #1026586,<cv>: Add tentative keyboard icons for System 6 U.S.
keyboard, ID 16383.
<19> 3/30/92 PKE #1022875,<ha>: In KSWP, for Cmd-rightArrow, use new
RomanIfOthers action instead of Roman (corresponds to KeyScript
verb smKeyRoman instead of smRoman). This way, Cmd-rightArrow
will not be stripped from the input queue unless it does
something useful, which is only when there are other scripts
installed besides Roman.
<18> 12/11/91 SMB #1014712 - For CubeE, in the About Keyboards… dialog for
non-Roman scripts, the message about rotating keyboards within
scripts should appear. Had to enlarge the DITL by 15 pixels for
this.
<17> 9/25/91 PKE For Cube-E (and Bruges): Update 'itlc' data to match latest
template in SysTypes.r.
<16> 9/20/91 PKE Remove CubeE conditional, since the new plan (according to
Darin) is that everything for Cube-E goes in without
conditionals. While I'm at it, remove some ">=$700"
conditionals.
<15> 9/15/91 PKE Oops, fix feature symbols to match latest BBSStartup.
<14> 9/15/91 PKE Changes for Bruges/Cube-E: Turn off FontForce in default ‘itlc’
resource. Fix ‘KSWP’ so arrow keys work on ADB keyboards too
(Command-leftArrow selects system script, Command-rightArrow
selects Roman).
<13> 2/26/91 PKE smb,#perCindyR: Change resource name “US” to “U.S.”.
<12> 2/21/91 SMB pke,#smb-2/21/91-1: Added smaller default keyboard icons
(‘kcs#’,‘kcs4’,‘kcs8’ with ID -16491).
<11> 2/15/91 SMB pke,#smb-2/15/91-1: Changing US 0 keyboard resources to smaller
icons.
<10> 1/7/91 SMB (JDR) approved the last change!
<9> 1/7/91 SMB Replacing the degree symbol as the default keyboard marker with
null until it's demonstrated that a symbol should be used in the
keyboard menu. Also removed the comment about the degree symbol
from the DITL. Updated the DITL and DLOG for About Keyboards
dialog.
<8> 12/14/90 PKE (DC) Change abbreviated day and month strings in 'itl1' to match
old 3-char form, so abbreviated dates don’t change on US system.
Complete DITL text that explains switching among keyboards (the
Cmd-Opt-space key combo needed to be finalized; other edits).
<7> 12/10/90 PKE (VL) Add to KSWP resource: Cmd-Opt-space, for rotate to next
keyboard in current script.
<6> 10/30/90 SMB Changed data for US and default kcs’s.
<5> 9/1/90 SMB Added another About Keyboards DITL & DLOG for a system with only
1 script. Also modified the other About msg.
<4> 8/10/90 SMB Added ‘About Keyboards…’ dialog (DITL & DLOG) and updated help
messages.
<3> 8/7/90 csd Fixed problems caused by having double-/ comments in #defines.
<2> 8/7/90 SMB Added default keybd symbol and "About Keyboards…" to the
Keyboard Menu STR# resource (although About is conditionalized
out until later). Added balloon help 'hmnu' resource and indices
for new help strings in Keyboard Menu STR# resource.
<1> 7/23/90 PKE New today, extracted from Sys.r. Added include of BalloonTypes.r
since we’ll need it later. Note that real resource attributes are
still set in Sys.r.
Relevant comments from Sys.r (marker changed to <xNN>
<x163> 7/17/90 PKE ‘kscn’ resource (see <x9.8>) has been superseded by keyboard icon
suite (kcs#, etc. - see <x102>). Changed ‘kscn’ 0 back to ‘SICN’
0 for backward compatibility.
<x158> 7/7/90 PKE Define “sysHeapFromNowOn” symbol to use as heap attribute for
resources that did not have sysheap attribute in 6.x, but which
should have sysheap attribute set for 7.0 and beyond (Darin gets
credit for the symbol name). Use it for itlb, itl0, itl1, itl2,
and itl4 (all ID=0) and PACK 6. For new 7.0 international
resources, just set sysHeap attribute: itlm, kcs#, kcs4, kcs8,
and STR# (-16491). This fixes a bug with saving handles to
resources loaded at INIT time.
<x146> 6/22/90 PKE Updated 'itlm' data to use new script, language, and region
names, and rearranged sorting order of scripts, languages, and
regions.
<x121> 6/1/90 SMB Add STR# and default icon suites for Keyboard Menu.
<x117> 5/29/90 PKE Added bytes for valid style and alias style to ‘itlb’ type.
Added list of additional separators to ‘itl1’. Both changes
match new formats in SysTypes.r.
<x109> 5/21/90 PKE Added region code table to ‘itlm’ resource to match new format
in SysTypes.r. Updated itlm data to use newer script, lang, and
region codes (smTradChinese, etc.). Converted ‘itl1’ resource
for 7.0 to new extended form with list of abbreviated day and
month names, etc.
<x102> 5/12/90 SMB Adding the keyboard small icons (kcs#, kcs4, kcs8) for the US
default keyboard (KCHR 0).
<x97> 5/2/90 PKE Rearranged 'itlm' resource data to match new format.
<x94> 4/25/90 PKE Add 'itlm' resource for multiscript mapping and sorting data.
<x59> 3/21/90 PKE Added new info to US 'itlb' resource: font/size data, script
local data size (needed for auto-initialization of script
systems). Matches updated type in SysTypes.r. Also defined some
FOND ID constants.
<x19> 1/17/90 PKE Updated itlc resource data for new region code field added to
itlc type in SysTypes.r.
<x18> 1/17/90 PKE Updated itlc resource data to use MPW 3.1 itlc format, which is
now in SysTypes.r.
<x9.8> 9/18/89 PKE For 7.0, changed type of keyboard/script small icon from 'SICN'
to new 'kscn' to avoid resource ID conflicts.
3/12/89 PKE Added script icon location at end of itlc (in previously
reserved bytes)
3/8/89 PKE Commented out itlr stuff
To Do:
• We can delete the kcs8 resources to save space, since they use the same colors as the
kcs4 resources.
*/
#include "Types.r"
#include "SysTypes.r"
#include "BalloonTypes.r"
//__________________________________________________________________________________________________
// macros from Sys.r
#define IncSys Include $$Shell("ObjDir")
#define codeAs 'RSRC' (0) as
#define sysHeapFromNowOn sysHeap
#define kKeybdMenuID -16491 /* if this changes, update kKeyboardMenuID in ScriptPriv.a */
#define kDefInputMethodIconID -16492 /* if this changes, update kDefaultIMIconID in ScriptPriv.a <23> */
// STR# resource IDs
#define kKeybdMenuHelpID kKeybdMenuID /* <25> */
#define kAboutKeybdRoman kKeybdMenuID-1 /* <25> */
#define kAboutKeybdIMs kKeybdMenuID-2 /* <25> */
#define kAboutKeybdMulti kKeybdMenuID-3 /* <25> */
#define kKeybdMenuItemsID kKeybdMenuID-4 /* if this changes, update kKeyboardMenuItemsID in ScriptPriv.a <25> */
// indices for strings in Keyboard Menu help STR# resource (kKeybdMenuHelpID)
#define kKMEnabledTitleIndex 1 /* Enabled keyboard Menu title <2> */
#define kKMDisabledTitleIndex 2 /* Disabled keyboard Menu title <2> */
#define kKMDisabledTitleModalIndex 3 /* Disabled kybd menu title w/ modal up <2> */
#define kKMDisabledItemModalIndex 4 /* Disabled kybd menu item w/ modal up <2> */
#define kKMEnabledItemIndex 5 /* Enabled keyboard Menu item <2> */
#define kKMDisabledItemIndex 6 /* Disabled keyboard Menu item <2> */
#define kKMCheckedItemIndex 7 /* Checked keyboard menu item <2> */
#define kKMOtherItemIndex 8 /* Default keyboard for a script <2> */
#define kKMAboutItemIndex 9 /* About item <2> */
#define kKMDisabledAboutItemIndex 10 /* About item disabled <2> */
#define USname "U.S." /*<13>*/
//__________________________________________________________________________________________________
// *************************************************************************************************
// IMPORTANT: the real attributes for all of the following are set in Sys.r.
// *************************************************************************************************
resource 'itlc' (0, sysHeap, purgeable) {
0, // system script is Roman.
2048, // key cache size is 2K
noFontForce, // fontForce is off <14>
intlForce, // intlForce is on
noOldKeyboard, // no old international keyboard.
0, // general flags (see smGenFlags info in ScriptEqu.a)
40, // keybd icon offset from end of menu bar <x18>
rightOffset, // keybd icon at right end of menu bar <x18>
0, // reserved for keybd icon data <x18>
verUS, // preferred region code <x19>
directionLeftRight, // default line direction <17>
$"" // reserved <x18>
};
resource 'itlm' (0, sysHeap, purgeable) {
$700, // version
$0000, // format
smUninterp, // max script code for script->lang mapping
langUnspecified, // default lang code for unlisted scripts
{ // script order and default lang table:
smRoman, langEnglish,
smSlavic, langCroatian,
smGreek, langGreek,
smCyrillic, langRussian,
smGeorgian, langGeorgian,
smArmenian, langArmenian,
smArabic, langArabic,
smExtArabic, langSindhi,
smHebrew, langHebrew,
smGeez, langAmharic,
smDevanagari, langHindi,
smGurmukhi, langPunjabi,
smGujarati, langGujarati,
smOriya, langOriya,
smBengali, langBengali,
smTamil, langTamil,
smTelugu, langTelugu,
smKannada, langKannada,
smMalayalam, langMalayalam,
smSinhalese, langSinhalese,
smBurmese, langBurmese,
smKhmer, langKhmer,
smThai, langThai,
smLaotian, langLao,
smTibetan, langTibetan,
smMongolian, langMongolian,
smVietnamese, langVietnamese,
smTradChinese, langTradChinese,
smSimpChinese, langSimpChinese,
smJapanese, langJapanese,
smKorean, langKorean,
smRSymbol, langHebrew,
smUninterp, langEnglish
},
langSimpChinese, // max lang code for lang->script mapping
smRoman, // default script code for unlisted langs
{ // lang order and parent script table:
langEnglish, smRoman,
langFrench, smRoman,
langGerman, smRoman,
langItalian, smRoman,
langDutch, smRoman,
langSwedish, smRoman,
langSpanish, smRoman,
langDanish, smRoman,
langPortuguese, smRoman,
langNorwegian, smRoman,
langFinnish, smRoman,
langIcelandic, smRoman,
langMaltese, smRoman,
langTurkish, smRoman,
langLithuanian, smRoman,
langEstonian, smRoman,
langLettish, smRoman,
langLappish, smRoman,
langFaeroese, smRoman,
langCroatian, smSlavic,
langPolish, smSlavic,
langHungarian, smSlavic,
langGreek, smGreek,
langRussian, smCyrillic,
langArabic, smArabic,
langUrdu, smArabic,
langFarsi, smArabic,
langHebrew, smHebrew,
langHindi, smDevanagari,
langThai, smThai,
langTradChinese, smTradChinese,
langSimpChinese, smSimpChinese,
langJapanese, smJapanese,
langKorean, smKorean,
},
verThailand, // max region code for region->lang mapping
langUnspecified, // default lang code for unlisted regions
{ // region order and parent lang table:
verUS, langEnglish,
verBritain, langEnglish,
verAustralia, langEnglish,
verIreland, langEnglish,
verFrance, langFrench,
verFrBelgiumLux, langFrench,
verFrCanada, langFrench,
verFrSwiss, langFrench,
verGermany, langGerman,
verGrSwiss, langGerman,
verItaly, langItalian,
verNetherlands, langDutch,
verSweden, langSwedish,
verSpain, langSpanish,
verDenmark, langDanish,
verPortugal, langPortuguese,
verNorway, langNorwegian,
verFinland, langFinnish,
verIceland, langIcelandic,
verMalta, langMaltese,
verTurkey, langTurkish,
verLithuania, langLithuanian,
verEstonia, langEstonian,
verLatvia, langLettish,
verLapland, langLappish,
verFaeroeIsl, langFaeroese,
verYugoCroatian, langCroatian,
verPoland, langPolish,
verHungary, langHungarian,
verGreece, langGreek,
verRussia, langRussian,
verArabic, langArabic,
verPakistan, langUrdu,
verIran, langFarsi,
verIsrael, langHebrew,
verIndiaHindi, langHindi,
verThailand, langThai,
verTaiwan, langTradChinese,
verChina, langSimpChinese,
verJapan, langJapanese,
verKorea, langKorean,
verCyprus, langUnspecified // Hmm, 2 languages, which to use here?
}
};
#define Chicago 0 /* <x59> */
#define Geneva 3 /* <x59> */
#define Monaco 4 /* <x59> */
resource 'itlb' (0, "Roman", sysHeapFromNowOn, purgeable) {
0, // itl0 ID
0, // itl1 ID
0, // itl2 ID
$0107, // script flags (see smScriptFlags info in ScriptEqu.a)
0, // itl4 ID
0, // optional itl5 ID (not used here). <x163>
0, // language code
0, // numbers/dates
0, // KCHR ID
0, // ID of SICN or kcs#/kcs4/kcs8. <x163>
116, // size of Roman local record, in bytes <x59>
Monaco, // default monospace FOND ID <x59>
9, // default monospace font size <x59>
Geneva, // preferred FOND ID <x59>
12, // preferred font size <x59>
Geneva, // default small FOND ID <x59>
9, // default small font size <x59>
Chicago, // default system FOND ID <x59>
12, // default system font size <x59>
Geneva, // default application FOND ID <x59>
12, // default application font size <x59>
Geneva, // default Help Mgr FOND ID <x59>
9, // default Help Mgr font size <x59>
$7F, // valid styles for Roman <x117>
$02 // style set for alias = [italic] <x117>
};
resource 'itl0' (0, USname, sysHeapFromNowOn, purgeable) { /*<13>*/
period, comma, semicolon, dollarsign, "\0x00", "\0x00",
leadingZero, trailingZero, paren, leads, monDayYear,
noCentury, noMonthLeadZero, noDayLeadZero, slash, twelveHour,
noHoursLeadZero, minutesLeadZero, secondsLeadZero, " AM", " PM", ":",
"\0x00", "\0x00", "\0x00", "\0x00", "\0x00", "\0x00", "\0x00", "\0x00",
standard,
verUs, 1,
};
resource 'INTL' (0, USname, purgeable) { /*<13>*/
period, comma, semicolon, dollarsign, "\0x00", "\0x00",
leadingZero, trailingZero, paren, leads, monDayYear,
noCentury, noMonthLeadZero, noDayLeadZero, slash, twelveHour,
noHoursLeadZero, minutesLeadZero, secondsLeadZero, " AM", " PM", ":",
"\0x00", "\0x00", "\0x00", "\0x00", "\0x00", "\0x00", "\0x00", "\0x00",
standard,
verUs, 1,
};
resource 'itl1' (0, USname, sysHeapFromNowOn, purgeable) { /*<13>*/
{ "Sunday"; "Monday"; "Tuesday"; "Wednesday"; "Thursday"; "Friday"; "Saturday"; },
{
"January"; "February"; "March"; "April"; "May"; "June";
"July"; "August"; "September"; "October"; "November"; "December";
},
dayName, monDayYear, noDayLeadZero, 3,
"", ", ", " ", ", ", "",
verUs, 1,
extFormat {
$0700, $0001, 0,
{}, // no extra day names (7 names are plenty, thank you)
{}, // no extra month names
{"Sun"; "Mon"; "Tue"; "Wed"; "Thu"; "Fri"; "Sat"}, // use old 3-char forms <8>
{"Jan"; "Feb"; "Mar"; "Apr"; "May"; "Jun"; "Jul"; "Aug"; "Sep"; "Oct"; "Nov"; "Dec"},
{"-"; "."} // other reasonable date separators <x117>
},
};
resource 'INTL' (1, USname, purgeable) { /*<13>*/
{ "Sunday"; "Monday"; "Tuesday"; "Wednesday"; "Thursday"; "Friday"; "Saturday"; },
{
"January"; "February"; "March"; "April"; "May"; "June";
"July"; "August"; "September"; "October"; "November"; "December";
},
dayName, monDayYear, noDayLeadZero, 3,
"", ", ", " ", ", ", "",
verUs, 1,
DefaultReturn,
};
resource 'SICN' (0, sysHeapFromNowOn, purgeable) { { // Roman script icon
$"0000 0100 0380 07C0 0FE0 1FF0 3FF8 7FFC"
$"3FF8 1FF0 0FE0 07C0 0380 0100 0000 0000"
} };
resource 'KSWP' (0, sysHeap) { {
Rotate, $31, controlOff, optionOff, shiftOff, commandOn; // space bar toggles script
RotateKybd, $31, controlOff, optionOn, shiftOff, commandOn; // opt space bar toggles kybd <7>
System, $46, controlOff, optionOff, shiftOff, commandOn; // Mac+ left arrow is system script
RomanIfOthers, $42, controlOff, optionOff, shiftOff, commandOn; // Mac+ right arrow is Roman <19>
System, $7B, controlOff, optionOff, shiftOff, commandOn; // ADB left arrow is system script <14>
RomanIfOthers, $7C, controlOff, optionOff, shiftOff, commandOn; // ADB right arrow is Roman <14><19>
} };
// Keyboard small-icon suites. Each keyboard/script gets up to three resources sharing
// one ID: 'kcs#' (1-bit image + mask, two elements), 'kcs4' (4 bits/pixel) and 'kcs8'
// (8 bits/pixel). The 'kcs8' variants are compiled out below because they use the same
// colors as the 'kcs4' data (see the <21> notes).
//
// ID 0: icon for the U.S. default keyboard layout (KCHR 0).
resource 'kcs#' (0, sysHeap, purgeable) { { /* array: 2 elements */ /* <11> */
/* [1] */
$"0000 0000 0000 0000 0000 FFFF AB01 FFFF"
$"D501 FFFF AB01 FFFF 8001 FFFF 8001 FFFF",
/* [2] */
$"0000 0000 0000 0000 0000 FFFF FFFF FFFF"
$"FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF"
} };
resource 'kcs4' (0, sysHeap, purgeable) { /* <11> */
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0666 6663 3333 3330 0660 6060 0000 00C0"
$"0666 6663 3333 3330 0606 0660 0000 00C0"
$"0666 6663 3333 3330 0C00 0000 0000 00C0"
$"0333 3333 3333 3330 0C00 0000 0000 00C0"
$"0333 3333 3333 3330 0000 0000 0000 0000"
};
#if 0
// don't need, has same colors as kcs4 /* <21> */
resource 'kcs8' (0, sysHeap, purgeable) {
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"00EC ECEC ECEC ECD8 D8D8 D8D8 D8D8 D800"
$"00EC EC00 EC00 EC00 0000 0000 0000 2B00"
$"00EC ECEC ECEC ECD8 D8D8 D8D8 D8D8 D800"
$"00EC 00EC 00EC EC00 0000 0000 0000 2B00"
$"00EC ECEC ECEC ECD8 D8D8 D8D8 D8D8 D800"
$"002B 0000 0000 0000 0000 0000 0000 2B00"
$"00D8 D8D8 D8D8 D8D8 D8D8 D8D8 D8D8 D800"
$"002B 0000 0000 0000 0000 0000 0000 2B00"
$"00D8 D8D8 D8D8 D8D8 D8D8 D8D8 D8D8 D800"
};
#endif
// ID 16383: icon for the System 6 U.S. keyboard layout (see notes <20><25> in the header).
resource 'kcs#' (16383, sysheap, purgeable) { /*<20><25>*/
{ /* array: 2 elements */
/* [1] */
$"0000 7FFC 6A04 7FFC 5604 7FFE 4102 7F32"
$"4142 7F72 014A 014A 0132 0102 01FE",
/* [2] */
$"FFFE FFFE FFFE FFFE FFFF FFFF FFFF FFFF"
$"FFFF FFFF FFFF 03FF 03FF 03FF 03FF 03FF"
}
};
resource 'kcs4' (16383, sysheap, purgeable) { /*<20><25>*/
$"0000 0000 0000 0000 0666 6663 3333 3300"
$"0660 6060 0000 0C00 0666 6663 3333 3300"
$"0606 0660 0000 0C00 0666 666F FFFF FFF0"
$"0C00 000F 0000 00F0 0333 333F 00FF 00F0"
$"0C00 000F 0F00 00F0 0333 333F 0FFF 00F0"
$"0000 000F 0F00 F0F0 0000 000F 0F00 F0F0"
$"0000 000F 00FF 00F0 0000 000F 0000 00F0"
$"0000 000F FFFF FFF0"
};
#if 0
// don't need, has same colors as kcs4
resource 'kcs8' (16383, sysheap, purgeable) { /*<20><25>*/
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"00EC ECEC ECEC ECD8 D8D8 D8D8 D8D8 0000"
$"00EC EC00 EC00 EC00 0000 0000 002B 0000"
$"00EC ECEC ECEC ECD8 D8D8 D8D8 D8D8 0000"
$"00EC 00EC 00EC EC00 0000 0000 002B 0000"
$"00EC ECEC ECEC ECFF FFFF FFFF FFFF FF00"
$"002B 0000 0000 00FF 0000 0000 0000 FF00"
$"00D8 D8D8 D8D8 D8FF 0000 FFFF 0000 FF00"
$"002B 0000 0000 00FF 00FF 0000 0000 FF00"
$"00D8 D8D8 D8D8 D8FF 00FF FFFF 0000 FF00"
$"0000 0000 0000 00FF 00FF 0000 FF00 FF00"
$"0000 0000 0000 00FF 00FF 0000 FF00 FF00"
$"0000 0000 0000 00FF 0000 FFFF 0000 FF00"
$"0000 0000 0000 00FF 0000 0000 0000 FF00"
$"0000 0000 0000 00FF FFFF FFFF FFFF FF"
};
#endif
// default small color icons for the scripts that don't include them <x121>/* <12> */
resource 'kcs#' (kKeybdMenuID, sysHeap, purgeable) { { /* array: 2 elements */
/* [1] */
$"0000 0000 0000 0000 7FFE 4002 5552 4002"
$"57EA 4002 7FFE",
/* [2] */
$"0000 0000 0000 7FFE FFFF FFFF FFFF FFFF"
$"FFFF FFFF FFFF 7FFE"
} };
resource 'kcs4' (kKeybdMenuID, sysHeap, purgeable) {
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0DFF FFFF FFFF FFD0 0FCC CCCC CCCC CCF0"
$"0FCF CFCF CFCF CCF0 0FCC CCCC CCCC CCF0"
$"0FC3 CFFF FFFC FCF0 0FCC CCCC CCCC CCF0"
$"0DFF FFFF FFFF FFD0"
};
#if 0
// don't need, has same colors as kcs4 /* <21> */
resource 'kcs8' (kKeybdMenuID, sysHeap, purgeable) {
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"00F9 FFFF FFFF FFFF FFFF FFFF FFFF F900"
$"00FF 2B2B 2B2B 2B2B 2B2B 2B2B 2B2B FF00"
$"00FF 2BFF 2BFF 2BFF 2BFF 2BFF 2B2B FF00"
$"00FF 2B2B 2B2B 2B2B 2B2B 2B2B 2B2B FF00"
$"00FF 2BD8 2BFF FFFF FFFF FF2B FF2B FF00"
$"00FF 2B2B 2B2B 2B2B 2B2B 2B2B 2B2B FF00"
$"00F9 FFFF FFFF FFFF FFFF FFFF FFFF F9"
};
#endif
// kDefInputMethodIconID: default icon used for input methods that supply none <23><24>.
resource 'kcs#' (kDefInputMethodIconID, sysHeap, purgeable) { /* <23><24> */
{ /* array: 2 elements */
/* [1] */
$"0000 0C3E 1212 2122 44C6 924A 4130 2490"
$"1248 0924 1C92 3E02 7D24 3888 1050 0020",
/* [2] */
$"0C7F 1E7F 3F3F 7FFF FFFF FFFF FFFB 7FF8"
$"3FFC 1FFE 3FFF 7FFF FFFE 7DFC 38F8 1070"
}
};
resource 'kcs4' (kDefInputMethodIconID, sysHeap, purgeable) { /* <23><24> */
$"0000 0000 0000 0000 0000 FF00 00FF FFF0"
$"000F CCF0 000F 00F0 00FC CCCF 00F0 00F0"
$"0FCC CFCC FF00 0FF0 DCC3 CCFC CF00 F0F0"
$"0FCC CCCF CCFF 0000 00FC CFCC FCCF 0000"
$"000F CCFC CFCC F000 0000 FCCF CCFC CF00"
$"000F 9FCC FCCF CCF0 00F9 99FC CCCC CCF0"
$"0F99 9F0F CCFC CF00 00F9 F000 FCCC F000"
$"000F 0000 0FCF 0000 0000 0000 00D0"
};
#if 0
// don't need, has same colors as kcs4 /* <23><24> */
resource 'kcs8' (kDefInputMethodIconID, sysHeap, purgeable) {
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 FFFF 0000 0000 FFFF FFFF FF00"
$"0000 00FF 2B2B FF00 0000 00FF 0000 FF00"
$"0000 FF2B 2B2B 2BFF 0000 FF00 0000 FF00"
$"00FF 2B2B 2BFF 2B2B FFFF 0000 00FF FF00"
$"F92B 2BD8 2B2B FF2B 2BFF 0000 FF00 FF00"
$"00FF 2B2B 2B2B 2BFF 2B2B FFFF 0000 0000"
$"0000 FF2B 2BFF 2B2B FF2B 2BFF 0000 0000"
$"0000 00FF 2B2B FF2B 2BFF 2B2B FF00 0000"
$"0000 0000 FF2B 2BFF 2B2B FF2B 2BFF 0000"
$"0000 00FF CBFF 2B2B FF2B 2BFF 2B2B FF00"
$"0000 FFCB CBCB FF2B 2B2B 2B2B 2B2B FF00"
$"00FF CBCB CBFF 00FF 2B2B FF2B 2BFF 0000"
$"0000 FFCB FF00 0000 FF2B 2B2B FF00 0000"
$"0000 00FF 0000 0000 00FF 2BFF 0000 0000"
$"0000 0000 0000 0000 0000 F9"
};
#endif
// 'STR#' string lists for the Keyboard menu. The first list holds the balloon-help
// texts indexed by the kKM...Index constants (referenced from the 'hmnu' resource);
// the three kAboutKeybd... lists supply ParamText strings for the About Keyboards
// dialogs; kKeybdMenuItemsID supplies the menu-item strings themselves.
resource 'STR#' (kKeybdMenuHelpID, sysHeap, purgeable) { { // Balloon Help for Keyboard Menu <25>
// Keyboard, (Menu Title), Normal (kKMEnabledTitleIndex)
"Keyboard menu\n\nUse this menu to switch from one keyboard to another. This "
"may also change the script you are using.";
// Keyboard, (Menu Title), Dimmed (kKMDisabledTitleIndex)
"Keyboard menu\n\nUse this menu to switch from one keyboard to another. This "
"may also change the script you are using. Not available because the "
"keyboard layout cannot be changed.";
// Keyboard, (Menu Title, Dimmed with Modal dialog up) (kKMDisabledTitleModalIndex)
"Keyboard menu\n\nThis menu is not available because it cannot be used with the dialog box "
"on your screen.";
// Keyboard, (Menu item, Dimmed with Modal dialog up) (kKMDisabledItemModalIndex)
"Keyboard menu\n\nThis item is not available because it cannot be used with the dialog box "
"on your screen.";
// Keyboard menu item, Normal/Selected (kKMEnabledItemIndex)
"Makes this keyboard layout active.";
// Keyboard menu item, Disabled/Selected (kKMDisabledItemIndex)
"Makes this keyboard layout active. Not available now because the script of this keyboard "
"cannot be used for this operation.";
// Keyboard menu item, Checked (kKMCheckedItemIndex)
"Makes this keyboard layout active. Checked because this keyboard layout is now active.";
// Keyboard menu item, Other (kKMOtherItemIndex)
"Makes this keyboard layout active. Marked because this keyboard layout is selected for "
"the script system.";
// Help, About keyboards..., Normal (kKMAboutItemIndex)
"Displays information about using hidden keystrokes to change your keyboard and script.";
// Help, About help..., Dimmed (kKMDisabledAboutItemIndex)
"Displays information about using hidden keystrokes to change your keyboard and script. "
"Not available because the About Keyboards dialog box is already open or because another "
"dialog box is open.";
} };
// One ParamText string (^0) for the Roman-only About Keyboards dialog.
// \0x12 and \0x11 are the check-mark and command-key glyphs in the system font.
resource 'STR#' (kAboutKeybdRoman, sysHeap, purgeable) { { // About Keyboards for Roman-only system <25>
"The \0x12 indicates the active keyboard layout. To "
"rotate to the next keyboard layout, press \0x11-Option-Space bar.";
} };
// Two ParamText strings (^0, ^1) for the dialog shown when a 2-byte script is installed.
resource 'STR#' (kAboutKeybdIMs, sysHeap, purgeable) { { // About Keyboards for system that includes 2-byte script <25>
"The \0x12 indicates the active keyboard layout or input method in "
"the active script system.\0x0D\0x0D";
"To rotate to the next keyboard layout or input method in the "
"active script, press \0x11-Option-Space bar. To rotate to the "
"preferred keyboard layout or input method in the next available "
"script, press \0x11-Space bar." ;
} };
// Two ParamText strings (^0, ^1) for the multi-script (no 2-byte script) dialog.
resource 'STR#' (kAboutKeybdMulti, sysHeap, purgeable) { { // About Keyboards for multi-script w/o 2-byte script <25>
"The \0x12 indicates the active keyboard layout in the active script system.\0x0D\0x0D";
"To rotate to the next keyboard layout in the active script, press "
"\0x11-Option-Space bar. To rotate to the preferred keyboard layout "
"in the next available script, press \0x11-Space bar.";
} };
// Menu-item strings: a null marker for the default keyboard, then the About item.
resource 'STR#' (kKeybdMenuItemsID, sysHeap, purgeable) { { // strings for menu items <x121><25>
"\0x00"; // marker for default keybd <2><9>
"About Keyboards…"; // <2> ex<23> <Sys7.1>
// "Next Script"; // currently not used
// "Next Keyboard in Script"; // currently not used
} };
// 'hmnu': Help Manager balloon-help mapping for the Keyboard menu. Each
// HMStringResItem points at ('STR#' ID, index) pairs for the enabled/disabled/
// checked/other-marked states; the missing-items entry covers keyboard items
// added dynamically, followed by entries for the title, the About item, and
// the divider line (skipped).
resource 'hmnu' (kKeybdMenuHelpID, sysheap) { // balloon help strings for keyboard menu <2>
HelpMgrVersion, /* Help Version */
0, /* options */
0, /* theProc */
0, /* variant */
HMStringResItem { // use missing msg
kKeybdMenuHelpID,kKMEnabledItemIndex, /* enabled msg */
kKeybdMenuHelpID,kKMDisabledItemIndex, /* disabled msg */
kKeybdMenuHelpID,kKMCheckedItemIndex, /* checked msg */
kKeybdMenuHelpID,kKMOtherItemIndex /* other marked msg : default kchr for a script */
},
{
HMStringResItem { // keyboard menu title
kKeybdMenuHelpID,kKMEnabledTitleIndex,
kKeybdMenuHelpID,kKMDisabledTitleIndex,
kKeybdMenuHelpID,kKMDisabledTitleModalIndex,
kKeybdMenuHelpID,kKMDisabledItemModalIndex
},
HMStringResItem { // about keyboards… menu item
kKeybdMenuHelpID,kKMAboutItemIndex,
kKeybdMenuHelpID,kKMDisabledAboutItemIndex,
0,0,
0,0
},
HMSkipItem { // disabled line
},
}
};
// About Keyboards dialogs: one 'DLOG'/'DITL' pair per system configuration
// (multi-script, Roman-only, and with 2-byte script / input methods). The ^0/^1
// StaticText items are filled at run time via ParamText from the matching
// kAboutKeybd... 'STR#' lists above.
resource 'DLOG' (kAboutKeybdMulti, purgeable) { // <4><18><25>
{58, 16, 264, 484},
dBoxProc,
visible,
noGoAway,
0x0,
kAboutKeybdMulti,
"About…",
alertPositionMainScreen
};
resource 'DLOG' (kAboutKeybdRoman, purgeable) { // <5><25>
{58, 18, 180, 458},
dBoxProc,
visible,
noGoAway,
0x0,
kAboutKeybdRoman,
"About…",
alertPositionMainScreen
};
resource 'DLOG' (kAboutKeybdIMs, purgeable) { // <25>
{58, 16, 264, 484},
dBoxProc,
visible,
noGoAway,
0x0,
kAboutKeybdIMs,
"About…",
alertPositionMainScreen
};
resource 'DITL' (kAboutKeybdMulti, purgeable) { // <4><18><25>
{ /* array DITLarray: 4 elements */
/* [1] */
{175, 367, 193, 447},
Button {
enabled,
"OK"
},
/* [2] */
{46, 15, 85, 447},
StaticText { // edited <8>
disabled,
"^0" // now uses ParamText <25>
},
/* [3] */
{96, 15, 159, 447},
StaticText { // edited <8>
disabled,
"^1" // now uses ParamText <25>
},
/* [4] */
{14, 15, 46, 447},
StaticText {
disabled,
"About Keyboards…" // ex<23> <Sys7.1>
}
}
};
resource 'DITL' (kAboutKeybdRoman, purgeable) { // <5>
{ /* array DITLarray: 3 elements */
/* [1] */
{87, 342, 105, 422},
Button {
enabled,
"OK"
},
/* [2] */
{44, 17, 82, 422},
StaticText {
disabled,
"^0" // now uses ParamText <25>
},
/* [3] */
{14, 17, 44, 422},
StaticText {
disabled,
"About Keyboards…" // ex<23> <Sys7.1>
}
}
};
resource 'DITL' (kAboutKeybdIMs, purgeable) { // <25>
{ /* array DITLarray: 4 elements */
/* [1] */
{175, 367, 193, 447},
Button {
enabled,
"OK"
},
/* [2] */
{46, 15, 85, 447},
StaticText {
disabled,
"^0" // now uses ParamText <25>
},
/* [3] */
{96, 15, 159, 447},
StaticText {
disabled,
"^1" // now uses ParamText <25>
},
/* [4] */
{14, 15, 46, 447},
StaticText {
disabled,
"About Keyboards…" // ex<23> <Sys7.1>
}
}
};
| /Toolbox/ScriptMgr/International.r | no_license | elliotnunn/sys7.1-doc-wip | R | false | false | 31,323 | r | /*
Hacks to match MacOS (most recent first):
<Sys7.1> 8/3/92 Reverted <23>: restored '…' to About Keyboards strings.
9/2/94 SuperMario ROM source dump (header preserved below)
*/
/*
File: International.r
Contains: Rez-format international resources for the System file
Written by: PKE Peter Edberg
SMB Sue Bartalo
Copyright: © 1983-1992 by Apple Computer, Inc., all rights reserved.
Change History (most recent first):
<26> 6/23/92 SMB #1029263,<pke>: Set the 'smsfSynchUnstyledTE' bit in the Roman
'itlb' script flags field to indicate that unstyled TE should
synchronize the keyboard and font.
<25> 6/17/92 SMB #1024950,<cv>: Since now using ParamText for About Keyboards
dialog items, I added new STR# resources for each of the DITLs.
Added a new DITL for the About Keyboards dialog to mention input
methods (as well as keyboard layouts) when a 2-byte script is
installed. Updated all About Keyboards dialog items with
Instructional Products suggestions.
Added different default input method icons (again), a different
default keyboard layout icon that is smaller and more rounded in
appearance than the previous icon, and updated the US-System 6
keyboard icons as well.
<24> 5/12/92 SMB #1026017,<pke>: Added different default input method icon.
<23> 5/12/92 SMB #1026017,<pke>: Added default input method icon. Updated System
6 U.S. keyboard icons. Removed '…' from About Keyboards
strings.
<22> 4/16/92 JSM Get rid of hasBalloonHelp (which was never used consistently
anyway) and SixPack (which was never used at all) conditionals.
<21> 4/15/92 PKE #1026910,<jh>: Delete unnecessary kcs8 resources, save 512
bytes.
<20> 4/8/92 PKE #1026586,<cv>: Add tentative keyboard icons for System 6 U.S.
keyboard, ID 16383.
<19> 3/30/92 PKE #1022875,<ha>: In KSWP, for Cmd-rightArrow, use new
RomanIfOthers action instead of Roman (corresponds to KeyScript
verb smKeyRoman instead of smRoman). This way, Cmd-rightArrow
will not be stripped from the input queue unless it does
something useful, which is only when there are other scripts
installed besides Roman.
<18> 12/11/91 SMB #1014712 - For CubeE, in the About Keyboards… dialog for
non-Roman scripts, the message about rotating keyboards within
scripts should appear. Had to enlarge the DITL by 15 pixels for
this.
<17> 9/25/91 PKE For Cube-E (and Bruges): Update 'itlc' data to match latest
template in SysTypes.r.
<16> 9/20/91 PKE Remove CubeE conditional, since the new plan (according to
Darin) is that everything for Cube-E goes in without
conditionals. While I'm at it, remove some ">=$700"
conditionals.
<15> 9/15/91 PKE Oops, fix feature symbols to match latest BBSStartup.
<14> 9/15/91 PKE Changes for Bruges/Cube-E: Turn off FontForce in default ‘itlc’
resource. Fix ‘KSWP’ so arrow keys work on ADB keyboards too
(Command-leftArrow selects system script, Command-rightArrow
selects Roman).
<13> 2/26/91 PKE smb,#perCindyR: Change resource name “US” to “U.S.”.
<12> 2/21/91 SMB pke,#smb-2/21/91-1: Added smaller default keyboard icons
(‘kcs#’,‘kcs4’,‘kcs8’ with ID -16491).
<11> 2/15/91 SMB pke,#smb-2/15/91-1: Changing US 0 keyboard resources to smaller
icons.
<10> 1/7/91 SMB (JDR) approved the last change!
<9> 1/7/91 SMB Replacing the degree symbol as the default keyboard marker with
null until it's demonstrated that a symbol should be used in the
keyboard menu. Also removed the comment about the degree symbol
from the DITL. Updated the DITL and DLOG for About Keyboards
dialog.
<8> 12/14/90 PKE (DC) Change abbreviated day and month strings in 'itl1' to match
old 3-char form, so abbreviated dates don’t change on US system.
Complete DITL text that explains switching among keyboards (the
Cmd-Opt-space key combo needed to be finalized; other edits).
<7> 12/10/90 PKE (VL) Add to KSWP resource: Cmd-Opt-space, for rotate to next
keyboard in current script.
<6> 10/30/90 SMB Changed data for US and default kcs’s.
<5> 9/1/90 SMB Added another About Keyboards DITL & DLOG for a system with only
1 script. Also modified the other About msg.
<4> 8/10/90 SMB Added ‘About Keyboards…’ dialog (DITL & DLOG) and updated help
messages.
<3> 8/7/90 csd Fixed problems caused by having double-/ comments in #defines.
<2> 8/7/90 SMB Added default keybd symbol and "About Keyboards…" to the
Keyboard Menu STR# resource (although About is conditionalized
out until later). Added balloon help 'hmnu' resource and indices
for new help strings in Keyboard Menu STR# resource.
<1> 7/23/90 PKE New today, extracted from Sys.r. Added include of BalloonTypes.r
since we’ll need it later. Note that real resource attributes are
still set in Sys.r.
Relevant comments from Sys.r (marker changed to <xNN>
<x163> 7/17/90 PKE ‘kscn’ resource (see <x9.8>) has been superseded by keyboard icon
suite (kcs#, etc. - see <x102>). Changed ‘kscn’ 0 back to ‘SICN’
0 for backward compatibility.
<x158> 7/7/90 PKE Define “sysHeapFromNowOn” symbol to use as heap attribute for
resources that did not have sysheap attribute in 6.x, but which
should have sysheap attribute set for 7.0 and beyond (Darin gets
credit for the symbol name). Use it for itlb, itl0, itl1, itl2,
and itl4 (all ID=0) and PACK 6. For new 7.0 international
resources, just set sysHeap attribute: itlm, kcs#, kcs4, kcs8,
and STR# (-16491). This fixes a bug with saving handles to
resources loaded at INIT time.
<x146> 6/22/90 PKE Updated 'itlm' data to use new script, language, and region
names, and rearranged sorting order of scripts, languages, and
regions.
<x121> 6/1/90 SMB Add STR# and default icon suites for Keyboard Menu.
<x117> 5/29/90 PKE Added bytes for valid style and alias style to ‘itlb’ type.
Added list of additional separators to ‘itl1’. Both changes
match new formats in SysTypes.r.
<x109> 5/21/90 PKE Added region code table to ‘itlm’ resource to match new format
in SysTypes.r. Updated itlm data to use newer script, lang, and
region codes (smTradChinese, etc.). Converted ‘itl1’ resource
for 7.0 to new extended form with list of abbreviated day and
month names, etc.
<x102> 5/12/90 SMB Adding the keyboard small icons (kcs#, kcs4, kcs8) for the US
default keyboard (KCHR 0).
<x97> 5/2/90 PKE Rearranged 'itlm' resource data to match new format.
<x94> 4/25/90 PKE Add 'itlm' resource for multiscript mapping and sorting data.
<x59> 3/21/90 PKE Added new info to US 'itlb' resource: font/size data, script
local data size (needed for auto-initialization of script
systems). Matches updated type in SysTypes.r. Also defined some
FOND ID constants.
<x19> 1/17/90 PKE Updated itlc resource data for new region code field added to
itlc type in SysTypes.r.
<x18> 1/17/90 PKE Updated itlc resource data to use MPW 3.1 itlc format, which is
now in SysTypes.r.
<x9.8> 9/18/89 PKE For 7.0, changed type of keyboard/script small icon from 'SICN'
to new 'kscn' to avoid resource ID conflicts.
3/12/89 PKE Added script icon location at end of itlc (in previously
reserved bytes)
3/8/89 PKE Commented out itlr stuff
To Do:
• We can delete the kcs8 resources to save space, since they use the same colors as the
kcs4 resources.
*/
#include "Types.r"
#include "SysTypes.r"
#include "BalloonTypes.r"
//__________________________________________________________________________________________________
// macros from Sys.r
// Build macros and resource-ID constants for the Keyboard menu and its help text.
// The IDs below are shared with assembly sources (ScriptPriv.a); keep them in sync.
#define IncSys Include $$Shell("ObjDir")
#define codeAs 'RSRC' (0) as
#define sysHeapFromNowOn sysHeap
#define kKeybdMenuID -16491 /* if this changes, update kKeyboardMenuID in ScriptPriv.a */
#define kDefInputMethodIconID -16492 /* if this changes, update kDefaultIMIconID in ScriptPriv.a <23> */
// STR# resource IDs
#define kKeybdMenuHelpID kKeybdMenuID /* <25> */
#define kAboutKeybdRoman kKeybdMenuID-1 /* <25> */
#define kAboutKeybdIMs kKeybdMenuID-2 /* <25> */
#define kAboutKeybdMulti kKeybdMenuID-3 /* <25> */
#define kKeybdMenuItemsID kKeybdMenuID-4 /* if this changes, update kKeyboardMenuItemsID in ScriptPriv.a <25> */
// indices for strings in Keyboard Menu help STR# resource (kKeybdMenuHelpID)
#define kKMEnabledTitleIndex 1 /* Enabled keyboard Menu title <2> */
#define kKMDisabledTitleIndex 2 /* Disabled keyboard Menu title <2> */
#define kKMDisabledTitleModalIndex 3 /* Disabled kybd menu title w/ modal up <2> */
#define kKMDisabledItemModalIndex 4 /* Disabled kybd menu item w/ modal up <2> */
#define kKMEnabledItemIndex 5 /* Enabled keyboard Menu item <2> */
#define kKMDisabledItemIndex 6 /* Disabled keyboard Menu item <2> */
#define kKMCheckedItemIndex 7 /* Checked keyboard menu item <2> */
#define kKMOtherItemIndex 8 /* Default keyboard for a script <2> */
#define kKMAboutItemIndex 9 /* About item <2> */
#define kKMDisabledAboutItemIndex 10 /* About item disabled <2> */
#define USname "U.S." /*<13>*/
//__________________________________________________________________________________________________
// *************************************************************************************************
// IMPORTANT: the real attributes for all of the following are set in Sys.r.
// *************************************************************************************************
// 'itlc' 0: international configuration — selects Roman as the system script and
// sets global Script Manager switches (font/intl force flags, key cache size,
// menu-bar keyboard icon placement, preferred region, default line direction).
resource 'itlc' (0, sysHeap, purgeable) {
0, // system script is Roman.
2048, // key cache size is 2K
noFontForce, // fontForce is off <14>
intlForce, // intlForce is on
noOldKeyboard, // no old international keyboard.
0, // general flags (see smGenFlags info in ScriptEqu.a)
40, // keybd icon offset from end of menu bar <x18>
rightOffset, // keybd icon at right end of menu bar <x18>
0, // reserved for keybd icon data <x18>
verUS, // preferred region code <x19>
directionLeftRight, // default line direction <17>
$"" // reserved <x18>
};
// 'itlm' 0: multiscript mapping/sorting data. Three tables give (a) the sorting
// order of scripts and each script's default language, (b) the sorting order of
// languages and each language's parent script, and (c) the sorting order of
// regions and each region's parent language. Each table is preceded by the
// maximum code it maps and the default used for entries not listed.
resource 'itlm' (0, sysHeap, purgeable) {
$700, // version
$0000, // format
smUninterp, // max script code for script->lang mapping
langUnspecified, // default lang code for unlisted scripts
{ // script order and default lang table:
smRoman, langEnglish,
smSlavic, langCroatian,
smGreek, langGreek,
smCyrillic, langRussian,
smGeorgian, langGeorgian,
smArmenian, langArmenian,
smArabic, langArabic,
smExtArabic, langSindhi,
smHebrew, langHebrew,
smGeez, langAmharic,
smDevanagari, langHindi,
smGurmukhi, langPunjabi,
smGujarati, langGujarati,
smOriya, langOriya,
smBengali, langBengali,
smTamil, langTamil,
smTelugu, langTelugu,
smKannada, langKannada,
smMalayalam, langMalayalam,
smSinhalese, langSinhalese,
smBurmese, langBurmese,
smKhmer, langKhmer,
smThai, langThai,
smLaotian, langLao,
smTibetan, langTibetan,
smMongolian, langMongolian,
smVietnamese, langVietnamese,
smTradChinese, langTradChinese,
smSimpChinese, langSimpChinese,
smJapanese, langJapanese,
smKorean, langKorean,
smRSymbol, langHebrew,
smUninterp, langEnglish
},
langSimpChinese, // max lang code for lang->script mapping
smRoman, // default script code for unlisted langs
{ // lang order and parent script table:
langEnglish, smRoman,
langFrench, smRoman,
langGerman, smRoman,
langItalian, smRoman,
langDutch, smRoman,
langSwedish, smRoman,
langSpanish, smRoman,
langDanish, smRoman,
langPortuguese, smRoman,
langNorwegian, smRoman,
langFinnish, smRoman,
langIcelandic, smRoman,
langMaltese, smRoman,
langTurkish, smRoman,
langLithuanian, smRoman,
langEstonian, smRoman,
langLettish, smRoman,
langLappish, smRoman,
langFaeroese, smRoman,
langCroatian, smSlavic,
langPolish, smSlavic,
langHungarian, smSlavic,
langGreek, smGreek,
langRussian, smCyrillic,
langArabic, smArabic,
langUrdu, smArabic,
langFarsi, smArabic,
langHebrew, smHebrew,
langHindi, smDevanagari,
langThai, smThai,
langTradChinese, smTradChinese,
langSimpChinese, smSimpChinese,
langJapanese, smJapanese,
langKorean, smKorean,
},
verThailand, // max region code for region->lang mapping
langUnspecified, // default lang code for unlisted regions
{ // region order and parent lang table:
verUS, langEnglish,
verBritain, langEnglish,
verAustralia, langEnglish,
verIreland, langEnglish,
verFrance, langFrench,
verFrBelgiumLux, langFrench,
verFrCanada, langFrench,
verFrSwiss, langFrench,
verGermany, langGerman,
verGrSwiss, langGerman,
verItaly, langItalian,
verNetherlands, langDutch,
verSweden, langSwedish,
verSpain, langSpanish,
verDenmark, langDanish,
verPortugal, langPortuguese,
verNorway, langNorwegian,
verFinland, langFinnish,
verIceland, langIcelandic,
verMalta, langMaltese,
verTurkey, langTurkish,
verLithuania, langLithuanian,
verEstonia, langEstonian,
verLatvia, langLettish,
verLapland, langLappish,
verFaeroeIsl, langFaeroese,
verYugoCroatian, langCroatian,
verPoland, langPolish,
verHungary, langHungarian,
verGreece, langGreek,
verRussia, langRussian,
verArabic, langArabic,
verPakistan, langUrdu,
verIran, langFarsi,
verIsrael, langHebrew,
verIndiaHindi, langHindi,
verThailand, langThai,
verTaiwan, langTradChinese,
verChina, langSimpChinese,
verJapan, langJapanese,
verKorea, langKorean,
verCyprus, langUnspecified // Hmm, 2 languages, which to use here?
}
};
// FOND (font family) IDs used by the Roman script bundle below.
#define Chicago 0 /* <x59> */
#define Geneva 3 /* <x59> */
#define Monaco 4 /* <x59> */
// 'itlb' 0: Roman script bundle — ties together the Roman script's itl0/itl1/itl2/
// itl4 resources, its script flags, default KCHR/icon IDs, local-record size, and
// the default font family/size for each system font role.
resource 'itlb' (0, "Roman", sysHeapFromNowOn, purgeable) {
0, // itl0 ID
0, // itl1 ID
0, // itl2 ID
$0107, // script flags (see smScriptFlags info in ScriptEqu.a)
0, // itl4 ID
0, // optional itl5 ID (not used here). <x163>
0, // language code
0, // numbers/dates
0, // KCHR ID
0, // ID of SICN or kcs#/kcs4/kcs8. <x163>
116, // size of Roman local record, in bytes <x59>
Monaco, // default monospace FOND ID <x59>
9, // default monospace font size <x59>
Geneva, // preferred FOND ID <x59>
12, // preferred font size <x59>
Geneva, // default small FOND ID <x59>
9, // default small font size <x59>
Chicago, // default system FOND ID <x59>
12, // default system font size <x59>
Geneva, // default application FOND ID <x59>
12, // default application font size <x59>
Geneva, // default Help Mgr FOND ID <x59>
9, // default Help Mgr font size <x59>
$7F, // valid styles for Roman <x117>
$02 // style set for alias = [italic] <x117>
};
// U.S. number/date/time formats. 'itl0' is the modern resource; the 'INTL' 0/1
// resources carry the same data in the pre-Script-Manager format for old callers.
resource 'itl0' (0, USname, sysHeapFromNowOn, purgeable) { /*<13>*/
period, comma, semicolon, dollarsign, "\0x00", "\0x00",
leadingZero, trailingZero, paren, leads, monDayYear,
noCentury, noMonthLeadZero, noDayLeadZero, slash, twelveHour,
noHoursLeadZero, minutesLeadZero, secondsLeadZero, " AM", " PM", ":",
"\0x00", "\0x00", "\0x00", "\0x00", "\0x00", "\0x00", "\0x00", "\0x00",
standard,
verUs, 1,
};
// Old-format twin of 'itl0' above; keep the two in sync.
resource 'INTL' (0, USname, purgeable) { /*<13>*/
period, comma, semicolon, dollarsign, "\0x00", "\0x00",
leadingZero, trailingZero, paren, leads, monDayYear,
noCentury, noMonthLeadZero, noDayLeadZero, slash, twelveHour,
noHoursLeadZero, minutesLeadZero, secondsLeadZero, " AM", " PM", ":",
"\0x00", "\0x00", "\0x00", "\0x00", "\0x00", "\0x00", "\0x00", "\0x00",
standard,
verUs, 1,
};
// 'itl1' 0: U.S. long-date data — day/month names, ordering, separators, plus the
// extended section with abbreviated names and extra date separators.
resource 'itl1' (0, USname, sysHeapFromNowOn, purgeable) { /*<13>*/
{ "Sunday"; "Monday"; "Tuesday"; "Wednesday"; "Thursday"; "Friday"; "Saturday"; },
{
"January"; "February"; "March"; "April"; "May"; "June";
"July"; "August"; "September"; "October"; "November"; "December";
},
dayName, monDayYear, noDayLeadZero, 3,
"", ", ", " ", ", ", "",
verUs, 1,
extFormat {
$0700, $0001, 0,
{}, // no extra day names (7 names are plenty, thank you)
{}, // no extra month names
{"Sun"; "Mon"; "Tue"; "Wed"; "Thu"; "Fri"; "Sat"}, // use old 3-char forms <8>
{"Jan"; "Feb"; "Mar"; "Apr"; "May"; "Jun"; "Jul"; "Aug"; "Sep"; "Oct"; "Nov"; "Dec"},
{"-"; "."} // other reasonable date separators <x117>
},
};
// Old-format twin of 'itl1' above (no extended section); keep in sync.
resource 'INTL' (1, USname, purgeable) { /*<13>*/
{ "Sunday"; "Monday"; "Tuesday"; "Wednesday"; "Thursday"; "Friday"; "Saturday"; },
{
"January"; "February"; "March"; "April"; "May"; "June";
"July"; "August"; "September"; "October"; "November"; "December";
},
dayName, monDayYear, noDayLeadZero, 3,
"", ", ", " ", ", ", "",
verUs, 1,
DefaultReturn,
};
// 'SICN' 0: Roman script icon kept for backward compatibility; the color small-icon
// suites ('kcs#'/'kcs4'/'kcs8') below supersede it (see note <x163> in the header).
resource 'SICN' (0, sysHeapFromNowOn, purgeable) { { // Roman script icon
$"0000 0100 0380 07C0 0FE0 1FF0 3FF8 7FFC"
$"3FF8 1FF0 0FE0 07C0 0380 0100 0000 0000"
} };
// 'KSWP' 0: keyboard-swap table — KeyScript action verbs keyed by virtual key code
// plus modifier state; entries are duplicated for Mac Plus and ADB arrow key codes.
resource 'KSWP' (0, sysHeap) { {
Rotate, $31, controlOff, optionOff, shiftOff, commandOn; // space bar toggles script
RotateKybd, $31, controlOff, optionOn, shiftOff, commandOn; // opt space bar toggles kybd <7>
System, $46, controlOff, optionOff, shiftOff, commandOn; // Mac+ left arrow is system script
RomanIfOthers, $42, controlOff, optionOff, shiftOff, commandOn; // Mac+ right arrow is Roman <19>
System, $7B, controlOff, optionOff, shiftOff, commandOn; // ADB left arrow is system script <14>
RomanIfOthers, $7C, controlOff, optionOff, shiftOff, commandOn; // ADB right arrow is Roman <14><19>
} };
// Keyboard small-icon suite for the U.S. default keyboard (KCHR 0):
// 'kcs#' = 1-bit image + mask, 'kcs4' = 4-bit color; 'kcs8' is compiled out
// because it uses the same colors as 'kcs4'.
resource 'kcs#' (0, sysHeap, purgeable) { { /* array: 2 elements */ /* <11> */
/* [1] */
$"0000 0000 0000 0000 0000 FFFF AB01 FFFF"
$"D501 FFFF AB01 FFFF 8001 FFFF 8001 FFFF",
/* [2] */
$"0000 0000 0000 0000 0000 FFFF FFFF FFFF"
$"FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF"
} };
resource 'kcs4' (0, sysHeap, purgeable) { /* <11> */
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0666 6663 3333 3330 0660 6060 0000 00C0"
$"0666 6663 3333 3330 0606 0660 0000 00C0"
$"0666 6663 3333 3330 0C00 0000 0000 00C0"
$"0333 3333 3333 3330 0C00 0000 0000 00C0"
$"0333 3333 3333 3330 0000 0000 0000 0000"
};
#if 0
// don't need, has same colors as kcs4 /* <21> */
resource 'kcs8' (0, sysHeap, purgeable) {
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"00EC ECEC ECEC ECD8 D8D8 D8D8 D8D8 D800"
$"00EC EC00 EC00 EC00 0000 0000 0000 2B00"
$"00EC ECEC ECEC ECD8 D8D8 D8D8 D8D8 D800"
$"00EC 00EC 00EC EC00 0000 0000 0000 2B00"
$"00EC ECEC ECEC ECD8 D8D8 D8D8 D8D8 D800"
$"002B 0000 0000 0000 0000 0000 0000 2B00"
$"00D8 D8D8 D8D8 D8D8 D8D8 D8D8 D8D8 D800"
$"002B 0000 0000 0000 0000 0000 0000 2B00"
$"00D8 D8D8 D8D8 D8D8 D8D8 D8D8 D8D8 D800"
};
#endif
// Icon suite for the System 6 U.S. keyboard layout (ID 16383).
resource 'kcs#' (16383, sysheap, purgeable) { /*<20><25>*/
{ /* array: 2 elements */
/* [1] */
$"0000 7FFC 6A04 7FFC 5604 7FFE 4102 7F32"
$"4142 7F72 014A 014A 0132 0102 01FE",
/* [2] */
$"FFFE FFFE FFFE FFFE FFFF FFFF FFFF FFFF"
$"FFFF FFFF FFFF 03FF 03FF 03FF 03FF 03FF"
}
};
resource 'kcs4' (16383, sysheap, purgeable) { /*<20><25>*/
$"0000 0000 0000 0000 0666 6663 3333 3300"
$"0660 6060 0000 0C00 0666 6663 3333 3300"
$"0606 0660 0000 0C00 0666 666F FFFF FFF0"
$"0C00 000F 0000 00F0 0333 333F 00FF 00F0"
$"0C00 000F 0F00 00F0 0333 333F 0FFF 00F0"
$"0000 000F 0F00 F0F0 0000 000F 0F00 F0F0"
$"0000 000F 00FF 00F0 0000 000F 0000 00F0"
$"0000 000F FFFF FFF0"
};
#if 0
// don't need, has same colors as kcs4
resource 'kcs8' (16383, sysheap, purgeable) { /*<20><25>*/
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"00EC ECEC ECEC ECD8 D8D8 D8D8 D8D8 0000"
$"00EC EC00 EC00 EC00 0000 0000 002B 0000"
$"00EC ECEC ECEC ECD8 D8D8 D8D8 D8D8 0000"
$"00EC 00EC 00EC EC00 0000 0000 002B 0000"
$"00EC ECEC ECEC ECFF FFFF FFFF FFFF FF00"
$"002B 0000 0000 00FF 0000 0000 0000 FF00"
$"00D8 D8D8 D8D8 D8FF 0000 FFFF 0000 FF00"
$"002B 0000 0000 00FF 00FF 0000 0000 FF00"
$"00D8 D8D8 D8D8 D8FF 00FF FFFF 0000 FF00"
$"0000 0000 0000 00FF 00FF 0000 FF00 FF00"
$"0000 0000 0000 00FF 00FF 0000 FF00 FF00"
$"0000 0000 0000 00FF 0000 FFFF 0000 FF00"
$"0000 0000 0000 00FF 0000 0000 0000 FF00"
$"0000 0000 0000 00FF FFFF FFFF FFFF FF"
};
#endif
// default small color icons for the scripts that don't include them <x121>/* <12> */
resource 'kcs#' (kKeybdMenuID, sysHeap, purgeable) { { /* array: 2 elements */
/* [1] */
$"0000 0000 0000 0000 7FFE 4002 5552 4002"
$"57EA 4002 7FFE",
/* [2] */
$"0000 0000 0000 7FFE FFFF FFFF FFFF FFFF"
$"FFFF FFFF FFFF 7FFE"
} };
resource 'kcs4' (kKeybdMenuID, sysHeap, purgeable) {
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0DFF FFFF FFFF FFD0 0FCC CCCC CCCC CCF0"
$"0FCF CFCF CFCF CCF0 0FCC CCCC CCCC CCF0"
$"0FC3 CFFF FFFC FCF0 0FCC CCCC CCCC CCF0"
$"0DFF FFFF FFFF FFD0"
};
#if 0
// don't need, has same colors as kcs4 /* <21> */
resource 'kcs8' (kKeybdMenuID, sysHeap, purgeable) {
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"00F9 FFFF FFFF FFFF FFFF FFFF FFFF F900"
$"00FF 2B2B 2B2B 2B2B 2B2B 2B2B 2B2B FF00"
$"00FF 2BFF 2BFF 2BFF 2BFF 2BFF 2B2B FF00"
$"00FF 2B2B 2B2B 2B2B 2B2B 2B2B 2B2B FF00"
$"00FF 2BD8 2BFF FFFF FFFF FF2B FF2B FF00"
$"00FF 2B2B 2B2B 2B2B 2B2B 2B2B 2B2B FF00"
$"00F9 FFFF FFFF FFFF FFFF FFFF FFFF F9"
};
#endif
// Default icon used for input methods that supply none <23><24>.
resource 'kcs#' (kDefInputMethodIconID, sysHeap, purgeable) { /* <23><24> */
{ /* array: 2 elements */
/* [1] */
$"0000 0C3E 1212 2122 44C6 924A 4130 2490"
$"1248 0924 1C92 3E02 7D24 3888 1050 0020",
/* [2] */
$"0C7F 1E7F 3F3F 7FFF FFFF FFFF FFFB 7FF8"
$"3FFC 1FFE 3FFF 7FFF FFFE 7DFC 38F8 1070"
}
};
resource 'kcs4' (kDefInputMethodIconID, sysHeap, purgeable) { /* <23><24> */
$"0000 0000 0000 0000 0000 FF00 00FF FFF0"
$"000F CCF0 000F 00F0 00FC CCCF 00F0 00F0"
$"0FCC CFCC FF00 0FF0 DCC3 CCFC CF00 F0F0"
$"0FCC CCCF CCFF 0000 00FC CFCC FCCF 0000"
$"000F CCFC CFCC F000 0000 FCCF CCFC CF00"
$"000F 9FCC FCCF CCF0 00F9 99FC CCCC CCF0"
$"0F99 9F0F CCFC CF00 00F9 F000 FCCC F000"
$"000F 0000 0FCF 0000 0000 0000 00D0"
};
#if 0
// don't need, has same colors as kcs4 /* <23><24> */
resource 'kcs8' (kDefInputMethodIconID, sysHeap, purgeable) {
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 0000 FFFF 0000 0000 FFFF FFFF FF00"
$"0000 00FF 2B2B FF00 0000 00FF 0000 FF00"
$"0000 FF2B 2B2B 2BFF 0000 FF00 0000 FF00"
$"00FF 2B2B 2BFF 2B2B FFFF 0000 00FF FF00"
$"F92B 2BD8 2B2B FF2B 2BFF 0000 FF00 FF00"
$"00FF 2B2B 2B2B 2BFF 2B2B FFFF 0000 0000"
$"0000 FF2B 2BFF 2B2B FF2B 2BFF 0000 0000"
$"0000 00FF 2B2B FF2B 2BFF 2B2B FF00 0000"
$"0000 0000 FF2B 2BFF 2B2B FF2B 2BFF 0000"
$"0000 00FF CBFF 2B2B FF2B 2BFF 2B2B FF00"
$"0000 FFCB CBCB FF2B 2B2B 2B2B 2B2B FF00"
$"00FF CBCB CBFF 00FF 2B2B FF2B 2BFF 0000"
$"0000 FFCB FF00 0000 FF2B 2B2B FF00 0000"
$"0000 00FF 0000 0000 00FF 2BFF 0000 0000"
$"0000 0000 0000 0000 0000 F9"
};
#endif
// Balloon-help strings for the Keyboard menu, indexed by the kKM...Index constants
// and referenced by the 'hmnu' resource.
resource 'STR#' (kKeybdMenuHelpID, sysHeap, purgeable) { { // Balloon Help for Keyboard Menu <25>
// Keyboard, (Menu Title), Normal (kKMEnabledTitleIndex)
"Keyboard menu\n\nUse this menu to switch from one keyboard to another. This "
"may also change the script you are using.";
// Keyboard, (Menu Title), Dimmed (kKMDisabledTitleIndex)
"Keyboard menu\n\nUse this menu to switch from one keyboard to another. This "
"may also change the script you are using. Not available because the "
"keyboard layout cannot be changed.";
// Keyboard, (Menu Title, Dimmed with Modal dialog up) (kKMDisabledTitleModalIndex)
"Keyboard menu\n\nThis menu is not available because it cannot be used with the dialog box "
"on your screen.";
// Keyboard, (Menu item, Dimmed with Modal dialog up) (kKMDisabledItemModalIndex)
"Keyboard menu\n\nThis item is not available because it cannot be used with the dialog box "
"on your screen.";
// Keyboard menu item, Normal/Selected (kKMEnabledItemIndex)
"Makes this keyboard layout active.";
// Keyboard menu item, Disabled/Selected (kKMDisabledItemIndex)
"Makes this keyboard layout active. Not available now because the script of this keyboard "
"cannot be used for this operation.";
// Keyboard menu item, Checked (kKMCheckedItemIndex)
"Makes this keyboard layout active. Checked because this keyboard layout is now active.";
// Keyboard menu item, Other (kKMOtherItemIndex)
"Makes this keyboard layout active. Marked because this keyboard layout is selected for "
"the script system.";
// Help, About keyboards..., Normal (kKMAboutItemIndex)
"Displays information about using hidden keystrokes to change your keyboard and script.";
// Help, About help..., Dimmed (kKMDisabledAboutItemIndex)
"Displays information about using hidden keystrokes to change your keyboard and script. "
"Not available because the About Keyboards dialog box is already open or because another "
"dialog box is open.";
} };
resource 'STR#' (kAboutKeybdRoman, sysHeap, purgeable) { { // About Keyboards for Roman-only system <25>
"The \0x12 indicates the active keyboard layout. To "
"rotate to the next keyboard layout, press \0x11-Option-Space bar.";
} };
resource 'STR#' (kAboutKeybdIMs, sysHeap, purgeable) { { // About Keyboards for system that includes 2-byte script <25>
"The \0x12 indicates the active keyboard layout or input method in "
"the active script system.\0x0D\0x0D";
"To rotate to the next keyboard layout or input method in the "
"active script, press \0x11-Option-Space bar. To rotate to the "
"preferred keyboard layout or input method in the next available "
"script, press \0x11-Space bar." ;
} };
resource 'STR#' (kAboutKeybdMulti, sysHeap, purgeable) { { // About Keyboards for multi-script w/o 2-byte script <25>
"The \0x12 indicates the active keyboard layout in the active script system.\0x0D\0x0D";
"To rotate to the next keyboard layout in the active script, press "
"\0x11-Option-Space bar. To rotate to the preferred keyboard layout "
"in the next available script, press \0x11-Space bar.";
} };
resource 'STR#' (kKeybdMenuItemsID, sysHeap, purgeable) { { // strings for menu items <x121><25>
"\0x00"; // marker for default keybd <2><9>
"About Keyboards…"; // <2> ex<23> <Sys7.1>
// "Next Script"; // currently not used
// "Next Keyboard in Script"; // currently not used
} };
resource 'hmnu' (kKeybdMenuHelpID, sysheap) { // balloon help strings for keyboard menu <2>
HelpMgrVersion, /* Help Version */
0, /* options */
0, /* theProc */
0, /* variant */
HMStringResItem { // use missing msg
kKeybdMenuHelpID,kKMEnabledItemIndex, /* enabled msg */
kKeybdMenuHelpID,kKMDisabledItemIndex, /* disabled msg */
kKeybdMenuHelpID,kKMCheckedItemIndex, /* checked msg */
kKeybdMenuHelpID,kKMOtherItemIndex /* other marked msg : default kchr for a script */
},
{
HMStringResItem { // keyboard menu title
kKeybdMenuHelpID,kKMEnabledTitleIndex,
kKeybdMenuHelpID,kKMDisabledTitleIndex,
kKeybdMenuHelpID,kKMDisabledTitleModalIndex,
kKeybdMenuHelpID,kKMDisabledItemModalIndex
},
HMStringResItem { // about keyboards… menu item
kKeybdMenuHelpID,kKMAboutItemIndex,
kKeybdMenuHelpID,kKMDisabledAboutItemIndex,
0,0,
0,0
},
HMSkipItem { // disabled line
},
}
};
resource 'DLOG' (kAboutKeybdMulti, purgeable) { // <4><18><25>
{58, 16, 264, 484},
dBoxProc,
visible,
noGoAway,
0x0,
kAboutKeybdMulti,
"About…",
alertPositionMainScreen
};
resource 'DLOG' (kAboutKeybdRoman, purgeable) { // <5><25>
{58, 18, 180, 458},
dBoxProc,
visible,
noGoAway,
0x0,
kAboutKeybdRoman,
"About…",
alertPositionMainScreen
};
resource 'DLOG' (kAboutKeybdIMs, purgeable) { // <25>
{58, 16, 264, 484},
dBoxProc,
visible,
noGoAway,
0x0,
kAboutKeybdIMs,
"About…",
alertPositionMainScreen
};
resource 'DITL' (kAboutKeybdMulti, purgeable) { // <4><18><25>
{ /* array DITLarray: 4 elements */
/* [1] */
{175, 367, 193, 447},
Button {
enabled,
"OK"
},
/* [2] */
{46, 15, 85, 447},
StaticText { // edited <8>
disabled,
"^0" // now uses ParamText <25>
},
/* [3] */
{96, 15, 159, 447},
StaticText { // edited <8>
disabled,
"^1" // now uses ParamText <25>
},
/* [4] */
{14, 15, 46, 447},
StaticText {
disabled,
"About Keyboards…" // ex<23> <Sys7.1>
}
}
};
resource 'DITL' (kAboutKeybdRoman, purgeable) { // <5>
{ /* array DITLarray: 3 elements */
/* [1] */
{87, 342, 105, 422},
Button {
enabled,
"OK"
},
/* [2] */
{44, 17, 82, 422},
StaticText {
disabled,
"^0" // now uses ParamText <25>
},
/* [3] */
{14, 17, 44, 422},
StaticText {
disabled,
"About Keyboards…" // ex<23> <Sys7.1>
}
}
};
resource 'DITL' (kAboutKeybdIMs, purgeable) { // <25>
{ /* array DITLarray: 4 elements */
/* [1] */
{175, 367, 193, 447},
Button {
enabled,
"OK"
},
/* [2] */
{46, 15, 85, 447},
StaticText {
disabled,
"^0" // now uses ParamText <25>
},
/* [3] */
{96, 15, 159, 447},
StaticText {
disabled,
"^1" // now uses ParamText <25>
},
/* [4] */
{14, 15, 46, 447},
StaticText {
disabled,
"About Keyboards…" // ex<23> <Sys7.1>
}
}
};
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gif.R
\name{gif_plot}
\alias{gif_plot}
\title{Create a dynamic plot of fish detections}
\usage{
gif_plot(
sldf,
detects,
dir = "D:/Jordy/myplots/",
extent = NA,
type = "bing",
darken = 2.5,
col_by_fish = F,
viterbi = F,
width = 1024,
height = 768,
fps = fps
)
}
\arguments{
\item{sldf}{A SpatialLineDataFrame representation of the river system. The CRS should match that of the detection data.}
\item{detects}{Output of \code{\link{get_locations}} or \code{\link{get_best_locations}}}
\item{dir}{The directory of the folder where the plots should be output (eg. "D:/Jordy/myplots/"). The folder should end with a "/".}
\item{extent}{A vector of length four specifying the plotting extent c(x_min, x_max, y_min, y_max)}
\item{type}{The background to use (see \code{\link[OpenStreetMap]{openmap}}) for more information.}
\item{darken}{Increase to darken the background when open_maps=T. Defaults to 1.}
\item{col_by_fish}{col_by_fish=T assigns each fish a unique color. This color will be preserved between mappings (i.e. between different flight periods).}
\item{viterbi}{Use viterbi=T to color by survival state using the viterbi path (detects needs to be the viterbi output from \code{\link{hmm_survival}}; see examples). Expired fish will be plotted in green.}
\item{width}{The width of the plot.}
\item{height}{The height of the plot.}
}
\value{
Static plots that plot the fish location by flight will be added to the folder along with a gif that iterates through the flights.
}
\description{
Create a dynamic plot of fish detections
}
\examples{
# Note: Create a folder on your machine and make dir coorespond to this directory to run examples
# extent <- c(x_min=466060, x_max=1174579, y_min=6835662, y_max=7499016)
# gif_plot(sldf, viterbi, dir="S:/Jordy/telprep/telprep/gifs/viterbi/", extent=extent, viterbi=T)
# # gif_plot(sldf, best_detects, dir="S:/Jordy/telprep/telprep/gifs/byfish/", extent=extent, col_by_fish=T, viterbi=F)
}
| /man/gif_plot.Rd | no_license | jBernardADFG/telprep | R | false | true | 2,049 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gif.R
\name{gif_plot}
\alias{gif_plot}
\title{Create a dynamic plot of fish detections}
\usage{
gif_plot(
sldf,
detects,
dir = "D:/Jordy/myplots/",
extent = NA,
type = "bing",
darken = 2.5,
col_by_fish = F,
viterbi = F,
width = 1024,
height = 768,
fps = fps
)
}
\arguments{
\item{sldf}{A SpatialLineDataFrame representation of the river system. The CRS should match that of the detection data.}
\item{detects}{Output of \code{\link{get_locations}} or \code{\link{get_best_locations}}}
\item{dir}{The directory of the folder where the plots should be output (eg. "D:/Jordy/myplots/"). The folder should end with a "/".}
\item{extent}{A vector of length four specifying the plotting extent c(x_min, x_max, y_min, y_max)}
\item{type}{The background to use (see \code{\link[OpenStreetMap]{openmap}}) for more information.}
\item{darken}{Increase to darken the background when open_maps=T. Defaults to 1.}
\item{col_by_fish}{col_by_fish=T assigns each fish a unique color. This color will be preserved between mappings (i.e. between different flight periods).}
\item{viterbi}{Use viterbi=T to color by survival state using the viterbi path (detects needs to be the viterbi output from \code{\link{hmm_survival}}; see examples). Expired fish will be plotted in green.}
\item{width}{The width of the plot.}
\item{height}{The height of the plot.}
}
\value{
Static plots that plot the fish location by flight will be added to the folder along with a gif that iterates through the flights.
}
\description{
Create a dynamic plot of fish detections
}
\examples{
# Note: Create a folder on your machine and make dir coorespond to this directory to run examples
# extent <- c(x_min=466060, x_max=1174579, y_min=6835662, y_max=7499016)
# gif_plot(sldf, viterbi, dir="S:/Jordy/telprep/telprep/gifs/viterbi/", extent=extent, viterbi=T)
# # gif_plot(sldf, best_detects, dir="S:/Jordy/telprep/telprep/gifs/byfish/", extent=extent, col_by_fish=T, viterbi=F)
}
|
source("A:/Machine_Learning/Basefolder/loadImage.R")
################################################################
#
# Exercise 3.1.1
#
# K-means clustering
#
################################################################
####### 1. Settings and preparation of training and test dataset #######
test_split=0.5 #how large should the training set be 0.9=90/10 training/testing
# First load two person data
x1 = loadSinglePersonsData(300,4,3,"A:/Machine_Learning/2017/group")
x2 = loadSinglePersonsData(300,4,2,"A:/Machine_Learning/2017/group")
#shuffle rows
set.seed(990)
dataset_shuffle1 <- x1[sample(nrow(x1)),]
set.seed(995)
dataset_shuffle2 <- x2[sample(nrow(x2)),]
#create the training set
dataset_train<- array(, dim=c((dim(dataset_shuffle1)[1]*test_split*2),dim(dataset_shuffle1)[2])) #*2 for 2 persons
for(i in 1:dim(dataset_train)[1])
{
#fill first part of kNN training set with person1 and then second part with person2
if(i < dim(dataset_train)[1]/2){
dataset_train[i,]<-dataset_shuffle1[i,]
}else{
dataset_train[i,]<-dataset_shuffle2[i/2,]
}
}
#create the testing set
dataset_test<- array(, dim=c(dim=c((dim(dataset_shuffle1)[1]*2 - dim(dataset_train)[1]),dim(dataset_shuffle1)[2])))
for(i in 1:dim(dataset_test)[1])
{
#fill first part of kNN test set with person1 and then second part with person2
if(i < dim(dataset_test)[1]/2){
dataset_test[i,]<-dataset_shuffle1[i+(dim(dataset_shuffle1)[1]*test_split),]
}else{
dataset_test[i,]<-dataset_shuffle2[i/2+(dim(dataset_shuffle2)[1]*test_split),]
}
}
# remove shuffled datasets
rm(dataset_shuffle1)
rm(dataset_shuffle2)
#training set classification vector (first column)
train_class<- array(, dim=c(1,dim(dataset_train)[1]))
for(i in 1:dim(dataset_train)[1])
{
train_class[i]=dataset_train[i,1]
}
#testing set classification vector (first column)
test_class<- array(, dim=c(1,dim(dataset_test)[1]))
for(i in 1:dim(dataset_test)[1])
{
test_class[i]=dataset_test[i,1]
}
####### 2. Perform k-means clustering on training data #######
cipher_cluster <- c()
label_cluster <- c()
#for each cipher, define clusters
for( i in 0:9) {
clusterData <- kmeans(dataset_train[ dataset_train[1:4000,1] == i, ], 200)
cipher_cluster[[i + 1]] <- clusterData$centers
label_cluster[[i + 1]] <- c(1:200)*0 + i
}
train_lab <- factor(unlist(label_cluster))
train_dat <- cipher_cluster[[1]]
for( i in 2:10) {
train_dat <- rbind(train_dat,cipher_cluster[[i]])
}
####### 3. Perform knn on clustered training data #######
kstart=1
kend=80
kinc=1
sample_size=(kend-kstart)/kinc+1
cat(" variance : ", (kend-kstart)/kinc+1, " different k values \n")
time_array <- array(0,dim=c(sample_size,3))
time_array[,1]<- seq(from=kstart, to=kend, by=kinc)
performance_array <- array(0,dim=c((kend-kstart)/kinc+1,3))
performance_array[,1]<- seq(from=kstart, to=kend, by=kinc)
for(k in seq(from=kstart, to=kend, by=kinc))
#for(k in kstart:(kstart+kruns-1))
{
cat("Progress: ",(k-kstart)/(kend-kstart+1)*100,"% \n")
#print(k)
t_time<-proc.time()
data_pred<-knn(train_dat, dataset_test,train_lab,k)
t_time<-proc.time()-t_time
time_array[(k-kstart)/kinc+1,2]<-t_time[3]
cat("Progress: ",(k-kstart+kinc/2)/(kend-kstart+1)*100,"% \n")
#check accuracy unprocessed
correct=0
incorrect=0
for(i in 1:dim(dataset_test)[1])
{
if(data_pred[i]==test_class[i])
{
correct=correct + 1
}
else
{
incorrect = incorrect + 1
}
}
accuracy=correct/dim(dataset_test)[1]*100
accuracy<-unname(accuracy) #this is only because NAMED NUM annoyed me. its not necessary
performance_array[(k-kstart)/kinc+1,2]<-accuracy
}
#plot accuracy
plot(performance_array,main="Accuracy", xlab="k",ylab="Accuracy[%]")
plot(time_array,main="Time", xlab="k",ylab="time [sec]")
################################################################
#
# Exercise 3.1.2
#
# KNN-comparison clustered data vs. raw data (should be taken from one of previous exercises)
#
################################################################
################################################################
#
# Exercise 3.1.3
#
# K-means on entire class
#
################################################################
| /Exercise3/exercise_3_1_nils.R | no_license | alkna13/SML | R | false | false | 4,234 | r | source("A:/Machine_Learning/Basefolder/loadImage.R")
################################################################
#
# Exercise 3.1.1
#
# K-means clustering
#
################################################################
####### 1. Settings and preparation of training and test dataset #######
test_split=0.5 #how large should the training set be 0.9=90/10 training/testing
# First load two person data
x1 = loadSinglePersonsData(300,4,3,"A:/Machine_Learning/2017/group")
x2 = loadSinglePersonsData(300,4,2,"A:/Machine_Learning/2017/group")
#shuffle rows
set.seed(990)
dataset_shuffle1 <- x1[sample(nrow(x1)),]
set.seed(995)
dataset_shuffle2 <- x2[sample(nrow(x2)),]
#create the training set
dataset_train<- array(, dim=c((dim(dataset_shuffle1)[1]*test_split*2),dim(dataset_shuffle1)[2])) #*2 for 2 persons
for(i in 1:dim(dataset_train)[1])
{
#fill first part of kNN training set with person1 and then second part with person2
if(i < dim(dataset_train)[1]/2){
dataset_train[i,]<-dataset_shuffle1[i,]
}else{
dataset_train[i,]<-dataset_shuffle2[i/2,]
}
}
#create the testing set
dataset_test<- array(, dim=c(dim=c((dim(dataset_shuffle1)[1]*2 - dim(dataset_train)[1]),dim(dataset_shuffle1)[2])))
for(i in 1:dim(dataset_test)[1])
{
#fill first part of kNN test set with person1 and then second part with person2
if(i < dim(dataset_test)[1]/2){
dataset_test[i,]<-dataset_shuffle1[i+(dim(dataset_shuffle1)[1]*test_split),]
}else{
dataset_test[i,]<-dataset_shuffle2[i/2+(dim(dataset_shuffle2)[1]*test_split),]
}
}
# remove shuffled datasets
rm(dataset_shuffle1)
rm(dataset_shuffle2)
#training set classification vector (first column)
train_class<- array(, dim=c(1,dim(dataset_train)[1]))
for(i in 1:dim(dataset_train)[1])
{
train_class[i]=dataset_train[i,1]
}
#testing set classification vector (first column)
test_class<- array(, dim=c(1,dim(dataset_test)[1]))
for(i in 1:dim(dataset_test)[1])
{
test_class[i]=dataset_test[i,1]
}
####### 2. Perform k-means clustering on training data #######
cipher_cluster <- c()
label_cluster <- c()
#for each cipher, define clusters
for( i in 0:9) {
clusterData <- kmeans(dataset_train[ dataset_train[1:4000,1] == i, ], 200)
cipher_cluster[[i + 1]] <- clusterData$centers
label_cluster[[i + 1]] <- c(1:200)*0 + i
}
train_lab <- factor(unlist(label_cluster))
train_dat <- cipher_cluster[[1]]
for( i in 2:10) {
train_dat <- rbind(train_dat,cipher_cluster[[i]])
}
####### 3. Perform knn on clustered training data #######
kstart=1
kend=80
kinc=1
sample_size=(kend-kstart)/kinc+1
cat(" variance : ", (kend-kstart)/kinc+1, " different k values \n")
time_array <- array(0,dim=c(sample_size,3))
time_array[,1]<- seq(from=kstart, to=kend, by=kinc)
performance_array <- array(0,dim=c((kend-kstart)/kinc+1,3))
performance_array[,1]<- seq(from=kstart, to=kend, by=kinc)
for(k in seq(from=kstart, to=kend, by=kinc))
#for(k in kstart:(kstart+kruns-1))
{
cat("Progress: ",(k-kstart)/(kend-kstart+1)*100,"% \n")
#print(k)
t_time<-proc.time()
data_pred<-knn(train_dat, dataset_test,train_lab,k)
t_time<-proc.time()-t_time
time_array[(k-kstart)/kinc+1,2]<-t_time[3]
cat("Progress: ",(k-kstart+kinc/2)/(kend-kstart+1)*100,"% \n")
#check accuracy unprocessed
correct=0
incorrect=0
for(i in 1:dim(dataset_test)[1])
{
if(data_pred[i]==test_class[i])
{
correct=correct + 1
}
else
{
incorrect = incorrect + 1
}
}
accuracy=correct/dim(dataset_test)[1]*100
accuracy<-unname(accuracy) #this is only because NAMED NUM annoyed me. its not necessary
performance_array[(k-kstart)/kinc+1,2]<-accuracy
}
#plot accuracy
plot(performance_array,main="Accuracy", xlab="k",ylab="Accuracy[%]")
plot(time_array,main="Time", xlab="k",ylab="time [sec]")
################################################################
#
# Exercise 3.1.2
#
# KNN-comparison clustered data vs. raw data (should be taken from one of previous exercises)
#
################################################################
################################################################
#
# Exercise 3.1.3
#
# K-means on entire class
#
################################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tguh_decomp.R
\name{tguh.decomp}
\alias{tguh.decomp}
\title{The Tail-Greedy Unbalanced Haar decomposition of a vector}
\usage{
tguh.decomp(x, p = 0.01)
}
\arguments{
\item{x}{A vector you wish to decompose.}
\item{p}{Specifies the number of region pairs merged
in each pass through the data, as the proportion of all remaining region pairs. The default is
0.01.}
}
\value{
A list with the following components:
\item{n}{The length of \code{x}.}
\item{decomp.hist}{The decomposition history: the complete record of the \code{n}-1 steps taken to decompose \code{x}.
This is an array of dimensions 4 by 2 by \code{n}-1. Each of the \code{n}-1 matrices of dimensions 4 by 2
contains the following: first row - the indices of the regions merged, in increasing order (note: the indexing changes
through the transform); second row - the values of the Unbalanced Haar filter coefficients used to produce the
corresponding detail coefficient; third row - the (detail coefficient, smooth coefficient) of the decomposition;
fourth row - the lengths of (left wing, right wing) of the corresponding Unbalanced Haar wavelet.}
\item{tguh.coeffs}{The coefficients of the Tail-Greedy Unbalanced Haar transform of \code{x}.}
}
\description{
This function performs the Tail-Greedy Unbalanced Haar decomposition of the input
vector.
}
\details{
The Tail-Greedy Unbalanced Haar decomposition algorithm is described in
"Tail-greedy bottom-up data decompositions and fast multiple change-point
detection", P. Fryzlewicz (2017), preprint.
}
\examples{
rnoise <- rnorm(10)
tguh.decomp(rnoise)
}
\seealso{
\code{\link{tguh.cpt}}, \code{\link{tguh.denoise}}, \code{\link{tguh.reconstr}}
}
\author{
Piotr Fryzlewicz, \email{p.fryzlewicz@lse.ac.uk}
}
| /man/tguh.decomp.Rd | no_license | ChuangWan/breakfast | R | false | true | 1,804 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tguh_decomp.R
\name{tguh.decomp}
\alias{tguh.decomp}
\title{The Tail-Greedy Unbalanced Haar decomposition of a vector}
\usage{
tguh.decomp(x, p = 0.01)
}
\arguments{
\item{x}{A vector you wish to decompose.}
\item{p}{Specifies the number of region pairs merged
in each pass through the data, as the proportion of all remaining region pairs. The default is
0.01.}
}
\value{
A list with the following components:
\item{n}{The length of \code{x}.}
\item{decomp.hist}{The decomposition history: the complete record of the \code{n}-1 steps taken to decompose \code{x}.
This is an array of dimensions 4 by 2 by \code{n}-1. Each of the \code{n}-1 matrices of dimensions 4 by 2
contains the following: first row - the indices of the regions merged, in increasing order (note: the indexing changes
through the transform); second row - the values of the Unbalanced Haar filter coefficients used to produce the
corresponding detail coefficient; third row - the (detail coefficient, smooth coefficient) of the decomposition;
fourth row - the lengths of (left wing, right wing) of the corresponding Unbalanced Haar wavelet.}
\item{tguh.coeffs}{The coefficients of the Tail-Greedy Unbalanced Haar transform of \code{x}.}
}
\description{
This function performs the Tail-Greedy Unbalanced Haar decomposition of the input
vector.
}
\details{
The Tail-Greedy Unbalanced Haar decomposition algorithm is described in
"Tail-greedy bottom-up data decompositions and fast multiple change-point
detection", P. Fryzlewicz (2017), preprint.
}
\examples{
rnoise <- rnorm(10)
tguh.decomp(rnoise)
}
\seealso{
\code{\link{tguh.cpt}}, \code{\link{tguh.denoise}}, \code{\link{tguh.reconstr}}
}
\author{
Piotr Fryzlewicz, \email{p.fryzlewicz@lse.ac.uk}
}
|
testlist <- list(data = structure(c(4.84032536919862e-305, -1.867860512298e-35, 6.95335580788505e-310, -1.867860512298e-35, 4.61321311777405e-104 ), .Dim = c(1L, 5L)), q = -1.85984411421057e-35)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) | /biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610556993-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 265 | r | testlist <- list(data = structure(c(4.84032536919862e-305, -1.867860512298e-35, 6.95335580788505e-310, -1.867860512298e-35, 4.61321311777405e-104 ), .Dim = c(1L, 5L)), q = -1.85984411421057e-35)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binarize.R
\name{binarize}
\alias{binarize}
\title{Binarize digital PCR data}
\usage{
binarize(input)
}
\arguments{
\item{input}{object of the class \code{\linkS4class{adpcr}} or
\code{\linkS4class{dpcr}} with one of following types:\code{"ct"}, \code{"fluo"} or
\code{"nm"}.}
}
\value{
object of the class \code{\linkS4class{adpcr}} or
\code{\linkS4class{dpcr}} (depending on \code{input}) with type \code{"np"}.
}
\description{
Transforms multinomial (number of molecules per partition) or continuous (fluorescence)
digital PCR data to binary (positive/negative partition) format.
}
\examples{
#adpcr object
rand_array <- sim_adpcr(200, 300, 100, pos_sums = FALSE, n_panels = 1)
binarize(rand_array)
#dpcr object
rand_droplets <- sim_dpcr(200, 300, 100, pos_sums = FALSE, n_exp = 1)
binarize(rand_droplets)
}
\author{
Michal Burdukiewicz.
}
\keyword{manip}
| /man/binarize.Rd | no_license | gaoce/dpcR | R | false | true | 940 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binarize.R
\name{binarize}
\alias{binarize}
\title{Binarize digital PCR data}
\usage{
binarize(input)
}
\arguments{
\item{input}{object of the class \code{\linkS4class{adpcr}} or
\code{\linkS4class{dpcr}} with one of following types:\code{"ct"}, \code{"fluo"} or
\code{"nm"}.}
}
\value{
object of the class \code{\linkS4class{adpcr}} or
\code{\linkS4class{dpcr}} (depending on \code{input}) with type \code{"np"}.
}
\description{
Transforms multinomial (number of molecules per partition) or continuous (fluorescence)
digital PCR data to binary (positive/negative partition) format.
}
\examples{
#adpcr object
rand_array <- sim_adpcr(200, 300, 100, pos_sums = FALSE, n_panels = 1)
binarize(rand_array)
#dpcr object
rand_droplets <- sim_dpcr(200, 300, 100, pos_sums = FALSE, n_exp = 1)
binarize(rand_droplets)
}
\author{
Michal Burdukiewicz.
}
\keyword{manip}
|
#https://ntguardian.wordpress.com/2017/03/27/introduction-stock-market-data-r-1/
#Stock Analysis
# Get quantmod
if (!require("quantmod")) {
install.packages("quantmod")
library(quantmod)
}
start <- as.Date("2018-01-01")
end <- as.Date("2018-12-01")
# Let's get Apple stock data; Apple's ticker symbol is AAPL. We use the
# quantmod function getSymbols, and pass a string as a first argument to
# identify the desired ticker symbol, pass 'yahoo' to src for Yahoo!
# Finance, and from and to specify date ranges
# The default behavior for getSymbols is to load data directly into the
# global environment, with the object being named after the loaded ticker
# symbol. This feature may become deprecated in the future, but we exploit
# it now.
getSymbols("AAPL", src = "yahoo", from = start, to = end)
# What is AAPL?
class(AAPL)
head(AAPL)
tail(AAPL)
plot(AAPL[, "AAPL.Close"], main = "AAPL")
candleChart(AAPL[1:10,], up.col = "black", dn.col = "red", theme = "white")
candleChart(AAPL, up.col = "black", dn.col = "red", theme = "white")
#30-Nov-2018 180.29 180.33 177.03 178.58 178.58 3,94,83,800
# Let's get data for Microsoft (MSFT) and Google (GOOG) (actually, Google is
# held by a holding company called Alphabet, Inc., which is the company
# traded on the exchange and uses the ticker symbol GOOG).
getSymbols(c("MSFT", "GOOG"), src = "yahoo", from = start, to = end)
MSFT
# Create an xts object (xts is loaded with quantmod) that contains closing
# prices for AAPL, MSFT, and GOOG
stocks = as.xts(data.frame(AAPL = AAPL[, "AAPL.Close"], MSFT = MSFT[, "MSFT.Close"], GOOG = GOOG[, "GOOG.Close"]))
head(stocks)
class(stocks)
# Create a plot showing all series as lines; must use as.zoo to use the zoo
# method for plot, which allows for multiple series to be plotted on same
# plot
plot(as.zoo(stocks), screens = 1, lty = 1:3, xlab = "Date", ylab = "Price")
legend("right", c("AAPL", "MSFT", "GOOG"), lty = 1:3, cex = 0.5)
plot(as.zoo(stocks[, c("AAPL.Close", "MSFT.Close")]), screens = 1, lty = 1:2, xlab = "Date", ylab = "Price")
par(new = TRUE)
plot(as.zoo(stocks[, "GOOG.Close"]), screens = 1, lty = 3, xaxt = "n", yaxt = "n", xlab = "", ylab = "")
axis(4)
mtext("Price", side = 4, line = 3)
legend("topleft", c("AAPL (left)", "MSFT (left)", "GOOG"), lty = 1:3, cex = 0.5)
# Get pipe operator!
if (!require("magrittr")) {
install.packages("magrittr")
library(magrittr)
}
stock_return = apply(stocks, 1, function(x) {x / stocks[1,]}) %>%
t %>% as.xts
head(stock_return)
plot(as.zoo(stock_return), screens = 1, lty = 1:3, xlab = "Date", ylab = "Return")
legend("topleft", c("AAPL", "MSFT", "GOOG"), lty = 1:3, cex = 0.5)
stock_change = stocks %>% log %>% diff
head(stock_change)
plot(as.zoo(stock_change), screens = 1, lty = 1:3, xlab = "Date", ylab = "Log Difference")
legend("topleft", c("AAPL", "MSFT", "GOOG"), lty = 1:3, cex = 0.5)
candleChart(AAPL, up.col = "black", dn.col = "red", theme = "white")
addSMA(n = 20)
start = as.Date("2010-01-01")
getSymbols(c("AAPL", "MSFT", "GOOG"), src = "yahoo", from = start, to = end)
# The subset argument allows specifying the date range to view in the chart.
# This uses xts style subsetting. Here, I'm using the idiom
# 'YYYY-MM-DD/YYYY-MM-DD', where the date on the left-hand side of the / is
# the start date, and the date on the right-hand side is the end date. If
# either is left blank, either the earliest date or latest date in the
# series is used (as appropriate). This method can be used for any xts
# object, say, AAPL
candleChart(AAPL, up.col = "black", dn.col = "red", theme = "white", subset = "2016-01-04/")
addSMA(n = 20)
candleChart(AAPL, up.col = "black", dn.col = "red", theme = "white", subset = "2016-01-04/")
addSMA(n = c(20, 50, 200))
| /01-IIM/77a5-FA-quantmod.R | no_license | DUanalytics/rAnalytics | R | false | false | 3,748 | r | #https://ntguardian.wordpress.com/2017/03/27/introduction-stock-market-data-r-1/
#Stock Analysis
# Get quantmod (install on first use, then attach).
if (!require("quantmod")) {
  install.packages("quantmod")
  library(quantmod)
}
# Analysis window for the initial downloads.
start <- as.Date("2018-01-01")
end <- as.Date("2018-12-01")
# Let's get Apple stock data; Apple's ticker symbol is AAPL. We use the
# quantmod function getSymbols, and pass a string as a first argument to
# identify the desired ticker symbol, pass 'yahoo' to src for Yahoo!
# Finance, and from and to specify date ranges
# The default behavior for getSymbols is to load data directly into the
# global environment, with the object being named after the loaded ticker
# symbol. This feature may become deprecated in the future, but we exploit
# it now.
getSymbols("AAPL", src = "yahoo", from = start, to = end)
# What is AAPL?
class(AAPL)
head(AAPL)
tail(AAPL)
plot(AAPL[, "AAPL.Close"], main = "AAPL")
candleChart(AAPL[1:10,], up.col = "black", dn.col = "red", theme = "white")
candleChart(AAPL, up.col = "black", dn.col = "red", theme = "white")
#30-Nov-2018 180.29 180.33 177.03 178.58 178.58 3,94,83,800
# Let's get data for Microsoft (MSFT) and Google (GOOG) (actually, Google is
# held by a holding company called Alphabet, Inc., which is the company
# traded on the exchange and uses the ticker symbol GOOG).
getSymbols(c("MSFT", "GOOG"), src = "yahoo", from = start, to = end)
MSFT
# Create an xts object (xts is loaded with quantmod) that contains closing
# prices for AAPL, MSFT, and GOOG
# FIX: use <- for assignment, consistent with the rest of this script.
stocks <- as.xts(data.frame(AAPL = AAPL[, "AAPL.Close"], MSFT = MSFT[, "MSFT.Close"], GOOG = GOOG[, "GOOG.Close"]))
head(stocks)
class(stocks)
# Create a plot showing all series as lines; must use as.zoo to use the zoo
# method for plot, which allows for multiple series to be plotted on same
# plot
plot(as.zoo(stocks), screens = 1, lty = 1:3, xlab = "Date", ylab = "Price")
legend("right", c("AAPL", "MSFT", "GOOG"), lty = 1:3, cex = 0.5)
plot(as.zoo(stocks[, c("AAPL.Close", "MSFT.Close")]), screens = 1, lty = 1:2, xlab = "Date", ylab = "Price")
par(new = TRUE)
plot(as.zoo(stocks[, "GOOG.Close"]), screens = 1, lty = 3, xaxt = "n", yaxt = "n", xlab = "", ylab = "")
axis(4)
mtext("Price", side = 4, line = 3)
legend("topleft", c("AAPL (left)", "MSFT (left)", "GOOG"), lty = 1:3, cex = 0.5)
# Get pipe operator!
if (!require("magrittr")) {
  install.packages("magrittr")
  library(magrittr)
}
# Normalize each series by its first observation to compare relative returns.
stock_return <- apply(stocks, 1, function(x) {x / stocks[1,]}) %>%
  t %>% as.xts
head(stock_return)
plot(as.zoo(stock_return), screens = 1, lty = 1:3, xlab = "Date", ylab = "Return")
legend("topleft", c("AAPL", "MSFT", "GOOG"), lty = 1:3, cex = 0.5)
# Day-over-day log differences (log returns).
stock_change <- stocks %>% log %>% diff
head(stock_change)
plot(as.zoo(stock_change), screens = 1, lty = 1:3, xlab = "Date", ylab = "Log Difference")
legend("topleft", c("AAPL", "MSFT", "GOOG"), lty = 1:3, cex = 0.5)
candleChart(AAPL, up.col = "black", dn.col = "red", theme = "white")
addSMA(n = 20)
# Extend the download window back to 2010 before charting longer moving averages.
start <- as.Date("2010-01-01")
getSymbols(c("AAPL", "MSFT", "GOOG"), src = "yahoo", from = start, to = end)
# The subset argument allows specifying the date range to view in the chart.
# This uses xts style subsetting. Here, I'm using the idiom
# 'YYYY-MM-DD/YYYY-MM-DD', where the date on the left-hand side of the / is
# the start date, and the date on the right-hand side is the end date. If
# either is left blank, either the earliest date or latest date in the
# series is used (as appropriate). This method can be used for any xts
# object, say, AAPL
candleChart(AAPL, up.col = "black", dn.col = "red", theme = "white", subset = "2016-01-04/")
addSMA(n = 20)
candleChart(AAPL, up.col = "black", dn.col = "red", theme = "white", subset = "2016-01-04/")
addSMA(n = c(20, 50, 200))
|
####
##
## name: 06-assessClassifiers.R
## date: 2017-01-26
## what: this assesses various classifers
##
####
# NOTE(review): a hard-coded, machine-specific setwd() makes this script
# non-portable; prefer running from the project root (e.g. an RStudio
# project or here::here()).
setwd("F:/Dropbox/FSU/FSU_Fall_2017/Paper_2/Measure/")
cat("06: now assessing classifiers.\n")
# Presumably defines makeCCode(), used in the cleaning pipeline below --
# confirm against fn-ccodeCleaner.R.
source("fn-ccodeCleaner.R")
#### packages ####
library(dplyr)
library(tidyr)
library(data.table)
library(caret)
library(e1071)
library(pROC)
library(klaR)
library(caTools)
library(doParallel)
library(beepr)
#### function ####
# Mean binomial log loss for a two-class outcome coded "yes"/"no".
# A machine-epsilon offset keeps log() finite when p is exactly 0 or 1.
myLL <- function(y, p) {
  pos_term <- (y == "yes") * log(p + .Machine$double.eps)
  neg_term <- (y == "no") * log(1 - p + .Machine$double.eps)
  mean(-pos_term - neg_term)
}
#### read in a data frame ####
# Load the first dyad file from DD/, clean country codes, build derived
# covariates (contiguity factor, joint democracy, logged distance,
# peace-year polynomials), and create one-period lags within each dyad.
dat <- as.tbl(fread(paste0("DD/", list.files("DD")[1]))) %>%
  mutate(A_ccode = makeCCode(A_ccode), B_ccode = makeCCode(B_ccode),
         ud_contiguity = factor(ud_contiguity,
                                levels = c("none", "water400", "water150",
                                           "water024", "water012",
                                           "direct")),
         ud_jointDem = ifelse(A_polity >= 6 & B_polity >= 6, 1, 0),
         ud_distance = log(ud_distance),
         ud_peaceYears2 = ud_peaceYears^2 / 100,
         ud_peaceYears3 = ud_peaceYears^3 / 1000,
         B_attacked = factor(B_attacked, labels = c("no", "yes"))) %>%
  dplyr::select(B_attacked, everything()) %>%
  group_by(dd) %>%
  mutate(ud_rival_lag = lag(ud_rival),
         ud_ongoingMID_lag = lag(ud_ongoingMID),
         ud_peaceYears_lag = lag(ud_peaceYears),
         ud_peaceYears2_lag = lag(ud_peaceYears2),
         ud_peaceYears3_lag = lag(ud_peaceYears3),
         ud_midInLast10_lag = lag(ud_midInLast10)) %>%
  ungroup()
dat$ud_rival[is.na(dat$ud_rival) & dat$year != 1870] <- 0
dat$ud_ongoingMID[is.na(dat$ud_ongoingMID)] <- 0
# The 1870 rows appear to exist only to seed the lags; drop them now.
dat <- filter(dat, year > 1870)
# Identifiers and the unlagged versions of the lagged covariates are
# excluded from the model matrix.
omit <- c("A_cyear", "B_cyear", "A_ccode", "B_ccode", "dd",
          "A_stateabb", "B_stateabb", "A_milexNI", "B_milexNI",
          "A_polityNI", "B_polityNI", "A_attacked", "A_milex",
          "ud_ongoingMID", "ud_rival", "ud_peaceYears",
          "ud_peaceYears2", "ud_peaceYears3", "ud_midInLast10")
dat <- dplyr::select(dat, -one_of(omit)) %>%
  dplyr::select(ddy, B_attacked, starts_with("A"), starts_with("B"),
                starts_with("ud"), everything())
# check for missingness (percent NA per column)
missing <- cbind(round(colMeans(is.na(dat)), 3)) * 100
# about 2.5% missingness in the lags, which is to be expected;
# safe to simply omit those rows.
# FIX: the original `nas <- which(!complete.cases(dat)); dat[-nas, ]`
# drops EVERY row when there are no incomplete cases (negative indexing
# with integer(0)); logical subsetting is safe either way.
dat_omit <- dat[complete.cases(dat), ]
#### basic CV set-up ####
### data partitioning -- since we have so much, make a true vault data set.
### maintain class proportions with stratified random sampling
# 60% training, then the remaining 40% is split 1/3 evaluation vs 2/3
# held-out test ("vault"); both splits stratify on the outcome so the rare
# "yes" class keeps its proportion in every partition.
set.seed(90210)
split1 <- createDataPartition(dat_omit$B_attacked, p = .6)[[1]]
trainDat <- dat_omit[split1,]
other <- dat_omit[-split1,]
set.seed(555)
split2 <- createDataPartition(other$B_attacked, p = 1/3)[[1]]
evalDat <- other[split2,]
testDat <- other[-split2,]
rm(dat, dat_omit, other)
### to get a few different criteria
# Summary function combining ROC/Sens/Spec, Accuracy/Kappa, and log loss.
fiveStats <- function(...) c(twoClassSummary(...), defaultSummary(...),
                             mnLogLoss(...))
### parallel backend
numCore <- detectCores() - 2
registerDoParallel(cores = numCore)
### controls
# Base control: k-fold CV (caret's default fold count) with class
# probabilities, which ROC and log loss require.
ctrl <- trainControl(method = "cv",
                     classProbs = TRUE,
                     summaryFunction = fiveStats,
                     verboseIter = TRUE,
                     allowParallel = FALSE)
# Variant that skips resampling entirely (single fit at fixed parameters).
ctrlJustRun <- ctrl
ctrlJustRun$method <- "none"
# Variant that farms resamples out to the registered parallel workers.
ctrlParallel <- ctrl
ctrlParallel$verboseIter <- FALSE
ctrlParallel$allowParallel <- TRUE
### computational time of each classifier here ###
# Each model below follows the same pattern: fix the RNG seed so every
# learner is resampled on identical CV folds, train, save the fit to
# Models/, then rm() it to keep memory use down.
### baseline null ###
set.seed(555)
baseline_null<-suppressWarnings(### 0s and 1s
  train(B_attacked ~ 1,
        data = dplyr::select(trainDat, -ddy),
        method = "glm",
        trControl = ctrlParallel, ### still want stats
        metric = "logLoss"))
if(!dir.exists("Models")) dir.create("Models")
save(baseline_null, file = "Models/baseline_null.Rdata")
rm(baseline_null)
### baseline logit ###
set.seed(555)
baseline_glm<-suppressWarnings(### 0s and 1s
  train(B_attacked ~ .,
        data = dplyr::select(trainDat, -ddy),
        method = "glm",
        trControl = ctrlParallel, ### still want stats
        metric = "logLoss"))
save(baseline_glm, file = "Models/baseline_glm.Rdata")
rm(baseline_glm)
### baseline boosted logit ###
set.seed(555)
baseline_LogitBoost<-suppressWarnings(### 0s and 1s
  train(B_attacked ~ .,
        data = dplyr::select(trainDat, -ddy),
        method = "LogitBoost",
        trControl = ctrlParallel, ### still want stats
        tuneLength=5,
        metric = "logLoss"))
save(baseline_LogitBoost, file = "Models/baseline_LogitBoost.Rdata")
rm(baseline_LogitBoost)
### baseline elastic net ###
# FIX: every other learner calls set.seed(555) immediately before train()
# so that all models are resampled on identical CV folds; glmnet was the
# lone exception, making its resampled metrics non-comparable.
set.seed(555)
baseline_glmnet<-suppressWarnings(
  train(B_attacked ~ .,
        data = dplyr::select(trainDat, -ddy),
        method = "glmnet",
        trControl = ctrlParallel,
        # Explicit mixing/penalty grid; note tuneLength is ignored by
        # caret when tuneGrid is supplied.
        tuneGrid = expand.grid(.alpha = c(0,.2,.4,.6,.8,1),
                               .lambda = c(0.00001,
                                           0.0001,
                                           0.001,
                                           0.01)),
        tuneLength = 5,
        metric = "logLoss",
        preProcess=c("center", "scale")))
save(baseline_glmnet, file = "Models/baseline_glmnet.Rdata")
rm(baseline_glmnet)
### baseline naive bayes ###
set.seed(555)
baseline_nb<-train(B_attacked ~ .,
                   data = dplyr::select(trainDat, -ddy),
                   method = "nb",
                   trControl = ctrlParallel,
                   tuneLength = 5,
                   metric = "logLoss")
save(baseline_nb, file = "Models/baseline_nb.Rdata")
rm(baseline_nb)
### baseline mars ###
# Multivariate adaptive regression splines via the earth package.
set.seed(555)
baseline_mars<-train(B_attacked ~ .,
                     data = dplyr::select(trainDat, -ddy),
                     method = "earth",
                     trControl = ctrlParallel,
                     tuneLength = 5,
                     metric = "logLoss")
save(baseline_mars, file = "Models/baseline_mars.Rdata")
rm(baseline_mars)
### baseline knn ###
### THIS ONE IS MURDER ###
# (kept commented out -- the author notes it is far too slow at this scale)
#set.seed(555)
#baseline_knn<-train(B_attacked ~ .,
#                    data = dplyr::select(trainDat, -ddy),
#                    method = "knn",
#                    trControl = ctrlParallel,
#                    tuneLength = 5,
#                    metric = "logLoss")
#save(baseline_knn, file = "Models/baseline_knn.Rdata")
#rm(baseline_knn)
### baseline cart ###
set.seed(555)
baseline_rpart<-train(B_attacked ~ .,
                      data = dplyr::select(trainDat, -ddy),
                      method = "rpart",
                      trControl = ctrlParallel,
                      tuneLength = 5,
                      metric = "logLoss")
save(baseline_rpart, file = "Models/baseline_rpart.Rdata")
rm(baseline_rpart)
# send an email
# NOTE(review): send_message() and mime() come from the gmailr package,
# which is never loaded in this script, so this line errors unless gmailr
# is attached (and authorized) elsewhere in the session.
send_message(mime(from="phil.henrickson@gmail.com", to="phil.henrickson@gmail.com", subject="Code Finished", "Woo!"))
### This part is computationally infeasible at the minute ###
# NOTE(review): these heavier learners are trained and saved below but are
# never load()-ed back in the comparison section -- confirm whether that
# exclusion is intentional.
### baseline rf using ranger ###
set.seed(555)
baseline_rf<-train(B_attacked ~ .,
                   data = dplyr::select(trainDat, -ddy),
                   method = "ranger",
                   trControl = ctrlParallel,
                   tuneLength = 5,
                   metric = "logLoss")
save(baseline_rf, file = "Models/baseline_rf.Rdata")
rm(baseline_rf)
#
### baseline adaboost ###
set.seed(555)
baseline_adaboost<-train(B_attacked ~ .,
                         data = dplyr::select(trainDat, -ddy),
                         method = "adaboost",
                         trControl = ctrlParallel,
                         tuneLength = 5,
                         metric = "logLoss")
save(baseline_adaboost, file = "Models/baseline_adaboost.Rdata")
rm(baseline_adaboost)
### baseline C5.0 ###
set.seed(555)
baseline_C5.0<-train(B_attacked ~ .,
                     data = dplyr::select(trainDat, -ddy),
                     method = "C5.0",
                     trControl = ctrlParallel,
                     tuneLength = 5,
                     metric = "logLoss")
save(baseline_C5.0, file = "Models/baseline_C5.0.Rdata")
rm(baseline_C5.0)
### baseline svm ###
# Radial-basis SVM with class weights (caret method "svmRadialWeights").
set.seed(555)
baseline_svm<-train(B_attacked ~ .,
                    data = dplyr::select(trainDat, -ddy),
                    method = "svmRadialWeights",
                    trControl = ctrlParallel,
                    tuneLength = 5,
                    metric = "logLoss")
save(baseline_svm, file = "Models/baseline_svm.Rdata")
rm(baseline_svm)
### baseline pcaNNet neural net
set.seed(555)
# FIX: the preprocessing below was commented out, but the train() call
# reads `transformed`, so this section errored with
# "object 'transformed' not found". Restored to match the mlp sections:
# center/scale + PCA-compress the predictors, then drop incomplete rows.
trans <- preProcess(dplyr::select(trainDat, -ddy, -B_attacked),
                    method = c("center", "scale", "pca"))
transformed <- as.tbl(predict(trans, trainDat))
transformed <- transformed[complete.cases(transformed),]
baseline_nnet <- train(B_attacked ~ .,
                       data = dplyr::select(transformed, -ddy),
                       method = "avNNet",
                       tuneLength = 5,
                       trControl = ctrl,
                       metric = "logLoss")
save(baseline_nnet, file = "Models/baseline_nnet.Rdata")
rm(baseline_nnet)
### baseline radial basis function network ###
# NOTE(review): despite the header, method = "mlp" fits a multilayer
# perceptron, not an RBF network, and this section is identical to the
# "multilayer perceptron" section below -- it saves to the same file and
# is simply overwritten. Likely a copy-paste remnant; if an RBF net was
# intended, the caret method string would need to change.
set.seed(555)
# Center/scale + PCA compress the predictors before fitting the network.
trans <- preProcess(dplyr::select(trainDat, -ddy, -B_attacked),
                    method = c("center", "scale", "pca"))
transformed <- as.tbl(predict(trans, trainDat))
transformed <- transformed[complete.cases(transformed),]
baseline_mlp <- train(B_attacked ~ .,
                      data = dplyr::select(transformed, -ddy),
                      method = "mlp",
                      tuneLength = 5,
                      trControl = ctrl,
                      metric = "logLoss")
save(baseline_mlp, file = "Models/baseline_mlp.Rdata")
rm(baseline_mlp)
### baseline multilayer perceptron ###
set.seed(555)
# PCA-compressed, standardized predictors (recomputed; identical to the
# section above).
trans <- preProcess(dplyr::select(trainDat, -ddy, -B_attacked),
                    method = c("center", "scale", "pca"))
transformed <- as.tbl(predict(trans, trainDat))
transformed <- transformed[complete.cases(transformed),]
baseline_mlp <- train(B_attacked ~ .,
                      data = dplyr::select(transformed, -ddy),
                      method = "mlp",
                      tuneLength = 5,
                      trControl = ctrl,
                      metric = "logLoss")
save(baseline_mlp, file = "Models/baseline_mlp.Rdata")
rm(baseline_mlp)
# load what has been saved to this point
# (only the cheaper learners; the rf/adaboost/C5.0/svm/nnet/mlp fits saved
# above are -- deliberately or otherwise -- left out of this comparison)
load("Models/baseline_null.Rdata")
load("Models/baseline_glm.Rdata")
load("Models/baseline_LogitBoost.Rdata")
#load("Models/baseline_knn.Rdata")
load("Models/baseline_glmnet.Rdata")
load("Models/baseline_mars.Rdata")
load("Models/baseline_nb.Rdata")
load("Models/baseline_rpart.Rdata")
### grab all models run so far ###
# ls() sorts alphabetically, so models_baseline is ordered by object name,
# not by training order.
models_baseline<-lapply(ls(pattern="baseline_"), get)
# wipe the individual models to save memory
rm(list=ls(pattern="baseline_"))
# Classification metrics for probability predictions p against the factor
# outcome y ("yes"/"no") at threshold `cut`.
# Returns a list with sensitivity, specificity, NPV, precision, the 2x2
# confusion matrix (rows = actual yes/no, cols = predicted yes/no),
# accuracy, log loss (via myLL) and AUC (via pROC).
myRate <- function(p, y, cut = 0.5){
  TP <- sum(p >= cut & y == "yes")
  TN <- sum(p < cut & y == "no")
  FP <- sum(p >= cut & y == "no")
  FN <- sum(p < cut & y == "yes")
  results <- vector(mode = "list")
  results$sens <- TP / (TP + FN)
  # BUG FIX: specificity is TN / (TN + FP); the original used
  # TP / (TP + FP), which silently duplicated precision.
  results$spec <- TN / (TN + FP)
  results$npv <- TN / (TN + FN)
  results$prec <- TP / (TP + FP)
  results$conf <- matrix(c(TP, FN, FP, TN), ncol = 2, nrow = 2, byrow = TRUE)
  results$acc <- (TP + TN) / (TP + TN + FP + FN)
  results$logLoss <- myLL(y = y, p = p)
  results$auc <- as.numeric(pROC::auc(response = y, predictor = p))
  results
}
# Flatten myRate()'s metric list into a one-row data.frame, keeping the
# column order expected by the tables below.
getResults <- function(p, y, cut = 0.5){
  rates <- myRate(p = p, y = y, cut = cut)
  with(rates,
       data.frame(logLoss = logLoss,
                  auc = auc,
                  accuracy = acc,
                  sensitivity = sens,
                  specificity = spec,
                  precision = prec))
}
### store times, training performance, and then validation performance###
# For each model: elapsed training time plus the CV metrics of the
# tuning-parameter row that minimized log loss.
out_training<-foreach(i=1:length(models_baseline), .combine=rbind) %do% {
  ### store time to train ###
  time<-models_baseline[[i]]$times$everything[3]
  perf_min<-models_baseline[[i]]$results[which.min(models_baseline[[i]]$results[,"logLoss"]),]
  ### training performance ###
  training_perf<-dplyr::select(perf_min, ROC, Sens, Spec, Accuracy, Kappa, logLoss)
  out<-cbind(time, training_perf)
  rownames(out)<-models_baseline[[i]]$method
  out
}
### predict the validation set ###
# Out-of-sample metrics on evalDat at the default 0.5 threshold; column 2
# of the probability predictions is the "yes" class (second factor level).
out_validation<-foreach(i=1:length(models_baseline), .combine=rbind) %do% {
  ### validation performance ###
  p<-suppressWarnings(predict.train(models_baseline[[i]],
                                    newdata = dplyr::select(evalDat, -ddy),
                                    type = "prob")[,2])
  y<-evalDat$B_attacked
  out<-getResults(y=y, p=p, cut=0.5)
  print("done")
  rownames(out)<-models_baseline[[i]]$method
  out
}
### replace names for tables ###
# Map caret method names to display labels. Order matters: "glm" -> "Logit"
# must run before the "Logit1"/"Logitnet" cleanups that depend on it.
# FIX: corrected the displayed label typo "Bosted Logit" -> "Boosted Logit"
# (two occurrences, training and validation).
rownames(out_training) <- gsub(pattern = "glm", replacement = "Logit", rownames(out_training))
rownames(out_training) <- gsub(pattern = "Logit1", replacement = "Null", rownames(out_training))
rownames(out_training) <- gsub(pattern = "rpart", replacement = "CART", rownames(out_training))
rownames(out_training) <- gsub(pattern = "Logitnet", replacement = "Elastic Net", rownames(out_training))
rownames(out_training) <- gsub(pattern = "LogitBoost", replacement = "Boosted Logit", rownames(out_training))
rownames(out_training) <- gsub(pattern = "earth", replacement = "MARS", rownames(out_training))
rownames(out_training) <- gsub(pattern = "nb", replacement = "Naive Bayes", rownames(out_training))
rownames(out_training) <- gsub(pattern = "knn", replacement = "KNN", rownames(out_training))
rownames(out_validation) <- gsub(pattern = "glm", replacement = "Logit", rownames(out_validation))
rownames(out_validation) <- gsub(pattern = "Logit1", replacement = "Null", rownames(out_validation))
rownames(out_validation) <- gsub(pattern = "rpart", replacement = "CART", rownames(out_validation))
rownames(out_validation) <- gsub(pattern = "Logitnet", replacement = "Elastic Net", rownames(out_validation))
rownames(out_validation) <- gsub(pattern = "LogitBoost", replacement = "Boosted Logit", rownames(out_validation))
rownames(out_validation) <- gsub(pattern = "earth", replacement = "MARS", rownames(out_validation))
rownames(out_validation) <- gsub(pattern = "nb", replacement = "Naive Bayes", rownames(out_validation))
rownames(out_validation) <- gsub(pattern = "knn", replacement = "KNN", rownames(out_validation))
### make tables ###
# Rank models by log loss (CV for training, held-out for validation);
# convert training time from seconds to minutes.
training_perf<-out_training[order(out_training$logLoss), , drop = FALSE]
training_perf$time<-training_perf$time/60
valid<-dplyr::select(out_validation, auc, sensitivity, specificity, accuracy, precision, logLoss)
validation_perf<-valid[order(valid$logLoss), , drop = FALSE]
if(!dir.exists("Tables")) dir.create("Tables")
# NOTE(review): unlike the model saves above, these files have no .Rdata
# extension; load() still works, but the naming is inconsistent.
save(training_perf, file="Tables/training_perf")
save(validation_perf, file="Tables/validation_perf")
### output to latex ###
library(Hmisc)
latex(round(training_perf, 3), file="")
latex(round(validation_perf, 3), file="")
| /Scripts/06-assessClassifers_update.R | no_license | phenrickson/threats | R | false | false | 16,050 | r | ####
##
## name: 06-assessClassifiers.R
## date: 2017-01-26
## what: this assesses various classifers
##
####
setwd("F:/Dropbox/FSU/FSU_Fall_2017/Paper_2/Measure/")
cat("06: now assessing classifiers.\n")
source("fn-ccodeCleaner.R")
#### packages ####
library(dplyr)
library(tidyr)
library(data.table)
library(caret)
library(e1071)
library(pROC)
library(klaR)
library(caTools)
library(doParallel)
library(beepr)
#### function ####
myLL <- function(y, p)
mean(-1 * (y == "yes") * log(p + .Machine$double.eps) -
(y == "no") * log(1 - p + .Machine$double.eps))
#### read in a data frame ####
dat <- as.tbl(fread(paste0("DD/", list.files("DD")[1]))) %>%
mutate(A_ccode = makeCCode(A_ccode), B_ccode = makeCCode(B_ccode),
ud_contiguity = factor(ud_contiguity,
levels = c("none", "water400", "water150",
"water024", "water012",
"direct")),
ud_jointDem = ifelse(A_polity >= 6 & B_polity >= 6, 1, 0),
ud_distance = log(ud_distance),
ud_peaceYears2 = ud_peaceYears^2 / 100,
ud_peaceYears3 = ud_peaceYears^3 / 1000,
B_attacked = factor(B_attacked, labels = c("no", "yes"))) %>%
dplyr::select(B_attacked, everything()) %>%
group_by(dd) %>%
mutate(ud_rival_lag = lag(ud_rival),
ud_ongoingMID_lag = lag(ud_ongoingMID),
ud_peaceYears_lag = lag(ud_peaceYears),
ud_peaceYears2_lag = lag(ud_peaceYears2),
ud_peaceYears3_lag = lag(ud_peaceYears3),
ud_midInLast10_lag = lag(ud_midInLast10)) %>%
ungroup()
dat$ud_rival[is.na(dat$ud_rival) & dat$year != 1870] <- 0
dat$ud_ongoingMID[is.na(dat$ud_ongoingMID)] <- 0
dat <- filter(dat, year > 1870)
omit <- c("A_cyear", "B_cyear", "A_ccode", "B_ccode", "dd",
"A_stateabb", "B_stateabb", "A_milexNI", "B_milexNI",
"A_polityNI", "B_polityNI", "A_attacked", "A_milex",
"ud_ongoingMID", "ud_rival", "ud_peaceYears",
"ud_peaceYears2", "ud_peaceYears3", "ud_midInLast10")
dat <- dplyr::select(dat, -one_of(omit)) %>%
dplyr::select(ddy, B_attacked, starts_with("A"), starts_with("B"),
starts_with("ud"), everything())
# check for missingness
missing<-cbind(round(colMeans(is.na(dat)), 3))*100
# about 2.5% missingness in the lags, which is to be expected
nas<-which(complete.cases(dat)==F)
# should be safe to simply omit these rows
dat_omit<-dat[-nas,]
#### basic CV set-up ####
### data partitioning -- since we have so much, make a true vault data set.
### maintain class proportions with stratified random sampling
set.seed(90210)
split1 <- createDataPartition(dat_omit$B_attacked, p = .6)[[1]]
trainDat <- dat_omit[split1,]
other <- dat_omit[-split1,]
set.seed(555)
split2 <- createDataPartition(other$B_attacked, p = 1/3)[[1]]
evalDat <- other[split2,]
testDat <- other[-split2,]
rm(dat, dat_omit, other)
### to get a few different criteria
fiveStats <- function(...) c(twoClassSummary(...), defaultSummary(...),
mnLogLoss(...))
### parallel backend
numCore <- detectCores() - 2
registerDoParallel(cores = numCore)
### controls
ctrl <- trainControl(method = "cv",
classProbs = TRUE,
summaryFunction = fiveStats,
verboseIter = TRUE,
allowParallel = FALSE)
ctrlJustRun <- ctrl
ctrlJustRun$method <- "none"
ctrlParallel <- ctrl
ctrlParallel$verboseIter <- FALSE
ctrlParallel$allowParallel <- TRUE
### computational time of each classifier here ###
### baseline null ###
set.seed(555)
baseline_null<-suppressWarnings(### 0s and 1s
train(B_attacked ~ 1,
data = dplyr::select(trainDat, -ddy),
method = "glm",
trControl = ctrlParallel, ### still want stats
metric = "logLoss"))
if(!dir.exists("Models")) dir.create("Models")
save(baseline_null, file = "Models/baseline_null.Rdata")
rm(baseline_null)
### baseline logit ###
set.seed(555)
baseline_glm<-suppressWarnings(### 0s and 1s
train(B_attacked ~ .,
data = dplyr::select(trainDat, -ddy),
method = "glm",
trControl = ctrlParallel, ### still want stats
metric = "logLoss"))
save(baseline_glm, file = "Models/baseline_glm.Rdata")
rm(baseline_glm)
### baseline boosted logit ###
set.seed(555)
baseline_LogitBoost<-suppressWarnings(### 0s and 1s
train(B_attacked ~ .,
data = dplyr::select(trainDat, -ddy),
method = "LogitBoost",
trControl = ctrlParallel, ### still want stats
tuneLength=5,
metric = "logLoss"))
save(baseline_LogitBoost, file = "Models/baseline_LogitBoost.Rdata")
rm(baseline_LogitBoost)
### baseline elastic net ###
# FIX: every other learner calls set.seed(555) immediately before train()
# so that all models are resampled on identical CV folds; glmnet was the
# lone exception, making its resampled metrics non-comparable.
set.seed(555)
baseline_glmnet<-suppressWarnings(
  train(B_attacked ~ .,
        data = dplyr::select(trainDat, -ddy),
        method = "glmnet",
        trControl = ctrlParallel,
        # Explicit mixing/penalty grid; note tuneLength is ignored by
        # caret when tuneGrid is supplied.
        tuneGrid = expand.grid(.alpha = c(0,.2,.4,.6,.8,1),
                               .lambda = c(0.00001,
                                           0.0001,
                                           0.001,
                                           0.01)),
        tuneLength = 5,
        metric = "logLoss",
        preProcess=c("center", "scale")))
save(baseline_glmnet, file = "Models/baseline_glmnet.Rdata")
rm(baseline_glmnet)
### baseline naive bayes ###
set.seed(555)
baseline_nb<-train(B_attacked ~ .,
data = dplyr::select(trainDat, -ddy),
method = "nb",
trControl = ctrlParallel,
tuneLength = 5,
metric = "logLoss")
save(baseline_nb, file = "Models/baseline_nb.Rdata")
rm(baseline_nb)
### baseline mars ###
set.seed(555)
baseline_mars<-train(B_attacked ~ .,
data = dplyr::select(trainDat, -ddy),
method = "earth",
trControl = ctrlParallel,
tuneLength = 5,
metric = "logLoss")
save(baseline_mars, file = "Models/baseline_mars.Rdata")
rm(baseline_mars)
### baseline knn ###
### THIS ONE IS MURDER ###
#set.seed(555)
#baseline_knn<-train(B_attacked ~ .,
# data = dplyr::select(trainDat, -ddy),
# method = "knn",
# trControl = ctrlParallel,
# tuneLength = 5,
# metric = "logLoss")
#save(baseline_knn, file = "Models/baseline_knn.Rdata")
#rm(baseline_knn)
### baseline cart ###
set.seed(555)
baseline_rpart<-train(B_attacked ~ .,
data = dplyr::select(trainDat, -ddy),
method = "rpart",
trControl = ctrlParallel,
tuneLength = 5,
metric = "logLoss")
save(baseline_rpart, file = "Models/baseline_rpart.Rdata")
rm(baseline_rpart)
# send an email
send_message(mime(from="phil.henrickson@gmail.com", to="phil.henrickson@gmail.com", subject="Code Finished", "Woo!"))
### This part is computationally infeasible at the minute ###
### baseline rf using ranger ###
set.seed(555)
baseline_rf<-train(B_attacked ~ .,
data = dplyr::select(trainDat, -ddy),
method = "ranger",
trControl = ctrlParallel,
tuneLength = 5,
metric = "logLoss")
save(baseline_rf, file = "Models/baseline_rf.Rdata")
rm(baseline_rf)
#
### baseline adaboost ###
set.seed(555)
baseline_adaboost<-train(B_attacked ~ .,
data = dplyr::select(trainDat, -ddy),
method = "adaboost",
trControl = ctrlParallel,
tuneLength = 5,
metric = "logLoss")
save(baseline_adaboost, file = "Models/baseline_adaboost.Rdata")
rm(baseline_adaboost)
### baseline C5.0 ###
set.seed(555)
baseline_C5.0<-train(B_attacked ~ .,
data = dplyr::select(trainDat, -ddy),
method = "C5.0",
trControl = ctrlParallel,
tuneLength = 5,
metric = "logLoss")
save(baseline_C5.0, file = "Models/baseline_C5.0.Rdata")
rm(baseline_C5.0)
### baseline svm ###
set.seed(555)
baseline_svm<-train(B_attacked ~ .,
data = dplyr::select(trainDat, -ddy),
method = "svmRadialWeights",
trControl = ctrlParallel,
tuneLength = 5,
metric = "logLoss")
save(baseline_svm, file = "Models/baseline_svm.Rdata")
rm(baseline_svm)
### baseline pcaNNet neural net
set.seed(555)
# FIX: the preprocessing below was commented out, but the train() call
# reads `transformed`, so this section errored with
# "object 'transformed' not found". Restored to match the mlp sections:
# center/scale + PCA-compress the predictors, then drop incomplete rows.
trans <- preProcess(dplyr::select(trainDat, -ddy, -B_attacked),
                    method = c("center", "scale", "pca"))
transformed <- as.tbl(predict(trans, trainDat))
transformed <- transformed[complete.cases(transformed),]
baseline_nnet <- train(B_attacked ~ .,
                       data = dplyr::select(transformed, -ddy),
                       method = "avNNet",
                       tuneLength = 5,
                       trControl = ctrl,
                       metric = "logLoss")
save(baseline_nnet, file = "Models/baseline_nnet.Rdata")
rm(baseline_nnet)
### baseline radial basis function network ###
set.seed(555)
trans <- preProcess(dplyr::select(trainDat, -ddy, -B_attacked),
method = c("center", "scale", "pca"))
transformed <- as.tbl(predict(trans, trainDat))
transformed <- transformed[complete.cases(transformed),]
baseline_mlp <- train(B_attacked ~ .,
data = dplyr::select(transformed, -ddy),
method = "mlp",
tuneLength = 5,
trControl = ctrl,
metric = "logLoss")
save(baseline_mlp, file = "Models/baseline_mlp.Rdata")
rm(baseline_mlp)
### baseline multilayer perceptron ###
set.seed(555)
trans <- preProcess(dplyr::select(trainDat, -ddy, -B_attacked),
method = c("center", "scale", "pca"))
transformed <- as.tbl(predict(trans, trainDat))
transformed <- transformed[complete.cases(transformed),]
baseline_mlp <- train(B_attacked ~ .,
data = dplyr::select(transformed, -ddy),
method = "mlp",
tuneLength = 5,
trControl = ctrl,
metric = "logLoss")
save(baseline_mlp, file = "Models/baseline_mlp.Rdata")
rm(baseline_mlp)
# load what has been saved to this point
load("Models/baseline_null.Rdata")
load("Models/baseline_glm.Rdata")
load("Models/baseline_LogitBoost.Rdata")
#load("Models/baseline_knn.Rdata")
load("Models/baseline_glmnet.Rdata")
load("Models/baseline_mars.Rdata")
load("Models/baseline_nb.Rdata")
load("Models/baseline_rpart.Rdata")
### grab all models run so far ###
models_baseline<-lapply(ls(pattern="baseline_"), get)
# wipe the individual models to save memory
rm(list=ls(pattern="baseline_"))
# Classification metrics for probability predictions p against the factor
# outcome y ("yes"/"no") at threshold `cut`.
# Returns a list with sensitivity, specificity, NPV, precision, the 2x2
# confusion matrix (rows = actual yes/no, cols = predicted yes/no),
# accuracy, log loss (via myLL) and AUC (via pROC).
myRate <- function(p, y, cut = 0.5){
  TP <- sum(p >= cut & y == "yes")
  TN <- sum(p < cut & y == "no")
  FP <- sum(p >= cut & y == "no")
  FN <- sum(p < cut & y == "yes")
  results <- vector(mode = "list")
  results$sens <- TP / (TP + FN)
  # BUG FIX: specificity is TN / (TN + FP); the original used
  # TP / (TP + FP), which silently duplicated precision.
  results$spec <- TN / (TN + FP)
  results$npv <- TN / (TN + FN)
  results$prec <- TP / (TP + FP)
  results$conf <- matrix(c(TP, FN, FP, TN), ncol = 2, nrow = 2, byrow = TRUE)
  results$acc <- (TP + TN) / (TP + TN + FP + FN)
  results$logLoss <- myLL(y = y, p = p)
  results$auc <- as.numeric(pROC::auc(response = y, predictor = p))
  results
}
getResults <- function(p, y, cut = 0.5){
foo <- myRate(p = p, y = y, cut = cut)
data.frame(logLoss = foo$logLoss,
auc = foo$auc,
accuracy = foo$acc,
sensitivity = foo$sens,
specificity = foo$spec,
precision = foo$prec)
}
### store times, training performance, and then validation performance###
out_training<-foreach(i=1:length(models_baseline), .combine=rbind) %do% {
### store time to train ###
time<-models_baseline[[i]]$times$everything[3]
perf_min<-models_baseline[[i]]$results[which.min(models_baseline[[i]]$results[,"logLoss"]),]
### training performance ###
training_perf<-dplyr::select(perf_min, ROC, Sens, Spec, Accuracy, Kappa, logLoss)
out<-cbind(time, training_perf)
rownames(out)<-models_baseline[[i]]$method
out
}
### predict the validation set ###
out_validation<-foreach(i=1:length(models_baseline), .combine=rbind) %do% {
### validation performance ###
p<-suppressWarnings(predict.train(models_baseline[[i]],
newdata = dplyr::select(evalDat, -ddy),
type = "prob")[,2])
y<-evalDat$B_attacked
out<-getResults(y=y, p=p, cut=0.5)
print("done")
rownames(out)<-models_baseline[[i]]$method
out
}
### replace names for tables ###
# Map caret method names to display labels. Order matters: "glm" -> "Logit"
# must run before the "Logit1"/"Logitnet" cleanups that depend on it.
# FIX: corrected the displayed label typo "Bosted Logit" -> "Boosted Logit"
# (two occurrences, training and validation).
rownames(out_training) <- gsub(pattern = "glm", replacement = "Logit", rownames(out_training))
rownames(out_training) <- gsub(pattern = "Logit1", replacement = "Null", rownames(out_training))
rownames(out_training) <- gsub(pattern = "rpart", replacement = "CART", rownames(out_training))
rownames(out_training) <- gsub(pattern = "Logitnet", replacement = "Elastic Net", rownames(out_training))
rownames(out_training) <- gsub(pattern = "LogitBoost", replacement = "Boosted Logit", rownames(out_training))
rownames(out_training) <- gsub(pattern = "earth", replacement = "MARS", rownames(out_training))
rownames(out_training) <- gsub(pattern = "nb", replacement = "Naive Bayes", rownames(out_training))
rownames(out_training) <- gsub(pattern = "knn", replacement = "KNN", rownames(out_training))
rownames(out_validation) <- gsub(pattern = "glm", replacement = "Logit", rownames(out_validation))
rownames(out_validation) <- gsub(pattern = "Logit1", replacement = "Null", rownames(out_validation))
rownames(out_validation) <- gsub(pattern = "rpart", replacement = "CART", rownames(out_validation))
rownames(out_validation) <- gsub(pattern = "Logitnet", replacement = "Elastic Net", rownames(out_validation))
rownames(out_validation) <- gsub(pattern = "LogitBoost", replacement = "Boosted Logit", rownames(out_validation))
rownames(out_validation) <- gsub(pattern = "earth", replacement = "MARS", rownames(out_validation))
rownames(out_validation) <- gsub(pattern = "nb", replacement = "Naive Bayes", rownames(out_validation))
rownames(out_validation) <- gsub(pattern = "knn", replacement = "KNN", rownames(out_validation))
### make tables ###
training_perf<-out_training[order(out_training$logLoss), , drop = FALSE]
training_perf$time<-training_perf$time/60
valid<-dplyr::select(out_validation, auc, sensitivity, specificity, accuracy, precision, logLoss)
validation_perf<-valid[order(valid$logLoss), , drop = FALSE]
if(!dir.exists("Tables")) dir.create("Tables")
save(training_perf, file="Tables/training_perf")
save(validation_perf, file="Tables/validation_perf")
### output to latex ###
library(Hmisc)
latex(round(training_perf, 3), file="")
latex(round(validation_perf, 3), file="")
|
# **************************************************
# Initial conditions
# **************************************************
N0 <- 1000       # Initial population size
rate_A <- 1.2    # Growth rate of allele "A"
rate_a <- 1.2    # Growth rate of allele "a"
fA <- 0.3        # Frequency of allele "A"
max_gen <- 20    # Number of generations to simulate
# **************************************************
# Derived variables
# **************************************************
# (these globals are recomputed inside evosimu(); kept here, presumably
# for interactive exploration)
fa <- 1.0 - fA   # Frequency of allele "a"
NA_0 <- N0 * fA  # Initial count of allele "A"
Na_0 <- N0 * fa  # Initial count of allele "a"
# **************************************************
# Simulation
# **************************************************
# Deterministic exponential growth of two alleles ("A" and "a"),
# tracking counts and relative frequencies across generations.
#
# Args:
#   N0      total initial population size
#   rate_A  per-generation growth rate of allele "A"
#   rate_a  per-generation growth rate of allele "a"
#   fA      initial frequency of allele "A"
#   max_gen number of generations to simulate
# Returns a (max_gen + 1) x 5 matrix with columns
#   Nt, NA_t, Na_t, fA_t, fa_t; row 1 is generation 0.
evosimu <- function(N0, rate_A, rate_a, fA, max_gen) {
  fa <- 1.0 - fA
  NA_0 <- N0 * fA
  Na_0 <- N0 * fa
  gen <- 0:max_gen
  # BUG FIX: generation g grows by rate^g, not rate^(g + 1). The original
  # loop filled row t (generation t - 1) with NA_0 * rate^t, so every
  # generation was one growth step ahead; it also errored for max_gen = 0
  # because `for (t in 2:1)` wrote past the matrix.
  NA_t <- NA_0 * rate_A ^ gen
  Na_t <- Na_0 * rate_a ^ gen
  Nt <- NA_t + Na_t
  resultado <- cbind(Nt = Nt,
                     NA_t = NA_t,
                     Na_t = Na_t,
                     fA_t = NA_t / Nt,
                     fa_t = Na_t / Nt)
  return(resultado)
}
# Quick sanity run (result auto-printed), then plot scenarios.
# Each scenario draws two panels: top = population sizes (total and per
# allele), bottom = allele frequencies.
evosimu(N0=N0,rate_A=rate_A,rate_a=rate_a,fA=fA,max_gen=10)
saida<-evosimu(N0=N0,rate_A=rate_A,rate_a=rate_a,fA=fA,max_gen=max_gen)
layout(matrix(c(1,2),nrow=2,ncol=1))
plot(0:max_gen,saida[,"Nt"],frame=F,xlab="Geração",ylab="População",type="l",ylim=c(0,max(saida[,"Nt"])))
points(0:max_gen,saida[,"NA_t"],type="l",col="red")
points(0:max_gen,saida[,"Na_t"],type="l",col="blue")
legend("topleft",col=c("black","red","blue"),lty=1,legend=c("Total","A","a"),bty="n")
plot(0:max_gen,saida[,"fA_t"],frame=F,xlab="Geração",ylab="Frequência Gênica",type="l",ylim=c(0,1),col="red")
points(0:max_gen,saida[,"fa_t"],type="l",col="blue")
#N0 = 50 rate_A = 1.2 rate_a = 1.2 fA = 0.3
saida<-evosimu(N0=50,rate_A=1.2,rate_a=1.2,fA=0.3,max_gen=20)
layout(matrix(c(1,2),nrow=2,ncol=1))
plot(0:max_gen,saida[,"Nt"],frame=F,xlab="Geração",ylab="População",type="l",ylim=c(0,max(saida[,"Nt"])))
points(0:max_gen,saida[,"NA_t"],type="l",col="red")
points(0:max_gen,saida[,"Na_t"],type="l",col="blue")
legend("topleft",col=c("black","red","blue"),lty=1,legend=c("Total","A","a"),bty="n")
plot(0:max_gen,saida[,"fA_t"],frame=F,xlab="Geração",ylab="Frequência Gênica",type="l",ylim=c(0,1),col="red")
points(0:max_gen,saida[,"fa_t"],type="l",col="blue")
#N0 = 1000 rate_A = 0.7 rate_a = 0.7 fA = 0.3
# NOTE(review): the header above says N0 = 1000 but the call uses N0 = 100.
saida<-evosimu(N0=100,rate_A=0.7,rate_a=0.7,fA=0.3,max_gen=max_gen)
layout(matrix(c(1,2),nrow=2,ncol=1))
plot(0:max_gen,saida[,"Nt"],frame=F,xlab="Geração",ylab="População",type="l",ylim=c(0,max(saida[,"Nt"])))
points(0:max_gen,saida[,"NA_t"],type="l",col="red")
points(0:max_gen,saida[,"Na_t"],type="l",col="blue")
legend("topright",col=c("black","red","blue"),lty=1,legend=c("Total","A","a"),bty="n")
plot(0:max_gen,saida[,"fA_t"],frame=F,xlab="Geração",ylab="Frequência Gênica",type="l",ylim=c(0,1),col="red")
points(0:max_gen,saida[,"fa_t"],type="l",col="blue")
#N0 = 1000 rate_A = 2.0 rate_a = 1.5 fA = 0.02
saida<-evosimu(N0=1000,rate_A=2.0,rate_a=1.5,fA=0.02,max_gen=max_gen)
layout(matrix(c(1,2),nrow=2,ncol=1))
plot(0:max_gen,saida[,"Nt"],frame=F,xlab="Geração",ylab="População",type="l",ylim=c(0,max(saida[,"Nt"])))
points(0:max_gen,saida[,"NA_t"],type="l",col="red")
points(0:max_gen,saida[,"Na_t"],type="l",col="blue")
legend("topleft",col=c("black","red","blue"),lty=1,legend=c("Total","A","a"),bty="n")
plot(0:max_gen,saida[,"fA_t"],frame=F,xlab="Geração",ylab="Frequência Gênica",type="l",ylim=c(0,1),col="red")
points(0:max_gen,saida[,"fa_t"],type="l",col="blue")
#N0 = 1000 rate_A = 1.2 rate_a = 0.9 fA = 0.02
saida<-evosimu(N0=1000,rate_A=1.2,rate_a=0.9,fA=0.02,max_gen=max_gen)
layout(matrix(c(1,2),nrow=2,ncol=1))
plot(0:max_gen,saida[,"Nt"],frame=F,xlab="Geração",ylab="População",type="l",ylim=c(0,max(saida[,"Nt"])))
points(0:max_gen,saida[,"NA_t"],type="l",col="red")
points(0:max_gen,saida[,"Na_t"],type="l",col="blue")
legend("topleft",col=c("black","red","blue"),lty=1,legend=c("Total","A","a"),bty="n")
plot(0:max_gen,saida[,"fA_t"],frame=F,xlab="Geração",ylab="Frequência Gênica",type="l",ylim=c(0,1),col="red")
points(0:max_gen,saida[,"fa_t"],type="l",col="blue")
#N0 = 10000 rate_A = 0.8 rate_a = 0.6 fA = 0.02
saida<-evosimu(N0=10000,rate_A=0.8,rate_a=0.6,fA=0.02,max_gen=max_gen)
layout(matrix(c(1,2),nrow=2,ncol=1))
plot(0:max_gen,saida[,"Nt"],frame=F,xlab="Geração",ylab="População",type="l",ylim=c(0,max(saida[,"Nt"])))
points(0:max_gen,saida[,"NA_t"],type="l",col="red")
points(0:max_gen,saida[,"Na_t"],type="l",col="blue")
legend("topleft",col=c("black","red","blue"),lty=1,legend=c("Total","A","a"),bty="n")
plot(0:max_gen,saida[,"fA_t"],frame=F,xlab="Geração",ylab="Frequência Gênica",type="l",ylim=c(0,1),col="red")
points(0:max_gen,saida[,"fa_t"],type="l",col="blue")
| /evosimu.r | no_license | Squiercg/recologia | R | false | false | 5,175 | r | # **************************************************
# Initial conditions
# **************************************************
N0 <- 1000 # Initial total population size
rate_A <- 1.2 # Growth rate of allele "A"
rate_a <- 1.2 # Growth rate of allele "a"
fA <- 0.3 # Initial frequency of allele "A"
max_gen <- 20 # Number of generations to simulate
# **************************************************
# Computing derived variables
# **************************************************
fa <- 1.0 - fA # Initial frequency of allele "a"
NA_0 <- N0 * fA # Initial population of allele "A"
Na_0 <- N0 * fa # Initial population of allele "a"
# **************************************************
# Simulation
# **************************************************
# Simulate two haploid alleles ("A" and "a") growing geometrically at
# independent rates in a population of initial size N0.
#
# Args:
#   N0:      initial total population size
#   rate_A:  per-generation growth rate of allele "A"
#   rate_a:  per-generation growth rate of allele "a"
#   fA:      initial frequency of allele "A"
#   max_gen: number of generations to simulate
#
# Returns a (max_gen + 1) x 5 matrix; row t holds generation t - 1
# (columns: total size "Nt", allele sizes "NA_t"/"Na_t",
# allele frequencies "fA_t"/"fa_t").
evosimu <- function(N0, rate_A, rate_a, fA, max_gen) {
  fa <- 1.0 - fA
  NA_0 <- N0 * fA
  Na_0 <- N0 * fa
  resultado <- matrix(NA, ncol = 5, nrow = max_gen + 1)
  colnames(resultado) <- c("Nt", "NA_t", "Na_t", "fA_t", "fa_t")
  resultado[1, ] <- c(N0, NA_0, Na_0, fA, fa)
  # seq_len() keeps the loop empty when max_gen = 0 (2:(max_gen + 1)
  # would iterate backwards over 2:1).
  for (t in seq_len(max_gen) + 1) {
    # Row t is generation t - 1, so t - 1 growth steps have elapsed.
    # (The previous version used rate ^ t, overshooting every
    # generation by one growth step.)
    resultado[t, "NA_t"] <- NA_0 * rate_A ^ (t - 1)
    resultado[t, "Na_t"] <- Na_0 * rate_a ^ (t - 1)
    resultado[t, "Nt"] <- resultado[t, "NA_t"] + resultado[t, "Na_t"]
    resultado[t, "fA_t"] <- resultado[t, "NA_t"] / resultado[t, "Nt"]
    resultado[t, "fa_t"] <- resultado[t, "Na_t"] / resultado[t, "Nt"]
  }
  return(resultado)
}
# Print the simulated trajectory for the baseline parameters (10 generations).
evosimu(N0=N0,rate_A=rate_A,rate_a=rate_a,fA=fA,max_gen=10)
# Baseline scenario using the globals defined above. Each scenario draws two
# stacked panels: population sizes on top, allele frequencies below.
saida<-evosimu(N0=N0,rate_A=rate_A,rate_a=rate_a,fA=fA,max_gen=max_gen)
layout(matrix(c(1,2),nrow=2,ncol=1))
plot(0:max_gen,saida[,"Nt"],frame=F,xlab="Geração",ylab="População",type="l",ylim=c(0,max(saida[,"Nt"])))
points(0:max_gen,saida[,"NA_t"],type="l",col="red")
points(0:max_gen,saida[,"Na_t"],type="l",col="blue")
legend("topleft",col=c("black","red","blue"),lty=1,legend=c("Total","A","a"),bty="n")
plot(0:max_gen,saida[,"fA_t"],frame=F,xlab="Geração",ylab="Frequência Gênica",type="l",ylim=c(0,1),col="red")
points(0:max_gen,saida[,"fa_t"],type="l",col="blue")
# Scenario: equal growth rates (selectively neutral), small population.
#N0 = 50 rate_A = 1.2 rate_a = 1.2 fA = 0.3
saida<-evosimu(N0=50,rate_A=1.2,rate_a=1.2,fA=0.3,max_gen=20)
layout(matrix(c(1,2),nrow=2,ncol=1))
plot(0:max_gen,saida[,"Nt"],frame=F,xlab="Geração",ylab="População",type="l",ylim=c(0,max(saida[,"Nt"])))
points(0:max_gen,saida[,"NA_t"],type="l",col="red")
points(0:max_gen,saida[,"Na_t"],type="l",col="blue")
legend("topleft",col=c("black","red","blue"),lty=1,legend=c("Total","A","a"),bty="n")
plot(0:max_gen,saida[,"fA_t"],frame=F,xlab="Geração",ylab="Frequência Gênica",type="l",ylim=c(0,1),col="red")
points(0:max_gen,saida[,"fa_t"],type="l",col="blue")
# Scenario: equal growth rates below 1 (declining population).
#N0 = 1000 rate_A = 0.7 rate_a = 0.7 fA = 0.3
# NOTE(review): the comment above says N0 = 1000 but the call uses N0 = 100 — confirm intended value.
saida<-evosimu(N0=100,rate_A=0.7,rate_a=0.7,fA=0.3,max_gen=max_gen)
layout(matrix(c(1,2),nrow=2,ncol=1))
plot(0:max_gen,saida[,"Nt"],frame=F,xlab="Geração",ylab="População",type="l",ylim=c(0,max(saida[,"Nt"])))
points(0:max_gen,saida[,"NA_t"],type="l",col="red")
points(0:max_gen,saida[,"Na_t"],type="l",col="blue")
legend("topright",col=c("black","red","blue"),lty=1,legend=c("Total","A","a"),bty="n")
plot(0:max_gen,saida[,"fA_t"],frame=F,xlab="Geração",ylab="Frequência Gênica",type="l",ylim=c(0,1),col="red")
points(0:max_gen,saida[,"fa_t"],type="l",col="blue")
# Scenario: rare allele A (fA = 0.02) with a strong growth advantage.
#N0 = 1000 rate_A = 2.0 rate_a = 1.5 fA = 0.02
saida<-evosimu(N0=1000,rate_A=2.0,rate_a=1.5,fA=0.02,max_gen=max_gen)
layout(matrix(c(1,2),nrow=2,ncol=1))
plot(0:max_gen,saida[,"Nt"],frame=F,xlab="Geração",ylab="População",type="l",ylim=c(0,max(saida[,"Nt"])))
points(0:max_gen,saida[,"NA_t"],type="l",col="red")
points(0:max_gen,saida[,"Na_t"],type="l",col="blue")
legend("topleft",col=c("black","red","blue"),lty=1,legend=c("Total","A","a"),bty="n")
plot(0:max_gen,saida[,"fA_t"],frame=F,xlab="Geração",ylab="Frequência Gênica",type="l",ylim=c(0,1),col="red")
points(0:max_gen,saida[,"fa_t"],type="l",col="blue")
# Scenario: rare allele A growing while allele a declines.
#N0 = 1000 rate_A = 1.2 rate_a = 0.9 fA = 0.02
saida<-evosimu(N0=1000,rate_A=1.2,rate_a=0.9,fA=0.02,max_gen=max_gen)
layout(matrix(c(1,2),nrow=2,ncol=1))
plot(0:max_gen,saida[,"Nt"],frame=F,xlab="Geração",ylab="População",type="l",ylim=c(0,max(saida[,"Nt"])))
points(0:max_gen,saida[,"NA_t"],type="l",col="red")
points(0:max_gen,saida[,"Na_t"],type="l",col="blue")
legend("topleft",col=c("black","red","blue"),lty=1,legend=c("Total","A","a"),bty="n")
plot(0:max_gen,saida[,"fA_t"],frame=F,xlab="Geração",ylab="Frequência Gênica",type="l",ylim=c(0,1),col="red")
points(0:max_gen,saida[,"fa_t"],type="l",col="blue")
# Scenario: both alleles declining, allele A declining more slowly.
#N0 = 10000 rate_A = 0.8 rate_a = 0.6 fA = 0.02
saida<-evosimu(N0=10000,rate_A=0.8,rate_a=0.6,fA=0.02,max_gen=max_gen)
layout(matrix(c(1,2),nrow=2,ncol=1))
plot(0:max_gen,saida[,"Nt"],frame=F,xlab="Geração",ylab="População",type="l",ylim=c(0,max(saida[,"Nt"])))
points(0:max_gen,saida[,"NA_t"],type="l",col="red")
points(0:max_gen,saida[,"Na_t"],type="l",col="blue")
legend("topleft",col=c("black","red","blue"),lty=1,legend=c("Total","A","a"),bty="n")
plot(0:max_gen,saida[,"fA_t"],frame=F,xlab="Geração",ylab="Frequência Gênica",type="l",ylim=c(0,1),col="red")
points(0:max_gen,saida[,"fa_t"],type="l",col="blue")
|
#' y_probs
#'
#' Detection history probabilities, given model parameters
#'
#' Computes, for each route, the probability of every possible
#' five-survey detection history. Column k corresponds to the history
#' encoded by the five-bit binary representation of k - 1 with the
#' first survey in the leftmost bit, so column 1 is "00000" and
#' column 32 is "11111". The latent segment-level occupancy is a
#' two-state Markov chain and detections can only occur on occupied
#' segments, so each history probability is obtained with a standard
#' forward (HMM) recursion rather than enumerating every latent path
#' by hand.
#'
#' @param psi Predicted route-level occupancy
#' @param xpsi spatial correlation parameters: xpsi[1] is the
#'   probability a segment is occupied given the previous one was not;
#'   xpsi[2] given the previous one was
#' @param p detection probability
#' @return a length(psi) x 32 matrix of detection-history probabilities
#' @export
y_probs <- function(psi, xpsi, p){
  # Stationary probability that the first segment is occupied.
  pi1 <- xpsi[1] / (xpsi[1] + 1 - xpsi[2])
  n_seg <- 5L
  n_hist <- 2 ^ n_seg
  pr <- matrix(NA, nrow = length(psi), ncol = n_hist)
  for (k in seq_len(n_hist)) {
    # Decode column k into its detection history, first survey first.
    hist <- rev(as.integer(intToBits(k - 1L))[seq_len(n_seg)])
    # Emission probabilities: an unoccupied segment can never yield a
    # detection; an occupied one is detected with probability p.
    e_un <- ifelse(hist == 1L, 0, 1)
    e_oc <- ifelse(hist == 1L, p, 1 - p)
    # Forward recursion over the latent occupied/unoccupied states.
    a_un <- (1 - pi1) * e_un[1]
    a_oc <- pi1 * e_oc[1]
    for (s in 2:n_seg) {
      b_un <- (a_un * (1 - xpsi[1]) + a_oc * (1 - xpsi[2])) * e_un[s]
      b_oc <- (a_un * xpsi[1] + a_oc * xpsi[2]) * e_oc[s]
      a_un <- b_un
      a_oc <- b_oc
    }
    pr[, k] <- psi * (a_un + a_oc)
  }
  # An all-zero history also arises whenever the route is unoccupied.
  pr[, 1] <- pr[, 1] + (1 - psi)
  return(pr)
}
| /R/y_probs.R | no_license | crushing05/BayesCorrOcc | R | false | false | 16,903 | r | #' y_probs
#'
#' Detection history probabilities, given model parameters
#'
#' Computes, for each route, the probability of every possible
#' five-survey detection history. Column k corresponds to the history
#' encoded by the five-bit binary representation of k - 1 with the
#' first survey in the leftmost bit, so column 1 is "00000" and
#' column 32 is "11111". The latent segment-level occupancy is a
#' two-state Markov chain and detections can only occur on occupied
#' segments, so each history probability is obtained with a standard
#' forward (HMM) recursion rather than enumerating every latent path
#' by hand.
#'
#' @param psi Predicted route-level occupancy
#' @param xpsi spatial correlation parameters: xpsi[1] is the
#'   probability a segment is occupied given the previous one was not;
#'   xpsi[2] given the previous one was
#' @param p detection probability
#' @return a length(psi) x 32 matrix of detection-history probabilities
#' @export
y_probs <- function(psi, xpsi, p){
  # Stationary probability that the first segment is occupied.
  pi1 <- xpsi[1] / (xpsi[1] + 1 - xpsi[2])
  n_seg <- 5L
  n_hist <- 2 ^ n_seg
  pr <- matrix(NA, nrow = length(psi), ncol = n_hist)
  for (k in seq_len(n_hist)) {
    # Decode column k into its detection history, first survey first.
    hist <- rev(as.integer(intToBits(k - 1L))[seq_len(n_seg)])
    # Emission probabilities: an unoccupied segment can never yield a
    # detection; an occupied one is detected with probability p.
    e_un <- ifelse(hist == 1L, 0, 1)
    e_oc <- ifelse(hist == 1L, p, 1 - p)
    # Forward recursion over the latent occupied/unoccupied states.
    a_un <- (1 - pi1) * e_un[1]
    a_oc <- pi1 * e_oc[1]
    for (s in 2:n_seg) {
      b_un <- (a_un * (1 - xpsi[1]) + a_oc * (1 - xpsi[2])) * e_un[s]
      b_oc <- (a_un * xpsi[1] + a_oc * xpsi[2]) * e_oc[s]
      a_un <- b_un
      a_oc <- b_oc
    }
    pr[, k] <- psi * (a_un + a_oc)
  }
  # An all-zero history also arises whenever the route is unoccupied.
  pr[, 1] <- pr[, 1] + (1 - psi)
  return(pr)
}
|
# Model-based clustering (mclust) of the olive oil data, compared against
# k-means and against the known region labels via the adjusted Rand index.
oliveOil <- read.csv("https://www.scss.tcd.ie/~arwhite/Teaching/STU33011/olive.csv")
# Columns 3:10 hold the fatty-acid measurements used for clustering
x = oliveOil[ ,3:10]
library(mclust)
?mclust
plot(Mclust(faithful))
## Get 4 options
# 1: Want to optimize BIC for this version, so higher value better ( Note Different signs in lecture notes)
# 2: 
fit = Mclust(faithful)
fit ##Tells you best model: is EEE, 3. EEE tells us about like whether equal variance are assumed, are datapoints assumed to be ind etc. And 3 is the number of clusters
plot(Mclust(faithful))
# 2: Gives classification plot
# 3: The uncertainty, can tell which are well seperated and obv belong to one cluster and which dont (i.e. hard to cluster)
# 4: Gives prob density contour lines 
# Fit mixture models with 1 to 15 components on the olive-oil measurements
fit = Mclust(x, G=1:15)
fit
# k-means with 9 clusters and 20 random restarts for comparison
fitk = kmeans(x, 9 , nstart =20)
fitk
# NOTE(review): $cl relies on R's partial matching of list names
# (fitk$cluster / fit$classification) - works, but fragile.
# Agreement between the two clusterings, and of each with the true regions
adjustedRandIndex(fitk$cl, fit$cl)
adjustedRandIndex(fitk$cl, oliveOil$Region)
adjustedRandIndex(fit$cl, oliveOil$Region)
| /MvLa Further Clustering Methods.R | no_license | haskinsm/R-projects | R | false | false | 887 | r | oliveOil <- read.csv("https://www.scss.tcd.ie/~arwhite/Teaching/STU33011/olive.csv")
x = oliveOil[ ,3:10]
library(mclust)
?mclust
plot(Mclust(faithful))
## Get 4 options
# 1: Want to optimize BIC for this version, so higher value better ( Note Different signs in lecture notes)
# 2:
fit = Mclust(faithful)
fit ##Tells you best model: is EEE, 3. EEE tells us about like whether equal variance are assumed, are datapoints assumed to be ind etc. And 3 is the number of clusters
plot(Mclust(faithful))
# 2: Gives classification plot
# 3: The uncertainty, can tell which are well seperated and obv belong to one cluster and which dont (i.e. hard to cluster)
# 4: Gives prob density contour lines
fit = Mclust(x, G=1:15)
fit
fitk = kmeans(x, 9 , nstart =20)
fitk
adjustedRandIndex(fitk$cl, fit$cl)
adjustedRandIndex(fitk$cl, oliveOil$Region)
adjustedRandIndex(fit$cl, oliveOil$Region)
|
# various utility functions
#
# peter d smits
# psmits@uchicago.edu
###############################################################################
library(pacman)
p_load(caret)
#' Create per-column data partitions
#'
#' Restricts \code{data} to the columns named in \code{gr} and builds one
#' partition (via \code{caret::createDataPartition}) for each kept column.
#'
#' @param gr character vector with the column names to partition on
#' @param data data frame or matrix containing those columns
#' @param p proportion of rows assigned to the partition
#' @return the column-wise output of \code{caret::createDataPartition}
data.maker <- function(gr, data, p = 0.75) {
  selected <- data[, colnames(data) %in% gr]
  apply(selected, 2, createDataPartition, p = p, list = FALSE)
}
#' Build a list of nested model formulas
#'
#' Given predictor names \code{vari} and a response \code{resp}, returns a
#' list of formulas of increasing size: the i:th formula regresses
#' \code{resp} on the first i predictors, e.g. \code{y ~ a}, \code{y ~ a + b}, ...
#'
#' @param vari character vector of predictor names
#' @param resp name of the response variable
#' @return list of \code{formula} objects, one per prefix of \code{vari}
make.form <- function(vari, resp) {
  form <- vector(mode = 'list', length = length(vari))
  # seq_along/seq_len are safe when vari is empty (seq(length(x)) is not)
  for (ii in seq_along(vari)) {
    # Right-hand side: the first ii predictors joined with '+'
    rhs <- paste(vari[seq_len(ii)], collapse = '+')
    form[[ii]] <- as.formula(paste0(resp, ' ~ ', rhs))
  }
  form
}
multi.train <- function(form, data, seed = 1, ...) {
  # Fit one caret model per formula, all on the same data set.
  #
  # Args
  #   form: list of formula objects
  #   data: data frame shared by every fit
  #   seed: random seed, set once before fitting
  #   ...: arguments (matched exactly) passed on to caret's train.formula
  #
  # Returns:
  #   list of model training results, in the order of `form`
  set.seed(seed)
  lapply(form, train.formula, data = data, ...)
}
flatten.next <- function(xx) {
  # Flatten exactly one level of nesting: elements of `xx` that are
  # themselves lists are spliced into the result, everything else is kept
  # as-is. Non-list elements come first (original order), followed by the
  # contents of each nested list.
  nested_sel <- vapply(xx, function(el) inherits(el, "list"), logical(1))
  plain <- xx[!nested_sel]
  nested <- xx[nested_sel]

  out <- list()
  for (el in plain) {
    out[[length(out) + 1L]] <- el
  }
  for (sub in nested) {
    for (el in sub) {
      out[[length(out) + 1L]] <- el
    }
  }

  # Spliced elements inherit their parent's name; unlist() disambiguates
  # repeated names by appending a running number (e.g. "b1", "b2").
  nested_names <- names(unlist(Map(function(nm, n) rep(nm, n),
                                   nm = names(nested),
                                   n = lapply(nested, length))))
  names(out) <- c(names(plain), nested_names)
  out
}
| /R/helper07_support_functions.r | no_license | psmits/shape_identification | R | false | false | 1,686 | r | # various utility functions
#
# peter d smits
# psmits@uchicago.edu
###############################################################################
library(pacman)
p_load(caret)
data.maker <- function(gr, data, p = 0.75) {
nd <- data[, colnames(data) %in% gr]
out <- apply(nd, 2, createDataPartition,
p = p, list = FALSE)
out
}
make.form <- function(vari, resp) {
form <- vector(mode = 'list', length = length(vari))
for (ii in seq(length(vari))) {
form[[ii]] <- as.formula(paste(paste(resp, ' ~ ', collapse = ''),
paste(vari[seq(ii)],
collapse = '+')))
}
form
}
multi.train <- function(form, data, seed = 1, ...) {
# train across multiple formulas with the same data set
#
# Args
# form: list of formula objects
# data: data frame
# seed: random seed
# ...: arguments (matched exactly) for train.formula from caret
#
# Returns:
# list of model training results
set.seed(seed)
rr <- lapply(form, train.formula, data = data, ...)
rr
}
flatten.next <- function(xx) {
ll <- Filter(function(x) 'list' %in% class(x), xx)
uu <- Filter(function(x) !('list' %in% class(x)), xx)
smod <- list()
for(kk in seq(length(uu))) {
smod[[length(smod) + 1]] <- uu[[kk]]
}
for(ii in seq(length(ll))) {
for(jj in seq(length(ll[[ii]]))) {
smod[[length(smod) + 1]] <- ll[[ii]][[jj]]
}
}
uu.nam <- names(uu)
ll.nam <- names(unlist(Map(function(x, n) rep(x, n),
x = names(ll),
n = lapply(ll, length))))
nam <- c(uu.nam, ll.nam)
names(smod) <- nam
smod
}
|
# Course project plot 3: energy sub-metering on 2007-02-01/02 from the UCI
# "Individual household electric power consumption" data set.
# NOTE(review): setwd() in a script is fragile - consider relative paths.
library(lubridate)
setwd("~/R")
## download the data zip file and unzips it
file_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(file_url, "dataset.zip")
unzip("dataset.zip")
## reads the data file and chooses the records belonging to the specific dates
## (missing values are coded as "?" in the raw file, dates as d/m/yyyy)
data <- "household_power_consumption.txt"
hhpc <- read.table(data, header=TRUE, sep=";", stringsAsFactors = FALSE, dec=".", na="?")
hhpc2 <- hhpc[hhpc$Date %in% c("1/2/2007","2/2/2007") ,]
## Re-formats the date into 'date' format
## (combine the Date and Time columns into a single POSIXct timestamp)
hhpc2$newcol <- paste(hhpc2$Date, hhpc2$Time, sep=" ")
hhpc2$newcol <- dmy_hms(hhpc2$newcol)
## creates the graph: all three sub-metering series on one time axis
hhpc2$Sub_metering_1 <- as.numeric(hhpc2$Sub_metering_1)
hhpc2$Sub_metering_2 <- as.numeric(hhpc2$Sub_metering_2)
hhpc2$Sub_metering_3 <- as.numeric(hhpc2$Sub_metering_3)
plot(hhpc2$newcol, hhpc2$Sub_metering_1, type="l", ylab="Energy Submetering", xlab="")
lines(hhpc2$newcol, hhpc2$Sub_metering_2, type="l", col="red")
lines(hhpc2$newcol, hhpc2$Sub_metering_3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, bty="o", cex=0.75, col=c("black", "red", "blue"))
## saves the plot on the screen to a png file
dev.copy(png, file = "plot3.png", width = 480, height =480)
## closes the graphics device
dev.off()
| /plot3.R | no_license | sypdatame/ExData_Plotting1 | R | false | false | 1,403 | r |
library(lubridate)
setwd("~/R")
## download the data zip file and unzips it
file_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(file_url, "dataset.zip")
unzip("dataset.zip")
## reads the data file and chooses the records belonging to the specific dates
data <- "household_power_consumption.txt"
hhpc <- read.table(data, header=TRUE, sep=";", stringsAsFactors = FALSE, dec=".", na="?")
hhpc2 <- hhpc[hhpc$Date %in% c("1/2/2007","2/2/2007") ,]
## Re-formats the date into 'date' format
hhpc2$newcol <- paste(hhpc2$Date, hhpc2$Time, sep=" ")
hhpc2$newcol <- dmy_hms(hhpc2$newcol)
## creates the graph
hhpc2$Sub_metering_1 <- as.numeric(hhpc2$Sub_metering_1)
hhpc2$Sub_metering_2 <- as.numeric(hhpc2$Sub_metering_2)
hhpc2$Sub_metering_3 <- as.numeric(hhpc2$Sub_metering_3)
plot(hhpc2$newcol, hhpc2$Sub_metering_1, type="l", ylab="Energy Submetering", xlab="")
lines(hhpc2$newcol, hhpc2$Sub_metering_2, type="l", col="red")
lines(hhpc2$newcol, hhpc2$Sub_metering_3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, bty="o", cex=0.75, col=c("black", "red", "blue"))
## saves the plot on the screen to a png file
dev.copy(png, file = "plot3.png", width = 480, height =480)
## closes the graphics device
dev.off()
|
# Auto-extracted example code for Rnightlights:::ctryShpLyrName2Num.
# The example call is wrapped in "Not run" and is not executed by default.
library(Rnightlights)
### Name: ctryShpLyrName2Num
### Title: Get the integer number of the layer.
### Aliases: ctryShpLyrName2Num
### ** Examples
## Not run: 
##D Rnightlights:::ctryShpLyrName2Num("KEN", "KEN_adm1") #returns 1
## End(Not run)
| /data/genthat_extracted_code/Rnightlights/examples/ctryShpLyrName2Num.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 254 | r | library(Rnightlights)
### Name: ctryShpLyrName2Num
### Title: Get the integer number of the layer.
### Aliases: ctryShpLyrName2Num
### ** Examples
## Not run:
##D Rnightlights:::ctryShpLyrName2Num("KEN", "KEN_adm1") #returns 1
## End(Not run)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/find_dp_cautious.R
\name{find_dp_cautious}
\alias{find_dp_cautious}
\title{Identify the best policy which harvests at P percent of optimum harvest}
\usage{
find_dp_cautious(SDP_Mat, x_grid, h_grid, OptTime, xT, profit, delta,
reward = 0, P = 1)
}
\arguments{
\item{SDP_Mat}{the stochastic transition matrix at each h value}
\item{x_grid}{the discrete values allowed for the population size, x}
\item{h_grid}{the discrete values of harvest levels to optimize over}
\item{OptTime}{the stopping time}
\item{xT}{the boundary condition population size at OptTime}
\item{profit}{the cost/profit function, a function of harvested level}
\item{delta}{the exponential discounting rate}
\item{reward}{the profit for finishing with >= Xt fish at the end
(i.e. enforces the boundary condition)}
\item{P}{fraction of optimal harvest to use as the policy.}
}
\value{
list containing the matrices D and V. D is an x_grid by OptTime
matrix with the indices of h_grid giving the optimal h at each value x
as the columns, with a column for each time.
V is a matrix of x_grid by x_grid, which is used to store the value
function at each point along the grid at each point in time.
The returned V gives the value matrix at the first (last) time.
}
\description{
Identify the best policy which harvests at P percent of optimum harvest
}
| /man/find_dp_cautious.Rd | permissive | cboettig/pdg_control | R | false | false | 1,419 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/find_dp_cautious.R
\name{find_dp_cautious}
\alias{find_dp_cautious}
\title{Identify the best policy which harvests at P percent of optimum harvest}
\usage{
find_dp_cautious(SDP_Mat, x_grid, h_grid, OptTime, xT, profit, delta,
reward = 0, P = 1)
}
\arguments{
\item{SDP_Mat}{the stochastic transition matrix at each h value}
\item{x_grid}{the discrete values allowed for the population size, x}
\item{h_grid}{the discrete values of harvest levels to optimize over}
\item{OptTime}{the stopping time}
\item{xT}{the boundary condition population size at OptTime}
\item{profit}{the cost/profit function, a function of harvested level}
\item{delta}{the exponential discounting rate}
\item{reward}{the profit for finishing with >= Xt fish at the end
(i.e. enforces the boundary condition)}
\item{P}{fraction of optimal harvest to use as the policy.}
}
\value{
list containing the matrices D and V. D is an x_grid by OptTime
matrix with the indices of h_grid giving the optimal h at each value x
as the columns, with a column for each time.
V is a matrix of x_grid by x_grid, which is used to store the value
function at each point along the grid at each point in time.
The returned V gives the value matrix at the first (last) time.
}
\description{
Identify the best policy which harvests at P percent of optimum harvest
}
|
# This file contains all the helper functions that the outer exported
# functions utilize. I try to have a pr at the start of the name for all
# the private functions.
#
# Author: max
###############################################################################
#' Looks for unique rowname match without grep
#'
#' Since a rowname may contain characters reserved by regular
#' expressions it is easier to find rows by comparing the start of
#' each name to the variable name, while excluding rows that belong
#' to another variable sharing the same name stem; e.g. DM and
#' DM_COMP - the stem DM would otherwise match both sets of rows.
#'
#' @param rnames A vector with the rownames that are looked for
#' @param vn The variable name that is of interest
#' @param vars A vector with all the names and the potentially competing names
#' @return integer A vector containing the position of the matches
#'
#' TODO: remove this function in favor of the more powerful prMapVariable2Name
#' @keywords internal
prFindRownameMatches <- function(rnames, vn, vars){
  # Rows whose name begins with the variable name are candidate matches;
  # startsWith() is the base-R equivalent of the old substr() comparison.
  matches <- which(startsWith(rnames, vn))

  # If other variables share the same name stem their rows matched above
  # as well and have to be excluded again
  same_stem <- startsWith(vars, vn)
  if (sum(same_stem) > 1){
    competing_vars <- vars[vars != vn & same_stem]
    competing_matches <- integer(0)
    for (comp_vn in competing_vars){
      competing_matches <- c(competing_matches,
                             which(startsWith(rnames, comp_vn)))
    }
    # Clean out competing matches
    matches <- matches[!matches %in% competing_matches]
  }
  return(matches)
}
#' Get model outcome
#'
#' Extracts the outcome (the left-hand side of the model formula) from a
#' fitted model, either from the model's own
#' \code{\link[stats]{model.frame}()} or from a user-supplied data set.
#' Throws an error if unable to find the outcome.
#'
#' @param model The fitted model
#' @param mf The dataset that the model is fitted to - if missing it
#'  uses the \code{\link[stats]{model.frame}()} dataset. This can cause
#'  length issues as there may be variables that are excluded from the
#'  model for different reasons.
#' @return vector
#'
#' @keywords internal
prExtractOutcomeFromModel <- function(model, mf){
  # The second element of the formula is its left-hand side
  lhs <- as.formula(model)[[2]]
  if (missing(mf)) {
    mf <- model.frame(model)
    outcome <- mf[, names(mf) == deparse(lhs)]
  } else {
    outcome <- eval(lhs, envir = mf)
  }

  if (is.null(outcome)) {
    stop("Could not identify the outcome: ", deparse(lhs),
         " among the model.frame variables: '",
         paste(names(mf), collapse = "', '"), "'")
  }

  # Only use the status when used for survival::Surv objects
  if (inherits(outcome, "Surv")) {
    return(outcome[, "status"])
  }
  outcome
}
#' Get model data.frame
#'
#' Returns the raw variables from the original data
#' frame using the \code{\link[stats]{get_all_vars}()}
#' but with the twist that it also performs any associated
#' subsetting based on the model's \code{\link[base]{subset}()} argument.
#'
#' @param x The fitted model.
#' @return data.frame
#'
#' @keywords internal
prGetModelData <- function(x){
  # Extract the variable names
  # NOTE(review): true_vars is computed but never used below - confirm
  # whether it can be removed
  true_vars <- all.vars(as.formula(x))
  # Get the environment of the formula so the model's original `data`
  # argument can be re-evaluated in the scope it was created in
  env <- environment(as.formula(x))
  data <- eval(x$call$data,
               envir = env)
  # The data frame with the raw (untransformed) model variables
  mf <- get_all_vars(as.formula(x),
                     data=data)
  if (!is.null(x$call$subset)){
    if (!is.null(data)){
      # As we don't know if the subsetting argument
      # contained data from the data frame or the environment
      # we need this additional check
      mf <- tryCatch(mf[eval(x$call$subset,
                             envir = data,
                             enclos = env), ],
                     error = function(e){
                       stop("Could not deduce the correct subset argument when extracting the data. ", e)
                     })
    }else{
      # No data frame was supplied - the subset expression can only
      # refer to the formula's environment
      mf <- mf[eval(x$call$subset,
                    envir=env), ]
    }
  }
  return(mf)
}
#' Get the models variables
#'
#' This function extract the modelled variables. Any interaction
#' terms are removed as those should already be represented by
#' the individual terms.
#'
#' @param model A model fit
#' @param remove_splines If splines, etc. should be cleaned
#'  from the variables as these no longer are "pure" variables
#' @param remove_interaction_vars If interaction variables are
#'  not interesting then these should be removed. Often in
#'  the case of \code{\link{printCrudeAndAdjustedModel}()} it is impossible
#'  to properly show interaction variables and it's better to show
#'  these in a separate table
#' @param add_intercept Adds the intercept if it exists
#' @return vector with names
#'
#' @importFrom stringr str_split
#' @importFrom stringr str_trim
#' @keywords internal
prGetModelVariables <- function(model,
                                remove_splines = TRUE,
                                remove_interaction_vars=FALSE,
                                add_intercept = FALSE){
  # We need the call names in order to identify
  # - interactions
  # - functions such as splines, I()
  if (inherits(model, "nlme")){
    vars <- attr(model$fixDF$terms, "names")
  }else{
    vars <- attr(model$terms, "term.labels")
  }
  # Pull out strata()/strat() terms - they are kept in a separate attribute
  strata <- NULL
  if (any(grepl("^strat[a]{0,1}\\(", vars))){
    strata <- vars[grep("^strat[a]{0,1}\\(", vars)]
    vars <- vars[-grep("^strat[a]{0,1}\\(", vars)]
  }
  # Same treatment for cluster() terms
  cluster <- NULL
  if (any(grepl("^cluster{0,1}\\(", vars))){
    cluster <- vars[grep("^cluster{0,1}\\(", vars)]
    vars <- vars[-grep("^cluster{0,1}\\(", vars)]
  }
  # Fix for bug in cph and coxph: cluster() may only be visible in the
  # original call's formula, so look there when none was found above
  if (is.null(cluster) &&
      inherits(model, c("cph", "coxph"))){
    alt_terms <- stringr::str_trim(strsplit(deparse(model$call$formula[[3]]),
                                            "+", fixed = TRUE)[[1]])
    if (any(grepl("^cluster{0,1}\\(", alt_terms))){
      cluster <- alt_terms[grep("^cluster{0,1}\\(", alt_terms)]
    }
  }
  # Remove I() as these are not true variables
  unwanted_vars <- grep("^I\\(.*$", vars)
  if (length(unwanted_vars) > 0){
    attr(vars, "I() removed") <- vars[unwanted_vars]
    vars <- vars[-unwanted_vars]
  }
  # Terms that look like function calls, e.g. rcs(age, 3) or ns(x, df = 2)
  pat <- "^[[:alpha:]\\.]+[^(]+\\(.*$"
  fn_vars <- grep(pat, vars)
  if(length(fn_vars) > 0){
    if (remove_splines){
      # Remove splines and other functions
      attr(vars, "functions removed") <- vars[fn_vars]
      vars <- vars[-fn_vars]
    }else{
      # Clean the variable names into proper names;
      # the assumption here is that the real variable
      # name is the first one in the parameters
      pat <- "^[[:alpha:]\\.]+.*\\(([^,)]+).*$"
      vars[fn_vars] <- sub(pat, "\\1", vars[fn_vars])
    }
  }
  # Remove interaction terms as these are not variables
  int_term <- "^.+:.+$"
  in_vars <- grep(int_term, vars)
  if (length(in_vars) > 0){
    if (remove_interaction_vars){
      # Also drop the main-effect variables that take part in an interaction
      in_vn <- unlist(str_split(vars[in_vars], ":"),
                      use.names = FALSE)
      in_vars <- unique(c(in_vars, which(vars %in% in_vn)))
    }
    attr(vars, "interactions removed") <- vars[in_vars]
    vars <- vars[-in_vars]
  }
  if (add_intercept &&
      grepl("intercept", names(coef(model))[1], ignore.case = TRUE)){
    vars <- c(names(coef(model))[1],
              vars)
  }
  # unique() strips attributes, so re-attach the bookkeeping attributes
  clean_vars <- unique(vars)
  attributes(clean_vars) <- attributes(vars)
  if (!is.null(strata))
    attr(clean_vars, "strata") <- strata
  if (!is.null(cluster))
    attr(clean_vars, "cluster") <- cluster
  return(clean_vars)
}
#' Get statistics according to the type
#'
#' A simple function applied by the \code{\link[Gmisc]{getDescriptionStatsBy}()}
#' for the total column. This function is also used by \code{\link{printCrudeAndAdjustedModel}()}
#' in case of a basic linear regression is asked for a raw stat column
#'
#' @param x The variable that we want the statistics for
#' @param show_perc If this is a factor/proportion variable then we
#'  might want to show the percentages
#' @param html If the output should be in html or LaTeX formatting
#' @param digits Number of decimal digits
#' @param numbers_first If number is to be prior to the percentage
#' @param useNA If missing should be included
#' @param show_all_values This is by default false as for instance if there is
#'  no missing and there is only one variable then it is most sane to only show
#'  one option as the other one will just be a complement to the first. For instance
#'  sex - if you know gender then automatically you know the distribution of the
#'  other sex as it's 100 \% - other \%. NOTE: the filtering that this flag
#'  once controlled is currently disabled, so the argument has no effect; it
#'  is retained for interface compatibility.
#' @param continuous_fn A function for describing continuous variables
#'  defaults to \code{\link{describeMean}()}
#' @param prop_fn A function for describing proportions, defaults to
#'  the factor function
#' @param factor_fn A function for describing factors, defaults to
#'  \code{\link{describeFactors}()}
#' @param percentage_sign If you want to suppress the percentage sign you
#'  can set this variable to FALSE. You can also choose something else that
#'  the default \% if you so wish by setting this variable.
#' @return A matrix or a vector depending on the settings
#'
#' TODO: Use the Gmisc function instead of this copy
#'
#' @importFrom Gmisc describeMean
#' @importFrom Gmisc describeFactors
#' @keywords internal
prGetStatistics <- function(x,
                            show_perc = FALSE,
                            html = TRUE,
                            digits = 1,
                            numbers_first = TRUE,
                            useNA = "no",
                            show_all_values = FALSE,
                            continuous_fn = describeMean,
                            factor_fn = describeFactors,
                            prop_fn = factor_fn,
                            percentage_sign = TRUE)
{
  # Bug fix: the default used to be `percentage_sign = percentage_sign`,
  # a self-referential promise that errored ("promise already under
  # evaluation") whenever the argument was not explicitly supplied.
  # TRUE matches the Gmisc describeFactors() default.
  useNA <- prConvertShowMissing(useNA)
  if (is.factor(x) ||
        is.logical(x) ||
        is.character(x)){
    if (length(unique(x)) == 2){
      # Dichotomous variable
      if (show_perc){
        total_table <- prop_fn(x,
                               html=html,
                               digits=digits,
                               number_first=numbers_first,
                               useNA = useNA,
                               percentage_sign = percentage_sign)
      }else{
        total_table <- table(x, useNA=useNA)
        names(total_table)[is.na(names(total_table))] <- "Missing"
        # An earlier version dropped all but the reference level here;
        # that filter was deliberately disabled (the condition contained
        # `&& FALSE`) and the dead branch has now been removed.
      }
    } else {
      if (show_perc)
        total_table <- factor_fn(x,
                                 html=html,
                                 digits=digits,
                                 number_first=numbers_first,
                                 useNA = useNA,
                                 percentage_sign = percentage_sign)
      else{
        total_table <- table(x, useNA=useNA)
        names(total_table)[is.na(names(total_table))] <- "Missing"
      }
    }
  }else{
    total_table <- continuous_fn(x,
                                 html=html, digits=digits,
                                 number_first=numbers_first,
                                 useNA = useNA)
    # If a continuous variable has two rows then it's assumed that the second is the missing
    if (length(total_table) == 2 &&
          show_perc == FALSE)
      total_table[2] <- sum(is.na(x))
  }
  return(total_table)
}
#' Gets the boundaries for a survival fit
#'
#' @param fit A survival model of either competing risk regression or cox regression type
#' @param conf.int The interval of interest 0-1, see levels in confint()
#' @param exp If the value should be in exponential form (default)
#' @return A matrix with the columns:
#'  \item{beta}{The estimated coefficient}
#'  \item{p_val}{P-value}
#'  \item{low}{The lower confidence interval}
#'  \item{high}{The upper confidence interval}
#'  \item{order}{A column that later can be used in ordering}
#'
#' @keywords internal
prGetFpDataFromSurvivalFit <- function (fit,
                                        conf.int = 0.95,
                                        exp = TRUE){
  # Wald Z statistic and p-value - the same method as in
  # print.cph -> prModFit from the rms package
  Z <- coef(fit)/sqrt(diag(fit$var))
  p_val <- signif(1 - pchisq(Z^2, 1), 5)
  # Bug fix: `order` was previously sized with length(beta), but no `beta`
  # object exists at this point - the name silently resolved to the
  # base::beta function (length 1) and relied on cbind() recycling.
  # Size it from the model coefficients instead.
  order <- rep(-1, length(coef(fit)))
  ci <- confint(fit, level=conf.int)
  if (exp){
    ret_matrix <- cbind(
      beta=exp(coef(fit)),
      p_val=p_val,
      low=exp(ci[,1]),
      high=exp(ci[,2]),
      order=order)
  }else{
    ret_matrix <- cbind(
      beta=coef(fit),
      p_val=p_val,
      low=ci[,1],
      high=ci[,2],
      order=order)
  }
  # Set the names of the rows
  rownames(ret_matrix) <- names(fit$coef)
  return(ret_matrix)
}
#' Gets the boundaries for a GLM fit that is poisson or quasipoisson based
#'
#' @param glm.fit A regression model
#' @param conf.int The interval of interest 0-1, see levels in confint()
#' @param exp If the value should be in exponential form (default)
#' @return A matrix with the columns:
#'  \item{beta}{The estimated coefficient}
#'  \item{p_val}{P-value}
#'  \item{low}{The lower confidence interval}
#'  \item{high}{The upper confidence interval}
#'  \item{order}{A column that later can be used in ordering}
#'
#' @keywords internal
prGetFpDataFromGlmFit <- function(glm.fit,
                                  conf.int = 0.95,
                                  exp = TRUE){
  summary_glm <- summary.glm(glm.fit)
  # The p-value column name differs between quasipoisson (t-based) and
  # poisson (z-based) summaries
  if ("quasipoisson" %in% glm.fit$family){
    summary_p_val <- summary_glm$coefficients[,colnames(summary_glm$coefficients) == "Pr(>|t|)"]
  }else if ("poisson" %in% glm.fit$family){
    summary_p_val <- summary_glm$coefficients[,colnames(summary_glm$coefficients) == "Pr(>|z|)"]
  }else{
    stop("Type of analysis not prepared!")
  }
  order = rep(-1, length(glm.fit$coefficients))
  ci <- confint(glm.fit, level=conf.int)
  if (exp){
    ret_matrix <- cbind(
      beta=exp(coef(glm.fit)),
      p_val=summary_p_val,
      low=exp(ci[,1]),
      high=exp(ci[,2]),
      order=order)
  }else{
    ret_matrix <- cbind(
      beta=coef(glm.fit),
      p_val=summary_p_val,
      low=ci[,1],
      high=ci[,2],
      order=order)
  }
  # Set the names of the rows
  rownames(ret_matrix) <- names(glm.fit$coefficients)
  # Remove the intercept. Bug fix: drop = FALSE keeps the result a matrix
  # even when only one predictor remains - without it the subset silently
  # collapsed to a named vector for single-covariate models.
  ret_matrix <- ret_matrix[names(glm.fit$coefficients) != "(Intercept)", , drop = FALSE]
  return(ret_matrix)
}
#' Gets the confidence interval, p-values,
#' coefficients from a survival object
#'
#' @param model_fit A regression fit from CRR, coxph, cph object
#' @param conf.int The interval of interest 0-1, see levels in confint()
#' @param exp If the value should be in exponential form (default)
#' @return A matrix with the columns:
#'  \item{beta}{The estimated coefficient}
#'  \item{p_val}{P-value}
#'  \item{low}{The lower confidence interval}
#'  \item{high}{The upper confidence interval}
#'  \item{order}{A column that later can be used in ordering}
#'
#' @keywords internal
prGetFpDataFromFit <- function(model_fit,
                               conf.int = 0.95,
                               exp = TRUE){
  # Dispatch on fit class; inherits() is the idiomatic equivalent of the
  # previous any(class(x) %in% ...). The local variable is also renamed
  # so it no longer shadows stats::sd.
  if (inherits(model_fit, c("coxph", "crr"))){
    fp_data <- prGetFpDataFromSurvivalFit(fit = model_fit, conf.int = conf.int, exp = exp)
  } else if (inherits(model_fit, "glm")){
    fp_data <- prGetFpDataFromGlmFit(glm.fit = model_fit, conf.int = conf.int, exp = exp)
  } else {
    stop(paste("Unknown fit class type:", class(model_fit)))
  }
  return(fp_data)
}
#' A function for converting a useNA variable
#'
#' Translates the logical shorthand into the string options accepted by
#' \code{table(..., useNA = useNA)} and validates the result. Throws an
#' error if the value is not compatible.
#'
#' @param useNA Boolean or "no", "ifany", "always"
#' @return string
#'
#' @keywords internal
prConvertShowMissing <- function(useNA){
  # Map the logical shorthand onto table()'s keywords
  if (useNA == FALSE || useNA == "no") {
    useNA <- "no"
  } else if (useNA == TRUE) {
    useNA <- "ifany"
  }

  valid_choices <- c("no", "ifany", "always")
  if (!useNA %in% valid_choices) {
    stop(sprintf("You have set an invalid option for useNA variable, '%s' ,it should be boolean or one of the options: no, ifany or always.", useNA))
  }
  useNA
}
#' A function that tries to resolve what variable corresponds to what row
#'
#' As both the \code{\link{getCrudeAndAdjustedModelData}()} and the
#' \code{\link{printCrudeAndAdjustedModel}()} need to know exactly
#' what name from the \code{\link[stats]{coef}()}/\code{\link[rms]{summary.rms}()}
#' correspond to we have for generalizeability this rather elaborate function.
#'
#' @param var_names The variable names that are sought after
#' @param available_names The names that are available to search through
#' @param data The data set that is sought after
#' @param force_match Whether all variables need to be identified or not.
#'  E.g. you may only want to use some variables and already pruned the
#'  \code{available_names} and therefore wont have matches. This is the
#'  case when \code{\link{getCrudeAndAdjustedModelData}()} has been used together
#'  with the \code{var_select} argument.
#' @return \code{list} Returns a list with each element has the corresponding
#'  variable name and a subsequent list with the parameters \code{no_rows}
#'  and \code{location} indiciting the number of rows corresponding to that
#'  element and where those rows are located. For factors the list also contains
#'  \code{lvls} and \code{no_lvls}.
#' @keywords internal
#' @import utils
prMapVariable2Name <- function(var_names, available_names,
                               data, force_match = TRUE){
  if (any(duplicated(available_names)))
    stop("You have non-unique names. You probably need to adjust",
         " (1) variable names or (2) factor labels.")
  # Start with figuring out how many rows each variable occupies:
  # one row for continuous variables/intercept, no. levels - 1 for factors
  var_data <- list()
  for (name in var_names){
    if (grepl("intercept", name, ignore.case = TRUE)){
      var_data[[name]] <-
        list(no_rows = 1)
    }else if (is.factor(data[,name])){
      var_data[[name]] <-
        list(lvls = levels(data[,name]))
      # Sometimes due to subsetting some factors don't exist
      # we therefore need to remove those not actually in the dataset
      var_data[[name]]$lvls <-
        var_data[[name]]$lvls[var_data[[name]]$lvls %in%
                                as.character(unique(data[, name][!is.na(data[, name])]))]
      var_data[[name]][["no_lvls"]] <- length(var_data[[name]]$lvls)
      var_data[[name]][["no_rows"]] <- length(var_data[[name]]$lvls) - 1
    }else{
      var_data[[name]] <-
        list(no_rows = 1)
    }
  }
  # A function for stripping the name and the additional information
  # from the available name in order to get the cleanest form;
  # the fewer characters that remain, the better the name explains the row
  getResidualCharacters <- function(search, conflicting_name){
    residual_chars <- substring(conflicting_name, nchar(search) + 1)
    if (!is.null(var_data[[search]]$lvls)){
      best_resid <- residual_chars
      for (lvl in var_data[[search]]$lvls){
        new_resid <- sub(lvl, "", residual_chars,
                         fixed = TRUE)
        if (nchar(new_resid) < nchar(best_resid)){
          best_resid <- new_resid
          if (nchar(new_resid) == 0)
            break;
        }
      }
      residual_chars <- best_resid
    }
    return(residual_chars)
  }
  matched_names <- c()
  matched_numbers <- c()
  org_available_names <- available_names
  # Start with simple non-factored variables as these should give a single-line match
  # then continue with the longest named variable
  for (name in var_names[order(sapply(var_data, function(x) is.null(x$lvls)),
                               nchar(var_names), decreasing = TRUE)]){
    matches <- which(name == substr(available_names, 1, nchar(name)))
    if (length(matches) == 0){
      if (force_match)
        stop("Sorry but the function could not find a match for '", name , "'",
             " among any of the available names: '", paste(org_available_names,
                                                           collapse="', '") ,"'")
    }else if(length(matches) == 1){
      if (var_data[[name]]$no_rows != 1)
        stop("Expected more than one match for varible '", name, "'",
             " the only positive match was '", available_names[matches], "'")
    }else if (length(var_names) > length(matched_names) + 1){
      if (is.null(var_data[[name]]$lvls) &&
            sum(name == available_names) == 1){
        # Check if the searched for variable is a non-factor variable
        # if so then match if there is a perfect match
        matches <- which(name == available_names)
      }else if (length(var_names) > length(matched_names) + 1){
        # Check that there is no conflicting match
        conflicting_vars <- var_names[var_names != name &
                                        !var_names %in% matched_names]
        possible_conflicts <- c()
        for (conf_var in conflicting_vars){
          # NOTE(review): the loop iterates over conf_var but the body uses
          # the full conflicting_vars vector in nchar(), so substr() widths
          # recycle across available_names - confirm whether nchar(conf_var)
          # was intended here.
          possible_conflicts <-
            union(possible_conflicts,
                  which(substr(available_names, 1, nchar(conflicting_vars)) %in%
                          conflicting_vars))
        }
        conflicts <- intersect(possible_conflicts, matches)
        if (length(conflicts) > 0){
          # Keep only conflicting variables that actually prefix-match a name
          conflicting_vars <- conflicting_vars[sapply(conflicting_vars,
                                                      function(search)
                                                        any(search == substr(available_names, 1, nchar(search))))]
          for (conflict in conflicts){
            # We will try to find a better match that leaves fewer "residual characters"
            # than what we started with
            start_res_chars <- getResidualCharacters(name, available_names[conflict])
            best_match <- NULL
            best_conf_name <- NULL
            for (conf_name in conflicting_vars){
              resid_chars <- getResidualCharacters(conf_name, available_names[conflict])
              if (is.null(best_match) ||
                    nchar(best_match) > nchar(resid_chars)){
                best_match <- resid_chars
                best_conf_name <- conf_name
              }
            }
            if (nchar(start_res_chars) == nchar(best_match)){
              stop("The software can't decide which name belongs to which variable.",
                   " The variable that is searched for is '", name, "'",
                   " and there is a conflict with the variable '", best_conf_name ,"'.",
                   " The best match for '", name, "' leaves: '", start_res_chars, "'",
                   " while the conflict '", best_conf_name ,"' leaves: '", best_match ,"'",
                   " when trying to match the name: '", available_names[conflict] ,"'")
            }else if(nchar(start_res_chars) > nchar(best_match)){
              # Now remove the matched row if we actually found a better match
              matches <- matches[matches != conflict]
            }
          }
        }
      }
      if (length(matches) == 0){
        stop("Could not identify the rows corresponding to the variable '", name ,"'",
             " this could possibly be to similarity between different variable names",
             " and factor levels. Try to make sure that all variable names are unique",
             " the variables that are currently looked for are:",
             " '", paste(var_names,
                         collapse="', '"),
             "'.")
      }
    }
    # Check that multiple matches are continuous, everything else is suspicious
    if (length(matches) > 1){
      matches <- matches[order(matches)]
      if (any(1 != tail(matches, length(matches) - 1) -
                head(matches, length(matches) -1)))
        stop("The variable '", name, "' failed to provide an adequate",
             " consequent number of matches, the names matched are located at:",
             " '", paste(matches, collapse="', '"), "'")
    }
    # Since we remove the matched names we need to look back at the original and
    # find the exact match in order to deduce the true number
    true_matches <- which(org_available_names %in%
                            available_names[matches])
    # Avoid accidentally rematching
    true_matches <- setdiff(true_matches, matched_numbers)
    var_data[[name]][["location"]] <- true_matches
    # Update the loop vars
    if (length(matches) > 0)
      available_names <- available_names[-matches]
    matched_names <- c(matched_names, name)
    matched_numbers <- c(matched_numbers, true_matches)
    if (length(var_data[[name]][["location"]]) == 0 &
          !force_match){
      # Remove variable as it is not available
      var_data[[name]] <- NULL
    }else if (length(var_data[[name]][["location"]]) !=
                var_data[[name]][["no_rows"]]){
      warning("Expected the variable '", name ,"'",
              " to contain '",var_data[[name]][["no_rows"]],"' no. rows",
              " but got '", length(var_data[[name]][["location"]]), "' no. rows.")
      var_data[[name]][["no_rows"]] <- length(var_data[[name]][["location"]])
    }
  }
  return(var_data)
}
#' Runs an \code{\link[Gmisc]{fastDoCall}()} within the environment of the model
#'
#' Sometimes the function can't find some of the variables that
#' were available when running the original variable. This function
#' uses the \code{\link[stats]{as.formula}()} together with
#' \code{\link[base]{environment}()} in order to get the environment
#' that the original code used.
#'
#' @param model The model used
#' @param what The function or non-empty character string used for
#' \code{\link[Gmisc]{fastDoCall}()}
#' @param ... Additional arguments passed to the function
#' @keywords internal
prEnvModelCall <- function(model, what, ...){
call_lst <- list(object = model)
dots <- list(...)
if (length(dots) > 0){
for(i in 1:length(dots)){
if (!is.null(names(dots)[i])){
call_lst[[names(dots)[i]]] <- dots[[i]]
}else{
call_lst <- c(call_lst,
dots[[i]])
}
}
}
model_env <- new.env(parent=environment(as.formula(model)))
model_env$what <- what
model_env$call_lst <- call_lst
fastDoCall(what, call_lst,
envir = model_env)
} | /R/private_functions.R | no_license | guhjy/Greg | R | false | false | 26,802 | r | # This file contains all the helper funcitons that the outer exported
# functions utilize. I try to have a pr at the start of the name for all
# the private functions.
#
# Author: max
###############################################################################
#' Looks for unique rowname match without grep
#'
#' Since a rowname may contain characters reserved by regular
#' expressions I've found it easier to deal with the rowname
#' finding by just checking for matching strings at the beginning
#' of the name while at the same time excluding names that have the
#' same stem, i.e. DM and DM_COMP will cause an issue since DM will
#' match both rows.
#'
#' @param rnames A vector with the rownames that are looked for
#' @param vn The variable name that is of interest
#' @param vars A vector with all the names and the potentially competing names
#' @return integer A vector containing the position of the matches
#'
#' TODO: remove this function in favor of the more powerful prMapVariable2Name
#' @keywords internal
prFindRownameMatches <- function(rnames, vn, vars){
  # Rows belonging to 'vn' are those whose name starts with 'vn'
  name_stub <- substr(rnames, 1, nchar(vn))
  matches <- which(name_stub == vn)
  # The prefix may not be unique: other variables can share the same stem
  # (e.g. DM vs DM_COMP), in which case rows claimed by a longer competing
  # name must be removed from the match set.
  vars_name_stub <- substr(vars, 1, nchar(vn))
  if (sum(vars_name_stub == vn) > 1){
    competing_vars <- vars[vars != vn &
                             vars_name_stub == vn]
    # Collect every row position matched by any competing variable name.
    # (Previously built by growing a vector with c() inside a loop --
    # replaced with lapply/unlist to avoid the quadratic append pattern.)
    competing_matches <-
      unlist(lapply(competing_vars,
                    function(comp_vn)
                      which(substr(rnames, 1, nchar(comp_vn)) == comp_vn)),
             use.names = FALSE)
    # Clean out competing matches (NULL competing_matches keeps everything)
    matches <- matches[!matches %in% competing_matches]
  }
  return(matches)
}
#' Get model outcome
#'
#' Uses the model to extract the outcome variable. Throws
#' error if unable to find the outcome.
#'
#' @param model The fitted model
#' @param mf The dataset that the model is fitted to - if missing it
#' uses the \code{\link[stats]{model.frame}()} dataset. This can cause
#' length issues as there may be variables that are excluded from the
#' model for different reasons.
#' @return vector
#'
#' @keywords internal
prExtractOutcomeFromModel <- function(model, mf){
  # The outcome is the left-hand side of the model formula
  lhs <- as.formula(model)[[2]]
  if (missing(mf)) {
    # Fall back on the model.frame -- note this may be shorter than the raw
    # dataset when observations were dropped during fitting
    mf <- model.frame(model)
    outcome <- mf[, names(mf) == deparse(lhs)]
  } else {
    outcome <- eval(lhs, envir = mf)
  }
  if (is.null(outcome))
    stop("Could not identify the outcome: ", deparse(as.formula(model)[[2]]),
         " among the model.frame variables: '", paste(names(mf), collapse="', '"),"'")
  # For survival::Surv outcomes only the status indicator is of interest
  if (inherits(outcome, "Surv"))
    return(outcome[,"status"])
  outcome
}
#' Get model data.frame
#'
#' Returns the raw variables from the original data
#' frame using the \code{\link[stats]{get_all_vars}()}
#' but with the twist that it also performs any associated
#' subsetting based on the model's \code{\link[base]{subset}()} argument.
#'
#' @param x The fitted model.
#' @return data.frame
#'
#' @keywords internal
prGetModelData <- function(x){
# Extract the variable names
true_vars <- all.vars(as.formula(x))
# NOTE(review): 'true_vars' is computed but never used below -- verify
# whether it can be dropped
# Get the environment of the formula
env <- environment(as.formula(x))
# Re-evaluate the data argument of the original call in that environment
data <- eval(x$call$data,
envir = env)
# The data frame with the raw, untransformed variables of the formula
mf <- get_all_vars(as.formula(x),
data=data)
if (!is.null(x$call$subset)){
if (!is.null(data)){
# As we don't know if the subsetting argument
# contained data from the data frame or the environment
# we need this additional check
mf <- tryCatch(mf[eval(x$call$subset,
envir = data,
enclos = env), ],
error = function(e){
stop("Could not deduce the correct subset argument when extracting the data. ", e)
})
}else{
# No data argument: the subset expression can only live in the
# formula's environment
mf <- mf[eval(x$call$subset,
envir=env), ]
}
}
return(mf)
}
#' Get the models variables
#'
#' This function extract the modelled variables. Any interaction
#' terms are removed as those should already be represented by
#' the individual terms.
#'
#' @param model A model fit
#' @param remove_splines If splines, etc. should be cleaned
#' from the variables as these no longer are "pure" variables
#' @param remove_interaction_vars If interaction variables are
#' not interesting then these should be removed. Often in
#' the case of \code{\link{printCrudeAndAdjustedModel}()} it is impossible
#' to properly show interaction variables and it's better to show
#' these in a separate table
#' @param add_intercept Adds the intercept if it exists
#' @return vector with names
#'
#' @importFrom stringr str_split
#' @importFrom stringr str_trim
#' @keywords internal
prGetModelVariables <- function(model,
remove_splines = TRUE,
remove_interaction_vars=FALSE,
add_intercept = FALSE){
# We need the call names in order to identify
# - interactions
# - functions such as splines, I()
if (inherits(model, "nlme")){
vars <- attr(model$fixDF$terms, "names")
}else{
vars <- attr(model$terms, "term.labels")
}
# Pull out strata()/strat() terms -- kept as an attribute, not a variable
strata <- NULL
if (any(grepl("^strat[a]{0,1}\\(", vars))){
strata <- vars[grep("^strat[a]{0,1}\\(", vars)]
vars <- vars[-grep("^strat[a]{0,1}\\(", vars)]
}
# Same treatment for cluster() terms
# NOTE(review): the regex '^cluster{0,1}\\(' only makes the final 'r'
# optional (it also matches 'cluste('); presumably '^cluster\\(' was meant
cluster <- NULL
if (any(grepl("^cluster{0,1}\\(", vars))){
cluster <- vars[grep("^cluster{0,1}\\(", vars)]
vars <- vars[-grep("^cluster{0,1}\\(", vars)]
}
# Fix for bug in cph and coxph: re-derive the cluster term from the raw
# formula text when it is missing from the term labels
if (is.null(cluster) &&
inherits(model, c("cph", "coxph"))){
alt_terms <- stringr::str_trim(strsplit(deparse(model$call$formula[[3]]),
"+", fixed = TRUE)[[1]])
if (any(grepl("^cluster{0,1}\\(", alt_terms))){
cluster <- alt_terms[grep("^cluster{0,1}\\(", alt_terms)]
}
}
# Remove I() as these are not true variables
unwanted_vars <- grep("^I\\(.*$", vars)
if (length(unwanted_vars) > 0){
attr(vars, "I() removed") <- vars[unwanted_vars]
vars <- vars[-unwanted_vars]
}
# Any remaining terms of the form fn(...) are spline/function wrappers
pat <- "^[[:alpha:]\\.]+[^(]+\\(.*$"
fn_vars <- grep(pat, vars)
if(length(fn_vars) > 0){
if (remove_splines){
# Remove splines and other functions
attr(vars, "functions removed") <- vars[fn_vars]
vars <- vars[-fn_vars]
}else{
# Clean the variable names into proper names
# the assumption here is that the real variable
# name is the first one in the parameters
pat <- "^[[:alpha:]\\.]+.*\\(([^,)]+).*$"
vars[fn_vars] <- sub(pat, "\\1", vars[fn_vars])
}
}
# Remove interaction terms as these are not variables
int_term <- "^.+:.+$"
in_vars <- grep(int_term, vars)
if (length(in_vars) > 0){
if (remove_interaction_vars){
# Also drop the individual variables taking part in the interaction
in_vn <- unlist(str_split(vars[in_vars], ":"),
use.names = FALSE)
in_vars <- unique(c(in_vars, which(vars %in% in_vn)))
}
attr(vars, "interactions removed") <- vars[in_vars]
vars <- vars[-in_vars]
}
if (add_intercept &&
grepl("intercept", names(coef(model))[1], ignore.case = TRUE)){
vars <- c(names(coef(model))[1],
vars)
}
# unique() strips attributes -- restore the bookkeeping attributes set above
clean_vars <- unique(vars)
attributes(clean_vars) <- attributes(vars)
if (!is.null(strata))
attr(clean_vars, "strata") <- strata
if (!is.null(cluster))
attr(clean_vars, "cluster") <- cluster
return(clean_vars)
}
#' Get statistics according to the type
#'
#' A simple function applied by the \code{\link[Gmisc]{getDescriptionStatsBy}()}
#' for the total column. This function is also used by \code{\link{printCrudeAndAdjustedModel}()}
#' in case of a basic linear regression is asked for a raw stat column
#'
#' @param x The variable that we want the statistics for
#' @param show_perc If this is a factor/proportion variable then we
#' might want to show the percentages
#' @param html If the output should be in html or LaTeX formatting
#' @param digits Number of decimal digits
#' @param numbers_first If number is to be prior to the percentage
#' @param useNA If missing should be included
#' @param show_all_values This is by default false as for instance if there is
#' no missing and there is only one variable then it is most sane to only show
#' one option as the other one will just be a complement to the first. For instance
#' sex - if you know gender then automatically you know the distribution of the
#' other sex as it's 100 \% - other \%.
#' @param continuous_fn A function for describing continuous variables
#' defaults to \code{\link{describeMean}()}
#' @param prop_fn A function for describing proportions, defaults to
#' the factor function
#' @param factor_fn A function for describing factors, defaults to
#' \code{\link{describeFactors}()}
#' @param percentage_sign If you want to suppress the percentage sign you
#' can set this variable to FALSE. You can also choose something else that
#' the default \% if you so wish by setting this variable.
#' @return A matrix or a vector depending on the settings
#'
#' TODO: Use the Gmisc function instead of this copy
#'
#' @importFrom Gmisc describeMean
#' @importFrom Gmisc describeFactors
#' @keywords internal
prGetStatistics <- function(x,
                            show_perc = FALSE,
                            html = TRUE,
                            digits = 1,
                            numbers_first = TRUE,
                            useNA = "no",
                            show_all_values = FALSE,
                            continuous_fn = describeMean,
                            factor_fn = describeFactors,
                            prop_fn = factor_fn,
                            percentage_sign = TRUE)
{
  # Bugfix: the default used to be 'percentage_sign = percentage_sign', a
  # self-referential default that errors ("promise already under
  # evaluation") whenever the argument is omitted. TRUE matches the
  # Gmisc::describeFactors default and leaves explicit callers unaffected.
  useNA <- prConvertShowMissing(useNA)
  if (is.factor(x) ||
        is.logical(x) ||
        is.character(x)){
    # Categorical variable: dichotomous and polychotomous variables are
    # summarized through slightly different paths
    if (length(unique(x)) == 2){
      if (show_perc){
        total_table <- prop_fn(x,
                               html=html,
                               digits=digits,
                               number_first=numbers_first,
                               useNA = useNA,
                               percentage_sign = percentage_sign)
      }else{
        total_table <- table(x, useNA=useNA)
        names(total_table)[is.na(names(total_table))] <- "Missing"
        # Choose only the reference level
        # Note: Currently references are required -- the '&& FALSE' below
        # deliberately disables this branch
        if (show_all_values == FALSE && FALSE)
          total_table <- total_table[names(total_table) %in% c(levels(x)[1], "Missing")]
      }
    } else {
      if (show_perc)
        total_table <- factor_fn(x,
                                 html=html,
                                 digits=digits,
                                 number_first=numbers_first,
                                 useNA = useNA,
                                 percentage_sign = percentage_sign)
      else{
        total_table <- table(x, useNA=useNA)
        names(total_table)[is.na(names(total_table))] <- "Missing"
      }
    }
  }else{
    # Continuous variable
    total_table <- continuous_fn(x,
                                 html=html, digits=digits,
                                 number_first=numbers_first,
                                 useNA = useNA)
    # If a continuous variable has two rows then it's assumed that the second is the missing
    if (length(total_table) == 2 &&
          show_perc == FALSE)
      total_table[2] <- sum(is.na(x))
  }
  return(total_table)
}
#' Gets the boundaries for a survival fit
#'
#' @param fit A survival model of either competing risk regression or cox regression type
#' @param conf.int The interval of interest 0-1, see levels in confint()
#' @param exp If the value should be in exponential form (default)
#' @return A matrix with the columns:
#' \item{beta}{The estimated coefficient}
#' \item{p_val}{P-value}
#' \item{low}{The lower confidence interval}
#' \item{high}{The upper confidence interval}
#' \item{order}{A column that later can be used in ordering}
#'
#' @keywords internal
prGetFpDataFromSurvivalFit <- function (fit,
                                        conf.int = 0.95,
                                        exp = TRUE){
  # Wald statistic per coefficient; same approach as the
  # print.cph -> prModFit method in the rms package
  Z <- coef(fit)/sqrt(diag(fit$var))
  p_val <- signif(1 - pchisq(Z^2, 1), 5)
  # Bugfix: this previously read rep(-1, length(beta)) although no 'beta'
  # object exists in this scope -- one sort-order placeholder per coefficient
  order <- rep(-1, length(coef(fit)))
  ci <- confint(fit, level=conf.int)
  # The parameter 'exp' only shadows base::exp as a value; R's function-call
  # lookup still resolves exp(...) below to base::exp
  if (exp){
    ret_matrix <- cbind(
      beta=exp(coef(fit)),
      p_val=p_val,
      low=exp(ci[,1]),
      high=exp(ci[,2]),
      order=order)
  }else{
    ret_matrix <- cbind(
      beta=coef(fit),
      p_val=p_val,
      low=ci[,1],
      high=ci[,2],
      order=order)
  }
  # Set the names of the rows
  rownames(ret_matrix) <- names(fit$coef)
  return(ret_matrix)
}
#' Gets the boundaries for a GLM fit that is poisson or quasipoisson based
#'
#' @param glm.fit A regression model
#' @param conf.int The interval of interest 0-1, see levels in confint()
#' @param exp If the value should be in exponential form (default)
#' @return A matrix with the columns:
#' \item{beta}{The estimated coefficient}
#' \item{p_val}{P-value}
#' \item{low}{The lower confidence interval}
#' \item{high}{The upper confidence interval}
#' \item{order}{A column that later can be used in ordering}
#'
#' @keywords internal
prGetFpDataFromGlmFit <- function(glm.fit,
conf.int = 0.95,
exp = TRUE){
summary_glm <- summary.glm(glm.fit)
# Extract the summary values of interest
# NOTE(review): 'summary_se' is extracted but never used below -- verify
summary_se <- summary_glm$coefficients[,colnames(summary_glm$coefficients) == "Std. Error"]
# The p-value column name depends on the family: quasipoisson uses a
# t-based test, poisson a z-based test. The family name string is one
# element of the glm family list, hence the %in% membership test.
if ("quasipoisson" %in% glm.fit$family){
summary_p_val <- summary_glm$coefficients[,colnames(summary_glm$coefficients) == "Pr(>|t|)"]
}else if ("poisson" %in% glm.fit$family){
summary_p_val <- summary_glm$coefficients[,colnames(summary_glm$coefficients) == "Pr(>|z|)"]
}else{
stop("Type of analysis not prepared!")
}
# Sort-order placeholder, one per coefficient (note '=' used as assignment)
order = rep(-1, length(glm.fit$coefficients))
ci <- confint(glm.fit, level=conf.int)
if (exp){
ret_matrix <- cbind(
beta=exp(coef(glm.fit)),
p_val=summary_p_val,
low=exp(ci[,1]),
high=exp(ci[,2]),
order=order)
}else{
ret_matrix <- cbind(
beta=coef(glm.fit),
p_val=summary_p_val,
low=ci[,1],
high=ci[,2],
order=order)
}
# Set the names of the rows
rownames(ret_matrix) <- names(glm.fit$coefficients)
# Remove the intercept
# NOTE(review): with a single predictor this subsetting drops the matrix
# to a named vector (no drop = FALSE) -- confirm downstream handles both
ret_matrix <- ret_matrix[names(glm.fit$coefficients) != "(Intercept)", ]
return(ret_matrix)
}
#' Gets the confidence interval, p-values,
#' coefficients from a survival object
#'
#' @param model_fit A regression fit from CRR, coxph, cph object
#' @param conf.int The interval of interest 0-1, see levels in confint()
#' @param exp If the value should be in exponential form (default)
#' @return A matrix with the columns:
#' \item{beta}{The estimated coefficient}
#' \item{p_val}{P-value}
#' \item{low}{The lower confidence interval}
#' \item{high}{The upper confidence interval}
#' \item{order}{A column that later can be used in ordering}
#'
#' @keywords internal
prGetFpDataFromFit <- function(model_fit,
                               conf.int = 0.95,
                               exp = TRUE){
  # Dispatch on the class of the fitted model to the matching extractor
  if (inherits(model_fit, "coxph") ||
      inherits(model_fit, "crr")) {
    prGetFpDataFromSurvivalFit(fit = model_fit, conf.int = conf.int, exp = exp)
  } else if (inherits(model_fit, "glm")) {
    prGetFpDataFromGlmFit(glm.fit = model_fit, conf.int = conf.int, exp = exp)
  } else {
    stop(paste("Unknown fit class type:", class(model_fit)))
  }
}
#' A function for converting a useNA variable
#'
#' The variable is supposed to be directly compatible with
#' table(..., useNA=useNA). It throws an error
#' if not compatible
#'
#' @param useNA Boolean or "no", "ifany", "always"
#' @return string
#'
#' @keywords internal
prConvertShowMissing <- function(useNA) {
  # Normalize the logical shorthands onto the keywords table() understands
  if (useNA == FALSE || useNA == "no") {
    out <- "no"
  } else if (useNA == TRUE) {
    out <- "ifany"
  } else {
    out <- useNA
  }
  # Anything else than the three table() options is rejected
  if (!out %in% c("no", "ifany", "always"))
    stop(sprintf("You have set an invalid option for useNA variable, '%s' ,it should be boolean or one of the options: no, ifany or always.", out))
  out
}
#' A function that tries to resolve what variable corresponds to what row
#'
#' As both the \code{\link{getCrudeAndAdjustedModelData}()} and the
#' \code{\link{printCrudeAndAdjustedModel}()} need to now exactly
#' what name from the \code{\link[stats]{coef}()}/\code{\link[rms]{summary.rms}()}
#' correspond to we for generalizeability this rather elaborate function.
#'
#' @param var_names The variable names that are saught after
#' @param available_names The names that are available to search through
#' @param data The data set that is saught after
#' @param force_match Whether all variables need to be identified or not.
#' E.g. you may only want to use some variables and already pruned the
#' \code{available_names} and therefore wont have matches. This is the
#' case when \code{\link{getCrudeAndAdjustedModelData}()} has been used together
#' with the \code{var_select} argument.
#' @return \code{list} Returns a list with each element has the corresponding
#' variable name and a subsequent list with the parameters \code{no_rows}
#' and \code{location} indiciting the number of rows corresponding to that
#' element and where those rows are located. For factors the list also contains
#' \code{lvls} and \code{no_lvls}.
#' @keywords internal
#' @import utils
prMapVariable2Name <- function(var_names, available_names,
data, force_match = TRUE){
# Maps each variable in var_names to the row(s) of available_names (e.g.
# coefficient names of a model summary) that belong to it. Matching is by
# name prefix; factor levels are used to resolve conflicts between
# variables sharing the same name stem.
if (any(duplicated(available_names)))
stop("You have non-unique names. You probably need to adjust",
" (1) variable names or (2) factor labels.")
# Start with figuring out how many rows each variable is expected to
# occupy: 1 for the intercept and continuous variables, no. levels - 1
# for factors (the reference level has no row of its own)
var_data <- list()
for (name in var_names){
if (grepl("intercept", name, ignore.case = TRUE)){
var_data[[name]] <-
list(no_rows = 1)
}else if (is.factor(data[,name])){
var_data[[name]] <-
list(lvls = levels(data[,name]))
# Sometimes due to subsetting some factors don't exist
# we therefore need to remove those not actually in the dataset
var_data[[name]]$lvls <-
var_data[[name]]$lvls[var_data[[name]]$lvls %in%
as.character(unique(data[, name][!is.na(data[, name])]))]
var_data[[name]][["no_lvls"]] <- length(var_data[[name]]$lvls)
var_data[[name]][["no_rows"]] <- length(var_data[[name]]$lvls) - 1
}else{
var_data[[name]] <-
list(no_rows = 1)
}
}
# A function for stripping the name and the additional information
# from the available name in order to get the cleanest form
getResidualCharacters <- function(search, conflicting_name){
residual_chars <- substring(conflicting_name, nchar(search) + 1)
if (!is.null(var_data[[search]]$lvls)){
# For factors also try stripping each level label, keeping the
# shortest residual -- a perfect match leaves nothing behind
best_resid <- residual_chars
for (lvl in var_data[[search]]$lvls){
new_resid <- sub(lvl, "", residual_chars,
fixed = TRUE)
if (nchar(new_resid) < nchar(best_resid)){
best_resid <- new_resid
if (nchar(new_resid) == 0)
break;
}
}
residual_chars <- best_resid
}
return(residual_chars)
}
matched_names <- c()
matched_numbers <- c()
org_available_names <- available_names
# Start with simple non-factored variables as these should give a single-line match
# then continue with the longest named variable
for (name in var_names[order(sapply(var_data, function(x) is.null(x$lvls)),
nchar(var_names), decreasing = TRUE)]){
matches <- which(name == substr(available_names, 1, nchar(name)))
if (length(matches) == 0){
if (force_match)
stop("Sorry but the function could not find a match for '", name , "'",
" among any of the available names: '", paste(org_available_names,
collapse="', '") ,"'")
}else if(length(matches) == 1){
if (var_data[[name]]$no_rows != 1)
stop("Expected more than one match for varible '", name, "'",
" the only positive match was '", available_names[matches], "'")
}else if (length(var_names) > length(matched_names) + 1){
if (is.null(var_data[[name]]$lvls) &&
sum(name == available_names) == 1){
# Check if the searched for variable is a non-factor variable
# if so then match if there is a perfect match
matches <- which(name == available_names)
}else if (length(var_names) > length(matched_names) + 1){
# Check that there is no conflicting match
conflicting_vars <- var_names[var_names != name &
!var_names %in% matched_names]
possible_conflicts <- c()
# NOTE(review): 'conf_var' is never used inside the loop body below --
# 'nchar(conflicting_vars)' is vectorized/recycled over available_names;
# presumably 'conf_var' was intended here. Verify this branch.
for (conf_var in conflicting_vars){
possible_conflicts <-
union(possible_conflicts,
which(substr(available_names, 1, nchar(conflicting_vars)) %in%
conflicting_vars))
}
conflicts <- intersect(possible_conflicts, matches)
if (length(conflicts) > 0){
# Keep only the conflicting variables that prefix-match something
conflicting_vars <- conflicting_vars[sapply(conflicting_vars,
function(search)
any(search == substr(available_names, 1, nchar(search))))]
for (conflict in conflicts){
# We will try to find a better match that leaves fewer "residual characters"
# than what we started with
start_res_chars <- getResidualCharacters(name, available_names[conflict])
best_match <- NULL
best_conf_name <- NULL
for (conf_name in conflicting_vars){
resid_chars <- getResidualCharacters(conf_name, available_names[conflict])
if (is.null(best_match) ||
nchar(best_match) > nchar(resid_chars)){
best_match <- resid_chars
best_conf_name <- conf_name
}
}
if (nchar(start_res_chars) == nchar(best_match)){
stop("The software can't decide which name belongs to which variable.",
" The variable that is searched for is '", name, "'",
" and there is a conflict with the variable '", best_conf_name ,"'.",
" The best match for '", name, "' leaves: '", start_res_chars, "'",
" while the conflict '", best_conf_name ,"' leaves: '", best_match ,"'",
" when trying to match the name: '", available_names[conflict] ,"'")
}else if(nchar(start_res_chars) > nchar(best_match)){
# Now remove the matched row if we actually found a better match
matches <- matches[matches != conflict]
}
}
}
}
if (length(matches) == 0){
stop("Could not identify the rows corresponding to the variable '", name ,"'",
" this could possibly be to similarity between different variable names",
" and factor levels. Try to make sure that all variable names are unique",
" the variables that are currently looked for are:",
" '", paste(var_names,
collapse="', '"),
"'.")
}
}
# Check that multiple matches are continuous, everything else is suspicious
if (length(matches) > 1){
matches <- matches[order(matches)]
if (any(1 != tail(matches, length(matches) - 1) -
head(matches, length(matches) -1)))
stop("The variable '", name, "' failed to provide an adequate",
" consequent number of matches, the names matched are located at:",
" '", paste(matches, collapse="', '"), "'")
}
# Since we remove the matched names we need to look back at the original and
# find the exact match in order to deduce the true number
true_matches <- which(org_available_names %in%
available_names[matches])
# Avoid accidentally rematching
true_matches <- setdiff(true_matches, matched_numbers)
var_data[[name]][["location"]] <- true_matches
# Update the loop vars
if (length(matches) > 0)
available_names <- available_names[-matches]
matched_names <- c(matched_names, name)
matched_numbers <- c(matched_numbers, true_matches)
if (length(var_data[[name]][["location"]]) == 0 &
!force_match){
# Remove variable as it is not available
var_data[[name]] <- NULL
}else if (length(var_data[[name]][["location"]]) !=
var_data[[name]][["no_rows"]]){
# Fewer/more rows than expected: warn and adjust the bookkeeping
warning("Expected the variable '", name ,"'",
" to contain '",var_data[[name]][["no_rows"]],"' no. rows",
" but got '", length(var_data[[name]][["location"]]), "' no. rows.")
var_data[[name]][["no_rows"]] <- length(var_data[[name]][["location"]])
}
}
return(var_data)
}
#' Runs an \code{\link[Gmisc]{fastDoCall}()} within the environment of the model
#'
#' Sometimes the function can't find some of the variables that
#' were available when running the original variable. This function
#' uses the \code{\link[stats]{as.formula}()} together with
#' \code{\link[base]{environment}()} in order to get the environment
#' that the original code used.
#'
#' @param model The model used
#' @param what The function or non-empty character string used for
#' \code{\link[Gmisc]{fastDoCall}()}
#' @param ... Additional arguments passed to the function
#' @keywords internal
prEnvModelCall <- function(model, what, ...){
# The model is always passed as the 'object' argument
call_lst <- list(object = model)
dots <- list(...)
if (length(dots) > 0){
# Named arguments keep their names; unnamed ones are appended positionally
# NOTE(review): when named and unnamed arguments are mixed, names(dots)[i]
# is "" (not NULL) for the unnamed ones, sending them down the named
# branch -- verify callers never mix named and unnamed arguments.
# (1:length(dots) is safe here: guarded by length(dots) > 0)
for(i in 1:length(dots)){
if (!is.null(names(dots)[i])){
call_lst[[names(dots)[i]]] <- dots[[i]]
}else{
call_lst <- c(call_lst,
dots[[i]])
}
}
}
# Evaluate within a child of the formula's environment so that variables
# that were in scope when the model was fitted can be resolved by the call
model_env <- new.env(parent=environment(as.formula(model)))
model_env$what <- what
model_env$call_lst <- call_lst
fastDoCall(what, call_lst,
envir = model_env)
} |
\name{string.to.colors}
\alias{string.to.color}
\alias{string.to.colors}
\alias{stringToColors}
\alias{stringtocolor}
\title{Convert strings to colors}
\usage{
string.to.colors(string, colors = NULL)
}
\arguments{
\item{string}{a vector of strings representing groups.}
\item{colors}{a vector of colors, one for each unique
element in \code{string}.}
}
\value{
a vector of colors, one for each element in \code{string}
}
\description{
Automatically convert a vector of strings into a color
for easy plotting
}
\note{
This function can also be used to specify pch values, cex
values, or any other plotting values the user may wish to
differ across groups. See examples.
}
\examples{
groups = sample(LETTERS[1:5], size=100, replace=TRUE)
plot(rnorm(100), rnorm(100), col=string.to.color(groups))
plot(rnorm(100), rnorm(100), col=string.to.color(groups),
pch=as.numeric(string.to.color(groups, colors=c(16:20))))
}
\author{
Dustin Fife
}
| /man/string.to.colors.Rd | no_license | mrdwab/fifer | R | false | false | 970 | rd | \name{string.to.colors}
\alias{string.to.color}
\alias{string.to.colors}
\alias{stringToColors}
\alias{stringtocolor}
\title{Convert strings to colors}
\usage{
string.to.colors(string, colors = NULL)
}
\arguments{
\item{string}{a vector of strings representing groups.}
\item{colors}{a vector of colors, one for each unique
element in \code{string}.}
}
\value{
a vector of colors, one for each element in \code{string}
}
\description{
Automatically convert a vector of strings into a color
for easy plotting
}
\note{
This function can also be used to specify pch values, cex
values, or any other plotting values the user may wish to
differ across groups. See examples.
}
\examples{
groups = sample(LETTERS[1:5], size=100, replace=TRUE)
plot(rnorm(100), rnorm(100), col=string.to.color(groups))
plot(rnorm(100), rnorm(100), col=string.to.color(groups),
pch=as.numeric(string.to.color(groups, colors=c(16:20))))
}
\author{
Dustin Fife
}
|
## Test rootograms() and related residuals functions
## load packages
library("testthat")
library("gratia")
library("mgcv")
## Need a local wrapper to allow conditional use of vdiffr
`expect_doppelganger` <- function(title, fig, ...) {
# Skip (rather than fail) when the optional vdiffr package is absent
testthat::skip_if_not_installed("vdiffr")
vdiffr::expect_doppelganger(title, fig, ...)
}
N <- 500L
# Simulated smooth-term data from data_sim("eg1"), Gaussian and Poisson
df_gauss <- data_sim("eg1", n = N, seed = 42)
df_pois <- data_sim("eg1", dist = "poisson", n = N, scale = 0.2, seed = 42)
## fit the model
m_gauss <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = df_gauss,
method = "REML", family = gaussian())
b_pois <- bam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = df_pois,
method = "fREML", family = poisson())
m_nb <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = df_pois,
method = "REML", family = nb())
# Tweedie fit: used below to check that rootogram() rejects unsupported families
m_tw <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = df_pois,
method = "REML", family = tw())
# For each supported family rootogram() should run silently and draw()
# should match the stored vdiffr snapshot
test_that("rootogram works for a continuous Gaussian response", {
skip_on_cran()
expect_silent(rg <- rootogram(m_gauss))
expect_doppelganger("draw gaussian rootogram", draw(rg))
})
# NOTE(review): unlike the other tests this one has no skip_on_cran() --
# confirm that is intentional
test_that("rootogram works for a discrete Poisson response", {
expect_silent(rg <- rootogram(b_pois))
expect_doppelganger("draw poisson rootogram", draw(rg))
})
test_that("rootogram works for a discrete negative binomial response", {
skip_on_cran()
expect_silent(rg <- rootogram(m_nb))
expect_doppelganger("draw neg bin rootogram", draw(rg))
})
test_that("rootogram fails for a a non-supported response", {
skip_on_cran()
expect_error(rootogram(m_tw),
"Only <Poisson, Negative Binomial, Gaussian> models supported.",
fixed = TRUE)
}) | /tests/testthat/test-rootograms.R | permissive | Memo1986/gratia | R | false | false | 1,747 | r | ## Test rootograms() and related residuals functions
## load packages
library("testthat")
library("gratia")
library("mgcv")
## Need a local wrapper to allow conditional use of vdiffr
`expect_doppelganger` <- function(title, fig, ...) {
testthat::skip_if_not_installed("vdiffr")
vdiffr::expect_doppelganger(title, fig, ...)
}
N <- 500L
df_gauss <- data_sim("eg1", n = N, seed = 42)
df_pois <- data_sim("eg1", dist = "poisson", n = N, scale = 0.2, seed = 42)
## fit the model
m_gauss <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = df_gauss,
method = "REML", family = gaussian())
b_pois <- bam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = df_pois,
method = "fREML", family = poisson())
m_nb <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = df_pois,
method = "REML", family = nb())
m_tw <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = df_pois,
method = "REML", family = tw())
test_that("rootogram works for a continuous Gaussian response", {
skip_on_cran()
expect_silent(rg <- rootogram(m_gauss))
expect_doppelganger("draw gaussian rootogram", draw(rg))
})
test_that("rootogram works for a discrete Poisson response", {
expect_silent(rg <- rootogram(b_pois))
expect_doppelganger("draw poisson rootogram", draw(rg))
})
test_that("rootogram works for a discrete negative binomial response", {
skip_on_cran()
expect_silent(rg <- rootogram(m_nb))
expect_doppelganger("draw neg bin rootogram", draw(rg))
})
test_that("rootogram fails for a a non-supported response", {
skip_on_cran()
expect_error(rootogram(m_tw),
"Only <Poisson, Negative Binomial, Gaussian> models supported.",
fixed = TRUE)
}) |
# Part of R package 'stabledist' (part of the Rmetrics project).
## The stabledist R package is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This R package is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Library General Public License for more details.
##
## A copy of the GNU General Public License is available at
## http://www.r-project.org/Licenses/
################################################################################
## FUNCTIONS: DESCRIPTION:
## dstable Returns density for stable DF
## pstable Returns probabilities for stable DF
## qstable Returns quantiles for stable DF
## rstable Returns random variates for stable DF
## UTILITY FUNCTION DESCRIPTION:
## .integrate2 Integrates internal functions for *stable
################################################################################
##==============================================================================
### MM TODO:
## 0) All d, p, q, q -- have identical parts
## a) 'parametrization' (pm) check
## b) checking, alpha, beta,
## c) subdivisions etc {all but rstable}
## --- to do: "Fix" these in dstable(), then copy/paste to others
##==============================================================================
pi2 <- pi/2 # cached constant: pi/2 is used throughout this file
##' omega() according to Lambert & Lindsey (1999), p.412: the shift used to
##' map parametrization 'pm = 1' onto the default 'pm = 0'.
##'
##' @param gamma [dpqr]stable()'s scale parameter, > 0 -- of length 1
##' @param alpha [dpqr]stable()'s "main" parameter, in [0, 2] -- of length 1
##' @return tan(pi/2 * alpha) for non-integer alpha; (2/pi) * log(gamma)
##'         for alpha == 1; and 0 for alpha == 0 or alpha == 2.
.om <- function(gamma, alpha) {
    if (alpha == round(alpha)) { # integer alpha: 0, 1 or 2
        if (alpha == 1) (2/pi) * log(gamma) else 0
    } else {
        ## NOT tanpi2(): alpha ranges over [0, 2], outside tanpi2()'s domain
        tan(pi2 * alpha)
    }
}
##' C_alpha -- the tail constant of the stable distribution:
##' C_alpha = gamma(alpha) / pi * sin(alpha * pi/2), with the limits
##' C_0 = 1/2 and C_2 = 0 handled exactly.
##'
##' @param alpha numeric vector of stable tail parameters, in [0,2]
##' @param log logical; if TRUE, return log(C_alpha)
##' @return numeric vector of tail constants (or their logs)
##' @author Martin Maechler
C.stable.tail <- function(alpha, log = FALSE) {
    stopifnot(0 <= alpha, alpha <= 2)
    res <- alpha
    zero <- alpha == 0
    res[zero] <- if (log) -log(2) else 0.5 # limit alpha -> 0
    a <- alpha[!zero]
    res[!zero] <-
        if (log) lgamma(a) - log(pi) + log(sin(a * pi2))
        else gamma(a)/pi * sin(a * pi2)
    two <- alpha == 2
    if (any(two)) # sin(pi) is not exactly 0 in floating point
        res[two] <- if (log) -Inf else 0
    res
}
##' tan(pi/2 * x) for x in [-1, 1], taking the limits exactly:
##' tanpi2(-1) == -Inf and tanpi2(+1) == +Inf (a plain tan(pi/2) is finite
##' in floating point).
##'
##' @param x numeric vector with values in [-1, 1]
##' @return numeric vector of values tan(pi/2 * x)
##' @author Martin Maechler
tanpi2 <- function(x) {
    res <- x
    ii <- x & x == round(x) # non-zero integers, i.e. -1 / +1 here
    if (any(ii))
        res[ii] <- (2 - (x[ii] %% 4)) * Inf # maps +1 -> +Inf, -1 -> -Inf
    reg <- which(!ii)
    res[reg] <- tan(pi2 * x[reg])
    res
}
##' cos(pi/2 * x) for x in [-1, 1], taking the limits exactly:
##' cospi2(+-1) == 0 (a plain cos(pi/2) is only approximately 0).
##'
##' @param x numeric vector with values in [-1, 1]
##' @return numeric vector of values cos(pi/2 * x)
##' @author Martin Maechler
cospi2 <- function(x) {
    res <- x
    ii <- x == round(x) # integers; for x in [-1, 1]: -1, 0, +1
    if (any(ii))
        res[ii] <- as.numeric(x[ii] == 0) # cos(0) = 1; cos(+-pi/2) = 0 exactly
    reg <- which(!ii)
    res[reg] <- cos(pi2 * x[reg])
    res
}
##' Tail-approximation density for dstable(): following Nolan's "tail.pdf"
##' paper, differentiating the tail approximation
##'   1 - F(x) ~ (1 + beta) C_alpha x^-alpha
##' gives
##'   f(x) ~ alpha (1 + beta) C_alpha x^-(1 + alpha).
##'
##' @param x numeric vector of abscissa values
##' @param alpha stable index
##' @param beta skewness in [-1, 1]
##' @param log logical; if TRUE, return log(f(.))
##' @return the Pareto tail approximation of the stable density at x
##' @author Martin Maechler
dPareto <- function(x, alpha, beta, log = FALSE) {
    left <- x < 0
    if (any(left)) { ## mirror the left tail: f(-x; beta) = f(x; -beta)
        x[left] <- -x[left]
        beta <- rep(beta, length.out = length(x))
        beta[left] <- -beta[left]
    }
    if (log)
        log(alpha) + log1p(beta) + C.stable.tail(alpha, log=TRUE) - (1 + alpha) * log(x)
    else
        alpha * (1 + beta) * C.stable.tail(alpha) * x^(-(1 + alpha))
}
##' Tail-approximation CDF for pstable(): upper tail
##'   iF := 1 - F(x) ~ (1 + beta) C_alpha x^-alpha  for large x > 0.
##' Negative x is deliberately rejected (see FIXME below).
##'
##' @param x numeric vector of abscissa values (must be >= 0)
##' @param alpha stable index
##' @param beta skewness in [-1, 1]
##' @param lower.tail,log.p as in pstable()
##' @return the Pareto tail approximation of the stable CDF at x
pPareto <- function(x, alpha, beta, lower.tail = TRUE, log.p = FALSE) {
    left <- x < 0
    if (any(left)) { ## left tail: mirroring alone is not enough here
        x[left] <- -x[left]
        beta <- rep(beta, length.out = length(x))
        beta[left] <- -beta[left]
        stop("FIXME --- pPareto() is not correct for negative x")## switch 1-iF / iF
    }
    if (log.p) {
        if (lower.tail) ## log(1 - iF)
            log1p(-(1 + beta) * C.stable.tail(alpha) * x^(-alpha))
        else ## log(iF)
            log1p(beta) + C.stable.tail(alpha, log=TRUE) - alpha * log(x)
    } else {
        iF <- (1 + beta) * C.stable.tail(alpha) * x^(-alpha)
        if (lower.tail) 1 - iF else iF
    }
}
dstable <- function(x, alpha, beta,
                    gamma = 1, delta = 0, pm = 0, log = FALSE,
                    tol = 64*.Machine$double.eps, zeta.tol = NULL,
                    subdivisions = 1000)
{
    ## Originally implemented by Diethelm Wuertz;
    ## changes for efficiency and accuracy by Martin Maechler.
    ##
    ## Purpose:
    ##   Density of the (skew) stable distribution, using the approach of
    ##   J.P. Nolan: integral representations derived from the characteristic
    ##   function of standardized stable random variables, evaluated
    ##   numerically (see .fct1() / .fct2()).
    ##
    ## Arguments:
    ##   alpha = index of stability, in the range (0,2]
    ##   beta  = skewness, in the range [-1, 1]
    ##   gamma = scale, in the range (0, infinity)
    ##   delta = location, in the range (-infinity, +infinity)
    ##   pm    = type of parameterization (0, 1 or 2)
    ##   log   = if TRUE, return log(f(x))
    ## Note: S+ compatibility no longer considered (explicitly)

    ## Parameter check -- NB: (gamma, delta) can be *vector*s (recycled along x):
    stopifnot( 0 < alpha, alpha <= 2, length(alpha) == 1,
              -1 <= beta, beta <= 1, length(beta) == 1,
              0 <= gamma, length(pm) == 1, pm %in% 0:2,
              tol > 0, subdivisions > 0)
    ## not an official argument {no doc!}:
    verbose <- getOption("dstable.debug", default=FALSE)
    ## Map parameterizations 1 and 2 to the default 'pm = 0':
    if (pm == 1) {
        delta <- delta + beta*gamma * .om(gamma,alpha)
    } else if (pm == 2) {
        delta <- delta - alpha^(-1/alpha)*gamma*stableMode(alpha, beta)
        gamma <- alpha^(-1/alpha) * gamma
    } ## else pm == 0
    ## Standardize -- shift and scale:
    x <- (x - delta) / gamma
    ans <-
        ## Special cases with closed forms:
        if (alpha == 2) {
            dnorm(x, mean = 0, sd = sqrt(2), log=log)
        } else if (alpha == 1 && beta == 0) {
            dcauchy(x, log=log)
        } else {
            ## General case
            if (alpha != 1) { ## 0 < alpha < 2  &  |beta| <= 1  from above
                tanpa2 <- tan(pi2*alpha)
                betan <- beta * tanpa2
                zeta <- -betan
                theta0 <- min(max(-pi2, atan(betan) / alpha), pi2)
                if(verbose) cat(sprintf(
                    "dstable(., alpha=%g, beta=%g,..): --> theta0=%g, zeta=%g,",
                    alpha, beta, theta0, zeta))
                if(is.null(zeta.tol)) {
                    ## heuristic default tolerance for "x is close to zeta":
                    zeta.tol <-
                        if(betan == 0) .4e-15
                        else if(1-abs(beta) < .01 || alpha < .01) 2e-15 else 5e-5
                    if(verbose) cat(sprintf(" --> zeta.tol= %g", zeta.tol))
                }
                else stopifnot(is.numeric(zeta.tol), zeta.tol >= 0)
                if(verbose) cat("\n")
                ## Loop over all x values ( < , = , or > zeta):
                vapply(x, .fct1, 0.,
                       zeta=zeta, alpha=alpha, beta=beta, theta0=theta0, log=log,
                       verbose=verbose,
                       tol=tol, zeta.tol=zeta.tol, subdivisions=subdivisions)
            }
            ## Special case alpha == 1 and -1 <= beta <= 1 (but not = 0) :
            else { ## (alpha == 1) and 0 < |beta| <= 1 from above
                ## Loop over all x; use f(-z; beta) = f(z; -beta) for z < 0:
                vapply(x, function(z) {
                    if (z >= 0) {
                        .fct2( z,  beta, log=log, tol=tol, subdivisions=subdivisions)
                    } else {
                        .fct2(-z, -beta, log=log, tol=tol, subdivisions=subdivisions)
                    }
                }, 0.)
            }
        }
    ## Undo the standardization scaling.  Where the numerical integration
    ## returned 0 (or -Inf on the log scale), the asymptotic Pareto tail
    ## approximation dPareto() is more accurate.
    ## BUG FIX: previously 'gamma' was overwritten by its [i0]-subset before
    ## the !i0 group re-recycled it, so with vector 'gamma' and both groups
    ## non-empty the second group was scaled by the wrong values.  Recycle
    ## once and subset a local copy instead.
    gam <- if(length(gamma) > 1) rep(gamma, length.out=length(x)) else gamma
    i0 <- ans == (if(log)-Inf else 0)
    if(any(i0)) {
        d <- dPareto(x[i0], alpha, beta, log=log)
        g <- if(length(gam) > 1) gam[i0] else gam
        ans[i0] <- if(log) d - log(g) else d/g
    }
    if(any(io <- !i0)) {
        d <- ans[io]
        g <- if(length(gam) > 1) gam[io] else gam
        ans[io] <- if (log) d - log(g) else d/g
    }
    ans
}## {dstable}
## ------------------------------------------------------------------------------
## == 708.396... ; for x beyond this, exp(-x) underflows to 0 in double precision
.large.exp.arg <- -(.Machine$double.min.exp * log(2))
##' Compute x * exp(-x) numerically stably, with the correct limit 0 as
##' x -> Inf (naively, Inf * exp(-Inf) = Inf * 0 = NaN).
##'
##' @param x numeric vector
##' @return numeric vector of x * exp(-x)
##' @author Martin Maechler
x.exp.m.x <- function(x) {
    res <- x * exp(-x)
    nax <- is.na(x)
    if (any(nax))
        res[nax] <- NA_real_
    big <- !nax & x > .large.exp.arg # exp(-x) underflows, e.g. x == Inf
    if (any(big))
        res[big] <- 0
    res
}
## Nudge a value up / down by a *relative* amount eps:
.e.plus  <- function(x, eps) x + eps * abs(x)
.e.minus <- function(x, eps) x - eps * abs(x)
## pi/2 * (1 - eps)  ==  .e.minus(pi/2, eps), slightly more efficiently:
pi2.. <- function(eps) pi2 * (1 - eps)
##' dstable() approximation for very small alpha > 0;
##' valid only for x > zeta := -beta * tan(pi/2 * alpha).
dstable.smallA <- function(x, alpha, beta, log = FALSE) {
    ld <- log(alpha) + log1p(beta) - (1 + log(2*x + pi*alpha*beta))
    if (log) ld else exp(ld)
}
## 1e-17: seems "good", but not "optimized" at all -- hidden for now
.alpha.small.dstable <- 1e-17
##' Workhorse for dstable() when alpha != 1: computes f(x) for ONE scalar x
##' via Nolan's integral representation
##'   f(x) = c2 * Integral_{-theta0}^{pi/2} g(th) exp(-g(th)) d th .
##' The integrand has a single, possibly very narrow peak, so the integral
##' is split at the peak (g == 1) and at the points where g1 := g*exp(-g)
##' drops below a small threshold.
.fct1 <- function(x, zeta, alpha, beta, theta0, log,
                  tol, subdivisions, zeta.tol,
                  verbose = getOption("dstable.debug", default=FALSE))
{
    ## --- dstable(x, alpha, beta, ..) for alpha < 2 ---
    ## For x = zeta, have special case formula [Nolan(1997)];
    ## need to use it also for x ~= zeta, i.e., x.m.zet := |x - zeta| < delta :
    stopifnot(is.finite(zeta))
    x.m.zet <- abs(x - zeta)
    ## closed-form density at the "center" x == zeta :
    f.zeta <- function(log)
        if(log)
            lgamma(1+1/alpha)+ log(cos(theta0)) - (log(pi)+ log1p(zeta^2)/(2*alpha))
        else
            gamma(1+1/alpha)*cos(theta0) / (pi*(1+zeta^2)^(1/(2*alpha)))
    ## Modified: originally was  if (z == zeta),
    ## then (D.W.)  if (x.m.zet < 2 * .Machine$double.eps)
    ## then (M.M.)  if (x.m.zet <= 1e-5 * abs(x))
    if(is.finite(x) && x.m.zet <= zeta.tol * (zeta.tol+ max(abs(x),abs(zeta)))) {
        if(verbose)
            cat(sprintf(".fct1(%.11g, %.10g,..): x ~= zeta => using f.zeta()\n",
                        x, zeta))
        return(f.zeta(log))
    }
    ## the real check should be about the feasibility of g() below, or its integration
    smallAlpha <- (alpha < .alpha.small.dstable)
    if(x < zeta) {
        theta0 <- -theta0 # see Nolan(1997), Thm.1 (c): f(-x; beta) = f(x; -beta)
        if(smallAlpha) {
            beta <- -beta
            x <- -x
        }
    }
    if(smallAlpha) {
        ## here, *MUST* have  __ x > zeta __
        if(verbose)
            cat(sprintf(".fct1(%.11g, %.10g,..): small alpha=%g\n",
                        x, zeta, alpha))
        return(dstable.smallA(x, alpha, beta, log=log))
    }
    ## constants ( independent of integrand g1(th) = g*exp(-g) ):
    ## zeta <- -beta * tan(pi*alpha/2)
    ## theta0 <- (1/alpha) * atan( beta * tan(pi*alpha/2))
    ## x.m.zet <- abs(x - zeta)
    ##-------->>> identically as in .FCT1() for pstable() below: <<<-----------
    a_1 <- alpha - 1
    cat0 <- cos(at0 <- alpha*theta0)
    ##' g() is strictly monotone -- Nolan(1997) ["3. Numerical Considerations"]
    ##' alpha >= 1 <==> g() is falling, ie. from Inf --> 0; otherwise growing from 0 to +Inf
    g <- function(th) {
        r <- th
        ## g(-pi/2) or g(pi/2) could become NaN --> work around
        i.bnd <- abs(pi2 -sign(a_1)*th) < 64*.Machine$double.eps
        r[i.bnd] <- 0
        th <- th[io <- !i.bnd]
        att <- at0 + alpha*th ## = alpha*(theta0 + theta)
        r[io] <- (cat0 * cos(th) * (x.m.zet/sin(att))^alpha)^(1/a_1) * cos(att-th)
        r
    }
    ## Function to integrate: dstable(..)= f(..) = c2 * \int_{-\theta_0}^{\pi/2} g1(u) du
    g1 <- function(th) {
        ## g1 := g(.) exp(-g(.))
        x.exp.m.x( g(th) )
    }
    c2 <- ( alpha / (pi*abs(a_1)*x.m.zet) )
    ## Now, result = c2 * \int_{-t0}^{pi/2} g1(u) du , we "only" need the integral
    ## where however, g1(.) may look to be (almost) zero almost everywhere and just have a small peak
    ## ==> Find the peak, split the integral into two parts of for intervals (t0, t_max) + (t_max, pi/2)
    ## However, this may still be bad, e.g., for dstable(71.61531, alpha=1.001, beta=0.6),
    ## or dstable(1.205, 0.75, -0.5)
    ## the 2nd integral was "completely wrong" (basically zero, instead of ..e-5)
    ## NB: g() is monotone, see above
    if((alpha >= 1 &&
        ((!is.na(g. <- g( pi2 )) && g. > .large.exp.arg) || identical(g(-theta0), 0))) ||
       (alpha < 1 &&
        ((!is.na(g. <- g(-theta0)) && g. > .large.exp.arg) || identical(g(pi2), 0)))) {
        ## g() is numerically too large *or* 0 even where it should be inf
        ## ===> g() * exp(-g()) is 0 everywhere
        if(verbose)
            cat(sprintf(".fct1(%.11g, %.10g,..): g() is 'Inf' (or 0) ==> result 0", x,zeta))
        return(if(log)-Inf else 0)
    }
    ## probe g() just inside the boundary where it should be large:
    g. <- if(alpha >= 1) g(.e.plus(-theta0, 1e-6)) else g(pi2..(1e-6))
    if(is.na(g.))# g() is not usable --- FIXME rather use *asymptotic dPareto()?
        if(max(x.m.zet, x.m.zet / abs(x)) < .01)
            return(f.zeta(log))
    if(verbose)
        cat(sprintf(".fct1(%.11g, %.10g,..): c2*sum(r[1:4])= %.11g*", x,zeta, c2))
    Int <- function(a,b)
        .integrate2(g1, lower = a, upper = b,
                    subdivisions=subdivisions, rel.tol= tol, abs.tol= tol)
    ## We know that the maximum of g1(.) is = exp(-1) = 0.3679 "at" g(.) == 1
    ## find that by uniroot :
    ## g(.) == 1 <==> log(g(.)) == 0 --- the latter is better conditioned,
    ## e.g., for (x = -1, alpha = 0.95, beta = 0.6)
    ## the former is better for dstable(-122717558, alpha = 1.8, beta = 0.3, pm = 1)
    ## However, it can be that the maximum is at the boundary, and
    ## g(.) > 1 everywhere or g(.) < 1 everywhere {in that case we could revert to optimize..}
    if((alpha >= 1 && !is.na(g. <- g(pi2)) && g. > 1) ||
       (alpha < 1 && !is.na(g. <- g(pi2)) && g. < 1))
        g1.th2 <- g1( theta2 <- pi2..(1e-6) )
    else if((alpha < 1 && g(-theta0) > 1) ||
            (alpha >= 1 && g(-theta0) < 1))
        g1.th2 <- g1( theta2 <- .e.plus(-theta0, 1e-6) )
    else {
        ## when alpha ~=< 1 (0.998 e.g.), g(x) is == 0 (numerically) on a wide range;
        ## uniroot is not good enough, and we should *increase* -theta0
        ## or decrease pi2 such that it can find the root:
        l.th <- -theta0
        u.th <- pi2
        if(alpha < 1) { ## g() is *in*creasing from 0 ..
            while ((g.t <- g(.th <- (l.th + pi2)/2)) == 0) l.th <- .th
            if(g.t == 1)# decrease upper limit {needed, e.g. for alpha = 1e-20}
                while ((g.t <- g(.th <- (l.th + u.th)/2)) == 1) u.th <- .th
            if(abs(u.th - l.th) < 1e-13)# do not trust g()
                return(if(log)-Inf else 0)
            if(verbose >= 2)
                cat(sprintf("\n -theta0=%g %s l.th=%g .. u.th=%g <= pi/2\n",
                            -theta0, if(-theta0 == l.th) "=" else "<",
                            l.th, u.th))
        }
        ## locate the peak both via g == 1 and via log(g) == 0, keep the better:
        ur1 <- uniroot(function(th) g(th) - 1,
                       lower = l.th, upper = u.th, tol = .Machine$double.eps)
        ## consider using safeUroot() [ ~/R/Pkgs/copula/R/safeUroot.R ] !!
        ur2 <- tryCatch(uniroot(function(th) log(g(th)),
                                lower = l.th, upper = u.th, tol = .Machine$double.eps),
                        error=function(e)e)
        g.1 <- x.exp.m.x(ur1$f.root+1)
        g.2 <- if(inherits(ur2, "error")) -Inf else x.exp.m.x(exp(ur2$f.root))
        if(g.1 >= g.2) {
            theta2 <- ur1$root
            g1.th2 <- g.1 ## == g1(theta2)
        } else {
            theta2 <- ur2$root
            g1.th2 <- g.2
        }
    }
    ## now, because g1()'s peak (at th = theta2) may be extreme, we find two more intermediate values
    ## NB: Theoretically:  Max = 0.3679 = g1(theta2)  ==> 1e-4 is a very small fraction of that
    eps <- 1e-4
    ## to the left of the peak:
    if((do1 <- g1.th2 > eps && g1(-theta0) < eps))
        th1 <- uniroot(function(th) g1(th) - eps, lower = -theta0, upper = theta2,
                       tol = tol)$root
    ## to the right of the peak:
    if((do4 <- g1.th2 > eps && g1(pi2) < eps))
        th3 <- uniroot(function(th) g1(th) - eps, lower = theta2, upper = pi2,
                       tol = tol)$root
    if(do1) {
        r1 <- Int(-theta0, th1)
        r2 <- Int( th1, theta2)
    } else {
        r1 <- 0
        r2 <- Int(-theta0, theta2)
    }
    if(do4) {
        r3 <- Int( theta2, th3)
        r4 <- Int( th3, pi2)
    } else {
        r3 <- Int( theta2, pi2)
        r4 <- 0
    }
    if(verbose)
        cat(sprintf("(%6.4g + %6.4g + %6.4g + %6.4g)= %g\n",
                    r1,r2,r3,r4, c2*(r1+r2+r3+r4)))
    if(log)
        log(c2)+ log(r1+r2+r3+r4)
    else
        c2*(r1+r2+r3+r4)
} ## {.fct1}
## ------------------------------------------------------------------------------
##' Auxiliary for dstable(), only used when alpha == 1 :
##' computes f(x) for ONE scalar x >= 0 via the alpha == 1 form of Nolan's
##' integral representation, after the substitution u = th / (pi/2).
##' @param x numeric *scalar*, >= 0 (dstable() maps x < 0 to -beta)
##' @param beta skewness, 0 < |beta| <= 1
##' @param log if TRUE, return log(f(x))
##' @param tol relative/absolute tolerance for the numerical integration
##' @param subdivisions maximal number of subintervals for integrate()
.fct2 <- function(x, beta, log, tol, subdivisions,
                  verbose = getOption("dstable.debug", default=FALSE))
{
    i2b <- 1/(2*beta)
    p2b <- pi*i2b # = pi/(2 beta)
    ea <- -p2b*x
    ## exp(ea) == 0 => the whole integrand vanishes:
    if(is.infinite(ea)) return(if(log)-Inf else 0)
    ##' g() is strictly monotone;
    ##' g(u) := original_g(u*pi/2)
    ##' for beta > 0: increasing from g(-1) = 0 to g(+1) = Inf
    ##' for beta < 0: decreasing from g(-1) = Inf to g(+1) = 0
    ##t0 <- -sign(beta)*pi2# g(t0) == 0 mathematically, but not always numerically
    u0 <- -sign(beta)# g(u0) == 0 mathematically, but not always numerically
    g <- function(u) {
        r <- u
        ## clamp to 0 near u0 where g() underflows / misbehaves:
        r[i <- abs(u-u0) < 1e-10] <- 0
        u <- u[!i]
        th <- u*pi2
        h <- p2b+ th # == g'/beta where g' := pi/2 + beta*th = pi/2* (1 + beta*u)
        r[!i] <- (h/p2b) * exp(ea + h*tanpi2(u)) / cospi2(u)
        r
    }
    ## Function to integrate; u is a non-sorted vector!
    g2 <- function(u) {
        ## g2 = g(.) exp(-g(.))
        x.exp.m.x( g(u) )
    }
    ## We know that the maximum of g2(.) is = exp(-1) = 0.3679 "at" g(.) == 1;
    ## find that by uniroot, and split the integral at the peak:
    ur <- uniroot(function(u) g(u) - 1, lower = -1, upper = 1, tol = tol)
    u2 <- ur$root
    r1 <- .integrate2(g2, lower = -1, upper = u2,
                      subdivisions = subdivisions, rel.tol = tol, abs.tol = tol)
    r2 <- .integrate2(g2, lower = u2, upper = 1,
                      subdivisions = subdivisions, rel.tol = tol, abs.tol = tol)
    if(verbose) {
        cc <- pi2*abs(i2b)
        cat(sprintf(".fct2(%.11g, %.6g,..): c*sum(r1+r2)= %.11g*(%6.4g + %6.4g)= %g\n",
                    x,beta, cc, r1, r2, cc*(r1+r2)))
    }
    if(log)
        log(pi2) + log(abs(i2b)) + log(r1 + r2)
    else
        pi2*abs(i2b)*(r1 + r2)
}## {.fct2}
### ------------------------------------------------------------------------------
pstable <- function(q, alpha, beta, gamma = 1, delta = 0, pm = 0,
                    lower.tail = TRUE, log.p = FALSE,
                    tol = 64*.Machine$double.eps, subdivisions = 1000)
{
    ## A function implemented by Diethelm Wuertz
    ## Purpose:
    ##   Cumulative probability P[X <= q] of the stable distribution
    ##   (or its complement / log, per lower.tail and log.p), via the
    ##   integral representations evaluated in .FCT1() / .FCT2().
    x <- q
    ## Parameter check -- NB: (gamma, delta) can be *vector*s (recycled along x):
    stopifnot( 0 < alpha, alpha <= 2, length(alpha) == 1,
              -1 <= beta, beta <= 1, length(beta) == 1,
              0 <= gamma, length(pm) == 1, pm %in% 0:2,
              tol > 0, subdivisions > 0)
    ## Map parameterizations 1 and 2 to the default 'pm = 0':
    if (pm == 1) {
        delta <- delta + beta*gamma * .om(gamma,alpha)
    } else if (pm == 2) {
        delta <- delta - alpha^(-1/alpha)*gamma*stableMode(alpha, beta)
        gamma <- alpha^(-1/alpha) * gamma
    } ## else pm == 0
    ## Standardize -- shift and scale:
    x <- (x - delta) / gamma
    ## Return directly
    ## ------ first, special cases with closed forms:
    if (alpha == 2) {
        pnorm(x, mean = 0, sd = sqrt(2), lower.tail=lower.tail, log.p=log.p)
    } else if (alpha == 1 && beta == 0) {
        pcauchy(x, lower.tail=lower.tail, log.p=log.p)
    } else {
        ## map a lower-tail probability F to the requested tail / log scale:
        retValue <- function(F, useLower) { ## (vectorized in F)
            if(useLower) {
                if(log.p) log(F) else F
            } else { ## upper: 1 - F
                if(log.p) log1p(-F) else 1 - F
            }
        }
        ## General Case
        if (alpha != 1) { ## 0 < alpha < 2  &  |beta| <= 1  from above
            tanpa2 <- tan(pi2*alpha)
            zeta <- -beta * tanpa2
            theta0 <- min(max(-pi2, atan(-zeta) / alpha), pi2)
            if(finSupp <- (abs(beta) == 1 && alpha < 1)) {
                ## has *finite* support   [zeta, Inf)   if beta == 1
                ##                        (-Inf, zeta]  if beta == -1
            }
            ## Loop over all x values:
            vapply(x, function(z) {
                if(finSupp) {
                    ## outside the support, F is exactly 0 or 1:
                    if(beta == 1 && z <= zeta)
                        return(retValue(0., useLower=lower.tail))
                    else if(beta == -1 && z >= zeta)
                        return(retValue(1., useLower=lower.tail))
                    ## else .. one of the cases below
                }
                if(abs(z - zeta) < 2 * .Machine$double.eps) {
                    ## FIXME? same problem as dstable
                    r <- if(lower.tail) (1/2- theta0/pi) else 1/2+ theta0/pi
                    if(log.p) log(r) else r
                } else {
                    ## choose the tail that avoids cancellation where possible:
                    useLower <-
                        ((z > zeta && lower.tail) ||
                         (z < zeta && !lower.tail))
                    ## FIXME: for alpha > 1 -- the following computes F1 = 1 -c3*r(x)
                    ## and suffers from cancellation when 1-F1 is used below:
                    giveI <- !useLower && alpha > 1 # if TRUE, .FCT1() returns 1-F
                    .F1 <- .FCT1(z, zeta, alpha=alpha, theta0=theta0,
                                 giveI = giveI,
                                 tol = tol, subdivisions = subdivisions)
                    if(giveI)
                        if(log.p) log(.F1) else .F1
                    else retValue(.F1, useLower=useLower)
                }
            }, 0.)
        }
        ## Special Case alpha == 1 and  -1 <= beta <= 1 (but not = 0) :
        else { ## (alpha == 1) and 0 < |beta| <= 1 from above
            useL <-
                if(beta >= 0)
                    lower.tail
                else { ## P[X <= x; -beta] = P[X >= -x; beta]
                    beta <- -beta
                    x <- -x
                    !lower.tail
                }
            if(giveI <- !useL && !log.p)
                useL <- TRUE
            ## Loop over all x values:
            retValue(vapply(x, function(z)
                .FCT2(z, beta = beta, tol=tol, subdivisions=subdivisions,
                      giveI = giveI),
                0.),
                useLower = useL)
        }
    }
}## {pstable}
## ------------------------------------------------------------------------------
##' Auxiliary for pstable() (for alpha != 1): computes F(x) for ONE scalar x
##' -- or the upper tail 1 - F(x) when giveI is TRUE -- via Nolan's integral
##' representation  F(x) = c1 + c3 * Integral exp(-g(th)) d th .
.FCT1 <- function(x, zeta, alpha, theta0, giveI, tol, subdivisions,
                  verbose = getOption("pstable.debug", default=FALSE))
{
    if(is.infinite(x)) ## x = +-Inf ==> F = 1 (resp. 1 - F = 0)
        return(if(giveI) 0 else 1)
    stopifnot(is.finite(zeta))
    x.m.zet <- abs(x - zeta)
    ##-------->>> identically as in .fct1() for dstable() above: <<<-----------
    ## FIXME: also provide "very small alpha" case, as in .fct1()
    if(x < zeta) theta0 <- -theta0 ## Nolan(1997), Thm.1 (c)
    a_1 <- alpha - 1
    cat0 <- cos(at0 <- alpha*theta0)
    ## g() is strictly monotone (see .fct1() above); integrand is exp(-g(.)):
    g <- function(th) {
        r <- th
        ## g(-pi/2) or g(pi/2) could become NaN --> work around
        i.bnd <- abs(pi2 -sign(a_1)*th) < 64*.Machine$double.eps
        r[i.bnd] <- 0
        th <- th[io <- !i.bnd]
        att <- at0 + alpha*th ## = alpha*(theta0 + theta)
        r[io] <- (cat0 * cos(th) * (x.m.zet/sin(att))^alpha)^(1/a_1) * cos(att-th)
        r
    }
    if(verbose) cat(sprintf(".FCT1(%9g, %10g, th0=%.10g, %s..): ",
                            x,zeta, theta0, if(giveI)"giveI=TRUE," else ""))
    ## as g() is monotone, the integrand exp(-g(.)) is too ==> maximum is at the boundary;
    ## however, integration can be inaccurate when g(.) quickly jumps from Inf to 0.
    ## _BUT_ empirically I find that good values l.th / u.th below are *INDEPENDENT* of x,
    l.th <- .e.plus(-theta0, 1e-6)
    if(alpha > 1 && g(l.th) == Inf) {
        ## shrink the lower limit to where g() stops overflowing to Inf:
        ur <- uniroot(function(t) 1-2*(g(t)==Inf), lower=l.th, upper=pi2,
                      f.lower= -1, f.upper= 1, tol = 1e-8)
        l.th <- ur$root
        if(verbose) cat(sprintf(" g(-th0 +1e-6)=Inf: unirt(%d it) -> l.th=%.10g ",
                                ur$iter, l.th))
    }
    u.th <- .e.minus(pi2, 1e-6)
    if(alpha < 1 && g(u.th) == Inf) {
        ## shrink the upper limit analogously:
        ur <- uniroot(function(t) 1-2*(g(t)==Inf), lower=l.th, upper=u.th,
                      f.upper= -1, tol = 1e-8)
        u.th <- ur$root
        if(verbose) cat(sprintf(" g(pi/2 -1e-6)=Inf: unirt(%d it) -> u.th=%.10g ",
                                ur$iter, u.th))
    }
    r <- .integrate2(function(th) exp(-g(th)),
                     lower = l.th, upper = u.th, subdivisions = subdivisions,
                     rel.tol = tol, abs.tol = tol)
    if(verbose) cat(sprintf("--> Int r= %.11g\n", r))
    if(giveI) { ## { ==> alpha > 1 ==> c1 = 1; c3 = -1/pi}
        ## return (1 - F) = 1 - (1 -1/pi * r) = r/pi :
        r/pi
    } else {
        c1 <- if(alpha < 1) 1/2 - theta0/pi else 1
        c3 <- sign(1-alpha)/pi
        ## FIXME: for alpha > 1, F = 1 - |.|*r(x)
        ## <==> cancellation iff we eventually want 1 - F() [-> 'lower.tail']
        c1 + c3* r
    }
} ## {.FCT1}
## ------------------------------------------------------------------------------
##' Auxiliary for pstable(), only used when alpha == 1 :
##' computes F(x) for ONE scalar x (or 1 - F when giveI), after pstable()
##' has mapped beta < 0 onto beta > 0.
##' @param x numeric *scalar*
##' @param beta skewness, >= 0 here
##' @param tol relative/absolute tolerance for the numerical integration
##' @param subdivisions maximal number of subintervals for integrate()
##' @param giveI if TRUE, return the upper tail 1 - F instead of F
.FCT2 <- function(x, beta, tol, subdivisions, giveI = FALSE,
                  verbose = getOption("pstable.debug", default=FALSE))
{
    i2b <- 1/(2*beta)
    p2b <- pi*i2b # = pi/(2 beta)
    ea <- -p2b*x
    if(is.infinite(ea)) ## x = +-Inf ==> F is exactly 1 resp. 0:
        return(R.D.Lval(if(ea < 0) ## == -Inf ==> g(.) == 0 ==> G2(.) == 1
                        1 else 0, ## == +Inf ==> g(.) == Inf ==> G2(.) == 0
                        lower.tail= !giveI))
    ##' g() is strictly monotone;
    ##' g(u) := original_g(u*pi/2)
    ##' for beta > 0: increasing from g(-1) = 0 to g(+1) = Inf
    ##' for beta < 0: decreasing from g(-1) = Inf to g(+1) = 0
    ## original_g :
    ## g <- function(th) {
    ## h <- p2b+ th # == g'/beta where g' := pi/2 + beta*th
    ## (h/p2b) * exp(ea + h*tan(th)) / cos(th)
    ## }
    ##t0 <- -pi2# g(t0) == 0 mathematically, but not always numerically
    u0 <- -1 # g(u0) == 0 mathematically, but not always numerically
    g <- function(u) {
        r <- u
        ## clamp to 0 near u0 where g() underflows / misbehaves:
        r[i <- abs(u-u0) < 1e-10] <- 0
        u <- u[!i]
        th <- u*pi2
        h <- p2b+ th # == g'/beta where g' := pi/2 + beta*th = pi/2* (1 + beta*u)
        r[!i] <- (h/p2b) * exp(ea + h*tanpi2(u)) / cospi2(u)
        r
    }
    if(verbose)
        cat(sprintf(".FCT2(%.11g, %.6g, %s..): ",
                    x,beta, if(giveI) "giveI=TRUE," else ""))
    ## g(-u0) == +Inf {at other end}, mathematically ==> exp(-g(.)) == 0
    ## in the outer tails, the numerical integration can be inaccurate,
    ## because g(.) jumps from 0 to Inf, but is 0 almost always
    ## <==> g1(.) = exp(-g(.)) jumps from 1 to 0 and is 1 almost everywhere
    ## ---> the integration "does not see the 0" and returns too large..
    u. <- 1
    if(g(uu <- .e.minus(u., 1e-6)) == Inf) {
        ## shrink the upper limit to where g() stops overflowing to Inf:
        ur <- uniroot(function(t) 1-2*(g(t)==Inf), lower=-1, upper= uu,
                      f.lower= +1, f.upper= -1, tol = 1e-8)
        u. <- ur$root
        if(verbose) cat(sprintf(" g(%g)=Inf: unirt(%d it) -> u.=%.10g",
                                uu, ur$iter, u.))
    }
    ##' G2(.) = exp(-g(.)) is strictly monotone .. no need for 'theta2' !
    G2 <- if(giveI) function(u) expm1(-g(u)) else function(u) exp(-g(u))
    r <- .integrate2(G2, lower = -1, upper = u.,
                     subdivisions = subdivisions, rel.tol = tol, abs.tol = tol) / 2
    if(verbose) cat(sprintf("--> Int r= %.11g\n", r))
    if(giveI) -r else r
}## {.FCT2}
### ------------------------------------------------------------------------------
## -- tail-flipping utilities (==^== the macros in R's src/nmath/dpq.h) :
R.D.Lval <- function(p, lower.tail) if (lower.tail) p else 1 - p # p
R.D.Cval <- function(p, lower.tail) if (lower.tail) 1 - p else p # 1 - p
## R.D.qIv <- function(p, log.p) if(log.p) exp(p) else p # p in qF(p,..)
##' == R.D.Lval(R.D.qIv(p)) "===" p in qF() :
##' convert (p, lower.tail, log.p) to a plain lower-tail probability.
R.DT.qIv <- function(p, lower.tail, log.p) {
    if (log.p) {
        if (lower.tail) exp(p) else -expm1(p)
    } else {
        R.D.Lval(p, lower.tail)
    }
}
##' == R.D.Cval(R.D.qIv(p)) "===" (1 - p) in qF() :
##' convert (p, lower.tail, log.p) to a plain upper-tail probability.
R.DT.CIv <- function(p, lower.tail, log.p) {
    if (log.p) {
        if (lower.tail) -expm1(p) else exp(p)
    } else {
        R.D.Cval(p, lower.tail)
    }
}
qstable <- function(p, alpha, beta, gamma = 1, delta = 0, pm = 0,
                    lower.tail = TRUE, log.p = FALSE,
                    tol = .Machine$double.eps^0.25, maxiter = 1000, trace = 0,
                    integ.tol = 1e-7, subdivisions = 200)
{
    ## A function implemented by Diethelm Wuertz
    ## Purpose:
    ##   Quantiles of the stable distribution: numerically invert pstable()
    ##   by root finding inside an approximate bracketing interval.
    ## Parameter check -- NB: (gamma, delta) can be *vector*s (recycled along p):
    stopifnot( 0 < alpha, alpha <= 2, length(alpha) == 1,
              -1 <= beta, beta <= 1, length(beta) == 1,
              0 <= gamma, length(pm) == 1, pm %in% 0:2,
              tol > 0, subdivisions > 0)
    ## Map parameterizations 1 and 2 to the default 'pm = 0':
    if (pm == 1) {
        delta <- delta + beta*gamma * .om(gamma,alpha)
    } else if (pm == 2) {
        delta <- delta - alpha^(-1/alpha)*gamma*stableMode(alpha, beta)
        gamma <- alpha^(-1/alpha) * gamma
    } ## else pm == 0
    result <-
        ## Special cases with closed forms:
        if (alpha == 2)
            qnorm(p, mean = 0, sd = sqrt(2), lower.tail=lower.tail, log.p=log.p)
        else if (alpha == 1 && beta == 0)
            qcauchy(p, lower.tail=lower.tail, log.p=log.p)
        else { ## -------------- 0 < alpha < 2 ---------------
            ## root function pstable(x) - p, in the standardized (pm = 0) scale:
            .froot <- function(x, p) {
                pstable(q = x, alpha=alpha, beta=beta, pm = 0,
                        lower.tail=lower.tail, log.p=log.p,
                        tol=integ.tol, subdivisions=subdivisions) - p
            }
            ## normal and Cauchy quantiles serve as approximate interval anchors:
            .qN <- function(p) qnorm (p, mean = 0, sd = sqrt(2),
                                      lower.tail=lower.tail, log.p=log.p)
            .qC <- function(p) qcauchy(p, lower.tail=lower.tail, log.p=log.p)
            ## Compute a single quantile:
            qst1 <- function(pp) {
                ## 1) Find narrow interval [xmin, xmax] -----------------------
                ## NB: will deal with a too narrow interval later
                p0 <- R.DT.qIv(pp, lower.tail=lower.tail, log.p=log.p)
                left <- p0 < 0.5
                if (beta < 0) {
                    xmin <- -R.DT.CIv(pp, lower.tail=lower.tail, log.p=log.p)/p0
                    xmax <- if (left) .qN(pp) else .qC(pp)
                }
                else if (beta > 0 ) {
                    xmin <- if (left) .qC(pp) else .qN(pp)
                    xmax <- p0/R.DT.CIv(pp, lower.tail=lower.tail, log.p=log.p)
                }
                else { ## (beta == 0)
                    xmin <- if (left) .qC(pp) else .qN(pp)
                    xmax <- if (left) .qN(pp) else .qC(pp)
                }
                if(xmin >= xmax) { # fixup interval such that xmin < xmax
                    fdx <- if(xmin == xmax) .01*max(1e-7, abs(xmin)) else 1.01*(xmin-xmax)
                    xmin <- xmin - fdx
                    xmax <- xmax + fdx
                    stopifnot(xmin < xmax)
                }
                ## 2) root-finding pstable(..) = p inside the interval: -------
                dx <- 1
                repeat { ## widen the interval until a sign change is bracketed
                    root <- .unirootNA(.froot, interval = c(xmin, xmax), p = pp,
                                       extendInt = if(lower.tail) "upX" else "downX",
                                       tol=tol, maxiter=maxiter, trace=trace)
                    if(!is.na(root))
                        break
                    xmin <- xmin- dx
                    xmax <- xmax+ dx
                    if(xmin == -Inf && xmax == +Inf)
                        stop("could not find an interval for x where pstable(x,*) - p changes sign")
                    dx <- dx * 2
                }
                root
            }
            vapply(p, qst1, 0.)
        }
    ## Result: undo the standardization
    result * gamma + delta
}
## ------------------------------------------------------------------------------
rstable <- function(n, alpha, beta, gamma = 1, delta = 0, pm = 0)
{
    ## Purpose:
    ##   Random variates of the stable distribution via the
    ##   Chambers-Mallows-Stuck representation (slightly amended along
    ##   copula::rstable1).
    ## Arguments: as for dstable(); 'n' = sample size.
    ## NB: (gamma, delta) can be *vector*s (recycled along the sample).

    ## Parameter check:
    stopifnot( 0 < alpha, alpha <= 2, length(alpha) == 1,
              -1 <= beta, beta <= 1, length(beta) == 1,
              0 <= gamma, length(pm) == 1, pm %in% 0:2)
    ## Map parameterizations 1 and 2 to the default 'pm = 0':
    if (pm == 1) {
        delta <- delta + beta*gamma * .om(gamma,alpha)
    } else if (pm == 2) {
        delta <- delta - alpha^(-1/alpha)*gamma*stableMode(alpha, beta)
        gamma <- alpha^(-1/alpha) * gamma
    } ## else pm == 0
    ## Uniform and exponential distributed random numbers
    ## (drawn unconditionally, so the RNG stream is the same in both branches):
    theta <- pi * (runif(n)-1/2)
    w <- -log(runif(n))
    result <-
        ## BUG FIX: scalar condition previously used elementwise '&';
        ## use the short-circuiting scalar operator '&&' in 'if':
        if (alpha == 1 && beta == 0) {
            rcauchy(n)
        } else { ## general Chambers-Mallows-Stuck formula
            ## FIXME: learn from nacopula::rstable1R()
            b.tan.pa <- beta*tan(pi2*alpha)
            theta0 <- min(max(-pi2, atan(b.tan.pa) / alpha), pi2)
            ## ('c.' rather than 'c': do not shadow base::c)
            c. <- (1+b.tan.pa^2)^(1/(2*alpha))
            a.tht <- alpha*(theta+theta0)
            r <- ( c.*sin(a.tht)/
                  (cos(theta))^(1/alpha) ) *
                (cos(theta-a.tht)/w)^((1-alpha)/alpha)
            ## Use Parametrization 0:
            r - b.tan.pa
        }
    ## Result: scale and shift
    result * gamma + delta
}
## ------------------------------------------------------------------------------
##' Numerically integrate f over (lower, upper) -- basically the same as
##' R's integrate(), with the main difference that integration problems are
##' reported as *warnings* rather than errors, and the (possibly inaccurate)
##' value is still returned.
.integrate2 <- function(f, lower, upper, ..., subdivisions, rel.tol, abs.tol,
                        stop.on.error = FALSE)
{
    res <- integrate(f, lower, upper, ..., subdivisions=subdivisions,
                     rel.tol=rel.tol, abs.tol=abs.tol, stop.on.error=stop.on.error)
    msg <- res[["message"]]
    if (msg != "OK")
        warning(msg) ## NB: "roundoff error ..." happens many times
    res[["value"]]
}
| /pkg/stabledist/R/dpqr-stable.R | no_license | xashely/rmetrics | R | false | false | 33,303 | r | # Part of R package 'stabledist' (part of the Rmetrics project).
## The stabledist R package is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This R package is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Library General Public License for more details.
##
## A copy of the GNU General Public License is available at
## http://www.r-project.org/Licenses/
################################################################################
## FUNCTIONS: DESCRIPTION:
## dstable Returns density for stable DF
## pstable Returns probabilities for stable DF
## qstable Returns quantiles for stable DF
## rstable Returns random variates for stable DF
## UTILITY FUNCTION DESCRIPTION:
## .integrate2 Integrates internal functions for *stable
################################################################################
##==============================================================================
### MM TODO:
## 0) All d, p, q, r -- have identical parts
## a) 'parametrization' (pm) check
## b) checking, alpha, beta,
## c) subdivisions etc {all but rstable}
## --- to do: "Fix" these in dstable(), then copy/paste to others
##==============================================================================
pi2 <- pi/2 # cached pi/2 -- used in nearly every function of this file
##' omega() according to Lambert & Lindsey (1999), p.412: the shift used to
##' map parametrization 'pm = 1' onto the default 'pm = 0'.
##'
##' @param gamma [dpqr]stable()'s scale parameter, > 0 -- of length 1
##' @param alpha [dpqr]stable()'s "main" parameter, in [0, 2] -- of length 1
##' @return tan(pi/2 * alpha) for non-integer alpha; (2/pi) * log(gamma)
##'         for alpha == 1; and 0 for alpha == 0 or alpha == 2.
.om <- function(gamma, alpha) {
    if (alpha == round(alpha)) { # integer alpha: 0, 1 or 2
        if (alpha == 1) (2/pi) * log(gamma) else 0
    } else {
        ## NOT tanpi2(): alpha ranges over [0, 2], outside tanpi2()'s domain
        tan(pi2 * alpha)
    }
}
##' C_alpha -- the tail constant of the stable distribution:
##' C_alpha = gamma(alpha) / pi * sin(alpha * pi/2), with the limits
##' C_0 = 1/2 and C_2 = 0 handled exactly.
##'
##' @param alpha numeric vector of stable tail parameters, in [0,2]
##' @param log logical; if TRUE, return log(C_alpha)
##' @return numeric vector of tail constants (or their logs)
##' @author Martin Maechler
C.stable.tail <- function(alpha, log = FALSE) {
    stopifnot(0 <= alpha, alpha <= 2)
    res <- alpha
    zero <- alpha == 0
    res[zero] <- if (log) -log(2) else 0.5 # limit alpha -> 0
    a <- alpha[!zero]
    res[!zero] <-
        if (log) lgamma(a) - log(pi) + log(sin(a * pi2))
        else gamma(a)/pi * sin(a * pi2)
    two <- alpha == 2
    if (any(two)) # sin(pi) is not exactly 0 in floating point
        res[two] <- if (log) -Inf else 0
    res
}
##' tan(pi/2 * x) for x in [-1, 1], taking the limits exactly:
##' tanpi2(-1) == -Inf and tanpi2(+1) == +Inf (a plain tan(pi/2) is finite
##' in floating point).
##'
##' @param x numeric vector with values in [-1, 1]
##' @return numeric vector of values tan(pi/2 * x)
##' @author Martin Maechler
tanpi2 <- function(x) {
    res <- x
    ii <- x & x == round(x) # non-zero integers, i.e. -1 / +1 here
    if (any(ii))
        res[ii] <- (2 - (x[ii] %% 4)) * Inf # maps +1 -> +Inf, -1 -> -Inf
    reg <- which(!ii)
    res[reg] <- tan(pi2 * x[reg])
    res
}
##' cos(pi/2 * x) for x in [-1, 1], taking the limits exactly:
##' cospi2(+-1) == 0 (a plain cos(pi/2) is only approximately 0).
##'
##' @param x numeric vector with values in [-1, 1]
##' @return numeric vector of values cos(pi/2 * x)
##' @author Martin Maechler
cospi2 <- function(x) {
    res <- x
    ii <- x == round(x) # integers; for x in [-1, 1]: -1, 0, +1
    if (any(ii))
        res[ii] <- as.numeric(x[ii] == 0) # cos(0) = 1; cos(+-pi/2) = 0 exactly
    reg <- which(!ii)
    res[reg] <- cos(pi2 * x[reg])
    res
}
##' Tail-approximation density for dstable(): following Nolan's "tail.pdf"
##' paper, differentiating the tail approximation
##'   1 - F(x) ~ (1 + beta) C_alpha x^-alpha
##' gives
##'   f(x) ~ alpha (1 + beta) C_alpha x^-(1 + alpha).
##'
##' @param x numeric vector of abscissa values
##' @param alpha stable index
##' @param beta skewness in [-1, 1]
##' @param log logical; if TRUE, return log(f(.))
##' @return the Pareto tail approximation of the stable density at x
##' @author Martin Maechler
dPareto <- function(x, alpha, beta, log = FALSE) {
    left <- x < 0
    if (any(left)) { ## mirror the left tail: f(-x; beta) = f(x; -beta)
        x[left] <- -x[left]
        beta <- rep(beta, length.out = length(x))
        beta[left] <- -beta[left]
    }
    if (log)
        log(alpha) + log1p(beta) + C.stable.tail(alpha, log=TRUE) - (1 + alpha) * log(x)
    else
        alpha * (1 + beta) * C.stable.tail(alpha) * x^(-(1 + alpha))
}
##' Tail-approximation CDF for pstable(): upper tail
##'   iF := 1 - F(x) ~ (1 + beta) C_alpha x^-alpha  for large x > 0.
##' Negative x is deliberately rejected (see FIXME below).
##'
##' @param x numeric vector of abscissa values (must be >= 0)
##' @param alpha stable index
##' @param beta skewness in [-1, 1]
##' @param lower.tail,log.p as in pstable()
##' @return the Pareto tail approximation of the stable CDF at x
pPareto <- function(x, alpha, beta, lower.tail = TRUE, log.p = FALSE) {
    left <- x < 0
    if (any(left)) { ## left tail: mirroring alone is not enough here
        x[left] <- -x[left]
        beta <- rep(beta, length.out = length(x))
        beta[left] <- -beta[left]
        stop("FIXME --- pPareto() is not correct for negative x")## switch 1-iF / iF
    }
    if (log.p) {
        if (lower.tail) ## log(1 - iF)
            log1p(-(1 + beta) * C.stable.tail(alpha) * x^(-alpha))
        else ## log(iF)
            log1p(beta) + C.stable.tail(alpha, log=TRUE) - alpha * log(x)
    } else {
        iF <- (1 + beta) * C.stable.tail(alpha) * x^(-alpha)
        if (lower.tail) 1 - iF else iF
    }
}
##' Stable density  f(x; alpha, beta, gamma, delta)  via J.P. Nolan's
##' integral representation (integrals derived from the characteristic
##' function of standardized stable variables, evaluated numerically).
##' Originally implemented by Diethelm Wuertz; efficiency and accuracy
##' changes by Martin Maechler.
##'
##' @param x numeric vector of quantiles
##' @param alpha index of stability, in (0, 2]  (scalar)
##' @param beta skewness, in [-1, 1]  (scalar)
##' @param gamma scale, in (0, Inf); may be a vector, recycled along x
##' @param delta location; may be a vector, recycled along x
##' @param pm type of parameterization: 0, 1 or 2
##' @param log if TRUE, return the log-density
##' @param tol tolerance for the numerical integration
##' @param zeta.tol relative tolerance for the "x ~= zeta" special case;
##'   chosen heuristically when NULL
##' @param subdivisions maximal number of integration subintervals
##' @return numeric vector of (log) density values, same length as x
dstable <- function(x, alpha, beta,
                    gamma = 1, delta = 0, pm = 0, log = FALSE,
                    tol = 64*.Machine$double.eps, zeta.tol= NULL,
                    subdivisions = 1000)
{
    ## Parameter check -- alpha, beta must be scalars;
    ## NB: (gamma, delta) can be *vector*s (vectorized along x)
    stopifnot( 0 < alpha, alpha <= 2, length(alpha) == 1,
              -1 <= beta, beta <= 1, length(beta) == 1,
              0 <= gamma, length(pm) == 1, pm %in% 0:2,
              tol > 0, subdivisions > 0)
    ## not an official argument {no doc!}:
    verbose <- getOption("dstable.debug", default=FALSE)

    ## Translate parameterizations 1 and 2 into parameterization 0:
    if (pm == 1) {
        delta <- delta + beta*gamma * .om(gamma,alpha)
    } else if (pm == 2) {
        delta <- delta - alpha^(-1/alpha)*gamma*stableMode(alpha, beta)
        gamma <- alpha^(-1/alpha) * gamma
    } ## else pm == 0

    ## Standardize: work with  z = (x - delta)/gamma :
    x <- (x - delta) / gamma

    ans <-
        ## Special cases with closed forms:
        if (alpha == 2) {                     # Gaussian, sd = sqrt(2)
            dnorm(x, mean = 0, sd = sqrt(2), log=log)
        } else if (alpha == 1 && beta == 0) { # Cauchy
            dcauchy(x, log=log)
        } else {
            ## General case
            if (alpha != 1) { ## 0 < alpha < 2  &  |beta| <= 1  from above
                tanpa2 <- tan(pi2*alpha)
                betan <- beta * tanpa2
                zeta <- -betan   # "center" of Nolan's representation
                theta0 <- min(max(-pi2, atan(betan) / alpha), pi2)
                if(verbose) cat(sprintf(
                    "dstable(., alpha=%g, beta=%g,..): --> theta0=%g, zeta=%g,",
                    alpha, beta, theta0, zeta))
                if(is.null(zeta.tol)) { ## heuristic default, used by .fct1()
                    zeta.tol <-
                        if(betan == 0) .4e-15
                        else if(1-abs(beta) < .01 || alpha < .01) 2e-15 else 5e-5
                    if(verbose) cat(sprintf(" --> zeta.tol= %g", zeta.tol))
                }
                else stopifnot(is.numeric(zeta.tol), zeta.tol >= 0)
                if(verbose) cat("\n")
                ## Loop over all x values ( < , = , or > zeta):
                vapply(x, .fct1, 0.,
                       zeta=zeta, alpha=alpha, beta=beta, theta0=theta0, log=log,
                       verbose=verbose,
                       tol=tol, zeta.tol=zeta.tol, subdivisions=subdivisions)
            }
            ## Special case alpha == 1 and -1 <= beta <= 1 (but not = 0);
            ## use the symmetry  f(-z; beta) = f(z; -beta)  to get z >= 0:
            else {
                vapply(x, function(z) {
                    if (z >= 0) {
                        .fct2( z,  beta, log=log, tol=tol, subdivisions=subdivisions)
                    } else {
                        .fct2(-z, -beta, log=log, tol=tol, subdivisions=subdivisions)
                    }
                }, 0.)
            }
        }

    ## Back-transform to the original scale,  f(x) = f_std(z)/gamma ;
    ## where the integral representation underflowed to 0 (-Inf on the log
    ## scale) we can do better via the asymptotic Pareto tail (dPareto).
    ## BUG FIX: recycle gamma *once* up front -- previously gamma was
    ## overwritten by its [i0] subset and then subset again by [io],
    ## scaling the io elements by the wrong entries when gamma is a vector
    ## and both branches are taken.
    gam <- if(length(gamma) > 1) rep(gamma, length.out=length(x)) else gamma
    i0 <- ans == (if(log) -Inf else 0)
    if(any(i0)) {
        d <- dPareto(x[i0], alpha, beta, log=log)
        g <- if(length(gam) > 1) gam[i0] else gam
        ans[i0] <- if(log) d - log(g) else d/g
    }
    if(any(io <- !i0)) {
        d <- ans[io]
        g <- if(length(gam) > 1) gam[io] else gam
        ans[io] <- if (log) d - log(g) else d/g
    }
    ans
}## {dstable}
## ------------------------------------------------------------------------------
## largest x for which exp(-x) does not underflow to 0; ~ 708.396
.large.exp.arg <- -(.Machine$double.min.exp * log(2))
##' @title x*exp(-x), numerically stable, with the correct limit 0 for x --> Inf
##' @param x numeric vector
##' @return numeric vector of x*exp(-x); NA for NA/NaN input
##' @author Martin Maechler
x.exp.m.x <- function(x) {
    out <- x * exp(-x)
    nax <- is.na(x)
    if(any(nax))
        out[nax] <- NA_real_
    ## beyond ~708, exp(-x) underflows and e.g. x == Inf gives Inf*0 = NaN;
    ## the mathematical limit of x*exp(-x) is 0 there
    big <- !nax & x > .large.exp.arg
    if(any(big))
        out[big] <- 0
    out
}
## nudge x by a *relative* amount eps: away from zero (.e.plus) or towards it (.e.minus)
.e.plus  <- function(x, eps) x + abs(x) * eps
.e.minus <- function(x, eps) x - abs(x) * eps
## pi/2 shrunk by relative eps; == .e.minus(pi/2, eps), slightly more efficiently
pi2.. <- function(eps) (1 - eps) * pi2
##' Stable density approximation used by dstable() for very small alpha > 0:
##'   f(x) ~ alpha*(1+beta) / (e * (2x + pi*alpha*beta))
##' Only valid for  x > zeta := -beta * tan(pi/2 * alpha).
dstable.smallA <- function(x, alpha, beta, log=FALSE) {
    lr <- log(alpha) + log1p(beta) - (1 + log(2*x + pi*alpha*beta))
    if(log) lr else exp(lr)
}
## alpha threshold below which dstable() switches to dstable.smallA();
## 1e-17: seems "good", but not "optimized" at all -- hidden for now
.alpha.small.dstable <- 1e-17
##' Workhorse for dstable() when alpha != 1: standardized stable density
##' at one point x via Nolan's integral representation
##'   f(x) = c2 * Integral_{-theta0}^{pi/2}  g(th) exp(-g(th)) dth .
##' Called once per x value (via vapply) from dstable().
##'
##' @param x numeric *scalar* (already shifted and scaled)
##' @param zeta  -beta * tan(pi*alpha/2)
##' @param alpha,beta stable parameters (scalars), alpha != 1
##' @param theta0 atan(beta*tan(pi*alpha/2))/alpha, clamped to [-pi/2, pi/2]
##' @param log if TRUE, return the log-density
##' @param tol,subdivisions control for .integrate2()
##' @param zeta.tol relative tolerance deciding when x is "at" zeta
##' @param verbose print debugging output?
.fct1 <- function(x, zeta, alpha, beta, theta0, log,
                  tol, subdivisions, zeta.tol,
                  verbose = getOption("dstable.debug", default=FALSE))
{
    ## --- dstable(x, alpha, beta, ..) for alpha < 2 ---
    ## For x = zeta, have special case formula [Nolan(1997)];
    ## need to use it also for x ~= zeta, i.e., x.m.zet := |x - zeta| < delta :
    stopifnot(is.finite(zeta))
    x.m.zet <- abs(x - zeta)
    ## closed-form density at x == zeta [Nolan(1997)]:
    f.zeta <- function(log)
        if(log)
            lgamma(1+1/alpha)+ log(cos(theta0)) - (log(pi)+ log1p(zeta^2)/(2*alpha))
        else
            gamma(1+1/alpha)*cos(theta0) / (pi*(1+zeta^2)^(1/(2*alpha)))
    ## Modified: originally was if (z == zeta),
    ## then (D.W.) if (x.m.zet < 2 * .Machine$double.eps)
    ## then (M.M.) if (x.m.zet <= 1e-5 * abs(x))
    if(is.finite(x) && x.m.zet <= zeta.tol * (zeta.tol+ max(abs(x),abs(zeta)))) {
        if(verbose)
            cat(sprintf(".fct1(%.11g, %.10g,..): x ~= zeta => using f.zeta()\n",
                        x, zeta))
        return(f.zeta(log))
    }
    ## the real check should be about the feasibility of g() below, or its integration
    smallAlpha <- (alpha < .alpha.small.dstable)
    if(x < zeta) {
        theta0 <- -theta0 # see Nolan(1997), Thm.1 (c)
        if(smallAlpha) {
            ## mirror to the right tail: f(-x; beta) = f(x; -beta)
            beta <- -beta
            x <- -x
        }
    }
    if(smallAlpha) {
        ## here, *MUST* have __ x > zeta __
        if(verbose)
            cat(sprintf(".fct1(%.11g, %.10g,..): small alpha=%g\n",
                        x, zeta, alpha))
        return(dstable.smallA(x, alpha, beta, log=log))
    }
    ## constants ( independent of integrand g1(th) = g*exp(-g) ):
    ## zeta <- -beta * tan(pi*alpha/2)
    ## theta0 <- (1/alpha) * atan( beta * tan(pi*alpha/2))
    ## x.m.zet <- abs(x - zeta)
    ##-------->>> identically as in .FCT1() for pstable() below: <<<-----------
    a_1 <- alpha - 1
    cat0 <- cos(at0 <- alpha*theta0)
    ##' g() is strictly monotone -- Nolan(1997) ["3. Numerical Considerations"]
    ##' alpha >= 1 <==> g() is falling, ie. from Inf --> 0; otherwise growing from 0 to +Inf
    g <- function(th) {
        r <- th
        ## g(-pi/2) or g(pi/2) could become NaN --> work around
        i.bnd <- abs(pi2 -sign(a_1)*th) < 64*.Machine$double.eps
        r[i.bnd] <- 0
        th <- th[io <- !i.bnd]
        att <- at0 + alpha*th ## = alpha*(theta0 + theta)
        r[io] <- (cat0 * cos(th) * (x.m.zet/sin(att))^alpha)^(1/a_1) * cos(att-th)
        r
    }
    ## Function to integrate: dstable(..)= f(..) = c2 * \int_{-\theta_0}^{\pi/2} g1(u) du
    g1 <- function(th) {
        ## g1 := g(.) exp(-g(.))
        x.exp.m.x( g(th) )
    }
    c2 <- ( alpha / (pi*abs(a_1)*x.m.zet) )
    ## Now, result = c2 * \int_{-t0}^{pi/2} g1(u) du , we "only" need the integral
    ## where however, g1(.) may look to be (almost) zero almost everywhere and just have a small peak
    ## ==> Find the peak, split the integral into two parts of for intervals (t0, t_max) + (t_max, pi/2)
    ## However, this may still be bad, e.g., for dstable(71.61531, alpha=1.001, beta=0.6),
    ## or dstable(1.205, 0.75, -0.5)
    ## the 2nd integral was "completely wrong" (basically zero, instead of ..e-5)
    ## NB: g() is monotone, see above
    if((alpha >= 1 &&
        ((!is.na(g. <- g( pi2 )) && g. > .large.exp.arg) || identical(g(-theta0), 0))) ||
       (alpha < 1 &&
        ((!is.na(g. <- g(-theta0)) && g. > .large.exp.arg) || identical(g(pi2), 0)))) {
        ## g() is numerically too large *or* 0 even where it should be inf
        ## ===> g() * exp(-g()) is 0 everywhere
        if(verbose)
            cat(sprintf(".fct1(%.11g, %.10g,..): g() is 'Inf' (or 0) ==> result 0", x,zeta))
        return(if(log)-Inf else 0)
    }
    ## probe g() just inside its "large" boundary:
    g. <- if(alpha >= 1) g(.e.plus(-theta0, 1e-6)) else g(pi2..(1e-6))
    if(is.na(g.))# g() is not usable --- FIXME rather use *asymptotic dPareto()?
        if(max(x.m.zet, x.m.zet / abs(x)) < .01)
            return(f.zeta(log))
    if(verbose)
        cat(sprintf(".fct1(%.11g, %.10g,..): c2*sum(r[1:4])= %.11g*", x,zeta, c2))
    Int <- function(a,b)
        .integrate2(g1, lower = a, upper = b,
                    subdivisions=subdivisions, rel.tol= tol, abs.tol= tol)
    ## We know that the maximum of g1(.) is = exp(-1) = 0.3679 "at" g(.) == 1
    ## find that by uniroot :
    ## g(.) == 1 <==> log(g(.)) == 0 --- the latter is better conditioned,
    ## e.g., for (x = -1, alpha = 0.95, beta = 0.6)
    ## the former is better for dstable(-122717558, alpha = 1.8, beta = 0.3, pm = 1)
    ## However, it can be that the maximum is at the boundary, and
    ## g(.) > 1 everywhere or g(.) < 1 everywhere {in that case we could revert to optimize..}
    if((alpha >= 1 && !is.na(g. <- g(pi2)) && g. > 1) ||
       (alpha < 1 && !is.na(g. <- g(pi2)) && g. < 1))
        g1.th2 <- g1( theta2 <- pi2..(1e-6) )
    else if((alpha < 1 && g(-theta0) > 1) ||
            (alpha >= 1 && g(-theta0) < 1))
        g1.th2 <- g1( theta2 <- .e.plus(-theta0, 1e-6) )
    else {
        ## when alpha ~=< 1 (0.998 e.g.), g(x) is == 0 (numerically) on a wide range;
        ## uniroot is not good enough, and we should *increase* -theta0
        ## or decrease pi2 such that it can find the root:
        l.th <- -theta0
        u.th <- pi2
        if(alpha < 1) { ## g() is *in*creasing from 0 ..
            ## bisect the bracket until g() is numerically nonzero / non-one:
            while ((g.t <- g(.th <- (l.th + pi2)/2)) == 0) l.th <- .th
            if(g.t == 1)# decrease upper limit {needed, e.g. for alpha = 1e-20}
                while ((g.t <- g(.th <- (l.th + u.th)/2)) == 1) u.th <- .th
            if(abs(u.th - l.th) < 1e-13)# do not trust g()
                return(if(log)-Inf else 0)
            if(verbose >= 2)
                cat(sprintf("\n -theta0=%g %s l.th=%g .. u.th=%g <= pi/2\n",
                            -theta0, if(-theta0 == l.th) "=" else "<",
                            l.th, u.th))
        }
        ## locate the peak twice -- on g() and on log(g()) -- and keep the
        ## root whose g1 value is larger (better conditioned):
        ur1 <- uniroot(function(th) g(th) - 1,
                       lower = l.th, upper = u.th, tol = .Machine$double.eps)
        ## consider using safeUroot() [ ~/R/Pkgs/copula/R/safeUroot.R ] !!
        ur2 <- tryCatch(uniroot(function(th) log(g(th)),
                                lower = l.th, upper = u.th, tol = .Machine$double.eps),
                        error=function(e)e)
        g.1 <- x.exp.m.x(ur1$f.root+1)
        g.2 <- if(inherits(ur2, "error")) -Inf else x.exp.m.x(exp(ur2$f.root))
        if(g.1 >= g.2) {
            theta2 <- ur1$root
            g1.th2 <- g.1 ## == g1(theta2)
        } else {
            theta2 <- ur2$root
            g1.th2 <- g.2
        }
    }
    ## now, because g1()'s peak (at th = theta2) may be extreme, we find two more intermediate values
    ## NB: Theoretically: Max = 0.3679 = g1(theta2) ==> 1e-4 is a very small fraction of that
    ## to the left:
    eps <- 1e-4
    if((do1 <- g1.th2 > eps && g1(-theta0) < eps))
        th1 <- uniroot(function(th) g1(th) - eps, lower = -theta0, upper = theta2,
                       tol = tol)$root
    if((do4 <- g1.th2 > eps && g1(pi2) < eps))
        ## to the right:
        th3 <- uniroot(function(th) g1(th) - eps, lower = theta2, upper = pi2,
                       tol = tol)$root
    ## integrate in up to 4 pieces around the peak:
    if(do1) {
        r1 <- Int(-theta0, th1)
        r2 <- Int( th1, theta2)
    } else {
        r1 <- 0
        r2 <- Int(-theta0, theta2)
    }
    if(do4) {
        r3 <- Int( theta2, th3)
        r4 <- Int( th3, pi2)
    } else {
        r3 <- Int( theta2, pi2)
        r4 <- 0
    }
    if(verbose)
        cat(sprintf("(%6.4g + %6.4g + %6.4g + %6.4g)= %g\n",
                    r1,r2,r3,r4, c2*(r1+r2+r3+r4)))
    if(log)
        log(c2)+ log(r1+r2+r3+r4)
    else
        c2*(r1+r2+r3+r4)
} ## {.fct1}
## ------------------------------------------------------------------------------
##' Auxiliary for dstable() only used when alpha == 1 :
##' computes the standardized density via the alpha == 1 variant of
##' Nolan's integral, substituting u = th/(pi/2) so u ranges over [-1, 1].
##' @param x numeric *scalar*, >= 0
##' @param beta 0 < |beta| <= 1
##' @param log if TRUE, return the log-density
##' @param tol relative/absolute tolerance for .integrate2()
##' @param subdivisions maximal number of integration subintervals
##' @param verbose print debugging output?
.fct2 <- function(x, beta, log, tol, subdivisions,
                  verbose = getOption("dstable.debug", default=FALSE))
{
    i2b <- 1/(2*beta)
    p2b <- pi*i2b # = pi/(2 beta)
    ea <- -p2b*x
    ## exp(ea) term vanishes for huge x/beta ==> density is 0:
    if(is.infinite(ea)) return(if(log)-Inf else 0)
    ##' g() is strictly monotone;
    ##' g(u) := original_g(u*pi/2)
    ##' for beta > 0: increasing from g(-1) = 0 to g(+1) = Inf
    ##' for beta < 0: decreasing from g(-1) = Inf to g(+1) = 0
    ##t0 <- -sign(beta)*pi2# g(t0) == 0 mathematically, but not always numerically
    u0 <- -sign(beta)# g(u0) == 0 mathematically, but not always numerically
    g <- function(u) {
        r <- u
        ## force the exact limit 0 at the u0 boundary:
        r[i <- abs(u-u0) < 1e-10] <- 0
        u <- u[!i]
        th <- u*pi2
        h <- p2b+ th # == g'/beta where g' := pi/2 + beta*th = pi/2* (1 + beta*u)
        r[!i] <- (h/p2b) * exp(ea + h*tanpi2(u)) / cospi2(u)
        r
    }
    ## Function to Integrate; u is a non-sorted vector!
    g2 <- function(u) {
        ## g2 = g(.) exp(-g(.))
        x.exp.m.x( g(u) )
    }
    ## We know that the maximum of g2(.) is = exp(-1) = 0.3679 "at" g(.) == 1
    ## find that by uniroot, then integrate the two sides of the peak:
    ur <- uniroot(function(u) g(u) - 1, lower = -1, upper = 1, tol = tol)
    u2 <- ur$root
    r1 <- .integrate2(g2, lower = -1, upper = u2,
                      subdivisions = subdivisions, rel.tol = tol, abs.tol = tol)
    r2 <- .integrate2(g2, lower = u2, upper = 1,
                      subdivisions = subdivisions, rel.tol = tol, abs.tol = tol)
    if(verbose) {
        cc <- pi2*abs(i2b)
        cat(sprintf(".fct2(%.11g, %.6g,..): c*sum(r1+r2)= %.11g*(%6.4g + %6.4g)= %g\n",
                    x,beta, cc, r1, r2, cc*(r1+r2)))
    }
    ## density = pi/(2|beta|) * integral  (the pi/2 is the Jacobian of u = th/(pi/2)):
    if(log)
        log(pi2) + log(abs(i2b)) + log(r1 + r2)
    else
        pi2*abs(i2b)*(r1 + r2)
}## {.fct2}
### ------------------------------------------------------------------------------
##' Stable distribution function  F(q; alpha, beta, gamma, delta),
##' via Nolan's integral representation (numerical integration in
##' .FCT1() / .FCT2()).  A function implemented by Diethelm Wuertz.
##'
##' @param q numeric vector of quantiles
##' @param alpha index of stability in (0, 2]  (scalar)
##' @param beta skewness in [-1, 1]  (scalar)
##' @param gamma scale > 0; may be a vector (recycled along q)
##' @param delta location; may be a vector
##' @param pm parameterization: 0, 1 or 2
##' @param lower.tail,log.p as for pnorm() etc.
##' @param tol,subdivisions integration control
##' @return numeric vector of (log) probabilities
pstable <- function(q, alpha, beta, gamma = 1, delta = 0, pm = 0,
                    lower.tail = TRUE, log.p = FALSE,
                    tol = 64*.Machine$double.eps, subdivisions = 1000)
{
    x <- q
    ## Parameter Check:
    ## NB: (gamma, delta) can be *vector*s (vectorized along x)
    stopifnot( 0 < alpha, alpha <= 2, length(alpha) == 1,
              -1 <= beta, beta <= 1, length(beta) == 1,
              0 <= gamma, length(pm) == 1, pm %in% 0:2,
              tol > 0, subdivisions > 0)
    ## Translate parameterizations 1 and 2 into parameterization 0:
    if (pm == 1) {
        delta <- delta + beta*gamma * .om(gamma,alpha)
    } else if (pm == 2) {
        delta <- delta - alpha^(-1/alpha)*gamma*stableMode(alpha, beta)
        gamma <- alpha^(-1/alpha) * gamma
    } ## else pm == 0
    ## Standardize:
    x <- (x - delta) / gamma
    ## Return directly
    ## ------ first, special cases:
    if (alpha == 2) {
        pnorm(x, mean = 0, sd = sqrt(2), lower.tail=lower.tail, log.p=log.p)
    } else if (alpha == 1 && beta == 0) {
        pcauchy(x, lower.tail=lower.tail, log.p=log.p)
    } else {
        ## turn a lower-tail probability F into the requested tail / log scale:
        retValue <- function(F, useLower) { ## (vectorized in F)
            if(useLower) {
                if(log.p) log(F) else F
            } else { ## upper: 1 - F
                if(log.p) log1p(-F) else 1 - F
            }
        }
        ## General Case
        if (alpha != 1) { ## 0 < alpha < 2 & |beta| <= 1 from above
            tanpa2 <- tan(pi2*alpha)
            zeta <- -beta * tanpa2
            theta0 <- min(max(-pi2, atan(-zeta) / alpha), pi2)
            if(finSupp <- (abs(beta) == 1 && alpha < 1)) {
                ## has *finite* support [zeta, Inf) if beta == 1
                ## (-Inf, zeta] if beta == -1
            }
            ## Loop over all x values:
            vapply(x, function(z) {
                if(finSupp) {
                    ## outside the finite support, F is exactly 0 resp. 1:
                    if(beta == 1 && z <= zeta)
                        return(retValue(0., useLower=lower.tail))
                    else if(beta == -1 && z >= zeta)
                        return(retValue(1., useLower=lower.tail))
                    ## else .. one of the cases below
                }
                if(abs(z - zeta) < 2 * .Machine$double.eps) {
                    ## F(zeta) in closed form; FIXME? same problem as dstable
                    r <- if(lower.tail) (1/2- theta0/pi) else 1/2+ theta0/pi
                    if(log.p) log(r) else r
                } else {
                    ## integrate on whichever side of zeta z lies:
                    useLower <-
                        ((z > zeta && lower.tail) ||
                         (z < zeta && !lower.tail))
                    ## FIXME: for alpha > 1 -- the following computes F1 = 1 -c3*r(x)
                    ## and suffers from cancellation when 1-F1 is used below:
                    giveI <- !useLower && alpha > 1 # if TRUE, .FCT1() return 1-F
                    .F1 <- .FCT1(z, zeta, alpha=alpha, theta0=theta0,
                                 giveI = giveI,
                                 tol = tol, subdivisions = subdivisions)
                    if(giveI)
                        if(log.p) log(.F1) else .F1
                    else retValue(.F1, useLower=useLower)
                }
            }, 0.)
        }
        ## Special Case alpha == 1 and -1 <= beta <= 1 (but not = 0) :
        else { ## (alpha == 1) and 0 < |beta| <= 1 from above
            ## mirror beta < 0 to beta > 0 (flips the tail):
            useL <-
                if(beta >= 0)
                    lower.tail
                else {
                    beta <- -beta
                    x <- -x
                    !lower.tail
                }
            if(giveI <- !useL && !log.p)
                useL <- TRUE   # .FCT2() then returns 1-F directly
            ## Loop over all x values:
            retValue(vapply(x, function(z)
                .FCT2(z, beta = beta, tol=tol, subdivisions=subdivisions,
                      giveI = giveI),
                0.),
                useLower = useL)
        }
    }
}## {pstable}
## ------------------------------------------------------------------------------
##' Auxiliary for pstable() (for alpha != 1):  F (or 1-F when giveI) at a
##' single standardized quantile x, via  F = c1 + c3 * Int exp(-g(th)) dth .
##' @param x numeric *scalar* quantile (standardized)
##' @param zeta  -beta * tan(pi*alpha/2)
##' @param alpha index of stability (scalar), != 1
##' @param theta0 atan(beta*tan(pi*alpha/2))/alpha, clamped to [-pi/2, pi/2]
##' @param giveI if TRUE, return the upper tail 1-F (only used for alpha > 1)
##' @param tol,subdivisions integration control
##' @param verbose print debugging output?
.FCT1 <- function(x, zeta, alpha, theta0, giveI, tol, subdivisions,
                  verbose = getOption("pstable.debug", default=FALSE))
{
    if(is.infinite(x))
        return(if(giveI) 0 else 1)
    stopifnot(is.finite(zeta))
    x.m.zet <- abs(x - zeta)
    ##-------->>> identically as in .fct1() for dstable() above: <<<-----------
    ## FIXME: also provide "very small alpha" case, as in .fct1()
    if(x < zeta) theta0 <- -theta0   # mirror the left side of zeta
    a_1 <- alpha - 1
    cat0 <- cos(at0 <- alpha*theta0)
    ## the same (monotone) integrand kernel as in .fct1():
    g <- function(th) {
        r <- th
        ## g(-pi/2) or g(pi/2) could become NaN --> work around
        i.bnd <- abs(pi2 -sign(a_1)*th) < 64*.Machine$double.eps
        r[i.bnd] <- 0
        th <- th[io <- !i.bnd]
        att <- at0 + alpha*th ## = alpha*(theta0 + theta)
        r[io] <- (cat0 * cos(th) * (x.m.zet/sin(att))^alpha)^(1/a_1) * cos(att-th)
        r
    }
    if(verbose) cat(sprintf(".FCT1(%9g, %10g, th0=%.10g, %s..): ",
                            x,zeta, theta0, if(giveI)"giveI=TRUE," else ""))
    ## as g() is monotone, the integrand exp(-g(.)) is too ==> maximum is at the boundary
    ## however, integration can be inaccurate when g(.) quickly jumps from Inf to 0
    ## _BUT_ empirically I find that good values l.th / u.th below are *INDEPENDENT* of x,
    l.th <- .e.plus(-theta0, 1e-6)
    if(alpha > 1 && g(l.th) == Inf) {
        ## shrink the lower limit past the region where g() overflows:
        ur <- uniroot(function(t) 1-2*(g(t)==Inf), lower=l.th, upper=pi2,
                      f.lower= -1, f.upper= 1, tol = 1e-8)
        l.th <- ur$root
        if(verbose) cat(sprintf(" g(-th0 +1e-6)=Inf: unirt(%d it) -> l.th=%.10g ",
                                ur$iter, l.th))
    }
    u.th <- .e.minus(pi2, 1e-6)
    if(alpha < 1 && g(u.th) == Inf) {
        ## shrink the upper limit analogously:
        ur <- uniroot(function(t) 1-2*(g(t)==Inf), lower=l.th, upper=u.th,
                      f.upper= -1, tol = 1e-8)
        u.th <- ur$root
        if(verbose) cat(sprintf(" g(pi/2 -1e-6)=Inf: unirt(%d it) -> u.th=%.10g ",
                                ur$iter, u.th))
    }
    r <- .integrate2(function(th) exp(-g(th)),
                     lower = l.th, upper = u.th, subdivisions = subdivisions,
                     rel.tol = tol, abs.tol = tol)
    if(verbose) cat(sprintf("--> Int r= %.11g\n", r))
    if(giveI) { ## { ==> alpha > 1 ==> c1 = 1; c3 = -1/pi}
        ## return (1 - F) = 1 - (1 -1/pi * r) = r/pi :
        r/pi
    } else {
        c1 <- if(alpha < 1) 1/2 - theta0/pi else 1
        c3 <- sign(1-alpha)/pi
        ## FIXME: for alpha > 1, F = 1 - |.|*r(x)
        ## <==> cancellation iff we eventually want 1 - F() [-> 'lower.tail']
        c1 + c3* r
    }
} ## {.FCT1}
## ------------------------------------------------------------------------------
##' Auxiliary for pstable() only used when alpha == 1 :
##' F (or 1-F when giveI) at one standardized quantile, via the
##' substitution u = th/(pi/2) so the integration range is [-1, 1].
##' @param x numeric *scalar*
##' @param beta >= 0 here (the caller has already mirrored beta < 0)
##' @param tol relative/absolute tolerance for .integrate2()
##' @param subdivisions maximal number of integration subintervals
##' @param giveI if TRUE, return the upper tail 1-F
##' @param verbose print debugging output?
.FCT2 <- function(x, beta, tol, subdivisions, giveI = FALSE,
                  verbose = getOption("pstable.debug", default=FALSE))
{
    i2b <- 1/(2*beta)
    p2b <- pi*i2b # = pi/(2 beta)
    ea <- -p2b*x
    ## degenerate x/beta: the integrand is identically 1 resp. 0:
    if(is.infinite(ea))
        return(R.D.Lval(if(ea < 0) ## == -Inf ==> g(.) == 0 ==> G2(.) == 1
                        1 else 0, ## == +Inf ==> g(.) == Inf ==> G2(.) == 0
                        lower.tail= !giveI))
    ##' g() is strictly monotone;
    ##' g(u) := original_g(u*pi/2)
    ##' for beta > 0: increasing from g(-1) = 0 to g(+1) = Inf
    ##' for beta < 0: decreasing from g(-1) = Inf to g(+1) = 0
    ## original_g :
    ## g <- function(th) {
    ## h <- p2b+ th # == g'/beta where g' := pi/2 + beta*th
    ## (h/p2b) * exp(ea + h*tan(th)) / cos(th)
    ## }
    ##t0 <- -pi2# g(t0) == 0 mathematically, but not always numerically
    u0 <- -1 # g(u0) == 0 mathematically, but not always numerically
    g <- function(u) {
        r <- u
        ## force the exact limit 0 at the u0 boundary:
        r[i <- abs(u-u0) < 1e-10] <- 0
        u <- u[!i]
        th <- u*pi2
        h <- p2b+ th # == g'/beta where g' := pi/2 + beta*th = pi/2* (1 + beta*u)
        r[!i] <- (h/p2b) * exp(ea + h*tanpi2(u)) / cospi2(u)
        r
    }
    if(verbose)
        cat(sprintf(".FCT2(%.11g, %.6g, %s..): ",
                    x,beta, if(giveI) "giveI=TRUE," else ""))
    ## g(-u0) == +Inf {at other end}, mathematically ==> exp(-g(.)) == 0
    ## in the outer tails, the numerical integration can be inaccurate,
    ## because g(.) jumps from 0 to Inf, but is 0 almost always
    ## <==> g1(.) = exp(-g(.)) jumps from 1 to 0 and is 1 almost everywhere
    ## ---> the integration "does not see the 0" and returns too large..
    u. <- 1
    if(g(uu <- .e.minus(u., 1e-6)) == Inf) {
        ## shrink the upper limit past the region where g() overflows:
        ur <- uniroot(function(t) 1-2*(g(t)==Inf), lower=-1, upper= uu,
                      f.lower= +1, f.upper= -1, tol = 1e-8)
        u. <- ur$root
        if(verbose) cat(sprintf(" g(%g)=Inf: unirt(%d it) -> u.=%.10g",
                                uu, ur$iter, u.))
    }
    ##' G2(.) = exp(-g(.)) is strictly monotone .. no need for 'theta2' !
    ## (with giveI, integrate expm1(-g) = G2 - 1 to avoid cancellation in 1-F)
    G2 <- if(giveI) function(u) expm1(-g(u)) else function(u) exp(-g(u))
    r <- .integrate2(G2, lower = -1, upper = u.,
                     subdivisions = subdivisions, rel.tol = tol, abs.tol = tol) / 2
    if(verbose) cat(sprintf("--> Int r= %.11g\n", r))
    if(giveI) -r else r
}## {.FCT2}
### ------------------------------------------------------------------------------
## -- utilities (mirroring the R_D_* / R_DT_* macros in R's src/nmath/dpq.h) :
## lower-tail view of a probability p: p itself, or its complement 1-p
R.D.Lval <- function(p, lower.tail) if(lower.tail) p else (1 - p)
## upper-tail view of a probability p: 1-p, or p itself
R.D.Cval <- function(p, lower.tail) if(lower.tail) (1 - p) else p
## R.D.qIv <- function(p, log.p) if(log.p) exp(p) else p # p in qF(p,..)
##' p as used inside qF(p, ..): undo log scale and tail flip;
##' == R.D.Lval(R.D.qIv(p)) "===" p in qF !
R.DT.qIv <- function(p, lower.tail, log.p) {
    if(!log.p)
        R.D.Lval(p, lower.tail)
    else if(lower.tail)
        exp(p)
    else
        - expm1(p)
}
##' (1 - p) as used inside qF(p, ..);
##' == R.D.Cval(R.D.qIv(p)) "===" (1 - p) in qF
R.DT.CIv <- function(p, lower.tail, log.p) {
    if(!log.p)
        R.D.Cval(p, lower.tail)
    else if(lower.tail)
        -expm1(p)
    else
        exp(p)
}
##' Stable quantile function: inverts pstable() numerically.
##' A function implemented by Diethelm Wuertz.
##'
##' @param p numeric vector of probabilities
##' @param alpha index of stability in (0, 2]  (scalar)
##' @param beta skewness in [-1, 1]  (scalar)
##' @param gamma scale > 0; may be a vector
##' @param delta location; may be a vector
##' @param pm parameterization: 0, 1 or 2
##' @param lower.tail,log.p as for qnorm() etc.
##' @param tol,maxiter,trace control for the root finder (.unirootNA)
##' @param integ.tol,subdivisions control for pstable()'s integration
##' @return numeric vector of quantiles
qstable <- function(p, alpha, beta, gamma = 1, delta = 0, pm = 0,
                    lower.tail = TRUE, log.p = FALSE,
                    tol = .Machine$double.eps^0.25, maxiter = 1000, trace = 0,
                    integ.tol = 1e-7, subdivisions = 200)
{
    ## Parameter Check:
    ## NB: (gamma, delta) can be *vector*s (vectorized along x)
    stopifnot( 0 < alpha, alpha <= 2, length(alpha) == 1,
              -1 <= beta, beta <= 1, length(beta) == 1,
              0 <= gamma, length(pm) == 1, pm %in% 0:2,
              tol > 0, subdivisions > 0)
    ## Translate parameterizations 1 and 2 into parameterization 0:
    if (pm == 1) {
        delta <- delta + beta*gamma * .om(gamma,alpha)
    } else if (pm == 2) {
        delta <- delta - alpha^(-1/alpha)*gamma*stableMode(alpha, beta)
        gamma <- alpha^(-1/alpha) * gamma
    } ## else pm == 0
    result <-
        ## Special Cases with closed-form quantiles:
        if (alpha == 2)
            qnorm(p, mean = 0, sd = sqrt(2), lower.tail=lower.tail, log.p=log.p)
        else if (alpha == 1 && beta == 0)
            qcauchy(p, lower.tail=lower.tail, log.p=log.p)
        else { ## -------------- 0 < alpha < 2 ---------------
            ## root function: pstable(x) - p changes sign at the quantile
            .froot <- function(x, p) {
                pstable(q = x, alpha=alpha, beta=beta, pm = 0,
                        lower.tail=lower.tail, log.p=log.p,
                        tol=integ.tol, subdivisions=subdivisions) - p
            }
            ## for approximate interval (normal / Cauchy quantiles bracket):
            .qN <- function(p) qnorm (p, mean = 0, sd = sqrt(2),
                                      lower.tail=lower.tail, log.p=log.p)
            .qC <- function(p) qcauchy(p, lower.tail=lower.tail, log.p=log.p)
            ## Calculate one quantile (pp is a scalar probability):
            qst1 <- function(pp) {
                ## 1) Find narrow interval [xmin, xmax] -----------------------
                ## NB: will deal with a too narrow interval later
                p0 <- R.DT.qIv(pp, lower.tail=lower.tail, log.p=log.p)
                left <- p0 < 0.5
                if (beta < 0) {
                    xmin <- -R.DT.CIv(pp, lower.tail=lower.tail, log.p=log.p)/p0
                    xmax <- if (left) .qN(pp) else .qC(pp)
                }
                else if (beta > 0 ) {
                    xmin <- if (left) .qC(pp) else .qN(pp)
                    xmax <- p0/R.DT.CIv(pp, lower.tail=lower.tail, log.p=log.p)
                }
                else { ## (beta == 0)
                    xmin <- if (left) .qC(pp) else .qN(pp)
                    xmax <- if (left) .qN(pp) else .qC(pp)
                }
                if(xmin >= xmax) { # fixup interval such that xmin < xmax
                    fdx <- if(xmin == xmax) .01*max(1e-7, abs(xmin)) else 1.01*(xmin-xmax)
                    xmin <- xmin - fdx
                    xmax <- xmax + fdx
                    stopifnot(xmin < xmax)
                }
                ## 2) root-finding pstable(..) = p inside the interval: -------
                ## widen the interval geometrically until a sign change is found
                dx <- 1
                repeat {
                    root <- .unirootNA(.froot, interval = c(xmin, xmax), p = pp,
                                       extendInt = if(lower.tail) "upX" else "downX",
                                       tol=tol, maxiter=maxiter, trace=trace)
                    if(!is.na(root))
                        break
                    xmin <- xmin- dx
                    xmax <- xmax+ dx
                    if(xmin == -Inf && xmax == +Inf)
                        stop("could not find an interval for x where pstable(x,*) - p changes sign")
                    dx <- dx * 2
                }
                root
            }
            vapply(p, qst1, 0.)
        }
    ## Result, transformed back to the original scale:
    result * gamma + delta
}
## ------------------------------------------------------------------------------
##' Random variates from the stable distribution, via the
##' Chambers-Mallows-Stuck representation (slightly amended along
##' copula::rstable1).
##'
##' @param n number of observations
##' @param alpha index of stability in (0, 2]  (scalar)
##' @param beta skewness in [-1, 1]  (scalar)
##' @param gamma scale > 0; may be a vector (recycled along the sample)
##' @param delta location; may be a vector
##' @param pm parameterization: 0, 1 or 2
##' @return numeric vector of n stable random variates
rstable <- function(n, alpha, beta, gamma = 1, delta = 0, pm = 0)
{
    ## Parameter Check:
    ## NB: (gamma, delta) can be *vector*s (vectorized along the sample)
    stopifnot( 0 < alpha, alpha <= 2, length(alpha) == 1,
              -1 <= beta, beta <= 1, length(beta) == 1,
              0 <= gamma, length(pm) == 1, pm %in% 0:2)
    ## Translate parameterizations 1 and 2 into parameterization 0:
    if (pm == 1) {
        delta <- delta + beta*gamma * .om(gamma,alpha)
    } else if (pm == 2) {
        delta <- delta - alpha^(-1/alpha)*gamma*stableMode(alpha, beta)
        gamma <- alpha^(-1/alpha) * gamma
    } ## else pm == 0
    ## Uniform angle on (-pi/2, pi/2) and standard exponential; drawn
    ## *before* branching so the RNG stream is the same in both branches:
    theta <- pi * (runif(n)-1/2)
    w <- -log(runif(n))
    result <-
        if (alpha == 1 && beta == 0) { # symmetric Cauchy special case
            ## (was `alpha == 1 & beta == 0`; alpha, beta are scalars, so
            ## use the scalar, short-circuiting `&&`)
            rcauchy(n)
        } else {
            ## Chambers-Mallows-Stuck formula, parameterization 0.
            ## FIXME: learn from nacopula::rstable1R()
            ## NOTE(review): for alpha == 1 with beta != 0 this branch is
            ## numerically delicate (tan(pi2*alpha) overflows near pi/2) --
            ## confirm against copula::rstable1 before relying on it.
            b.tan.pa <- beta*tan(pi2*alpha)
            theta0 <- min(max(-pi2, atan(b.tan.pa) / alpha), pi2)
            cc <- (1+b.tan.pa^2)^(1/(2*alpha))  # renamed from `c` (masked base::c)
            a.tht <- alpha*(theta+theta0)
            r <- ( cc*sin(a.tht)/
                  (cos(theta))^(1/alpha) ) *
                (cos(theta-a.tht)/w)^((1-alpha)/alpha)
            ## Use Parametrization 0:
            r - b.tan.pa
        }
    ## Result, transformed to the requested scale and location:
    result * gamma + delta
}
## ------------------------------------------------------------------------------
##' Numerically integrate -- a thin wrapper around stats::integrate()
##' whose main difference is that integration failures are reported as
##' warnings instead of errors, and only the value is returned.
##' @param f integrand
##' @param lower,upper integration limits
##' @param ... further arguments passed to f via integrate()
##' @param subdivisions,rel.tol,abs.tol,stop.on.error passed to integrate()
##' @return the numeric value of the integral
.integrate2 <- function(f, lower, upper, ..., subdivisions, rel.tol, abs.tol,
                        stop.on.error = FALSE)
{
    res <- integrate(f, lower, upper, ..., subdivisions=subdivisions,
                     rel.tol=rel.tol, abs.tol=abs.tol, stop.on.error=stop.on.error)
    msg <- res[["message"]]
    if(msg != "OK")
        warning(msg) ## NB: "roundoff error ..." happens many times
    res[["value"]]
}
|
## Compare test error of PeakSeg / PeakSegJoint models across data sets
## and train/test splits; produces figure-test-error-dots.pdf.
## NOTE(review): works_with_R() is not base R -- presumably a personal
## helper that attaches the named packages after checking versions;
## confirm it is available before running.
works_with_R("3.2.0",
             ggplot2="1.0",
             dplyr="0.4.0")
## Pre-computed error tables; objects such as cheating.error, step1.error,
## step2.error / step2.error.all, weighted.error.all (used below) are
## presumably defined by these files -- TODO confirm:
load("cheating.error.RData")
load("step1.error.RData")
load("step2.error.RData")
load("weighted.error.RData")
## Baseline results: drop unsupervised learners and most oracle.* models,
## keep oracle.41 (renamed "PeakSeg.41"), then strip a trailing ".<digits>"
## suffix so model variants collapse onto one algorithm label:
PeakSeg.results <- read.csv("PeakSeg-results.csv") %>%
  filter(learning != "unsupervised") %>%
  filter(!grepl("oracle.[13]", algorithm)) %>%
  mutate(algorithm=ifelse(algorithm=="oracle.41", "PeakSeg.41",
           paste(algorithm))) %>%
  mutate(algorithm=sub("[.][0-9]*$", "", algorithm))
## Per-(data set, split) test-error summaries for each PeakSegJoint
## variant; each table ends up with errors, regions, percent, algo.type
## and learning columns so they can be rbind-ed below.

## "step2": total label errors per split.  Region counts are copied
## row-wise from cheating.error (assumes both tables have the same row
## order -- TODO confirm):
step2.stats <- step2.error %>%
  mutate(algorithm="step2") %>%
  group_by(set.name, split.i, algorithm) %>%
  summarise(errors=sum(errors))
step2.stats$regions <- cheating.error$regions
step2.stats$percent <- with(step2.stats, errors/regions*100)
step2.stats$algo.type <- "PeakSegJoint"
step2.stats$learning <- "interval\nregression"
## Full PeakSegJoint model: errors = false negatives + false positives,
## one labeled region per input row (regions = n()):
step2.all.stats <- step2.error.all %>%
  mutate(algorithm="PeakSegJoint") %>%
  group_by(set.name, split.i, algorithm) %>%
  summarise(errors=sum(fn+fp),
            regions=n()) %>%
  mutate(percent=errors/regions*100,
         algo.type="PeakSegJoint",
         learning="interval\nregression")
## Same summary for the weighted variant:
weighted.all.stats <- weighted.error.all %>%
  mutate(algorithm="weighted") %>%
  group_by(set.name, split.i, algorithm) %>%
  summarise(errors=sum(fn+fp),
            regions=n()) %>%
  mutate(percent=errors/regions*100,
         algo.type="PeakSegJoint",
         learning="interval\nregression")
## ... and for step 1 of the pipeline:
step1.stats <- step1.error %>%
  mutate(algorithm="step1") %>%
  group_by(set.name, split.i, algorithm) %>%
  summarise(errors=sum(fp+fn),
            regions=n()) %>%
  mutate(percent=errors/regions*100,
         algo.type="PeakSegJoint",
         learning="interval\nregression")
## "Cheating" baselines, labeled learning="cheating" (presumably models
## that use test-set information to pick a resolution -- TODO confirm):
step1.best.stats <- data.frame(best.for.train.res) %>%
  mutate(algorithm="best.for\nselected\nresolution",
         percent=errors/regions*100,
         algo.type="PeakSegJoint",
         learning="cheating")
cheating.stats <- data.frame(cheating.error) %>%
  mutate(algorithm="test.res",
         percent=errors/regions*100,
         algo.type="PeakSegJoint",
         learning="cheating")
## Combine the results to display; the alternative tables are kept here
## commented-out so they can be re-enabled for comparison figures:
common.names <- names(PeakSeg.results)
all.stats <-
  rbind(
    PeakSeg.results,
    ##cheating.stats[, common.names], #comment to hide cheaters.
    ##step1.best.stats[, common.names], #comment to hide cheaters.
    ##step1.stats,
    ##step2.stats,
    ##weighted.all.stats,
    step2.all.stats)
## Exclude AIC/BIC models and NTNU data sets from the figure:
show.stats <- all.stats %>%
  filter(!grepl("AIC/BIC", algorithm),
         !grepl("NTNU", set.name))
## Sanity check: region counts should agree across algorithms within a
## (set, split); printed for inspection rather than asserted
## (the stopifnot is deliberately commented out):
region.range <- show.stats %>%
  group_by(set.name, split.i) %>%
  summarise(min=min(regions),
            max=max(regions)) %>%
  mutate(diff=max-min)
##stopifnot(with(region.range, min == max))
data.frame(region.range)
## Mean test-error percent over splits, per data set and algorithm:
show.means <- show.stats %>%
  group_by(set.name, algorithm, learning, algo.type) %>%
  summarise(percent=mean(percent))
## Some quantitative results for the epigenomics abstract:
## spread statistics for the TDH_immune data sets, excluding PeakSeg.
med.stats <- show.stats %>%
  group_by(set.name, algorithm, learning, algo.type) %>%
  summarise(median=median(percent),
            mean=mean(percent),
            sd=sd(percent),
            quartile25=quantile(percent, 0.25),
            quartile75=quantile(percent, 0.75)) %>%
  filter(grepl("TDH_immune", set.name),
         algorithm != "PeakSeg") %>%
  group_by()
med.stats %>%
  select(set.name, algorithm, quartile25, median, quartile75)
med.stats %>%
  select(set.name, algorithm, mean, sd)
## Vertical reference line per panel.
## NOTE(review): the first definition (best algorithm per set) is dead
## code -- it is immediately overwritten by the PeakSegJoint version.
show.vlines <- show.means %>%
  group_by(set.name) %>%
  filter(seq_along(percent) == which.min(percent)) %>%
  select(-algo.type)
show.vlines <- show.means %>%
  group_by(set.name) %>%
  filter(algorithm=="PeakSegJoint") %>%
  select(-algo.type)
## One fixed color per learning-algorithm category:
algo.colors <-
  c("cheating"="grey",
    "interval\nregression"="#D95F02",
    "grid\nsearch"="#1B9E77",
    "unsupervised"="#7570B3")
## Dot plot: one panel per data set, one row per model.  Large translucent
## dots (alpha=0.2, size=3) are per-set means, open circles (pch=1) are the
## individual train/test splits, and the vertical line marks the
## PeakSegJoint mean (show.vlines).
dots <- #with 1 set of facets.
  ggplot()+
  geom_vline(aes(xintercept=percent),
             data=show.vlines)+
  geom_point(aes(percent, algorithm, color=learning),
             data=show.means,
             alpha=0.2,
             size=3)+
  geom_point(aes(percent, algorithm, color=learning),
             data=show.stats, pch=1)+
  facet_grid(. ~ set.name, labeller=function(var, val){
    gsub("_", "\n", val)  # break long set names across lines
  }, scales="free_y", space="free_y")+
  scale_y_discrete("model")+
  theme_bw()+
  guides(color=guide_legend())+
  ## panel.margin is the ggplot2 1.0-era name (pinned by works_with_R):
  theme(panel.margin=grid::unit(0, "cm"),
        legend.position="top")+
  scale_color_manual("learning\nalgorithm", values=algo.colors,
                     breaks=names(algo.colors))+
  scale_fill_manual("learning\nalgorithm", values=algo.colors,
                    breaks=names(algo.colors))+
  scale_x_continuous("percent incorrect peak region labels (test error)",
                     breaks=seq(0, 100, by=25))
## Render the final figure:
pdf("figure-test-error-dots.pdf", h=2.5, w=7)
print(dots)
dev.off()
| /figure-test-error-dots.R | no_license | tdhock/PeakSegJoint-paper | R | false | false | 4,809 | r | works_with_R("3.2.0",
ggplot2="1.0",
dplyr="0.4.0")
load("cheating.error.RData")
load("step1.error.RData")
load("step2.error.RData")
load("weighted.error.RData")
PeakSeg.results <- read.csv("PeakSeg-results.csv") %>%
filter(learning != "unsupervised") %>%
filter(!grepl("oracle.[13]", algorithm)) %>%
mutate(algorithm=ifelse(algorithm=="oracle.41", "PeakSeg.41",
paste(algorithm))) %>%
mutate(algorithm=sub("[.][0-9]*$", "", algorithm))
step2.stats <- step2.error %>%
mutate(algorithm="step2") %>%
group_by(set.name, split.i, algorithm) %>%
summarise(errors=sum(errors))
step2.stats$regions <- cheating.error$regions
step2.stats$percent <- with(step2.stats, errors/regions*100)
step2.stats$algo.type <- "PeakSegJoint"
step2.stats$learning <- "interval\nregression"
step2.all.stats <- step2.error.all %>%
mutate(algorithm="PeakSegJoint") %>%
group_by(set.name, split.i, algorithm) %>%
summarise(errors=sum(fn+fp),
regions=n()) %>%
mutate(percent=errors/regions*100,
algo.type="PeakSegJoint",
learning="interval\nregression")
weighted.all.stats <- weighted.error.all %>%
mutate(algorithm="weighted") %>%
group_by(set.name, split.i, algorithm) %>%
summarise(errors=sum(fn+fp),
regions=n()) %>%
mutate(percent=errors/regions*100,
algo.type="PeakSegJoint",
learning="interval\nregression")
step1.stats <- step1.error %>%
mutate(algorithm="step1") %>%
group_by(set.name, split.i, algorithm) %>%
summarise(errors=sum(fp+fn),
regions=n()) %>%
mutate(percent=errors/regions*100,
algo.type="PeakSegJoint",
learning="interval\nregression")
step1.best.stats <- data.frame(best.for.train.res) %>%
mutate(algorithm="best.for\nselected\nresolution",
percent=errors/regions*100,
algo.type="PeakSegJoint",
learning="cheating")
cheating.stats <- data.frame(cheating.error) %>%
mutate(algorithm="test.res",
percent=errors/regions*100,
algo.type="PeakSegJoint",
learning="cheating")
common.names <- names(PeakSeg.results)
all.stats <-
rbind(
PeakSeg.results,
##cheating.stats[, common.names], #comment to hide cheaters.
##step1.best.stats[, common.names], #comment to hide cheaters.
##step1.stats,
##step2.stats,
##weighted.all.stats,
step2.all.stats)
show.stats <- all.stats %>%
filter(!grepl("AIC/BIC", algorithm),
!grepl("NTNU", set.name))
region.range <- show.stats %>%
group_by(set.name, split.i) %>%
summarise(min=min(regions),
max=max(regions)) %>%
mutate(diff=max-min)
##stopifnot(with(region.range, min == max))
data.frame(region.range)
show.means <- show.stats %>%
group_by(set.name, algorithm, learning, algo.type) %>%
summarise(percent=mean(percent))
## Some quantitative results for the epigenomics abstract.
med.stats <- show.stats %>%
group_by(set.name, algorithm, learning, algo.type) %>%
summarise(median=median(percent),
mean=mean(percent),
sd=sd(percent),
quartile25=quantile(percent, 0.25),
quartile75=quantile(percent, 0.75)) %>%
filter(grepl("TDH_immune", set.name),
algorithm != "PeakSeg") %>%
group_by()
med.stats %>%
select(set.name, algorithm, quartile25, median, quartile75)
med.stats %>%
select(set.name, algorithm, mean, sd)
## Vertical reference line per facet: mean error of the PeakSegJoint model.
## NOTE(review): the original code assigned show.vlines twice; the first
## assignment (line at the best algorithm per set) was immediately
## overwritten and therefore dead code -- removed. To restore it:
## show.vlines <- show.means %>%
##   group_by(set.name) %>%
##   filter(seq_along(percent) == which.min(percent)) %>%
##   select(-algo.type)
show.vlines <- show.means %>%
  group_by(set.name) %>%
  filter(algorithm=="PeakSegJoint") %>%
  select(-algo.type)
## Color map keyed by the 'learning' column (ColorBrewer Dark2 hues).
algo.colors <-
  c("cheating"="grey",
    "interval\nregression"="#D95F02",
    "grid\nsearch"="#1B9E77",
    "unsupervised"="#7570B3")
## Dot plot: one facet per data set; open circles = per-split error,
## solid dots = mean error, vertical line = PeakSegJoint mean.
dots <- #with 1 set of facets.
  ggplot()+
  geom_vline(aes(xintercept=percent),
             data=show.vlines)+
  geom_point(aes(percent, algorithm, color=learning),
             data=show.means,
             alpha=0.2,
             size=3)+
  geom_point(aes(percent, algorithm, color=learning),
             data=show.stats, pch=1)+
  ## NOTE(review): labeller=function(var, val) and theme(panel.margin=)
  ## are pre-ggplot2-2.0 APIs (now labeller objects / panel.spacing) --
  ## confirm the installed ggplot2 version before modernizing.
  facet_grid(. ~ set.name, labeller=function(var, val){
    gsub("_", "\n", val)
  }, scales="free_y", space="free_y")+
  scale_y_discrete("model")+
  theme_bw()+
  guides(color=guide_legend())+
  theme(panel.margin=grid::unit(0, "cm"),
        legend.position="top")+
  scale_color_manual("learning\nalgorithm", values=algo.colors,
                     breaks=names(algo.colors))+
  scale_fill_manual("learning\nalgorithm", values=algo.colors,
                    breaks=names(algo.colors))+
  scale_x_continuous("percent incorrect peak region labels (test error)",
                     breaks=seq(0, 100, by=25))
## Render to PDF (2.5 x 7 inches).
pdf("figure-test-error-dots.pdf", h=2.5, w=7)
print(dots)
dev.off()
|
# testthat placeholder file; 'context' labels the test group in output.
context("A made-up context")
# Trivial sanity test confirming the test infrastructure runs.
test_that("1 + 1 = 2", {
  expect_equal(1 + 1, 2)
})
| /tests/testthat/test-something.R | no_license | pbs-assess/tmbpop | R | false | false | 83 | r | context("A made-up context")
# Trivial sanity test confirming the test infrastructure runs.
test_that("1 + 1 = 2", {
  expect_equal(1 + 1, 2)
})
|
# s.XGB.R
# ::rtemis::
# 2016 Efstathios D. Gennatas egenn.lambdamd.org
# TODO: check if all objective functions must be minimized, or change which.min to variable
# TODO: weights and ipw do not seem to work, upsample works, check weights passing
# and add scale_pos_weight
# TODO: change fittedClass.raw to fitted.prob
# add which.max / which.min dependent on maximize
#' XGboost Classification and Regression [C, R]
#'
#' Tune hyperparameters using grid search and resampling,
#' train a final model, and validate it
#'
#' [gS]: indicates parameter will be autotuned by grid search if multiple values are passed.
#' (s.XGB does its own grid search, similar to gridSearchLearn, may switch to gridSearchLearn similar to s.GBM)
#' Learn more about XGboost's parameters here: http://xgboost.readthedocs.io/en/latest/parameter.html
#' Case weights and therefore IPW do not seem to work, despite following documentation.
#' See how ipw = T fails and upsample = T works in imbalanced dataset.
#' 11.24.16: Updated to work with latest development version of XGBoost from github, which changed some of
#' \code{xgboost}'s return values and is therefore not compatible with older versions
#' \link{s.XGBLIN} is a wrapper for \code{s.XGB} with \code{booster = "gblinear"}
#' @inheritParams s.GLM
#' @param booster Character: Booster to use. Options: "gbtree", "gblinear"
#' @param silent 0: print XGBoost messages; 1: print no XGBoost messages
#' @param nrounds Integer: Maximum number of rounds to run. Can be set to a high number as early stopping
#' will limit nrounds by monitoring inner CV error
#' @param force.nrounds Integer: Number of rounds to run if not estimating optimal number by CV
#' @param lambda [gS] L2 regularization on weights
#' @param lambda.bias [gS] For *linear* booster: L2 regularization on bias
#' @param alpha [gS] L1 regularization on weights
#' @param eta [gS] Float (0, 1): Learning rate. Default = .1
#' @param gamma [gS] Float: Minimum loss reduction required to make further partition
#' @param max.depth [gS] Integer: Maximum tree depth. (Default = 6)
#' @param subsample [gS] Float:
#' @param colsample.bytree [gS]
#' @param colsample.bylevel [gS]
#' @param tree.method [gS] XGBoost tree construction algorithm (Default = "auto")
#' @param sketch.eps [gS] Float (0, 1):
#' @param num.parallel.tree Integer: N of trees to grow in parallel: Results in Random Forest -like algorithm.
#' (Default = 1; i.e. regular boosting)
#' @param base.score Float: The mean outcome response (no need to set)
#' @param objective (Default = NULL)
#' @param sample.type (Default = "uniform")
#' @param normalize.type (Default = "forest")
#' @param obj Function: Custom objective function. See \code{?xgboost::xgboost}
#' @param feval Function: Custom evaluation function. See \code{?xgboost::xgboost}
#' @param xgb.verbose Integer: Verbose level for XGB learners used for tuning.
#' @param print_every_n Integer: Print evaluation metrics every this many iterations
#' @param early.stopping.rounds Integer: Training on resamples of \code{x.train} (tuning) will stop if performance
#' does not improve for this many rounds
#' @param missing String or Numeric: Which values to consider as missing. Default = NA
#' @param nthread Integer: Number of threads for xgboost using OpenMP. Only parallelize resamples
#' using \code{n.cores} or the xgboost execution using this setting. At the moment of writing, parallelization via this
#' parameter causes a linear booster to fail most of the times. Therefore, default is rtCores
#' for 'gbtree', 1 for 'gblinear'
#' @return \link{rtMod} object
#' @author Efstathios D. Gennatas
#' @seealso \link{elevate} for external cross-validation
#' @family Supervised Learning
#' @family Tree-based methods
#' @export
s.XGB <- function(x, y = NULL,
                  x.test = NULL, y.test = NULL,
                  x.name = NULL, y.name = NULL,
                  booster = c("gbtree", "gblinear", "dart"),
                  silent = 1,
                  missing = NA,
                  nrounds = 500L,
                  force.nrounds = NULL,
                  weights = NULL,
                  ipw = TRUE,
                  ipw.type = 2,
                  upsample = FALSE,
                  downsample = FALSE,
                  resample.seed = NULL,
                  obj = NULL,
                  feval = NULL,
                  maximize = NULL,
                  xgb.verbose = NULL,
                  print_every_n = 100L,
                  early.stopping.rounds = 50L,
                  eta = .1,
                  gamma = 0,
                  max.depth = 3,
                  min.child.weight = 5,
                  max.delta.step = 0,
                  subsample = .75,
                  colsample.bytree = NULL,
                  colsample.bylevel = 1,
                  lambda = NULL,
                  lambda.bias = 0,
                  alpha = 0,
                  tree.method = "auto",
                  sketch.eps = .03,
                  num.parallel.tree = 1,
                  base.score = NULL,
                  objective = NULL,
                  sample.type = "uniform",
                  normalize.type = "forest",
                  rate.drop = .1,
                  skip.drop = .5,
                  resampler = "strat.sub",
                  n.resamples = 10,
                  train.p = 0.75,
                  strat.n.bins = 4,
                  stratify.var = NULL,
                  target.length = NULL,
                  seed = NULL,
                  # outcome = NULL,
                  error.curve = FALSE,
                  plot.res = TRUE,
                  save.res = FALSE,
                  save.res.mod = FALSE,
                  importance = FALSE,
                  print.plot = TRUE,
                  plot.fitted = NULL,
                  plot.predicted = NULL,
                  plot.theme = getOption("rt.fit.theme", "lightgrid"),
                  question = NULL,
                  rtclass = NULL,
                  save.dump = FALSE,
                  verbose = TRUE,
                  n.cores = 1,
                  nthread = NULL,
                  parallel.type = c("psock", "fork"),
                  outdir = NULL,
                  save.mod = ifelse(!is.null(outdir), TRUE, FALSE)) {
  # [ INTRO ] ====
  # Called with no input: print the function's arguments and exit
  # (sentinel value 9, returned invisibly).
  if (missing(x)) {
    print(args(s.XGB))
    return(invisible(9))
  }
  # If an output directory was given, console output is also logged to a
  # timestamped file inside it.
  if (!is.null(outdir)) outdir <- normalizePath(outdir, mustWork = FALSE)
  logFile <- if (!is.null(outdir)) {
    paste0(outdir, "/", sys.calls()[[1]][[1]], ".", format(Sys.time(), "%Y%m%d.%H%M%S"), ".log")
  } else {
    NULL
  }
  start.time <- intro(verbose = verbose, logFile = logFile)
  # [ DEPENDENCIES ] ====
  # xgboost does the fitting; pbapply drives the (optionally parallel) grid search.
  if (!depCheck(c("xgboost", "pbapply"), verbose = FALSE)) {
    cat("\n"); stop("Please install dependencies and try again")
  }
  # [ ARGUMENTS ] ====
  if (is.null(y) & NCOL(x) < 2) {
    print(args(s.XGB))
    stop("y is missing")
  }
  if (is.null(x.name)) x.name <- getName(x, "x")
  if (is.null(y.name)) y.name <- getName(y, "y")
  booster <- match.arg(booster)
  if (booster == "gbtree") {
    mod.name <- "XGB"
  } else if (booster == "dart") {
    mod.name <- "XGBDART"
  } else {
    mod.name <- "XGBLIN"
  }
  # Booster-dependent defaults; see roxygen note: OpenMP threading (nthread)
  # tends to fail with the gblinear booster, so it defaults to 1 thread there.
  if (is.null(nthread)) nthread <- ifelse(booster == "gblinear", 1, rtCores)
  if (is.null(lambda)) lambda <- ifelse(booster == "gblinear", 0, 1)
  if (is.null(colsample.bytree)) colsample.bytree <- ifelse(NCOL(x) > 100, .75, 1)
  if (is.null(n.cores)) n.cores <- rtCores
  if (is.null(xgb.verbose)) xgb.verbose <- ifelse(verbose, 1, 0)
  if (!verbose) print.plot <- FALSE
  verbose <- verbose | !is.null(logFile)
  if (save.mod & is.null(outdir)) outdir <- paste0("./s.", mod.name)
  if (!is.null(outdir)) outdir <- paste0(normalizePath(outdir, mustWork = FALSE), "/")
  parallel.type <- match.arg(parallel.type)
  # [ DATA ] ====
  # dataPrepare handles IPW case weights, up/down-sampling, and type detection.
  dt <- dataPrepare(x, y, x.test, y.test,
                    ipw = ipw, ipw.type = ipw.type,
                    upsample = upsample,
                    downsample = downsample,
                    resample.seed = resample.seed,
                    verbose = verbose)
  x <- dt$x
  y <- dt$y
  # xgboost requires a numeric matrix: refuse factor features outright.
  index.factor <- which(sapply(x, is.factor))
  n.factor <- length(index.factor)
  if (n.factor > 0) stop("Please convert all features to numeric before running s.XGB")
  x.test <- dt$x.test
  y.test <- dt$y.test
  xnames <- dt$xnames
  type <- dt$type
  checkType(type, c("Classification", "Regression"), mod.name)
  .weights <- if (is.null(weights) & ipw) dt$weights else weights
  if (verbose) dataSummary(x, y, x.test, y.test, type)
  # xgboost expects 0-based integer class labels.
  if (type == "Classification") y.num <- as.numeric(y) - 1
  nclass <- ifelse(type == "Classification", length(levels(y)), 0)
  # Default objective by task: linear regression, logistic for 2 classes,
  # softmax for more.
  if (is.null(objective)) {
    if (type == "Regression") {
      objective <- "reg:linear"
    } else {
      objective <- ifelse(nclass == 2, "binary:logistic", "multi:softmax")
    }
  }
  # NOTE(review): .weights are only attached to the DMatrix in the
  # Classification branch below; Regression ignores case weights -- confirm
  # whether that is intended.
  if (type == "Regression") {
    if (is.null(base.score)) base.score <- mean(y)
    xg.dat <- xgboost::xgb.DMatrix(as.matrix(x),
                                   label = y,
                                   missing = missing)
  } else {
    if (is.null(base.score)) base.score <- mean(as.numeric(y.num))
    xg.dat <- xgboost::xgb.DMatrix(as.matrix(x),
                                   label = y.num,
                                   weight = .weights,
                                   missing = missing)
  }
  if (is.null(stratify.var)) stratify.var <- y
  if (print.plot) {
    if (is.null(plot.fitted)) plot.fitted <- if (is.null(y.test)) TRUE else FALSE
    if (is.null(plot.predicted)) plot.predicted <- if (!is.null(y.test)) TRUE else FALSE
  } else {
    plot.fitted <- plot.predicted <- FALSE
  }
  # [ MAIN ] ====
  if (n.resamples > 0) {
    # {{ GRID SEARCH WITH INTERNAL RESAMPLING }}
    # [ RESAMPLES ] ====
    n.resamples <- as.integer(n.resamples)
    if (is.null(target.length)) target.length <- length(y)
    res.part <- resample(y = stratify.var,
                         n.resamples = n.resamples,
                         resampler = resampler,
                         train.p = train.p,
                         strat.n.bins = strat.n.bins,
                         target.length = target.length,
                         seed = seed)
    # [ {GRID} FN ] ====
    # Fit one grid line (hyperparameter combination x resample) with early
    # stopping and return its best iteration and score.
    xgb.1 <- function(index, grid,
                      x.int, y.int,
                      res.part,
                      nrounds,
                      objective,
                      nclass,
                      weights,
                      xgb.verbose,
                      nthread) {
      s.out.1 <- list(mod.name = "grid.XGB", call = NULL)
      grid.line <- grid[index, ]
      # All columns except the last two (res.id, id) are xgboost parameters.
      params.1 <- as.list(grid.line[1, 1:(ncol(grid.line) - 2)])
      params.1$booster <- booster
      params.1$objective <- objective
      if (objective == "multi:softmax") params.1$num.class <- nclass
      s.out.1$params.1 <- params.1
      res.id <- grid.line$res.id
      x.train.g <- as.matrix(x.int[res.part[[res.id]], ])
      x.test.g <- as.matrix(x.int[-res.part[[res.id]], ])
      y.train.g <- y.int[res.part[[res.id]]]
      y.test.g <- y.int[-res.part[[res.id]]]
      data.train.1 <- xgboost::xgb.DMatrix(as.matrix(x.train.g),
                                           missing = missing,
                                           label = y.train.g)
      if (!is.null(weights)) xgboost::setinfo(data.train.1, "weight", weights[res.part[[res.id]]])
      data.test.1 <- xgboost::xgb.DMatrix(data = as.matrix(x.test.g), missing = missing, label = y.test.g)
      # xgboost will minimize the second element of this list
      # - check by making verbose and running on 1 core
      watchlist <- list(train = data.train.1, test = data.test.1)
      if (verbose) cat("\n")
      mod.xgb.1 <- xgboost::xgb.train(params = params.1,
                                      data = data.train.1,
                                      nrounds = nrounds,
                                      watchlist = watchlist,
                                      obj = obj,
                                      feval = feval,
                                      verbose = xgb.verbose,
                                      print_every_n = print_every_n,
                                      early_stopping_rounds = early.stopping.rounds,
                                      maximize = maximize,
                                      nthread = nthread)
      if (save.res.mod) s.out.1$mod.xgb.1 <- mod.xgb.1
      s.out.1$best_iteration <- bestInd <- mod.xgb.1$best_iteration
      s.out.1$best_score <- mod.xgb.1$best_score
      # Check error curves
      # NOTE(review): mod.xgb.1$bestInd below looks stale -- best_iteration
      # is the field read above; confirm against the installed xgboost version.
      if (error.curve) {
        if (type == "Regression") {
          ntreelimit <- 1:(mod.xgb.1$bestInd + early.stopping.rounds)
          fitted.1.series <- sapply(ntreelimit, function(i) {
            predict(mod.xgb.1, data.train.1, ntreelimit = i) })
          mse.train.1.series <- apply(fitted.1.series, 2, function(f) mse(y.train.g, f))
          predicted.1.series <- sapply(ntreelimit, function(i) {
            predict(mod.xgb.1, data.test.1, ntreelimit = i) })
          mse.test.1.series <- apply(predicted.1.series, 2, function(p) mse(y.test.g, p))
          if (plot.res) mplot3.xy(ntreelimit, list(mse.test.1.series, mse.train.1.series),
                                  type = "l", group.legend = F, xlab = "N iterations", ylab = "MSE", lwd = 4,
                                  vline = mod.xgb.1$bestInd, vline.lty = 2, vline.lwd = 2,
                                  legend.tc = paste("best n.trees =", mod.xgb.1$bestInd,
                                                    "\nMSE.test =", ddSci(mse.test.1.series[bestInd])))
        } # add else for Classification accuracy curves
      }
      return(s.out.1)
    } # END {GRID} FN
    # [ GRID ] ====
    # One grid row per hyperparameter combination per resample.
    if (booster == "gbtree") {
      grid <- expand.grid(eta = eta,
                          gamma = gamma,
                          max.depth = max.depth,
                          min.child.weight = min.child.weight,
                          max.delta.step = max.delta.step,
                          subsample = subsample,
                          colsample.bytree = colsample.bytree,
                          colsample.bylevel = colsample.bylevel,
                          lambda = lambda,
                          alpha = alpha,
                          tree.method = tree.method,
                          sketch.eps = sketch.eps,
                          num.parallel.tree = num.parallel.tree,
                          res.id = 1:n.resamples)
      if (verbose) gridSummary(eta, gamma, max.depth, min.child.weight, max.delta.step, subsample,
                               colsample.bytree, colsample.bylevel, lambda, alpha, tree.method, sketch.eps,
                               num.parallel.tree)
    } else if (booster == "dart") {
      grid <- expand.grid(eta = eta,
                          gamma = gamma,
                          max.depth = max.depth,
                          min.child.weight = min.child.weight,
                          max.delta.step = max.delta.step,
                          subsample = subsample,
                          colsample.bytree = colsample.bytree,
                          colsample.bylevel = colsample.bylevel,
                          lambda = lambda,
                          alpha = alpha,
                          tree.method = tree.method,
                          sample.type = sample.type,
                          normalize.type = normalize.type,
                          rate.drop = rate.drop,
                          skip.drop = skip.drop,
                          sketch.eps = sketch.eps,
                          num.parallel.tree = num.parallel.tree,
                          res.id = 1:n.resamples)
      if (verbose) gridSummary(eta, gamma, max.depth, min.child.weight, max.delta.step, subsample,
                               colsample.bytree, colsample.bylevel, lambda, alpha, tree.method,
                               sample.type, normalize.type, rate.drop, skip.drop, sketch.eps,
                               num.parallel.tree)
    } else {
      # gblinear tunes only the regularization parameters.
      grid <- expand.grid(lambda = lambda,
                          alpha = alpha,
                          lambda.bias = lambda.bias,
                          res.id = 1:n.resamples)
      if (verbose) gridSummary(lambda, alpha, lambda.bias)
    }
    grid$id <- 1:NROW(grid)
    n.gridLines <- NROW(grid)
    if (n.gridLines < n.cores) n.cores <- n.gridLines
    # [ GRID RUN ] ====
    if (verbose) msg("Running XGB grid search:",
                     "\n    N models total = ", n.gridLines,
                     "\n    N resamples = ", n.resamples,
                     "\n    N parallel resamples = ", n.cores,
                     "\n    N XGboost threads = ", nthread, sep = "")
    if (type == "Regression") y.int <- y else y.int <- y.num
    if (!verbose) pbapply::pboptions(type = "none") # no progress bar
    # Parallelize over grid lines: PSOCK cluster (portable) or fork (unix).
    if (n.cores > 1) {
      if (parallel.type == "psock") {
        if (verbose) msg("Starting PSOCK cluster on", n.cores, "cores...")
        cl <- makePSOCKcluster(n.cores)
        on.exit(stopCluster(cl))
        clusterEvalQ(cl, library("rtemis"))
      } else {
        if (verbose) msg("Parallelizing by forking on", n.cores, "cores...")
        cl <- n.cores
      }
    } else {
      cl <- 1
    }
    if (!is.null(logFile)) sink() # pause writing to file
    grid.run <- pbapply::pblapply(1:n.gridLines, xgb.1,
                                  grid = grid,
                                  x.int = as.data.frame(x),
                                  y.int = y.int,
                                  res.part = res.part,
                                  nrounds = nrounds,
                                  objective = objective,
                                  nclass = nclass,
                                  weights = .weights,
                                  xgb.verbose = xgb.verbose,
                                  nthread = nthread,
                                  cl = n.cores)
    if (!is.null(logFile)) sink(logFile, append = TRUE, split = verbose) # Resume writing to log
    if (verbose) msg("Grid search complete")
    names(grid.run) <- paste0("xgb.gridLine.", 1:n.gridLines)
    # Collect best iteration/score of every grid line next to its parameters.
    grid.performance <- data.frame(grid, plyr::ldply(grid.run,
                                                     function(g) data.frame(best.nrounds = g$best_iteration,
                                                                            bestScore = g$best_score)))
    # tune.id identifies the hyperparameter combination across resamples.
    grid.performance$tune.id <- factor(rep(c(1:(n.gridLines/n.resamples)), n.resamples))
    # NOTE(review): for booster "dart" this falls into the else branch and
    # builds the gblinear-style grid -- possible oversight; confirm.
    if (booster == "gbtree") {
      grid.by.tune.id <- expand.grid(eta = eta,
                                     gamma = gamma,
                                     max.depth = max.depth,
                                     min.child.weight = min.child.weight,
                                     max.delta.step = max.delta.step,
                                     subsample = subsample,
                                     colsample.bytree = colsample.bytree,
                                     colsample.bylevel = colsample.bylevel,
                                     lambda = lambda,
                                     alpha = alpha,
                                     tree.method = tree.method,
                                     sketch.eps = sketch.eps,
                                     num.parallel.tree = num.parallel.tree)
    } else {
      grid.by.tune.id <- expand.grid(lambda = lambda,
                                     alpha = alpha,
                                     lambda.bias = lambda.bias)
    }
    # Average best.nrounds and bestScore over resamples per combination,
    # then keep the combination with the lowest mean score (see TODO at top:
    # assumes the evaluation metric is minimized).
    grid.performance.by.tune.id <- data.frame(grid.by.tune.id,
                                              aggregate(cbind(best.nrounds = grid.performance$best.nrounds,
                                                              bestScore = grid.performance$bestScore),
                                                        by = list(tune.id = grid.performance$tune.id),
                                                        mean))
    best.tune <- grid.performance.by.tune.id[which.min(grid.performance.by.tune.id$bestScore), ]
    best.tune$best.nrounds <- as.integer(best.tune$best.nrounds)
    # Build the parameter list for the final model from the tuning winner.
    if (booster == "gbtree") {
      params <- list(booster = booster,
                     silent = silent,
                     eta = best.tune$eta,
                     gamma = best.tune$gamma,
                     max.depth = best.tune$max.depth,
                     min.child.weight = best.tune$min.child.weight,
                     max.delta.step = best.tune$max.delta.step,
                     subsample = best.tune$subsample,
                     colsample.bytree = best.tune$colsample.bytree,
                     colsample.bylevel = best.tune$colsample.bylevel,
                     lambda = best.tune$lambda,
                     alpha = best.tune$alpha,
                     tree.method = best.tune$tree.method,
                     sketch.eps = best.tune$sketch.eps,
                     num.parallel.tree = best.tune$num.parallel.tree,
                     objective = objective,
                     base.score = base.score)
      if (objective == "multi:softmax") params$num.class <- nclass
    } else if (booster == "dart") {
      # NOTE(review): the dart branch uses the raw input hyperparameters,
      # not best.tune -- tuning results are not applied here; confirm.
      params <- list(booster = booster,
                     sample.type = sample.type,
                     normalize.type = normalize.type,
                     rate.drop = rate.drop,
                     skip.drop = skip.drop,
                     silent = silent,
                     eta = eta,
                     gamma = gamma,
                     max.depth = max.depth,
                     min.child.weight = min.child.weight,
                     max.delta.step = max.delta.step,
                     subsample = subsample,
                     colsample.bytree = colsample.bytree,
                     colsample.bylevel = colsample.bylevel,
                     lambda = lambda,
                     alpha = alpha,
                     tree.method = tree.method,
                     sketch.eps = sketch.eps,
                     num.parallel.tree = num.parallel.tree,
                     objective = objective,
                     base.score = base.score,
                     nthread = nthread)
    } else {
      params <- list(booster = booster,
                     silent = silent,
                     lambda = best.tune$lambda,
                     alpha = best.tune$alpha,
                     lambda.bias = best.tune$lambda.bias,
                     objective = objective,
                     base.score = base.score)
      if (objective == "multi:softmax") params$num.class <- nclass
    }
    # Final model is trained for the mean best number of rounds found by CV.
    nrounds <- best.tune$best.nrounds
    if (verbose) parameterSummary(best.tune, title = "Tuning Results",
                                  newline.pre = TRUE)
  } else {
    # {{ NO GRID SEARCH NOR INTERNAL RESAMPLING }} ====
    # Use the supplied hyperparameters directly.
    res.part <- grid.performance <- grid.performance.by.tune.id <- best.tune <- NULL
    if (booster == "gbtree") {
      params <- list(booster = booster,
                     silent = silent,
                     eta = eta,
                     gamma = gamma,
                     max.depth = max.depth,
                     min.child.weight = min.child.weight,
                     max.delta.step = max.delta.step,
                     subsample = subsample,
                     colsample.bytree = colsample.bytree,
                     colsample.bylevel = colsample.bylevel,
                     lambda = lambda,
                     alpha = alpha,
                     tree.method = tree.method,
                     sketch.eps = sketch.eps,
                     num.parallel.tree = num.parallel.tree,
                     objective = objective,
                     base.score = base.score)
      if (objective == "multi:softmax") params$num.class <- nclass
    } else {
      params <- list(booster = booster,
                     silent = silent,
                     lambda = lambda,
                     alpha = alpha,
                     lambda.bias = lambda.bias,
                     objective = objective,
                     base.score = base.score)
      if (objective == "multi:softmax") params$num.class <- nclass
    }
  }
  # [ FULL XGBOOST ] ====
  # Train the final model on the full training set.
  if (verbose) msg("Training full XGB model with", booster, "booster...", newline.pre = TRUE)
  # NOTE(review): deparse(substitute(objective)) re-quotes the caller's
  # expression (used only for the 'extra' output below); verify the
  # resulting string format is what downstream consumers expect.
  if (!is.null(objective)) objective <- deparse(substitute(objective))
  if (!is.null(feval)) feval <- deparse(substitute(feval))
  if (!is.null(force.nrounds)) nrounds <- force.nrounds
  mod <- xgboost::xgb.train(params,
                            xg.dat,
                            nrounds,
                            obj = obj,
                            feval = feval,
                            verbose = verbose,
                            print_every_n = print_every_n)
  # [ FITTED ] ====
  fitted <- predict(mod, xg.dat)
  if (type == "Classification") {
    # round() gives correct result whether response is integer or probability
    # fitted.prob is the probability of the FIRST factor level (1 - P(label 1)).
    # NOTE(review): this thresholding path assumes binary output;
    # multi:softmax returns class indices -- confirm multiclass handling.
    fitted.prob <- 1 - fitted
    fitted <- factor(ifelse(fitted.prob >= .5, 1, 0), levels = c(1, 0))
    levels(fitted) <- levels(y)
  } else {
    fitted.prob <- NULL
  }
  error.train <- modError(y, fitted, fitted.prob)
  if (verbose) errorSummary(error.train, mod.name)
  # [ PREDICTED ] ====
  # Predict on the test set, if one was provided.
  predicted.prob <- predicted <- error.test <- NULL
  if (!is.null(x.test)) {
    data.test <- xgboost::xgb.DMatrix(data = as.matrix(x.test), missing = missing)
    predicted <- predict(mod, data.test)
    if (type == "Classification") {
      predicted.prob <- 1 - predicted
      predicted <- factor(ifelse(predicted.prob >= .5, 1, 0), levels = c(1, 0))
      levels(predicted) <- levels(y)
    }
    if (!is.null(y.test)) {
      error.test <- modError(y.test, predicted, predicted.prob)
      if (verbose) errorSummary(error.test, mod.name)
    }
  }
  # [ RELATIVE INFLUENCE / VARIABLE IMPORTANCE ] ====
  .importance <- NULL
  # This may take a while
  if (importance) {
    if (verbose) msg("Estimating variable importance...")
    .importance <- xgboost::xgb.importance(model = mod, feature_names = colnames(x))
  }
  # [ OUTRO ] ====
  # sink(logOut, append = T, split = T)
  # Bundle tuning artifacts and settings into the rtMod 'extra' slot.
  extra <- list(resampler = resampler,
                booster = booster,
                base.score = base.score,
                resamples = res.part,
                grid = grid,
                grid.run = if (save.res) grid.run else NULL,
                grid.performance = grid.performance,
                grid.performance.by.tune.id = grid.performance.by.tune.id,
                best.tune = best.tune,
                params = params,
                nrounds = nrounds,
                objective = objective,
                feval = feval)
  rt <- rtModSet(rtclass = rtclass,
                 mod = mod,
                 mod.name = mod.name,
                 type = type,
                 y.train = y,
                 y.test = y.test,
                 x.name = x.name,
                 y.name = y.name,
                 xnames = xnames,
                 fitted = fitted,
                 fitted.prob = fitted.prob,
                 se.fit = NULL,
                 error.train = error.train,
                 predicted = predicted,
                 predicted.prob = predicted.prob,
                 se.prediction = NULL,
                 error.test = error.test,
                 varimp = .importance,
                 question = question,
                 extra = extra)
  # Print/plot/save side effects, then return the rtMod object.
  rtMod.out(rt,
            print.plot,
            plot.fitted,
            plot.predicted,
            y.test,
            mod.name,
            outdir,
            save.mod,
            verbose,
            plot.theme)
  outro(start.time, verbose = verbose, sinkOff = ifelse(is.null(logFile), FALSE, TRUE))
  rt
} # rtemis::s.XGB
| /R/s.XGB.R | no_license | zeta1999/rtemis | R | false | false | 28,014 | r | # s.XGB.R
# ::rtemis::
# 2016 Efstathios D. Gennatas egenn.lambdamd.org
# TODO: check if all objective functions must be minimized, or change which.min to variable
# TODO: weights and ipw do not seem to work, upsample works, check weights passing
# and add scale_pos_weight
# TODO: change fittedClass.raw to fitted.prob
# add which.max / which.min dependent on maximize
#' XGboost Classification and Regression [C, R]
#'
#' Tune hyperparameters using grid search and resampling,
#' train a final model, and validate it
#'
#' [gS]: indicates parameter will be autotuned by grid search if multiple values are passed.
#' (s.XGB does its own grid search, similar to gridSearchLearn, may switch to gridSearchLearn similar to s.GBM)
#' Learn more about XGboost's parameters here: http://xgboost.readthedocs.io/en/latest/parameter.html
#' Case weights and therefore IPW do not seem to work, despite following documentation.
#' See how ipw = T fails and upsample = T works in imbalanced dataset.
#' 11.24.16: Updated to work with latest development version of XGBoost from github, which changed some of
#' \code{xgboost}'s return values and is therefore not compatible with older versions
#' \link{s.XGBLIN} is a wrapper for \code{s.XGB} with \code{booster = "gblinear"}
#' @inheritParams s.GLM
#' @param booster Character: Booster to use. Options: "gbtree", "gblinear"
#' @param silent 0: print XGBoost messages; 1: print no XGBoost messages
#' @param nrounds Integer: Maximum number of rounds to run. Can be set to a high number as early stopping
#' will limit nrounds by monitoring inner CV error
#' @param force.nrounds Integer: Number of rounds to run if not estimating optimal number by CV
#' @param lambda [gS] L2 regularization on weights
#' @param lambda.bias [gS] For *linear* booster: L2 regularization on bias
#' @param alpha [gS] L1 regularization on weights
#' @param eta [gS] Float (0, 1): Learning rate. Default = .1
#' @param gamma [gS] Float: Minimum loss reduction required to make further partition
#' @param max.depth [gS] Integer: Maximum tree depth. (Default = 6)
#' @param subsample [gS] Float:
#' @param colsample.bytree [gS]
#' @param colsample.bylevel [gS]
#' @param tree.method [gS] XGBoost tree construction algorithm (Default = "auto")
#' @param sketch.eps [gS] Float (0, 1):
#' @param num.parallel.tree Integer: N of trees to grow in parallel: Results in Random Forest -like algorithm.
#' (Default = 1; i.e. regular boosting)
#' @param base.score Float: The mean outcome response (no need to set)
#' @param objective (Default = NULL)
#' @param sample.type (Default = "uniform")
#' @param normalize.type (Default = "forest")
#' @param obj Function: Custom objective function. See \code{?xgboost::xgboost}
#' @param feval Function: Custom evaluation function. See \code{?xgboost::xgboost}
#' @param xgb.verbose Integer: Verbose level for XGB learners used for tuning.
#' @param print_every_n Integer: Print evaluation metrics every this many iterations
#' @param early.stopping.rounds Integer: Training on resamples of \code{x.train} (tuning) will stop if performance
#' does not improve for this many rounds
#' @param missing String or Numeric: Which values to consider as missing. Default = NA
#' @param nthread Integer: Number of threads for xgboost using OpenMP. Only parallelize resamples
#' using \code{n.cores} or the xgboost execution using this setting. At the moment of writing, parallelization via this
#' parameter causes a linear booster to fail most of the times. Therefore, default is rtCores
#' for 'gbtree', 1 for 'gblinear'
#' @return \link{rtMod} object
#' @author Efstathios D. Gennatas
#' @seealso \link{elevate} for external cross-validation
#' @family Supervised Learning
#' @family Tree-based methods
#' @export
s.XGB <- function(x, y = NULL,
x.test = NULL, y.test = NULL,
x.name = NULL, y.name = NULL,
booster = c("gbtree", "gblinear", "dart"),
silent = 1,
missing = NA,
nrounds = 500L,
force.nrounds = NULL,
weights = NULL,
ipw = TRUE,
ipw.type = 2,
upsample = FALSE,
downsample = FALSE,
resample.seed = NULL,
obj = NULL,
feval = NULL,
maximize = NULL,
xgb.verbose = NULL,
print_every_n = 100L,
early.stopping.rounds = 50L,
eta = .1,
gamma = 0,
max.depth = 3,
min.child.weight = 5,
max.delta.step = 0,
subsample = .75,
colsample.bytree = NULL,
colsample.bylevel = 1,
lambda = NULL,
lambda.bias = 0,
alpha = 0,
tree.method = "auto",
sketch.eps = .03,
num.parallel.tree = 1,
base.score = NULL,
objective = NULL,
sample.type = "uniform",
normalize.type = "forest",
rate.drop = .1,
skip.drop = .5,
resampler = "strat.sub",
n.resamples = 10,
train.p = 0.75,
strat.n.bins = 4,
stratify.var = NULL,
target.length = NULL,
seed = NULL,
# outcome = NULL,
error.curve = FALSE,
plot.res = TRUE,
save.res = FALSE,
save.res.mod = FALSE,
importance = FALSE,
print.plot = TRUE,
plot.fitted = NULL,
plot.predicted = NULL,
plot.theme = getOption("rt.fit.theme", "lightgrid"),
question = NULL,
rtclass = NULL,
save.dump = FALSE,
verbose = TRUE,
n.cores = 1,
nthread = NULL,
parallel.type = c("psock", "fork"),
outdir = NULL,
save.mod = ifelse(!is.null(outdir), TRUE, FALSE)) {
# [ INTRO ] ====
if (missing(x)) {
print(args(s.XGB))
return(invisible(9))
}
if (!is.null(outdir)) outdir <- normalizePath(outdir, mustWork = FALSE)
logFile <- if (!is.null(outdir)) {
paste0(outdir, "/", sys.calls()[[1]][[1]], ".", format(Sys.time(), "%Y%m%d.%H%M%S"), ".log")
} else {
NULL
}
start.time <- intro(verbose = verbose, logFile = logFile)
# [ DEPENDENCIES ] ====
if (!depCheck(c("xgboost", "pbapply"), verbose = FALSE)) {
cat("\n"); stop("Please install dependencies and try again")
}
# [ ARGUMENTS ] ====
if (is.null(y) & NCOL(x) < 2) {
print(args(s.XGB))
stop("y is missing")
}
if (is.null(x.name)) x.name <- getName(x, "x")
if (is.null(y.name)) y.name <- getName(y, "y")
booster <- match.arg(booster)
if (booster == "gbtree") {
mod.name <- "XGB"
} else if (booster == "dart") {
mod.name <- "XGBDART"
} else {
mod.name <- "XGBLIN"
}
if (is.null(nthread)) nthread <- ifelse(booster == "gblinear", 1, rtCores)
if (is.null(lambda)) lambda <- ifelse(booster == "gblinear", 0, 1)
if (is.null(colsample.bytree)) colsample.bytree <- ifelse(NCOL(x) > 100, .75, 1)
if (is.null(n.cores)) n.cores <- rtCores
if (is.null(xgb.verbose)) xgb.verbose <- ifelse(verbose, 1, 0)
if (!verbose) print.plot <- FALSE
verbose <- verbose | !is.null(logFile)
if (save.mod & is.null(outdir)) outdir <- paste0("./s.", mod.name)
if (!is.null(outdir)) outdir <- paste0(normalizePath(outdir, mustWork = FALSE), "/")
parallel.type <- match.arg(parallel.type)
# [ DATA ] ====
dt <- dataPrepare(x, y, x.test, y.test,
ipw = ipw, ipw.type = ipw.type,
upsample = upsample,
downsample = downsample,
resample.seed = resample.seed,
verbose = verbose)
x <- dt$x
y <- dt$y
index.factor <- which(sapply(x, is.factor))
n.factor <- length(index.factor)
if (n.factor > 0) stop("Please convert all features to numeric before running s.XGB")
x.test <- dt$x.test
y.test <- dt$y.test
xnames <- dt$xnames
type <- dt$type
checkType(type, c("Classification", "Regression"), mod.name)
.weights <- if (is.null(weights) & ipw) dt$weights else weights
if (verbose) dataSummary(x, y, x.test, y.test, type)
if (type == "Classification") y.num <- as.numeric(y) - 1
nclass <- ifelse(type == "Classification", length(levels(y)), 0)
if (is.null(objective)) {
if (type == "Regression") {
objective <- "reg:linear"
} else {
objective <- ifelse(nclass == 2, "binary:logistic", "multi:softmax")
}
}
if (type == "Regression") {
if (is.null(base.score)) base.score <- mean(y)
xg.dat <- xgboost::xgb.DMatrix(as.matrix(x),
label = y,
missing = missing)
} else {
if (is.null(base.score)) base.score <- mean(as.numeric(y.num))
xg.dat <- xgboost::xgb.DMatrix(as.matrix(x),
label = y.num,
weight = .weights,
missing = missing)
}
if (is.null(stratify.var)) stratify.var <- y
if (print.plot) {
if (is.null(plot.fitted)) plot.fitted <- if (is.null(y.test)) TRUE else FALSE
if (is.null(plot.predicted)) plot.predicted <- if (!is.null(y.test)) TRUE else FALSE
} else {
plot.fitted <- plot.predicted <- FALSE
}
# [ MAIN ] ====
if (n.resamples > 0) {
# {{ GRID SEARCH WITH INTERNAL RESAMPLING }}
# [ RESAMPLES ] ====
n.resamples <- as.integer(n.resamples)
if (is.null(target.length)) target.length <- length(y)
res.part <- resample(y = stratify.var,
n.resamples = n.resamples,
resampler = resampler,
train.p = train.p,
strat.n.bins = strat.n.bins,
target.length = target.length,
seed = seed)
# [ {GRID} FN ] ====
xgb.1 <- function(index, grid,
x.int, y.int,
res.part,
nrounds,
objective,
nclass,
weights,
xgb.verbose,
nthread) {
s.out.1 <- list(mod.name = "grid.XGB", call = NULL)
grid.line <- grid[index, ]
params.1 <- as.list(grid.line[1, 1:(ncol(grid.line) - 2)])
params.1$booster <- booster
params.1$objective <- objective
if (objective == "multi:softmax") params.1$num.class <- nclass
s.out.1$params.1 <- params.1
res.id <- grid.line$res.id
x.train.g <- as.matrix(x.int[res.part[[res.id]], ])
x.test.g <- as.matrix(x.int[-res.part[[res.id]], ])
y.train.g <- y.int[res.part[[res.id]]]
y.test.g <- y.int[-res.part[[res.id]]]
data.train.1 <- xgboost::xgb.DMatrix(as.matrix(x.train.g),
missing = missing,
label = y.train.g)
if (!is.null(weights)) xgboost::setinfo(data.train.1, "weight", weights[res.part[[res.id]]])
data.test.1 <- xgboost::xgb.DMatrix(data = as.matrix(x.test.g), missing = missing, label = y.test.g)
# xgboost will minimizwe the second element of this list
# - check by making verbose and running on 1 core
watchlist <- list(train = data.train.1, test = data.test.1)
if (verbose) cat("\n")
mod.xgb.1 <- xgboost::xgb.train(params = params.1,
data = data.train.1,
nrounds = nrounds,
watchlist = watchlist,
obj = obj,
feval = feval,
verbose = xgb.verbose,
print_every_n = print_every_n,
early_stopping_rounds = early.stopping.rounds,
maximize = maximize,
nthread = nthread)
if (save.res.mod) s.out.1$mod.xgb.1 <- mod.xgb.1
s.out.1$best_iteration <- bestInd <- mod.xgb.1$best_iteration
s.out.1$best_score <- mod.xgb.1$best_score
# Check error curves
if (error.curve) {
if (type == "Regression") {
ntreelimit <- 1:(mod.xgb.1$bestInd + early.stopping.rounds)
fitted.1.series <- sapply(ntreelimit, function(i) {
predict(mod.xgb.1, data.train.1, ntreelimit = i) })
mse.train.1.series <- apply(fitted.1.series, 2, function(f) mse(y.train.g, f))
predicted.1.series <- sapply(ntreelimit, function(i) {
predict(mod.xgb.1, data.test.1, ntreelimit = i) })
mse.test.1.series <- apply(predicted.1.series, 2, function(p) mse(y.test.g, p))
if (plot.res) mplot3.xy(ntreelimit, list(mse.test.1.series, mse.train.1.series),
type = "l", group.legend = F, xlab = "N iterations", ylab = "MSE", lwd = 4,
vline = mod.xgb.1$bestInd, vline.lty = 2, vline.lwd = 2,
legend.tc = paste("best n.trees =", mod.xgb.1$bestInd,
"\nMSE.test =", ddSci(mse.test.1.series[bestInd])))
} # add else for Classification accuracy curves
}
return(s.out.1)
} # END {GRID} FN
# [ GRID ] ====
if (booster == "gbtree") {
grid <- expand.grid(eta = eta,
gamma = gamma,
max.depth = max.depth,
min.child.weight = min.child.weight,
max.delta.step = max.delta.step,
subsample = subsample,
colsample.bytree = colsample.bytree,
colsample.bylevel = colsample.bylevel,
lambda = lambda,
alpha = alpha,
tree.method = tree.method,
sketch.eps = sketch.eps,
num.parallel.tree = num.parallel.tree,
res.id = 1:n.resamples)
if (verbose) gridSummary(eta, gamma, max.depth, min.child.weight, max.delta.step, subsample,
colsample.bytree, colsample.bylevel, lambda, alpha, tree.method, sketch.eps,
num.parallel.tree)
} else if (booster == "dart") {
grid <- expand.grid(eta = eta,
gamma = gamma,
max.depth = max.depth,
min.child.weight = min.child.weight,
max.delta.step = max.delta.step,
subsample = subsample,
colsample.bytree = colsample.bytree,
colsample.bylevel = colsample.bylevel,
lambda = lambda,
alpha = alpha,
tree.method = tree.method,
sample.type = sample.type,
normalize.type = normalize.type,
rate.drop = rate.drop,
skip.drop = skip.drop,
sketch.eps = sketch.eps,
num.parallel.tree = num.parallel.tree,
res.id = 1:n.resamples)
if (verbose) gridSummary(eta, gamma, max.depth, min.child.weight, max.delta.step, subsample,
colsample.bytree, colsample.bylevel, lambda, alpha, tree.method,
sample.type, normalize.type, rate.drop, skip.drop, sketch.eps,
num.parallel.tree)
} else {
grid <- expand.grid(lambda = lambda,
alpha = alpha,
lambda.bias = lambda.bias,
res.id = 1:n.resamples)
if (verbose) gridSummary(lambda, alpha, lambda.bias)
}
grid$id <- 1:NROW(grid)
n.gridLines <- NROW(grid)
if (n.gridLines < n.cores) n.cores <- n.gridLines
# [ GRID RUN ] ====
if (verbose) msg("Running XGB grid search:",
"\n N models total = ", n.gridLines,
"\n N resamples = ", n.resamples,
"\n N parallel resamples = ", n.cores,
"\n N XGboost threads = ", nthread, sep = "")
if (type == "Regression") y.int <- y else y.int <- y.num
if (!verbose) pbapply::pboptions(type = "none") # no progress bar
if (n.cores > 1) {
if (parallel.type == "psock") {
if (verbose) msg("Starting PSOCK cluster on", n.cores, "cores...")
cl <- makePSOCKcluster(n.cores)
on.exit(stopCluster(cl))
clusterEvalQ(cl, library("rtemis"))
} else {
if (verbose) msg("Parallelizing by forking on", n.cores, "cores...")
cl <- n.cores
}
} else {
cl <- 1
}
if (!is.null(logFile)) sink() # pause writing to file
grid.run <- pbapply::pblapply(1:n.gridLines, xgb.1,
grid = grid,
x.int = as.data.frame(x),
y.int = y.int,
res.part = res.part,
nrounds = nrounds,
objective = objective,
nclass = nclass,
weights = .weights,
xgb.verbose = xgb.verbose,
nthread = nthread,
cl = n.cores)
if (!is.null(logFile)) sink(logFile, append = TRUE, split = verbose) # Resume writing to log
if (verbose) msg("Grid search complete")
names(grid.run) <- paste0("xgb.gridLine.", 1:n.gridLines)
grid.performance <- data.frame(grid, plyr::ldply(grid.run,
function(g) data.frame(best.nrounds = g$best_iteration,
bestScore = g$best_score)))
grid.performance$tune.id <- factor(rep(c(1:(n.gridLines/n.resamples)), n.resamples))
if (booster == "gbtree") {
grid.by.tune.id <- expand.grid(eta = eta,
gamma = gamma,
max.depth = max.depth,
min.child.weight = min.child.weight,
max.delta.step = max.delta.step,
subsample = subsample,
colsample.bytree = colsample.bytree,
colsample.bylevel = colsample.bylevel,
lambda = lambda,
alpha = alpha,
tree.method = tree.method,
sketch.eps = sketch.eps,
num.parallel.tree = num.parallel.tree)
} else {
grid.by.tune.id <- expand.grid(lambda = lambda,
alpha = alpha,
lambda.bias = lambda.bias)
}
grid.performance.by.tune.id <- data.frame(grid.by.tune.id,
aggregate(cbind(best.nrounds = grid.performance$best.nrounds,
bestScore = grid.performance$bestScore),
by = list(tune.id = grid.performance$tune.id),
mean))
best.tune <- grid.performance.by.tune.id[which.min(grid.performance.by.tune.id$bestScore), ]
best.tune$best.nrounds <- as.integer(best.tune$best.nrounds)
if (booster == "gbtree") {
params <- list(booster = booster,
silent = silent,
eta = best.tune$eta,
gamma = best.tune$gamma,
max.depth = best.tune$max.depth,
min.child.weight = best.tune$min.child.weight,
max.delta.step = best.tune$max.delta.step,
subsample = best.tune$subsample,
colsample.bytree = best.tune$colsample.bytree,
colsample.bylevel = best.tune$colsample.bylevel,
lambda = best.tune$lambda,
alpha = best.tune$alpha,
tree.method = best.tune$tree.method,
sketch.eps = best.tune$sketch.eps,
num.parallel.tree = best.tune$num.parallel.tree,
objective = objective,
base.score = base.score)
if (objective == "multi:softmax") params$num.class <- nclass
} else if (booster == "dart") {
params <- list(booster = booster,
sample.type = sample.type,
normalize.type = normalize.type,
rate.drop = rate.drop,
skip.drop = skip.drop,
silent = silent,
eta = eta,
gamma = gamma,
max.depth = max.depth,
min.child.weight = min.child.weight,
max.delta.step = max.delta.step,
subsample = subsample,
colsample.bytree = colsample.bytree,
colsample.bylevel = colsample.bylevel,
lambda = lambda,
alpha = alpha,
tree.method = tree.method,
sketch.eps = sketch.eps,
num.parallel.tree = num.parallel.tree,
objective = objective,
base.score = base.score,
nthread = nthread)
} else {
params <- list(booster = booster,
silent = silent,
lambda = best.tune$lambda,
alpha = best.tune$alpha,
lambda.bias = best.tune$lambda.bias,
objective = objective,
base.score = base.score)
if (objective == "multi:softmax") params$num.class <- nclass
}
nrounds <- best.tune$best.nrounds
if (verbose) parameterSummary(best.tune, title = "Tuning Results",
newline.pre = TRUE)
} else {
# {{ NO GRID SEARCH NOR INTERNAL RESAMPLING }} ====
res.part <- grid.performance <- grid.performance.by.tune.id <- best.tune <- NULL
if (booster == "gbtree") {
params <- list(booster = booster,
silent = silent,
eta = eta,
gamma = gamma,
max.depth = max.depth,
min.child.weight = min.child.weight,
max.delta.step = max.delta.step,
subsample = subsample,
colsample.bytree = colsample.bytree,
colsample.bylevel = colsample.bylevel,
lambda = lambda,
alpha = alpha,
tree.method = tree.method,
sketch.eps = sketch.eps,
num.parallel.tree = num.parallel.tree,
objective = objective,
base.score = base.score)
if (objective == "multi:softmax") params$num.class <- nclass
} else {
params <- list(booster = booster,
silent = silent,
lambda = lambda,
alpha = alpha,
lambda.bias = lambda.bias,
objective = objective,
base.score = base.score)
if (objective == "multi:softmax") params$num.class <- nclass
}
}
# [ FULL XGBOOST ] ====
if (verbose) msg("Training full XGB model with", booster, "booster...", newline.pre = TRUE)
if (!is.null(objective)) objective <- deparse(substitute(objective))
if (!is.null(feval)) feval <- deparse(substitute(feval))
if (!is.null(force.nrounds)) nrounds <- force.nrounds
mod <- xgboost::xgb.train(params,
xg.dat,
nrounds,
obj = obj,
feval = feval,
verbose = verbose,
print_every_n = print_every_n)
# [ FITTED ] ====
fitted <- predict(mod, xg.dat)
if (type == "Classification") {
# round() gives correct result whether response is integer or probability
fitted.prob <- 1 - fitted
fitted <- factor(ifelse(fitted.prob >= .5, 1, 0), levels = c(1, 0))
levels(fitted) <- levels(y)
} else {
fitted.prob <- NULL
}
error.train <- modError(y, fitted, fitted.prob)
if (verbose) errorSummary(error.train, mod.name)
# [ PREDICTED ] ====
predicted.prob <- predicted <- error.test <- NULL
if (!is.null(x.test)) {
data.test <- xgboost::xgb.DMatrix(data = as.matrix(x.test), missing = missing)
predicted <- predict(mod, data.test)
if (type == "Classification") {
predicted.prob <- 1 - predicted
predicted <- factor(ifelse(predicted.prob >= .5, 1, 0), levels = c(1, 0))
levels(predicted) <- levels(y)
}
if (!is.null(y.test)) {
error.test <- modError(y.test, predicted, predicted.prob)
if (verbose) errorSummary(error.test, mod.name)
}
}
# [ RELATIVE INFLUENCE / VARIABLE IMPORTANCE ] ====
.importance <- NULL
# This may take a while
if (importance) {
if (verbose) msg("Estimating variable importance...")
.importance <- xgboost::xgb.importance(model = mod, feature_names = colnames(x))
}
# [ OUTRO ] ====
# sink(logOut, append = T, split = T)
extra <- list(resampler = resampler,
booster = booster,
base.score = base.score,
resamples = res.part,
grid = grid,
grid.run = if (save.res) grid.run else NULL,
grid.performance = grid.performance,
grid.performance.by.tune.id = grid.performance.by.tune.id,
best.tune = best.tune,
params = params,
nrounds = nrounds,
objective = objective,
feval = feval)
rt <- rtModSet(rtclass = rtclass,
mod = mod,
mod.name = mod.name,
type = type,
y.train = y,
y.test = y.test,
x.name = x.name,
y.name = y.name,
xnames = xnames,
fitted = fitted,
fitted.prob = fitted.prob,
se.fit = NULL,
error.train = error.train,
predicted = predicted,
predicted.prob = predicted.prob,
se.prediction = NULL,
error.test = error.test,
varimp = .importance,
question = question,
extra = extra)
rtMod.out(rt,
print.plot,
plot.fitted,
plot.predicted,
y.test,
mod.name,
outdir,
save.mod,
verbose,
plot.theme)
outro(start.time, verbose = verbose, sinkOff = ifelse(is.null(logFile), FALSE, TRUE))
rt
} # rtemis::s.XGB
|
# Working with text: combining and splitting strings.
# Demonstrates paste()/sprintf() for building strings, and the stringr
# package plus regular expressions for extraction and replacement
# (analogous to Python's re module).

# paste() concatenates its arguments, separated by `sep` (default is a space).
paste('hello', 'Jared', 'and others')
paste('hello', 'Jared', 'and others', sep = '/')
# With vector arguments, paste() works element-wise.
paste(c('Hello', 'Hey', 'Howdy'), c('Jared', 'Bob', 'David'))
paste(c('Hello', 'Hey', 'Howdy'), c('Jared', 'Bob', 'David'), sep = ', ')
# Shorter arguments are recycled to the length of the longest one.
paste('Hello', c('Jared', 'Bob', 'David'), sep = ', ')
paste('Hello', c('Jared', 'Bob', 'David'), sep = ', ', c('Goodbye', 'Seeya'))
vectoroftext <- c('Hello', 'Everyone', 'Out There', '.')
vectoroftext
# `collapse` joins the elements of a single vector into one string.
paste(vectoroftext, collapse = ' ')
person <- 'Jared'
partysize <- 8
waittime <- 25
paste('Hello, ', person, ', your party of ',
partysize, ' will be seated in ', waittime, ' minutes.', sep = "")
# paste() is laborious for templating; sprintf() is closer to Python's string formatting.
sprintf('Hello %s, your party of %s will be seated in %s minutes.'
, person, partysize, waittime)
# Text extraction: scrape an HTML table, then clean it with regular expressions
# (as with the re package in Python).
library(XML)  # library() fails fast if the package is missing (preferred over require())
theurl <- "http://www.loc.gov/rr/print/list/057_chron.html"
presidents <- readHTMLTable(theurl, which = 3,
as.data.frame = TRUE,
skip.rows = 1,
header = TRUE,
stringsAsFactors = FALSE)
View(presidents)
# The last meaningful row is 65; drop the trailing non-data rows.
presidents <- presidents[1:65, ]
View(presidents) # much cleaner.
# Roughly 80% of the time spent on statistics is data munging and cleaning;
# dedicated packages such as stringr make that easier.
library(stringr)
yearlist <- str_split(string = presidents$YEAR, pattern = '-')
View(yearlist)
# Stack the split pieces row-wise with Reduce(rbind, ...) (like reduce in Python).
yearmatrix <- data.frame(Reduce(rbind, yearlist))
View(yearmatrix)
head(yearmatrix)
names(yearmatrix) <- c('Begining', 'Ending')
View(yearmatrix)
presidents <- cbind(presidents, yearmatrix)
View(presidents)
# Substring extraction (like slicing with re.sub-style tools in Python):
# the first 3 characters of each president's name, then characters 4-8.
str_sub(string = presidents$PRESIDENT, start = 1, end = 3)
str_sub(string = presidents$PRESIDENT, start = 4, end = 8)
# Find all presidents whose presidency began in a year ending in 1,
# e.g. Begining in 1981, 1971.
# start = 4 refers to the 4th digit of the year, and end is also 4
# because we are isolating that single digit.
presidents[str_sub(string = presidents$Begining,
start = 4, end = 4) == 1,
c('YEAR', "PRESIDENT", "Begining", "Ending")]
# Generic search: does the president's name contain 'john'?
str_detect(presidents$PRESIDENT, 'john')
# The above is case sensitive; wrap the pattern in
# fixed(..., ignore_case = TRUE) to make the match case-insensitive.
str_detect(presidents$PRESIDENT, fixed('john', ignore_case=TRUE))
View(presidents[str_detect(presidents$PRESIDENT,
fixed('john', ignore_case=TRUE)), ])
# Load a vector of war date ranges from a remote .rdata file.
con <- url("http://jaredlander.com/data/warTimes.rdata")
load(con)
close(con)
View(warTimes)
# Entries whose date range contains a hyphen.
warTimes[str_detect(string = warTimes, pattern = "-")]
# Split each entry on the garbled "ACAEA" separator or a hyphen,
# keeping at most 2 pieces.
thetimes <- str_split(string = warTimes, pattern = "(ACAEA)|-", n = 2)
head(thetimes)
# Keep only the first piece (the start date) of each split.
starttimes <- sapply(thetimes, FUN = function(x) x[1])
head(starttimes)
starttimes <- str_trim(starttimes)  # strip leading/trailing whitespace
View(starttimes)
# How to extract information from the data,
# say we are looking for January:
str_extract(string = starttimes, pattern = 'January')
starttimes[str_detect(string = starttimes, pattern = 'January')]
# Extract a 4-digit year.
head(str_extract(string = starttimes, pattern = "[0-9][0-9][0-9][0-9]"), 30)
# Repeating [] 16 times for 16 digits would be unwieldy; use a quantifier.
head(str_extract(string = starttimes, pattern = "[0-9]{4}"), 30)
# Or use the \\d digit class:
head(str_extract(string = starttimes, pattern = "\\d{4}"), 30)
# Match runs of 1 to 3 digits:
head(str_extract(string = starttimes, pattern = "\\d{1,3}"), 30)
# As with regex in Python, ^ anchors at the beginning of the string...
head(str_extract(string = starttimes, pattern = "^\\d{4}"), 30)
# ...and $ anchors at the end, so ^\\d{4}$ matches only entries that are
# exactly a 4-digit year.
head(str_extract(string = starttimes, pattern = "^\\d{4}$"), 30)
# str_replace() replaces only the first match.
head(str_replace(string = starttimes, pattern = "\\d",
replacement = 'x'), 30)
# str_replace_all() replaces every match:
head(str_replace_all(string = starttimes, pattern = "\\d",
replacement = 'x'), 30)
# Replace each whole digit run (1-4 digits) with a single x:
head(str_replace_all(string = starttimes, pattern = "\\d{1,4}", 'x'), 30)
# Scraping a website for info: extract the text between HTML tags.
comm <- c("<a href=index.html> the link is here</a>",
"<b>this is bold text</b>")
comm
# Lazy wildcard search: <.+?> consumes one whole tag (`?` stops at the first
# closing >), (.+?) captures the inner text, and \\1 substitutes that capture.
# Expected result:
# [1] " the link is here" "this is bold text"
str_replace(string = comm, pattern = "<.+?>(.+?)<.+>", replacement = "\\1")
| /combine_strings.r | no_license | kimalaacer/Tutorial-on-R | R | false | false | 5,569 | r | # working with text, combining and splitting.
# sprintf and paste.
paste('hello', 'Jared', 'and others')
paste('hello', 'Jared', 'and others', sep = '/')
paste(c('Hello', 'Hey', 'Howdy'), c('Jared', 'Bob', 'David'))
paste(c('Hello', 'Hey', 'Howdy'), c('Jared', 'Bob', 'David'), sep = ', ')
paste('Hello', c('Jared', 'Bob', 'David'), sep = ', ')
paste('Hello', c('Jared', 'Bob', 'David'), sep = ', ', c('Goodbye', 'Seeya'))
vectoroftext <- c('Hello', 'Everyone', 'Out There', '.')
vectoroftext
paste(vectoroftext, collapse = ' ') # removes empty spaces and
#makes it a single text (string)
person <- 'Jared'
partysize <- 8
waittime <- 25
paste('Hello, ', person, ', your party of ',
partysize, ' will be seated in ', waittime, ' minutes.', sep = "")
# paste is too laborious, let's use sprintf or even python
sprintf('Hello %s, your party of %s will be seated in %s minutes.'
, person, partysize, waittime)
# text extraction: using RegEx like in python re package
require(XML)
theurl <- "http://www.loc.gov/rr/print/list/057_chron.html"
presidents <- readHTMLTable(theurl, which = 3,
as.data.frame = TRUE,
skip.rows = 1,
header = TRUE,
stringsAsFactors = FALSE)
View(presidents)
# we noticed the the last meaningful row is 65.
# we need to remove the rest.
presidents <- presidents[1:65, ]
View(presidents) # much cleaner.
# 80% of the time spent on stats are data munging and cleaning.
# we can use specific packages to help us.
# such as stringr
require(stringr)
yearlist <- str_split(string = presidents$YEAR, pattern = '-')
View(yearlist)
# combine them in a matrix by rbind and reduce (such as re in python)
yearmatrix <- data.frame(Reduce(rbind, yearlist))
View(yearmatrix)
head(yearmatrix)
names(yearmatrix) <- c('Begining', 'Ending')
View(yearmatrix)
presidents <- cbind(presidents, yearmatrix)
View(presidents)
# if we need to get the first 3 characters of
# the presidents name( do not know why, but just in case).
# as in re python re.sub
str_sub(string = presidents$PRESIDENT, start = 1, end = 3)
str_sub(string = presidents$PRESIDENT, start = 4, end = 8)
# find all presidents that their presidency start in
#year ending in 1, such as, Begining in 1981, 1971.
# The start =4 refers to the 4th digit of the year, and
# the end is also 4, because we are looking for this digit alone.
presidents[str_sub(string = presidents$Begining,
start = 4, end = 4) == 1,
c('YEAR', "PRESIDENT", "Begining", "Ending")]
# we can do a generic search to check if the president
# has a 'john' in their name.
str_detect(presidents$PRESIDENT, 'john')
# case sensitive
# ignore.case did not work for me.
str_detect(presidents$PRESIDENT, fixed('john', ignore_case=TRUE))
View(presidents[str_detect(presidents$PRESIDENT,
fixed('john', ignore_case=TRUE)), ])
con <- url("http://jaredlander.com/data/warTimes.rdata")
load(con)
close(con)
View(warTimes)
warTimes[str_detect(string = warTimes, pattern = "-")]
thetimes <- str_split(string = warTimes,pattern = "(ACAEA)|-", n = 2)
head(thetimes)
starttimes <- sapply(thetimes, FUN = function(x) x[1])
head(starttimes)
starttimes <- str_trim(starttimes)
View(starttimes)
#how to extract information from the data,
#say we are looking for January:
str_extract(string = starttimes,pattern = 'January')
starttimes[str_detect(string = starttimes,pattern = 'January')]
# if we want to extract a year (4 digit).
head(str_extract(string = starttimes,pattern = "[0-9][0-9][0-9][0-9]"), 30)
# it would be very hard to do 16 [] if we are looking for 16 digits.
head(str_extract(string = starttimes,pattern = "[0-9]{4}"), 30)
# or another way:
head(str_extract(string = starttimes,pattern = "\\d{4}"), 30)
# how to find a string that contain 1, 2, or 3 digit:
head(str_extract(string = starttimes,pattern = "\\d{1,3}"), 30)
# Same as Regex in python, ^ means begining of the line.
head(str_extract(string = starttimes,pattern = "^\\d{4}"), 30)
# and $ is at the end of the line- which means in this case
#the only date os the 4 digit year
head(str_extract(string = starttimes,pattern = "^\\d{4}$"), 30)
# to replace, use str_replace, which replaces only the first match.
head(str_replace(string = starttimes,pattern = "\\d",
replacement = 'x'), 30)
#if we need to replace all matches:
head(str_replace_all(string = starttimes,pattern = "\\d",
replacement = 'x'), 30)
#if we nedd to replace all digit strings with a single x:
head(str_replace_all(string = starttimes,pattern = "\\d{1,4}", 'x'), 30)
# if we a scraping a website for info:
comm <- c("<a href=index.html> the link is here</a>",
"<b>this is bold text</b>")
comm
# we need to extract the info: we need to do a lazy wild card search.
# < start of search, . is a wildcard will search for 1 character
# + means search for all characters
# ? to stop the search once you hit the ending >
#"<a href=index.html> the link is here</a>"
#[2] "<b>this is bold text</b>"
#> str_replace(string = comm,pattern = "<.+?>(.+?)<.+>", replacement = "\\1")
#[1] " the link is here" "this is bold text"
str_replace(string = comm,pattern = "<.+?>(.+?)<.+>", replacement = "\\1")
|
\name{tdeath_other}
\alias{tdeath_other}
\title{Predict the age at death from a cause other than lung cancer
}
\description{Function to predict the age (years) at which a person may die from a cause other than lung cancer given age, gender and smoking intensity, when relevant.
}
\usage{
tdeath_other(u1, u2, status, covs_other)
}
\arguments{
\item{u1, u2}{random numbers from Unif[0,1] required for the simulation
}
\item{status}{smoking status ("never", "former", or "current" smoker)
}
\item{covs_other}{3-dimensional vector with values for the covariates (other than smoking status) related to death from other causes, i.e., age (years) at the beginning of the prediction period, gender, smoking intensity expressed as average number of cigarettes smoked per day.
}
}
\value{
An R-object of class "list" with the following six components:
[[1]]: random number u1 used in the simulation
[[2]]: random number u2 used in the simulation
[[3]]: index number of the time interval
[[4]]: time interval at which death from other causes may occur
[[5]]: age (years) at death from cause other than lung cancer
[[6]]: R-object of class "list" with the relevant CIF estimates
}
\note{
Components [[1]]-[[4]] and [[6]] are returned for testing purposes only.
}
\author{
Stavroula A. Chrysanthopoulou}
\seealso{\code{\link{current.other}, \link{former.other}, \link{never.other}, \link{tdeath_lung}}
}
\examples{
# Predict the age at death from a cause other than lung cancer for a 52-year-old man
# who has never smoked.
data(current.other, former.other, never.other)
d.other <- tdeath_other(runif(1), runif(1), "never", c(52, "male", NA))
d.other[[1]]
d.other[[2]]
d.other[[3]]
d.other[[4]]
d.other[[5]]
d.other[[6]]
}
\keyword{Functions} | /man/tdeath_other.Rd | no_license | Qingys/MILC_backup | R | false | false | 1,798 | rd | \name{tdeath_other}
\alias{tdeath_other}
\title{Predict the age at death from a cause other than lung cancer
}
\description{Function to predict the age (years) at which a person may die from a cause other than lung cancer given age, gender and smoking intensity, when relevant.
}
\usage{
tdeath_other(u1, u2, status, covs_other)
}
\arguments{
\item{u1, u2}{random numbers from Unif[0,1] required for the simulation
}
\item{status}{smoking status ("never", "former", or "current" smoker)
}
\item{covs_other}{3-dimensional vector with values for the covariates (other than smoking status) related to death from other causes, i.e., age (years) at the beginning of the prediction period, gender, smoking intensity expressed as average number of cigarettes smoked per day.
}
}
\value{
An R-object of class "list" with the following six components:
[[1]]: random number u1 used in the simulation
[[2]]: random number u2 used in the simulation
[[3]]: index number of the time interval
[[4]]: time interval at which death from other causes may occur
[[5]]: age (years) at death from cause other than lung cancer
[[6]]: R-object of class "list" with the relevant CIF estimates
}
\note{
Components [[1]]-[[4]] and [[6]] are returned for testing purposes only.
}
\author{
Stavroula A. Chrysanthopoulou}
\seealso{\code{\link{current.other}, \link{former.other}, \link{never.other}, \link{tdeath_lung}}
}
\examples{
# Predict the age at death from a cause other than lung cancer for a man 52 years old,
# who have never smoked.
data(current.other, former.other, never.other)
d.other <- tdeath_other(runif(1), runif(1), "never", c(52, "male", NA))
d.other[[1]]
d.other[[2]]
d.other[[3]]
d.other[[4]]
d.other[[5]]
d.other[[6]]
}
\keyword{Functions} |
fct_count <- function(f, sort = FALSE, prop = FALSE) {
  # Tabulate the levels of a factor, optionally sorting by frequency
  # and appending a proportion column.
  f2 <- check_factor(f)
  n_missing <- sum(is.na(f))

  # Level labels and their counts; append an NA row only when NAs exist.
  labels <- levels(f2)
  counts <- tabulate(f2, nlevels(f))
  if (n_missing > 0) {
    labels <- c(labels, NA)
    counts <- c(counts, n_missing)
  }

  out <- tibble::tibble(
    f = fct_inorder(labels),
    n = counts
  )

  if (sort) {
    out <- out[order(out$n, decreasing = TRUE), ]
  }
  if (prop) {
    out$p <- prop.table(out$n)
  }
  out
}
| /R/count.R | no_license | sindribaldur/poorcats | R | false | false | 364 | r | fct_count <- function(f, sort = FALSE, prop = FALSE) {
f2 <- check_factor(f)
n_na <- sum(is.na(f))
df <- tibble::tibble(
f = fct_inorder(c(levels(f2), if (n_na > 0) NA)),
n = c(tabulate(f2, nlevels(f)), if (n_na > 0) n_na)
)
if (sort) {
df <- df[order(df$n, decreasing = TRUE), ]
}
if (prop) {
df$p <- prop.table(df$n)
}
df
}
|
# JAGS model string for an 8-state multistate (CJS-type) mark-recapture model
# of tagged fish: the states are dead, spawning, PWS arrays, PWS (Prince
# William Sound), Hinchinbrook, Montague, Southwest passages, and GOA (Gulf
# of Alaska).  `tr` is the season/constraint-specific state-transition matrix,
# `em` the (deterministic) detection/emission matrix, and the final section
# re-parametrizes probabilities on the logit scale to interpret constraint
# effects as beta coefficients.  The string is written to a .bugs file below
# and compiled by JAGS; the comments inside the string document each cell.
mod <- "model{
# -------- Priors --------
for (s in 1:n_seasons){ # Iterate over the season
for (x in 1:n_x_vals){ # Iterate over the constraint categories
S4[s,x] ~ dbeta(1,1) # Non-informative Beta Prior for the Survival Probability in PWS
S8[s,x] ~ dbeta(1,1) # Non-informative Beta Prior for the Survival/Permenent Immigration Probability in the GOA
psi2[1:2,s,x] ~ ddirch(c(2,2)) # Non-Informative Dirichlet Prior for the Movement Probabilities at the Spawning Arrays
psi3[1:2,s,x] ~ ddirch(c(2,2)) # Non-Informative Dirichlet Prior for the Movement Probabilities at the Other PWS Arrays
psi4[1:6,s,x] ~ ddirch(c(2,2,2,2,2,2)) # Non-Informative Dirichlet Prior for the Movement Probabilities in PWS
psi5[1:3,s,x] ~ ddirch(h_data[,s,x]) # Informative Dirichlet Prior at Hinchinbrook
psi6[1:3,s,x] ~ ddirch(m_data[,s,x]) # Informative Dirichlet Prior at Montague
psi7[1:3,s,x] ~ ddirch(s_data[,s,x]) # Informative Dirichlet Prior for the Southwest Passage Arrays
psi8[1:4,s,x] ~ ddirch(c(2,2,2,2)) # Non-Informative Dirichlet Prior for the Movement Probabilities in the GOA
}
}
# -------- Transition Matrix --------
for (s in 1:n_seasons){ # Iterate over the season
for (x in 1:n_x_vals){ # Iterate over the constraint categories
tr[1,1,s,x] <- 1 # dead to dead
tr[2,1,s,x] <- 0 # dead to spawning
tr[3,1,s,x] <- 0 # dead to pws array
tr[4,1,s,x] <- 0 # dead to pws
tr[5,1,s,x] <- 0 # dead to hinchinbrook
tr[6,1,s,x] <- 0 # dead to montague
tr[7,1,s,x] <- 0 # dead to southwest
tr[8,1,s,x] <- 0 # dead to goa
tr[1,2,s,x] <- 0 # spawning to dead
tr[2,2,s,x] <- psi2[1,s,x] # spawning to spawning
tr[3,2,s,x] <- 0 # spawning to pws array
tr[4,2,s,x] <- psi2[2,s,x] # spawning to pws
tr[5,2,s,x] <- 0 # spawning to hinchinbrook
tr[6,2,s,x] <- 0 # spawning to montague
tr[7,2,s,x] <- 0 # spawning to southwest
tr[8,2,s,x] <- 0 # spawning to goa
tr[1,3,s,x] <- 0 # pws array to dead
tr[2,3,s,x] <- 0 # pws array to spawing
tr[3,3,s,x] <- psi3[1,s,x] # pws array to pws array
tr[4,3,s,x] <- psi3[2,s,x] # pws array to pws
tr[5,3,s,x] <- 0 # pws array to hinchinbrook
tr[6,3,s,x] <- 0 # pws array to montague
tr[7,3,s,x] <- 0 # pws array to southwest
tr[8,3,s,x] <- 0 # pws array to goa
tr[1,4,s,x] <- 1-S4[s,x] # pws to dead
tr[2,4,s,x] <- S4[s,x]*psi4[1,s,x] # pws to spawning
tr[3,4,s,x] <- S4[s,x]*psi4[2,s,x] # pws to pws array
tr[4,4,s,x] <- S4[s,x]*psi4[3,s,x] # pws to pws
tr[5,4,s,x] <- S4[s,x]*psi4[4,s,x] # pws to hinchinbrook
tr[6,4,s,x] <- S4[s,x]*psi4[5,s,x] # pws to montague
tr[7,4,s,x] <- S4[s,x]*psi4[6,s,x] # pws to southwest
tr[8,4,s,x] <- 0 # pws to goa
tr[1,5,s,x] <- 0 # hinchinbrook to dead
tr[2,5,s,x] <- 0 # hinchinbrook to spawning
tr[3,5,s,x] <- 0 # hinchinbrook pws array
tr[4,5,s,x] <- psi5[1,s,x] # hinchinbrook to pws
tr[5,5,s,x] <- psi5[2,s,x] # hinchinbrook to hinchinbrook
tr[6,5,s,x] <- 0 # hinchinbrook to montague
tr[7,5,s,x] <- 0 # hinchinbrook to southwest
tr[8,5,s,x] <- psi5[3,s,x] # hinchinbrook to goa
tr[1,6,s,x] <- 0 # montague to dead
tr[2,6,s,x] <- 0 # montague to spawning
tr[3,6,s,x] <- 0 # montague to pws array
tr[4,6,s,x] <- psi6[1,s,x] # montague to pws
tr[5,6,s,x] <- 0 # montague to hinchinbrook
tr[6,6,s,x] <- psi6[2,s,x] # montague to montague
tr[7,6,s,x] <- 0 # montague to southwest
tr[8,6,s,x] <- psi6[3,s,x] # montague to goa
tr[1,7,s,x] <- 0 # southwest to dead
tr[2,7,s,x] <- 0 # southwest to spawning
tr[3,7,s,x] <- 0 # southwest to pws array
tr[4,7,s,x] <- psi7[1,s,x] # southwest to pws
tr[5,7,s,x] <- 0 # southwest to hinchinbrook
tr[6,7,s,x] <- 0 # southwest to montague
tr[7,7,s,x] <- psi7[2,s,x] # southwest to southwest
tr[8,7,s,x] <- psi7[3,s,x] # southwest to goa
tr[1,8,s,x] <- 1-S8[s,x] # goa to dead
tr[2,8,s,x] <- 0 # goa to spawning
tr[3,8,s,x] <- 0 # goa to pws array
tr[4,8,s,x] <- 0 # goa to pws
tr[5,8,s,x] <- S8[s,x]*psi8[1,s,x] # goa to hinchinbrook
tr[6,8,s,x] <- S8[s,x]*psi8[2,s,x] # goa to montague
tr[7,8,s,x] <- S8[s,x]*psi8[3,s,x] # goa to southwest
tr[8,8,s,x] <- S8[s,x]*psi8[4,s,x] # goa to goa
}
}
# -------- Emission Matrix --------
em[1,1] <- 1 # dead and no detect
em[2,1] <- 0 # dead and spawning detect
em[3,1] <- 0 # dead and pws array detect
em[4,1] <- 0 # dead and hinchinbrook detect
em[5,1] <- 0 # dead and montague detect
em[6,1] <- 0 # dead and southwest detect
em[1,2] <- 0 # spawning and no detect
em[2,2] <- 1 # spawning and spawning detect
em[3,2] <- 0 # spawning and pws array detect
em[4,2] <- 0 # spawning and hinchinbrook detect
em[5,2] <- 0 # spawning and montague detect
em[6,2] <- 0 # spawning and southwest detect
em[1,3] <- 0 # pws array and no detect
em[2,3] <- 0 # pws array and spawning detect
em[3,3] <- 1 # pws array and pws array detect
em[4,3] <- 0 # pws array and hinchinbrook detect
em[5,3] <- 0 # pws array and montague detect
em[6,3] <- 0 # pws array and southwest detect
em[1,4] <- 1 # pws and no detect
em[2,4] <- 0 # pws and spawning detect
em[3,4] <- 0 # pws and pws array detect
em[4,4] <- 0 # pws and hinchinbrook detect
em[5,4] <- 0 # pws and montague detect
em[6,4] <- 0 # pws and southwest detect
em[1,5] <- 0 # hinchinbrook and no detect
em[2,5] <- 0 # hinchinbrook and spawning detect
em[3,5] <- 0 # hinchinbrook and pws array detect
em[4,5] <- 1 # hinchinbrook and hinchinbrook detect
em[5,5] <- 0 # hinchinbrook and montague detect
em[6,5] <- 0 # hinchinbrook and southwest detect
em[1,6] <- 0 # montague and no detect
em[2,6] <- 0 # montague and spawning detect
em[3,6] <- 0 # montague and pws array detect
em[4,6] <- 0 # montague and hinchinbrook detect
em[5,6] <- 1 # montague and montague detect
em[6,6] <- 0 # montague and southwest detect
em[1,7] <- 0 # southwest and no detect
em[2,7] <- 0 # southwest and spawning detect
em[3,7] <- 0 # southwest and pws array detect
em[4,7] <- 0 # southwest and hinchinbrook detect
em[5,7] <- 0 # southwest and montague detect
em[6,7] <- 1 # southwest and southwest detect
em[1,8] <- 1 # goa and no detect
em[2,8] <- 0 # goa and spawning detect
em[3,8] <- 0 # goa and pws array detect
em[4,8] <- 0 # goa and hinchinbrook detect
em[5,8] <- 0 # goa and montague detect
em[6,8] <- 0 # goa and southwest detect
# -------- Likelihood --------
for (i in 1:M){ # Iterate through fish
z[i,t_0[i]] ~ dcat(c(0,.25,0,.75,0,0,0,0)) # XX To Modify this Piece XX
for (t in t_0[i]:min((N-1),(t_0[i]+tl[i]-2))){ # Iterate over times when tag is active
z[i,t+1] ~ dcat(tr[1:8, z[i,t], season[t+1], x_data[i]]) # Latent process
}
}
for(i in 1:M){ # Iterate through fish
for (t in (t_0[i]+1):min((N-1),(t_0[i]+tl[i]-1))){ # Iterate over the times when the tag is active
y_data[i,t] ~ dcat(em[1:6,z[i,t]] ) # Conditional likelihood
}
}
# -------- Re-parametrize to Interpret Constraint Effects --------
for (s in 1:n_seasons){ # Iterate over the season
for (x in 1:n_x_vals){ # Iterate over the constraint categories
# Backtransform the survival and movement probabilities
mus4[s,x] <- logit(S4[s,x]) # Survival PWS
mus8[s,x] <- logit(S8[s,x]) # Survival GOA
mup2[1:2,s,x] <- logit(psi2[1:2,s,x]) # Movement Spawning
mup3[1:2,s,x] <- logit(psi3[1:2,s,x]) # Movement Other PWS
mup4[1:6,s,x] <- logit(psi4[1:6,s,x]) # Movement PWS
mup5[1:3,s,x] <- logit(psi5[1:3,s,x]) # Movement Hinchinbrook
mup6[1:3,s,x] <- logit(psi6[1:3,s,x]) # Movement Montague
mup7[1:3,s,x] <- logit(psi7[1:3,s,x]) # Movement Southwest Passages
mup8[1:4,s,x] <- logit(psi8[1:4,s,x]) # Movement GOA
}
}
for (s in 1:n_seasons){ # Iterate over the season
for (x in 1:n_x_vals){ # Iterate over the constraint categories
# Calculate the beta coefficients for the linear constraints
betas4[s,x] <- mus4[s,x]-mus4[s,1] # Survival PWS
betas8[s,x] <- mus8[s,x]-mus8[s,1] # Survival GOA
betap2[1:2,s,x] <- mup2[1:2,s,x]-mup2[1:2,s,1] # Movement Spawning
betap3[1:2,s,x] <- mup3[1:2,s,x]-mup3[1:2,s,1] # Movement Other PWS
betap4[1:6,s,x] <- mup4[1:6,s,x]-mup4[1:6,s,1] # Movement PWS
betap5[1:3,s,x] <- mup5[1:3,s,x]-mup5[1:3,s,1] # Movement Hinchinbrook
betap6[1:3,s,x] <- mup6[1:3,s,x]-mup6[1:3,s,1] # Movement Montague
betap7[1:3,s,x] <- mup7[1:3,s,x]-mup7[1:3,s,1] # Movement Southwest Passages
betap8[1:4,s,x] <- mup8[1:4,s,x]-mup8[1:4,s,1] # Movement GOA
}
}
}"
# Write the JAGS model specification to disk so it can be compiled by
# rjags/jagsUI.  Use <- for assignment, not = (R style convention).
# NOTE(review): hard-coded absolute Windows path — consider parameterizing
# or using file.path()/here() so the script is portable across machines.
file_name <- "C:/Users/19708/Desktop/Herring/Modeling/R Multistate CJS/Jags/09-27-jags.bugs"
writeLines(mod, con = file_name)
| /R and JAGS Code Sample/JAGS.R | no_license | jordyBernard/herring | R | false | false | 8,857 | r | mod <- "model{
# -------- Priors --------
for (s in 1:n_seasons){ # Iterate over the season
for (x in 1:n_x_vals){ # Iterate over the constraint categories
S4[s,x] ~ dbeta(1,1) # Non-informative Beta Prior for the Survival Probability in PWS
S8[s,x] ~ dbeta(1,1) # Non-informative Beta Prior for the Survival/Permenent Immigration Probability in the GOA
psi2[1:2,s,x] ~ ddirch(c(2,2)) # Non-Informative Dirichlet Prior for the Movement Probabilities at the Spawning Arrays
psi3[1:2,s,x] ~ ddirch(c(2,2)) # Non-Informative Dirichlet Prior for the Movement Probabilities at the Other PWS Arrays
psi4[1:6,s,x] ~ ddirch(c(2,2,2,2,2,2)) # Non-Informative Dirichlet Prior for the Movement Probabilities in PWS
psi5[1:3,s,x] ~ ddirch(h_data[,s,x]) # Informative Dirichlet Prior at Hinchinbrook
psi6[1:3,s,x] ~ ddirch(m_data[,s,x]) # Informative Dirichlet Prior at Montague
psi7[1:3,s,x] ~ ddirch(s_data[,s,x]) # Informative Dirichlet Prior for the Southwest Passage Arrays
psi8[1:4,s,x] ~ ddirch(c(2,2,2,2)) # Non-Informative Dirichlet Prior for the Movement Probabilities in the GOA
}
}
# -------- Transition Matrix --------
for (s in 1:n_seasons){ # Iterate over the season
for (x in 1:n_x_vals){ # Iterate over the constraint categories
tr[1,1,s,x] <- 1 # dead to dead
tr[2,1,s,x] <- 0 # dead to spawning
tr[3,1,s,x] <- 0 # dead to pws array
tr[4,1,s,x] <- 0 # dead to pws
tr[5,1,s,x] <- 0 # dead to hinchinbrook
tr[6,1,s,x] <- 0 # dead to montague
tr[7,1,s,x] <- 0 # dead to southwest
tr[8,1,s,x] <- 0 # dead to goa
tr[1,2,s,x] <- 0 # spawning to dead
tr[2,2,s,x] <- psi2[1,s,x] # spawning to spawning
tr[3,2,s,x] <- 0 # spawning to pws array
tr[4,2,s,x] <- psi2[2,s,x] # spawning to pws
tr[5,2,s,x] <- 0 # spawning to hinchinbrook
tr[6,2,s,x] <- 0 # spawning to montague
tr[7,2,s,x] <- 0 # spawning to southwest
tr[8,2,s,x] <- 0 # spawning to goa
tr[1,3,s,x] <- 0 # pws array to dead
tr[2,3,s,x] <- 0 # pws array to spawing
tr[3,3,s,x] <- psi3[1,s,x] # pws array to pws array
tr[4,3,s,x] <- psi3[2,s,x] # pws array to pws
tr[5,3,s,x] <- 0 # pws array to hinchinbrook
tr[6,3,s,x] <- 0 # pws array to montague
tr[7,3,s,x] <- 0 # pws array to southwest
tr[8,3,s,x] <- 0 # pws array to goa
tr[1,4,s,x] <- 1-S4[s,x] # pws to dead
tr[2,4,s,x] <- S4[s,x]*psi4[1,s,x] # pws to spawning
tr[3,4,s,x] <- S4[s,x]*psi4[2,s,x] # pws to pws array
tr[4,4,s,x] <- S4[s,x]*psi4[3,s,x] # pws to pws
tr[5,4,s,x] <- S4[s,x]*psi4[4,s,x] # pws to hinchinbrook
tr[6,4,s,x] <- S4[s,x]*psi4[5,s,x] # pws to montague
tr[7,4,s,x] <- S4[s,x]*psi4[6,s,x] # pws to southwest
tr[8,4,s,x] <- 0 # pws to goa
tr[1,5,s,x] <- 0 # hinchinbrook to dead
tr[2,5,s,x] <- 0 # hinchinbrook to spawning
tr[3,5,s,x] <- 0 # hinchinbrook pws array
tr[4,5,s,x] <- psi5[1,s,x] # hinchinbrook to pws
tr[5,5,s,x] <- psi5[2,s,x] # hinchinbrook to hinchinbrook
tr[6,5,s,x] <- 0 # hinchinbrook to montague
tr[7,5,s,x] <- 0 # hinchinbrook to southwest
tr[8,5,s,x] <- psi5[3,s,x] # hinchinbrook to goa
tr[1,6,s,x] <- 0 # montague to dead
tr[2,6,s,x] <- 0 # montague to spawning
tr[3,6,s,x] <- 0 # montague to pws array
tr[4,6,s,x] <- psi6[1,s,x] # montague to pws
tr[5,6,s,x] <- 0 # montague to hinchinbrook
tr[6,6,s,x] <- psi6[2,s,x] # montague to montague
tr[7,6,s,x] <- 0 # montague to southwest
tr[8,6,s,x] <- psi6[3,s,x] # montague to goa
tr[1,7,s,x] <- 0 # southwest to dead
tr[2,7,s,x] <- 0 # southwest to spawning
tr[3,7,s,x] <- 0 # southwest to pws array
tr[4,7,s,x] <- psi7[1,s,x] # southwest to pws
tr[5,7,s,x] <- 0 # southwest to hinchinbrook
tr[6,7,s,x] <- 0 # southwest to montague
tr[7,7,s,x] <- psi7[2,s,x] # southwest to southwest
tr[8,7,s,x] <- psi7[3,s,x] # southwest to goa
tr[1,8,s,x] <- 1-S8[s,x] # goa to dead
tr[2,8,s,x] <- 0 # goa to spawning
tr[3,8,s,x] <- 0 # goa to pws array
tr[4,8,s,x] <- 0 # goa to pws
tr[5,8,s,x] <- S8[s,x]*psi8[1,s,x] # goa to hinchinbrook
tr[6,8,s,x] <- S8[s,x]*psi8[2,s,x] # goa to montague
tr[7,8,s,x] <- S8[s,x]*psi8[3,s,x] # goa to southwest
tr[8,8,s,x] <- S8[s,x]*psi8[4,s,x] # goa to goa
}
}
# -------- Emission Matrix --------
em[1,1] <- 1 # dead and no detect
em[2,1] <- 0 # dead and spawning detect
em[3,1] <- 0 # dead and pws array detect
em[4,1] <- 0 # dead and hinchinbrook detect
em[5,1] <- 0 # dead and montague detect
em[6,1] <- 0 # dead and southwest detect
em[1,2] <- 0 # spawning and no detect
em[2,2] <- 1 # spawning and spawning detect
em[3,2] <- 0 # spawning and pws array detect
em[4,2] <- 0 # spawning and hinchinbrook detect
em[5,2] <- 0 # spawning and montague detect
em[6,2] <- 0 # spawning and southwest detect
em[1,3] <- 0 # pws array and no detect
em[2,3] <- 0 # pws array and spawning detect
em[3,3] <- 1 # pws array and pws array detect
em[4,3] <- 0 # pws array and hinchinbrook detect
em[5,3] <- 0 # pws array and montague detect
em[6,3] <- 0 # pws array and southwest detect
em[1,4] <- 1 # pws and no detect
em[2,4] <- 0 # pws and spawning detect
em[3,4] <- 0 # pws and pws array detect
em[4,4] <- 0 # pws and hinchinbrook detect
em[5,4] <- 0 # pws and montague detect
em[6,4] <- 0 # pws and southwest detect
em[1,5] <- 0 # hinchinbrook and no detect
em[2,5] <- 0 # hinchinbrook and spawning detect
em[3,5] <- 0 # hinchinbrook and pws array detect
em[4,5] <- 1 # hinchinbrook and hinchinbrook detect
em[5,5] <- 0 # hinchinbrook and montague detect
em[6,5] <- 0 # hinchinbrook and southwest detect
em[1,6] <- 0 # montague and no detect
em[2,6] <- 0 # montague and spawning detect
em[3,6] <- 0 # montague and pws array detect
em[4,6] <- 0 # montague and hinchinbrook detect
em[5,6] <- 1 # montague and montague detect
em[6,6] <- 0 # montague and southwest detect
em[1,7] <- 0 # southwest and no detect
em[2,7] <- 0 # southwest and spawning detect
em[3,7] <- 0 # southwest and pws array detect
em[4,7] <- 0 # southwest and hinchinbrook detect
em[5,7] <- 0 # southwest and montague detect
em[6,7] <- 1 # southwest and southwest detect
em[1,8] <- 1 # goa and no detect
em[2,8] <- 0 # goa and spawning detect
em[3,8] <- 0 # goa and pws array detect
em[4,8] <- 0 # goa and hinchinbrook detect
em[5,8] <- 0 # goa and montague detect
em[6,8] <- 0 # goa and southwest detect
# -------- Likelihood --------
for (i in 1:M){ # Iterate through fish
z[i,t_0[i]] ~ dcat(c(0,.25,0,.75,0,0,0,0)) # XX To Modify this Piece XX
for (t in t_0[i]:min((N-1),(t_0[i]+tl[i]-2))){ # Iterate over times when tag is active
z[i,t+1] ~ dcat(tr[1:8, z[i,t], season[t+1], x_data[i]]) # Latent process
}
}
for(i in 1:M){ # Iterate through fish
for (t in (t_0[i]+1):min((N-1),(t_0[i]+tl[i]-1))){ # Iterate over the times when the tag is active
y_data[i,t] ~ dcat(em[1:6,z[i,t]] ) # Conditional likelihood
}
}
# -------- Re-parametrize to Interpret Constraint Effects --------
for (s in 1:n_seasons){ # Iterate over the season
for (x in 1:n_x_vals){ # Iterate over the constraint categories
# Backtransform the survival and movement probabilities
mus4[s,x] <- logit(S4[s,x]) # Survival PWS
mus8[s,x] <- logit(S8[s,x]) # Survival GOA
mup2[1:2,s,x] <- logit(psi2[1:2,s,x]) # Movement Spawning
mup3[1:2,s,x] <- logit(psi3[1:2,s,x]) # Movement Other PWS
mup4[1:6,s,x] <- logit(psi4[1:6,s,x]) # Movement PWS
mup5[1:3,s,x] <- logit(psi5[1:3,s,x]) # Movement Hinchinbrook
mup6[1:3,s,x] <- logit(psi6[1:3,s,x]) # Movement Montague
mup7[1:3,s,x] <- logit(psi7[1:3,s,x]) # Movement Southwest Passages
mup8[1:4,s,x] <- logit(psi8[1:4,s,x]) # Movement GOA
}
}
for (s in 1:n_seasons){ # Iterate over the season
for (x in 1:n_x_vals){ # Iterate over the constraint categories
# Calculate the beta coefficients for the linear constraints
betas4[s,x] <- mus4[s,x]-mus4[s,1] # Survival PWS
betas8[s,x] <- mus8[s,x]-mus8[s,1] # Survival GOA
betap2[1:2,s,x] <- mup2[1:2,s,x]-mup2[1:2,s,1] # Movement Spawning
betap3[1:2,s,x] <- mup3[1:2,s,x]-mup3[1:2,s,1] # Movement Other PWS
betap4[1:6,s,x] <- mup4[1:6,s,x]-mup4[1:6,s,1] # Movement PWS
betap5[1:3,s,x] <- mup5[1:3,s,x]-mup5[1:3,s,1] # Movement Hinchinbrook
betap6[1:3,s,x] <- mup6[1:3,s,x]-mup6[1:3,s,1] # Movement Montague
betap7[1:3,s,x] <- mup7[1:3,s,x]-mup7[1:3,s,1] # Movement Southwest Passages
betap8[1:4,s,x] <- mup8[1:4,s,x]-mup8[1:4,s,1] # Movement GOA
}
}
}"
# Write the JAGS model specification to disk so the sampler can read it.
# NOTE(review): hard-coded absolute Windows path -- consider parameterizing.
# Fix: use `<-` for assignment per R convention (was `=` at top level).
file_name <- "C:/Users/19708/Desktop/Herring/Modeling/R Multistate CJS/Jags/09-27-jags.bugs"
writeLines(mod, con = file_name)
|
\name{scoreOverlap}
\alias{cluster.cor}
\alias{scoreOverlap}
\title{Find correlations of composite variables (corrected for overlap) from a larger matrix.}
\description{
Given a n x c cluster definition matrix of -1s, 0s, and 1s (the keys) , and a n x n correlation matrix, or an N x n data matrix, find the correlations of the composite clusters. The keys matrix can be entered by hand, copied from the clipboard (\code{\link{read.clipboard}}), or taken as output from the \code{\link{factor2cluster}} or \code{\link{make.keys}} functions. Similar functionality to \code{\link{scoreItems}} which also gives item by cluster correlations.
}
\usage{
scoreOverlap(keys, r, correct = TRUE, SMC = TRUE, av.r = TRUE, item.smc = NULL,
impute = TRUE,select=TRUE)
cluster.cor(keys, r.mat, correct = TRUE,SMC=TRUE,item.smc=NULL,impute=TRUE)
}
\arguments{
\item{keys}{A list of scale/cluster keys, or a matrix of cluster keys }
\item{r.mat}{A correlation matrix }
\item{r}{Either a correlation matrix or a raw data matrix}
\item{correct}{ TRUE shows both raw and corrected for attenuation correlations}
\item{SMC}{Should squared multiple correlations be used as communality estimates for the correlation matrix? }
\item{item.smc}{the smcs of the items may be passed into the function for speed, or calculated if SMC=TRUE }
\item{impute}{if TRUE, impute missing scale correlations based upon the average interitem correlation, otherwise return NA.}
\item{av.r}{Should the average r be used in correcting for overlap? smcs otherwise.}
\item{select}{By default, just find statistics for items included in the scoring keys. This allows for finding scores from matrices with bad items if they are not included in the set of scoring keys.}
}
\details{These are two of the functions used in the SAPA (\url{http://sapa-project.org}) procedures to form synthetic correlation matrices. Given any correlation matrix of items, it is easy to find the correlation matrix of scales made up of those items. This can also be done from the original data matrix or from the correlation matrix using \code{\link{scoreItems}} which is probably preferred unless the keys are overlapping.
In the case of overlapping keys (items being scored on multiple scales), \code{\link{scoreOverlap}} will adjust for this overlap by replacing the overlapping covariances (which are variances when overlapping) with the corresponding best estimate of an item's ``true" variance using either the average correlation or the smc estimate for that item. This parallels the operation done when finding alpha reliability. This is similar to ideas suggested by Cureton (1966) and Bashaw and Anderson (1967) but uses the smc or the average interitem correlation (default).
A typical use in the SAPA project is to form item composites by clustering or factoring (see \code{\link{fa}}, \code{\link{ICLUST}}, \code{\link{principal}}), extract the clusters from these results (\code{\link{factor2cluster}}), and then form the composite correlation matrix using \code{\link{cluster.cor}}. The variables in this reduced matrix may then be used in multiple correlation procedures using \code{\link{mat.regress}}.
The original correlation is pre and post multiplied by the (transpose) of the keys matrix.
If some correlations are missing from the original matrix this will lead to missing values (NA) for scale intercorrelations based upon those lower level correlations. If impute=TRUE (the default), a warning is issued and the correlations are imputed based upon the average correlations of the non-missing elements of each scale.
Because the alpha estimate of reliability is based upon the correlations of the items rather than upon the covariances, this estimate of alpha is sometimes called ``standardized alpha". If the raw items are available, it is useful to compare standardized alpha with the raw alpha found using \code{\link{scoreItems}}. They will differ substantially only if the items differ a great deal in their variances.
\code{\link{scoreOverlap}} answers an important question when developing scales and related subscales, or when comparing alternative versions of scales. By removing the effect of item overlap, it gives a better estimate of the relationship between the latent variables estimated by the observed sum (mean) scores.
}
\value{
\item{cor }{the (raw) correlation matrix of the clusters}
\item{sd }{standard deviation of the cluster scores}
\item{corrected }{raw correlations below the diagonal, alphas on diagonal, disattenuated above diagonal}
\item{alpha}{The (standardized) alpha reliability of each scale.}
\item{G6}{Guttman's Lambda 6 reliability estimate is based upon the smcs for each item in a scale. G6 uses the smc based upon the entire item domain.}
\item{av.r}{The average inter item correlation within a scale}
\item{size}{How many items are in each cluster?}
}
\references{
Bashaw, W. and Anderson Jr, H. E. (1967). A correction for replicated error in correlation coefficients. Psychometrika, 32(4):435-441.
Cureton, E. (1966). Corrected item-test correlations. Psychometrika, 31(1):93-96.
}
\author{
Maintainer: William Revelle \email{revelle@northwestern.edu}
}
\note{ See SAPA Revelle, W., Wilt, J., and Rosenthal, A. (2010) Personality and Cognition: The Personality-Cognition Link. In Gruszka, A. and Matthews, G. and Szymura, B. (Eds.) Handbook of Individual Differences in Cognition: Attention, Memory and Executive Control, Springer.
The second example uses the \code{\link{msq}} data set of 72 measures of motivational state to examine the overlap between four lower level scales and two higher level scales.
}
\seealso{ \code{\link{factor2cluster}}, \code{\link{mat.regress}}, \code{\link{alpha}}, and most importantly, \code{\link{scoreItems}}, which will do all of what cluster.cor does for most users. cluster.cor is an important helper function for \code{\link{iclust}}
}
\examples{
#use the msq data set that shows the structure of energetic and tense arousal
small.msq <- msq[ c("active", "energetic", "vigorous", "wakeful", "wide.awake",
"full.of.pep", "lively", "sleepy", "tired", "drowsy","intense", "jittery", "fearful",
"tense", "clutched.up", "quiet", "still", "placid", "calm", "at.rest") ]
small.R <- cor(small.msq,use="pairwise")
keys.list <- list(
EA = c("active", "energetic", "vigorous", "wakeful", "wide.awake", "full.of.pep",
"lively", "-sleepy", "-tired", "-drowsy"),
TA =c("intense", "jittery", "fearful", "tense", "clutched.up", "-quiet", "-still",
"-placid", "-calm", "-at.rest") ,
high.EA = c("active", "energetic", "vigorous", "wakeful", "wide.awake", "full.of.pep",
"lively"),
low.EA =c("sleepy", "tired", "drowsy"),
lowTA= c("quiet", "still", "placid", "calm", "at.rest"),
highTA = c("intense", "jittery", "fearful", "tense", "clutched.up")
)
keys <- make.keys(small.R,keys.list)
adjusted.scales <- scoreOverlap(keys.list,small.R)
#compare with unadjusted
confounded.scales <- cluster.cor(keys,small.R)
summary(adjusted.scales)
#note that the EA and high and low EA and TA and high and low TA
# scale correlations are confounded
summary(confounded.scales)
}
\keyword{ multivariate }
\keyword{ models }
| /man/cluster.cor.Rd | no_license | canshot/psych | R | false | false | 7,258 | rd | \name{scoreOverlap}
\alias{cluster.cor}
\alias{scoreOverlap}
\title{Find correlations of composite variables (corrected for overlap) from a larger matrix.}
\description{
Given a n x c cluster definition matrix of -1s, 0s, and 1s (the keys) , and a n x n correlation matrix, or an N x n data matrix, find the correlations of the composite clusters. The keys matrix can be entered by hand, copied from the clipboard (\code{\link{read.clipboard}}), or taken as output from the \code{\link{factor2cluster}} or \code{\link{make.keys}} functions. Similar functionality to \code{\link{scoreItems}} which also gives item by cluster correlations.
}
\usage{
scoreOverlap(keys, r, correct = TRUE, SMC = TRUE, av.r = TRUE, item.smc = NULL,
impute = TRUE,select=TRUE)
cluster.cor(keys, r.mat, correct = TRUE,SMC=TRUE,item.smc=NULL,impute=TRUE)
}
\arguments{
\item{keys}{A list of scale/cluster keys, or a matrix of cluster keys }
\item{r.mat}{A correlation matrix }
\item{r}{Either a correlation matrix or a raw data matrix}
\item{correct}{ TRUE shows both raw and corrected for attenuation correlations}
\item{SMC}{Should squared multiple correlations be used as communality estimates for the correlation matrix? }
\item{item.smc}{the smcs of the items may be passed into the function for speed, or calculated if SMC=TRUE }
\item{impute}{if TRUE, impute missing scale correlations based upon the average interitem correlation, otherwise return NA.}
\item{av.r}{Should the average r be used in correcting for overlap? smcs otherwise.}
\item{select}{By default, just find statistics for items included in the scoring keys. This allows for finding scores from matrices with bad items if they are not included in the set of scoring keys.}
}
\details{These are two of the functions used in the SAPA (\url{http://sapa-project.org}) procedures to form synthetic correlation matrices. Given any correlation matrix of items, it is easy to find the correlation matrix of scales made up of those items. This can also be done from the original data matrix or from the correlation matrix using \code{\link{scoreItems}} which is probably preferred unless the keys are overlapping.
In the case of overlapping keys (items being scored on multiple scales), \code{\link{scoreOverlap}} will adjust for this overlap by replacing the overlapping covariances (which are variances when overlapping) with the corresponding best estimate of an item's ``true" variance using either the average correlation or the smc estimate for that item. This parallels the operation done when finding alpha reliability. This is similar to ideas suggested by Cureton (1966) and Bashaw and Anderson (1967) but uses the smc or the average interitem correlation (default).
A typical use in the SAPA project is to form item composites by clustering or factoring (see \code{\link{fa}}, \code{\link{ICLUST}}, \code{\link{principal}}), extract the clusters from these results (\code{\link{factor2cluster}}), and then form the composite correlation matrix using \code{\link{cluster.cor}}. The variables in this reduced matrix may then be used in multiple correlation procedures using \code{\link{mat.regress}}.
The original correlation is pre and post multiplied by the (transpose) of the keys matrix.
If some correlations are missing from the original matrix this will lead to missing values (NA) for scale intercorrelations based upon those lower level correlations. If impute=TRUE (the default), a warning is issued and the correlations are imputed based upon the average correlations of the non-missing elements of each scale.
Because the alpha estimate of reliability is based upon the correlations of the items rather than upon the covariances, this estimate of alpha is sometimes called ``standardized alpha". If the raw items are available, it is useful to compare standardized alpha with the raw alpha found using \code{\link{scoreItems}}. They will differ substantially only if the items differ a great deal in their variances.
\code{\link{scoreOverlap}} answers an important question when developing scales and related subscales, or when comparing alternative versions of scales. By removing the effect of item overlap, it gives a better estimate of the relationship between the latent variables estimated by the observed sum (mean) scores.
}
\value{
\item{cor }{the (raw) correlation matrix of the clusters}
\item{sd }{standard deviation of the cluster scores}
\item{corrected }{raw correlations below the diagonal, alphas on diagonal, disattenuated above diagonal}
\item{alpha}{The (standardized) alpha reliability of each scale.}
\item{G6}{Guttman's Lambda 6 reliability estimate is based upon the smcs for each item in a scale. G6 uses the smc based upon the entire item domain.}
\item{av.r}{The average inter item correlation within a scale}
\item{size}{How many items are in each cluster?}
}
\references{
Bashaw, W. and Anderson Jr, H. E. (1967). A correction for replicated error in correlation coefficients. Psychometrika, 32(4):435-441.
Cureton, E. (1966). Corrected item-test correlations. Psychometrika, 31(1):93-96.
}
\author{
Maintainer: William Revelle \email{revelle@northwestern.edu}
}
\note{ See SAPA Revelle, W., Wilt, J., and Rosenthal, A. (2010) Personality and Cognition: The Personality-Cognition Link. In Gruszka, A. and Matthews, G. and Szymura, B. (Eds.) Handbook of Individual Differences in Cognition: Attention, Memory and Executive Control, Springer.
The second example uses the \code{\link{msq}} data set of 72 measures of motivational state to examine the overlap between four lower level scales and two higher level scales.
}
\seealso{ \code{\link{factor2cluster}}, \code{\link{mat.regress}}, \code{\link{alpha}}, and most importantly, \code{\link{scoreItems}}, which will do all of what cluster.cor does for most users. cluster.cor is an important helper function for \code{\link{iclust}}
}
\examples{
#use the msq data set that shows the structure of energetic and tense arousal
small.msq <- msq[ c("active", "energetic", "vigorous", "wakeful", "wide.awake",
"full.of.pep", "lively", "sleepy", "tired", "drowsy","intense", "jittery", "fearful",
"tense", "clutched.up", "quiet", "still", "placid", "calm", "at.rest") ]
small.R <- cor(small.msq,use="pairwise")
keys.list <- list(
EA = c("active", "energetic", "vigorous", "wakeful", "wide.awake", "full.of.pep",
"lively", "-sleepy", "-tired", "-drowsy"),
TA =c("intense", "jittery", "fearful", "tense", "clutched.up", "-quiet", "-still",
"-placid", "-calm", "-at.rest") ,
high.EA = c("active", "energetic", "vigorous", "wakeful", "wide.awake", "full.of.pep",
"lively"),
low.EA =c("sleepy", "tired", "drowsy"),
lowTA= c("quiet", "still", "placid", "calm", "at.rest"),
highTA = c("intense", "jittery", "fearful", "tense", "clutched.up")
)
keys <- make.keys(small.R,keys.list)
adjusted.scales <- scoreOverlap(keys.list,small.R)
#compare with unadjusted
confounded.scales <- cluster.cor(keys,small.R)
summary(adjusted.scales)
#note that the EA and high and low EA and TA and high and low TA
# scale correlations are confounded
summary(confounded.scales)
}
\keyword{ multivariate }
\keyword{ models }
|
# Load the Shiny UI/server toolkit and data.table's fast CSV reader
library(shiny)
library(data.table)
# Registry extract; encoding = "Latin-1" -- presumably because the file
# contains Norwegian text (confirm against the CSV source)
data <- fread("ReshHF.csv", encoding = "Latin-1")
# UI: two dynamically rendered inputs (filled by the server below) plus a
# compare-with-whole-country checkbox; the main panel is intentionally empty
ui3 <- fluidPage(
headerPanel("NTR Test"),
sidebarLayout(
sidebarPanel(
uiOutput("valgEnhet"),
uiOutput("valgEnhetNavn"),
checkboxInput("compare", "Sammenligne med hele landet")
),
mainPanel(
)
)
)
# Levels of health-care units; each is also a column name in `data`
# (looked up via get() in the server)
helseEnhet <- c("RHF", "HF", "Sykehus")
# Server: render the unit-level radio buttons and, once a level is picked,
# a dropdown of the unit names found in that column of `data`.
ser3 <- function(input, output, session) {
  output$valgEnhet <- renderUI({
    # Radio buttons over the available levels of health-care units
    radioButtons("enhet", "Valg Enheten",
                 choices = as.list(helseEnhet),
                 selected = NULL)
  })
  output$valgEnhetNavn <- renderUI({
    # Nothing chosen yet: render nothing instead of erroring
    if (is.null(input$enhet)) {
      return()
    }
    chosen_level <- input$enhet
    unit_names <- data[, sort(unique(get(chosen_level)))]
    selectInput("helseNavn", "Helse Enheten", choices = unit_names)
  })
}
# Launch the app with the UI and server defined above
shinyApp(ui = ui3, server = ser3)
| /publicapp/valgEnhet.R | no_license | ybkamaleri/traume | R | false | false | 916 | r |
# Shiny app: pick a health-care unit level, then a unit name within it.
library(shiny)
library(data.table)
# Registry extract; encoding = "Latin-1" -- presumably for Norwegian text,
# confirm against the CSV source
data <- fread("ReshHF.csv", encoding = "Latin-1")
# UI: two dynamically rendered inputs plus a compare-with-country checkbox;
# the main panel is intentionally empty
ui3 <- fluidPage(
headerPanel("NTR Test"),
sidebarLayout(
sidebarPanel(
uiOutput("valgEnhet"),
uiOutput("valgEnhetNavn"),
checkboxInput("compare", "Sammenligne med hele landet")
),
mainPanel(
)
)
)
# Levels of health-care units; each is also a column name in `data`
# (looked up via get() below)
helseEnhet <- c("RHF", "HF", "Sykehus")
ser3 <- function(input, output, session) {
# Radio buttons over the available unit levels
output$valgEnhet <- renderUI({
radioButtons("enhet", "Valg Enheten",
choices = as.list(helseEnhet),
selected = NULL)
})
# Dropdown of unit names for the chosen level
output$valgEnhetNavn <- renderUI({
# if missing input, return to avoid error
if(is.null(input$enhet)) return()
valgetEnhet <- input$enhet
enhetNavn <- data[, sort(unique(get(valgetEnhet)))]
selectInput("helseNavn", "Helse Enheten",
choices = enhetNavn)
})
}
# Launch the app
shinyApp(ui = ui3, server = ser3)
|
## Caching the Inverse of a Matrix:
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Holds the cached inverse; NULL means "not computed yet".
  inv <- NULL
  # The returned list is the object's interface; all four closures share
  # `x` and `inv`, and set() invalidates any previously cached inverse.
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) inv <<- inverse,
    getInverse = function() inv
  )
}
## This function computes the inverse of the special "matrix" created by
## makeCacheMatrix above.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix, memoize, and return.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
| /cachematrix.R | no_license | veronsj/ProgrammingAssignment2 | R | false | false | 1,059 | r | ## Caching the Inverse of a Matrix:
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
# Cached inverse; NULL until a value is stored via setInverse()
inv <- NULL
# Replace the stored matrix and drop the now-stale cached inverse
set <- function(y) {
x <<- y
inv <<- NULL
}
# Accessors for the matrix and its cached inverse
get <- function() x
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() inv
# The list of closures is the object's public interface
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" created by
## makeCacheMatrix above.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
# `...` is forwarded to solve() (e.g. tol)
inv <- x$getInverse()
# Cache hit: reuse the stored inverse
if (!is.null(inv)) {
message("getting cached data")
return(inv)
}
# Cache miss: invert, memoize in the cache object, and return
mat <- x$get()
inv <- solve(mat, ...)
x$setInverse(inv)
inv
}
|
################################################################################
## Ridge-augmented SCM
################################################################################
#' Ridge augmented weights (possibly with covariates)
#'
#' @param wide_data Output of `format_data`
#' @param synth_data Output of `format_synth`
#' @param Z Matrix of covariates, default is NULL
#' @param lambda Ridge hyper-parameter, if NULL use CV
#' @param ridge Include ridge or not
#' @param scm Include SCM or not
#' @param lambda_min_ratio Ratio of the smallest to largest lambda when tuning lambda values
#' @param n_lambda Number of lambdas to consider between the smallest and largest lambda value
#' @param lambda_max Initial (largest) lambda, if NULL sets it to be (1+norm(X_1-X_c))^2
#' @param holdout_length Length of the consecutive holdout period used when tuning lambdas
#' @param min_1se If TRUE, chooses the maximum lambda within 1 standard error of the lambda that minimizes the CV error, if FALSE chooses the optimal lambda; default TRUE
#' @param V V matrix for synth, default NULL
#' @param residualize Whether to residualize auxiliary covariates or balance directly, default TRUE
#' @param ... optional arguments for outcome model
#' @noRd
#' @return \itemize{
#' \item{"weights"}{Ridge ASCM weights}
#' \item{"l2_imbalance"}{Imbalance in pre-period outcomes, measured by the L2 norm}
#' \item{"scaled_l2_imbalance"}{L2 imbalance scaled by L2 imbalance of uniform weights}
#' \item{"mhat"}{Outcome model estimate (zero in this case)}
#' \item{"lambda"}{Value of the ridge hyperparameter}
#' \item{"ridge_mhat"}{The ridge regression predictions (for estimating the bias)}
#' \item{"synw"}{The synth weights(for estimating the bias)}
#' \item{"lambdas"}{List of lambda values evaluated to tune ridge regression}
#' \item{"lambda_errors"}{"The MSE associated with each lambda term in lambdas."}
#' \item{"lambda_errors_se"}{"The SE of the MSE associated with each lambda term in lambdas."}
#' }
fit_ridgeaug_formatted <- function(wide_data, synth_data,
Z=NULL, lambda=NULL, ridge=T, scm=T,
lambda_min_ratio = 1e-8, n_lambda = 20,
lambda_max = NULL,
holdout_length = 1, min_1se = T,
V = NULL,
residualize = FALSE, ...) {
# NOTE(review): defaults use T for TRUE; prefer TRUE/FALSE (T is reassignable).
# Warn about (rather than silently drop) any unused arguments passed via `...`.
extra_params = list(...)
if (length(extra_params) > 0) {
warning("Unused parameters in using ridge augmented weights: ", paste(names(extra_params), collapse = ", "))
}
# Unpack the formatted panel: X = pre-period outcomes, y = outcomes,
# trt = treatment indicator (contract comes from format_data -- see roxygen).
X <- wide_data$X
y <- wide_data$y
trt <- wide_data$trt
# Placeholders; replaced below with whatever fit_ridgeaug_inner returns.
lambda_errors <- NULL
lambda_errors_se <- NULL
lambdas <- NULL
## center outcomes
# Center every column at its control-group mean; X_c = centered controls,
# X_1 = 1 x t0 row vector of centered treated-group column means.
X_cent <- apply(X, 2, function(x) x - mean(x[trt==0]))
X_c <- X_cent[trt==0,,drop=FALSE]
X_1 <- matrix(colMeans(X_cent[trt==1,,drop=FALSE]), nrow=1)
y_cent <- apply(y, 2, function(x) x - mean(x[trt==0]))
y_c <- y_cent[trt==0,,drop=FALSE]
t0 <- ncol(X_c)
# make_V_matrix is defined elsewhere in the package -- presumably it expands
# V (possibly NULL) into a t0 x t0 weighting matrix; confirm there.
V <- make_V_matrix(t0, V)
# apply V matrix transformation
X_c <- X_c %*% V
X_1 <- X_1 %*% V
new_synth_data <- synth_data
## if there are auxiliary covariates, use them
if(!is.null(Z)) {
## center covariates
Z_cent <- apply(Z, 2, function(x) x - mean(x[trt==0]))
Z_c <- Z_cent[trt==0,,drop=FALSE]
Z_1 <- matrix(colMeans(Z_cent[trt==1,,drop=FALSE]), nrow=1)
if(residualize) {
## regress out covariates
# OLS projection of the (centered, V-weighted) outcomes onto Z.
Xc_hat <- Z_c %*% solve(t(Z_c) %*% Z_c) %*% t(Z_c) %*% X_c
X1_hat <- Z_1 %*% solve(t(Z_c) %*% Z_c) %*% t(Z_c) %*% X_c
# take residuals
res_t <- X_1 - X1_hat
res_c <- X_c - Xc_hat
X_c <- res_c
X_1 <- res_t
X_cent[trt == 0,] <- res_c
X_cent[trt == 1,] <- res_t
# Balance the residualized outcomes in the synth problem.
new_synth_data$Z1 <- t(res_t)
new_synth_data$X1 <- t(res_t)
new_synth_data$Z0 <- t(res_c)
new_synth_data$X0 <- t(res_c)
} else {
# standardize covariates to be on the same scale as the outcomes
# NOTE(review): sd() on the matrix X_c appears to pool all entries into
# a single scalar sd -- confirm that is intended.
sdz <- apply(Z_c, 2, sd)
sdx <- sd(X_c)
Z_c <- sdx * t(t(Z_c) / sdz)
Z_1 <- sdx * Z_1 / sdz
# concatenate
X_c <- cbind(X_c, Z_c)
X_1 <- cbind(X_1, Z_1)
new_synth_data$Z1 <- t(X_1)
new_synth_data$X1 <- t(X_1)
new_synth_data$Z0 <- t(X_c)
new_synth_data$X0 <- t(X_c)
V <- diag(ncol(X_c))
}
} else {
# No covariates: balance the centered, V-weighted outcomes directly.
new_synth_data$Z1 <- t(X_1)
new_synth_data$X1 <- t(X_1)
new_synth_data$Z0 <- t(X_c)
new_synth_data$X0 <- t(X_c)
}
# Delegate the ridge/SCM fit (and any lambda cross-validation) to the helper.
out <- fit_ridgeaug_inner(X_c, X_1, trt, new_synth_data,
lambda, ridge, scm,
lambda_min_ratio, n_lambda,
lambda_max,
holdout_length, min_1se)
weights <- out$weights
synw <- out$synw
lambda <- out$lambda
lambdas <- out$lambdas
lambda_errors <- out$lambda_errors
lambda_errors_se <- out$lambda_errors_se
# add back in covariate weights
if(!is.null(Z)) {
if(residualize) {
# Keep the outcome-only weights, then add the implied regression
# adjustment so the final weights balance Z exactly
# (t(Z_c) %*% weights equals t(Z_1) after this step).
no_cov_weights <- weights
ridge_w <- t(t(Z_1) - t(Z_c) %*% weights) %*%
solve(t(Z_c) %*% Z_c) %*% t(Z_c)
weights <- weights + t(ridge_w)
} else {
no_cov_weights <- NULL
}
}
# Imbalance diagnostics use the raw synth_data (not the centered/V-weighted
# copies), i.e. imbalance in the original pre-period outcomes.
l2_imbalance <- sqrt(sum((synth_data$X0 %*% weights - synth_data$X1)^2))
## primal objective value scaled by least squares difference for mean
uni_w <- matrix(1/ncol(synth_data$X0), nrow=ncol(synth_data$X0), ncol=1)
unif_l2_imbalance <- sqrt(sum((synth_data$X0 %*% uni_w - synth_data$X1)^2))
# (local name below is a typo for "imbalance"; purely internal)
scaled_l2_imabalance <- l2_imbalance / unif_l2_imbalance
## no outcome model
# mhat is all zeros (the weighting estimator itself reports no outcome
# model); ridge_mhat below carries the ridge predictions used for the bias
# estimate.
mhat <- matrix(0, nrow=nrow(y), ncol=ncol(y))
ridge_mhat <- mhat
if(!is.null(Z)) {
if(residualize) {
ridge_mhat <- ridge_mhat + Z_cent %*% solve(t(Z_c) %*% Z_c) %*%
t(Z_c) %*% y_c
## regress out covariates for outcomes
yc_hat <- ridge_mhat[trt == 0,, drop = F]
# take residuals of outcomes
y_c <- y_c - yc_hat
} else {
# Covariates were appended to the design above, so append here too.
X_cent <- cbind(X_cent, Z_cent)
}
}
if(ridge) {
# Ridge regression predictions for every unit from the centered design.
ridge_mhat <- ridge_mhat + X_cent %*% solve(t(X_c) %*% X_c +
lambda * diag(ncol(X_c))) %*%
t(X_c) %*% y_c
}
output <- list(weights = weights,
l2_imbalance = l2_imbalance,
scaled_l2_imbalance = scaled_l2_imabalance,
mhat = mhat,
lambda = lambda,
ridge_mhat = ridge_mhat,
synw = synw,
lambdas = lambdas,
lambda_errors = lambda_errors,
lambda_errors_se = lambda_errors_se)
if(!is.null(Z)) {
# Extra covariate-balance diagnostics, only when Z was supplied.
output$no_cov_weights <- no_cov_weights
z_l2_imbalance <- sqrt(sum((t(Z_c) %*% weights - t(Z_1))^2))
z_unif_l2_imbalance <- sqrt(sum((t(Z_c) %*% uni_w - t(Z_1))^2))
z_scaled_l2_imbalance <- z_l2_imbalance / z_unif_l2_imbalance
output$covariate_l2_imbalance <- z_l2_imbalance
output$scaled_covariate_l2_imbalance <- z_scaled_l2_imbalance
}
return(output)
}
#' Helper function to fit ridge ASCM
#' @param X_c Matrix of control lagged outcomes
#' @param X_1 Vector of treated lagged outcomes
#' @param trt Vector of treatment indicators
#' @param synth_data Output of `format_synth`
#' @param lambda Ridge hyper-parameter, if NULL use CV
#' @param ridge Include ridge or not
#' @param scm Include SCM or not
#' @param lambda_min_ratio Ratio of the smallest to largest lambda when tuning lambda values
#' @param n_lambda Number of lambdas to consider between the smallest and largest lambda value
#' @param lambda_max Initial (largest) lambda, if NULL sets it to be (1+norm(X_1-X_c))^2
#' @param holdout_length Length of consecutive holdout period for when tuning lambdas
#' @param min_1se If TRUE, chooses the maximum lambda within 1 standard error of the lambda that minimizes the CV error, if FALSE chooses the optimal lambda; default TRUE
#' @noRd
#' @return \itemize{
#' \item{"weights"}{Ridge ASCM weights}
#' \item{"lambda"}{Value of the ridge hyperparameter}
#' \item{"synw"}{The synth weights(for estimating the bias)}
#' \item{"lambdas"}{List of lambda values evaluated to tune ridge regression}
#' \item{"lambda_errors"}{"The MSE associated with each lambda term in lambdas."}
#' \item{"lambda_errors_se"}{"The SE of the MSE associated with each lambda term in lambdas."}
#' }
fit_ridgeaug_inner <- function(X_c, X_1, trt, synth_data,
                               lambda, ridge, scm,
                               lambda_min_ratio, n_lambda,
                               lambda_max,
                               holdout_length, min_1se) {
  n_ctrl <- sum(trt == 0)
  # Base weights: SCM weights when requested, otherwise uniform over controls
  scm_w <- if (scm) {
    fit_synth_formatted(synth_data)$weights
  } else {
    rep(1 / n_ctrl, n_ctrl)
  }
  lambdas <- NULL
  lambda_errors <- NULL
  lambda_errors_se <- NULL
  if (ridge) {
    # Tune lambda by cross-validation only when no value was supplied
    if (is.null(lambda)) {
      cv_out <- cv_lambda(X_c, X_1, synth_data, trt, holdout_length, scm,
                          lambda_max, lambda_min_ratio, n_lambda, min_1se)
      lambda <- cv_out$lambda
      lambdas <- cv_out$lambdas
      lambda_errors <- cv_out$lambda_errors
      lambda_errors_se <- cv_out$lambda_errors_se
    }
    # Ridge adjustment that targets the imbalance left over by the base weights
    ridge_adj <- t(t(X_1) - t(X_c) %*% scm_w) %*%
      solve(t(X_c) %*% X_c + lambda * diag(ncol(X_c))) %*% t(X_c)
  } else {
    # No ridge augmentation: zero adjustment for every control unit
    ridge_adj <- matrix(0, nrow = 1, ncol = n_ctrl)
  }
  # Final weights are the base weights plus the ridge correction
  list(weights = scm_w + t(ridge_adj),
       synw = scm_w,
       lambda = lambda,
       lambdas = lambdas,
       lambda_errors = lambda_errors,
       lambda_errors_se = lambda_errors_se)
}
#' Choose max lambda as largest eigenvalue of control X
#' @param X_c matrix of control lagged outcomes
#' @noRd
#' @return max lambda
get_lambda_max <- function(X_c) {
  # Squared spectral norm of X_c, i.e. its largest singular value squared
  # (norm(, type = "2") is computed from the SVD, matching svd(X_c)$d[1]^2)
  norm(X_c, type = "2") ^ 2
}
#' Create list of lambdas
#' @param lambda_min_ratio Ratio of the smallest to largest lambda when tuning lambda values
#' @param n_lambda Number of lambdas to consider between the smallest and largest lambda value
#' @param lambda_max Initial (largest) lambda, if NULL sets it to be (1+norm(X_1-X_c))^2
#' @noRd
#' @return List of lambdas
create_lambda_list <- function(lambda_max, lambda_min_ratio, n_lambda) {
  # Geometric decay factor; after n_lambda steps the grid reaches
  # lambda_max * lambda_min_ratio exactly.
  scaler <- lambda_min_ratio ^ (1 / n_lambda)
  # Exponents 0..n_lambda give n_lambda + 1 values, largest to smallest.
  # The original `seq(0:n_lambda) - 1` relied on seq() over a vector
  # returning its indices; `seq(0, n_lambda)` states the intent directly
  # and produces the identical sequence.
  lambda_max * (scaler ^ seq(0, n_lambda))
}
#' Choose either the lambda that minimizes CV MSE or largest lambda within 1 se of min
#' @param lambdas list of lambdas
#' @param lambda_errors The MSE associated with each lambda term in lambdas.
#' @param lambda_errors_se The SE of the MSE associated with each lambda
#' @param min_1se If TRUE, chooses the maximum lambda within 1 standard error of the lambda that minimizes the CV error, if FALSE chooses the optimal lambda; default TRUE
#' @noRd
#' @return optimal lambda
choose_lambda <- function(lambdas, lambda_errors, lambda_errors_se, min_1se) {
  # Index of the lambda with the smallest CV error
  best <- which.min(lambda_errors)
  if (!min_1se) {
    # Plain CV-optimal lambda
    return(lambdas[best])
  }
  # Largest lambda whose error is within one SE of the minimum
  threshold <- lambda_errors[best] + lambda_errors_se[best]
  max(lambdas[lambda_errors <= threshold])
}
#' Choose best lambda with CV
#' @param X_c Matrix of control lagged outcomes
#' @param X_1 Vector of treated lagged outcomes
#' @param synth_data Output of `format_synth`
#' @param trt Vector of treatment indicators
#' @param holdout_length Length of consecutive holdout period for when tuning lambdas
#' @param scm Include SCM or not
#' @param lambda_max Initial (largest) lambda, if NULL sets it to be (1+norm(X_1-X_c))^2
#' @param lambda_min_ratio Ratio of the smallest to largest lambda when tuning lambda values
#' @param n_lambda Number of lambdas to consider between the smallest and largest lambda value
#' @param min_1se If TRUE, chooses the maximum lambda within 1 standard error of the lambda
#' @noRd
#' @return \itemize{
#' \item{"lambda"}{Value of the ridge hyperparameter}
#' \item{"lambdas"}{List of lambda values evaluated to tune ridge regression}
#' \item{"lambda_errors"}{"The MSE associated with each lambda term in lambdas."}
#' \item{"lambda_errors_se"}{"The SE of the MSE associated with each lambda term}
#' }
cv_lambda <- function(X_c, X_1, synth_data, trt, holdout_length, scm,
                      lambda_max, lambda_min_ratio, n_lambda, min_1se) {
  # Default the largest candidate to the squared top singular value of X_c
  if (is.null(lambda_max)) {
    lambda_max <- get_lambda_max(X_c)
  }
  # Geometric grid of candidate lambdas, largest to smallest
  lambdas <- create_lambda_list(lambda_max, lambda_min_ratio, n_lambda)
  # Holdout-based CV error (and its SE) for every candidate
  errs <- get_lambda_errors(lambdas, X_c, X_1, synth_data, trt,
                            holdout_length, scm)
  list(
    lambda = choose_lambda(lambdas, errs$lambda_errors,
                           errs$lambda_errors_se, min_1se),
    lambda_errors = errs$lambda_errors,
    lambda_errors_se = errs$lambda_errors_se,
    lambdas = lambdas
  )
}
| /R/ridge.R | permissive | ebenmichael/augsynth | R | false | false | 13,898 | r | ################################################################################
## Ridge-augmented SCM
################################################################################
#' Ridge augmented weights (possibly with covariates)
#'
#' @param wide_data Output of `format_data`
#' @param synth_data Output of `format_synth`
#' @param Z Matrix of covariates, default is NULL
#' @param lambda Ridge hyper-parameter, if NULL use CV
#' @param ridge Include ridge or not
#' @param scm Include SCM or not
#' @param lambda_min_ratio Ratio of the smallest to largest lambda when tuning lambda values
#' @param n_lambda Number of lambdas to consider between the smallest and largest lambda value
#' @param lambda_max Initial (largest) lambda, if NULL sets it to be (1+norm(X_1-X_c))^2
#' @param holdout_length Length of conseuctive holdout period for when tuning lambdas
#' @param min_1se If TRUE, chooses the maximum lambda within 1 standard error of the lambda that minimizes the CV error, if FALSE chooses the optimal lambda; default TRUE
#' @param V V matrix for synth, default NULL
#' @param residualize Whether to residualize auxiliary covariates or balance directly, default FALSE
#' @param ... optional arguments for outcome model
#' @noRd
#' @return \itemize{
#' \item{"weights"}{Ridge ASCM weights}
#' \item{"l2_imbalance"}{Imbalance in pre-period outcomes, measured by the L2 norm}
#' \item{"scaled_l2_imbalance"}{L2 imbalance scaled by L2 imbalance of uniform weights}
#' \item{"mhat"}{Outcome model estimate (zero in this case)}
#' \item{"lambda"}{Value of the ridge hyperparameter}
#' \item{"ridge_mhat"}{The ridge regression predictions (for estimating the bias)}
#' \item{"synw"}{The synth weights(for estimating the bias)}
#' \item{"lambdas"}{List of lambda values evaluated to tune ridge regression}
#' \item{"lambda_errors"}{"The MSE associated with each lambda term in lambdas."}
#' \item{"lambda_errors_se"}{"The SE of the MSE associated with each lambda term in lambdas."}
#' }
fit_ridgeaug_formatted <- function(wide_data, synth_data,
                                   Z = NULL, lambda = NULL,
                                   ridge = TRUE, scm = TRUE,
                                   lambda_min_ratio = 1e-8, n_lambda = 20,
                                   lambda_max = NULL,
                                   holdout_length = 1, min_1se = TRUE,
                                   V = NULL,
                                   residualize = FALSE, ...) {
  # Warn (rather than silently ignore) about unrecognized arguments
  extra_params <- list(...)
  if (length(extra_params) > 0) {
    warning("Unused parameters in using ridge augmented weights: ",
            paste(names(extra_params), collapse = ", "))
  }
  X <- wide_data$X
  y <- wide_data$y
  trt <- wide_data$trt
  lambda_errors <- NULL
  lambda_errors_se <- NULL
  lambdas <- NULL
  ## center outcomes on control-group means
  X_cent <- apply(X, 2, function(x) x - mean(x[trt == 0]))
  X_c <- X_cent[trt == 0, , drop = FALSE]
  X_1 <- matrix(colMeans(X_cent[trt == 1, , drop = FALSE]), nrow = 1)
  y_cent <- apply(y, 2, function(x) x - mean(x[trt == 0]))
  y_c <- y_cent[trt == 0, , drop = FALSE]
  t0 <- ncol(X_c)
  V <- make_V_matrix(t0, V)
  # apply V matrix transformation to the lagged outcomes
  X_c <- X_c %*% V
  X_1 <- X_1 %*% V
  new_synth_data <- synth_data
  ## if there are auxiliary covariates, use them
  if (!is.null(Z)) {
    ## center covariates on control-group means
    Z_cent <- apply(Z, 2, function(x) x - mean(x[trt == 0]))
    Z_c <- Z_cent[trt == 0, , drop = FALSE]
    Z_1 <- matrix(colMeans(Z_cent[trt == 1, , drop = FALSE]), nrow = 1)
    if (residualize) {
      ## regress the lagged outcomes on the covariates
      Xc_hat <- Z_c %*% solve(t(Z_c) %*% Z_c) %*% t(Z_c) %*% X_c
      X1_hat <- Z_1 %*% solve(t(Z_c) %*% Z_c) %*% t(Z_c) %*% X_c
      # take residuals, so weights balance the covariate-free part
      res_t <- X_1 - X1_hat
      res_c <- X_c - Xc_hat
      X_c <- res_c
      X_1 <- res_t
      X_cent[trt == 0, ] <- res_c
      X_cent[trt == 1, ] <- res_t
      new_synth_data$Z1 <- t(res_t)
      new_synth_data$X1 <- t(res_t)
      new_synth_data$Z0 <- t(res_c)
      new_synth_data$X0 <- t(res_c)
    } else {
      # standardize covariates to be on the same scale as the outcomes
      sdz <- apply(Z_c, 2, sd)
      sdx <- sd(X_c)
      Z_c <- sdx * t(t(Z_c) / sdz)
      Z_1 <- sdx * Z_1 / sdz
      # concatenate covariates onto the lagged outcomes and balance jointly
      X_c <- cbind(X_c, Z_c)
      X_1 <- cbind(X_1, Z_1)
      new_synth_data$Z1 <- t(X_1)
      new_synth_data$X1 <- t(X_1)
      new_synth_data$Z0 <- t(X_c)
      new_synth_data$X0 <- t(X_c)
      V <- diag(ncol(X_c))
    }
  } else {
    new_synth_data$Z1 <- t(X_1)
    new_synth_data$X1 <- t(X_1)
    new_synth_data$Z0 <- t(X_c)
    new_synth_data$X0 <- t(X_c)
  }
  out <- fit_ridgeaug_inner(X_c, X_1, trt, new_synth_data,
                            lambda, ridge, scm,
                            lambda_min_ratio, n_lambda,
                            lambda_max,
                            holdout_length, min_1se)
  weights <- out$weights
  synw <- out$synw
  lambda <- out$lambda
  lambdas <- out$lambdas
  lambda_errors <- out$lambda_errors
  lambda_errors_se <- out$lambda_errors_se
  # add back in covariate weights after fitting on residuals
  if (!is.null(Z)) {
    if (residualize) {
      no_cov_weights <- weights
      ridge_w <- t(t(Z_1) - t(Z_c) %*% weights) %*%
        solve(t(Z_c) %*% Z_c) %*% t(Z_c)
      weights <- weights + t(ridge_w)
    } else {
      no_cov_weights <- NULL
    }
  }
  l2_imbalance <- sqrt(sum((synth_data$X0 %*% weights - synth_data$X1)^2))
  ## primal objective value scaled by least squares difference for mean
  uni_w <- matrix(1 / ncol(synth_data$X0), nrow = ncol(synth_data$X0), ncol = 1)
  unif_l2_imbalance <- sqrt(sum((synth_data$X0 %*% uni_w - synth_data$X1)^2))
  # (fixed misspelled local name `scaled_l2_imabalance`)
  scaled_l2_imbalance <- l2_imbalance / unif_l2_imbalance
  ## no outcome model
  mhat <- matrix(0, nrow = nrow(y), ncol = ncol(y))
  ridge_mhat <- mhat
  if (!is.null(Z)) {
    if (residualize) {
      ridge_mhat <- ridge_mhat + Z_cent %*% solve(t(Z_c) %*% Z_c) %*%
        t(Z_c) %*% y_c
      ## regress out covariates for outcomes
      yc_hat <- ridge_mhat[trt == 0, , drop = FALSE]
      # take residuals of outcomes
      y_c <- y_c - yc_hat
    } else {
      X_cent <- cbind(X_cent, Z_cent)
    }
  }
  if (ridge) {
    # ridge regression predictions, used downstream for bias estimation
    ridge_mhat <- ridge_mhat + X_cent %*% solve(t(X_c) %*% X_c +
                                                  lambda * diag(ncol(X_c))) %*%
      t(X_c) %*% y_c
  }
  output <- list(weights = weights,
                 l2_imbalance = l2_imbalance,
                 scaled_l2_imbalance = scaled_l2_imbalance,
                 mhat = mhat,
                 lambda = lambda,
                 ridge_mhat = ridge_mhat,
                 synw = synw,
                 lambdas = lambdas,
                 lambda_errors = lambda_errors,
                 lambda_errors_se = lambda_errors_se)
  if (!is.null(Z)) {
    # NOTE: when residualize is FALSE, no_cov_weights is NULL, so this
    # assignment leaves `no_cov_weights` out of the list (original behavior)
    output$no_cov_weights <- no_cov_weights
    z_l2_imbalance <- sqrt(sum((t(Z_c) %*% weights - t(Z_1))^2))
    z_unif_l2_imbalance <- sqrt(sum((t(Z_c) %*% uni_w - t(Z_1))^2))
    z_scaled_l2_imbalance <- z_l2_imbalance / z_unif_l2_imbalance
    output$covariate_l2_imbalance <- z_l2_imbalance
    output$scaled_covariate_l2_imbalance <- z_scaled_l2_imbalance
  }
  return(output)
}
#' Helper function to fit ridge ASCM
#' @param X_c Matrix of control lagged outcomes
#' @param X_1 Vector of treated lagged outcomes
#' @param trt Vector of treatment indicators
#' @param synth_data Output of `format_synth`
#' @param lambda Ridge hyper-parameter, if NULL use CV
#' @param ridge Include ridge or not
#' @param scm Include SCM or not
#' @param lambda_min_ratio Ratio of the smallest to largest lambda when tuning lambda values
#' @param n_lambda Number of lambdas to consider between the smallest and largest lambda value
#' @param lambda_max Initial (largest) lambda, if NULL sets it to be (1+norm(X_1-X_c))^2
#' @param holdout_length Length of consecutive holdout period for when tuning lambdas
#' @param min_1se If TRUE, chooses the maximum lambda within 1 standard error of the lambda that minimizes the CV error, if FALSE chooses the optimal lambda; default TRUE
#' @noRd
#' @return \itemize{
#' \item{"weights"}{Ridge ASCM weights}
#' \item{"lambda"}{Value of the ridge hyperparameter}
#' \item{"synw"}{The synth weights(for estimating the bias)}
#' \item{"lambdas"}{List of lambda values evaluated to tune ridge regression}
#' \item{"lambda_errors"}{"The MSE associated with each lambda term in lambdas."}
#' \item{"lambda_errors_se"}{"The SE of the MSE associated with each lambda term in lambdas."}
#' }
fit_ridgeaug_inner <- function(X_c, X_1, trt, synth_data,
                               lambda, ridge, scm,
                               lambda_min_ratio, n_lambda,
                               lambda_max,
                               holdout_length, min_1se) {
  n_ctrl <- sum(trt == 0)
  # Base weights: SCM weights when requested, otherwise uniform over controls
  scm_w <- if (scm) {
    fit_synth_formatted(synth_data)$weights
  } else {
    rep(1 / n_ctrl, n_ctrl)
  }
  lambdas <- NULL
  lambda_errors <- NULL
  lambda_errors_se <- NULL
  if (ridge) {
    # Tune lambda by cross-validation only when no value was supplied
    if (is.null(lambda)) {
      cv_out <- cv_lambda(X_c, X_1, synth_data, trt, holdout_length, scm,
                          lambda_max, lambda_min_ratio, n_lambda, min_1se)
      lambda <- cv_out$lambda
      lambdas <- cv_out$lambdas
      lambda_errors <- cv_out$lambda_errors
      lambda_errors_se <- cv_out$lambda_errors_se
    }
    # Ridge adjustment that targets the imbalance left over by the base weights
    ridge_adj <- t(t(X_1) - t(X_c) %*% scm_w) %*%
      solve(t(X_c) %*% X_c + lambda * diag(ncol(X_c))) %*% t(X_c)
  } else {
    # No ridge augmentation: zero adjustment for every control unit
    ridge_adj <- matrix(0, nrow = 1, ncol = n_ctrl)
  }
  # Final weights are the base weights plus the ridge correction
  list(weights = scm_w + t(ridge_adj),
       synw = scm_w,
       lambda = lambda,
       lambdas = lambdas,
       lambda_errors = lambda_errors,
       lambda_errors_se = lambda_errors_se)
}
#' Choose max lambda as largest eigenvalue of control X
#' @param X_c matrix of control lagged outcomes
#' @noRd
#' @return max lambda
get_lambda_max <- function(X_c) {
  # Squared spectral norm of X_c, i.e. its largest singular value squared
  # (norm(, type = "2") is computed from the SVD, matching svd(X_c)$d[1]^2)
  norm(X_c, type = "2") ^ 2
}
#' Create list of lambdas
#' @param lambda_min_ratio Ratio of the smallest to largest lambda when tuning lambda values
#' @param n_lambda Number of lambdas to consider between the smallest and largest lambda value
#' @param lambda_max Initial (largest) lambda, if NULL sets it to be (1+norm(X_1-X_c))^2
#' @noRd
#' @return List of lambdas
create_lambda_list <- function(lambda_max, lambda_min_ratio, n_lambda) {
  # Geometric decay factor; after n_lambda steps the grid reaches
  # lambda_max * lambda_min_ratio exactly.
  scaler <- lambda_min_ratio ^ (1 / n_lambda)
  # Exponents 0..n_lambda give n_lambda + 1 values, largest to smallest.
  # The original `seq(0:n_lambda) - 1` relied on seq() over a vector
  # returning its indices; `seq(0, n_lambda)` states the intent directly
  # and produces the identical sequence.
  lambda_max * (scaler ^ seq(0, n_lambda))
}
#' Choose either the lambda that minimizes CV MSE or largest lambda within 1 se of min
#' @param lambdas list of lambdas
#' @param lambda_errors The MSE associated with each lambda term in lambdas.
#' @param lambda_errors_se The SE of the MSE associated with each lambda
#' @param min_1se If TRUE, chooses the maximum lambda within 1 standard error of the lambda that minimizes the CV error, if FALSE chooses the optimal lambda; default TRUE
#' @noRd
#' @return optimal lambda
choose_lambda <- function(lambdas, lambda_errors, lambda_errors_se, min_1se) {
  # Index of the lambda with the smallest CV error
  best <- which.min(lambda_errors)
  if (!min_1se) {
    # Plain CV-optimal lambda
    return(lambdas[best])
  }
  # Largest lambda whose error is within one SE of the minimum
  threshold <- lambda_errors[best] + lambda_errors_se[best]
  max(lambdas[lambda_errors <= threshold])
}
#' Choose best lambda with CV
#' @param X_c Matrix of control lagged outcomes
#' @param X_1 Vector of treated lagged outcomes
#' @param synth_data Output of `format_synth`
#' @param trt Vector of treatment indicators
#' @param holdout_length Length of consecutive holdout period for when tuning lambdas
#' @param scm Include SCM or not
#' @param lambda_max Initial (largest) lambda, if NULL sets it to be (1+norm(X_1-X_c))^2
#' @param lambda_min_ratio Ratio of the smallest to largest lambda when tuning lambda values
#' @param n_lambda Number of lambdas to consider between the smallest and largest lambda value
#' @param min_1se If TRUE, chooses the maximum lambda within 1 standard error of the lambda
#' @noRd
#' @return \itemize{
#' \item{"lambda"}{Value of the ridge hyperparameter}
#' \item{"lambdas"}{List of lambda values evaluated to tune ridge regression}
#' \item{"lambda_errors"}{"The MSE associated with each lambda term in lambdas."}
#' \item{"lambda_errors_se"}{"The SE of the MSE associated with each lambda term}
#' }
cv_lambda <- function(X_c, X_1, synth_data, trt, holdout_length, scm,
                      lambda_max, lambda_min_ratio, n_lambda, min_1se) {
  # Default the largest candidate to the squared top singular value of X_c
  if (is.null(lambda_max)) {
    lambda_max <- get_lambda_max(X_c)
  }
  # Geometric grid of candidate lambdas, largest to smallest
  lambdas <- create_lambda_list(lambda_max, lambda_min_ratio, n_lambda)
  # Holdout-based CV error (and its SE) for every candidate
  errs <- get_lambda_errors(lambdas, X_c, X_1, synth_data, trt,
                            holdout_length, scm)
  list(
    lambda = choose_lambda(lambdas, errs$lambda_errors,
                           errs$lambda_errors_se, min_1se),
    lambda_errors = errs$lambda_errors,
    lambda_errors_se = errs$lambda_errors_se,
    lambdas = lambdas
  )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scales.R
\name{as_scale}
\alias{as_scale}
\title{Convert variable to scale}
\usage{
as_scale(var)
}
\arguments{
\item{var}{A \code{numeric} vector.}
}
\description{
Takes a character or factor variable and converts it to a factor with labels
ordered by numerals in the label. Labels with duplicated numerals will be
merged in the output.
}
\details{
Note: \code{as_scale} is convenient for converting likert scales from \code{character}
to \code{factor}. This tends to happen when merging data with e.g.
\code{\link[dplyr]{bind_rows}}.
}
\examples{
NULL
}
\author{
Kristian D. Olsen
}
\seealso{
\code{\link{rescale_10}} to convert 100-point scales to 10-point.
}
| /man/as_scale.Rd | no_license | itsdalmo/reporttoolDT | R | false | true | 743 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scales.R
\name{as_scale}
\alias{as_scale}
\title{Convert variable to scale}
\usage{
as_scale(var)
}
\arguments{
\item{var}{A \code{numeric} vector.}
}
\description{
Takes a character or factor variable and converts it to a factor with labels
ordered by numerals in the label. Labels with duplicated numerals will be
merged in the output.
}
\details{
Note: \code{as_scale} is convenient for converting likert scales from \code{character}
to \code{factor}. This tends to happen when merging data with e.g.
\code{\link[dplyr]{bind_rows}}.
}
\examples{
NULL
}
\author{
Kristian D. Olsen
}
\seealso{
\code{\link{rescale_10}} to convert 100-point scales to 10-point.
}
|
# Processing output
###################
require(ggplot2)
require(dplyr)
# results <- read.csv("C:/Users/q19r165/Desktop/Model_Repo/25SSmod_pred.csv")
plot_results <- function(results, conf.from, conf.to, length) {
  # Plot model accuracy and data "containment" as a function of the
  # confidence threshold applied to the model's first guess.
  # `results` must contain columns: confidence1, guess1, answer.
  # NOTE(review): the `length` argument shadows base::length() as a value;
  # calls to length() still resolve to the base function, but renaming the
  # argument would require updating all callers.
  conf <- seq(conf.from, conf.to, length.out = length)
  right <- rep(NA_real_, length)
  proportion <- rep(NA_real_, length)
  for (i in seq_len(length)) {
    high.conf <- results[results$confidence1 >= conf[i], ]
    # Accuracy among predictions at or above this confidence threshold.
    # NA comparisons count as incorrect; the previous length()-based count
    # silently treated rows with missing values as correct.
    right[i] <- sum(high.conf$answer == high.conf$guess1, na.rm = TRUE) /
      length(high.conf$answer)
    # Fraction of all predictions retained at this threshold
    proportion[i] <- length(high.conf$answer) / length(results$answer)
  }
  # Build the plotting frame once, after the sweep (was rebuilt every iteration)
  df <- data.frame(conf, right, proportion)
  print(df %>% ggplot(aes(x = conf)) +
          geom_line(aes(y = right, color = "Model Assesment"), lwd = 2) +
          geom_line(aes(y = proportion, color = "Model Containment"), lwd = 2) +
          geom_hline(aes(yintercept = .95, color = "Acceptable Threshold"), lty = "dotted", lwd = 1.5) +
          ylab("Proportion Correct") +
          scale_y_continuous(sec.axis = sec_axis(~., name = "Proportion Contained")) +
          scale_colour_manual(values = c("green", "blue", "black")) +
          labs(y = "Proportion Correct",
               x = "Confidence Level of First Guess",
               colour = "Diagnostic") +
          theme_classic() +
          # NOTE(review): legend.position reuses conf.from as an npc
          # x-coordinate; this only looks right when conf.from is near
          # 0.25 -- confirm intent.
          theme(legend.position = c(conf.from, 0.30)))
}
# Plot accuracy/containment curves for the 25-site-subset model predictions
# and save the most recent figure.
# NOTE(review): absolute, machine-specific paths -- consider a relative path
# or a configurable data directory.
plot_results(read.csv("/Volumes/Seagate_Backup_Plus_Drive/25SSmod_pred.csv"), .25, .99, 99)
ggsave("ss_model_performance.tiff", dpi=500, width = 5, height = 5)
# results$right <- ifelse(results$answer-results$guess1 == 0, "Yes", "No")
#
# length(results$right[results$right=="Yes"])/length(results$right)
# Site codes where the three most frequently seen species occur.
# NOTE(review): `files` is not defined anywhere in this script -- it must
# already exist in the global environment (e.g. from another script);
# `most.seen` is also never used below -- confirm whether this is dead code.
most.seen <- c(
  unique(files$SS.code[which(files$ELEMENTNAME == "African Elephant")]),
  unique(files$SS.code[which(files$ELEMENTNAME == "Hippo")]),
  unique(files$SS.code[which(files$ELEMENTNAME == "Impala")])
)
mod.25SS <- read.csv("/Volumes/Seagate_Backup_Plus_Drive/25SSmod_pred.csv")
# Keep only classes (answers) with more than 500 predictions
ms.25SS <- mod.25SS %>%
  group_by(answer) %>%
  filter(n()>500)
unique(mod.25SS$answer)
plot_results(ms.25SS, .25, .99, 99)
unique(mod.25SS$answer)
| /model_output_processing.R | no_license | will-rogers/Machine-Learning-ZCP | R | false | false | 2,053 | r | # Processing output
###################
require(ggplot2)
require(dplyr)
# results <- read.csv("C:/Users/q19r165/Desktop/Model_Repo/25SSmod_pred.csv")
plot_results <- function(results, conf.from, conf.to, length) {
  # Plot model accuracy and data "containment" as a function of the
  # confidence threshold applied to the model's first guess.
  # `results` must contain columns: confidence1, guess1, answer.
  # NOTE(review): the `length` argument shadows base::length() as a value;
  # calls to length() still resolve to the base function, but renaming the
  # argument would require updating all callers.
  conf <- seq(conf.from, conf.to, length.out = length)
  right <- rep(NA_real_, length)
  proportion <- rep(NA_real_, length)
  for (i in seq_len(length)) {
    high.conf <- results[results$confidence1 >= conf[i], ]
    # Accuracy among predictions at or above this confidence threshold.
    # NA comparisons count as incorrect; the previous length()-based count
    # silently treated rows with missing values as correct.
    right[i] <- sum(high.conf$answer == high.conf$guess1, na.rm = TRUE) /
      length(high.conf$answer)
    # Fraction of all predictions retained at this threshold
    proportion[i] <- length(high.conf$answer) / length(results$answer)
  }
  # Build the plotting frame once, after the sweep (was rebuilt every iteration)
  df <- data.frame(conf, right, proportion)
  print(df %>% ggplot(aes(x = conf)) +
          geom_line(aes(y = right, color = "Model Assesment"), lwd = 2) +
          geom_line(aes(y = proportion, color = "Model Containment"), lwd = 2) +
          geom_hline(aes(yintercept = .95, color = "Acceptable Threshold"), lty = "dotted", lwd = 1.5) +
          ylab("Proportion Correct") +
          scale_y_continuous(sec.axis = sec_axis(~., name = "Proportion Contained")) +
          scale_colour_manual(values = c("green", "blue", "black")) +
          labs(y = "Proportion Correct",
               x = "Confidence Level of First Guess",
               colour = "Diagnostic") +
          theme_classic() +
          # NOTE(review): legend.position reuses conf.from as an npc
          # x-coordinate; this only looks right when conf.from is near
          # 0.25 -- confirm intent.
          theme(legend.position = c(conf.from, 0.30)))
}
# Plot accuracy/containment curves for the 25-site-subset model predictions
# and save the most recent figure.
# NOTE(review): absolute, machine-specific paths -- consider a relative path
# or a configurable data directory.
plot_results(read.csv("/Volumes/Seagate_Backup_Plus_Drive/25SSmod_pred.csv"), .25, .99, 99)
ggsave("ss_model_performance.tiff", dpi=500, width = 5, height = 5)
# results$right <- ifelse(results$answer-results$guess1 == 0, "Yes", "No")
#
# length(results$right[results$right=="Yes"])/length(results$right)
# Site codes where the three most frequently seen species occur.
# NOTE(review): `files` is not defined anywhere in this script -- it must
# already exist in the global environment (e.g. from another script);
# `most.seen` is also never used below -- confirm whether this is dead code.
most.seen <- c(
  unique(files$SS.code[which(files$ELEMENTNAME == "African Elephant")]),
  unique(files$SS.code[which(files$ELEMENTNAME == "Hippo")]),
  unique(files$SS.code[which(files$ELEMENTNAME == "Impala")])
)
mod.25SS <- read.csv("/Volumes/Seagate_Backup_Plus_Drive/25SSmod_pred.csv")
# Keep only classes (answers) with more than 500 predictions
ms.25SS <- mod.25SS %>%
  group_by(answer) %>%
  filter(n()>500)
unique(mod.25SS$answer)
plot_results(ms.25SS, .25, .99, 99)
unique(mod.25SS$answer)
|
# install.packages("qpcR")
library(qpcR)  # for qpcR:::cbind.na(), which pads unequal-length columns with NA
# 1. Build the January 2020 recommended-book list CSV for Arirang Children's Library
arirang_list = readRDS('data/200103_19년12월_아리랑어린이도서관_인기도서별 추천도서 리스트.rds')
# Seed data frame; the dummy column fixes the row count and is dropped below
arirang_df = data.frame(dummy=1:10)
col_vec = c()
for (tmp_list in arirang_list) {
  # First entry is the popular book's title (becomes the column name);
  # the remaining entries are its recommended books
  col_vec = c(col_vec, tmp_list$recommend_books[1])
  arirang_df = qpcR:::cbind.na(arirang_df, tmp_list$recommend_books[-1])
}
arirang_df = arirang_df[-1]
colnames(arirang_df) = col_vec
# View(arirang_df)
write.csv(arirang_df,
          'data/200103_2020년1월_아리랑어린이도서관_인기 아동도서별 추천도서 목록.csv',
          row.names=FALSE)
# 2. Build the January 2020 recommended-book list CSV for Wolgok Kkumgeurin Library
wolkok_list = readRDS('data/200103_19년12월_월곡꿈그림도서관_인기도서별 추천도서 리스트.rds')
wolkok_df = data.frame(dummy=1:10)
col_vec = c()
for (tmp_list in wolkok_list) {
  # Same scheme as above: title first, recommendations after
  col_vec = c(col_vec, tmp_list$recommend_books[1])
  wolkok_df = qpcR:::cbind.na(wolkok_df, tmp_list$recommend_books[-1])
}
wolkok_df = wolkok_df[-1]
colnames(wolkok_df) = col_vec
# View(wolkok_df)
write.csv(wolkok_df,
          'data/200103_2020년1월_월곡꿈그림도서관_인기 아동도서별 추천도서 목록.csv',
          row.names=FALSE)
| /200103_generateCSV_recommendList.R | no_license | sam351/Library_Book_Recommend | R | false | false | 1,314 | r | # install.packages("qpcR")
library(qpcR)  # for qpcR:::cbind.na(), which pads unequal-length columns with NA
# 1. Build the January 2020 recommended-book list CSV for Arirang Children's Library
arirang_list = readRDS('data/200103_19년12월_아리랑어린이도서관_인기도서별 추천도서 리스트.rds')
# Seed data frame; the dummy column fixes the row count and is dropped below
arirang_df = data.frame(dummy=1:10)
col_vec = c()
for (tmp_list in arirang_list) {
  # First entry is the popular book's title (becomes the column name);
  # the remaining entries are its recommended books
  col_vec = c(col_vec, tmp_list$recommend_books[1])
  arirang_df = qpcR:::cbind.na(arirang_df, tmp_list$recommend_books[-1])
}
arirang_df = arirang_df[-1]
colnames(arirang_df) = col_vec
# View(arirang_df)
write.csv(arirang_df,
          'data/200103_2020년1월_아리랑어린이도서관_인기 아동도서별 추천도서 목록.csv',
          row.names=FALSE)
# 2. Build the January 2020 recommended-book list CSV for Wolgok Kkumgeurin Library
wolkok_list = readRDS('data/200103_19년12월_월곡꿈그림도서관_인기도서별 추천도서 리스트.rds')
wolkok_df = data.frame(dummy=1:10)
col_vec = c()
for (tmp_list in wolkok_list) {
  # Same scheme as above: title first, recommendations after
  col_vec = c(col_vec, tmp_list$recommend_books[1])
  wolkok_df = qpcR:::cbind.na(wolkok_df, tmp_list$recommend_books[-1])
}
wolkok_df = wolkok_df[-1]
colnames(wolkok_df) = col_vec
# View(wolkok_df)
write.csv(wolkok_df,
          'data/200103_2020년1월_월곡꿈그림도서관_인기 아동도서별 추천도서 목록.csv',
          row.names=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dorem-package.R
\docType{package}
\name{dorem-package}
\alias{dorem-package}
\alias{_PACKAGE}
\title{dorem: Dose Response Modeling}
\description{
\if{html}{\figure{logo.png}{options: align='right' alt='logo' width='120'}}
Understanding the effects of training dose to training response
(i.e., performance) is one of the major goals of sports science.
The goal of this package is to provide easy-to-use dose-response models
commonly applied in understanding aforementioned relationships.
}
\seealso{
Useful links:
\itemize{
\item \url{https://dorem.net}
\item Report bugs at \url{https://github.com/mladenjovanovic/dorem/issues}
}
}
\author{
\strong{Maintainer}: Mladen Jovanovic \email{coach.mladen.jovanovic@gmail.com}
Other contributors:
\itemize{
\item Benedict Stephens Hemingway \email{b.stephens-hemingway@rgu.ac.uk} [contributor]
\item Paul Swinton \email{p.swinton@rgu.ac.uk} [contributor]
}
}
\keyword{internal}
| /man/dorem-package.Rd | permissive | mladenjovanovic/dorem | R | false | true | 1,024 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dorem-package.R
\docType{package}
\name{dorem-package}
\alias{dorem-package}
\alias{_PACKAGE}
\title{dorem: Dose Response Modeling}
\description{
\if{html}{\figure{logo.png}{options: align='right' alt='logo' width='120'}}
Understanding the effects of training dose to training response
(i.e., performance) is one of the major goals of sports science.
The goal of this package is to provide easy-to-use dose-response models
commonly applied in understanding aforementioned relationships.
}
\seealso{
Useful links:
\itemize{
\item \url{https://dorem.net}
\item Report bugs at \url{https://github.com/mladenjovanovic/dorem/issues}
}
}
\author{
\strong{Maintainer}: Mladen Jovanovic \email{coach.mladen.jovanovic@gmail.com}
Other contributors:
\itemize{
\item Benedict Stephens Hemingway \email{b.stephens-hemingway@rgu.ac.uk} [contributor]
\item Paul Swinton \email{p.swinton@rgu.ac.uk} [contributor]
}
}
\keyword{internal}
|
#' Get all releases of economic data
#'
#' @param realtime_start A `Date` indicating the start of the real-time period.
#' Defaults to today's date. For more information, see
#' [Real-Time Periods](https://research.stlouisfed.org/docs/api/fred/realtime_period.html).
#'
#' @param realtime_end A `Date` indicating the end of the real-time period.
#' Defaults to today's date. For more information, see
#' [Real-Time Periods](https://research.stlouisfed.org/docs/api/fred/realtime_period.html).
#'
#' @param limit An integer limit on the maximum number of results to return.
#' Defaults to `1000`, the maximum.
#'
#' @param offset An integer used in conjunction with `limit` for long series.
#' This mimics the idea of _pagination_ to retrieve large amounts of data over
#' multiple calls. Defaults to `0`.
#'
#' @param sort_order A string representing the order of the resulting series.
#' Possible values are: `"asc"` (default), and `"desc"`.
#'
#' @param order_by Order results by values of the specified attribute.
#' Possible values include: `'release_id'` (default), `'name'`, `'press_release'`,
#' `'realtime_start'`, `'realtime_end'`.
#'
#' @return A `tibble` object.
#'
#' @section API Documentation:
#'
#' [fred/releases](https://research.stlouisfed.org/docs/api/fred/releases.html)
#'
#' @seealso [fredr_releases_dates()], [fredr_release()], [fredr_release_dates()],
#' [fredr_release_series()], [fredr_release_sources()], [fredr_release_tags()],
#' [fredr_release_related_tags()], [fredr_release_tables()],
#'
#' @examples
#' \donttest{
#' fredr_releases(limit = 20L)
#' }
#' @export
fredr_releases <- function(limit = NULL,
                           offset = NULL,
                           order_by = NULL,
                           sort_order = NULL,
                           realtime_start = NULL,
                           realtime_end = NULL) {
  # Collect the user-supplied options (NULLs are filtered out by
  # capture_args) into API query parameters
  query_params <- capture_args(
    limit,
    offset,
    order_by,
    sort_order,
    realtime_start,
    realtime_end
  )
  # Prepend the fixed endpoint and dispatch the request
  do.call(fredr_request, c(list(endpoint = "releases"), query_params))
}
| /R/fredr_releases.R | no_license | tcweiss/fredr | R | false | false | 2,099 | r | #' Get all releases of economic data
#'
#' @param realtime_start A `Date` indicating the start of the real-time period.
#' Defaults to today's date. For more information, see
#' [Real-Time Periods](https://research.stlouisfed.org/docs/api/fred/realtime_period.html).
#'
#' @param realtime_end A `Date` indicating the end of the real-time period.
#' Defaults to today's date. For more information, see
#' [Real-Time Periods](https://research.stlouisfed.org/docs/api/fred/realtime_period.html).
#'
#' @param limit An integer limit on the maximum number of results to return.
#' Defaults to `1000`, the maximum.
#'
#' @param offset An integer used in conjunction with `limit` for long series.
#' This mimics the idea of _pagination_ to retrieve large amounts of data over
#' multiple calls. Defaults to `0`.
#'
#' @param sort_order A string representing the order of the resulting series.
#' Possible values are: `"asc"` (default), and `"desc"`.
#'
#' @param order_by Order results by values of the specified attribute.
#' Possible values include: `'release_id'` (default), `'name'`, `'press_release'`,
#' `'realtime_start'`, `'realtime_end'`.
#'
#' @return A `tibble` object.
#'
#' @section API Documentation:
#'
#' [fred/releases](https://research.stlouisfed.org/docs/api/fred/releases.html)
#'
#' @seealso [fredr_releases_dates()], [fredr_release()], [fredr_release_dates()],
#' [fredr_release_series()], [fredr_release_sources()], [fredr_release_tags()],
#' [fredr_release_related_tags()], [fredr_release_tables()],
#'
#' @examples
#' \donttest{
#' fredr_releases(limit = 20L)
#' }
#' @export
fredr_releases <- function(limit = NULL,
                           offset = NULL,
                           order_by = NULL,
                           sort_order = NULL,
                           realtime_start = NULL,
                           realtime_end = NULL) {
  # Collect the user-supplied options (NULLs are filtered out by
  # capture_args) into API query parameters
  query_params <- capture_args(
    limit,
    offset,
    order_by,
    sort_order,
    realtime_start,
    realtime_end
  )
  # Prepend the fixed endpoint and dispatch the request
  do.call(fredr_request, c(list(endpoint = "releases"), query_params))
}
|
#ui.R
# Shiny dashboard UI: a sidebar of tabs, each of which renders one plot
# produced by server.R, plus an action button that triggers a data refresh.
# NOTE(review): require() is used for package loading here; library() is
# preferred because require() returns FALSE instead of erroring when a
# package is missing.
require(shiny)
require(shinydashboard)
require(leaflet)
dashboardPage(
  dashboardHeader(title="Final Project"
  ),
  dashboardSidebar(
    sidebarMenu(
      # Observed server-side as input$clicks1 to re-read the data.
      actionButton(inputId = "clicks1", label = "Update Data",width="100%"),
      menuItem("Non-Aggregate1", tabName = "Non-Aggregate1"),
      menuItem("Aggregate1", tabName = "Aggregate1"),
      menuItem("Scatter1", tabName = "scatter1"),
      menuItem("Scatter2", tabName = "scatter2"),
      menuItem("Scatter3", tabName = "scatter3"),
      menuItem("Scatter4", tabName = "scatter4"),
      menuItem("Scatter5", tabName = "scatter5")
    )
  ),
  dashboardBody(
    tabItems(
      # First tab content
      tabItem(tabName = "scatter1",
              sliderInput("margins","Margin of Error",min=0,max=1, value = 0.95, step = 0.001),
              plotOutput(outputId = "scatter1",width='100%',height="800px")
      ),
      tabItem(tabName = "scatter2",
              sliderInput("year","Year",min=1992,max=1998, value = 1998, step = 1),
              sliderInput("margins2","Margin of Error",min=0,max=1, value = 0.95, step = 0.001),
              plotOutput(outputId = "scatter2",width='100%',height="400px")
      ),
      # NOTE(review): inputId "margins2" below is also used in the scatter2
      # tab above. Shiny input IDs must be unique across the whole UI;
      # duplicated IDs give undefined behavior. Confirm against server.R
      # before renaming either slider.
      tabItem(tabName = "scatter3",
              sliderInput("year2","Year",min=1992,max=1998, value = 1998, step = 1),
              sliderInput("margins2","Margin of Error",min=0,max=1, value = 0.95, step = 0.001),
              plotOutput(outputId = "scatter3",width='100%',height="400px")
      ),
      tabItem(tabName = "scatter4",
              sliderInput("year3","Year",min=1992,max=1998, value = 1998, step = 1),
              sliderInput("margins3","Margin of Error",min=0,max=1, value = 0.95, step = 0.001),
              plotOutput(outputId = "scatter4",width='100%',height="400px")
      ),
      tabItem(tabName = "scatter5",
              plotOutput(outputId = "scatter5",width='80%',height="400px")
      ),
      tabItem(tabName = "Non-Aggregate1",
              plotOutput(outputId ="nonagg",width='50%',height="900px")
      ),
      # NOTE(review): inputId "year3" below is also used in the scatter4 tab
      # above; duplicate Shiny input IDs are invalid. Confirm against server.R.
      tabItem(tabName = "Aggregate1",
              sliderInput("year3","Year",min=1992,max=1998, value = 1998, step = 1),
              plotOutput(outputId = "agg",width='80%',height="450px")
      )
    )
  )
)
| /04Shiny/UI.R | no_license | andrewnguyen42/DV_FinalProject | R | false | false | 2,168 | r | #ui.R
require(shiny)
require(shinydashboard)
require(leaflet)
dashboardPage(
dashboardHeader(title="Final Project"
),
dashboardSidebar(
sidebarMenu(
actionButton(inputId = "clicks1", label = "Update Data",width="100%"),
menuItem("Non-Aggregate1", tabName = "Non-Aggregate1"),
menuItem("Aggregate1", tabName = "Aggregate1"),
menuItem("Scatter1", tabName = "scatter1"),
menuItem("Scatter2", tabName = "scatter2"),
menuItem("Scatter3", tabName = "scatter3"),
menuItem("Scatter4", tabName = "scatter4"),
menuItem("Scatter5", tabName = "scatter5")
)
),
dashboardBody(
tabItems(
# First tab content
tabItem(tabName = "scatter1",
sliderInput("margins","Margin of Error",min=0,max=1, value = 0.95, step = 0.001),
plotOutput(outputId = "scatter1",width='100%',height="800px")
),
tabItem(tabName = "scatter2",
sliderInput("year","Year",min=1992,max=1998, value = 1998, step = 1),
sliderInput("margins2","Margin of Error",min=0,max=1, value = 0.95, step = 0.001),
plotOutput(outputId = "scatter2",width='100%',height="400px")
),
tabItem(tabName = "scatter3",
sliderInput("year2","Year",min=1992,max=1998, value = 1998, step = 1),
sliderInput("margins2","Margin of Error",min=0,max=1, value = 0.95, step = 0.001),
plotOutput(outputId = "scatter3",width='100%',height="400px")
),
tabItem(tabName = "scatter4",
sliderInput("year3","Year",min=1992,max=1998, value = 1998, step = 1),
sliderInput("margins3","Margin of Error",min=0,max=1, value = 0.95, step = 0.001),
plotOutput(outputId = "scatter4",width='100%',height="400px")
),
tabItem(tabName = "scatter5",
plotOutput(outputId = "scatter5",width='80%',height="400px")
),
tabItem(tabName = "Non-Aggregate1",
plotOutput(outputId ="nonagg",width='50%',height="900px")
),
tabItem(tabName = "Aggregate1",
sliderInput("year3","Year",min=1992,max=1998, value = 1998, step = 1),
plotOutput(outputId = "agg",width='80%',height="450px")
)
)
)
)
|
/Simulation_scripts/Table_2/BCT_tau_4.R | no_license | GFGhantous/Scripts | R | false | false | 5,323 | r | ||
# Course exercise: make the age.range factor plot in a sensible age order
# instead of the default alphabetical level order.
library(ggplot2)
reddit <- read.csv("EDA_Course_Materials/lesson2/reddit.csv")
str(reddit)
# NOTE(review): `levels<-` assigns the new labels *positionally* over the
# existing (alphabetically sorted) levels, so this line relabels categories
# rather than reordering them -- whichever level happens to sort first is
# renamed "Under 18". The factor() call below is the correct approach;
# this first attempt appears to be kept for demonstration.
levels(reddit$age.range) <- c("Under 18", "18-24", "25-34", "35-44", "45-54",
                              "55-64", "65 or Above")
qplot(reddit$age.range)
# Rebuild the factor with an explicit, ordered set of levels.
reddit$age.range <- factor(reddit$age.range,
                           levels = c("Under 18", "18-24", "25-34","35-44",
                                      "45-54", "55-64", "65 or Above"),
                           ordered = TRUE)
qplot(reddit$age.range)
| /EDA_Course_Materials/lesson2/reddit_question.R | no_license | 808sAndBR/Udacity_Data_Analysis_with_R | R | false | false | 515 | r | library(ggplot2)
reddit <- read.csv("EDA_Course_Materials/lesson2/reddit.csv")
str(reddit)
levels(reddit$age.range) <- c("Under 18", "18-24", "25-34", "35-44", "45-54",
"55-64", "65 or Above")
qplot(reddit$age.range)
reddit$age.range <- factor(reddit$age.range,
levels = c("Under 18", "18-24", "25-34","35-44",
"45-54", "55-64", "65 or Above"),
ordered = TRUE)
qplot(reddit$age.range)
|
#' Tests for Absolute Agreement
#' @description The agree_test function calculates a variety of agreement statistics. The hypothesis test of agreement is calculated by the method described by Shieh (2019). Bland-Altman limits of agreement, and confidence intervals, are also provided (Bland & Altman 1999; Bland & Altman 1986). In addition, the concordance correlation coefficient (CCC; Lin 1989) is also provided.
#' @param x Vector with first measurement
#' @param y Vector with second measurement
#' @param conf.level the confidence level required. Default is 95\%.
#' @param agree.level the agreement level required. Default is 95\%. The proportion of data that should lie between the thresholds, for 95\% limits of agreement this should be 0.95.
#' @param delta The threshold below which methods agree/can be considered equivalent, can be in any units. Equivalence Bound for Agreement.
#'
#' @return Returns single list with the results of the agreement analysis.
#'
#' \describe{
#' \item{\code{"shieh_test"}}{The TOST hypothesis test as described by Shieh.}
#' \item{\code{"ccc.xy"}}{Lin's concordance correlation coefficient and confidence intervals.}
#' \item{\code{"s.shift"}}{Scale shift from x to y.}
#' \item{\code{"l.shift"}}{Location shift from x to y.}
#' \item{\code{"bias"}}{a bias correction factor that measures how far the best-fit line deviates from a line at 45 degrees. No deviation from the 45 degree line occurs when bias = 1. See Lin 1989, page 258.}
#' \item{\code{"loa"}}{Data frame containing the limits of agreement calculations}
#' \item{\code{"h0_test"}}{Decision from hypothesis test.}
#' \item{\code{"identity.plot"}}{Plot of x and y with a line of identity with a linear regression line}
#' \item{\code{"bland_alt.plot"}}{Simple Bland-Altman plot. Red line are the upper and lower bounds for shieh test; grey box is the acceptable limits (delta). If the red lines are within the grey box then the shieh test should indicate 'reject h0', or to reject the null hypothesis that this not acceptable agreement between x & y.}
#'
#' }
#' @examples
#' data('reps')
#' agree_test(x=reps$x, y=reps$y, delta = 2)
#'
#' @section References:
#' Shieh (2019). Assessing Agreement Between Two Methods of Quantitative Measurements: Exact Test Procedure and Sample Size Calculation, Statistics in Biopharmaceutical Research, <https://doi.org/10.1080/19466315.2019.1677495>
#'
#' Bland, J. M., & Altman, D. G. (1999). Measuring agreement in method comparison studies. Statistical methods in medical research, 8(2), 135-160.
#'
#' Bland, J. M., & Altman, D. (1986). Statistical methods for assessing agreement between two methods of clinical measurement. The lancet, 327(8476), 307-310.
#'
#' Lawrence, I., & Lin, K. (1989). A concordance correlation coefficient to evaluate reproducibility. Biometrics, 255-268.
#' @importFrom stats pnorm pt qnorm qt lm anova aov complete.cases cor dchisq qchisq sd var
#' @import ggplot2
#' @export
agree_test <- function(x,
                       y,
                       delta,
                       conf.level = .95,
                       agree.level = .95) {
  # Bind names used non-standardly inside ggplot2::aes() so that
  # R CMD check does not flag them as undefined globals.
  est <- lower.ci <- upper.ci <- NULL
  # Input validation ----
  if (agree.level >= 1 || agree.level <= 0) {
    stop("agree.level (Limit of Agreement) must be a value between 0 and 1")
  }
  if (conf.level >= 1 || conf.level <= 0) {
    stop("conf.level must be a value between 0 and 1")
  }
  prop0 <- agree.level  # null central proportion (limit of agreement)
  alpha <- 1 - conf.level
  # CCC, mean difference, and Bland-Altman limits of agreement ----
  ccc_res <- ccc.xy(x, y,
                    conf.level = conf.level,
                    agree.level = agree.level)
  # Pull values from the ccc helper's output
  xbar <- ccc_res$delta$d      # mean of the paired differences
  s <- ccc_res$delta$d.sd      # SD of the paired differences
  n <- nrow(ccc_res$df_diff)   # number of complete pairs
  # Shieh (2019) exact test ----
  # The critical value `gam` is located by bisection; for each candidate the
  # attained alpha is obtained by integrating over the chi-square density of
  # the variance estimate using Simpson's rule (weights in `coevec`).
  pct <- 1 - (1 - prop0) / 2
  zp <- qnorm(pct)
  df <- n - 1
  stdh <- s / sqrt(n)
  numint <- 1000
  coevec <- c(1, rep(c(4, 2), numint / 2 - 1), 4, 1)
  cl <- 1e-6
  cu <- qchisq(1 - cl, df)
  int <- cu - cl
  intl <- int / numint
  cvec <- cl + intl * (0:numint)
  wcpdf <- (intl / 3) * coevec * dchisq(cvec, df)
  gaml <- 0
  gamu <- 100
  loop <- 0
  dalpha <- 1
  # Bisection loop. `||` (scalar, short-circuiting) replaces the original
  # elementwise `|`: both operands are length-1 and `while` needs a scalar.
  while (abs(dalpha) > 1e-8 || dalpha < 0) {
    gam <- (gaml + gamu) / 2
    h <- zp * sqrt(n) - gam * sqrt(cvec / df)
    ht <- h * (cvec < n * df * (zp / gam) ^ 2)
    alphat <- sum(wcpdf * (2 * pnorm(ht) - 1))
    if (alphat > alpha) {
      gaml <- gam
    } else {
      gamu <- gam
    }
    loop <- loop + 1
    dalpha <- alphat - alpha
  }
  # Exact confidence limits for the central proportion of differences.
  el <- xbar - gam * stdh
  eu <- xbar + gam * stdh
  # Hypothesis test: agreement is declared when the exact interval lies
  # entirely within (-delta, delta).
  if (!missing(delta)) {
    rej <- (-delta < el) * (eu < delta)
    rej_text <- if (rej == 1) "reject h0" else "don't reject h0"
  } else {
    rej_text <- "No Hypothesis Test"
  }
  shieh_test <- data.frame(prop0, el, eu, rej_text, gam)
  names(shieh_test) <- c("prop0", "lower.ci", "upper.ci", "h0_test", "test_statistic")
  #######################
  # Plot Results ----
  #######################
  # Identity plot: line of equality plus the fitted y-on-x regression line.
  z <- lm(y ~ x)
  the_int <- summary(z)$coefficients[1, 1]
  the_slope <- summary(z)$coefficients[2, 1]
  tmp.lm <- data.frame(the_int, the_slope)
  # Common axis range so the identity plot is square.
  scalemin <- min(c(min(x, na.rm = TRUE), min(y, na.rm = TRUE)))
  scalemax <- max(c(max(x, na.rm = TRUE), max(y, na.rm = TRUE)))
  identity.plot <- ggplot(ccc_res$df_diff,
                          aes(x = x, y = y)) +
    geom_point(na.rm = TRUE) +
    geom_abline(intercept = 0, slope = 1) +
    geom_abline(
      data = tmp.lm,
      aes(intercept = the_int, slope = the_slope),
      linetype = "dashed",
      color = "red"
    ) +
    xlab("Method: x") +
    xlim(scalemin, scalemax) +
    ylim(scalemin, scalemax) +
    ylab("Method: y") +
    coord_fixed(ratio = 1 / 1) +
    theme_bw()
  # Bland-Altman plot: differences vs. averages, with shaded confidence
  # bands for each limit of agreement (orange) and the mean difference (grey).
  bland_alt.plot <- ggplot(ccc_res$df_diff,
                           aes(x = mean, y = delta)) +
    geom_point(na.rm = TRUE) +
    annotate("rect",
             xmin = -Inf, xmax = Inf,
             ymin = ccc_res$delta$lower.lci,
             ymax = ccc_res$delta$lower.uci,
             alpha = .5,
             fill = "#D55E00") +
    annotate("rect",
             xmin = -Inf, xmax = Inf,
             ymin = ccc_res$delta$upper.lci,
             ymax = ccc_res$delta$upper.uci,
             alpha = .5,
             fill = "#D55E00") +
    geom_hline(data = ccc_res$delta,
               aes(yintercept = d),
               linetype = 1) +
    annotate("rect",
             xmin = -Inf, xmax = Inf,
             ymin = ccc_res$delta$d.lci,
             ymax = ccc_res$delta$d.uci,
             alpha = .5,
             fill = "gray") +
    xlab("Average of Method x and Method y") +
    ylab("Difference between Methods x & y") +
    theme_bw() +
    theme(legend.position = "none")
  ### Save limits of agreement
  df_loa <- data.frame(
    estimate = c(ccc_res$delta$d, ccc_res$delta$lower.loa, ccc_res$delta$upper.loa),
    lower.ci = c(ccc_res$delta$d.lci, ccc_res$delta$lower.lci, ccc_res$delta$upper.lci),
    upper.ci = c(ccc_res$delta$d.uci, ccc_res$delta$lower.uci, ccc_res$delta$upper.uci),
    row.names = c("Difference", "Lower LoA", "Upper LoA")
  )
  # Variance components; currently computed but not returned.
  var_comp <- data.frame(
    delta.sd = ccc_res$delta$d.sd,
    var.loa = ccc_res$delta$var.loa
  )
  #######################
  # Return Results ----
  #######################
  # NOTE(review): `class = "simple"` is an element *inside* the list (a
  # component named "class"), not the S3 class -- that is set by the outer
  # structure() call. Confirm this is intentional before changing it.
  structure(list(shieh_test = shieh_test,
                 ccc.xy = ccc_res$rho.c,
                 s.shift = ccc_res$s.shift,
                 l.shift = ccc_res$l.shift,
                 bias = ccc_res$bias,
                 loa = df_loa,
                 conf.level = conf.level,
                 agree.level = agree.level,
                 bland_alt.plot = bland_alt.plot,
                 identity.plot = identity.plot,
                 h0_test = rej_text,
                 class = "simple"),
            class = "simple_agree")
}
| /R/agree_test.R | no_license | hyunsooseol/SimplyAgree | R | false | false | 8,002 | r | #' Tests for Absolute Agreement
#' @description The agree_test function calculates a variety of agreement statistics. The hypothesis test of agreement is calculated by the method described by Shieh (2019). Bland-Altman limits of agreement, and confidence intervals, are also provided (Bland & Altman 1999; Bland & Altman 1986). In addition, the concordance correlation coefficient (CCC; Lin 1989) is also provided.
#' @param x Vector with first measurement
#' @param y Vector with second measurement
#' @param conf.level the confidence level required. Default is 95\%.
#' @param agree.level the agreement level required. Default is 95\%. The proportion of data that should lie between the thresholds, for 95\% limits of agreement this should be 0.95.
#' @param delta The threshold below which methods agree/can be considered equivalent, can be in any units. Equivalence Bound for Agreement.
#'
#' @return Returns single list with the results of the agreement analysis.
#'
#' \describe{
#' \item{\code{"shieh_test"}}{The TOST hypothesis test as described by Shieh.}
#' \item{\code{"ccc.xy"}}{Lin's concordance correlation coefficient and confidence intervals.}
#' \item{\code{"s.shift"}}{Scale shift from x to y.}
#' \item{\code{"l.shift"}}{Location shift from x to y.}
#' \item{\code{"bias"}}{a bias correction factor that measures how far the best-fit line deviates from a line at 45 degrees. No deviation from the 45 degree line occurs when bias = 1. See Lin 1989, page 258.}
#' \item{\code{"loa"}}{Data frame containing the limits of agreement calculations}
#' \item{\code{"h0_test"}}{Decision from hypothesis test.}
#' \item{\code{"identity.plot"}}{Plot of x and y with a line of identity with a linear regression line}
#' \item{\code{"bland_alt.plot"}}{Simple Bland-Altman plot. Red line are the upper and lower bounds for shieh test; grey box is the acceptable limits (delta). If the red lines are within the grey box then the shieh test should indicate 'reject h0', or to reject the null hypothesis that this not acceptable agreement between x & y.}
#'
#' }
#' @examples
#' data('reps')
#' agree_test(x=reps$x, y=reps$y, delta = 2)
#'
#' @section References:
#' Shieh (2019). Assessing Agreement Between Two Methods of Quantitative Measurements: Exact Test Procedure and Sample Size Calculation, Statistics in Biopharmaceutical Research, <https://doi.org/10.1080/19466315.2019.1677495>
#'
#' Bland, J. M., & Altman, D. G. (1999). Measuring agreement in method comparison studies. Statistical methods in medical research, 8(2), 135-160.
#'
#' Bland, J. M., & Altman, D. (1986). Statistical methods for assessing agreement between two methods of clinical measurement. The lancet, 327(8476), 307-310.
#'
#' Lawrence, I., & Lin, K. (1989). A concordance correlation coefficient to evaluate reproducibility. Biometrics, 255-268.
#' @importFrom stats pnorm pt qnorm qt lm anova aov complete.cases cor dchisq qchisq sd var
#' @import ggplot2
#' @export
agree_test <- function(x,
                       y,
                       delta,
                       conf.level = .95,
                       agree.level = .95) {
  # Bind names used non-standardly inside ggplot2::aes() so that
  # R CMD check does not flag them as undefined globals.
  est <- lower.ci <- upper.ci <- NULL
  # Input validation ----
  if (agree.level >= 1 || agree.level <= 0) {
    stop("agree.level (Limit of Agreement) must be a value between 0 and 1")
  }
  if (conf.level >= 1 || conf.level <= 0) {
    stop("conf.level must be a value between 0 and 1")
  }
  prop0 <- agree.level  # null central proportion (limit of agreement)
  alpha <- 1 - conf.level
  # CCC, mean difference, and Bland-Altman limits of agreement ----
  ccc_res <- ccc.xy(x, y,
                    conf.level = conf.level,
                    agree.level = agree.level)
  # Pull values from the ccc helper's output
  xbar <- ccc_res$delta$d      # mean of the paired differences
  s <- ccc_res$delta$d.sd      # SD of the paired differences
  n <- nrow(ccc_res$df_diff)   # number of complete pairs
  # Shieh (2019) exact test ----
  # The critical value `gam` is located by bisection; for each candidate the
  # attained alpha is obtained by integrating over the chi-square density of
  # the variance estimate using Simpson's rule (weights in `coevec`).
  pct <- 1 - (1 - prop0) / 2
  zp <- qnorm(pct)
  df <- n - 1
  stdh <- s / sqrt(n)
  numint <- 1000
  coevec <- c(1, rep(c(4, 2), numint / 2 - 1), 4, 1)
  cl <- 1e-6
  cu <- qchisq(1 - cl, df)
  int <- cu - cl
  intl <- int / numint
  cvec <- cl + intl * (0:numint)
  wcpdf <- (intl / 3) * coevec * dchisq(cvec, df)
  gaml <- 0
  gamu <- 100
  loop <- 0
  dalpha <- 1
  # Bisection loop. `||` (scalar, short-circuiting) replaces the original
  # elementwise `|`: both operands are length-1 and `while` needs a scalar.
  while (abs(dalpha) > 1e-8 || dalpha < 0) {
    gam <- (gaml + gamu) / 2
    h <- zp * sqrt(n) - gam * sqrt(cvec / df)
    ht <- h * (cvec < n * df * (zp / gam) ^ 2)
    alphat <- sum(wcpdf * (2 * pnorm(ht) - 1))
    if (alphat > alpha) {
      gaml <- gam
    } else {
      gamu <- gam
    }
    loop <- loop + 1
    dalpha <- alphat - alpha
  }
  # Exact confidence limits for the central proportion of differences.
  el <- xbar - gam * stdh
  eu <- xbar + gam * stdh
  # Hypothesis test: agreement is declared when the exact interval lies
  # entirely within (-delta, delta).
  if (!missing(delta)) {
    rej <- (-delta < el) * (eu < delta)
    rej_text <- if (rej == 1) "reject h0" else "don't reject h0"
  } else {
    rej_text <- "No Hypothesis Test"
  }
  shieh_test <- data.frame(prop0, el, eu, rej_text, gam)
  names(shieh_test) <- c("prop0", "lower.ci", "upper.ci", "h0_test", "test_statistic")
  #######################
  # Plot Results ----
  #######################
  # Identity plot: line of equality plus the fitted y-on-x regression line.
  z <- lm(y ~ x)
  the_int <- summary(z)$coefficients[1, 1]
  the_slope <- summary(z)$coefficients[2, 1]
  tmp.lm <- data.frame(the_int, the_slope)
  # Common axis range so the identity plot is square.
  scalemin <- min(c(min(x, na.rm = TRUE), min(y, na.rm = TRUE)))
  scalemax <- max(c(max(x, na.rm = TRUE), max(y, na.rm = TRUE)))
  identity.plot <- ggplot(ccc_res$df_diff,
                          aes(x = x, y = y)) +
    geom_point(na.rm = TRUE) +
    geom_abline(intercept = 0, slope = 1) +
    geom_abline(
      data = tmp.lm,
      aes(intercept = the_int, slope = the_slope),
      linetype = "dashed",
      color = "red"
    ) +
    xlab("Method: x") +
    xlim(scalemin, scalemax) +
    ylim(scalemin, scalemax) +
    ylab("Method: y") +
    coord_fixed(ratio = 1 / 1) +
    theme_bw()
  # Bland-Altman plot: differences vs. averages, with shaded confidence
  # bands for each limit of agreement (orange) and the mean difference (grey).
  bland_alt.plot <- ggplot(ccc_res$df_diff,
                           aes(x = mean, y = delta)) +
    geom_point(na.rm = TRUE) +
    annotate("rect",
             xmin = -Inf, xmax = Inf,
             ymin = ccc_res$delta$lower.lci,
             ymax = ccc_res$delta$lower.uci,
             alpha = .5,
             fill = "#D55E00") +
    annotate("rect",
             xmin = -Inf, xmax = Inf,
             ymin = ccc_res$delta$upper.lci,
             ymax = ccc_res$delta$upper.uci,
             alpha = .5,
             fill = "#D55E00") +
    geom_hline(data = ccc_res$delta,
               aes(yintercept = d),
               linetype = 1) +
    annotate("rect",
             xmin = -Inf, xmax = Inf,
             ymin = ccc_res$delta$d.lci,
             ymax = ccc_res$delta$d.uci,
             alpha = .5,
             fill = "gray") +
    xlab("Average of Method x and Method y") +
    ylab("Difference between Methods x & y") +
    theme_bw() +
    theme(legend.position = "none")
  ### Save limits of agreement
  df_loa <- data.frame(
    estimate = c(ccc_res$delta$d, ccc_res$delta$lower.loa, ccc_res$delta$upper.loa),
    lower.ci = c(ccc_res$delta$d.lci, ccc_res$delta$lower.lci, ccc_res$delta$upper.lci),
    upper.ci = c(ccc_res$delta$d.uci, ccc_res$delta$lower.uci, ccc_res$delta$upper.uci),
    row.names = c("Difference", "Lower LoA", "Upper LoA")
  )
  # Variance components; currently computed but not returned.
  var_comp <- data.frame(
    delta.sd = ccc_res$delta$d.sd,
    var.loa = ccc_res$delta$var.loa
  )
  #######################
  # Return Results ----
  #######################
  # NOTE(review): `class = "simple"` is an element *inside* the list (a
  # component named "class"), not the S3 class -- that is set by the outer
  # structure() call. Confirm this is intentional before changing it.
  structure(list(shieh_test = shieh_test,
                 ccc.xy = ccc_res$rho.c,
                 s.shift = ccc_res$s.shift,
                 l.shift = ccc_res$l.shift,
                 bias = ccc_res$bias,
                 loa = df_loa,
                 conf.level = conf.level,
                 agree.level = agree.level,
                 bland_alt.plot = bland_alt.plot,
                 identity.plot = identity.plot,
                 h0_test = rej_text,
                 class = "simple"),
            class = "simple_agree")
}
|
# Extracted example for simstudy::addColumns().
library(simstudy)
### Name: addColumns
### Title: Add columns to existing data set
### Aliases: addColumns
### ** Examples
# New data set
# Two-column definition: a constant (xNr = 7) and a Uniform(10, 20) draw.
def <- defData(varname = "xNr", dist = "nonrandom", formula=7, id = "idnum")
def <- defData(def, varname="xUni", dist="uniform", formula="10;20")
dt <- genData(10, def)
# Add columns to dt
# y1 ~ Normal(mean 10, variance 3); y2 ~ Bernoulli(p = 0.5).
def2 <- defDataAdd(varname="y1", formula = 10, variance = 3)
def2 <- defDataAdd(def2, varname="y2", formula = .5, dist = "binary")
def2
dt <- addColumns(def2, dt)
dt
| /data/genthat_extracted_code/simstudy/examples/addColumns.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 505 | r | library(simstudy)
### Name: addColumns
### Title: Add columns to existing data set
### Aliases: addColumns
### ** Examples
# New data set
def <- defData(varname = "xNr", dist = "nonrandom", formula=7, id = "idnum")
def <- defData(def, varname="xUni", dist="uniform", formula="10;20")
dt <- genData(10, def)
# Add columns to dt
def2 <- defDataAdd(varname="y1", formula = 10, variance = 3)
def2 <- defDataAdd(def2, varname="y2", formula = .5, dist = "binary")
def2
dt <- addColumns(def2, dt)
dt
|
#####################################
# Helper functions for C interfaces #
#####################################
#' Generic C interface
#'
#' Generic interface for C-functions with inputs (values, times, length(values), ...) and output (values_new). Example: sma, rolling_max, ema, ...
#'
#' @param x a numeric \code{"uts"} object with finite, non-NA observation values.
#' @param C_fct the name of the C function to call.
#' @param \dots further arguments passed to the C function.
#'
#' @keywords internal
#' @examples
#' # SMA_last
#' generic_C_interface(ex_uts(), "sma_last", width_before=ddays(1), width_after=ddays(0))
#'
#' # One- vs. two-sided window
#' generic_C_interface(ex_uts(), "rolling_num_obs", width_before=dhours(6), width_after=dhours(0))
#' generic_C_interface(ex_uts(), "rolling_num_obs", width_before=dhours(6), width_after=dhours(6))
# Dispatch a numeric "uts" time series to a compiled Rcpp wrapper named
# "Rcpp_wrapper_<C_fct>" and return the input series with its observation
# values replaced by the wrapper's result.
generic_C_interface <- function(x, C_fct, ...) {
  # --- input validation (same checks, same order, same messages) ---
  if (!is.uts(x)) {
    stop("'x' is not a 'uts' object")
  }
  obs <- x$values
  if (!is.numeric(obs)) {
    stop("The time series is not numeric")
  }
  if (anyNA(obs) || any(is.infinite(obs))) {
    stop("The time series observation values have to be finite and not NA")
  }
  if (length(obs) != length(x$times)) {
    stop("The number of observation values and observation times does not match")
  }
  # --- dispatch to the compiled Rcpp wrapper ---
  wrapper_name <- paste0("Rcpp_wrapper_", C_fct)
  updated <- do.call(wrapper_name, list(obs, x$times, ...))
  # Reuse the input object so no new POSIXct time vector is constructed.
  x$values <- updated
  x
}
| /R/C_interfaces.R | no_license | andreas50/utsOperators | R | false | false | 1,582 | r | #####################################
# Helper functions for C interfaces #
#####################################
#' Generic C interface
#'
#' Generic interface for C-functions with inputs (values, times, length(values), ...) and output (values_new). Example: sma, rolling_max, ema, ...
#'
#' @param x a numeric \code{"uts"} object with finite, non-NA observation values.
#' @param C_fct the name of the C function to call.
#' @param \dots further arguments passed to the C function.
#'
#' @keywords internal
#' @examples
#' # SMA_last
#' generic_C_interface(ex_uts(), "sma_last", width_before=ddays(1), width_after=ddays(0))
#'
#' # One- vs. two-sided window
#' generic_C_interface(ex_uts(), "rolling_num_obs", width_before=dhours(6), width_after=dhours(0))
#' generic_C_interface(ex_uts(), "rolling_num_obs", width_before=dhours(6), width_after=dhours(6))
# Dispatch a numeric "uts" time series to a compiled Rcpp wrapper named
# "Rcpp_wrapper_<C_fct>" and return the input series with its observation
# values replaced by the wrapper's result.
generic_C_interface <- function(x, C_fct, ...) {
  # --- input validation (same checks, same order, same messages) ---
  if (!is.uts(x)) {
    stop("'x' is not a 'uts' object")
  }
  obs <- x$values
  if (!is.numeric(obs)) {
    stop("The time series is not numeric")
  }
  if (anyNA(obs) || any(is.infinite(obs))) {
    stop("The time series observation values have to be finite and not NA")
  }
  if (length(obs) != length(x$times)) {
    stop("The number of observation values and observation times does not match")
  }
  # --- dispatch to the compiled Rcpp wrapper ---
  wrapper_name <- paste0("Rcpp_wrapper_", C_fct)
  updated <- do.call(wrapper_name, list(obs, x$times, ...))
  # Reuse the input object so no new POSIXct time vector is constructed.
  x$values <- updated
  x
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/select_nodes_by_degree.R
\name{select_nodes_by_degree}
\alias{select_nodes_by_degree}
\title{Select nodes in the graph based on their degree values}
\usage{
select_nodes_by_degree(graph, expressions, set_op = "union")
}
\arguments{
\item{graph}{A graph object of class \code{dgr_graph}.}
\item{expressions}{One or more expressions for filtering nodes by degree
values. Use a combination of degree type (\code{deg} for total degree, \code{indeg}
for in-degree, and \code{outdeg} for out-degree) with a comparison operator and
values for comparison (e.g., use \code{"deg >= 2"} to select nodes with a degree
greater than or equal to 2).}
\item{set_op}{The set operation to perform upon consecutive selections of
graph nodes. This can either be as a \code{union} (the default), as an
intersection of selections with \code{intersect}, or, as a \code{difference} on the
previous selection, if it exists.}
}
\value{
A graph object of class \code{dgr_graph}.
}
\description{
Using a graph object of class \code{dgr_graph}, create a selection of nodes that
have certain degree values.
}
\examples{
# Create a random graph using
# the `add_gnm_graph()` function
graph <-
create_graph() \%>\%
add_gnm_graph(
n = 35, m = 125,
set_seed = 23)
# Report which nodes have a
# total degree (in-degree +
# out-degree) of exactly 9
graph \%>\%
select_nodes_by_degree(
expressions = "deg == 9") \%>\%
get_selection()
# Report which nodes have a
# total degree greater than or
# equal to 9
graph \%>\%
select_nodes_by_degree(
expressions = "deg >= 9") \%>\%
get_selection()
# Combine two calls of
# `select_nodes_by_degree()` to
# get those nodes with total
# degree less than 3 and total
# degree greater than 10 (by
# default, those `select...()`
# functions will `union` the
# sets of nodes selected)
graph \%>\%
select_nodes_by_degree(
expressions = "deg < 3") \%>\%
select_nodes_by_degree(
expressions = "deg > 10") \%>\%
get_selection()
# Combine two calls of
# `select_nodes_by_degree()` to
# get those nodes with total
# degree greater than or equal
# to 3 and less than or equal
# to 10 (the key here is to
# `intersect` the sets of nodes
# selected in the second call)
graph \%>\%
select_nodes_by_degree(
expressions = "deg >= 3") \%>\%
select_nodes_by_degree(
expressions = "deg <= 10",
set_op = "intersect") \%>\%
get_selection()
# Select all nodes with an
# in-degree greater than 5, then,
# apply a node attribute to those
# selected nodes (coloring the
# selected nodes red)
graph_2 <-
graph \%>\%
select_nodes_by_degree(
expressions = "indeg > 5") \%>\%
set_node_attrs_ws(
node_attr = color,
value = "red")
# Get the selection of nodes
graph_2 \%>\% get_selection()
}
| /man/select_nodes_by_degree.Rd | permissive | rich-iannone/DiagrammeR | R | false | true | 2,828 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/select_nodes_by_degree.R
\name{select_nodes_by_degree}
\alias{select_nodes_by_degree}
\title{Select nodes in the graph based on their degree values}
\usage{
select_nodes_by_degree(graph, expressions, set_op = "union")
}
\arguments{
\item{graph}{A graph object of class \code{dgr_graph}.}
\item{expressions}{One or more expressions for filtering nodes by degree
values. Use a combination of degree type (\code{deg} for total degree, \code{indeg}
for in-degree, and \code{outdeg} for out-degree) with a comparison operator and
values for comparison (e.g., use \code{"deg >= 2"} to select nodes with a degree
greater than or equal to 2).}
\item{set_op}{The set operation to perform upon consecutive selections of
graph nodes. This can either be as a \code{union} (the default), as an
intersection of selections with \code{intersect}, or, as a \code{difference} on the
previous selection, if it exists.}
}
\value{
A graph object of class \code{dgr_graph}.
}
\description{
Using a graph object of class \code{dgr_graph}, create a selection of nodes that
have certain degree values.
}
\examples{
# Create a random graph using
# the `add_gnm_graph()` function
graph <-
create_graph() \%>\%
add_gnm_graph(
n = 35, m = 125,
set_seed = 23)
# Report which nodes have a
# total degree (in-degree +
# out-degree) of exactly 9
graph \%>\%
select_nodes_by_degree(
expressions = "deg == 9") \%>\%
get_selection()
# Report which nodes have a
# total degree greater than or
# equal to 9
graph \%>\%
select_nodes_by_degree(
expressions = "deg >= 9") \%>\%
get_selection()
# Combine two calls of
# `select_nodes_by_degree()` to
# get those nodes with total
# degree less than 3 and total
# degree greater than 10 (by
# default, those `select...()`
# functions will `union` the
# sets of nodes selected)
graph \%>\%
select_nodes_by_degree(
expressions = "deg < 3") \%>\%
select_nodes_by_degree(
expressions = "deg > 10") \%>\%
get_selection()
# Combine two calls of
# `select_nodes_by_degree()` to
# get those nodes with total
# degree greater than or equal
# to 3 and less than or equal
# to 10 (the key here is to
# `intersect` the sets of nodes
# selected in the second call)
graph \%>\%
select_nodes_by_degree(
expressions = "deg >= 3") \%>\%
select_nodes_by_degree(
expressions = "deg <= 10",
set_op = "intersect") \%>\%
get_selection()
# Select all nodes with an
# in-degree greater than 5, then,
# apply a node attribute to those
# selected nodes (coloring the
# selected nodes red)
graph_2 <-
graph \%>\%
select_nodes_by_degree(
expressions = "indeg > 5") \%>\%
set_node_attrs_ws(
node_attr = color,
value = "red")
# Get the selection of nodes
graph_2 \%>\% get_selection()
}
|
# Colocalisation analysis: for each cis protein-QTL instrument selected by the
# female rectal-cancer MVMR step, harmonise exposure (Ferkingstad pQTL) and
# outcome (Huyghe CRC GWAS) summary stats, build an LD matrix, run coloc.abf,
# and write per-protein results plus a summary table.
# NOTE(review): rm(list=ls()) in a script is an anti-pattern -- it does not
# give a clean session (packages/options persist); prefer a fresh R process.
rm(list=ls())
# environment ====
if (!requireNamespace("remotes", quietly = TRUE)) install.packages("remotes")
#remotes::install_github("MRCIEU/genetics.binaRies", force = F)
#remotes::install_github("explodecomputer/plinkbinr", force = F)
#remotes::install_github("chr1swallace/coloc@main", force = F)
#remotes::install_github("sjmgarnier/viridis", force = F)
library(genetics.binaRies)
library(plinkbinr)
library(coloc)
library(viridis)
library(data.table)
library(ieugwasr)
library(dplyr)
library(TwoSampleMR)
library(tidyverse)
source("scripts/011_colocalisation/functions/my_coloc_chriswallace.R")
# data ====
# Keep only the female rectal-cancer MVMR rows for protein exposures.
a <- read.table("analysis/008_mvmr/mvmr_results.txt", header = T, sep = "\t")
a <- subset(a, group == "Female")
a <- subset(a, exposure != "BMI")
a <- subset(a, exposure != "WHR")
a <- subset(a, exposure != "WHRadjBMI")
a <- subset(a, cancer == "rectal")
# Expected cis-SNP files for those proteins, restricted to files that exist.
filenames_mvmr <- paste0("/data/protein_GWAS_ferkingstad_EU_2021/files/cis_snps_1mb//", a$exposure, "_", a$gene, "_", a$protein, ".txt.gz.annotated.gz.exclusions.gz.alleles.gz.unzipped.cis.txt")
filenames_all <- dir("/data/protein_GWAS_ferkingstad_EU_2021/files/cis_snps_1mb/", recursive = TRUE, full.names = TRUE, pattern = ".cis.txt")
filenames <- intersect(filenames_mvmr, filenames_all)
# exposure ====
exposure_list <- lapply(filenames, fread, col.names = c("CHR", "POS", "SNPID", "SNP", "EA", "OA",
                                                        "beta.exposure", "pval.exposure", "minus_log10_pval", "se.exposure", "samplesize.exposure",
                                                        "EAF", "exposure", "effect_allele.exposure", "other_allele.exposure", "eaf.exposure"))
length(exposure_list)
# Drop empty files, then any protein whose cis region is on the X chromosome.
exposure_list <- exposure_list[sapply(exposure_list, nrow) > 0]
length(exposure_list)
exposure_list <- purrr::discard(exposure_list, ~any(.x$CHR == "chrX")) # X CHR not available in outcome
length(exposure_list)
# format exposure ====
# Strip path/suffix so each exposure is labelled "<exposure>_<gene>_<protein>".
# NOTE(review): `for (i in 1:length(...))` fails for zero-length input;
# seq_along() would be safer.
exposure_filenames <- gsub("/data/protein_GWAS_ferkingstad_EU_2021/files/cis_snps_1mb//", "", filenames)
exposure_filenames <- gsub(".txt.gz.annotated.gz.exclusions.gz.alleles.gz.unzipped.cis.txt", "", exposure_filenames)
for (i in 1:length(exposure_list)){
  exposure_list[[i]]$exposure <- gsub("/data/protein_GWAS_ferkingstad_EU_2021/files/", "", exposure_list[[i]]$exposure)
}
for (i in 1:length(exposure_list)){
  exposure_list[[i]]$exposure <- gsub(".txt.gz.unzipped", "", exposure_list[[i]]$exposure)
}
for (i in 1:length(exposure_list)){
  exposure_list[[i]]$id.exposure <- paste0(exposure_filenames[[i]], "_", "joint_rectal_female")
}
# outcome data ====
# Re-read the single outcome GWAS once per exposure, restricted to that
# exposure's SNPs.
filenames <- c("joint_rectal_Female_wald_MAC50_1.TBL.annotated.txt")
outcome_list <- list()
for (i in 1:length(exposure_list)){
  outcome_list[i] <- lapply(paste0("/data/GWAS_data/files/huyghe_2018_PMID30510241/processed/",filenames),
                            read_outcome_data,
                            snps = exposure_list[[i]]$SNP,
                            sep = " ",
                            snp_col = "SNP",
                            beta_col = "Effect",
                            se_col = "StdErr",
                            eaf_col = "Freq1",
                            effect_allele_col = "Allele1",
                            other_allele_col = "Allele2",
                            pval_col = "P.value",
                            min_pval = 1e-200,
                            log_pval = FALSE,
                            chr_col = "Chr",
                            pos_col = "Position",
                            phenotype_col = "phenotype")
  outcome_list[[i]]$outcome <- "joint_rectal_female"
  outcome_list[[i]]$id.outcome <- paste0(exposure_filenames[[i]], "_", outcome_list[[i]]$outcome)
}
# harmonise ====
# Align alleles across exposure/outcome, de-duplicate SNPs within each
# exposure, and split back into one data frame per protein.
exposure <- bind_rows(exposure_list)
outcome <- bind_rows(outcome_list)
harmonise_data <- harmonise_data(exposure, outcome, action = 2)
harmonise_data$remove_duplicates <- paste0(harmonise_data$SNP, "_", harmonise_data$id.exposure)
harmonise_data <- harmonise_data[!duplicated(harmonise_data$remove_duplicates),]
harmonise_data_list <- split(harmonise_data, harmonise_data$id.exposure)
# loop over all harmonised data and run ld matrix, formatting, coloc, save ====
table_master <- data.frame() # make empty dataframe for final results
for (i in 1:length(harmonise_data_list)){
  label_exposure <- unique(harmonise_data_list[[i]]$exposure)
  label <- paste0(label_exposure, "_", "joint_rectal_female")
  label_outcome <- "joint_rectal_female"
  # make ld matrix ====
  # LD from the 1000 Genomes EUR reference panel via local plink.
  ld <- ld_matrix_local(
    harmonise_data_list[[i]]$SNP,
    with_alleles = FALSE,
    bfile = "/data/GWAS_data/files/references/1kG_v3/EUR/EUR",
    plink_bin = get_plink_exe())
  # format LD matrix and harmonised list ====
  # Restrict both objects to their common SNPs and put them in the same order
  # (coloc requires row/column order to match the summary-stat order).
  ld <- ld[which(rownames(ld) %in% harmonise_data_list[[i]]$SNP), which(colnames(ld) %in% harmonise_data_list[[i]]$SNP)]
  harmonise_data_list[[i]] <- harmonise_data_list[[i]][which(harmonise_data_list[[i]]$SNP %in% rownames(ld)),]
  ld <- ld[match(harmonise_data_list[[i]]$SNP,rownames(ld)),]
  ld <- ld[,match(harmonise_data_list[[i]]$SNP, colnames(ld))]
  harmonise_data_list[[i]] <- harmonise_data_list[[i]][match(rownames(ld), harmonise_data_list[[i]]$SNP),]
  # make lists for coloc ====
  # NOTE(review): N = 35559 (quantitative pQTL) and N = 120328 (case-control
  # outcome) are hard-coded; presumably the source-GWAS sample sizes --
  # confirm against the study metadata.
  coloc_data_exposure <- list(beta = harmonise_data_list[[i]]$beta.exposure, varbeta = harmonise_data_list[[i]]$se.exposure^2, MAF = harmonise_data_list[[i]]$eaf.exposure, type = "quant", N = 35559, snp = rownames(ld), LD = ld, position = harmonise_data_list[[i]]$POS)
  coloc_data_outcome <- list(beta = harmonise_data_list[[i]]$beta.outcome, varbeta = harmonise_data_list[[i]]$se.outcome^2, MAF = harmonise_data_list[[i]]$eaf.outcome, type = "cc", N = 120328, snp = rownames(ld), LD = ld, position = harmonise_data_list[[i]]$POS)
  # coloc ====
  # Enumeration-based colocalisation plus a sensitivity plot saved to PDF.
  coloc_results <- coloc.abf(dataset1 = coloc_data_exposure, dataset2 = coloc_data_outcome)
  pdf(paste0("analysis/009_colocalisation/results/joint_rectal_female/figures/", label, ".pdf"),
      height = 10, width = 10)
  coloc_sensitivity <- my_sensitivity(coloc_results, "H4 > 0.9",
                                      trait1_title = label_exposure, trait2_title = label_outcome)
  dev.off()
  # save ====
  saveRDS(coloc_results, paste0("analysis/009_colocalisation/results/joint_rectal_female/", label, ".RData"))
  # make table ====
  # Posterior probabilities for hypotheses H0-H4 from the coloc summary.
  table <- data.frame(
    exposure = label_exposure,
    outcome = label_outcome,
    id = label,
    nsnps = coloc_results["summary"][[1]][1],
    h0 = coloc_results["summary"][[1]][2],
    h1 = coloc_results["summary"][[1]][3],
    h2 = coloc_results["summary"][[1]][4],
    h3 = coloc_results["summary"][[1]][5],
    h4 = coloc_results["summary"][[1]][6])
  table_master <- rbind(table_master, table)
}
write.table(table_master, "analysis/009_colocalisation/results/joint_rectal_female/001_coloc_results.txt",
            row.names = FALSE, col.names = TRUE, quote = FALSE, sep = "\t")
| /scripts/011_colocalisation/002_coloc/002_coloc_joint_rectal_female.R | no_license | mattlee821/adiposity_proteins_colorectal_cancer | R | false | false | 6,963 | r | rm(list=ls())
# environment ====
if (!requireNamespace("remotes", quietly = TRUE)) install.packages("remotes")
#remotes::install_github("MRCIEU/genetics.binaRies", force = F)
#remotes::install_github("explodecomputer/plinkbinr", force = F)
#remotes::install_github("chr1swallace/coloc@main", force = F)
#remotes::install_github("sjmgarnier/viridis", force = F)
library(genetics.binaRies)
library(plinkbinr)
library(coloc)
library(viridis)
library(data.table)
library(ieugwasr)
library(dplyr)
library(TwoSampleMR)
library(tidyverse)
source("scripts/011_colocalisation/functions/my_coloc_chriswallace.R")
# data ====
a <- read.table("analysis/008_mvmr/mvmr_results.txt", header = T, sep = "\t")
a <- subset(a, group == "Female")
a <- subset(a, exposure != "BMI")
a <- subset(a, exposure != "WHR")
a <- subset(a, exposure != "WHRadjBMI")
a <- subset(a, cancer == "rectal")
filenames_mvmr <- paste0("/data/protein_GWAS_ferkingstad_EU_2021/files/cis_snps_1mb//", a$exposure, "_", a$gene, "_", a$protein, ".txt.gz.annotated.gz.exclusions.gz.alleles.gz.unzipped.cis.txt")
filenames_all <- dir("/data/protein_GWAS_ferkingstad_EU_2021/files/cis_snps_1mb/", recursive = TRUE, full.names = TRUE, pattern = ".cis.txt")
filenames <- intersect(filenames_mvmr, filenames_all)
# exposure ====
exposure_list <- lapply(filenames, fread, col.names = c("CHR", "POS", "SNPID", "SNP", "EA", "OA",
"beta.exposure", "pval.exposure", "minus_log10_pval", "se.exposure", "samplesize.exposure",
"EAF", "exposure", "effect_allele.exposure", "other_allele.exposure", "eaf.exposure"))
length(exposure_list)
exposure_list <- exposure_list[sapply(exposure_list, nrow) > 0]
length(exposure_list)
exposure_list <- purrr::discard(exposure_list, ~any(.x$CHR == "chrX")) # X CHR not available in outcome
length(exposure_list)
# format exposure ====
exposure_filenames <- gsub("/data/protein_GWAS_ferkingstad_EU_2021/files/cis_snps_1mb//", "", filenames)
exposure_filenames <- gsub(".txt.gz.annotated.gz.exclusions.gz.alleles.gz.unzipped.cis.txt", "", exposure_filenames)
for (i in 1:length(exposure_list)){
exposure_list[[i]]$exposure <- gsub("/data/protein_GWAS_ferkingstad_EU_2021/files/", "", exposure_list[[i]]$exposure)
}
for (i in 1:length(exposure_list)){
exposure_list[[i]]$exposure <- gsub(".txt.gz.unzipped", "", exposure_list[[i]]$exposure)
}
for (i in 1:length(exposure_list)){
exposure_list[[i]]$id.exposure <- paste0(exposure_filenames[[i]], "_", "joint_rectal_female")
}
# outcome data ====
filenames <- c("joint_rectal_Female_wald_MAC50_1.TBL.annotated.txt")
outcome_list <- list()
for (i in 1:length(exposure_list)){
outcome_list[i] <- lapply(paste0("/data/GWAS_data/files/huyghe_2018_PMID30510241/processed/",filenames),
read_outcome_data,
snps = exposure_list[[i]]$SNP,
sep = " ",
snp_col = "SNP",
beta_col = "Effect",
se_col = "StdErr",
eaf_col = "Freq1",
effect_allele_col = "Allele1",
other_allele_col = "Allele2",
pval_col = "P.value",
min_pval = 1e-200,
log_pval = FALSE,
chr_col = "Chr",
pos_col = "Position",
phenotype_col = "phenotype")
outcome_list[[i]]$outcome <- "joint_rectal_female"
outcome_list[[i]]$id.outcome <- paste0(exposure_filenames[[i]], "_", outcome_list[[i]]$outcome)
}
# harmonise ====
exposure <- bind_rows(exposure_list)
outcome <- bind_rows(outcome_list)
harmonise_data <- harmonise_data(exposure, outcome, action = 2)
harmonise_data$remove_duplicates <- paste0(harmonise_data$SNP, "_", harmonise_data$id.exposure)
harmonise_data <- harmonise_data[!duplicated(harmonise_data$remove_duplicates),]
harmonise_data_list <- split(harmonise_data, harmonise_data$id.exposure)
# loop over all harmonised data and run ld matrix, formatting, coloc, save ====
# accumulator for the per-protein posterior summaries written at the end
table_master <- data.frame()
# One iteration per harmonised exposure/outcome dataset: build the LD matrix,
# align it with the harmonised SNPs, run coloc.abf, save the sensitivity plot
# and full results, and collect the posterior summary row.
# seq_along() is used instead of 1:length(): it yields an empty sequence for
# an empty list rather than the invalid c(1, 0).
for (i in seq_along(harmonise_data_list)){
  label_exposure <- unique(harmonise_data_list[[i]]$exposure)
  label <- paste0(label_exposure, "_", "joint_rectal_female")
  label_outcome <- "joint_rectal_female"
  # make ld matrix ====
  ld <- ld_matrix_local(
    harmonise_data_list[[i]]$SNP,
    with_alleles = FALSE,
    bfile = "/data/GWAS_data/files/references/1kG_v3/EUR/EUR",
    plink_bin = get_plink_exe())
  # format LD matrix and harmonised list ====
  # keep only SNPs present in both objects, then put the LD matrix rows/cols
  # and the harmonised data into the same SNP order (coloc requires this)
  ld <- ld[which(rownames(ld) %in% harmonise_data_list[[i]]$SNP), which(colnames(ld) %in% harmonise_data_list[[i]]$SNP)]
  harmonise_data_list[[i]] <- harmonise_data_list[[i]][which(harmonise_data_list[[i]]$SNP %in% rownames(ld)),]
  ld <- ld[match(harmonise_data_list[[i]]$SNP,rownames(ld)),]
  ld <- ld[,match(harmonise_data_list[[i]]$SNP, colnames(ld))]
  harmonise_data_list[[i]] <- harmonise_data_list[[i]][match(rownames(ld), harmonise_data_list[[i]]$SNP),]
  # make lists for coloc ====
  # NOTE(review): N = 35559 (quantitative exposure GWAS) and N = 120328
  # (case-control outcome GWAS) are hard-coded sample sizes -- confirm they
  # match the source GWAS used upstream.
  coloc_data_exposure <- list(beta = harmonise_data_list[[i]]$beta.exposure, varbeta = harmonise_data_list[[i]]$se.exposure^2, MAF = harmonise_data_list[[i]]$eaf.exposure, type = "quant", N = 35559, snp = rownames(ld), LD = ld, position = harmonise_data_list[[i]]$POS)
  coloc_data_outcome <- list(beta = harmonise_data_list[[i]]$beta.outcome, varbeta = harmonise_data_list[[i]]$se.outcome^2, MAF = harmonise_data_list[[i]]$eaf.outcome, type = "cc", N = 120328, snp = rownames(ld), LD = ld, position = harmonise_data_list[[i]]$POS)
  # coloc ====
  coloc_results <- coloc.abf(dataset1 = coloc_data_exposure, dataset2 = coloc_data_outcome)
  pdf(paste0("analysis/009_colocalisation/results/joint_rectal_female/figures/", label, ".pdf"),
      height = 10, width = 10)
  coloc_sensitivity <- my_sensitivity(coloc_results, "H4 > 0.9",
                                      trait1_title = label_exposure, trait2_title = label_outcome)
  dev.off()
  # save ====
  saveRDS(coloc_results, paste0("analysis/009_colocalisation/results/joint_rectal_female/", label, ".RData"))
  # make table ====
  # coloc.abf summary vector order: nsnps, PP.H0, PP.H1, PP.H2, PP.H3, PP.H4
  table <- data.frame(
    exposure = label_exposure,
    outcome = label_outcome,
    id = label,
    nsnps = coloc_results["summary"][[1]][1],
    h0 = coloc_results["summary"][[1]][2],
    h1 = coloc_results["summary"][[1]][3],
    h2 = coloc_results["summary"][[1]][4],
    h3 = coloc_results["summary"][[1]][5],
    h4 = coloc_results["summary"][[1]][6])
  table_master <- rbind(table_master, table)
}
write.table(table_master, "analysis/009_colocalisation/results/joint_rectal_female/001_coloc_results.txt",
            row.names = FALSE, col.names = TRUE, quote = FALSE, sep = "\t")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_predicted.glm.R
\name{get_predicted.glm}
\alias{get_predicted.glm}
\title{Compute predicted values of glm models.}
\usage{
\method{get_predicted}{glm}(fit, newdata = "model", prob = 0.95,
odds_to_probs = TRUE, ...)
}
\arguments{
\item{fit}{A glm model.}
\item{newdata}{A data frame in which to look for variables with which to predict. If omitted, the model matrix is used. If "model", the model's data is used.}
\item{prob}{Probability of confidence intervals (0.95 (default) will compute 2.5-97.5\% CI). Can also be a list of probs (e.g., c(0.90, 0.95)).}
\item{odds_to_probs}{Transform log odds ratios in logistic models to probabilies.}
\item{...}{Arguments passed to or from other methods.}
}
\value{
dataframe with predicted values.
}
\description{
Compute predicted values from a glm model.
}
\examples{
\dontrun{
library(psycho)
library(ggplot2)
fit <- glm(Sex ~ Adjusting, data=affective, family="binomial")
refgrid <- psycho::refdata(affective, "Adjusting")
predicted <- get_predicted(fit, newdata=refgrid)
ggplot(predicted, aes(x=Adjusting, y=Sex_Predicted)) +
geom_line() +
geom_ribbon(aes(ymin=Sex_CI_2.5,
ymax=Sex_CI_97.5),
alpha=0.1)
}
}
\author{
\href{https://dominiquemakowski.github.io/}{Dominique Makowski}
}
| /man/get_predicted.glm.Rd | permissive | HugoNjb/psycho.R | R | false | true | 1,350 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_predicted.glm.R
\name{get_predicted.glm}
\alias{get_predicted.glm}
\title{Compute predicted values of lm models.}
\usage{
\method{get_predicted}{glm}(fit, newdata = "model", prob = 0.95,
odds_to_probs = TRUE, ...)
}
\arguments{
\item{fit}{An lm model.}
\item{newdata}{A data frame in which to look for variables with which to predict. If omitted, the model matrix is used. If "model", the model's data is used.}
\item{prob}{Probability of confidence intervals (0.9 (default) will compute 2.5-97.5\% CI). Can also be a list of probs (e.g., c(0.90, 0.95)).}
\item{odds_to_probs}{Transform log odds ratios in logistic models to probabilies.}
\item{...}{Arguments passed to or from other methods.}
}
\value{
dataframe with predicted values.
}
\description{
Compute predicted from a lm model.
}
\examples{
\dontrun{
library(psycho)
library(ggplot2)
fit <- glm(Sex ~ Adjusting, data=affective, family="binomial")
refgrid <- psycho::refdata(affective, "Adjusting")
predicted <- get_predicted(fit, newdata=refgrid)
ggplot(predicted, aes(x=Adjusting, y=Sex_Predicted)) +
geom_line() +
geom_ribbon(aes(ymin=Sex_CI_2.5,
ymax=Sex_CI_97.5),
alpha=0.1)
}
}
\author{
\href{https://dominiquemakowski.github.io/}{Dominique Makowski}
}
|
#' Search
#'
#' Search for NYT articles by keywords, filters and facets.
#'
#' @param q Search query.
#' @param since,until Begin and start \code{Date} objects.
#' @param pages Number of pages of results to return. Set to infinite (\code{Inf}) to retrieve all pages (\emph{not recommended}).
#' @param sort Sort order \code{newest}, \code{oldest}, \code{relevance}.
#' @param facets Whether to show facet counts, boolean.
#' @param facet_fields The following values are allowed: \code{day_of_week}, \code{document_type}, \code{ingredients},
#' \code{news_desk}, \code{pub_month}, \code{pub_year}, \code{section_name}, \code{source}, \code{subsection_name}, \code{type_of_material}.
#' @param facet_filter Have facet counts use filters, boolean.
#' @param fl List of fields to return.
#' @param fq Query filter.
#'
#' @examples
#' \dontrun{
#' nytimes_key("xXXxxXxXxXXx")
#' trump <- ny_search("Trump", since = Sys.Date() - 3)
#' }
#'
#' @export
ny_search <- function(q, since = NULL, until = NULL, pages = 1, sort = c("newest", "oldest", "relevance"),
facets = FALSE, facet_fields = NULL, facet_filter = NULL, fl = NULL, fq = NULL){
assert_that(!missing(q))
assert_that(pages > 0)
if(is.infinite(pages)) pages <- 999999
opts <- list(
q = q,
begin_date = .process_search_date(since),
end_date = .process_search_date(until),
sort = match.arg(sort),
facets = facets,
facet_fields = facet_fields,
facet_filter = facet_filter,
fl = fl,
fq = fq,
`api-key` = .get_key()
)
parsed_url <- parse_url(BASE_URL)
parsed_url$path <- c("svc", "search", "v2", "articlesearch.json")
pb <- progress::progress_bar$new(
format = " downloading [:bar] :percent",
total = pages - 1, clear = FALSE, width = 60
)
content <- list()
for(p in 1:pages){
opts$page <- p
parsed_url$query <- opts
url <- build_url(parsed_url)
response <- GET(url)
stop_for_status(response)
page_content <- content(response)
content <- append(content, list(page_content))
# check if results left
hits <- page_content$response$meta$hits
offset <- page_content$response$meta$offset
if(p == 1){
cat(crayon::blue(cli::symbol$info), hits, "results available\n")
} else {
pb$tick()
Sys.sleep(6)
}
if(offset >= hits)
break
}
pb$terminate()
content %>%
map("response") %>%
transpose()
} | /R/search.R | permissive | news-r/nytimes | R | false | false | 2,425 | r | #' Search
#'
#' Search for NYT articles by keywords, filters and facets.
#'
#' @param q Search query.
#' @param since,until Begin and start \code{Date} objects.
#' @param pages Number of pages of results to return. Set to infinite (\code{Inf}) to retrieve all pages (\emph{not recommended}).
#' @param sort Sort order \code{newest}, \code{oldest}, \code{relevance}.
#' @param facets Whether to show facet counts, boolean.
#' @param facet_fields The following values are allowed: \code{day_of_week}, \code{document_type}, \code{ingredients},
#' \code{news_desk}, \code{pub_month}, \code{pub_year}, \code{section_name}, \code{source}, \code{subsection_name}, \code{type_of_material}.
#' @param facet_filter Have facet counts use filters, boolean.
#' @param fl List of fields to return.
#' @param fq Query filter.
#'
#' @examples
#' \dontrun{
#' nytimes_key("xXXxxXxXxXXx")
#' trump <- ny_search("Trump", since = Sys.Date() - 3)
#' }
#'
#' @export
ny_search <- function(q, since = NULL, until = NULL, pages = 1, sort = c("newest", "oldest", "relevance"),
                      facets = FALSE, facet_fields = NULL, facet_filter = NULL, fl = NULL, fq = NULL){
  assert_that(!missing(q))
  assert_that(pages > 0)
  # "all pages" is approximated with a very large count; the loop below
  # breaks as soon as the API reports no results left.
  if(is.infinite(pages)) pages <- 999999
  # Query-string options shared by every page request; `page` is added
  # inside the loop.
  opts <- list(
    q = q,
    begin_date = .process_search_date(since),
    end_date = .process_search_date(until),
    sort = match.arg(sort),
    facets = facets,
    facet_fields = facet_fields,
    facet_filter = facet_filter,
    fl = fl,
    fq = fq,
    `api-key` = .get_key()
  )
  parsed_url <- parse_url(BASE_URL)
  parsed_url$path <- c("svc", "search", "v2", "articlesearch.json")
  # NOTE(review): total = pages - 1 is 0 when pages == 1 -- confirm that
  # progress_bar$new() accepts a zero total (the bar is only ticked from
  # the second page onwards).
  pb <- progress::progress_bar$new(
    format = "  downloading [:bar] :percent",
    total = pages - 1, clear = FALSE, width = 60
  )
  # The local list `content` shadows httr::content only for variable
  # lookup; the call content(response) below still resolves to the function.
  content <- list()
  for(p in 1:pages){
    opts$page <- p
    parsed_url$query <- opts
    url <- build_url(parsed_url)
    response <- GET(url)
    stop_for_status(response)
    page_content <- content(response)
    content <- append(content, list(page_content))
    # check if results left
    hits <- page_content$response$meta$hits
    offset <- page_content$response$meta$offset
    if(p == 1){
      cat(crayon::blue(cli::symbol$info), hits, "results available\n")
    } else {
      pb$tick()
      Sys.sleep(6)  # presumably to respect the NYT API rate limit -- confirm
    }
    if(offset >= hits)
      break
  }
  pb$terminate()
  # One list per downloaded page, each with a $response element; transpose
  # groups the response fields across pages.
  content %>%
    map("response") %>%
    transpose()
}
# assign 18s algae to custom database
library(tidyverse)
library(DECIPHER)
library(here)
algae_18s <- readDNAStringSet(here("data/asv_seq_data/algae_18s.fasta"))
reference <- readDNAStringSet(here("reference_database/snow_algae_18s_taxonomy.fasta"))
curated_ref <- readDNAStringSet(here("reference_database/select_snow_algae_refs.fasta"))
# train classifier and assign taxonomy
trained_reference <- curated_ref %>%
LearnTaxa(names(curated_ref))
assignments <- IdTaxa(algae_18s, trained_reference, threshold = 50)
# convert to tbl
taxonomy_tbl <- tibble(list = assignments) %>%
add_column(asv_id = names(assignments)) %>%
hoist(list,
taxon = "taxon",
conf = "confidence") %>%
hoist(taxon,
root = 1,
domain = 2,
kingdom = 3,
phylum = 4,
class=5,
order=6,
family=7,
genus=8,
species=9) %>%
hoist(conf,
root_conf = 1,
domain_conf = 2,
kingdom_conf = 3,
phylum_conf = 4,
class_conf=5,
order_conf=6,
family_conf=7,
genus_conf=8,
species_conf=9)
taxonomy_select <- taxonomy_tbl %>%
select(asv_id, phylum, class, order, family, genus, species, phylum_conf, class_conf, order_conf, family_conf, genus_conf, species_conf)
# Replace "unclassified_*" placeholder taxa (as emitted by DECIPHER's IdTaxa)
# with NA so downstream logic can treat them as missing assignments.
# x: character vector of taxon names; returns a same-length character vector.
label_na <- function(x){
  if_else(str_detect(x,"unclass"), NA_character_, x)
}
taxonomy_na_labeled <- taxonomy_select %>%
mutate_at(2:7, label_na)
# Determine the finest taxonomic rank assigned to each ASV and extract the
# taxon name at that rank.
best_assignment <- taxonomy_na_labeled %>%
  # make new col detecting assignment level (finest non-NA rank)
  mutate(id_level = case_when(is.na(class)~"phylum",
                              is.na(order)~"class",
                              is.na(family)~"order",
                              is.na(genus)~"family",
                              is.na(species)~"genus",
                              TRUE~"species")) %>%
  # make new col with the taxon name at the assignment level.
  # BUG FIX: the original case_when had no "phylum" branch, so ASVs
  # classified only to phylum got NA here even though the phylum was known.
  mutate(best_assignment = case_when(id_level == "phylum"~phylum,
                                     id_level == "class"~class,
                                     id_level == "order"~order,
                                     id_level == "family"~family,
                                     id_level == "genus"~genus,
                                     id_level=="species"~species))
best_assignment %>% view
# output
write_csv(best_assignment, path = here("output/algae_18s_assignments_w_select_snow_algae.csv"))
| /code/exploratory/assign_18s_algae_to_custom.R | no_license | cengstro/bc_snow_algae_amplicon | R | false | false | 2,356 | r | # assign 18s algae to custom database
library(tidyverse)
library(DECIPHER)
library(here)
algae_18s <- readDNAStringSet(here("data/asv_seq_data/algae_18s.fasta"))
reference <- readDNAStringSet(here("reference_database/snow_algae_18s_taxonomy.fasta"))
curated_ref <- readDNAStringSet(here("reference_database/select_snow_algae_refs.fasta"))
# train classifier and assign taxonomy
trained_reference <- curated_ref %>%
LearnTaxa(names(curated_ref))
assignments <- IdTaxa(algae_18s, trained_reference, threshold = 50)
# convert to tbl
taxonomy_tbl <- tibble(list = assignments) %>%
add_column(asv_id = names(assignments)) %>%
hoist(list,
taxon = "taxon",
conf = "confidence") %>%
hoist(taxon,
root = 1,
domain = 2,
kingdom = 3,
phylum = 4,
class=5,
order=6,
family=7,
genus=8,
species=9) %>%
hoist(conf,
root_conf = 1,
domain_conf = 2,
kingdom_conf = 3,
phylum_conf = 4,
class_conf=5,
order_conf=6,
family_conf=7,
genus_conf=8,
species_conf=9)
taxonomy_select <- taxonomy_tbl %>%
select(asv_id, phylum, class, order, family, genus, species, phylum_conf, class_conf, order_conf, family_conf, genus_conf, species_conf)
label_na <- function(x){
if_else(str_detect(x,"unclass"), NA_character_, x)
}
taxonomy_na_labeled <- taxonomy_select %>%
mutate_at(2:7, label_na)
best_assignment <- taxonomy_na_labeled %>%
# make new col detecting assignment level
mutate(id_level = case_when(is.na(class)~"phylum",
is.na(order)~"class",
is.na(family)~"order",
is.na(genus)~"family",
is.na(species)~"genus",
TRUE~"species")) %>%
# make new col with best assignment
mutate(best_assignment = case_when(id_level == "class"~class,
id_level == "order"~order,
id_level == "family"~family,
id_level == "genus"~genus,
id_level=="species"~species))
best_assignment %>% view
# output
write_csv(best_assignment, path = here("output/algae_18s_assignments_w_select_snow_algae.csv"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/combine_data.R
\name{combine_data}
\alias{combine_data}
\title{Combine datasets}
\usage{
combine_data(d, levs = NULL)
}
\arguments{
\item{d}{A list of data frames to combine. Numbers of columns should match.}
\item{levs}{Optional list representing columns to add, where each element contains the levels to assign to each data frame.}
}
\description{
Combine multiple data frames and add extra columns to identify them.
}
| /man/combine_data.Rd | no_license | rscherrer/egstools | R | false | true | 500 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/combine_data.R
\name{combine_data}
\alias{combine_data}
\title{Combine datasets}
\usage{
combine_data(d, levs = NULL)
}
\arguments{
\item{d}{A list of data frames to combine. Numbers of columns should match.}
\item{levs}{Optional list representing columns to add, where each element contains the levels to assign to each data frame.}
}
\description{
Combine multiple data frames and add extra columns to identify them.
}
|
# Load each shared library, searching LD_LIBRARY_PATH for the first match.
#
# libnames: character vector of shared-library file names (e.g. "rosR.so").
# Side effect: calls dyn.load() on each resolved path; no useful return value.
load.dynamic.libraries <- function(libnames) {
	for (libname in libnames) {
		found_file <- libname  # fall back to the bare name if no search-path hit
		for (path in unlist(strsplit(Sys.getenv("LD_LIBRARY_PATH"), ":", fixed = TRUE))) {
			try_file <- file.path(path, libname)
			if (file.exists(try_file)) {
				found_file <- try_file
				break
			}
		}
		# BUG FIX: log the file actually loaded. The original logged
		# `try_file` (the last candidate tried), which was misleading and
		# errored outright when LD_LIBRARY_PATH was empty, because
		# `try_file` was then never defined.
		write(paste("Loading :", found_file), stderr())
		dyn.load(found_file)
	}
}
# dyn.load(paste("rosR", .Platform$dynlib.ext, sep=""))
load.dynamic.libraries("rosR.so")
source("rosR.R")
source("std_vector.R")
cacheMetaData(1)
# mandatory option for not loosing precision...
options(digits=22)
# Lookup table of the ROS primitive field datatypes, grouped by the R storage
# mode they are decoded into; `all` is used to recognise leaf fields when
# parsing message definitions (see get_msg_convertion()).
rros_base_types <<- list(
	all = c( "bool", "string", "int8", "uint8", "int16", "uint16",
	"int32", "uint32", "int64", "uint64", "float32",
	"float64", "duration", "time", "byte", "char" ),
	integer = c( "int8", "uint8", "int16", "uint16", "int32", "uint32",
	"int64", "uint64", "byte", "char"),
	double = c( "float32", "float64", "time", "duration" ),
	logical = c( "bool"),
	character = c( "string") )
# buffer for storing already parsed message definitions: parallel lists,
# all indexed by the position of the type name in `strType`; filled lazily
# by ros.Message().
rros_msg_buffer <<- list(strType=c(), rosConversions=c(), rosReadExpr=c(), rosWriteExpr=c(), rosTypes=c())
# only once initialized: handle of the ROS node, set by ros.Init().
rros_node <<- c()
# Return the full text definition of a ROS message type.
#
# msg: message type string, e.g. "std_msgs/String".
# Returns a character vector, one element per line of `rosmsg show` output.
get_msg_def <- function(msg) {
	system(paste("rosmsg show", msg), intern = TRUE)
}
# Return the md5 checksum of a ROS message type.
#
# msg: message type string, e.g. "std_msgs/String".
# Returns the checksum as reported by `rosmsg md5`.
get_msg_md5 <- function(msg) {
	system(paste("rosmsg md5", msg), intern = TRUE)
}
# Build the default R value for one ROS message field.
#
# type:     ROS base datatype name (e.g. "int32", "string", "time").
# ar:       TRUE if the field is an array.
# size:     fixed length for sized fields (0 = plain scalar).
# constant: TRUE if the field is a ROS constant (TYPE NAME=VALUE).
# value:    the constant's literal value (as a string) when constant=TRUE.
#
# Returns a zero-valued scalar, the parsed constant value, or (for arrays)
# a project rros_vector handle wrapping the underlying C++ vector.
basic_datatype_translation <- function(type, ar, size, constant, value){
	# Collapse ROS type aliases onto the primitive wire types.
	if(type == "time" || type == "duration"){
		type <- "float64"
	} else if(type == "byte" ) {
		type <- "int8"
	} else if(type == "char" || type == "bool") {
		type <- "uint8"
	}
	if(ar==FALSE){
		if(constant == FALSE){
			# standard conversion: pick a zero default matching the
			# R storage mode; sized fields get a pre-allocated vector
			# wrapped in a list
			if(is.element(type, rros_base_types$integer)) {
				if(size == 0)	el <- 0
				else			el <- list(integer(size))
			} else if(is.element(type, rros_base_types$double)) {
				if(size == 0)	el <- 0.0
				else 			el <- list(double(size))
			} else if(is.element(type, rros_base_types$logical)) {
				if(size == 0)	el <- F
				else 			el <- list(logical(size))
			} else if(is.element(type, rros_base_types$character)) {
				if(size == 0)	el <- ''
				else 			el <- list(character(size))
			} else {
				# non-primitive (nested message): empty placeholder
				el <- list()
			}
		} else {
			# constant field: parse the literal into the matching R type
			if(is.element(type, rros_base_types$integer)) {
				el <- strtoi(value)
			} else if(is.element(type, rros_base_types$double)) {
				el <- as.numeric(value)
			} else if(is.element(type, rros_base_types$logical)) {
				el <- as.logical(value)
			} else {
				el <- value
			}
		}
	}
	# array fields are backed by a C++ vector (project rros_vector class)
	else{
		el <- rros_vector(type)
	}
	return(el)
}
# Build the R source line that (de)serialises one ROS message field.
#
# type: ROS base datatype; aliases (time/duration/byte/char/bool) are mapped
#       to their wire-level primitives first.
# ar:   TRUE for array fields, FALSE for scalars.
# size: array length forwarded to the stream reader (unused for scalars and
#       for writes).
# var:  field accessor path inside `msg` (e.g. "header$seq").
# read: TRUE -> generate a read (stream -> msg) expression,
#       FALSE -> a write (msg -> stream) expression.
#
# Returns a single character string of R code referencing `msg` and `stream`.
basic_datatype_conversion <- function(type, ar, size, var="", read=TRUE){
	# Collapse ROS type aliases onto the primitive wire types.
	if (type == "time" || type == "duration") {
		type <- "float64"
	} else if (type == "byte") {
		type <- "int8"
	} else if (type == "char" || type == "bool") {
		type <- "uint8"
	}
	# paste0() instead of paste(..., sep = "") for the code templates;
	# arrays go through the @ptr slot of the backing rros_vector.
	if (read) {
		if (ar) {
			paste0("msg$", var, "@ptr<-rros_stream_read_", type, "_array(stream, ", size, ")")
		} else {
			paste0("msg$", var, "<-rros_stream_read_", type, "(stream)")
		}
	} else {
		if (ar) {
			paste0("rros_stream_write_", type, "_array(stream, msg$", var, "@ptr)")
		} else {
			paste0("rros_stream_write_", type, "(stream, msg$", var, ")")
		}
	}
}
# Parse a ROS message definition into a flat field-conversion table.
#
# msg:     message type string (only used to fetch the definition).
# msg_def: lines of the definition (from `rosmsg show`); fetched when NaN.
# space:   current indentation prefix -- `rosmsg show` indents nested
#          message fields by two extra spaces per level.
#
# Returns a row-per-leaf-field table (built with rbind2 of lists) with
# columns: element (accessor path, "$"-joined), datatype, array,
# array_size, constant, constant_value. Nested messages are flattened
# recursively.
get_msg_convertion <- function(msg, msg_def=NaN, space=""){
	if(is.nan(msg_def[1])){
		msg_def <- get_msg_def(msg)
	}
	cls <- c()
	for(i in (1:length(msg_def))){
		isArray <- c(FALSE, 0)
		isConstant <- FALSE
		valueConstant <- 0
		# split "type name[=value]" after stripping leading indentation
		var <- strsplit(sub("^ +", "", msg_def[i]), " ")[[1]]
		# same number of spaces: only handle lines at the current
		# nesting level; deeper lines are consumed by the recursion below
		if(grepl(paste("^", space, "[[:alpha:]]", sep=""), msg_def[i])){
			# array ? -- "type[]" or "type[N]"; strtoi of "" gives NA
			# for unsized arrays
			if(grepl("[", var[1], fixed=TRUE)){
				ar <- strsplit(var[1], "[", fixed=TRUE)[[1]]
				var[1] <- ar[1]
				isArray[1] <- TRUE
				isArray[2] <- strtoi(strsplit(ar[2], "]", fixed=TRUE)[[1]])
			}
			# constant ? -- "NAME=VALUE"
			if(grepl("=", var[2], fixed=TRUE)){
				constant <- strsplit(var[2], "=", fixed=TRUE)[[1]]
				var[2] <- constant[1]
				valueConstant <- constant[2]
				isConstant <- TRUE
			}
			# "function" is a reserved word in R -- rename the field
			if(var[2] == "function"){ var[2] <- "function_"}
			# if it is a final type (primitive leaf field): emit one row
			if( is.element(var[1], rros_base_types$all) ){
				el <- list(	element=var[2], datatype=var[1],
							array=isArray[1], array_size=isArray[2],
							constant=isConstant, constant_value=valueConstant)
				cls <- rbind2(cls, el)
			} else{
				# nested message: recurse on the remaining lines one
				# indentation level deeper, then prefix the accessor paths
				sub_cls <- get_msg_convertion(msg, msg_def[i+1:length(msg_def)], paste(space, " "))
				for(j in (1:nrow(sub_cls))){
					sub_cls[j,1] <- paste(var[2], sub_cls[j,1], sep="$")
					cls <- rbind2(cls, sub_cls[j,])
				}
			}
		} else {
			# left the current nesting level inside a recursive call: stop
			if(nchar(space)>0){
				break
			}
		}
	}
	return(cls)
}
#get_msg_convertion("rosgraph_msgs/Log")
# ros interface functions ...
# Initialise the rosR node and store its handle in the file-global
# `rros_node`, which the publisher/subscriber/parameter functions use.
ros.Init <- function(name){
	rros_node <<- rrosInitNode(name)
}
# Send `str` to the ROS logging system; `mode` is the severity
# (0 = debug, 1 = info, 2 = warn, 3 = error, 4 = fatal).
ros.Logging <- function(str, mode=1){ rrosLog(str, mode) }
# Severity-specific convenience wrappers around ros.Logging().
ros.Debug <- function(str){ ros.Logging(str,0) }
ros.Info <- function(str){ ros.Logging(str,1) }
ros.Warn <- function(str){ ros.Logging(str,2) }
ros.Error <- function(str){ ros.Logging(str,3) }
ros.Fatal <- function(str){ ros.Logging(str,4) }
# Create a subscriber on `topic` for messages of `type`; the message
# definition and md5 checksum are resolved by shelling out to `rosmsg`.
# Returns the subscriber handle produced by the C binding.
ros.Subscriber <- function(topic, type=""){
	msg_def <- get_msg_def(type)
	if(length(msg_def)==0) {
		# NOTE(review): only warns -- construction still proceeds with an
		# empty definition; confirm whether this should abort instead.
		ros.Warn(paste("unknown message format:", type))
	}
	subscriber <- rrosSubscriber(rros_node, topic, type, msg_def, get_msg_md5(type))
	return(subscriber)
}
# Create a publisher on `topic` for messages of `type`; mirrors
# ros.Subscriber() including the warn-and-continue behaviour above.
ros.Publisher <- function(topic, type=""){
	msg_def <- get_msg_def(type)
	if(length(msg_def)==0) {
		ros.Warn(paste("unknown message format:", type))
	}
	publisher <- rrosPublisher(rros_node, topic, type, msg_def, get_msg_md5(type))
	return(publisher)
}
# Create (and memoise) an empty R message of the given ROS type.
#
# type:    ROS message type string, e.g. "std_msgs/String".
# convert: 0 -> return the empty message structure (default)
#          1 -> return the cached read expressions (stream -> msg)
#          2 -> return the cached write expressions (msg -> stream)
#          3 -> return the parsed field-conversion table
#
# The first call for a type parses its definition and caches the message
# template plus the generated (de)serialisation expressions in the global
# `rros_msg_buffer`; later calls are served from that cache.
ros.Message <- function(type, convert=0) {
	# was already created ...
	if(is.element(type, rros_msg_buffer$strType)){
		pos <- which(rros_msg_buffer$strType == type)
		msg <- rros_msg_buffer$rosTypes[[pos]]
		exprRead <- rros_msg_buffer$rosReadExpr[[pos]]
		exprWrite <- rros_msg_buffer$rosWriteExpr[[pos]]
		conv <- rros_msg_buffer$rosConversions[[pos]]
	} else {
		rros_msg_buffer$strType <<- append(rros_msg_buffer$strType, type)
		conv <- get_msg_convertion(type)
		# BUG FIX: append the table to rosConversions. The original
		# appended to the non-existent rros_msg_buffer$rosType, so the
		# conversions cache always held only the most recent table and
		# cache hits for earlier types indexed out of bounds / wrongly.
		rros_msg_buffer$rosConversions <<- append(rros_msg_buffer$rosConversions, list(conv))
		# build the empty message template field by field
		msg <- list()
		for(i in seq_len(nrow(conv))){
			eval(parse(text=paste0("msg$", conv[i,1],
				"<-basic_datatype_translation('", conv[i,2], "',", conv[i,3], ",",
				conv[i,4], ",", conv[i,5], ",'", conv[i,6], "')")))
		}
		rros_msg_buffer$rosTypes <<- append(rros_msg_buffer$rosTypes, list(msg))
		# create read expressions (constants are never on the wire)
		exprRead <- c()
		for(i in seq_len(nrow(conv))){
			if(conv[i,5] == FALSE){
				expr <- parse(text=basic_datatype_conversion(conv[i,2], conv[i,3], conv[i,4], conv[i,1], TRUE))
				exprRead <- c(exprRead, expr)
			}
		}
		rros_msg_buffer$rosReadExpr <<- append(rros_msg_buffer$rosReadExpr, list(exprRead))
		# create write expressions
		exprWrite <- c()
		for(i in seq_len(nrow(conv))){
			if(conv[i,5] == FALSE){
				expr <- parse(text=basic_datatype_conversion(conv[i,2], conv[i,3], conv[i,4], conv[i,1], FALSE))
				exprWrite <- c(exprWrite, expr)
			}
		}
		rros_msg_buffer$rosWriteExpr <<- append(rros_msg_buffer$rosWriteExpr, list(exprWrite))
	}
	if(convert == 1){
		return(exprRead)
	} else if(convert == 2){
		return(exprWrite)
	} else if(convert == 3){
		return(conv)
	} else {
		return(msg)
	}
}
# Decode the message currently buffered on `subscriber`.
# Builds an empty template for the subscriber's type, then evaluates the
# cached read expressions -- each assigns one field from `stream` into
# `msg` (both names are looked up in this function's frame).
ros.ReadMessage <- function(subscriber){
	type <- rrosSubscriberGetMessageType(subscriber)
	msg <- ros.Message(type)
	conv <- ros.Message(type, convert=1)
	stream <- rrosSubscriberGetMessageStream(subscriber)
	for(expr in conv){
		eval(expr)
	}
	return(msg)
}
# Serialise `msg` onto the publisher's stream and publish it.
# The cached write expressions read from `msg` and write into `stream`
# (resolved in this function's frame), then the C side sends the message.
ros.WriteMessage <- function(publisher, msg){
	type <- rrosPublisherGetMessageType(publisher)
	conv <- ros.Message(type, convert=2)
	stream <- rrosPublisherGetMessageStream(publisher)
	for(expr in conv){
		eval(expr)
	}
	rrosPublish(publisher)
}
# Process one round of pending ROS callbacks.
ros.SpinOnce <- function(){
	rrosSpinOnce()
}
# Current ROS time (as returned by the C binding).
ros.TimeNow <- function(){
	rrosTimeNow()
}
# TRUE if a new message arrived on `subscriber` since the last read.
ros.SubscriberHasNewMessage <- function(subscriber){
	return(rrosSubscriberHasNewMsg(subscriber))
}
# TRUE while the ROS node is alive (not shut down).
ros.OK <- function(){
	rrosOK()
}
# Read messages from a ROS bag file.
#
# filename: path to the .bag file.
# topics:   character vector of topics to extract ("" = all).
# max_size: maximum number of messages to read (-1 = unlimited).
#
# Returns a list of parallel elements: topic, data_type, message (a list of
# decoded messages) and time_stamp.
ros.BagRead <- function(filename, topics="", max_size=-1){
	# Build the C++ string vector of requested topics.
	# NOTE(review): append() here is assumed to be an rros_vector method
	# that mutates the underlying C++ vector through @ptr (its return value
	# is discarded) -- confirm; base::append() would make this loop a no-op.
	vecTopics <- rros_vector("string")
	for(topic in topics){
		append(vecTopics, topic)
	}
	vecBag <- rrosBagRead(filename, vecTopics@ptr, max_size)
	rros_vector_remove(vecTopics)
	rm(vecTopics)
	topics <- c()
	data_types <- c()
	messages <- c()
	time_stamps<- c()
	# BUG FIX: seq_len(n) - 1L yields the 0-based indices 0..n-1 and, unlike
	# the original 0:(n-1), correctly produces an empty sequence for an
	# empty bag (0:(0-1) iterates over c(0, -1)).
	n_messages <- vector_bag_size(vecBag)[1]
	for(i in seq_len(n_messages) - 1L){
		el <- vector_bag___getitem__(vecBag,i)
		data_type <- BagMessage_datatype_get(el)
		msg <- ros.Message(data_type)              # empty template
		conv <- ros.Message(data_type, convert=1)  # cached read expressions
		stream <- BagMessage_isStream_get(el)
		# each expression reads one field from `stream` into `msg`
		for(expr in conv){
			eval(expr)
		}
		data_types <- c(data_types, data_type)
		topics <- c(topics, BagMessage_topic_get(el))
		messages <- c(messages, list(msg))
		time_stamps <- c(time_stamps, BagMessage_time_stamp_get(el))
	}
	return( list(topic=topics, data_type=data_types, message=messages, time_stamp=time_stamps) )
}
#ROS_BagRead("/home/andre/2012-12-27-15-49-57.bag")
# Set a parameter on the ROS parameter server, dispatching on the R type
# of `value`; logical, integer, double and character are supported.
ros.ParamSet <- function(param, value){
	type <- typeof(value)
	if(type == 'logical'){
		rrosSetParamBoolean(rros_node, param, value)
	} else if(type == 'integer'){
		rrosSetParamInteger(rros_node, param, value)
	} else if(type == 'double'){
		rrosSetParamDouble(rros_node, param, value)
	} else if(type == 'character'){
		rrosSetParamString(rros_node, param, value)
	} else {
		# previously unsupported types were dropped silently
		warning("ros.ParamSet: unsupported value type '", type,
			"' for parameter '", param, "'", call. = FALSE)
	}
}
# Read a parameter from the ROS parameter server.
# Returns NULL when the parameter does not exist or has an unsupported type.
ros.ParamGet <- function(param){
	type <- ros.ParamType(param)
	# BUG FIX: ros.ParamType() returns NULL for unknown parameters; without
	# this guard `type == 'logical'` evaluates to logical(0) and if() errors
	# with "argument is of length zero".
	if(is.null(type)){
		return(NULL)
	}
	if(type == 'logical'){
		return(rrosGetParamBoolean(rros_node, param))
	} else if(type == 'integer'){
		return(rrosGetParamInteger(rros_node, param))
	} else if(type == 'double'){
		return(rrosGetParamDouble(rros_node, param))
	} else if(type == 'character'){
		return(rrosGetParamString(rros_node, param))
	}
	return(NULL)
}
# Return the R type name of a parameter on the parameter server
# ('logical', 'integer', 'double' or 'character'), or NULL when the
# parameter does not exist (the C side reports this as the string "NULL").
ros.ParamType <- function(param){
	type <- rrosGetParamType(rros_node, param)
	if(type == "NULL"){
		return(NULL)
	}
	return(type)
}
# Remove a parameter from the ROS parameter server.
ros.ParamDelete <- function(param){
	rrosDeleteParam(rros_node, param)
}
| /lib/ros.R | no_license | andre-dietrich/rosR | R | false | false | 10,224 | r | load.dynamic.libraries<-function(libnames) {
for(libname in libnames) {
found_file=libname;
for(path in unlist(strsplit(Sys.getenv("LD_LIBRARY_PATH"),":",fixed=TRUE))) {
try_file <- paste0(path,"/",libname);
if( file.exists(try_file) ) {
found_file = try_file;
break;
}
}
write(paste("Loading :", try_file), stderr())
dyn.load(found_file);
}
}
# dyn.load(paste("rosR", .Platform$dynlib.ext, sep=""))
load.dynamic.libraries("rosR.so")
source("rosR.R")
source("std_vector.R")
cacheMetaData(1)
# mandatory option for not loosing precision...
options(digits=22)
rros_base_types <<- list(
all = c( "bool", "string", "int8", "uint8", "int16", "uint16",
"int32", "uint32", "int64", "uint64", "float32",
"float64", "duration", "time", "byte", "char" ),
integer = c( "int8", "uint8", "int16", "uint16", "int32", "uint32",
"int64", "uint64", "byte", "char"),
double = c( "float32", "float64", "time", "duration" ),
logical = c( "bool"),
character = c( "string") )
# buffer for storing allready parsed message definitions...
rros_msg_buffer <<- list(strType=c(), rosConversions=c(), rosReadExpr=c(), rosWriteExpr=c(), rosTypes=c())
# only once initialized ...
rros_node <<- c()
get_msg_def <- function(msg){
command <- paste("rosmsg show", msg)
msg_def <- system(command, intern=TRUE)
return(msg_def)
}
get_msg_md5 <- function(msg){
command <- paste("rosmsg md5", msg)
msg_md5 <- system(command, intern=TRUE)
return(msg_md5)
}
basic_datatype_translation <- function(type, ar, size, constant, value){
if(type == "time" || type == "duration"){
type <- "float64"
} else if(type == "byte" ) {
type <- "int8"
} else if(type == "char" || type == "bool") {
type <- "uint8"
}
if(ar==FALSE){
if(constant == FALSE){
# standard conversation
if(is.element(type, rros_base_types$integer)) {
if(size == 0) el <- 0
else el <- list(integer(size))
} else if(is.element(type, rros_base_types$double)) {
if(size == 0) el <- 0.0
else el <- list(double(size))
} else if(is.element(type, rros_base_types$logical)) {
if(size == 0) el <- F
else el <- list(logical(size))
} else if(is.element(type, rros_base_types$character)) {
if(size == 0) el <- ''
else el <- list(character(size))
} else {
el <- list()
}
} else {
if(is.element(type, rros_base_types$integer)) {
el <- strtoi(value)
} else if(is.element(type, rros_base_types$double)) {
el <- as.numeric(value)
} else if(is.element(type, rros_base_types$logical)) {
el <- as.logical(value)
} else {
el <- value
}
}
}
else{
el <- rros_vector(type)
}
return(el)
}
basic_datatype_conversion <- function(type, ar, size, var="", read=TRUE){
if(type == "time" || type == "duration"){
type <- "float64"
} else if(type == "byte" ) {
type <- "int8"
} else if(type == "char" || type == "bool") {
type <- "uint8"
}
el <- ""
if(read){
if(ar==FALSE){
el <- paste("msg$", var,"<-rros_stream_read_", type, "(stream)", sep="")
} else {
el <- paste("msg$", var,"@ptr<-rros_stream_read_", type, "_array(stream, ", size,")", sep="")
}
} else {
if(ar==FALSE){
el <- paste("rros_stream_write_", type, "(stream, msg$", var,")", sep="")
} else {
el <- paste("rros_stream_write_", type, "_array(stream, msg$", var,"@ptr)", sep="")
}
}
return(el)
}
get_msg_convertion <- function(msg, msg_def=NaN, space=""){
if(is.nan(msg_def[1])){
msg_def <- get_msg_def(msg)
}
cls <- c()
for(i in (1:length(msg_def))){
isArray <- c(FALSE, 0)
isConstant <- FALSE
valueConstant <- 0
var <- strsplit(sub("^ +", "", msg_def[i]), " ")[[1]]
# same number of spaces
if(grepl(paste("^", space, "[[:alpha:]]", sep=""), msg_def[i])){
# array ?
if(grepl("[", var[1], fixed=TRUE)){
ar <- strsplit(var[1], "[", fixed=TRUE)[[1]]
var[1] <- ar[1]
isArray[1] <- TRUE
isArray[2] <- strtoi(strsplit(ar[2], "]", fixed=TRUE)[[1]])
}
# constant ?
if(grepl("=", var[2], fixed=TRUE)){
constant <- strsplit(var[2], "=", fixed=TRUE)[[1]]
var[2] <- constant[1]
valueConstant <- constant[2]
isConstant <- TRUE
}
if(var[2] == "function"){ var[2] <- "function_"}
# if it is a final type
if( is.element(var[1], rros_base_types$all) ){
el <- list( element=var[2], datatype=var[1],
array=isArray[1], array_size=isArray[2],
constant=isConstant, constant_value=valueConstant)
cls <- rbind2(cls, el)
} else{
sub_cls <- get_msg_convertion(msg, msg_def[i+1:length(msg_def)], paste(space, " "))
for(j in (1:nrow(sub_cls))){
sub_cls[j,1] <- paste(var[2], sub_cls[j,1], sep="$")
cls <- rbind2(cls, sub_cls[j,])
}
}
} else {
if(nchar(space)>0){
break
}
}
}
return(cls)
}
#get_msg_convertion("rosgraph_msgs/Log")
# ros interface functions ...
ros.Init <- function(name){
  # Initialize the ROS node for this session under `name` and store the
  # handle in the global `rros_node`, which all other ros.* wrappers use.
  rros_node <<- rrosInitNode(name)
}
# Logging wrappers around the C-level rrosLog; `mode` selects the severity
# level (0=debug, 1=info, 2=warn, 3=error, 4=fatal, mirroring the calls below).
ros.Logging <- function(str, mode=1){ rrosLog(str, mode) }
ros.Debug <- function(str){ ros.Logging(str,0) }
ros.Info <- function(str){ ros.Logging(str,1) }
ros.Warn <- function(str){ ros.Logging(str,2) }
ros.Error <- function(str){ ros.Logging(str,3) }
ros.Fatal <- function(str){ ros.Logging(str,4) }
ros.Subscriber <- function(topic, type=""){
  # Create a subscriber on `topic` for ROS messages of the given `type`.
  # A warning is logged (but creation still proceeds) when the message
  # definition for `type` cannot be found.
  definition <- get_msg_def(type)
  if (length(definition) == 0) {
    ros.Warn(paste("unknown message format:", type))
  }
  return(rrosSubscriber(rros_node, topic, type, definition, get_msg_md5(type)))
}
ros.Publisher <- function(topic, type=""){
  # Create a publisher on `topic` for ROS messages of the given `type`.
  # A warning is logged (but creation still proceeds) when the message
  # definition for `type` cannot be found.
  definition <- get_msg_def(type)
  if (length(definition) == 0) {
    ros.Warn(paste("unknown message format:", type))
  }
  return(rrosPublisher(rros_node, topic, type, definition, get_msg_md5(type)))
}
ros.Message <- function(type, convert=0) {
  # Build (and memoize) an empty message template of the given ROS `type`
  # together with the pre-parsed expressions needed to (de)serialize it.
  #
  # Args:
  #   type:    ROS message type string, e.g. "rosgraph_msgs/Log".
  #   convert: 0 -> return the empty message template (default)
  #            1 -> return the stream-reading expressions
  #            2 -> return the stream-writing expressions
  #            3 -> return the raw conversion table
  #
  # Results are cached in the global `rros_msg_buffer`, so each message type
  # is analyzed only once per session; the four buffer lists are kept
  # index-aligned with strType.
  if(is.element(type, rros_msg_buffer$strType)){
    # Cache hit: reuse previously computed template and expressions.
    pos <- which(rros_msg_buffer$strType == type)
    msg <- rros_msg_buffer$rosTypes[[pos]]
    exprRead <- rros_msg_buffer$rosReadExpr[[pos]]
    exprWrite <- rros_msg_buffer$rosWriteExpr[[pos]]
    conv <- rros_msg_buffer$rosConversions[[pos]]
  } else {
    rros_msg_buffer$strType <<- append(rros_msg_buffer$strType, type)
    conv <- get_msg_convertion(type)
    # BUG FIX: previously this appended to the non-existent field
    # rros_msg_buffer$rosType (NULL), which reset the conversion cache to a
    # single entry on every miss and misaligned it with strType; append to
    # rosConversions so the cache indices stay in sync.
    rros_msg_buffer$rosConversions <<- append(rros_msg_buffer$rosConversions, list(conv))
    # Create the empty message template field by field.
    msg <- list()
    for(i in seq_len(nrow(conv))){
      eval(parse(text=paste("msg$",
                            conv[i,1],
                            "<-basic_datatype_translation('",conv[i,2],"',", conv[i,3], "," , conv[i,4], "," , conv[i,5],",'" , conv[i,6],"')",
                            sep="")))
    }
    rros_msg_buffer$rosTypes <<- append(rros_msg_buffer$rosTypes, list(msg))
    # Pre-parse the stream-reading expressions; constants (conv[i,5]) are
    # part of the definition and never serialized.
    exprRead <- c()
    for(i in seq_len(nrow(conv))){
      if(conv[i,5] == FALSE){
        expr<-parse(text=basic_datatype_conversion(conv[i,2], conv[i,3], conv[i,4], conv[i,1], TRUE) )
        exprRead <- c(exprRead, expr)
      }
    }
    rros_msg_buffer$rosReadExpr <<- append(rros_msg_buffer$rosReadExpr, list(exprRead))
    # Pre-parse the stream-writing expressions.
    exprWrite <- c()
    for(i in seq_len(nrow(conv))){
      if(conv[i,5] == FALSE){
        expr<-parse(text=basic_datatype_conversion(conv[i,2], conv[i,3], conv[i,4], conv[i,1], FALSE) )
        exprWrite <- c(exprWrite, expr)
      }
    }
    rros_msg_buffer$rosWriteExpr <<- append(rros_msg_buffer$rosWriteExpr, list(exprWrite))
  }
  if(convert == 1){
    return(exprRead)
  } else if(convert == 2){
    return(exprWrite)
  } else if(convert == 3){
    return(conv)
  } else {
    return(msg)
  }
}
ros.ReadMessage <- function(subscriber){
  # Deserialize the most recently received message of `subscriber` into an
  # R message (nested list) and return it.
  #
  # NOTE: the cached read expressions returned by ros.Message(type, convert=1)
  # refer to local variables literally named `msg` and `stream`; these local
  # names must not be changed.
  type <- rrosSubscriberGetMessageType(subscriber)
  msg <- ros.Message(type)
  conv <- ros.Message(type, convert=1)
  stream <- rrosSubscriberGetMessageStream(subscriber)
  # Evaluate each pre-parsed expression, filling msg field by field from stream.
  for(expr in conv){
    eval(expr)
  }
  return(msg)
}
ros.WriteMessage <- function(publisher, msg){
  # Serialize `msg` into the publisher's byte stream and publish it.
  #
  # NOTE: the cached write expressions returned by ros.Message(type, convert=2)
  # refer to local variables literally named `msg` and `stream`; the parameter
  # name `msg` and local `stream` must not be changed.
  type <- rrosPublisherGetMessageType(publisher)
  conv <- ros.Message(type, convert=2)
  stream <- rrosPublisherGetMessageStream(publisher)
  # Evaluate each pre-parsed expression, writing msg field by field to stream.
  for(expr in conv){
    eval(expr)
  }
  rrosPublish(publisher)
}
ros.SpinOnce <- function(){
  # Process pending ROS callbacks once (thin wrapper around the C binding).
  rrosSpinOnce()
}
ros.TimeNow <- function(){
  # Current ROS time (thin wrapper around the C binding).
  rrosTimeNow()
}
ros.SubscriberHasNewMessage <- function(subscriber){
  # TRUE when `subscriber` has an unread message waiting.
  return(rrosSubscriberHasNewMsg(subscriber))
}
ros.OK <- function(){
  # Node liveness flag -- presumably mirrors ros::ok() (TRUE until shutdown).
  rrosOK()
}
ros.BagRead <- function(filename, topics="", max_size=-1){
  # Read messages from a ROS bag file.
  #
  # Args:
  #   filename: path to the .bag file.
  #   topics:   topic names to keep ("" appears to mean all topics --
  #             confirm against the C-level rrosBagRead).
  #   max_size: presumably a limit on the number of messages (-1 = no limit);
  #             TODO confirm.
  #
  # Returns a list of parallel vectors/lists: topic, data_type, message
  # (deserialized R messages) and time_stamp.
  vecTopics <- rros_vector("string")
  for(topic in topics){
    # NOTE(review): base append() would discard its result here; this only
    # works if append() dispatches to a class method that mutates the C++
    # vector behind vecTopics@ptr -- verify.
    append(vecTopics, topic)
  }
  vecBag <- rrosBagRead(filename, vecTopics@ptr, max_size)
  # The topic-filter vector is no longer needed once the bag has been read.
  rros_vector_remove(vecTopics)
  rm(vecTopics)
  topics <- c()
  data_types <- c()
  messages <- c()
  time_stamps<- c()
  # NOTE(review): an empty bag makes this loop run over 0:-1, i.e. twice with
  # invalid indices -- confirm vector_bag_size is always > 0 here.
  for(i in (0:(vector_bag_size(vecBag)[1]-1))){
    el <- vector_bag___getitem__(vecBag,i)
    data_type <- BagMessage_datatype_get(el)
    msg <- ros.Message(data_type)
    conv <- ros.Message(data_type, convert=1)
    # The cached read expressions reference the local names `msg` and
    # `stream`; evaluating them fills msg from the message's byte stream.
    stream <- BagMessage_isStream_get(el)
    for(expr in conv){
      eval(expr)
    }
    data_types <- c(data_types, data_type)
    topics <- c(topics, BagMessage_topic_get(el))
    messages <- c(messages, list(msg))
    time_stamps <- c(time_stamps, BagMessage_time_stamp_get(el))
  }
  return( list(topic=topics, data_type=data_types, message=messages, time_stamp=time_stamps) )
}
#ROS_BagRead("/home/andre/2012-12-27-15-49-57.bag")
ros.ParamSet <- function(param, value){
  # Store `value` on the ROS parameter server under `param`, dispatching on
  # the R storage type of the value.  Values of any other type are silently
  # ignored (same behavior as the original if/else chain).
  setter <- switch(typeof(value),
                   logical   = rrosSetParamBoolean,
                   integer   = rrosSetParamInteger,
                   double    = rrosSetParamDouble,
                   character = rrosSetParamString,
                   NULL)
  if (!is.null(setter)) {
    setter(rros_node, param, value)
  }
}
ros.ParamGet <- function(param){
  # Fetch `param` from the ROS parameter server, converted to the matching
  # R type.  Returns NULL when the parameter does not exist or has an
  # unsupported type.
  type <- ros.ParamType(param)
  # BUG FIX: ros.ParamType() returns NULL for unknown parameters; comparing
  # NULL with == yields logical(0), which made if() abort with
  # "argument is of length zero".  Guard before dispatching.
  if(is.null(type)){
    return(NULL)
  }
  if(type == 'logical'){
    return(rrosGetParamBoolean(rros_node, param))
  } else if(type == 'integer'){
    return(rrosGetParamInteger(rros_node, param))
  } else if(type == 'double'){
    return(rrosGetParamDouble(rros_node, param))
  }else if(type == 'character'){
    return(rrosGetParamString(rros_node, param))
  }
  return(NULL)
}
ros.ParamType <- function(param){
  # Report the R type name of `param` on the parameter server.  The C layer
  # answers with the sentinel string "NULL" (presumably when the parameter is
  # unset), which is translated to an R NULL here.
  raw_type <- rrosGetParamType(rros_node, param)
  if (raw_type == "NULL") {
    return(NULL)
  }
  raw_type
}
ros.ParamDelete <- function(param){
  # Remove `param` from the ROS parameter server.
  rrosDeleteParam(rros_node, param)
}
|
# ==================================================================================
# EXTRACT BUFFER 500 M CALCULATING Ha OF SG-AES-GREENING
# ==================================================================================
# Purpose: build 500 m buffers around survey transects and compute, per year,
# the hectares of fallow fields under each management scheme (SG, AES,
# Greening) intersecting each buffer.  Layer names indicate EPSG:23031
# (metre-based UTM), so raster::area()/10000 below yields hectares --
# confirm the CRS if layers change.
rm(list=ls())
library(rgdal)
library(rgeos)
library(raster)
library(dplyr)
# ---- Load data (layers without overlap) ----
# Transects
tr <- readOGR("D:/PhD/Third chapter/GIS", "Trans_2018_EPSG23031") # Contains transects sampled each year (1/0)
# SG (one shapefile per year, 2015-2019; 2014 kept commented out)
#sg14 <- readOGR("C:/Users/Ana/Documents/PhD/Third chapter/GIS/Fallow/SG", layer = "SG_2014_EPSG23031")
#colnames(sg14@data)[colnames(sg14@data) == "Codi"] <- "Codi.2"
sg15 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/SG", layer = "mdm_SG_2015_EPSG23031")
sg16 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/SG", layer = "mdm_SG_2016_EPSG23031")
sg17 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/SG", layer = "mdm_SG_2017_EPSG23031")
sg18 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/SG", layer = "mdm_SG_2018_EPSG23031")
sg19 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/SG", layer = "mdm_SG_2019_EPSG23031")
# AES (one shapefile per year)
#aes14 <- readOGR("C:/Users/Ana/Documents/PhD/Third chapter/GIS/Fallow/AES", layer = "AEScutted_2014_EPSG23031")
aes15 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/AES", layer = "AEScutted_2015_EPSG23031")
aes16 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/AES", layer = "AEScutted_2016_EPSG23031")
aes17 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/AES", layer = "AEScutted_2017_EPSG23031_FIXED_FALLOW")
aes18 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/AES", layer = "AEScutted_2018_EPSG23031_FALLOW")
aes19 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/AES", layer = "AEScutted_2019_EPSG23031_FALLOW")
# SELECT ONLY FALLOW FIELDS IN AES (In 2016 it was already done, and in 2015 is all together)
aes17 <- aes17[which(aes17$PROD_NOM == "FALLOW"), ]
aes18 <- aes18[which(aes18$PROD_NOM == "FALLOW"), ]
aes19 <- aes19[which(aes19$PROD_NOM == "FALLOW"), ]
# GREEN (Greening scheme, one shapefile per year)
#?green14 <- readOGR("C:/Users/Ana/Documents/PhD/Third chapter/GIS/Fallow/Greening", layer = "GREENcutted_2014_EPSG23031")
green15 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/Greening", layer = "GREENcutted_2015_EPSG23031")
green16 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/Greening", layer = "GREENcutted_2016_EPSG23031")
green17 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/Greening", layer = "GREENcutted_2017_EPSG23031")
green18 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/Greening", layer = "GREENcutted_2018_EPSG23031")
green19 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/Greening", layer = "GREENcutted_2019_EPSG23031")
# ---- Create buffers and calculate area ----
# 500 m buffer around each transect, one polygon per input feature (byid).
buf <- gBuffer(tr, byid = TRUE, width = 500)
# Data frame to fill up: one row per buffer, keyed by transect code "Codi".
management <- as.data.frame(matrix(NA, nrow = nrow(buf), ncol = 1))
colnames(management)[1] <- "Codi"
management$Codi <- buf@data$Codi
# ---- AES ----
#### 2015 ----
# Calculate proportional area, because the strip of fallow is not digitized.
# So calculate the HA_Fallow proportional to the intersected area
# (assumes each polygon carries HA_Fallow and total HA_SP attributes).
layers <- list(aes15)
layers_names <- c("aes15")
for (i in 1:length(layers)){
  poli <- raster::intersect(buf, layers[[i]]) # Intersect buffers with management fields polygons
  poli$ha_intersect_buffer <- area(poli)/10000 # Calculate area of what falls in the buffer (in ha)
  # Proportional intersecting area of fallow:
  poli$ha_intersect_fallow <- poli$ha_intersect_buffer*poli$HA_Fallow/poli$HA_SP
  transect_area <- aggregate(ha_intersect_fallow ~ Codi, data = poli, FUN = sum) # Sum area of polygons belonging to a buffer
  colnames(transect_area)[2] <- paste("area", layers_names[i], sep = "_") # Change column name to store it
  management <- left_join(management, transect_area, by = "Codi") # Store area
  management[is.na(management)] <- 0 # Substitute NA by 0 (buffers intersecting no fields)
}
#### 2016 & 2017 ----
# Strip of fallow is digitalized,
# so only sum HA of Fallow fields
layers <- list(aes16, aes17)
layers_names <- c("aes16", "aes17")
for (i in 1:length(layers)){
  poli <- raster::intersect(buf, layers[[i]]) # Intersect buffers with management fields polygons
  poli$ha_intersect_buffer <- area(poli)/10000 # Calculate area of what falls in the buffer (in ha)
  # HERE NOTHING
  # Proportional intersecting area of fallow:
  # poli$ha_intersect_fallow <- poli$ha_intersect_buffer*poli$HA_Fallow/poli$HA_SP
  transect_area <- aggregate(ha_intersect_buffer ~ Codi, data = poli, FUN = sum) # Sum area of polygons belonging to a buffer
  colnames(transect_area)[2] <- paste("area", layers_names[i], sep = "_") # Change column name to store it
  management <- left_join(management, transect_area, by = "Codi") # Store area
  management[is.na(management)] <- 0 # Substitute NA by 0
}
#### 2018 & 2019 (added afterwards) ----
# Strip of fallow is digitalized,
# so only sum HA of Fallow fields (same procedure as 2016/2017)
layers <- list(aes18, aes19)
layers_names <- c("aes18", "aes19")
for (i in 1:length(layers)){
  poli <- raster::intersect(buf, layers[[i]]) # Intersect buffers with management fields polygons
  poli$ha_intersect_buffer <- area(poli)/10000 # Calculate area of what falls in the buffer (in ha)
  # HERE NOTHING
  # Proportional intersecting area of fallow:
  # poli$ha_intersect_fallow <- poli$ha_intersect_buffer*poli$HA_Fallow/poli$HA_SP
  transect_area <- aggregate(ha_intersect_buffer ~ Codi, data = poli, FUN = sum) # Sum area of polygons belonging to a buffer
  colnames(transect_area)[2] <- paste("area", layers_names[i], sep = "_") # Change column name to store it
  management <- left_join(management, transect_area, by = "Codi") # Store area
  management[is.na(management)] <- 0 # Substitute NA by 0
}
#### SAVE ####
setwd("D:/PhD/Third chapter/Data")
write.csv(management, "AES_15_19_FIX.csv")
# ---- SG 15-19 ----
# Data frame to fill up (re-initialized: the AES results were already saved):
management <- as.data.frame(matrix(NA, nrow = nrow(buf), ncol = 1))
colnames(management)[1] <- "Codi"
management$Codi <- buf@data$Codi
layers <- list(sg15, sg16, sg17, sg18, sg19)
layers_names <- c("sg15", "sg16", "sg17", "sg18", "sg19")
for (i in 1:length(layers)){
  poli <- raster::intersect(buf, layers[[i]]) # Intersect buffers with management fields polygons
  poli$ha_intersect_buffer <- area(poli)/10000 # Calculate area of what falls in the buffer (in ha)
  # HERE NOTHING
  # Proportional intersecting area of fallow:
  # poli$ha_intersect_fallow <- poli$ha_intersect_buffer*poli$HA_Fallow/poli$HA_SP
  transect_area <- aggregate(ha_intersect_buffer ~ Codi, data = poli, FUN = sum) # Sum area of polygons belonging to a buffer
  colnames(transect_area)[2] <- paste("area", layers_names[i], sep = "_") # Change column name to store it
  management <- left_join(management, transect_area, by = "Codi") # Store area
  management[is.na(management)] <- 0 # Substitute NA by 0
}
#### SAVE ####
setwd("D:/PhD/Third chapter/Data")
write.csv(management, "SG_15_19_FIX.csv")
# ---- GREENING ----
#### 2015 - 2019 ####
# Many intersection errors, very slow.
# Alternative: only recompute 2018 and 2019 (the bad ones) and join them onto
# the previously saved 2015-2017 results.
setwd("D:/PhD/Third chapter/Data")
green <- read.csv("GREEN_15_19.csv")
green <- green[ ,-c(6,7)] # drop the old 2018/2019 columns before re-joining
management <- as.data.frame(matrix(NA, nrow = nrow(buf), ncol = 1))
colnames(management)[1] <- "Codi"
management$Codi <- buf@data$Codi
layers <- list(green18, green19)
layers_names <- c("green18", "green19")
for (i in 1:length(layers)){
  poli <- raster::intersect(buf, layers[[i]]) # Intersect buffers with management fields polygons
  poli$ha_intersect_buffer <- area(poli)/10000 # Calculate area of what falls in the buffer (in ha)
  # HERE NOTHING
  # Proportional intersecting area of fallow:
  # poli$ha_intersect_fallow <- poli$ha_intersect_buffer*poli$HA_Fallow/poli$HA_SP
  transect_area <- aggregate(ha_intersect_buffer ~ Codi, data = poli, FUN = sum) # Sum area of polygons belonging to a buffer
  colnames(transect_area)[2] <- paste("area", layers_names[i], sep = "_") # Change column name to store it
  management <- left_join(management, transect_area, by = "Codi") # Store area
  management[is.na(management)] <- 0 # Substitute NA by 0
}
management2 <- left_join(green, management, by = "Codi")
# Check: visually compare two buffers against the intersected polygons
# (NOTE: `poli` still holds the last loop iteration, i.e. green19).
plot(buf[153,])
plot(poli[which(poli$Codi == "BE14"), ], col = "red", add = TRUE)
be14 <- poli[which(poli$Codi == "BE14"), ]
be14$area <- area(be14)/10000
sum(be14$area)
plot(buf[149,])
plot(poli[which(poli$Codi == "BE06"), ], col = "red", add = TRUE)
be06 <- poli[which(poli$Codi == "BE06"), ]
be06$area <- area(be06)/10000
sum(be06$area)
#### SAVE ####
setwd("D:/PhD/Third chapter/Data")
write.csv(management2, "GREEN_15_19_FIX.csv")
| /Ch. 2-3/Ch. 3/Variables/Fallow/5.2.FIX_Calculo_Buffer500_AES_SG_GREEN.R | no_license | anasanz/MyScripts | R | false | false | 8,690 | r | # ==================================================================================
# EXTRACT BUFFER 500 M CALCULATING Ha OF SG-AES-GREENING
# ==================================================================================
rm(list=ls())
library(rgdal)
library(rgeos)
library(raster)
library(dplyr)
# ---- Load data (layers without overlap) ----
# Transects
tr <- readOGR("D:/PhD/Third chapter/GIS", "Trans_2018_EPSG23031") # Contains transects sampled each year (1/0)
# SG
#sg14 <- readOGR("C:/Users/Ana/Documents/PhD/Third chapter/GIS/Fallow/SG", layer = "SG_2014_EPSG23031")
#colnames(sg14@data)[colnames(sg14@data) == "Codi"] <- "Codi.2"
sg15 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/SG", layer = "mdm_SG_2015_EPSG23031")
sg16 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/SG", layer = "mdm_SG_2016_EPSG23031")
sg17 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/SG", layer = "mdm_SG_2017_EPSG23031")
sg18 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/SG", layer = "mdm_SG_2018_EPSG23031")
sg19 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/SG", layer = "mdm_SG_2019_EPSG23031")
# AES
#aes14 <- readOGR("C:/Users/Ana/Documents/PhD/Third chapter/GIS/Fallow/AES", layer = "AEScutted_2014_EPSG23031")
aes15 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/AES", layer = "AEScutted_2015_EPSG23031")
aes16 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/AES", layer = "AEScutted_2016_EPSG23031")
aes17 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/AES", layer = "AEScutted_2017_EPSG23031_FIXED_FALLOW")
aes18 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/AES", layer = "AEScutted_2018_EPSG23031_FALLOW")
aes19 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/AES", layer = "AEScutted_2019_EPSG23031_FALLOW")
# SELECT ONLY FALLOW FIELDS IN AES (In 2016 it was already done, and in 2015 is all together)
aes17 <- aes17[which(aes17$PROD_NOM == "FALLOW"), ]
aes18 <- aes18[which(aes18$PROD_NOM == "FALLOW"), ]
aes19 <- aes19[which(aes19$PROD_NOM == "FALLOW"), ]
# GREEN
#?green14 <- readOGR("C:/Users/Ana/Documents/PhD/Third chapter/GIS/Fallow/Greening", layer = "GREENcutted_2014_EPSG23031")
green15 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/Greening", layer = "GREENcutted_2015_EPSG23031")
green16 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/Greening", layer = "GREENcutted_2016_EPSG23031")
green17 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/Greening", layer = "GREENcutted_2017_EPSG23031")
green18 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/Greening", layer = "GREENcutted_2018_EPSG23031")
green19 <- readOGR("D:/PhD/Third chapter/GIS/Fallow_FIX/Greening", layer = "GREENcutted_2019_EPSG23031")
# ---- Create buffers and calculate area ----
buf <- gBuffer(tr, byid = TRUE, width = 500)
# Data frame to fill up:
management <- as.data.frame(matrix(NA, nrow = nrow(buf), ncol = 1))
colnames(management)[1] <- "Codi"
management$Codi <- buf@data$Codi
# ---- AES ----
#### 2015 ----
# Calculate proportional area, because the strip of fallow is not digital
# So calculate the HA_Fallow proportional to the intersected area
layers <- list(aes15)
layers_names <- c("aes15")
for (i in 1:length(layers)){
poli <- raster::intersect(buf, layers[[i]]) # Intersect buffers with management fields polygons
poli$ha_intersect_buffer <- area(poli)/10000 # Calculate area of what falls in the buffer (in ha)
# Proportional intersecting area of fallow:
poli$ha_intersect_fallow <- poli$ha_intersect_buffer*poli$HA_Fallow/poli$HA_SP
transect_area <- aggregate(ha_intersect_fallow ~ Codi, data = poli, FUN = sum) # Sum area of polygons belonging to a buffer
colnames(transect_area)[2] <- paste("area", layers_names[i], sep = "_") # Change column name to store it
management <- left_join(management, transect_area, by = "Codi") # Store area
management[is.na(management)] <- 0 # Substitute NA by 0
}
#### 2016 & 2017 ----
# Strip of fallow is digitalized
# So only sum HA of Fallow fields
layers <- list(aes16, aes17)
layers_names <- c("aes16", "aes17")
for (i in 1:length(layers)){
poli <- raster::intersect(buf, layers[[i]]) # Intersect buffers with management fields polygons
poli$ha_intersect_buffer <- area(poli)/10000 # Calculate area of what falls in the buffer (in ha)
# HERE NOTHING
# Proportional intersecting area of fallow:
# poli$ha_intersect_fallow <- poli$ha_intersect_buffer*poli$HA_Fallow/poli$HA_SP
transect_area <- aggregate(ha_intersect_buffer ~ Codi, data = poli, FUN = sum) # Sum area of polygons belonging to a buffer
colnames(transect_area)[2] <- paste("area", layers_names[i], sep = "_") # Change column name to store it
management <- left_join(management, transect_area, by = "Codi") # Store area
management[is.na(management)] <- 0 # Substitute NA by 0
}
#### 2018 & 2019 (added afterwards) ----
# Strip of fallow is digitalized
# So only sum HA of Fallow fields
layers <- list(aes18, aes19)
layers_names <- c("aes18", "aes19")
for (i in 1:length(layers)){
poli <- raster::intersect(buf, layers[[i]]) # Intersect buffers with management fields polygons
poli$ha_intersect_buffer <- area(poli)/10000 # Calculate area of what falls in the buffer (in ha)
# HERE NOTHING
# Proportional intersecting area of fallow:
# poli$ha_intersect_fallow <- poli$ha_intersect_buffer*poli$HA_Fallow/poli$HA_SP
transect_area <- aggregate(ha_intersect_buffer ~ Codi, data = poli, FUN = sum) # Sum area of polygons belonging to a buffer
colnames(transect_area)[2] <- paste("area", layers_names[i], sep = "_") # Change column name to store it
management <- left_join(management, transect_area, by = "Codi") # Store area
management[is.na(management)] <- 0 # Substitute NA by 0
}
#### SAVE ####
setwd("D:/PhD/Third chapter/Data")
write.csv(management, "AES_15_19_FIX.csv")
# ---- SG 15-19 ----
# Data frame to fill up:
management <- as.data.frame(matrix(NA, nrow = nrow(buf), ncol = 1))
colnames(management)[1] <- "Codi"
management$Codi <- buf@data$Codi
layers <- list(sg15, sg16, sg17, sg18, sg19)
layers_names <- c("sg15", "sg16", "sg17", "sg18", "sg19")
for (i in 1:length(layers)){
poli <- raster::intersect(buf, layers[[i]]) # Intersect buffers with management fields polygons
poli$ha_intersect_buffer <- area(poli)/10000 # Calculate area of what falls in the buffer (in ha)
# HERE NOTHING
# Proportional intersecting area of fallow:
# poli$ha_intersect_fallow <- poli$ha_intersect_buffer*poli$HA_Fallow/poli$HA_SP
transect_area <- aggregate(ha_intersect_buffer ~ Codi, data = poli, FUN = sum) # Sum area of polygons belonging to a buffer
colnames(transect_area)[2] <- paste("area", layers_names[i], sep = "_") # Change column name to store it
management <- left_join(management, transect_area, by = "Codi") # Store area
management[is.na(management)] <- 0 # Substitute NA by 0
}
#### SAVE ####
setwd("D:/PhD/Third chapter/Data")
write.csv(management, "SG_15_19_FIX.csv")
# ---- GREENING ----
#### 2015 - 2019 ####
# Many intersection errors, very slow
# Alternative: Only fix 2018 and 2019, the bad ones
setwd("D:/PhD/Third chapter/Data")
green <- read.csv("GREEN_15_19.csv")
green <- green[ ,-c(6,7)]
management <- as.data.frame(matrix(NA, nrow = nrow(buf), ncol = 1))
colnames(management)[1] <- "Codi"
management$Codi <- buf@data$Codi
layers <- list(green18, green19)
layers_names <- c("green18", "green19")
for (i in 1:length(layers)){
poli <- raster::intersect(buf, layers[[i]]) # Intersect buffers with management fields polygons
poli$ha_intersect_buffer <- area(poli)/10000 # Calculate area of what falls in the buffer (in ha)
# HERE NOTHING
# Proportional intersecting area of fallow:
# poli$ha_intersect_fallow <- poli$ha_intersect_buffer*poli$HA_Fallow/poli$HA_SP
transect_area <- aggregate(ha_intersect_buffer ~ Codi, data = poli, FUN = sum) # Sum area of polygons belonging to a buffer
colnames(transect_area)[2] <- paste("area", layers_names[i], sep = "_") # Change column name to store it
management <- left_join(management, transect_area, by = "Codi") # Store area
management[is.na(management)] <- 0 # Substitute NA by 0
}
management2 <- left_join(green, management, by = "Codi")
# CHeck
plot(buf[153,])
plot(poli[which(poli$Codi == "BE14"), ], col = "red", add = TRUE)
be14 <- poli[which(poli$Codi == "BE14"), ]
be14$area <- area(be14)/10000
sum(be14$area)
plot(buf[149,])
plot(poli[which(poli$Codi == "BE06"), ], col = "red", add = TRUE)
be06 <- poli[which(poli$Codi == "BE06"), ]
be06$area <- area(be06)/10000
sum(be06$area)
#### SAVE ####
setwd("D:/PhD/Third chapter/Data")
write.csv(management2, "GREEN_15_19_FIX.csv")
|
# Volcano plot of differential-expression results: x = log2 fold change,
# y = -log10 adjusted p-value.
res <- read.table("https://raw.githubusercontent.com/Shicheng-Guo/PANC/master/results.txt", header=TRUE)
head(res)
# Make a basic volcano plot
with(res, plot(log2FoldChange, -log10(padj), pch=20, main="Volcano plot", xlim=c(-1,1)))
# Add colored points: red if padj<0.05, orange if |log2FC|>0.5, green if both
# (padj<0.05 and |log2FC|>0.75).
# BUG FIX: the overlays previously plotted -log10(pvalue) on axes drawn from
# -log10(padj), so the highlighted points did not land on the underlying dots;
# all layers now use padj consistently.
with(subset(res, padj<.05 ), points(log2FoldChange, -log10(padj), pch=20, col="red"))
with(subset(res, abs(log2FoldChange)>0.5), points(log2FoldChange, -log10(padj), pch=20, col="orange"))
with(subset(res, padj<.05 & abs(log2FoldChange)>0.75), points(log2FoldChange, -log10(padj), pch=20, col="green"))
# Label the significant points with the textxy function from the calibrate package
library(calibrate)
with(subset(res, padj<.05 & abs(log2FoldChange)>0.75), textxy(log2FoldChange, -log10(padj), labs=name, cex=.8))
| /mcri/volcano.R | no_license | Shicheng-Guo/GscRbasement | R | false | false | 824 | r | res <- read.table("https://raw.githubusercontent.com/Shicheng-Guo/PANC/master/results.txt", header=TRUE)
head(res)
# Make a basic volcano plot
with(res, plot(log2FoldChange, -log10(padj), pch=20, main="Volcano plot", xlim=c(-1,1)))
# Add colored points: red if padj<0.05, orange of log2FC>1, green if both)
with(subset(res, padj<.05 ), points(log2FoldChange, -log10(pvalue), pch=20, col="red"))
with(subset(res, abs(log2FoldChange)>0.5), points(log2FoldChange, -log10(pvalue), pch=20, col="orange"))
with(subset(res, padj<.05 & abs(log2FoldChange)>0.75), points(log2FoldChange, -log10(pvalue), pch=20, col="green"))
# Label points with the textxy function from the calibrate plot
library(calibrate)
with(subset(res, padj<.05 & abs(log2FoldChange)>0.75), textxy(log2FoldChange, -log10(pvalue), labs=name, cex=.8))
|
# neightborBasedValidation.r
#
#  12/18/09 cws Created
#
# NOTE(review): function name keeps the historical "neightbor" spelling;
# renaming would break existing callers.
neightborBasedValidation <- function(df, keys, name, value, parameter, tbf, min, max)
# Performs a context-based range check of a value based on neighboring values,
# to find values that exceed their neighboring values by more than a specified
# factor, or which exceed specified constants if the neighboring values are
# missing.
#
# Returns a subset of the input dataframe containing the flagged values with
# an additional column TESTDESCRIPTION, and optionally including rows with their
# neighboring values as well.
#
# ARGUMENTS:
# df        dataframe to undergo this validation test
# keys      keys used to uniquely identify each row in the dataframe
# name      string with name of column holding names of parameters in dataframe
# value     string with name of column holding value of parameters in dataframe
# parameter name of parameter to check
# tbf       factor by which adjacent values may be expected to vary; values
#             which change greater than this factor are flagged for validation.
# min, max  numeric values specifying the expected range of the parameter; this
#             static range test is only performed when a value has no nonmissing
#             neighbors.  Either may be NULL to skip that bound.
#
# ASSUMPTIONS:
# Creation of temporary columns ..value, ..first, ..last, ..next, ..prev
#   and ..flag is OK.
# first(), last(), lag() and lead() used below are project-local data-frame
#   helpers (signature: df, column, new-column) -- they are NOT the base/dplyr
#   functions of the same names.
#
{
  ds <- df[df[name]==parameter,]  # subset to specific parameter

  # Converting values to a numeric mode is difficult to do in a single step
  # without generating warning messages, so three steps are used
  # (blank/'.' entries become NA before the numeric coercion).
  ds$..value <- unlist(ds[value])
  ds$..value <- ifelse(is.na(ds$..value), NA, ds$..value)
  ds$..value <- ifelse(trimws(ds$..value) == '' | ds$..value == '.', NA, ds$..value)
  ds$..value <- as.numeric(ds$..value)

  # Adjacent values are obtained with lag() and lead().  The beginning and end
  # of a series has no previous or next neighbors (respectively), so those are
  # made missing; lag()ing and lead()ing thus requires first() and last(), and
  # thus appropriate ordering.
  # if keys==c('UID','TRANSECT','STATION') then this statement will parse to
  # order(ds$UID, ds$TRANSECT, ds$STATION)
  ordering <- eval(parse(text=sprintf('order(%s)'
                                     ,paste('ds$', keys, sep='', collapse=', ')
                                     )
                        )
                  )
  ds <- ds[ordering,]
  ds <- first(ds, keys[1], '..first')
  ds <- last(ds, keys[1], '..last')
  ds <- lag(ds, '..value', '..prev')
  ds <- lead(ds, '..value', '..next')
  ds$..prev <- ifelse(ds$..first, NA, ds$..prev)
  ds$..next <- ifelse(ds$..last, NA, ds$..next)

  # Compare values with their available neighbors.  A value is flagged when it
  # differs from an available nonzero neighbor by more than factor tbf (in
  # either direction); zeroes on either side suppress the ratio test.  With no
  # nonmissing neighbors the flag is NA (handled by the static test below).
  ds$TESTDESCRIPTION <- as.character(NA)
  ds$..flag <- ifelse(!(is.na(ds$..prev) | is.na(ds$..next))
                     ,(ds$..value != 0 & ds$..prev != 0 & ds$..next != 0) &
                      (ds$..value > ds$..prev*tbf | ds$..value < ds$..prev/tbf |
                       ds$..value > ds$..next*tbf | ds$..value < ds$..next/tbf
                      )
                     ,ifelse(!is.na(ds$..prev)
                            ,(ds$..value != 0 & ds$..prev != 0) &
                             (ds$..value > ds$..prev*tbf |
                              ds$..value < ds$..prev/tbf
                             )
                            ,ifelse(!is.na(ds$..next)
                                   ,(ds$..value != 0 & ds$..next != 0) &
                                    (ds$..value > ds$..next*tbf |
                                     ds$..value < ds$..next/tbf
                                    )
                                   ,NA
                                   )
                            )
                     )
  ds$TESTDESCRIPTION <- ifelse(ds$..flag,'Value varies considerably from its neighbors', NA)

  # perform static range checks, used to fill in gaps due to missing values
  if(!is.null(max)) {
      if(!is.null(min)) {
          ff <- ds$..value > max | ds$..value < min
      } else {
          ff <- ds$..value > max
      }
  } else {
      if(!is.null(min)) {
          ff <- ds$..value < min
      } else {
          ff <- FALSE
      }
  }

  # fill in missing checks with static range values, if provided
  # (only applied where the neighbor test produced NA, i.e. isolated values)
  ds$TESTDESCRIPTION <- ifelse(is.na(ds$..flag) & ff
                              ,sprintf('Value exceeds specified range (%s,%s)'
                                      ,min, max
                                      )
                              ,ds$TESTDESCRIPTION
                              )

  # Return only the flagged rows, dropping the temporary work columns.
  ds <- subset(ds, !is.na(TESTDESCRIPTION)
              ,select=-c(..value,..flag,..first,..last,..prev,..next)
              )
  return(ds)
}
neightborBasedValidationTest <- function()
# unit test for neightborBasedValidation()
# (checkEquals is presumably the RUnit assertion -- run inside an RUnit suite)
{
  # 4 series (k1) of 10 stations (k2) each, with spikes, zeroes and NAs
  # placed to exercise the neighbor test and the static-range fallback.
  testData <- data.frame('k1'=rep(1:4, each=10)
                        ,'k2'=rep(1:10, times=4)
                        ,'par'=rep('dist', times=40)
                        ,'val'=c(1, 10,  1,  1,  1, 10, 10,  1,  1, 10
                                ,10, 1,  1, 10,100, 10,  1, 10, 10, 10
                                ,NA, 1,  1,  0, 10,  1, 10,  1, NA, 10
                                ,1, NA,  1, NA,  1,  1,  0, 10,  0,  1
                                )
                        ,stringsAsFactors=FALSE
                        )

  # with full arguments: neighbor factor 5, static range (0,7)
  rr <- neightborBasedValidation(testData, c('k1','k2'), 'par', 'val', 'dist', 5, 0, 7)
  rownames(rr) <- NULL
  ee <- subset(testData
              ,k1==1 & k2 %in% c(1,2,3,5,6,7,8,9,10) |
               k1==2 & k2 %in% c(1:8) |
               k1==3 & k2 %in% c(6,7,8,10)
              )
  ee$TESTDESCRIPTION <- 'Value varies considerably from its neighbors'
  # (3,10) has only a missing neighbor, so it falls back to the static range.
  ee[ee$k1==3 & ee$k2==10,]$TESTDESCRIPTION <- "Value exceeds specified range (0,7)"
  rownames(ee) <- NULL
  checkEquals(ee,rr
             ,'Error: Did not correctly detect odd neighboring values with range check'
             )

  # no static range checks (min and max both NULL)
  rr <- neightborBasedValidation(testData, c('k1','k2'), 'par', 'val', 'dist', 5, NULL, NULL)
  rownames(rr) <- NULL
  ee <- subset(testData
              ,k1==1 & k2 %in% c(1,2,3,5,6,7,8,9,10) |
               k1==2 & k2 %in% c(1:8) |
               k1==3 & k2 %in% c(6,7,8)
              )
  ee$TESTDESCRIPTION <- 'Value varies considerably from its neighbors'
  rownames(ee) <- NULL
  checkEquals(ee,rr,
              'Error: Did not correctly detect odd neighboring values with no range check'
              )
}
# end of file | /neighborBasedValidation.r | no_license | jasonelaw/nrsa-epa | R | false | false | 6,720 | r | # neightborBasedValidation.r
#
# 12/18/09 cws Created
#
neightborBasedValidation <- function(df, keys, name, value, parameter, tbf, min, max)
# Performs a context-based range check of a value based on neighboring values,
# to find values that exceed their neighboring values by more than a specified
# factor, or which exceed specified constants if the neighboring values are
# missing.
#
# Returns a subset of the input dataframe containing the flagged values with
# an additional column TESTDESCRIPTION, and optionally including rows with their
# neighboring values as well.
#
# ARGUMENTS:
# df dataframe to undergo this validation test
# keys keys used to uniquely identify each row in the dataframe
# name string with name of column holding names of parameters in dataframe
# value string with name of column holding value of parameters in dataframe
# parameter name of parameter to check
# tbf factor by which adjacent values may be expected to vary; values
# which change greater than this factor are flagged for validation.
# min, max numeric values specifying the expected range of the parameter; this
# static range test is only performed when a value has no nonmissing
# neighbors.
#
# ASSUMPTIONS:
# Creation of temporary columns ..value, ..first, ..last, ..next, ..prev
# and ..flag is OK.
#
{
ds <- df[df[name]==parameter,] # subset to specific parameter
# Converting values to a numeric mode is difficult to do in a single step
# without generating warning messages, so three steps are used.
ds$..value <- unlist(ds[value])
ds$..value <- ifelse(is.na(ds$..value), NA, ds$..value)
ds$..value <- ifelse(trimws(ds$..value) == '' | ds$..value == '.', NA, ds$..value)
ds$..value <- as.numeric(ds$..value)
# Adjacent values are obtained with lag() and lead(). The beginning and end
# of a series has no previous or next neighbors (respectively), so those are
# made missing; lag()ing and lead()ing thus requires first() and last(), and
# thus appropriate ordering.
# if keys==c('UID','TRANSECT','STATION') then this statement will parse to
# order(ds$UID, ds$TRANSECT, ds$STATION)
ordering <- eval(parse(text=sprintf('order(%s)'
,paste('ds$', keys, sep='', collapse=', ')
)
)
)
ds <- ds[ordering,]
ds <- first(ds, keys[1], '..first')
ds <- last(ds, keys[1], '..last')
ds <- lag(ds, '..value', '..prev')
ds <- lead(ds, '..value', '..next')
ds$..prev <- ifelse(ds$..first, NA, ds$..prev)
ds$..next <- ifelse(ds$..last, NA, ds$..next)
# Compare values with their available neighbors.
ds$TESTDESCRIPTION <- as.character(NA)
ds$..flag <- ifelse(!(is.na(ds$..prev) | is.na(ds$..next))
,(ds$..value != 0 & ds$..prev != 0 & ds$..next != 0) &
(ds$..value > ds$..prev*tbf | ds$..value < ds$..prev/tbf |
ds$..value > ds$..next*tbf | ds$..value < ds$..next/tbf
)
,ifelse(!is.na(ds$..prev)
,(ds$..value != 0 & ds$..prev != 0) &
(ds$..value > ds$..prev*tbf |
ds$..value < ds$..prev/tbf
)
,ifelse(!is.na(ds$..next)
,(ds$..value != 0 & ds$..next != 0) &
(ds$..value > ds$..next*tbf |
ds$..value < ds$..next/tbf
)
,NA
)
)
)
ds$TESTDESCRIPTION <- ifelse(ds$..flag,'Value varies considerably from its neighbors', NA)
# perform static range checks, used to fill in gaps due to missing values
if(!is.null(max)) {
if(!is.null(min)) {
ff <- ds$..value > max | ds$..value < min
} else {
ff <- ds$..value > max
}
} else {
if(!is.null(min)) {
ff <- ds$..value < min
} else {
ff <- FALSE
}
}
# fill in missing checks with static range values, if provided
ds$TESTDESCRIPTION <- ifelse(is.na(ds$..flag) & ff
,sprintf('Value exceeds specified range (%s,%s)'
,min, max
)
,ds$TESTDESCRIPTION
)
ds <- subset(ds, !is.na(TESTDESCRIPTION)
,select=-c(..value,..flag,..first,..last,..prev,..next)
)
return(ds)
}
neightborBasedValidationTest <- function()
# unit test for neightborBasedValidation()
{
  # Fixture: four series (k1) of ten stations (k2) each, mixing stable
  # values, large jumps, zeroes and missing values.
  testData <- data.frame(k1 = rep(1:4, each = 10)
                        ,k2 = rep(1:10, times = 4)
                        ,par = rep('dist', times = 40)
                        ,val = c(1, 10, 1, 1, 1, 10, 10, 1, 1, 10
                                ,10, 1, 1, 10,100, 10, 1, 10, 10, 10
                                ,NA, 1, 1, 0, 10, 1, 10, 1, NA, 10
                                ,1, NA, 1, NA, 1, 1, 0, 10, 0, 1
                                )
                        ,stringsAsFactors = FALSE
                        )

  # Case 1: full argument list, including the static range check (0, 7).
  actual <- neightborBasedValidation(testData, c('k1','k2'), 'par', 'val', 'dist', 5, 0, 7)
  rownames(actual) <- NULL
  flagged <- with(testData
                 ,k1 == 1 & k2 %in% c(1,2,3,5,6,7,8,9,10) |
                  k1 == 2 & k2 %in% c(1:8) |
                  k1 == 3 & k2 %in% c(6,7,8,10)
                 )
  expected <- testData[flagged, ]
  expected$TESTDESCRIPTION <- 'Value varies considerably from its neighbors'
  # Row (3, 10) is only reachable through the static range test.
  expected$TESTDESCRIPTION[expected$k1 == 3 & expected$k2 == 10] <- "Value exceeds specified range (0,7)"
  rownames(expected) <- NULL
  checkEquals(expected, actual
             ,'Error: Did not correctly detect odd neighboring values with range check'
             )

  # Case 2: no static range checks at all.
  actual <- neightborBasedValidation(testData, c('k1','k2'), 'par', 'val', 'dist', 5, NULL, NULL)
  rownames(actual) <- NULL
  flagged <- with(testData
                 ,k1 == 1 & k2 %in% c(1,2,3,5,6,7,8,9,10) |
                  k1 == 2 & k2 %in% c(1:8) |
                  k1 == 3 & k2 %in% c(6,7,8)
                 )
  expected <- testData[flagged, ]
  expected$TESTDESCRIPTION <- 'Value varies considerably from its neighbors'
  rownames(expected) <- NULL
  checkEquals(expected, actual
             ,'Error: Did not correctly detect odd neighboring values with no range check'
             )
}
# end of file |
#' Extract YAML data from metadata.yml
#'
#' @param file Path to the YAML file
#'
#' @importFrom yaml yaml.load_file
#'
dontTouch <- function(file = NULL) {
  yaml <- yaml::yaml.load_file(file)
  # Address components, in the order they appear in the collapsed string.
  fields <- c("university_name", "faculty_group", "department",
              "street_address", "state", "city", "country", "postal_code")
  # Build affil1..affil4 from affiliation1..affiliation4 (the original
  # repeated this block verbatim four times).  Components absent from the
  # YAML are NULL and drop out of unlist(), matching the original
  # c(NULL, ...) behaviour; a wholly missing affiliation yields "".
  for (i in seq_len(4)) {
    affiliation <- yaml[[paste0("affiliation", i)]]
    parts <- unlist(lapply(fields, function(f) affiliation[[f]]))
    yaml[[paste0("affil", i)]] <- paste(parts, collapse = ", ")
  }
  return(yaml)
} | /R/dontTouch.R | no_license | chris-twigg/AFIT | R | false | false | 2,162 | r | #' Extract YAML data from metadata.yml
#'
#' @param file Path to the YAML file
#'
#' @importFrom yaml yaml.load_file
#'
dontTouch <- function(file = NULL) {
  yaml <- yaml::yaml.load_file(file)
  # Join the components of one affiliation entry into a single
  # comma-separated string; NULL (missing) components drop out of c().
  collapse_affiliation <- function(affiliation) {
    paste(c(affiliation$university_name,
            affiliation$faculty_group,
            affiliation$department,
            affiliation$street_address,
            affiliation$state,
            affiliation$city,
            affiliation$country,
            affiliation$postal_code),
          collapse = ", ")
  }
  yaml$affil1 <- collapse_affiliation(yaml$affiliation1)
  yaml$affil2 <- collapse_affiliation(yaml$affiliation2)
  yaml$affil3 <- collapse_affiliation(yaml$affiliation3)
  yaml$affil4 <- collapse_affiliation(yaml$affiliation4)
  return(yaml)
} |
# Auto-generated fuzzing harness (libFuzzer/valgrind artifact, per the file
# path): feeds a fixed argument list to the package-internal compiled
# routine blatent:::bin2dec_Rcpp -- presumably a binary-to-decimal
# conversion, judging by the name; TODO confirm -- and prints the result.
testlist <- list(baseVector = numeric(0), binaryVector = c(-7.99666974103071e+304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), nAttributes = 0L)
result <- do.call(blatent:::bin2dec_Rcpp,testlist)
str(result) | /blatent/inst/testfiles/bin2dec_Rcpp/libFuzzer_bin2dec_Rcpp/bin2dec_Rcpp_valgrind_files/1609878114-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 230 | r | testlist <- list(baseVector = numeric(0), binaryVector = c(-7.99666974103071e+304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), nAttributes = 0L)
result <- do.call(blatent:::bin2dec_Rcpp,testlist)
str(result) |
\name{nnlink}
\alias{nnlink}
\title{Construct similarity graph by 1-NNL}
\description{This function provides the edges of the similarity graph constructed by 1-NNL.}
\usage{
nnlink(distance)
}
\arguments{
\item{distance}{A K by K matrix, which is the distance matrix on the distinct values and K is the number of distinct values with at least one observation in either group.}
}
\value{
\item{E}{An edge matrix representing a similarity graph on the distinct values with the number of edges in the similarity graph being the number of rows and 2 columns. Each row records the subject indices of the two ends of an edge in the similarity graph.}
}
\seealso{
\code{\link{getGraph}}
}
| /man/nnlink.Rd | no_license | cran/gTests | R | false | false | 707 | rd | \name{nnlink}
\alias{nnlink}
\title{Construct similarity graph by 1-NNL}
\description{This function provides the edges of the similarity graph constructed by 1-NNL.}
\usage{
nnlink(distance)
}
\arguments{
\item{distance}{A K by K matrix, which is the distance matrix on the distinct values and K is the number of distinct values with at least one observation in either group.}
}
\value{
\item{E}{An edge matrix representing a similarity graph on the distinct values with the number of edges in the similarity graph being the number of rows and 2 columns. Each row records the subject indices of the two ends of an edge in the similarity graph.}
}
\seealso{
\code{\link{getGraph}}
}
|
draw.dca <-
function(display.in.diagram = c('sites'),
         display.species = c('none'),
         display.sites = c('points'),
         axes.shown = c(1,2),
         down.rare.spec = 0,
         display.EIV = FALSE,
         display.header = FALSE,
         display.envelope = FALSE,
         header.name = 'env',
         display.header.style = c('arrow'),
         display.spider,
         display.group.center = FALSE,
         three.dim = FALSE,
         resolution = c(1280, 768),
         bw = FALSE,
         ...)
{
  # Draw a 2D or 3D DCA ordination diagram from data imported from JUICE.
  has.new.r <- check.install(display.spider)
  open.r.window(three.dim, resolution)
  # The Tk progress bar is only created on newer R installations.
  if (has.new.r) {
    progress <- myTkProgressBar(paste(ifelse(three.dim, '3D', '2D'), 'DCA - Analysis progress'),
                                'Importing data from JUICE', 0, 100, 20)
  } else {
    progress <- NULL
  }
  write('End of ordination', file = 'result.txt')
  library(vegan)
  imported <- read.check.data(display.sites = display.sites,
                              display.EIV = display.EIV,
                              display.header = display.header,
                              display.envelope = display.envelope,
                              display.spider = display.spider,
                              display.group.center = display.group.center)
  if (has.new.r) {
    setTkProgressBar(progress, label = 'Calculation of ordination', value = 40)
  }
  # Reuse the previously cached ordination if use.last() says it is still
  # valid (presumably when the imported matrix is unchanged -- see the
  # cache written below); otherwise recompute with vegan's decorana().
  cached <- use.last(imported, 'dca', setting = list(down.rare.spec = down.rare.spec))
  if (cached$use.last.result) {
    spec.ord <- cached$last.data.result
  } else {
    spec.ord <- decorana(imported$spec.data, iweigh = down.rare.spec)
  }
  if (has.new.r) {
    setTkProgressBar(progress, label = 'Saving results', value = 60)
  }
  save.ord.result(spec.ord, cached$use.last.result, 'dca', imported$deleted.plots)
  if (has.new.r) {
    setTkProgressBar(progress, label = 'Drawing the figure', value = 80)
  }
  if (three.dim) {
    draw.3d(input.data = imported, spec.data.ord = spec.ord,
            display.in.diagram = display.in.diagram, display.species = display.species,
            display.sites = display.sites, axes.shown = axes.shown,
            display.EIV = display.EIV, display.header = display.header,
            display.envelope = display.envelope, header.name = header.name,
            display.header.style = display.header.style, display.spider = display.spider,
            display.group.center = display.group.center, pb = progress)
  } else {
    draw.2d(input.data = imported, spec.data.ord = spec.ord,
            display.in.diagram = display.in.diagram, display.species = display.species,
            display.sites = display.sites, axes.shown = axes.shown,
            display.EIV = display.EIV, display.header = display.header,
            display.envelope = display.envelope, header.name = header.name,
            display.header.style = display.header.style, display.spider = display.spider,
            display.group.center = display.group.center, bw = bw, pb = progress)
  }
  # Cache the freshly computed result for the next run.  save() restores
  # objects under their symbol names, so the names 'last.data' and
  # 'last.data.quick' must not be changed.
  if (!cached$use.last.result) {
    last.data <- list(last.matrix.sum = sum(imported$spec.data),
                      last.matrix.species = colnames(imported$spec.data),
                      last.matrix.sites = rownames(imported$spec.data),
                      last.result = spec.ord)
    save(last.data, file = 'dca_lfa.r')
    last.data.quick <- list(type.of.analysis = 'dca',
                            size.of.matrix = dim(imported$spec.data),
                            setting = list(down.rare.spec = down.rare.spec))
    save(last.data.quick, file = 'dca_lfq.r')
  }
}
| /R/draw.dca.R | no_license | zdealveindy/ordijuice | R | false | false | 3,381 | r | draw.dca <-
function(display.in.diagram = c('sites'),
         display.species = c('none'),
         display.sites = c('points'),
         axes.shown = c(1,2),
         down.rare.spec = 0,
         display.EIV = FALSE,
         display.header = FALSE,
         display.envelope = FALSE,
         header.name = 'env',
         display.header.style = c('arrow'),
         display.spider,
         display.group.center = FALSE,
         three.dim = FALSE,
         resolution = c(1280, 768),
         bw = FALSE,
         ...)
{
  # Draw a 2D or 3D DCA ordination diagram from data imported from JUICE.
  has.new.r <- check.install(display.spider)
  open.r.window(three.dim, resolution)
  # The Tk progress bar is only created on newer R installations.
  if (has.new.r) {
    progress <- myTkProgressBar(paste(ifelse(three.dim, '3D', '2D'), 'DCA - Analysis progress'),
                                'Importing data from JUICE', 0, 100, 20)
  } else {
    progress <- NULL
  }
  write('End of ordination', file = 'result.txt')
  library(vegan)
  imported <- read.check.data(display.sites = display.sites,
                              display.EIV = display.EIV,
                              display.header = display.header,
                              display.envelope = display.envelope,
                              display.spider = display.spider,
                              display.group.center = display.group.center)
  if (has.new.r) {
    setTkProgressBar(progress, label = 'Calculation of ordination', value = 40)
  }
  # Reuse the previously cached ordination if use.last() says it is still
  # valid (presumably when the imported matrix is unchanged -- see the
  # cache written below); otherwise recompute with vegan's decorana().
  cached <- use.last(imported, 'dca', setting = list(down.rare.spec = down.rare.spec))
  if (cached$use.last.result) {
    spec.ord <- cached$last.data.result
  } else {
    spec.ord <- decorana(imported$spec.data, iweigh = down.rare.spec)
  }
  if (has.new.r) {
    setTkProgressBar(progress, label = 'Saving results', value = 60)
  }
  save.ord.result(spec.ord, cached$use.last.result, 'dca', imported$deleted.plots)
  if (has.new.r) {
    setTkProgressBar(progress, label = 'Drawing the figure', value = 80)
  }
  if (three.dim) {
    draw.3d(input.data = imported, spec.data.ord = spec.ord,
            display.in.diagram = display.in.diagram, display.species = display.species,
            display.sites = display.sites, axes.shown = axes.shown,
            display.EIV = display.EIV, display.header = display.header,
            display.envelope = display.envelope, header.name = header.name,
            display.header.style = display.header.style, display.spider = display.spider,
            display.group.center = display.group.center, pb = progress)
  } else {
    draw.2d(input.data = imported, spec.data.ord = spec.ord,
            display.in.diagram = display.in.diagram, display.species = display.species,
            display.sites = display.sites, axes.shown = axes.shown,
            display.EIV = display.EIV, display.header = display.header,
            display.envelope = display.envelope, header.name = header.name,
            display.header.style = display.header.style, display.spider = display.spider,
            display.group.center = display.group.center, bw = bw, pb = progress)
  }
  # Cache the freshly computed result for the next run.  save() restores
  # objects under their symbol names, so the names 'last.data' and
  # 'last.data.quick' must not be changed.
  if (!cached$use.last.result) {
    last.data <- list(last.matrix.sum = sum(imported$spec.data),
                      last.matrix.species = colnames(imported$spec.data),
                      last.matrix.sites = rownames(imported$spec.data),
                      last.result = spec.ord)
    save(last.data, file = 'dca_lfa.r')
    last.data.quick <- list(type.of.analysis = 'dca',
                            size.of.matrix = dim(imported$spec.data),
                            setting = list(down.rare.spec = down.rare.spec))
    save(last.data.quick, file = 'dca_lfq.r')
  }
}
|
fileURL <- "http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data"
download.file(fileURL, destfile = "breast-cancer-wisconsin.data", method = "curl")
# Read the data; '?' encodes missing values in this file.
df <- read.table("breast-cancer-wisconsin.data", na.strings = "?", sep = ",")
str(df)
# Drop the sample-code column; keep a descriptively named copy (ds) whose
# column names facilitate semantic interpretation.
df <- df[, -1]
ds <- df
names(ds) <- c("ClumpThickness",
               "UniformityCellSize",
               "UniformityCellShape",
               "MarginalAdhesion",
               "SingleEpithelialCellSize",
               "BareNuclei",
               "BlandChromatin",
               "NormalNucleoli",
               "Mitoses",
               "Class")
prop.table(table(ds$Class))
# Pairwise correlations of the predictors (entries involving a predictor
# with missing values, e.g. V7/BareNuclei, come out NA).
corrTable <- cor(df[, c("V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10")])
corrTable
# Recode the outcome V11 (2/4 in the raw data) as a two-level factor.
df$V11 <- factor(df$V11, levels = c(2, 4), labels = c("1", "2"))
# Reproducible 70/30 train/validation split.
set.seed(1234)
ind <- sample(2, nrow(df), replace = TRUE, prob = c(0.7, 0.3))
trainDf <- df[ind == 1, ]
validationDf <- df[ind == 2, ]
# Running Naive Bayes using the e1071 library; column 10 is the class (V11).
library(e1071)
classifier <- naiveBayes(x = trainDf[, -10], y = trainDf$V11)
# Using the classifier on training data to test the predictions.
y_pred_train <- predict(classifier, newdata = trainDf[, -10])
cm <- table(trainDf$V11, y_pred_train)
cm
summary(classifier)
# Validating the classifier on the validation data.
y_pred_validation <- predict(classifier, newdata = validationDf[, -10])
cm <- table(validationDf$V11, y_pred_validation)
cm
# Variation 1 - raw posterior probabilities next to the class predictions.
# (The original refit the same model twice here, once with laplace = 1 and
# immediately again without it; the dead first fit has been removed.)
y_pred_train_raw <- predict(classifier, newdata = trainDf[, -10], type = "raw")
y_pred_train_class <- predict(classifier, newdata = trainDf[, -10], type = "class")
y_pred_train_class
s <- cbind(y_pred_train_raw, y_pred_train_class)
# Variation 2 - apply Laplace smoothing.
classifier <- naiveBayes(x = trainDf[, -10], y = trainDf$V11, laplace = 1)
y_pred_train_raw <- predict(classifier, newdata = trainDf[, -10], type = "raw", threshold = 0.001, eps = 0)
library(ROCR)
library(rpart)  # attached in the original script; not used below
# To draw the ROC curve we need class *probabilities* on the validation
# set, so score the fitted model with type = "raw".  Fix: the original
# passed the training prediction matrix to predict() and referenced the
# undefined objects validationData and validationData$Class.
PredictROC <- predict(classifier, newdata = validationDf[, -10], type = "raw")
PredictROC
PredictROC[, 2]
pred <- prediction(PredictROC[, 2], validationDf$V11)
perf <- performance(pred, "tpr", "fpr")
pred
perf
plot(perf, colorize = TRUE)
plot(perf, colorize = TRUE,
     main = "ROC curve",
     ylab = "Sensitivity",
     xlab = "Specificity",
     print.cutoffs.at = seq(0, 1, 0.3),
     text.adj = c(-0.2, 1.7))
# Area Under the ROC Curve.
auc <- as.numeric(performance(pred, "auc")@y.values)
auc <- round(auc, 3)
auc
| /Lab3_NaiveBayesian_VijayaShreeRajaSekaran.R | no_license | vjvijayashree/ML-in-R | R | false | false | 3,108 | r | fileURL <- "http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data"
download.file(fileURL, destfile = "breast-cancer-wisconsin.data", method = "curl")
# Read the data; '?' encodes missing values in this file.
df <- read.table("breast-cancer-wisconsin.data", na.strings = "?", sep = ",")
str(df)
# Drop the sample-code column; keep a descriptively named copy (ds) whose
# column names facilitate semantic interpretation.
df <- df[, -1]
ds <- df
names(ds) <- c("ClumpThickness",
               "UniformityCellSize",
               "UniformityCellShape",
               "MarginalAdhesion",
               "SingleEpithelialCellSize",
               "BareNuclei",
               "BlandChromatin",
               "NormalNucleoli",
               "Mitoses",
               "Class")
prop.table(table(ds$Class))
# Pairwise correlations of the predictors (entries involving a predictor
# with missing values, e.g. V7/BareNuclei, come out NA).
corrTable <- cor(df[, c("V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10")])
corrTable
# Recode the outcome V11 (2/4 in the raw data) as a two-level factor.
df$V11 <- factor(df$V11, levels = c(2, 4), labels = c("1", "2"))
# Reproducible 70/30 train/validation split.
set.seed(1234)
ind <- sample(2, nrow(df), replace = TRUE, prob = c(0.7, 0.3))
trainDf <- df[ind == 1, ]
validationDf <- df[ind == 2, ]
# Running Naive Bayes using the e1071 library; column 10 is the class (V11).
library(e1071)
classifier <- naiveBayes(x = trainDf[, -10], y = trainDf$V11)
# Using the classifier on training data to test the predictions.
y_pred_train <- predict(classifier, newdata = trainDf[, -10])
cm <- table(trainDf$V11, y_pred_train)
cm
summary(classifier)
# Validating the classifier on the validation data.
y_pred_validation <- predict(classifier, newdata = validationDf[, -10])
cm <- table(validationDf$V11, y_pred_validation)
cm
# Variation 1 - raw posterior probabilities next to the class predictions.
# (The original refit the same model twice here, once with laplace = 1 and
# immediately again without it; the dead first fit has been removed.)
y_pred_train_raw <- predict(classifier, newdata = trainDf[, -10], type = "raw")
y_pred_train_class <- predict(classifier, newdata = trainDf[, -10], type = "class")
y_pred_train_class
s <- cbind(y_pred_train_raw, y_pred_train_class)
# Variation 2 - apply Laplace smoothing.
classifier <- naiveBayes(x = trainDf[, -10], y = trainDf$V11, laplace = 1)
y_pred_train_raw <- predict(classifier, newdata = trainDf[, -10], type = "raw", threshold = 0.001, eps = 0)
library(ROCR)
library(rpart)  # attached in the original script; not used below
# To draw the ROC curve we need class *probabilities* on the validation
# set, so score the fitted model with type = "raw".  Fix: the original
# passed the training prediction matrix to predict() and referenced the
# undefined objects validationData and validationData$Class.
PredictROC <- predict(classifier, newdata = validationDf[, -10], type = "raw")
PredictROC
PredictROC[, 2]
pred <- prediction(PredictROC[, 2], validationDf$V11)
perf <- performance(pred, "tpr", "fpr")
pred
perf
plot(perf, colorize = TRUE)
plot(perf, colorize = TRUE,
     main = "ROC curve",
     ylab = "Sensitivity",
     xlab = "Specificity",
     print.cutoffs.at = seq(0, 1, 0.3),
     text.adj = c(-0.2, 1.7))
# Area Under the ROC Curve.
auc <- as.numeric(performance(pred, "auc")@y.values)
auc <- round(auc, 3)
auc
|
# Title: Spectral Analysis of Time Series with R
# Review: 2020-01-07T0926 AU
## Loading the data
# Load in the nino3 sea surface temperature data.
# These data are an average of monthly sea surface temperatures over the East Equatorial Pacific.
nino <-
read.table(
"../2_raw-data/nino3data.asc"
, skip = 3
)
names(nino) <- c("Year", "SST", "SSA")
## Plot data
plot(
nino$Year
, nino$SST
, type = "l"
)
# library('ggplot2')
par(mfrow=c(2,1))
plot1 <- ggplot2::ggplot(data = nino) + geom_line(aes(y = SST, x = Year))
plot2 <- ggplot2::ggplot(data = nino) + geom_line(aes(y = SSA, x = Year))
# library('gridExtra')
gridExtra::grid.arrange(plot1, plot2)
# plot of chunk simpleplot plot of chunk simpleplot
# Autocorrelation plots
acf1 <- acf(nino$SST, lag.max = 12 * 20, plot = F)
acf2 <- acf(nino$SSA, lag.max = 12 * 20, plot = F)
plot1 <- ggplot() + geom_line(aes(x = c(acf1$lag)/12, y = c(acf1$acf)))
plot2 <- ggplot() + geom_line(aes(x = c(acf2$lag)/12, y = c(acf2$acf)))
grid.arrange(plot1, plot2)
# plot of chunk unnamed-chunk-1
## Harmonic Regression ####
# The purpose of spectral analysis is to decompose a time series into periodic components. We might consider doing this with a regression, where we regress the time series on a set of sine and cosine waves. For a dataset with annual variation, we might expect that the sine and cosine waves with one year might be important, but what other waves might be present in this time series?
# Let's try a "harmonic regression" in which we regress the time series on harmonics (waves). I've included here annual harmonics, as well as other harmonics, some of which I think might duplicate the El Nino/La Nina cycle around 3-6 years.
# Create dataframe with different harmonics
X <- data.frame(
Year=nino$Year,
y = nino$SST,
sin(2*pi*1*nino$Year), cos(2*pi*1*nino$Year), # sine and cos for frequency = 1
sin(2*pi*2*nino$Year), cos(2*pi*2*nino$Year), # freq. equals 2 (i.e. period= 6 months)
sin(2*pi*1/3*nino$Year), cos(2*pi*1/3*nino$Year), # freq = 1/3 (period=3 years)
sin(2*pi*1/3.5*nino$Year), cos(2*pi*1/3.5*nino$Year), # freq=3.5 (period=3.5 years)
sin(2*pi*1/6*nino$Year), cos(2*pi*1/6*nino$Year), # freq=6 (period=6 years)
sin(2*pi*1.01*nino$Year), cos(2*pi*1.01*nino$Year) # freq=1.01 (period=.99 years)
)
ggplot(data=subset(X, Year>1980)) + geom_line(aes(x=Year, y=X[X$Year>1980,3]))
# plot of chunk unnamed-chunk-2
ggplot(data=subset(X, Year>1980)) + geom_line(aes(x=Year, y=X[X$Year>1980,5]))
# plot of chunk unnamed-chunk-2
ggplot(data=subset(X, Year>1980)) + geom_line(aes(x=Year, y=X[X$Year>1980,7]))
# plot of chunk unnamed-chunk-2
ggplot(data=subset(X, Year>1980)) + geom_line(aes(x=Year, y=X[X$Year>1980,9]))
# plot of chunk unnamed-chunk-2
ggplot(data=subset(X, Year>1980)) + geom_line(aes(x=Year, y=X[X$Year>1980,11]))
# plot of chunk unnamed-chunk-2
# Now that we've created a dataframe that has sines and cosines, we might see how well these predict the data series. We might regress SST on these sines and cosines.
mod <-
lm(
y ~ . - Year
, data = X
) # Regress y on everything (but Year)
summary(mod)
##
## Call:
## lm(formula = y ~ . - Year, data = X)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.0478 -0.5220 -0.0544 0.4492 2.7313
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 25.7119 0.0193 1331.89 < 2e-16 ***
## sin.2...pi...1...nino.Year. 1.2815 0.0281 45.68 < 2e-16 ***
## cos.2...pi...1...nino.Year. -0.2815 0.0275 -10.24 < 2e-16 ***
## sin.2...pi...2...nino.Year. -0.1903 0.0277 -6.88 8.7e-12 ***
## cos.2...pi...2...nino.Year. -0.2229 0.0269 -8.27 2.9e-16 ***
## sin.2...pi...1.3...nino.Year. 0.0984 0.0272 3.61 0.00031 ***
## cos.2...pi...1.3...nino.Year. -0.0625 0.0273 -2.29 0.02235 *
## sin.2...pi...1.3.5...nino.Year. -0.1047 0.0273 -3.84 0.00013 ***
## cos.2...pi...1.3.5...nino.Year. -0.1915 0.0273 -7.01 3.5e-12 ***
## sin.2...pi...1.6...nino.Year. -0.0290 0.0273 -1.06 0.28857
## cos.2...pi...1.6...nino.Year. 0.0632 0.0273 2.32 0.02074 *
## sin.2...pi...1.01...nino.Year. -0.0296 0.0278 -1.07 0.28586
## cos.2...pi...1.01...nino.Year. 0.0203 0.0278 0.73 0.46517
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.752 on 1505 degrees of freedom
## Multiple R-squared: 0.622, Adjusted R-squared: 0.619
## F-statistic: 206 on 12 and 1505 DF, p-value: <2e-16
# What's significant? The 1 year frequency is, but not the 1.01. The twice a year frequency is. The 3, 3.5 and 6 year frequencies are significant. I tried other "El Nino frequencies" too, but they weren't always significant. For example, 4 years was not significant. This trial and error approach is difficult!
X$resid <- residuals(mod)
X$pred <- predict(mod)
ggplot(data = subset(X, Year > 1970)) + geom_line(aes(x = Year, y = y)) + geom_line(aes(x = Year,
y = pred), color = "red")
# plot of chunk unnamed-chunk-4
# We've described the annual variation pretty well. But we haven't picked up a lot of the year-to-year variation.
# Frequency analysis.
# I could extend this regression. If I have N data, and I include N sines and cosines, then my regression will perfectly predict the data. The regression will be overfitted. But I might learn something be seeing which coefficients are significantly different from zero. This is what the “periodogram” tells us.
x11()
raw.spec <-
spec.pgram(
nino$SST
, taper = 0
)
# plot of chunk unnamed-chunk-5
plot(raw.spec)
plot(raw.spec, log = "no")
# plot of chunk unnamed-chunk-5
# spec.df <- as.data.frame(raw.spec)
spec.df <- data.frame(freq = raw.spec$freq, spec = raw.spec$spec)
# Create a vector of periods to label on the graph, units are in years
yrs.period <- rev(c(1/6, 1/5, 1/4, 1/3, 0.5, 1, 3, 5, 10, 100))
yrs.labels <- rev(c("1/6", "1/5", "1/4", "1/3", "1/2", "1", "3", "5", "10", "100"))
yrs.freqs <- 1/yrs.period * 1/12 #Convert annual period to annual freq, and then to monthly freq
spec.df$period <- 1/spec.df$freq
ggplot(data = subset(spec.df)) + geom_line(aes(x = freq, y = spec)) + scale_x_continuous("Period (years)",
breaks = yrs.freqs, labels = yrs.labels) + scale_y_continuous()
# plot of chunk unnamed-chunk-6
# Sometimes the log scaling of the spectrum is more revealing:
ggplot(data = subset(spec.df)) + geom_line(aes(x = freq, y = spec)) + scale_x_continuous("Period (years)", breaks = yrs.freqs, labels = yrs.labels) + scale_y_log10()
# plot of chunk unnamed-chunk-7
# The log-scaling has some theoretical advantages, too. The periodogram values should be approximately normally distributed in the log scale.
# I could clean up the labels at the left end, but sometimes a log scaling can be helpful there, too, because as log scaling will spread out the low frequencies and squish the high frequencies:
ggplot(data = subset(spec.df)) +
geom_line(aes(x = freq, y = spec)) +
scale_x_log10(
"Period (years)"
, breaks = yrs.freqs
, labels = yrs.labels
) +
scale_y_log10()
# plot of chunk unnamed-chunk-8
# Typically, it is the relatively low frequency stuff that is the most interesting. And also the most challenging to resolve. (The overlap of interesting and challenging is not mere coincidence. If it were easy, we'd know all about it and it would stop being interesting!)
# Smoothing the periodogram:
# There is a fundamental problem with the periodogram. Unlike most estimates you've encountered, such as the mean or a regression coefficient, which get more reliable as you collect more data, the periodogram does not get more reliable. As you collect more data, you add more periodogram points, but they are all just as noisy as before.
# We are assuming that there is some underlying curve of spectral values, and that the periodogram estimates this. But the periodogram is noisy, and will always be noisy. We call this underlying curve the “spectral density function,” or sometimes the “power spectrum.”
# The only way to get smooth estimates of the power spectrum is by taking moving averages of the periodogram. In essence, though, we want to give more weight to close frequencies, and little weight to far away frequencies. There are different ways to create weights. You could use a bell curve shape to give weights. You could use a triangle, or a rectangle. There are lots of others too. These are called 'kernel functions.'
plot(kernel("daniell", m = 10)) # A short moving average
# plot of chunk unnamed-chunk-9
plot(kernel("daniell", m = 50)) # A long moving average
# plot of chunk unnamed-chunk-9
plot(kernel("daniell", c(5, 5))) # m=5 moving average of a m=5 moving average
# plot of chunk unnamed-chunk-9
plot(kernel("daniell", c(5, 5, 5))) # a m=5 moving average of that!
# plot of chunk unnamed-chunk-9
plot(kernel("daniell", c(5, 5, 5, 5))) # a m=5 moving average of that!
# plot of chunk unnamed-chunk-9
plot(kernel("daniell", c(9, 9, 9)))
# plot of chunk unnamed-chunk-9
plot(kernel("daniell", c(3, 3, 21)))
# plot of chunk unnamed-chunk-9
k = kernel("daniell", c(9, 9, 9))
smooth.spec <-
spec.pgram(
nino$SST
, kernel = k
, taper = 0
)
# plot of chunk unnamed-chunk-9
# Note how the confidence interval got much narrower
spec.df <- data.frame(freq = smooth.spec$freq, `c(9,9,9)` = smooth.spec$spec)
names(spec.df) <- c("freq", "c(9,9,9)")
# Add other smooths
k <- kernel("daniell", c(9, 9))
spec.df[, "c(9,9)"] <- spec.pgram(nino$SST, kernel = k, taper = 0, plot = FALSE)$spec
k <- kernel("daniell", c(9))
spec.df[, "c(9)"] <- spec.pgram(nino$SST, kernel = k, taper = 0, plot = FALSE)$spec
# melt from wide format into long format
library('reshape2')
spec.df <-
reshape2::melt(
spec.df
, id.vars = "freq"
, value.name = "spec"
, variable.name = "kernel"
)
plot1 <-
ggplot(data = subset(spec.df)) +
geom_path(
aes(
x = freq
, y = spec
, color = kernel
)
) +
scale_x_continuous(
"Period (years)"
, breaks = yrs.freqs
, labels = yrs.labels
) +
scale_y_log10()
plot2 <- ggplot(data = subset(spec.df)) + geom_path(aes(x = freq, y = spec,
color = kernel)) + scale_x_log10("Period (years)", breaks = yrs.freqs, labels = yrs.labels) +
scale_y_log10()
grid.arrange(plot1, plot2)
# plot of chunk unnamed-chunk-9
# When you smooth the periodogram, then the log spacing on frequency is less necessary.
# It is unsurprising that smoothing is a good thing when the true spectral density is smooth, and smoothing is a bad thing when the true spectral density is not smooth. Here, smoothing seems to be a good thing everywhere except for the annual frequency, and it's harmonics. The spikes probably shouldn't be smoothed, but it is what it is.
# What if we repeat with SSA?
k = kernel("daniell", c(9, 9, 9))
smooth.spec <- spec.pgram(nino$SSA, kernel = k, taper = 0, plot = FALSE)
# spec.df <- as.data.frame(smooth.spec)
spec.df <- data.frame(freq = smooth.spec$freq, spec = smooth.spec$spec)
ggplot(data = subset(spec.df)) + geom_line(aes(x = freq, y = spec)) + scale_x_continuous("Period (years)",
breaks = yrs.freqs, labels = yrs.labels) + scale_y_continuous()
# plot of chunk unnamed-chunk-10
# The effect of tapering
# Besides windowing, there is one other 'trick' commonly done when spectral estimating, called tapering. Before describing tapering, let's discuss the problem.
# When you estimate a periodogram, you are implicitly making the assumption that your time series is circular, i.e. that you could wrap the time series around and just keep time marching on until infinity. Obviously, this isn't so. If you wrap the time series around, there will be a jump where the end meets the start again. This jump is spurious, but it will propagate itself through all the frequencies, contaminating them.
# The solution is to downweight the beginning and end of the data. This way, when you calculate the periodogram, you'll be giving more weight to the middle, and less weight to the ends. There is still the jump at the end, but it has very little weight, so it's effect is diminished. This downweighting is called tapering. But how much do you downweight? 5% at each end? 10%? 50% (i.e. the whole thing)?
k = kernel("daniell", c(9, 9))
smooth.spec <- spec.pgram(nino$SSA, kernel = k, taper = 0, plot = FALSE)
spec.df <- data.frame(freq = smooth.spec$freq, `0%` = smooth.spec$spec)
names(spec.df) <- c("freq", "0%")
# Add other tapers
spec.df[, "10%"] <- spec.pgram(nino$SSA, kernel = k, taper = 0.1, plot = FALSE)$spec
spec.df[, "30%"] <- spec.pgram(nino$SSA, kernel = k, taper = 0.3, plot = FALSE)$spec
spec.df <- melt(spec.df, id.vars = "freq", value.name = "spec", variable.name = "taper")
plot1 <- ggplot(data = subset(spec.df)) + geom_path(aes(x = freq, y = spec,
color = taper)) + scale_x_continuous("Period (years)", breaks = yrs.freqs,
labels = yrs.labels) + scale_y_log10()
plot2 <- ggplot(data = subset(spec.df)) + geom_path(aes(x = freq, y = spec,
color = taper)) + scale_x_log10("Period (years)", breaks = yrs.freqs, labels = yrs.labels) +
scale_y_log10()
grid.arrange(plot1, plot2)
# plot of chunk unnamed-chunk-11
# In practice, a 5% (from each side) often works pretty well. Tapering is less important the longer your time series is, but it can be very important in short series.
# I'm not going to cover confidence intervals. The default plotting shows a confidence interval. But, in general, it is difficult to construct meaningful confidence intervals of spectral density estimates.
k <- kernel("daniell", c(2))
spec.df[, "10%"] <- spec.pgram(nino$SSA, taper = 0.05)$spec
# plot of chunk unnamed-chunk-12
# There are another set of spectral density estimates called “multitaper” estimates. Multitaper estimates can have pretty good localization in time. Multitaper esimates have two smoothing parameters, though. In the software, they are called “NW” and “k”. Typically, k is set equal to \( 2NW-1 \), though, so you typically worry about NW only. For any 'true' frequency signal, it will be resolved to within \( \pm NW \) frequency intervals. (kinda like a moving average, only better behaved) This makes it pretty easy to determine how much smoothing you are getting.
# One of the nice things about R's implementation is that confidence intervals are shown:
# Multitaper (Thomson) spectral estimates: nw is the time-bandwidth product,
# k = 2*nw - 1 is the number of tapers; jackknife = TRUE adds confidence bands.
library('multitaper')
# First pass on the anomalies; `mt.spec` is overwritten by the SST call below.
mt.spec <- spec.mtm(nino$SSA, nw = 16, k = 2 * 16 - 1, jackknife = TRUE, dtUnits = "month")
## Warning: Time series is not a ts object and dT is not set. Frequency array
## and axes may be incorrect.
# plot of chunk unnamed-chunk-13
# multitaper can resolve frequencies to about +/- NW/N Hz. i.e 16/1518 Hz
# k is typically equal to 2NW - 1. Higher k is smoother
# deltat = 1/12 declares monthly sampling so the frequency axis is in years.
mt.spec <- spec.mtm(nino$SST, nw = 16, k = 2 * 16 - 1, jackknife = TRUE, deltat = 1/12,
    dtUnits = "year")
# plot of chunk unnamed-chunk-13
# Time-Frequency estimation
# One of the potential shortcomings of spectral analysis is the assumption that the time-series structure is stationary. You might want to evaluate this empirically.
# Intuitively, you could cut your time series into different segments and calculate the periodgram separately for each one. Note, that since each interval is now shorter, you will have (1) less resolution between frequencies, and (2) you won't be able to detect low frequency stuff as easily.
# Now, you could imagine letting those segments overlap. This will allow you to see how periodogram is changing at various times. Finally, rather than just choosing segments, (where every datapoint in a segment gets a “weight” of 1, and every data point outside gets a weight of 0), you could imagine choosing segments by smoothly weighting points; giving more weight to the nearby time points, and less weight to the distant time points. This is precistly what wavelets do.
# There are many types of wavelets. Not all of them estimate the periodogram. Some of them estimate, slope, for example. But one that estimates the periodogram is called the “morlet” wavelet. And the resulting plot is called a spectrogram.
# Morlet wavelet spectrogram of the SST series (time-frequency view).
library('dplR')
wave.out <-
  dplR::morlet(
    y1 = nino$SST
    , x1 = nino$Year # seq_along(y1)
    , p2 = 8         # largest scale: 2^8 = 256 months
    , dj = 0.1       # scale (frequency) resolution
    , siglvl = 0.95 # significance level
  )
# p2=6 <=> estimate out to 2^8 = 256 months dj <=> controls the frequency
# resolution hack the period estimate to be in years, not months
wave.out$period <- wave.out$period/12
# Contour levels at power quantiles, so each band covers a fixed data fraction.
levs <- quantile(wave.out$Power, c(0, 0.25, 0.5, 0.75, 0.95, 1))
wavelet.plot(
  wave.out
  , wavelet.levels = levs
  , crn.ylim = c(22.5, 30)  # explicit y-limits for the series panel
)
# plot of chunk unnamed-chunk-14
# I had to specify the y axis limit for the time series - it wasn't displaying properly otherwise.
# We see that the annual component is strong at all time periods. There is a strong component at 3-7 years. That would be what we call El Nino. But it is noticeably absent between 1920 and 1960. This seemed to be a period of weakening in the El Nino/La Nina cycle. As far as I know, we don't quite understand why this is, yet. There also seems to be something going on at 12-16 years. Hmmm.
# We can also calculate the “averaged” wavelet. If we calculate the average across all times, we should get another estimate of the spectral density function.
# Time-averaged wavelet power per scale: another spectral-density estimate.
# Note the x-axis is PERIOD, so long periods (low frequencies) sit on the right.
wave.avg <- data.frame(power = apply(wave.out$Power, 2, mean), period = (wave.out$period))
plot(wave.avg$period, wave.avg$power, type = "l")
# plot of chunk unnamed-chunk-15
# Note that we have plotted here period, rather than frequency. Thus, the high frequency stuff is on the left, not the right.
# That's all! | /times-series_analysis-master/7_sources/ts_spectral-analysis/1_code/ts_spectral-analysis.R | no_license | tarasevic-r/Vibro-acoustic | R | false | false | 18,317 | r | # Title: Spectral Analysis of Time Series with R
# Review: 2020-01-07T0926 AU
## Loading the data
# Load in the nino3 sea surface temperature data.
# These data are an average of monthly sea surface temperatures over the East Equatorial Pacific.
# Read the nino3 data; columns are decimal year, sea surface temperature
# (SST) and its seasonally adjusted anomaly (SSA).
# NOTE(review): path is relative to the script's working directory -- confirm
# the data file location before running.
nino <-
  read.table(
    "../2_raw-data/nino3data.asc"
    , skip = 3  # skip the file's header lines
  )
names(nino) <- c("Year", "SST", "SSA")
## Plot data
plot(
  nino$Year
  , nino$SST
  , type = "l"
)
# Attach the plotting packages: later calls in this script use ggplot(),
# geom_line(), aes() and grid.arrange() unqualified, so the packages must be
# attached (the original commented these library() calls out, which breaks a
# fresh session).
library('ggplot2')
library('gridExtra')
# NOTE(review): the original set par(mfrow = c(2, 1)) here; that only affects
# base graphics and is a no-op for grid/ggplot output, so it is dropped --
# grid.arrange() below produces the two-row layout.
plot1 <- ggplot(data = nino) + geom_line(aes(y = SST, x = Year))
plot2 <- ggplot(data = nino) + geom_line(aes(y = SSA, x = Year))
grid.arrange(plot1, plot2)
# plot of chunk simpleplot plot of chunk simpleplot
# Autocorrelation functions out to a 20-year lag (lags are in months);
# divide lags by 12 below to plot them in years.
acf1 <- acf(nino$SST, lag.max = 12 * 20, plot = FALSE)
acf2 <- acf(nino$SSA, lag.max = 12 * 20, plot = FALSE)
plot1 <- ggplot() + geom_line(aes(x = c(acf1$lag)/12, y = c(acf1$acf)))
plot2 <- ggplot() + geom_line(aes(x = c(acf2$lag)/12, y = c(acf2$acf)))
grid.arrange(plot1, plot2)
# plot of chunk unnamed-chunk-1
# plot of chunk unnamed-chunk-1
## Harmonic Regression ####
# The purpose of spectral analysis is to decompose a time series into periodic components. We might consider doing this with a regression, where we regress the time series on a set of sine and cosine waves. For a dataset with annual variation, we might expect that the sine and cosine waves with one year might be important, but what other waves might be present in this time series?
# Let's try a “harmonic regression'' in which we regress the time series on harmonics (waves). I've included here annual harmonics, as well as other harmonics, some of which I think might duplicate the El Nino/La Nina cycle around 3-6 years.
# Create dataframe with different harmonics
# Design matrix for the harmonic regression: response y = SST plus sine and
# cosine pairs at hand-picked frequencies (cycles per year).
X <- data.frame(
  Year=nino$Year,
  y = nino$SST,
  sin(2*pi*1*nino$Year), cos(2*pi*1*nino$Year), # sine and cos for frequency = 1
  sin(2*pi*2*nino$Year), cos(2*pi*2*nino$Year), # freq. equals 2 (i.e. period= 6 months)
  sin(2*pi*1/3*nino$Year), cos(2*pi*1/3*nino$Year), # freq = 1/3 (period=3 years)
  sin(2*pi*1/3.5*nino$Year), cos(2*pi*1/3.5*nino$Year), # freq=3.5 (period=3.5 years)
  sin(2*pi*1/6*nino$Year), cos(2*pi*1/6*nino$Year), # freq=6 (period=6 years)
  sin(2*pi*1.01*nino$Year), cos(2*pi*1.01*nino$Year) # freq=1.01 (period=.99 years)
)
# Quick look at individual harmonics (columns addressed by position) for the
# post-1980 window.
ggplot(data=subset(X, Year>1980)) + geom_line(aes(x=Year, y=X[X$Year>1980,3]))
# plot of chunk unnamed-chunk-2
ggplot(data=subset(X, Year>1980)) + geom_line(aes(x=Year, y=X[X$Year>1980,5]))
# plot of chunk unnamed-chunk-2
ggplot(data=subset(X, Year>1980)) + geom_line(aes(x=Year, y=X[X$Year>1980,7]))
# plot of chunk unnamed-chunk-2
ggplot(data=subset(X, Year>1980)) + geom_line(aes(x=Year, y=X[X$Year>1980,9]))
# plot of chunk unnamed-chunk-2
ggplot(data=subset(X, Year>1980)) + geom_line(aes(x=Year, y=X[X$Year>1980,11]))
# plot of chunk unnamed-chunk-2
# Now that we've created a dataframe that has sines and cosines, we might see how well these predict the data series. We might regress SST on these sines and cosines.
# Harmonic regression: SST on every sine/cosine column (Year excluded).
mod <-
  lm(
    y ~ . - Year
    , data = X
  ) # Regress y on everything (but Year)
summary(mod)
##
## Call:
## lm(formula = y ~ . - Year, data = X)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.0478 -0.5220 -0.0544 0.4492 2.7313
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 25.7119 0.0193 1331.89 < 2e-16 ***
## sin.2...pi...1...nino.Year. 1.2815 0.0281 45.68 < 2e-16 ***
## cos.2...pi...1...nino.Year. -0.2815 0.0275 -10.24 < 2e-16 ***
## sin.2...pi...2...nino.Year. -0.1903 0.0277 -6.88 8.7e-12 ***
## cos.2...pi...2...nino.Year. -0.2229 0.0269 -8.27 2.9e-16 ***
## sin.2...pi...1.3...nino.Year. 0.0984 0.0272 3.61 0.00031 ***
## cos.2...pi...1.3...nino.Year. -0.0625 0.0273 -2.29 0.02235 *
## sin.2...pi...1.3.5...nino.Year. -0.1047 0.0273 -3.84 0.00013 ***
## cos.2...pi...1.3.5...nino.Year. -0.1915 0.0273 -7.01 3.5e-12 ***
## sin.2...pi...1.6...nino.Year. -0.0290 0.0273 -1.06 0.28857
## cos.2...pi...1.6...nino.Year. 0.0632 0.0273 2.32 0.02074 *
## sin.2...pi...1.01...nino.Year. -0.0296 0.0278 -1.07 0.28586
## cos.2...pi...1.01...nino.Year. 0.0203 0.0278 0.73 0.46517
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.752 on 1505 degrees of freedom
## Multiple R-squared: 0.622, Adjusted R-squared: 0.619
## F-statistic: 206 on 12 and 1505 DF, p-value: <2e-16
# What's significant? The 1 year frequency is, but not the 1.01. The twice a year frequency is. The 3, 3.5 and 6 year frequenceis are significant. I tried other "El Nino frequencies” too, but they weren't always significant. For example, 4 years was not significant. This trial and error approach is difficult!
# Store residuals and fitted values, then overlay the fit (red) on the data
# for the post-1970 window.
X$resid <- residuals(mod)
X$pred <- predict(mod)
ggplot(data = subset(X, Year > 1970)) + geom_line(aes(x = Year, y = y)) + geom_line(aes(x = Year,
    y = pred), color = "red")
# plot of chunk unnamed-chunk-4
# We've described the annual variation pretty well. But we haven't picked up a lot of the year-to-year variation.
# Frequency analysis.
# I could extend this regression. If I have N data, and I include N sines and cosines, then my regression will perfectly predict the data. The regression will be overfitted. But I might learn something be seeing which coefficients are significantly different from zero. This is what the “periodogram” tells us.
# Open a fresh graphics device portably.  The original used x11(), which is
# Unix/X11-specific and fails on Windows/macOS or headless sessions;
# dev.new() picks the platform's default device.
dev.new()
# Raw (unsmoothed, untapered) periodogram of the SST series; spec.pgram()
# plots by default, hence the device above.
raw.spec <-
  spec.pgram(
    nino$SST
    , taper = 0
  )
# plot of chunk unnamed-chunk-5
plot(raw.spec)            # default log-y spectrum plot
plot(raw.spec, log = "no")  # same spectrum on a linear y-axis
# plot of chunk unnamed-chunk-5
# spec.df <- as.data.frame(raw.spec)
spec.df <- data.frame(freq = raw.spec$freq, spec = raw.spec$spec)
# Create a vector of periods to label on the graph, units are in years
yrs.period <- rev(c(1/6, 1/5, 1/4, 1/3, 0.5, 1, 3, 5, 10, 100))
yrs.labels <- rev(c("1/6", "1/5", "1/4", "1/3", "1/2", "1", "3", "5", "10", "100"))
yrs.freqs <- 1/yrs.period * 1/12 #Convert annual period to annual freq, and then to monthly freq
spec.df$period <- 1/spec.df$freq
# Periodogram with the x-axis labelled in years rather than cycles/month.
ggplot(data = subset(spec.df)) + geom_line(aes(x = freq, y = spec)) + scale_x_continuous("Period (years)",
    breaks = yrs.freqs, labels = yrs.labels) + scale_y_continuous()
# plot of chunk unnamed-chunk-6
# Sometimes the log scaling of the spectrum is more revealing:
ggplot(data = subset(spec.df)) + geom_line(aes(x = freq, y = spec)) + scale_x_continuous("Period (years)", breaks = yrs.freqs, labels = yrs.labels) + scale_y_log10()
# plot of chunk unnamed-chunk-7
# The log-scaling has some theoretical advantages, too. The periodogram values should be approximately normally distributed in the log scale.
# I could clean up the labels at the left end, but sometimes a log scaling can be helpful there, too, because as log scaling will spread out the low frequencies and squish the high frequencies:
# Log-log view of the raw periodogram: the log x-axis spreads out the low
# frequencies (long periods) and compresses the high ones.
ggplot(data = subset(spec.df)) + geom_line(aes(x = freq, y = spec)) +
  scale_x_log10("Period (years)", breaks = yrs.freqs, labels = yrs.labels) +
  scale_y_log10()
# plot of chunk unnamed-chunk-8
# Typically, it is the relatively low frequency stuff that is the most interesting. And also the most challenging to resolve. (The overlap of interesting and challenging is not mere concidence. If it were easy, we know all about it and it would stop being interesting!)
# Smoothing the periodogram:
# There is a fundamental problem with the periodogram. Unlike most estimates you've encountered, such as the mean or a regression coefficient, which get more reliable as you collect more data, the periodogram does not get more reliable. As you collect more data, you add more periodogram points, but they are all just as noisy as before.
# We are assuming that there is some underlying curve of spectral values, and that the periodogram estimates this. But the periodogram is noisy, and will always be noisy. We call this underlying curve the “spectral density function,” or sometimes the “power spectrum.”
# The only way to get smooth estimates of the power spectrum is by taking moving averages of the periodogram. In essence, though, we want to give more weight to close frequencies, and little weight to far away frequencies. There are different ways to create weights. You could use a bell curve shape to give weights. You could use a triangle, or a rectangle. There are lots of others too. These are called 'kernel functions.'
# Daniell kernels of increasing width/convolution depth: repeated moving
# averages approach a bell shape.
plot(kernel("daniell", m = 10)) # A short moving average
# plot of chunk unnamed-chunk-9
plot(kernel("daniell", m = 50)) # A long moving average
# plot of chunk unnamed-chunk-9
plot(kernel("daniell", c(5, 5))) # m=5 moving average of a m=5 moving average
# plot of chunk unnamed-chunk-9
plot(kernel("daniell", c(5, 5, 5))) # a m=5 moving average of that!
# plot of chunk unnamed-chunk-9
plot(kernel("daniell", c(5, 5, 5, 5))) # a m=5 moving average of that!
# plot of chunk unnamed-chunk-9
plot(kernel("daniell", c(9, 9, 9)))
# plot of chunk unnamed-chunk-9
plot(kernel("daniell", c(3, 3, 21)))
# plot of chunk unnamed-chunk-9
# Smoothed periodogram of SST using the widest kernel above.
k = kernel("daniell", c(9, 9, 9))
smooth.spec <-
  spec.pgram(
    nino$SST
    , kernel = k
    , taper = 0
  )
# plot of chunk unnamed-chunk-9
# Note how the confidence interval got much narrower
# Collect estimates for three kernel widths, one column per kernel.
spec.df <- data.frame(freq = smooth.spec$freq, `c(9,9,9)` = smooth.spec$spec)
names(spec.df) <- c("freq", "c(9,9,9)")  # restore the backtick-quoted label
# Add other smooths
k <- kernel("daniell", c(9, 9))
spec.df[, "c(9,9)"] <- spec.pgram(nino$SST, kernel = k, taper = 0, plot = FALSE)$spec
k <- kernel("daniell", c(9))
spec.df[, "c(9)"] <- spec.pgram(nino$SST, kernel = k, taper = 0, plot = FALSE)$spec
# melt from wide format into long format
library('reshape2')
spec.df <-
  reshape2::melt(
    spec.df
    , id.vars = "freq"
    , value.name = "spec"
    , variable.name = "kernel"
  )
# Linear-x and log-x panels, coloured by kernel width.
plot1 <-
  ggplot(data = subset(spec.df)) +
  geom_path(
    aes(
      x = freq
      , y = spec
      , color = kernel
    )
  ) +
  scale_x_continuous(
    "Period (years)"
    , breaks = yrs.freqs
    , labels = yrs.labels
  ) +
  scale_y_log10()
plot2 <- ggplot(data = subset(spec.df)) + geom_path(aes(x = freq, y = spec,
    color = kernel)) + scale_x_log10("Period (years)", breaks = yrs.freqs, labels = yrs.labels) +
    scale_y_log10()
grid.arrange(plot1, plot2)
# plot of chunk unnamed-chunk-9
# When you smooth the periodogram, then the log spacing on frequency is less necessary.
# It is unsurprising that smoothing is a good thing when the true spectral density is smooth, and smoothing is a bad thing when the true spectral density is not smooth. Here, smoothing seems to be a good thing everywhere except for the annual frequency, and it's harmonics. The spikes probably shouldn't be smoothed, but it is what it is.
# What if we repeat with SSA?
# Same smoothed-periodogram estimate, applied to the anomaly series (SSA).
k = kernel("daniell", c(9, 9, 9))
smooth.spec <- spec.pgram(nino$SSA, kernel = k, taper = 0, plot = FALSE)
# spec.df <- as.data.frame(smooth.spec)
spec.df <- data.frame(freq = smooth.spec$freq, spec = smooth.spec$spec)
ggplot(data = subset(spec.df)) + geom_line(aes(x = freq, y = spec)) + scale_x_continuous("Period (years)",
    breaks = yrs.freqs, labels = yrs.labels) + scale_y_continuous()
# plot of chunk unnamed-chunk-10
# The effect of tapering
# Besides windowing, there is one other 'trick' commonly done when spectral estimating, called tapering. Before describing tapering, let's discuss the problem.
# When you estimate a periodogram, you are implicitly making the assumption that your time series is circular, i.e. that you could wrap the time series around and just keep time marching on until infinity. Obviously, this isn't so. If you wrap the time series around, there will be a jump where the end meets the start again. This jump is spurious, but it will propagate itself through all the frequencies, contaminating them.
# The solution is to downweight the beginning and end of the data. This way, when you calculate the periodogram, you'll be giving more weight to the middle, and less weight to the ends. There is still the jump at the end, but it has very little weight, so it's effect is diminished. This downweighting is called tapering. But how much do you downweight? 5% at each end? 10%? 50% (i.e. the whole thing)?
# Compare taper fractions (0%, 10%, 30% from each end) on the smoothed
# periodogram of the SSA series; tapering down-weights the series ends to
# damp the spurious jump where the data "wraps around".
k = kernel("daniell", c(9, 9))
smooth.spec <- spec.pgram(nino$SSA, kernel = k, taper = 0, plot = FALSE)
spec.df <- data.frame(freq = smooth.spec$freq, `0%` = smooth.spec$spec)
names(spec.df) <- c("freq", "0%")  # data.frame() mangles the `0%` name; restore it
# Add other tapers
spec.df[, "10%"] <- spec.pgram(nino$SSA, kernel = k, taper = 0.1, plot = FALSE)$spec
spec.df[, "30%"] <- spec.pgram(nino$SSA, kernel = k, taper = 0.3, plot = FALSE)$spec
# Long format (one row per freq/taper pair) so taper maps to colour below.
spec.df <- melt(spec.df, id.vars = "freq", value.name = "spec", variable.name = "taper")
plot1 <- ggplot(data = subset(spec.df)) + geom_path(aes(x = freq, y = spec,
    color = taper)) + scale_x_continuous("Period (years)", breaks = yrs.freqs,
    labels = yrs.labels) + scale_y_log10()
plot2 <- ggplot(data = subset(spec.df)) + geom_path(aes(x = freq, y = spec,
    color = taper)) + scale_x_log10("Period (years)", breaks = yrs.freqs, labels = yrs.labels) +
    scale_y_log10()
# Linear-x panel on top, log-x panel below.
grid.arrange(plot1, plot2)
# plot of chunk unnamed-chunk-11
# In practice, a 5% (from each side) often works pretty well. Tapering is less important the longer your time series is, but it can be very important in short series.
# I'm not going to cover confidence intervals. The default plotting shows a confidence interval. But, in general, it is difficult to construct meaningful confidence intervals of spectral density estimates.
# Bug fix for a leftover chunk: the original built kernel `k` but never
# passed it to spec.pgram(), labelled a 5% taper as "10%", silently recycled
# the per-frequency result into the already-melted long-format spec.df, and
# produced an unwanted plot.  Store the 5%-taper estimate in its own vector.
k <- kernel("daniell", c(2))
taper5.spec <- spec.pgram(nino$SSA, kernel = k, taper = 0.05, plot = FALSE)$spec
# plot of chunk unnamed-chunk-12
# There are another set of spectral density estimates called “multitaper” estimates. Multitaper estimates can have pretty good localization in time. Multitaper esimates have two smoothing parameters, though. In the software, they are called “NW” and “k”. Typically, k is set equal to \( 2NW-1 \), though, so you typically worry about NW only. For any 'true' frequency signal, it will be resolved to within \( \pm NW \) frequency intervals. (kinda like a moving average, only better behaved) This makes it pretty easy to determine how much smoothing you are getting.
# One of the nice things about R's implementation is that confidence intervals are shown:
# Multitaper (Thomson) spectral estimates: nw is the time-bandwidth product,
# k = 2*nw - 1 is the number of tapers; jackknife = TRUE adds confidence bands.
library('multitaper')
# First pass on the anomalies; `mt.spec` is overwritten by the SST call below.
mt.spec <- spec.mtm(nino$SSA, nw = 16, k = 2 * 16 - 1, jackknife = TRUE, dtUnits = "month")
## Warning: Time series is not a ts object and dT is not set. Frequency array
## and axes may be incorrect.
# plot of chunk unnamed-chunk-13
# multitaper can resolve frequencies to about +/- NW/N Hz. i.e 16/1518 Hz
# k is typically equal to 2NW - 1. Higher k is smoother
# deltat = 1/12 declares monthly sampling so the frequency axis is in years.
mt.spec <- spec.mtm(nino$SST, nw = 16, k = 2 * 16 - 1, jackknife = TRUE, deltat = 1/12,
    dtUnits = "year")
# plot of chunk unnamed-chunk-13
# Time-Frequency estimation
# One of the potential shortcomings of spectral analysis is the assumption that the time-series structure is stationary. You might want to evaluate this empirically.
# Intuitively, you could cut your time series into different segments and calculate the periodgram separately for each one. Note, that since each interval is now shorter, you will have (1) less resolution between frequencies, and (2) you won't be able to detect low frequency stuff as easily.
# Now, you could imagine letting those segments overlap. This will allow you to see how periodogram is changing at various times. Finally, rather than just choosing segments, (where every datapoint in a segment gets a “weight” of 1, and every data point outside gets a weight of 0), you could imagine choosing segments by smoothly weighting points; giving more weight to the nearby time points, and less weight to the distant time points. This is precistly what wavelets do.
# There are many types of wavelets. Not all of them estimate the periodogram. Some of them estimate, slope, for example. But one that estimates the periodogram is called the “morlet” wavelet. And the resulting plot is called a spectrogram.
# Morlet wavelet spectrogram of the SST series (time-frequency view).
library('dplR')
wave.out <-
  dplR::morlet(
    y1 = nino$SST
    , x1 = nino$Year # seq_along(y1)
    , p2 = 8         # largest scale: 2^8 = 256 months
    , dj = 0.1       # scale (frequency) resolution
    , siglvl = 0.95 # significance level
  )
# p2=6 <=> estimate out to 2^8 = 256 months dj <=> controls the frequency
# resolution hack the period estimate to be in years, not months
wave.out$period <- wave.out$period/12
# Contour levels at power quantiles, so each band covers a fixed data fraction.
levs <- quantile(wave.out$Power, c(0, 0.25, 0.5, 0.75, 0.95, 1))
wavelet.plot(
  wave.out
  , wavelet.levels = levs
  , crn.ylim = c(22.5, 30)  # explicit y-limits for the series panel
)
# plot of chunk unnamed-chunk-14
# I had to specify the y axis limit for the time series - it wasn't displaying properly otherwise.
# We see that the annual component is strong at all time periods. There is a strong component at 3-7 years. That would be what we call El Nino. But it is noticeably absent between 1920 and 1960. This seemed to be a period of weakening in the El Nino/La Nina cycle. As far as I know, we don't quite understand why this is, yet. There also seems to be something going on at 12-16 years. Hmmm.
# We can also calculate the “averaged” wavelet. If we calculate the average across all times, we should get another estimate of the spectral density function.
# Time-averaged wavelet power per scale: another spectral-density estimate.
# Note the x-axis is PERIOD, so long periods (low frequencies) sit on the right.
wave.avg <- data.frame(power = apply(wave.out$Power, 2, mean), period = (wave.out$period))
plot(wave.avg$period, wave.avg$power, type = "l")
# plot of chunk unnamed-chunk-15
# Note that we have plotted here period, rather than frequency. Thus, the high frequency stuff is on the left, not the right.
# That's all! |
#!/usr/bin/Rscript --no-save
# quantstrat backtest setup: two single-symbol portfolios on AAPL daily bars
# ("green" trades the long side of an SMA crossover, "red" the short side)
# under a single account.
initDate <- '2002-10-21'  # must predate the first bar for the init* calls
.from <- '2010-01-01'
.to <- '2010-12-31'
initEq <- 100000          # NOTE(review): defined but never used below
####
green <- 'green'
red <- 'red'
a <- 'IB1'                # account name
###
# library() fails loudly if quantstrat is missing; the original require()
# only returns FALSE and would let the script fail later with a worse error.
library(quantstrat)
getSymbols('AAPL', from=.from, to=.to, verbose=FALSE)
###
initPortf(green, symbols='AAPL', initDate=initDate, currency='USD')
initPortf(red , symbols='AAPL', initDate=initDate, currency='USD')
initAcct(a, portfolios=c(green, red), initDate=initDate, currency='USD')
###
initOrders(green, initDate=initDate)
initOrders(red , initDate=initDate)
# Two mirror-image SMA-crossover strategies: "green" enters long when the
# fast SMA crosses at/above the slow SMA, "red" enters short on the reverse
# cross.  Each exits its whole position on the opposite signal.
green.bee <- strategy(green)
red.bee <- strategy(red)
# indicators: 10- and 30-bar simple moving averages of the close
green.bee <- add.indicator(green.bee, name="SMA", arguments = list(x = quote(Cl(mktdata)), n=10), label="SmaFAST")
green.bee <- add.indicator(green.bee, name="SMA", arguments = list(x = quote(Cl(mktdata)), n=30), label="SmaSLOW")
red.bee <- add.indicator(red.bee, name="SMA", arguments = list(x = quote(Cl(mktdata)), n=10), label="SmaFAST")
red.bee <- add.indicator(red.bee, name="SMA", arguments = list(x = quote(Cl(mktdata)), n=30), label="SmaSLOW")
# signals: crossover events between the two SMAs
green.bee <- add.signal(green.bee, 'sigCrossover', arguments = list(columns=c("SmaFAST","SmaSLOW"), relationship="gte"), label='fast.gt.up')
green.bee <- add.signal(green.bee, 'sigCrossover', arguments = list(columns=c("SmaFAST","SmaSLOW"), relationship="lt"), label='fast.lt.dn')
red.bee <- add.signal(red.bee, 'sigCrossover', arguments = list(columns=c("SmaFAST","SmaSLOW"), relationship="gte"), label='fast.gt.up')
red.bee <- add.signal(red.bee, 'sigCrossover', arguments = list(columns=c("SmaFAST","SmaSLOW"), relationship="lt"), label='fast.lt.dn')
# rules
# green: buy 100 shares at market on an upward cross ...
green.bee <- add.rule(
    strategy = green.bee,
    name='ruleSignal',
    arguments = list(sigcol="fast.gt.up",
    sigval=TRUE,
    orderqty=100,
    ordertype='market',
    orderside='long'),
    type='enter',
    label='EnterLONG')
# ... and flatten the long position on a downward cross.
green.bee <- add.rule(
    strategy = green.bee,
    name='ruleSignal',
    arguments = list(sigcol="fast.lt.dn",
    sigval=TRUE,
    orderqty='all',
    ordertype='market',
    orderside='long'),
    type='exit',
    label='ExitLONG')
# red: sell short 100 shares on a downward cross ...
red.bee <- add.rule(
    strategy = red.bee,
    name='ruleSignal',
    arguments = list(sigcol="fast.lt.dn",
    sigval=TRUE,
    orderqty=-100,
    ordertype='market',
    orderside='short'),
    type='enter',
    label='EnterSHORT')
# ... and cover the short on an upward cross.
red.bee <- add.rule(
    strategy = red.bee,
    name='ruleSignal',
    arguments = list(sigcol="fast.gt.up",
    sigval=TRUE,
    orderqty='all',
    ordertype='market',
    orderside='short'),
    type='exit',
    label='ExitSHORT')
#
# Run both backtests (orders priced at the next bar's open), then report the
# order books, transactions and realized P&L for each side.
applyStrategy(green.bee, green, prefer='Open', verbose = FALSE)
applyStrategy(red.bee, red, prefer='Open', verbose = FALSE)
print(getOrderBook(green))
print(getOrderBook(red))
green.txns <- getTxns(green, 'AAPL')
red.txns <- getTxns(red, 'AAPL')
green.txns
red.txns
cat('Net profit from long side:', sum(green.txns$Net.Txn.Realized.PL), '\n')
cat('Net profit from short side:', sum(red.txns$Net.Txn.Realized.PL), '\n')
| /quantstrat/open3.r | no_license | Jicheng-Yan/rfinance2012 | R | false | false | 4,038 | r | #!/usr/bin/Rscript --no-save
# quantstrat backtest setup: two single-symbol portfolios on AAPL daily bars
# ("green" trades the long side of an SMA crossover, "red" the short side)
# under a single account.
initDate <- '2002-10-21'  # must predate the first bar for the init* calls
.from <- '2010-01-01'
.to <- '2010-12-31'
initEq <- 100000          # NOTE(review): defined but never used below
####
green <- 'green'
red <- 'red'
a <- 'IB1'                # account name
###
# library() fails loudly if quantstrat is missing; the original require()
# only returns FALSE and would let the script fail later with a worse error.
library(quantstrat)
getSymbols('AAPL', from=.from, to=.to, verbose=FALSE)
###
initPortf(green, symbols='AAPL', initDate=initDate, currency='USD')
initPortf(red , symbols='AAPL', initDate=initDate, currency='USD')
initAcct(a, portfolios=c(green, red), initDate=initDate, currency='USD')
###
initOrders(green, initDate=initDate)
initOrders(red , initDate=initDate)
# Two mirror-image SMA-crossover strategies: "green" enters long when the
# fast SMA crosses at/above the slow SMA, "red" enters short on the reverse
# cross.  Each exits its whole position on the opposite signal.
green.bee <- strategy(green)
red.bee <- strategy(red)
# indicators: 10- and 30-bar simple moving averages of the close
green.bee <- add.indicator(green.bee, name="SMA", arguments = list(x = quote(Cl(mktdata)), n=10), label="SmaFAST")
green.bee <- add.indicator(green.bee, name="SMA", arguments = list(x = quote(Cl(mktdata)), n=30), label="SmaSLOW")
red.bee <- add.indicator(red.bee, name="SMA", arguments = list(x = quote(Cl(mktdata)), n=10), label="SmaFAST")
red.bee <- add.indicator(red.bee, name="SMA", arguments = list(x = quote(Cl(mktdata)), n=30), label="SmaSLOW")
# signals: crossover events between the two SMAs
green.bee <- add.signal(green.bee, 'sigCrossover', arguments = list(columns=c("SmaFAST","SmaSLOW"), relationship="gte"), label='fast.gt.up')
green.bee <- add.signal(green.bee, 'sigCrossover', arguments = list(columns=c("SmaFAST","SmaSLOW"), relationship="lt"), label='fast.lt.dn')
red.bee <- add.signal(red.bee, 'sigCrossover', arguments = list(columns=c("SmaFAST","SmaSLOW"), relationship="gte"), label='fast.gt.up')
red.bee <- add.signal(red.bee, 'sigCrossover', arguments = list(columns=c("SmaFAST","SmaSLOW"), relationship="lt"), label='fast.lt.dn')
# rules
# green: buy 100 shares at market on an upward cross ...
green.bee <- add.rule(
    strategy = green.bee,
    name='ruleSignal',
    arguments = list(sigcol="fast.gt.up",
    sigval=TRUE,
    orderqty=100,
    ordertype='market',
    orderside='long'),
    type='enter',
    label='EnterLONG')
# ... and flatten the long position on a downward cross.
green.bee <- add.rule(
    strategy = green.bee,
    name='ruleSignal',
    arguments = list(sigcol="fast.lt.dn",
    sigval=TRUE,
    orderqty='all',
    ordertype='market',
    orderside='long'),
    type='exit',
    label='ExitLONG')
# red: sell short 100 shares on a downward cross ...
red.bee <- add.rule(
    strategy = red.bee,
    name='ruleSignal',
    arguments = list(sigcol="fast.lt.dn",
    sigval=TRUE,
    orderqty=-100,
    ordertype='market',
    orderside='short'),
    type='enter',
    label='EnterSHORT')
# ... and cover the short on an upward cross.
red.bee <- add.rule(
    strategy = red.bee,
    name='ruleSignal',
    arguments = list(sigcol="fast.gt.up",
    sigval=TRUE,
    orderqty='all',
    ordertype='market',
    orderside='short'),
    type='exit',
    label='ExitSHORT')
#
# Run both backtests (orders priced at the next bar's open), then report the
# order books, transactions and realized P&L for each side.
applyStrategy(green.bee, green, prefer='Open', verbose = FALSE)
applyStrategy(red.bee, red, prefer='Open', verbose = FALSE)
print(getOrderBook(green))
print(getOrderBook(red))
green.txns <- getTxns(green, 'AAPL')
red.txns <- getTxns(red, 'AAPL')
green.txns
red.txns
cat('Net profit from long side:', sum(green.txns$Net.Txn.Realized.PL), '\n')
cat('Net profit from short side:', sum(red.txns$Net.Txn.Realized.PL), '\n')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.