content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Principal Components Analysis
# Explore the USArrests data (one row per US state, built into R).
states <- row.names(USArrests)
states
names(USArrests)
# Column means and variances differ by orders of magnitude,
# so the variables must be standardized before PCA.
apply(USArrests, 2, mean)
apply(USArrests, 2, var)
# PCA on the standardized variables.
pr.out <- prcomp(USArrests, scale = TRUE)
names(pr.out)
pr.out$center
pr.out$scale
pr.out$rotation
dim(pr.out$x)
biplot(pr.out, scale = 0)
# Flip the signs of loadings and scores: principal components are only
# determined up to sign, so this yields an equivalent solution.
pr.out$rotation <- -pr.out$rotation
pr.out$x <- -pr.out$x
biplot(pr.out, scale = 0)
# Proportion of variance explained (PVE) by each component.
pr.out$sdev
pr.var <- pr.out$sdev^2
pr.var
pve <- pr.var / sum(pr.var)
pve
plot(pve, xlab = "Principal Component",
     ylab = "Proportion of Variance Explained", ylim = c(0, 1), type = "b")
plot(cumsum(pve), xlab = "Principal Component",
     ylab = "Cumulative Proportion of Variance Explained",
     ylim = c(0, 1), type = "b")
# Small demo of cumsum() on an arbitrary vector.
a <- c(1, 2, 8, -3)
cumsum(a)
|
/PCA.r
|
no_license
|
rushilgoyal/Principal_Component_Analysis
|
R
| false
| false
| 654
|
r
|
# Principal Components Analysis
# Explore the USArrests data (one row per US state, built into R).
states <- row.names(USArrests)
states
names(USArrests)
# Column means and variances differ by orders of magnitude,
# so the variables must be standardized before PCA.
apply(USArrests, 2, mean)
apply(USArrests, 2, var)
# PCA on the standardized variables.
pr.out <- prcomp(USArrests, scale = TRUE)
names(pr.out)
pr.out$center
pr.out$scale
pr.out$rotation
dim(pr.out$x)
biplot(pr.out, scale = 0)
# Flip the signs of loadings and scores: principal components are only
# determined up to sign, so this yields an equivalent solution.
pr.out$rotation <- -pr.out$rotation
pr.out$x <- -pr.out$x
biplot(pr.out, scale = 0)
# Proportion of variance explained (PVE) by each component.
pr.out$sdev
pr.var <- pr.out$sdev^2
pr.var
pve <- pr.var / sum(pr.var)
pve
plot(pve, xlab = "Principal Component",
     ylab = "Proportion of Variance Explained", ylim = c(0, 1), type = "b")
plot(cumsum(pve), xlab = "Principal Component",
     ylab = "Cumulative Proportion of Variance Explained",
     ylim = c(0, 1), type = "b")
# Small demo of cumsum() on an arbitrary vector.
a <- c(1, 2, 8, -3)
cumsum(a)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/UNRATENSA.R
\docType{data}
\name{UNRATENSA}
\alias{UNRATENSA}
\title{Civilian Unemployment Rate}
\format{
An \code{\link{xts}} object of the Civilian Unemployment Rate.
\itemize{
\item\strong{Release:} {Employment Situation}
\item\strong{Seasonal Adjustment:} {Not Seasonally Adjusted}
\item\strong{Frequency:} {Monthly}
\item\strong{Units:} {Percent}
\item\strong{Date Range:} {1948-01-01 to 2021-03-01}
\item\strong{Last Updated:} {2021-04-02 7:44 AM CDT}
}
}
\source{
U.S. Bureau of Labor Statistics \url{https://fred.stlouisfed.org/data/UNRATENSA.txt}
}
\usage{
data(UNRATENSA)
}
\description{
\code{UNRATENSA} Civilian Unemployment Rate
}
\section{Notes}{
The unemployment rate represents the number of unemployed as a
percentage of the labor force. Labor force data are restricted to
people 16 years of age and older, who currently reside in 1 of the 50
states or the District of Columbia, who do not reside in institutions
(e.g., penal and mental facilities, homes for the aged), and who are
not on active duty in the Armed Forces.
This rate is also defined as the U-3 measure of labor underutilization.
The series comes from the 'Current Population Survey (Household Survey)'
The source code is: LNU04000000
}
\examples{
data(UNRATENSA)
tail(UNRATENSA)
plot(UNRATENSA, grid.col = "white", col="green")
}
\keyword{datasets}
|
/man/UNRATENSA.Rd
|
no_license
|
cran/neverhpfilter
|
R
| false
| true
| 1,420
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/UNRATENSA.R
\docType{data}
\name{UNRATENSA}
\alias{UNRATENSA}
\title{Civilian Unemployment Rate}
\format{
An \code{\link{xts}} object of the Civilian Unemployment Rate.
\itemize{
\item\strong{Release:} {Employment Situation}
\item\strong{Seasonal Adjustment:} {Not Seasonally Adjusted}
\item\strong{Frequency:} {Monthly}
\item\strong{Units:} {Percent}
\item\strong{Date Range:} {1948-01-01 to 2021-03-01}
\item\strong{Last Updated:} {2021-04-02 7:44 AM CDT}
}
}
\source{
U.S. Bureau of Labor Statistics \url{https://fred.stlouisfed.org/data/UNRATENSA.txt}
}
\usage{
data(UNRATENSA)
}
\description{
\code{UNRATENSA} Civilian Unemployment Rate
}
\section{Notes}{
The unemployment rate represents the number of unemployed as a
percentage of the labor force. Labor force data are restricted to
people 16 years of age and older, who currently reside in 1 of the 50
states or the District of Columbia, who do not reside in institutions
(e.g., penal and mental facilities, homes for the aged), and who are
not on active duty in the Armed Forces.
This rate is also defined as the U-3 measure of labor underutilization.
The series comes from the 'Current Population Survey (Household Survey)'
The source code is: LNU04000000
}
\examples{
data(UNRATENSA)
tail(UNRATENSA)
plot(UNRATENSA, grid.col = "white", col="green")
}
\keyword{datasets}
|
#----------------------------------------------------------------------------#
#' Basic ggplot theme.
#'
#' Applies \code{theme_bw()} together with uniform font sizing/colouring and
#' a bottom-aligned legend.
#'
#' @details Maintained by: Clara Marquardt
#'
#' @export
#' @import ggplot2
#'
#' @param axis_size Font size - axes labels (numeric) [default: 0.5].
#' @param title_size Font size - title (numeric) [default: 8].
#' @param subtitle_size Font size - sub title (numeric) [default: 6].
#' @param col_gen Font color (character) [default: "grey50"].
#' @param legend_title_size Font size - legend title (numeric) [default: 0.5].
#' @param legend_text_size Font size - legend text (numeric) [default: 0.4].
#' @param legend_tick_size Tick length (numeric) [default: 0.08].
#' @param legend_width Legend width (numeric) [default: 0.5].
#' @param legend_height Legend height (numeric) [default: 0.2].
#' @param legend_hjust_title Legend title adjustment (horizontal) (numeric) [default: 0.5].
#'
#' @return A ggplot theme object to be added to a plot with \code{+}.
#'
#' @examples \dontrun{
#' test_plot <- ggplot(data=dia[1:25]) +
#'   geom_bar(aes(x=dia_code)) +
#'   labs(
#'     title="Test Title",
#'     subtitle="Test Subtitle",
#'     x="Diagnosis Code",
#'     y="Frequency (Number of Observations)",
#'     caption="Test Plot - Details: * ------------------------ *") +
#'   theme_basic(legend_tick_size=0.001)
#' ggsave("theme_basic_test.pdf", test_plot)
#' }
theme_basic <- function(axis_size = 0.5, title_size = 8, subtitle_size = 6,
                        col_gen = "grey50",
                        legend_title_size = 0.5, legend_text_size = 0.4,
                        legend_tick_size = 0.08,
                        legend_width = 0.5, legend_height = 0.2,
                        legend_hjust_title = 0.5) {

  ## start from the pre-built base theme
  theme_bw() +
    theme(
      # axis text and titles share one relative size and one colour
      axis.text.x = element_text(size = rel(axis_size), colour = col_gen),
      axis.text.y = element_text(size = rel(axis_size), colour = col_gen),
      axis.title.x = element_text(size = rel(axis_size), colour = col_gen),
      axis.title.y = element_text(size = rel(axis_size), colour = col_gen),
      # title/subtitle/caption use absolute point sizes
      plot.title = element_text(size = title_size, colour = col_gen, face = "bold"),
      plot.subtitle = element_text(size = subtitle_size, colour = col_gen,
                                   face = "plain"),
      plot.caption = element_text(size = (subtitle_size - 1), colour = col_gen,
                                  face = "plain"),
      # legend at the bottom with compact keys
      legend.position = "bottom",
      legend.key.height = unit(legend_height, "cm"),
      legend.key.width = unit(legend_width, "cm"),
      axis.ticks.length = unit(legend_tick_size, "cm"),
      legend.title = element_text(size = rel(legend_title_size), colour = col_gen,
                                  hjust = legend_hjust_title, face = "plain"),
      legend.text = element_text(size = rel(legend_text_size), colour = col_gen)
    )
}
#----------------------------------------------------------------------------#
|
/R/theme_basic.R
|
permissive
|
sysmedlab/ehR
|
R
| false
| false
| 2,777
|
r
|
#----------------------------------------------------------------------------#
#' Basic ggplot theme.
#'
#' Applies \code{theme_bw()} together with uniform font sizing/colouring and
#' a bottom-aligned legend.
#'
#' @details Maintained by: Clara Marquardt
#'
#' @export
#' @import ggplot2
#'
#' @param axis_size Font size - axes labels (numeric) [default: 0.5].
#' @param title_size Font size - title (numeric) [default: 8].
#' @param subtitle_size Font size - sub title (numeric) [default: 6].
#' @param col_gen Font color (character) [default: "grey50"].
#' @param legend_title_size Font size - legend title (numeric) [default: 0.5].
#' @param legend_text_size Font size - legend text (numeric) [default: 0.4].
#' @param legend_tick_size Tick length (numeric) [default: 0.08].
#' @param legend_width Legend width (numeric) [default: 0.5].
#' @param legend_height Legend height (numeric) [default: 0.2].
#' @param legend_hjust_title Legend title adjustment (horizontal) (numeric) [default: 0.5].
#'
#' @return A ggplot theme object to be added to a plot with \code{+}.
#'
#' @examples \dontrun{
#' test_plot <- ggplot(data=dia[1:25]) +
#'   geom_bar(aes(x=dia_code)) +
#'   labs(
#'     title="Test Title",
#'     subtitle="Test Subtitle",
#'     x="Diagnosis Code",
#'     y="Frequency (Number of Observations)",
#'     caption="Test Plot - Details: * ------------------------ *") +
#'   theme_basic(legend_tick_size=0.001)
#' ggsave("theme_basic_test.pdf", test_plot)
#' }
theme_basic <- function(axis_size = 0.5, title_size = 8, subtitle_size = 6,
                        col_gen = "grey50",
                        legend_title_size = 0.5, legend_text_size = 0.4,
                        legend_tick_size = 0.08,
                        legend_width = 0.5, legend_height = 0.2,
                        legend_hjust_title = 0.5) {

  ## start from the pre-built base theme
  theme_bw() +
    theme(
      # axis text and titles share one relative size and one colour
      axis.text.x = element_text(size = rel(axis_size), colour = col_gen),
      axis.text.y = element_text(size = rel(axis_size), colour = col_gen),
      axis.title.x = element_text(size = rel(axis_size), colour = col_gen),
      axis.title.y = element_text(size = rel(axis_size), colour = col_gen),
      # title/subtitle/caption use absolute point sizes
      plot.title = element_text(size = title_size, colour = col_gen, face = "bold"),
      plot.subtitle = element_text(size = subtitle_size, colour = col_gen,
                                   face = "plain"),
      plot.caption = element_text(size = (subtitle_size - 1), colour = col_gen,
                                  face = "plain"),
      # legend at the bottom with compact keys
      legend.position = "bottom",
      legend.key.height = unit(legend_height, "cm"),
      legend.key.width = unit(legend_width, "cm"),
      axis.ticks.length = unit(legend_tick_size, "cm"),
      legend.title = element_text(size = rel(legend_title_size), colour = col_gen,
                                  hjust = legend_hjust_title, face = "plain"),
      legend.text = element_text(size = rel(legend_text_size), colour = col_gen)
    )
}
#----------------------------------------------------------------------------#
|
# 4.1 Function documentation
# Three ways to inspect a function: help page (two forms) and its signature.
?mean
help(mean)
args(mean)
# 4.2 Use a function
linkedin <- c(16, 9, 13, 5, 2, 17, 14)
facebook <- c(17, 7, 5, 16, 8, 13, 14)
# Naming the first argument (x =) is equivalent to passing it positionally.
avg_li <- mean(x = linkedin)
avg_fb <- mean(facebook)
avg_li
avg_fb
# 4.3 Use a function
avg_sum <- mean(linkedin + facebook)
# trim = 0.2 discards the lowest and highest 20% of values before averaging.
avg_sum_trimmed <- mean(linkedin + facebook, trim = 0.2)
avg_sum
avg_sum_trimmed
# 4.4 Use a function
linkedin <- c(16, 9, 13, 5, NA, 17, 14)
facebook <- c(17, NA, 5, 16, 8, 13, 14)
# mean() propagates NA unless na.rm = TRUE is supplied.
mean(linkedin)
mean(linkedin, na.rm = TRUE)
# 4.5 Functions inside functions
# Mean absolute difference between the two vectors, ignoring NAs.
mean(abs(linkedin - facebook), na.rm = TRUE)
# 4.6 Write your own function
# The last evaluated expression is the function's return value.
pow_two <- function(x) {
x ^ 2
}
pow_two(12)
sum_abs <- function(x, y){
abs(x) + abs(y)
}
sum_abs(-2, 3)
# 4.7 Write your own function
# A zero-argument function: prints a greeting, then returns TRUE.
hello <- function() {
print("Hi there!")
TRUE
}
hello()
# 4.8 Write your own function
# Optional argument with a default value; explicit return() used here.
pow_two <- function(x, print_info = TRUE) {
y <- x ^ 2
if(print_info){
print(paste(x, "to the power two equals", y))
}
return(y)
}
pow_two(5)
pow_two(5, FALSE)
pow_two(5, TRUE)
# 4.9 R you functional?
linkedin <- c(16, 9, 13, 5, 2, 17, 14)
facebook <- c(17, 7, 5, 16, 8, 13, 14)
# Returns the view count when it exceeds 15, otherwise 0 (with a message).
interpret <- function(num_views) {
if (num_views > 15) {
print("You're popular!")
return(num_views)
} else {
print("Try to be more visible!")
return(0)
}
}
interpret(linkedin[1])
interpret(facebook[2])
# 4.10 R you functional?
interpret <- function(num_views) {
if (num_views > 15) {
print("You're popular!")
return(num_views)
} else {
print("Try to be more visible!")
return(0)
}
}
# Applies interpret() to every element; returns the sum of its results,
# or NULL when return_sum = FALSE (the messages are still printed).
interpret_all <- function(views, return_sum = TRUE){
count <- 0
for (v in views) {
count <- count + interpret(v)
}
if (return_sum) {
return(count)
} else {
return(NULL)
}
}
interpret_all(linkedin)
interpret_all(facebook)
# 4.11 Load an R package
library(ggplot2)
qplot(mtcars$wt, mtcars$hp)
search()
/Intermediate R/Tutorials/4. Tutorial.r
|
no_license
|
TopicosSelectos/tutoriales-2019-2-mimi1698
|
R
| false
| false
| 2,013
|
r
|
# 4.1 Function documentation
# Three ways to inspect a function: help page (two forms) and its signature.
?mean
help(mean)
args(mean)
# 4.2 Use a function
linkedin <- c(16, 9, 13, 5, 2, 17, 14)
facebook <- c(17, 7, 5, 16, 8, 13, 14)
# Naming the first argument (x =) is equivalent to passing it positionally.
avg_li <- mean(x = linkedin)
avg_fb <- mean(facebook)
avg_li
avg_fb
# 4.3 Use a function
avg_sum <- mean(linkedin + facebook)
# trim = 0.2 discards the lowest and highest 20% of values before averaging.
avg_sum_trimmed <- mean(linkedin + facebook, trim = 0.2)
avg_sum
avg_sum_trimmed
# 4.4 Use a function
linkedin <- c(16, 9, 13, 5, NA, 17, 14)
facebook <- c(17, NA, 5, 16, 8, 13, 14)
# mean() propagates NA unless na.rm = TRUE is supplied.
mean(linkedin)
mean(linkedin, na.rm = TRUE)
# 4.5 Functions inside functions
# Mean absolute difference between the two vectors, ignoring NAs.
mean(abs(linkedin - facebook), na.rm = TRUE)
# 4.6 Write your own function
# The last evaluated expression is the function's return value.
pow_two <- function(x) {
x ^ 2
}
pow_two(12)
sum_abs <- function(x, y){
abs(x) + abs(y)
}
sum_abs(-2, 3)
# 4.7 Write your own function
# A zero-argument function: prints a greeting, then returns TRUE.
hello <- function() {
print("Hi there!")
TRUE
}
hello()
# 4.8 Write your own function
# Optional argument with a default value; explicit return() used here.
pow_two <- function(x, print_info = TRUE) {
y <- x ^ 2
if(print_info){
print(paste(x, "to the power two equals", y))
}
return(y)
}
pow_two(5)
pow_two(5, FALSE)
pow_two(5, TRUE)
# 4.9 R you functional?
linkedin <- c(16, 9, 13, 5, 2, 17, 14)
facebook <- c(17, 7, 5, 16, 8, 13, 14)
# Returns the view count when it exceeds 15, otherwise 0 (with a message).
interpret <- function(num_views) {
if (num_views > 15) {
print("You're popular!")
return(num_views)
} else {
print("Try to be more visible!")
return(0)
}
}
interpret(linkedin[1])
interpret(facebook[2])
# 4.10 R you functional?
interpret <- function(num_views) {
if (num_views > 15) {
print("You're popular!")
return(num_views)
} else {
print("Try to be more visible!")
return(0)
}
}
# Applies interpret() to every element; returns the sum of its results,
# or NULL when return_sum = FALSE (the messages are still printed).
interpret_all <- function(views, return_sum = TRUE){
count <- 0
for (v in views) {
count <- count + interpret(v)
}
if (return_sum) {
return(count)
} else {
return(NULL)
}
}
interpret_all(linkedin)
interpret_all(facebook)
# 4.11 Load an R package
library(ggplot2)
qplot(mtcars$wt, mtcars$hp)
search()
|
# Creating new variables on the Baltimore restaurants dataset.
restData <- read.csv("restaurants.csv")
# Creating sequences
s1 <- seq(1, 10, by=2); s1
s2 <- seq(1, 10, length=3); s2
x <- c(1,3,8,25,100); seq(along = x)
# Subsetting variables
str(restData)
# Logical flag: TRUE for restaurants in the two neighborhoods of interest.
restData$nearMe <- restData$neighborhood %in% c("Roland Park", "Homeland")
table(restData$nearMe)
# Creating binary variables
# Negative zip codes are data-entry errors; flag them.
restData$zipWrong <- ifelse(restData$zipCode < 0, TRUE, FALSE)
table(restData$zipWrong, restData$zipCode < 0)
# Creating categorical variables
# cut() at the quartiles turns the numeric zip codes into a factor.
restData$zipGroups <- cut(restData$zipCode, breaks = quantile(restData$zipCode))
table(restData$zipGroups)
table(restData$zipGroups, restData$zipCode)
# Easier cutting
install.packages("Hmisc")
library(Hmisc)
restData$zipGroups <- cut2(restData$zipCode, g=4) # cutting produces factor variables
table(restData$zipGroups)
# Creating factor variables
restData$zcf <- factor(restData$zipCode)
restData$zcf[1:10]
class(restData$zcf)
# Levels of factor variables
yesno <- sample(c("yes", "no"), size = 10, replace = TRUE)
# Explicit levels fix the factor order; relevel() changes the reference level.
yesnofac <- factor(yesno, levels = c("yes", "no"))
relevel(yesnofac, ref = "yes")
as.numeric(yesnofac)
# Using the mutate function
library(plyr)
restData2 <- mutate(restData, zipGroups=cut2(zipCode, g=4))
table(restData2$zipGroups)
|
/Coursera/Data Science (JHU)/03 Getting and cleaning data/Week 3/03_03_creatingNewVariables.R
|
no_license
|
abudish/Course_Materials_and_Certificates
|
R
| false
| false
| 1,221
|
r
|
# Creating new variables on the Baltimore restaurants dataset.
restData <- read.csv("restaurants.csv")
# Creating sequences
s1 <- seq(1, 10, by=2); s1
s2 <- seq(1, 10, length=3); s2
x <- c(1,3,8,25,100); seq(along = x)
# Subsetting variables
str(restData)
# Logical flag: TRUE for restaurants in the two neighborhoods of interest.
restData$nearMe <- restData$neighborhood %in% c("Roland Park", "Homeland")
table(restData$nearMe)
# Creating binary variables
# Negative zip codes are data-entry errors; flag them.
restData$zipWrong <- ifelse(restData$zipCode < 0, TRUE, FALSE)
table(restData$zipWrong, restData$zipCode < 0)
# Creating categorical variables
# cut() at the quartiles turns the numeric zip codes into a factor.
restData$zipGroups <- cut(restData$zipCode, breaks = quantile(restData$zipCode))
table(restData$zipGroups)
table(restData$zipGroups, restData$zipCode)
# Easier cutting
install.packages("Hmisc")
library(Hmisc)
restData$zipGroups <- cut2(restData$zipCode, g=4) # cutting produces factor variables
table(restData$zipGroups)
# Creating factor variables
restData$zcf <- factor(restData$zipCode)
restData$zcf[1:10]
class(restData$zcf)
# Levels of factor variables
yesno <- sample(c("yes", "no"), size = 10, replace = TRUE)
# Explicit levels fix the factor order; relevel() changes the reference level.
yesnofac <- factor(yesno, levels = c("yes", "no"))
relevel(yesnofac, ref = "yes")
as.numeric(yesnofac)
# Using the mutate function
library(plyr)
restData2 <- mutate(restData, zipGroups=cut2(zipCode, g=4))
table(restData2$zipGroups)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{howto}
\alias{howto}
\title{How to use a package}
\usage{
howto(package)
}
\arguments{
\item{package}{a package to open vignettes of.}
}
\value{
Vignette data, invisibly.
}
\description{
Open all package vignettes in browser tabs ready for skimming.
}
\examples{
\dontrun{
howto("naniar")
}
}
|
/man/howto.Rd
|
no_license
|
johnfrye/fryeutilities
|
R
| false
| true
| 385
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{howto}
\alias{howto}
\title{How to use a package}
\usage{
howto(package)
}
\arguments{
\item{package}{a package to open vignettes of.}
}
\value{
Vignette data, invisibly.
}
\description{
Open all package vignettes in browser tabs ready for skimming.
}
\examples{
\dontrun{
howto("naniar")
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ICUcapacity_FR.R
\docType{data}
\name{ICUcapacity_FR}
\alias{ICUcapacity_FR}
\title{Numbers of ICU beds in France}
\format{
A data frame with 19 rows and 2 variables
}
\source{
DREES 2018 \url{https://www.sae-diffusion.sante.gouv.fr/sae-diffusion/recherche.htm}
obtained on 2020-03-25 and stored in
\code{'data/raw/capacite_rea_regions_saediffusiondrees.txt'}
}
\usage{
ICUcapacity_FR
}
\description{
A dataset containing the numbers of ICU beds in France and in each region
according to DREES as of 2018-12-31
}
\examples{
data(ICUcapacity_FR)
#ICUcapacity_FR <- read.delim("data/raw/capacite_rea_regions_saediffusiondrees.txt")
}
\keyword{datasets}
|
/man/ICUcapacity_FR.Rd
|
permissive
|
sistm/SEIRcovid19
|
R
| false
| true
| 730
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ICUcapacity_FR.R
\docType{data}
\name{ICUcapacity_FR}
\alias{ICUcapacity_FR}
\title{Numbers of ICU beds in France}
\format{
A data frame with 19 rows and 2 variables
}
\source{
DREES 2018 \url{https://www.sae-diffusion.sante.gouv.fr/sae-diffusion/recherche.htm}
obtained on 2020-03-25 and stored in
\code{'data/raw/capacite_rea_regions_saediffusiondrees.txt'}
}
\usage{
ICUcapacity_FR
}
\description{
A dataset containing the numbers of ICU beds in France and in each region
according to DREES as of 2018-12-31
}
\examples{
data(ICUcapacity_FR)
#ICUcapacity_FR <- read.delim("data/raw/capacite_rea_regions_saediffusiondrees.txt")
}
\keyword{datasets}
|
library(ribiosNGS)
library(testthat)
# Verifies that readMpsnakeAsDGEList assembles a DGEList whose sample
# annotation, feature annotation and count matrix match the raw files
# shipped in the package's minimal mpsnake output directory.
test_that("readMpsnakeAsDGEList works", {
# Reading from the output directory and from its results/ subdirectory
# should be equivalent.
mpsnakeDir <- system.file("extdata/mpsnake-minimal-outdir", package="ribiosNGS")
mpsDgeList <- readMpsnakeAsDGEList(mpsnakeDir)
#' ## equivalent
mpsnakeResDir <- system.file("extdata/mpsnake-minimal-outdir/results", package="ribiosNGS")
mpsDgeList2 <- readMpsnakeAsDGEList(mpsnakeResDir)
# Load the raw fixtures directly for comparison.
sampleAnno <- ribiosIO::readTable(system.file("extdata/mpsnake-minimal-outdir",
"results/annot/phenoData.meta",
package="ribiosNGS"),
row.names = TRUE)
featAnno <- ribiosIO::readTable(system.file("extdata/mpsnake-minimal-outdir",
"results/annot/feature.annot",
package="ribiosNGS"),
row.names = FALSE)
counts <- ribiosIO::read_gct_matrix(system.file("extdata/mpsnake-minimal-outdir",
"results/gct/unnamed-molphen-project.gct",
package="ribiosNGS"))
expect_identical(mpsDgeList, mpsDgeList2)
# Sample groups and ordering must match the phenoData annotation.
expect_identical(as.character(mpsDgeList$samples$group),
as.character(sampleAnno$GROUP))
expect_identical(rownames(sampleAnno),
colnames(mpsDgeList$counts))
# equivalent (not identical): attributes such as dimnames may differ.
expect_equivalent(featAnno, mpsDgeList$genes)
expect_equivalent(as.matrix(counts), mpsDgeList$counts)
})
|
/tests/testthat/test-readMpsnakeAsDGEList.R
|
no_license
|
bedapub/ribiosNGS
|
R
| false
| false
| 1,559
|
r
|
library(ribiosNGS)
library(testthat)
# Verifies that readMpsnakeAsDGEList assembles a DGEList whose sample
# annotation, feature annotation and count matrix match the raw files
# shipped in the package's minimal mpsnake output directory.
test_that("readMpsnakeAsDGEList works", {
# Reading from the output directory and from its results/ subdirectory
# should be equivalent.
mpsnakeDir <- system.file("extdata/mpsnake-minimal-outdir", package="ribiosNGS")
mpsDgeList <- readMpsnakeAsDGEList(mpsnakeDir)
#' ## equivalent
mpsnakeResDir <- system.file("extdata/mpsnake-minimal-outdir/results", package="ribiosNGS")
mpsDgeList2 <- readMpsnakeAsDGEList(mpsnakeResDir)
# Load the raw fixtures directly for comparison.
sampleAnno <- ribiosIO::readTable(system.file("extdata/mpsnake-minimal-outdir",
"results/annot/phenoData.meta",
package="ribiosNGS"),
row.names = TRUE)
featAnno <- ribiosIO::readTable(system.file("extdata/mpsnake-minimal-outdir",
"results/annot/feature.annot",
package="ribiosNGS"),
row.names = FALSE)
counts <- ribiosIO::read_gct_matrix(system.file("extdata/mpsnake-minimal-outdir",
"results/gct/unnamed-molphen-project.gct",
package="ribiosNGS"))
expect_identical(mpsDgeList, mpsDgeList2)
# Sample groups and ordering must match the phenoData annotation.
expect_identical(as.character(mpsDgeList$samples$group),
as.character(sampleAnno$GROUP))
expect_identical(rownames(sampleAnno),
colnames(mpsDgeList$counts))
# equivalent (not identical): attributes such as dimnames may differ.
expect_equivalent(featAnno, mpsDgeList$genes)
expect_equivalent(as.matrix(counts), mpsDgeList$counts)
})
|
# Creates a special "matrix" object that can cache its inverse.
# Returns a list of accessor closures sharing one environment:
#   set(y)        - replace the stored matrix and invalidate the cache
#   get()         - return the stored matrix
#   setInverse(i) - store a computed inverse in the cache
#   getInverse()  - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  # Replacing the matrix clears the cached inverse, since it is now stale.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setInverse <- function(inverse) inv <<- inverse
  getInverse <- function() inv
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
# Returns the inverse of the special "matrix" object `x` (as produced by
# makeCacheMatrix). The inverse is computed with solve() only on a cache
# miss; subsequent calls reuse the stored result. Extra arguments in `...`
# are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("Using Cached Data")
  }
  cached
}
|
/ProgrammingAssignment2/cachematrix.R
|
no_license
|
vikranthkasi/GitTestNew
|
R
| false
| false
| 615
|
r
|
# Creates a special "matrix" object that can cache its inverse.
# Returns a list of accessor closures sharing one environment:
#   set(y)        - replace the stored matrix and invalidate the cache
#   get()         - return the stored matrix
#   setInverse(i) - store a computed inverse in the cache
#   getInverse()  - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  # Replacing the matrix clears the cached inverse, since it is now stale.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setInverse <- function(inverse) inv <<- inverse
  getInverse <- function() inv
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
# Returns the inverse of the special "matrix" object `x` (as produced by
# makeCacheMatrix). The inverse is computed with solve() only on a cache
# miss; subsequent calls reuse the stored result. Extra arguments in `...`
# are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("Using Cached Data")
  }
  cached
}
|
#' @title FLR to srmsymc
#'
#' @description Converts an FLStock object into the two input files used by
#' the srmsymc program: summarises the stock with \code{fls2list}, then
#' writes \code{srmsymc.dat} (via \code{srmsymc_cat_srmsymc}) and the
#' age-based file (via \code{srmsymc_cat_age}).
#'
#' @export
#'
#' @param stk FLStock object
#' @param y Years (number of most recent years averaged; passed to fls2list)
#' @param name_stock Name of the stock. May later down the line be used in
#' table and plot outputs
#' @param filename_age Name of the associated file containing biological (weights,
#' maturity and mortality) and fisheries data (selection pattern) by age.
#' @param opt_sr_model Recruitment model number (1: Ricker, 2: Beverton-Holt,
#' 3: Segmented regression).
#' @param opt_land Boolean
#' @param opt_sim 0=no simulation, 1=simulation
#' @param opt_age 0=error only in recr, 1=error in recr & steady-state vectors
#' @param opt_pen 0=no SR constraints, 1=apply SR constrain (IS THIS ONLY FOR
#' THE SEGMENTED REGRESSION??).
FLS2srmsymc <- function(stk,y=3,name_stock,filename_age,opt_sr_model,
opt_land=TRUE,opt_sim=TRUE,opt_age=TRUE,opt_pen=TRUE) {
# Summarise the FLStock into year-based and age-based components.
x <- fls2list(stk,y=y,optLand=opt_land)
# NOTE(review): "codNS" and "age.dat" are hardcoded and the 1,1,1,1 literals
# override opt_sr_model/opt_sim/opt_age/opt_pen; the name_stock,
# filename_age and opt_* arguments are accepted but not forwarded here.
# Confirm whether these should be passed through instead.
y <- srmsymc_cat_srmsymc("codNS","age.dat",min(x$rby$year),max(x$rby$year),
aR=min(as.numeric((x$dims$age))),
aP=max(as.numeric((x$dims$age))),
1,1,1,1,
r=x$rby$rec,
ssb=x$rby$ssb,
year=x$rby$year)
# Write the age-based file: column 1 of each component is the mean,
# column 2 the CV (see fls2list's my_sum).
z <- srmsymc_cat_age(filename_age,n_fleets=2,pf=0,pm=0,
sel=cbind(x$sH[,1],x$sD[,1]),
sel_cv=cbind(x$sH[,2],x$sD[,2]),
w=cbind(x$wH[,1],x$wD[,1]),
w_cv=cbind(x$wH[,2],x$wD[,2]),
bio=cbind(x$M[,1],x$MT[,1],x$wS[,1]),
bio_cv=cbind(x$M[,2],x$MT[,2],x$wS[,2]))
}
#' @title Summarise an FLStock for srmsymc input
#'
#' @description From Tim. Extracts a year-based summary (recruitment, ssb,
#' fbar, yield) and age-based mean/CV summaries (selection, weights,
#' natural mortality, maturity) over the last \code{y} years of the stock.
#'
#' @export
#'
#' @param stk FLStock object
#' @param y year (number of most recent years over which means/CVs are taken)
#' @param optLand Boolean; if TRUE (default) and the stock has discards,
#' selection is split into a landings (sH) and a discards (sD) component.
fls2list <- function(stk, y, optLand=TRUE)
{
# Does this stock carry any discards at all?
d.flag <- (sum(discards(stk)) > 0)
ret <- vector("list",9)
names(ret) <- c("rby","sH","sD","wH","wD","wS","M","MT","dims")
# Year-based summary: recruitment is stock numbers at the first age.
ret$rby <- data.frame(year=as.numeric(dimnames(stk@stock.n)$year),
rec= c(stock.n(stk)[1,]),
ssb=c(ssb(stk)),
fbar=c(fbar(stk)),
yield=c(catch(stk)))
# The last y years of the series.
years <- rev(rev(dimnames(stk@stock.n)$year)[1:y])
ages <- dimnames(stk@stock.n)$age
# Per-age mean and CV over the selected years; NAs (e.g. zero mean) become 0.
my_sum <- function(x) {r=cbind(mean=apply(x[,years,],1,mean),CV=apply(x[,years,],1,sd)/apply(x[,years,],1,mean));r[is.na(r)]<- 0; r}
if (d.flag & optLand) {
# Split F into landings and discards components, each rescaled so the
# fbar-age-range mean of total F is 1 per year.
sC <- harvest(stk)[,years]
sH <- harvest(stk)[,years] * (landings.n(stk)/catch.n(stk))[,years]
sD <- harvest(stk)[,years] * (discards.n(stk)/catch.n(stk))[,years]
for (yy in years) sH[,yy] <- sH[,yy]/mean(sC[range(stk)["minfbar"]:range(stk)["maxfbar"],yy])
for (yy in years) sD[,yy] <- sD[,yy]/mean(sC[range(stk)["minfbar"]:range(stk)["maxfbar"],yy])
ret$sH <- my_sum(sH)
ret$sD <- my_sum(sD)
} else {
# No discard split: all F goes to sH, sD is a zero matrix of same shape.
sH <- harvest(stk)[,years]
for (yy in years) sH[,yy] <- sH[,yy]/mean(sH[range(stk)["minfbar"]:range(stk)["maxfbar"],yy])
ret$sH <- my_sum(sH)
ret$sD <- ret$sH*0
}
# Mean/CV summaries of weights, natural mortality and maturity.
ret$wH <- my_sum(landings.wt(stk))
ret$wD <- my_sum(discards.wt(stk))
ret$wD[is.na(ret$wD)] <- 0
ret$wS <- my_sum(stock.wt(stk))
ret$M <- my_sum(m(stk))
ret$MT <- my_sum(mat(stk))
ret$MT[is.na(ret$MT)] <- 0
ret$dims <- list(age=ages,year=years,fbarage=range(stk)["minfbar"]:range(stk)["maxfbar"])
#Set CVs to a more realistic value
#i <- (ret$MT[,"mean"] >=0.05 & ret$MT[,"mean"] <=0.95)
#ret$MT[i,"CV"] <- max(ret$MT[i,"CV"],0.1)
#ret$M[,"CV"] <- max(ret$M[,"CV"],0.1)
return (ret)
}
#' @title Write srmsymc.dat to disk
#'
#' @description The function is used to write srmsymc.dat to the disk. The
#' file is always written to \code{srmsymc.dat} in the current working
#' directory; the connection is closed even if writing fails part-way.
#'
#' @author Einar Hjorleifsson
#'
#' @export
#'
#' @param name_stock Name of the stock. May later down the line be used in
#' table and plot outputs
#' @param filename_age Name of the associated file containing biological (weights,
#' maturity and mortality) and fisheries data (selection pattern) by age.
#' @param y1 First year of ssb and recruitment data
#' @param y2 Last year of ssb and recruitment data
#' @param aR Recruitment age
#' @param aP Plus group age
#' @param opt_sr_model Recruitment model number (1: Ricker, 2: Beverton-Holt,
#' 3: Segmented regression).
#' @param opt_sim 0=no simulation, 1=simulation
#' @param opt_age 0=error only in recr, 1=error in recr & steady-state vectors
#' @param opt_pen 0=no SR constraints, 1=apply SR constraint (IS THIS ONLY FOR
#' THE SEGMENTED REGRESSION??).
#' @param r A vector containing recruitment
#' @param ssb A vector containing spawning stock biomass
#' @param year A vector containing years (just used in comments).
#' @return Invisibly \code{NULL}; called for the side effect of writing the file.
srmsymc_cat_srmsymc <- function(name_stock, filename_age, y1, y2, aR, aP,
                                opt_sr_model, opt_sim, opt_age, opt_pen,
                                r, ssb, year) {
  tmpfile <- file('srmsymc.dat', open = 'w')
  # Close the connection even if one of the cat() calls below errors,
  # so the function never leaks an open file handle.
  on.exit(close(tmpfile), add = TRUE)
  cat('# Header: Some nice description\n', file = tmpfile, append = TRUE)
  cat(name_stock, '# stkname: Name of the stock\n', file = tmpfile, append = TRUE)
  cat(filename_age, '# filname: Name of the option file (2nd file\n', file = tmpfile, append = TRUE)
  cat(y1, ' # ybeg: First year (yearRange[1])\n', file = tmpfile, append = TRUE)
  cat(y2, ' # yend: Last year (yearRange[2])\n', file = tmpfile, append = TRUE)
  cat(aR, ' # r: Recruitment age (senhead[1])\n', file = tmpfile, append = TRUE)
  cat(aP, ' # A: Plus group age (senhead[2])\n', file = tmpfile, append = TRUE)
  cat(opt_sr_model, ' # Ropt: S-R function type (sr)\n', file = tmpfile, append = TRUE)
  cat(opt_sim, ' # simopt: 0=no simulation, 1=simulation (ifelse(nits==0,0,1)) \n', file = tmpfile, append = TRUE)
  cat(opt_age, ' # senopt: 0=error only in recr, 1=error in recr & steady-state vectors (ifelse(varybiodata,1,0))\n', file = tmpfile, append = TRUE)
  cat(opt_pen, ' # penopt: 0=no SR constraints, 1=apply SR constraints (ifelse(srconstrain,1,0))\n', file = tmpfile, append = TRUE)
  # One line per recruitment/ssb pair, with the year as a trailing comment.
  cat('# r ssb\n', file = tmpfile, append = TRUE)
  cat(paste(r, ssb, '#', year), file = tmpfile, append = TRUE, sep = "\n")
  invisible(NULL)
}
#' @title Write age.dat to disk
#'
#' @description Writes the age-based input file (selection pattern, weights,
#' biological vectors and their CVs) for the srmsymc program. The connection
#' is closed even if writing fails part-way.
#'
#' @author Einar Hjorleifsson
#'
#' @export
#'
#' @param filename_age Name of the file to write.
#' @param n_fleets Number of fleets (number of columns in sel/sel_cv/w/w_cv).
#' @param pf Proportional fishing mortality before spawning time.
#' @param pm Proportional natural mortality before spawning time.
#' @param sel Matrix (age x fleet) of selection pattern means.
#' @param sel_cv Matrix (age x fleet) of selection pattern CVs.
#' @param w Matrix (age x fleet) of weight-at-age means.
#' @param w_cv Matrix (age x fleet) of weight-at-age CVs.
#' @param bio Matrix (age x 3) of M, maturity and SSB-weight means.
#' @param bio_cv Matrix (age x 3) of M, maturity and SSB-weight CVs.
#' @return Invisibly \code{NULL}; called for the side effect of writing the file.
srmsymc_cat_age <- function(filename_age, n_fleets, pf, pm, sel, sel_cv,
                            w, w_cv, bio, bio_cv) {
  n <- 4 # number of digits to write
  tmpfile <- file(filename_age, open = 'w')
  # Close the connection even if one of the writes below errors,
  # so the function never leaks an open file handle.
  on.exit(close(tmpfile), add = TRUE)
  cat('#Header: Some nice description\n', file = tmpfile, append = TRUE)
  cat(n_fleets, '# fno: Number of fleets (nstocks)\n', file = tmpfile, append = TRUE)
  cat(1, '# sno: Fleets for yield per recruit stats (always 1)\n', file = tmpfile, append = TRUE)
  cat(pf, '# f: proportional fishing mortality before spawning time (pf)\n', file = tmpfile, append = TRUE)
  cat(pm, '# m: proportional natural mortality before spawning time (pm)\n', file = tmpfile, append = TRUE)
  cat('# Selection pattern\n', file = tmpfile, append = TRUE)
  for (i in seq_len(nrow(sel))) cat(format(round(sel[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# cv Selection pattern\n', file = tmpfile, append = TRUE)
  # NOTE(review): sel_cv is formatted without the second `n` argument, unlike
  # every other matrix here; kept as-is to preserve the original file layout.
  for (i in seq_len(nrow(sel_cv))) cat(format(round(sel_cv[i, ], n)), '\n', file = tmpfile, append = TRUE)
  cat('# Weight at age\n', file = tmpfile, append = TRUE)
  for (i in seq_len(nrow(w))) cat(format(round(w[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# cv Weight at age\n', file = tmpfile, append = TRUE)
  for (i in seq_len(nrow(w_cv))) cat(format(round(w_cv[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# Biological data\n', file = tmpfile, append = TRUE)
  cat('# M, mat, wSSB\n', file = tmpfile, append = TRUE)
  for (i in seq_len(nrow(bio))) cat(format(round(bio[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# cv Biological data\n', file = tmpfile, append = TRUE)
  cat('# cvM, cvmat, cvwSSB\n', file = tmpfile, append = TRUE)
  for (i in seq_len(nrow(bio_cv))) cat(format(round(bio_cv[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  invisible(NULL)
}
#' @title write data files to disk for the ADM srmsymc program
#'
#' @description Some details, including link to the function a) running the
#' program and creating the data input
#'
#' @author Einar Hjorleifsson <einar.hjorleifsson@@gmail.com>
#'
#' @export
#'
#' @param rby A list that contains in its first three rows year, recruitment
#' and ssb.
#' @param iba A list - read.sen
#' @param aR \emph{integer}. Age of recruitment. Has to be specified. Is used to
#' shift the recruitment data such that it aligns with ssb data. Can be set to
#' 0 (zero) if ssb-recruitemnt pairs already properly aligned.
#' @param col_names vector of column names for year, recruitment and spawning stock
#' biomass (in that order).
#' @param opt_sr_model \emph{integer}. Number (1-3) corresponding to recruitment model.
#' @param opt_sim \emph{integer}. If 0 no simulation, if 1 simulation.
#' #' @param opt_pen \emph{integer}. If 0 then no recruitement constraints if 1
#' then recruitment contraints applied (NEED MORE DETAIL)
#' @param opt_age \emph{integer}. If 0 then error only in recruitment, if 1 then
#' both error in recruitment and steady-state vectors (age based inputs).
#' @param rba data.frame that contains age based input NOT YET IMPLEMENTED
#' @param filename_age \emph{character}. Name of the option file that contains the
#' age-based-data. If missing, set to "age.dat".
#' @param aP \emph{integer}. Plus group age.
#' @param years vector containing years to include in the ssb-r
write.srmsymc <- function(rby, iba, aR, col_names = c("year", "r", "ssb"),
                          opt_sr_model = 1, opt_sim = 1, opt_pen = 1,
                          opt_age = 1, rba, filename_age = "age.dat",
                          aP, years) {
  # Writes the two ADMB srmsymc input files ('srmsymc.dat' and the age-based
  # file 'filename_age') to the working directory and returns the (possibly
  # modified) rby/iba objects in a list.
  value <- NULL

  ## rby tests: must be a list with $data and $info, info$class2 == "rby"
  if (missing(rby)) stop("summary (year) data must be specified")
  class2 <- rby$info$class2
  if (is.null(class2)) stop("Not implemented yet for provided file type")
  if (class2 != "rby") stop("Not implemented yet for provided file type")
  x <- rby$data[, col_names]
  y <- rby$info

  ## rba test - NOT YET IMPLEMENTED
  #if(missing(rba)) stop("prediction (age) data must be specified")
  if (missing(filename_age)) filename_age <- "age.dat"

  ### A. Setting up the srmsymc.dat
  ## stock name: fall back to "nn" and strip blanks
  name_stock <- y$name_stock
  if (is.na(name_stock)) name_stock <- "nn"
  name_stock <- y$name_stock <- str_replace_all(name_stock, " ", "")
  aR <- y$time[3]
  if (is.na(aR)) stop("Recruitment age (aR) must be specified")

  ## Align recruitment with ssb (shift recruitment series by age aR)
  x <- align_ssb_r(x, col.names = names(x), aR)
  x <- x[!is.na(x$r), ]
  if (missing(years)) {
    y1 <- y$time[1]
    y2 <- y$time[2]
    years <- c(y1:y2)
  } else {
    y$time[1] <- y1 <- min(years)
    y$time[2] <- y2 <- max(years)
  }
  x <- x[x$year %in% years, ]
  if (missing(aP)) stop("Plus group age (aP) must be specified")
  y$time[6] <- aP

  ## carry the options along in the info-list that is returned
  x <- data.frame(r = x$r, ssb = x$ssb, year = x$year)
  y$filename_age <- filename_age
  y$opt_sr_model <- opt_sr_model
  y$opt_sim <- opt_sim
  y$opt_age <- opt_age
  y$opt_pen <- opt_pen
  rby <- list(data = x, info = y)
  srmsymc_cat_srmsymc(name_stock, filename_age, y1, y2, aR, aP,
                      opt_sr_model, opt_sim, opt_age, opt_pen,
                      r = x$r, ssb = x$ssb, year = x$year)

  ### B. Setting up the age.dat
  if (iba$info$creator == "created from function fishvise:::read.sen") {
    x <- iba$data
    y <- iba$info
    nFleets <- sum(y$fleets[2:4])
    fleet_Fs <- y$mort[, c(2:4)]
    fleet_Fs_names <- colnames(fleet_Fs)
    weight_names <- str_replace(fleet_Fs_names, "F", "W")
    ages <- c(y$time[4]:y$time[5])
    sel <- cvsel <- wgt <- cvwgt <- matrix(NA, nrow = length(ages), ncol = nFleets)
    for (i in 1:nFleets) {
      x1 <- x[x$id %in% fleet_Fs_names[i], ]
      # scale the selection pattern to its mean over the reference ages
      x1$value <- x1$value / mean(x1$value[x1$age %in% c(fleet_Fs[1, i]:fleet_Fs[2, i])])
      sel[, i] <- x1[, "value"]
      cvsel[, i] <- x1[, "cv"]
      x1 <- x[x$id %in% weight_names[i], ]
      wgt[, i] <- x1[, "value"]
      # BUG FIX: the weight cv was previously copied from the "value" column
      cvwgt[, i] <- x1[, "cv"]
    }
    bio <- cvbio <- matrix(NA, nrow = length(ages), ncol = 3)
    bio[, 1] <- x[x$id %in% 'M', 'value']
    bio[, 2] <- x[x$id %in% 'xN', 'value']
    bio[, 3] <- x[x$id %in% 'wN', 'value']
    cvbio[, 1] <- x[x$id %in% 'M', 'cv']
    cvbio[, 2] <- x[x$id %in% 'xN', 'cv']
    cvbio[, 3] <- x[x$id %in% 'wN', 'cv']
    cat_age.dat(filename_age, nFleets, y$pF, y$pM, sel, cvsel, wgt, cvwgt, bio, cvbio)
    iba <- list(sel = sel, sel_cv = cvsel, w = wgt, w_cv = cvwgt,
                bio = bio, bio_cv = cvbio, info = y)
  }
  if (iba$info$creator == "fishvise::read.rby") {
    x <- iba$data[, c("year", "age", "tF", "wC", "wX", "xN")]
    ## reference F: mean over the (hard-wired) ages 5-10
    tF <- ddply(x[x$age %in% 5:10, ], "year", summarise, refF = mean(tF))
    x <- join(x, tF)
    x$sF <- x$tF / x$refF
    d <- melt(x[, c("year", "age", "sF", "wC", "wX", "xN")], id.vars = c("year", "age"))
    d <- ddply(d, c("variable", "age"), summarise,
               ave = mean(value, na.rm = TRUE),
               cv = sqrt(var(value, na.rm = TRUE)) / ave)
    nFleets <- 1
    fleet_Fs <- matrix(c(5, 10), ncol = 1, nrow = 2)
    colnames(fleet_Fs) <- "sF"
    fleet_Fs_names <- colnames(fleet_Fs)
    weight_names <- "wC"
    ages <- c(min(x$age):max(x$age))
    sel <- cvsel <- wgt <- cvwgt <- matrix(NA, nrow = length(ages), ncol = nFleets)
    x <- d
    names(x) <- c("id", "age", "value", "cv")
    for (i in 1:nFleets) {
      x1 <- x[x$id %in% fleet_Fs_names[i], ]
      x1$value <- x1$value / mean(x1$value[x1$age %in% c(fleet_Fs[1, i]:fleet_Fs[2, i])])
      sel[, i] <- x1[, 'value']
      cvsel[, i] <- x1[, 'cv']
      x1 <- x[x$id %in% weight_names[i], ]
      wgt[, i] <- x1[, "value"]
      # BUG FIX: the weight cv was previously copied from the "value" column
      cvwgt[, i] <- x1[, "cv"]
    }
    bio <- cvbio <- matrix(NA, nrow = length(ages), ncol = 3)
    bio[, 1] <- 0.2    # natural mortality fixed at 0.2 for this input type
    bio[, 2] <- x[x$id %in% 'xN', 'value']
    bio[, 3] <- x[x$id %in% 'wX', 'value']
    cvbio[, 1] <- 0.0
    cvbio[, 2] <- x[x$id %in% 'xN', 'cv']
    cvbio[, 3] <- x[x$id %in% 'wX', 'cv']
    # CONSISTENCY FIX: honour 'filename_age' (was hard-wired to "age.dat")
    cat_age.dat(filename_age, 1, 0, 0, sel, cvsel, wgt, cvwgt, bio, cvbio)
    # NOTE(review): 'info = y' carries over the rby info from section A here,
    # not iba$info - looks suspicious; confirm intended.
    iba <- list(sel = sel, sel_cv = cvsel, w = wgt, w_cv = cvwgt,
                bio = bio, bio_cv = cvbio, info = y)
  }
  return(list(rby = rby, iba = iba))
}
#' @title Write age.dat to disk
#'
#' @description XXX
#'
#' @author Einar Hjorleifsson
#'
#' @export
#'
#' @param filename_age XXX
#' @param n_fleets XXX
#' @param pf XXX
#' @param pm XXX
#' @param sel XXX
#' @param sel_cv XXX
#' @param w XXX
#' @param w_cv XXX
#' @param bio XXX
#' @param bio_cv XXX
#'
cat_age.dat <- function(filename_age, n_fleets, pf, pm,
                        sel, sel_cv, w, w_cv, bio, bio_cv) {
  # Write the age-based srmsymc input file to 'filename_age'.
  #
  # sel/sel_cv and w/w_cv: matrices (ages in rows, fleets in columns) with
  # the selection pattern resp. catch weights plus their cv's.
  # bio/bio_cv: 3-column matrices (M, maturity, ssb weights) plus cv's.
  # pf/pm: proportion of fishing/natural mortality before spawning time.
  n <- 4  # number of digits to write
  tmpfile <- file(filename_age, open = 'w')
  # ROBUSTNESS FIX: close the connection even if one of the writes fails
  on.exit(close(tmpfile), add = TRUE)
  cat('#Header: Some nice description\n', file = tmpfile, append = TRUE)
  cat(n_fleets, '# fno: Number of fleets (nstocks)\n', file = tmpfile, append = TRUE)
  cat(1, '# sno: Fleets for yield per recruit stats (always 1)\n', file = tmpfile, append = TRUE)
  cat(pf, '# f: proportional fishing mortality before spawning time (pf)\n', file = tmpfile, append = TRUE)
  cat(pm, '# m: proportional natural mortality before spawning time (pm)\n', file = tmpfile, append = TRUE)
  cat('# Selection pattern\n', file = tmpfile, append = TRUE)
  for (i in 1:nrow(sel)) cat(format(round(sel[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# cv Selection pattern\n', file = tmpfile, append = TRUE)
  # CONSISTENCY FIX: the second format() argument was missing here although
  # every other row-writing call passes it, giving these rows odd padding
  for (i in 1:nrow(sel_cv)) cat(format(round(sel_cv[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# Weight at age\n', file = tmpfile, append = TRUE)
  for (i in 1:nrow(w)) cat(format(round(w[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# cv Weight at age\n', file = tmpfile, append = TRUE)
  for (i in 1:nrow(w_cv)) cat(format(round(w_cv[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# Biological data\n', file = tmpfile, append = TRUE)
  cat('# M, mat, wSSB\n', file = tmpfile, append = TRUE)
  for (i in 1:nrow(bio)) cat(format(round(bio[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# cv Biological data\n', file = tmpfile, append = TRUE)
  cat('# cvM, cvmat, cvwSSB\n', file = tmpfile, append = TRUE)
  for (i in 1:nrow(bio_cv)) cat(format(round(bio_cv[i, ], n), n), '\n', file = tmpfile, append = TRUE)
}
#' plotMSY
#'
#'
#' @param senfilename is the name of the senfile, with a corresponding sumfile sharing the same name except replacing ".sen" with ".sum". Produces an interactive window if not specified
#' @param indexfilename is the name of an index file in the Lowestoft format pointing to the pf and pm files. If pfpm is specified, this is ignored, if neither specified an interactive window appears to choose index file
#' @param pfpm is a vector of two values; pm and pf (proportion of m and f before spawning
#' @param srweights is a vector of three values, giving relative weighting of the three stock recruit forms, or a combination of 0 and NA for automatic weighting.
#' @param trimming proportion of the simulations to trim before taking harmonic mean. NA causes no trimming, but a diagnostic plot
#' @param nits Number of iterations of bootstrapping - if 0, does only the deterministic fit
#' @param nhair number of lines to plot on 'hair' plots
#' @param varybiodata If TRUE, bootstraps the biological & fleet data (weight, maturity, mortality, selectivity) if FALSE, varies SR relationship only
#' @param stockname Display title for stock used in titles and output path
#' @param fpa Value of Fpa to be plotted on output (NA for no value to plot)
#' @param flim Value of Flim to be plotted on output (NA for no value to plot)
#' @param bpa Value of Bpa to be plotted on output (NA for no value to plot)
#' @param blim Value of Blim to be plotted on output (NA for no value to plot)
#' @param outputfolder Location for output files. Defaults to ".\\output\\[stockname]\\"
#' @param datfilename A pre-calculated dat file - if provided, senfilename, indexfilename, varybiodata, srconstrain and pfpm are ignored in preference to values in the dat file. Data from the sum file will be added to the plots if it can be found
#' @param silent Supresses the majority of the output to screen. Default is TRUE
#' @param onlyYPR Calculate only the yield per recruit reference points, for stocks where the SRR is unknown or uncertain. Default is FALSE
#' @return mean(5:7) ##Some function
#' @author Tim Earl \email{timothy.earl@@cefas.co.uk}
#' @export
srmsymc_convertsumsen = function(senfilename = NA,
indexfilename = NA,
pfpm = NA,
srweights=c(NA, NA, NA),
trimming = NA,
nits = 100,
nhair = 100,
varybiodata = TRUE,
stockname = "",
fpa=NA,
flim=NA,
bpa=NA,
blim=NA,
outputfolder="",
datfilename=NA,
silent=TRUE,
onlyYPR=FALSE)
{
# Validates the input combination (sen/sum file pair plus index file or pfpm
# vector, OR a pre-computed dat file), normalises the stock-recruit weights,
# prepares the output folder and converts the inputs via convertSumSen() /
# convertDat().
# NOTE(review): 'trimming', 'nhair', 'fpa', 'flim', 'bpa' and 'blim' are not
# used in this portion of the function - presumably consumed further down in
# the original plotMSY code; confirm against the upstream source.
if (onlyYPR) {
# yield-per-recruit only: no stock-recruit model is fitted
srname = c("None")
srsn = c("00")
sr = 0
} else {
srname = c("Ricker","Beverton-Holt","Smooth hockeystick")
srsn = c("Ri","BH","HS")
sr = 1:length(srname)
}
srconstrain = TRUE #Should a penalty be applied to keep alpha and beta positive, and hockeystick breakpoint within data.
#Validate input arguments and ask if none provided
if (is.na(datfilename))
{
#take input from sen and sum files
#senfilename = tolower(senfilename)
#indexfilename = tolower(indexfilename)
if (is.na(senfilename)) stop("sen file needs to be specified") #senfilename = tolower(choose.files("*.sen", "Choose SEN file",multi=FALSE))
if (!file.exists(senfilename)) stop("SEN file not found")
# the sum file must sit next to the sen file with the same basename
sumfilename = sub(".sen",".sum",senfilename,fixed=TRUE)
if (!file.exists(sumfilename)) stop("SUM file not found")
if (any(is.na(pfpm)))
{
# no pf/pm supplied -> locate them via the (Lowestoft format) index file
if (is.na(indexfilename)) indexfilename = tolower(choose.files("*.*", "Choose index file",multi=FALSE))
if (!file.exists(indexfilename)) stop("Index file not found")
} else {
# pf/pm supplied directly: must be two proportions in [0, 1]
if (length(pfpm)!=2) stop("pfpm must be a vector of length 2")
if (any(pfpm[1]>1,pfpm[1]<0)) stop("pf must be between 0 and 1")
if (any(pfpm[2]>1,pfpm[2]<0)) stop("pm must be between 0 and 1")
}
if (stockname=="") stockname = gsub(".sen", "", basename(senfilename), fixed=TRUE)
if (outputfolder=="") outputfolder = paste("output/", stockname, "/", sep="")
} else {
#datfilename provided
datfilename = tolower(datfilename)
if (!file.exists(datfilename)) stop("file not found:", datfilename)
if (stockname=="") stockname = scan(datfilename,"",comment.char='#',nlines=2,quiet=TRUE)[1]
if (outputfolder=="") outputfolder = paste(".\\output\\", stockname, "\\", sep="")
sumfilename = NA
if (!is.na(senfilename)) #This can be used for adding points and legends to the yield and SSB plots
{
senfilename = tolower(senfilename)
sumfilename = sub(".sen",".sum",senfilename,fixed=TRUE)
if (!file.exists(sumfilename))
{
sumfilename = NA
} else { #convert to space delimited if comma delimited; save as sumf.tmp
sumf = scan(sumfilename,"",sep="\n",quote=NULL,quiet=silent)
sumf = gsub(","," ",sumf)
cat(sumf,file = "sumf.tmp",sep="\n",quote=NULL)
sumfilename = "sumf.tmp"
}
}
}
# srweights: either three finite weights (normalised to sum to 1) or a mix of
# NA and 0 which triggers automatic weighting downstream
if (length(srweights)!=3) stop("srweights must be a vector of length 3")
if (any(is.na(srweights))) {
if(!all(range(0,srweights,na.rm=TRUE)==c(0,0))) stop("NAs in srweight can only be combined with zeroes")
srautoweights <- TRUE
} else {
srweights <- srweights/sum(srweights)
if(is.na(sum(srweights))) stop("srweights can't be normalised - possibly infinite values or all zeroes")
if(sum(srweights)!=1) stop("srweights can't be normalised - possibly infinite values or all zeroes")
srautoweights <- FALSE
}
#Start of plotMSY function proper
cat("Stock:", stockname, "\n")
graphics.off() #So that graphics output can be sent to files
dir.create(outputfolder, showWarnings=FALSE, recursive=TRUE)
outputfilename = paste(outputfolder, stockname, ".txt", sep="")
output = list()
noredlines = simdatadet = simdata = simy = simSSB = list()
#Create .dat, run srmsy and read in its output
#for (srtype in srname)
#{
#Create srmsy.dat and out.dat
# NOTE(review): the 'for (srtype in srname)' loop above is commented out, so
# 'srtype' is undefined here and this line will error at run time.  TODO:
# restore the loop (or define srtype) before this function can run.
srno = sr[srname[sr]==srtype]
if (is.na(datfilename))
{
sumsen = convertSumSen(senfilename, indexfilename, pfpm, nits, srno, varybiodata, stockname,silent=TRUE,srconstrain=srconstrain)
} else {
sumsen = convertDat(datfilename, srno)
}
} # eof
|
/R/srmsymc_converter.R
|
no_license
|
AndyCampbell/msy
|
R
| false
| false
| 23,485
|
r
|
#' @title FLR to srmsymc
#'
#' @description XXX
#'
#' @export
#'
#' @param stk FLStock object
#' @param y Years
#' @param name_stock Name of the stock. May later down the line be used in
#' table and plot outputs
#' @param filename_age Name of the associated file containing biological (weights,
#' maturity and mortality) and fisheries data (selection pattern) by age.
#' @param opt_sr_model Recruitment model number (1: Ricker, 2: Beverton-Holt,
#' 3: Segmented regression).
#' @param opt_land Boolean
#' @param opt_sim 0=no simulation, 1=simulation
#' @param opt_age 0=error only in recr, 1=error in recr & steady-state vectors
#' @param opt_pen 0=no SR constraints, 1=apply SR constrain (IS THIS ONLY FOR
#' THE SEGMENTED REGRESSION??).
FLS2srmsymc <- function(stk, y = 3, name_stock = "codNS",
                        filename_age = "age.dat", opt_sr_model = 1,
                        opt_land = TRUE, opt_sim = TRUE, opt_age = TRUE,
                        opt_pen = TRUE) {
  # Convert an FLStock object into the two srmsymc input files
  # ('srmsymc.dat' and 'filename_age') via fls2list().
  #
  # BUG FIX: 'name_stock', 'filename_age' (in the srmsymc.dat call),
  # 'opt_sr_model' and the opt_sim/opt_age/opt_pen flags were previously
  # ignored and hard-wired to "codNS"/"age.dat"/1/1/1/1.  They are now passed
  # through; the defaults reproduce the old behaviour, so existing calls are
  # unaffected.
  x <- fls2list(stk, y = y, optLand = opt_land)
  y <- srmsymc_cat_srmsymc(name_stock, filename_age,
                           min(x$rby$year), max(x$rby$year),
                           aR = min(as.numeric(x$dims$age)),
                           aP = max(as.numeric(x$dims$age)),
                           opt_sr_model,
                           ifelse(opt_sim, 1, 0),  # downstream expects 0/1
                           ifelse(opt_age, 1, 0),
                           ifelse(opt_pen, 1, 0),
                           r = x$rby$rec,
                           ssb = x$rby$ssb,
                           year = x$rby$year)
  z <- srmsymc_cat_age(filename_age, n_fleets = 2, pf = 0, pm = 0,
                       sel = cbind(x$sH[, 1], x$sD[, 1]),
                       sel_cv = cbind(x$sH[, 2], x$sD[, 2]),
                       w = cbind(x$wH[, 1], x$wD[, 1]),
                       w_cv = cbind(x$wH[, 2], x$wD[, 2]),
                       bio = cbind(x$M[, 1], x$MT[, 1], x$wS[, 1]),
                       bio_cv = cbind(x$M[, 2], x$MT[, 2], x$wS[, 2]))
}
#' @title XXX
#'
#' @description From Tim
#'
#' @export
#'
#' @param stk FLStock object
#' @param y year
#' @param optLand Boolean, if TRUE (default) then ...
fls2list <- function(stk, y, optLand=TRUE)
{
# Collapse the last 'y' years of an FLStock into mean/CV summary matrices
# (selection split by landings/discard component, weights, biology) plus a
# recruitment/ssb/fbar/yield time series, in the list layout consumed by
# FLS2srmsymc().
d.flag <- (sum(discards(stk)) > 0)   # TRUE when any discards are recorded
ret <- vector("list",9)
names(ret) <- c("rby","sH","sD","wH","wD","wS","M","MT","dims")
# results-by-year time series (recruitment taken as the first age class)
ret$rby <- data.frame(year=as.numeric(dimnames(stk@stock.n)$year),
rec= c(stock.n(stk)[1,]),
ssb=c(ssb(stk)),
fbar=c(fbar(stk)),
yield=c(catch(stk)))
years <- rev(rev(dimnames(stk@stock.n)$year)[1:y])   # last y years
ages <- dimnames(stk@stock.n)$age
# mean and CV (sd/mean) by age over the selected years; NaN from 0/0 -> 0
my_sum <- function(x) {r=cbind(mean=apply(x[,years,],1,mean),CV=apply(x[,years,],1,sd)/apply(x[,years,],1,mean));r[is.na(r)]<- 0; r}
if (d.flag & optLand) {
# split F into landings (sH) and discard (sD) components, each scaled by
# the mean total F over the fbar age range of each year
sC <- harvest(stk)[,years]
sH <- harvest(stk)[,years] * (landings.n(stk)/catch.n(stk))[,years]
sD <- harvest(stk)[,years] * (discards.n(stk)/catch.n(stk))[,years]
for (yy in years) sH[,yy] <- sH[,yy]/mean(sC[range(stk)["minfbar"]:range(stk)["maxfbar"],yy])
for (yy in years) sD[,yy] <- sD[,yy]/mean(sC[range(stk)["minfbar"]:range(stk)["maxfbar"],yy])
ret$sH <- my_sum(sH)
ret$sD <- my_sum(sD)
} else {
# no discard split: scale total F by its own fbar mean, zero discards
sH <- harvest(stk)[,years]
for (yy in years) sH[,yy] <- sH[,yy]/mean(sH[range(stk)["minfbar"]:range(stk)["maxfbar"],yy])
ret$sH <- my_sum(sH)
ret$sD <- ret$sH*0
}
ret$wH <- my_sum(landings.wt(stk))
ret$wD <- my_sum(discards.wt(stk))
ret$wD[is.na(ret$wD)] <- 0
ret$wS <- my_sum(stock.wt(stk))
ret$M <- my_sum(m(stk))
ret$MT <- my_sum(mat(stk))
ret$MT[is.na(ret$MT)] <- 0
ret$dims <- list(age=ages,year=years,fbarage=range(stk)["minfbar"]:range(stk)["maxfbar"])
#Set CVs to a more realistic value
#i <- (ret$MT[,"mean"] >=0.05 & ret$MT[,"mean"] <=0.95)
#ret$MT[i,"CV"] <- max(ret$MT[i,"CV"],0.1)
#ret$M[,"CV"] <- max(ret$M[,"CV"],0.1)
return (ret)
}
#' @title Write srmsymc.dat to disk
#'
#' @description The function is used to write srmsymc.dat to the disk.
#'
#' @author Einar Hjorleifsson
#'
#' @export
#'
#' @param name_stock Name of the stock. May later down the line be used in
#' table and plot outputs
#' @param filename_age Name of the associated file containing biological (weights,
#' maturity and mortality) and fisheries data (selection pattern) by age.
#' @param y1 First year of ssb and recruitment data
#' @param y2 Last year of ssb and recruitment data
#' @param aR Recruitment age
#' @param aP Plus group age
#' @param opt_sr_model Recruitment model number (1: Ricker, 2: Beverton-Holt,
#' 3: Segmented regression).
#' @param opt_sim 0=no simulation, 1=simulation
#' @param opt_age 0=error only in recr, 1=error in recr & steady-state vectors
#' @param opt_pen 0=no SR constraints, 1=apply SR constrain (IS THIS ONLY FOR
#' THE SEGMENTED REGRESSION??).
#' @param r A vector containing recruitment
#' @param ssb A vector containing spawning stock biomass
#' @param year A vector containinb years (just used in comments).
srmsymc_cat_srmsymc <- function(name_stock, filename_age, y1, y2, aR, aP,
                                opt_sr_model, opt_sim, opt_age, opt_pen,
                                r, ssb, year) {
  # Write 'srmsymc.dat' (run options followed by the ssb-recruitment series)
  # to the working directory; 'year' is only echoed in trailing comments.
  tmpfile <- file('srmsymc.dat', open = 'w')
  # ROBUSTNESS FIX: close the connection even if one of the writes fails
  on.exit(close(tmpfile), add = TRUE)
  cat('# Header: Some nice description\n', file = tmpfile, append = TRUE)
  cat(name_stock, '# stkname: Name of the stock\n', file = tmpfile, append = TRUE)
  # STRING FIX: closed the unbalanced parenthesis in the comment below
  cat(filename_age, '# filname: Name of the option file (2nd file)\n', file = tmpfile, append = TRUE)
  cat(y1, ' # ybeg: First year (yearRange[1])\n', file = tmpfile, append = TRUE)
  cat(y2, ' # yend: Last year (yearRange[2])\n', file = tmpfile, append = TRUE)
  cat(aR, ' # r: Recruitment age (senhead[1])\n', file = tmpfile, append = TRUE)
  cat(aP, ' # A: Plus group age (senhead[2])\n', file = tmpfile, append = TRUE)
  cat(opt_sr_model, ' # Ropt: S-R function type (sr)\n', file = tmpfile, append = TRUE)
  cat(opt_sim, ' # simopt: 0=no simulation, 1=simulation (ifelse(nits==0,0,1)) \n', file = tmpfile, append = TRUE)
  cat(opt_age, ' # senopt: 0=error only in recr, 1=error in recr & steady-state vectors (ifelse(varybiodata,1,0))\n', file = tmpfile, append = TRUE)
  cat(opt_pen, ' # penopt: 0=no SR constraints, 1=apply SR constraints (ifelse(srconstrain,1,0))\n', file = tmpfile, append = TRUE)
  cat('# r ssb\n', file = tmpfile, append = TRUE)
  cat(paste(r, ssb, '#', year), file = tmpfile, append = TRUE, sep = "\n")
}
#' @title Write age.dat to disk
#'
#' @description XXX
#'
#' @author Einar Hjorleifsson
#'
#' @export
#'
#' @param filename_age XXX
#' @param n_fleets XXX
#' @param pf XXX
#' @param pm XXX
#' @param sel XXX
#' @param sel_cv XXX
#' @param w XXX
#' @param w_cv XXX
#' @param bio XXX
#' @param bio_cv XXX
#'
srmsymc_cat_age <- function(filename_age, n_fleets, pf, pm,
                            sel, sel_cv, w, w_cv, bio, bio_cv) {
  # Write the age-based srmsymc input file to 'filename_age'.
  #
  # sel/sel_cv and w/w_cv: matrices (ages in rows, fleets in columns) with
  # the selection pattern resp. catch weights plus their cv's.
  # bio/bio_cv: 3-column matrices (M, maturity, ssb weights) plus cv's.
  # pf/pm: proportion of fishing/natural mortality before spawning time.
  n <- 4  # number of digits to write
  tmpfile <- file(filename_age, open = 'w')
  # ROBUSTNESS FIX: close the connection even if one of the writes fails
  on.exit(close(tmpfile), add = TRUE)
  cat('#Header: Some nice description\n', file = tmpfile, append = TRUE)
  cat(n_fleets, '# fno: Number of fleets (nstocks)\n', file = tmpfile, append = TRUE)
  cat(1, '# sno: Fleets for yield per recruit stats (always 1)\n', file = tmpfile, append = TRUE)
  cat(pf, '# f: proportional fishing mortality before spawning time (pf)\n', file = tmpfile, append = TRUE)
  cat(pm, '# m: proportional natural mortality before spawning time (pm)\n', file = tmpfile, append = TRUE)
  cat('# Selection pattern\n', file = tmpfile, append = TRUE)
  for (i in 1:nrow(sel)) cat(format(round(sel[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# cv Selection pattern\n', file = tmpfile, append = TRUE)
  # CONSISTENCY FIX: the second format() argument was missing here although
  # every other row-writing call passes it, giving these rows odd padding
  for (i in 1:nrow(sel_cv)) cat(format(round(sel_cv[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# Weight at age\n', file = tmpfile, append = TRUE)
  for (i in 1:nrow(w)) cat(format(round(w[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# cv Weight at age\n', file = tmpfile, append = TRUE)
  for (i in 1:nrow(w_cv)) cat(format(round(w_cv[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# Biological data\n', file = tmpfile, append = TRUE)
  cat('# M, mat, wSSB\n', file = tmpfile, append = TRUE)
  for (i in 1:nrow(bio)) cat(format(round(bio[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# cv Biological data\n', file = tmpfile, append = TRUE)
  cat('# cvM, cvmat, cvwSSB\n', file = tmpfile, append = TRUE)
  for (i in 1:nrow(bio_cv)) cat(format(round(bio_cv[i, ], n), n), '\n', file = tmpfile, append = TRUE)
}
#' @title write data files to disk for the ADM srmsymc program
#'
#' @description Some details, including link to the function a) running the
#' program and creating the data input
#'
#' @author Einar Hjorleifsson <einar.hjorleifsson@@gmail.com>
#'
#' @export
#'
#' @param rby A list that contains in its first three rows year, recruitment
#' and ssb.
#' @param iba A list - read.sen
#' @param aR \emph{integer}. Age of recruitment. Has to be specified. Is used to
#' shift the recruitment data such that it aligns with ssb data. Can be set to
#' 0 (zero) if ssb-recruitment pairs are already properly aligned.
#' @param col_names vector of column names for year, recruitment and spawning stock
#' biomass (in that order).
#' @param opt_sr_model \emph{integer}. Number (1-3) corresponding to recruitment model.
#' @param opt_sim \emph{integer}. If 0 no simulation, if 1 simulation.
#' @param opt_pen \emph{integer}. If 0 then no recruitment constraints, if 1
#' then recruitment constraints applied (NEED MORE DETAIL)
#' @param opt_age \emph{integer}. If 0 then error only in recruitment, if 1 then
#' both error in recruitment and steady-state vectors (age based inputs).
#' @param rba data.frame that contains age based input NOT YET IMPLEMENTED
#' @param filename_age \emph{character}. Name of the option file that contains the
#' age-based-data. If missing, set to "age.dat".
#' @param aP \emph{integer}. Plus group age.
#' @param years vector containing years to include in the ssb-r
write.srmsymc <- function(rby, iba, aR, col_names = c("year", "r", "ssb"),
                          opt_sr_model = 1, opt_sim = 1, opt_pen = 1,
                          opt_age = 1, rba, filename_age = "age.dat",
                          aP, years) {
  # Writes the two ADMB srmsymc input files ('srmsymc.dat' and the age-based
  # file 'filename_age') to the working directory and returns the (possibly
  # modified) rby/iba objects in a list.
  value <- NULL

  ## rby tests: must be a list with $data and $info, info$class2 == "rby"
  if (missing(rby)) stop("summary (year) data must be specified")
  class2 <- rby$info$class2
  if (is.null(class2)) stop("Not implemented yet for provided file type")
  if (class2 != "rby") stop("Not implemented yet for provided file type")
  x <- rby$data[, col_names]
  y <- rby$info

  ## rba test - NOT YET IMPLEMENTED
  #if(missing(rba)) stop("prediction (age) data must be specified")
  if (missing(filename_age)) filename_age <- "age.dat"

  ### A. Setting up the srmsymc.dat
  ## stock name: fall back to "nn" and strip blanks
  name_stock <- y$name_stock
  if (is.na(name_stock)) name_stock <- "nn"
  name_stock <- y$name_stock <- str_replace_all(name_stock, " ", "")
  aR <- y$time[3]
  if (is.na(aR)) stop("Recruitment age (aR) must be specified")

  ## Align recruitment with ssb (shift recruitment series by age aR)
  x <- align_ssb_r(x, col.names = names(x), aR)
  x <- x[!is.na(x$r), ]
  if (missing(years)) {
    y1 <- y$time[1]
    y2 <- y$time[2]
    years <- c(y1:y2)
  } else {
    y$time[1] <- y1 <- min(years)
    y$time[2] <- y2 <- max(years)
  }
  x <- x[x$year %in% years, ]
  if (missing(aP)) stop("Plus group age (aP) must be specified")
  y$time[6] <- aP

  ## carry the options along in the info-list that is returned
  x <- data.frame(r = x$r, ssb = x$ssb, year = x$year)
  y$filename_age <- filename_age
  y$opt_sr_model <- opt_sr_model
  y$opt_sim <- opt_sim
  y$opt_age <- opt_age
  y$opt_pen <- opt_pen
  rby <- list(data = x, info = y)
  srmsymc_cat_srmsymc(name_stock, filename_age, y1, y2, aR, aP,
                      opt_sr_model, opt_sim, opt_age, opt_pen,
                      r = x$r, ssb = x$ssb, year = x$year)

  ### B. Setting up the age.dat
  if (iba$info$creator == "created from function fishvise:::read.sen") {
    x <- iba$data
    y <- iba$info
    nFleets <- sum(y$fleets[2:4])
    fleet_Fs <- y$mort[, c(2:4)]
    fleet_Fs_names <- colnames(fleet_Fs)
    weight_names <- str_replace(fleet_Fs_names, "F", "W")
    ages <- c(y$time[4]:y$time[5])
    sel <- cvsel <- wgt <- cvwgt <- matrix(NA, nrow = length(ages), ncol = nFleets)
    for (i in 1:nFleets) {
      x1 <- x[x$id %in% fleet_Fs_names[i], ]
      # scale the selection pattern to its mean over the reference ages
      x1$value <- x1$value / mean(x1$value[x1$age %in% c(fleet_Fs[1, i]:fleet_Fs[2, i])])
      sel[, i] <- x1[, "value"]
      cvsel[, i] <- x1[, "cv"]
      x1 <- x[x$id %in% weight_names[i], ]
      wgt[, i] <- x1[, "value"]
      # BUG FIX: the weight cv was previously copied from the "value" column
      cvwgt[, i] <- x1[, "cv"]
    }
    bio <- cvbio <- matrix(NA, nrow = length(ages), ncol = 3)
    bio[, 1] <- x[x$id %in% 'M', 'value']
    bio[, 2] <- x[x$id %in% 'xN', 'value']
    bio[, 3] <- x[x$id %in% 'wN', 'value']
    cvbio[, 1] <- x[x$id %in% 'M', 'cv']
    cvbio[, 2] <- x[x$id %in% 'xN', 'cv']
    cvbio[, 3] <- x[x$id %in% 'wN', 'cv']
    cat_age.dat(filename_age, nFleets, y$pF, y$pM, sel, cvsel, wgt, cvwgt, bio, cvbio)
    iba <- list(sel = sel, sel_cv = cvsel, w = wgt, w_cv = cvwgt,
                bio = bio, bio_cv = cvbio, info = y)
  }
  if (iba$info$creator == "fishvise::read.rby") {
    x <- iba$data[, c("year", "age", "tF", "wC", "wX", "xN")]
    ## reference F: mean over the (hard-wired) ages 5-10
    tF <- ddply(x[x$age %in% 5:10, ], "year", summarise, refF = mean(tF))
    x <- join(x, tF)
    x$sF <- x$tF / x$refF
    d <- melt(x[, c("year", "age", "sF", "wC", "wX", "xN")], id.vars = c("year", "age"))
    d <- ddply(d, c("variable", "age"), summarise,
               ave = mean(value, na.rm = TRUE),
               cv = sqrt(var(value, na.rm = TRUE)) / ave)
    nFleets <- 1
    fleet_Fs <- matrix(c(5, 10), ncol = 1, nrow = 2)
    colnames(fleet_Fs) <- "sF"
    fleet_Fs_names <- colnames(fleet_Fs)
    weight_names <- "wC"
    ages <- c(min(x$age):max(x$age))
    sel <- cvsel <- wgt <- cvwgt <- matrix(NA, nrow = length(ages), ncol = nFleets)
    x <- d
    names(x) <- c("id", "age", "value", "cv")
    for (i in 1:nFleets) {
      x1 <- x[x$id %in% fleet_Fs_names[i], ]
      x1$value <- x1$value / mean(x1$value[x1$age %in% c(fleet_Fs[1, i]:fleet_Fs[2, i])])
      sel[, i] <- x1[, 'value']
      cvsel[, i] <- x1[, 'cv']
      x1 <- x[x$id %in% weight_names[i], ]
      wgt[, i] <- x1[, "value"]
      # BUG FIX: the weight cv was previously copied from the "value" column
      cvwgt[, i] <- x1[, "cv"]
    }
    bio <- cvbio <- matrix(NA, nrow = length(ages), ncol = 3)
    bio[, 1] <- 0.2    # natural mortality fixed at 0.2 for this input type
    bio[, 2] <- x[x$id %in% 'xN', 'value']
    bio[, 3] <- x[x$id %in% 'wX', 'value']
    cvbio[, 1] <- 0.0
    cvbio[, 2] <- x[x$id %in% 'xN', 'cv']
    cvbio[, 3] <- x[x$id %in% 'wX', 'cv']
    # CONSISTENCY FIX: honour 'filename_age' (was hard-wired to "age.dat")
    cat_age.dat(filename_age, 1, 0, 0, sel, cvsel, wgt, cvwgt, bio, cvbio)
    # NOTE(review): 'info = y' carries over the rby info from section A here,
    # not iba$info - looks suspicious; confirm intended.
    iba <- list(sel = sel, sel_cv = cvsel, w = wgt, w_cv = cvwgt,
                bio = bio, bio_cv = cvbio, info = y)
  }
  return(list(rby = rby, iba = iba))
}
#' @title Write age.dat to disk
#'
#' @description XXX
#'
#' @author Einar Hjorleifsson
#'
#' @export
#'
#' @param filename_age XXX
#' @param n_fleets XXX
#' @param pf XXX
#' @param pm XXX
#' @param sel XXX
#' @param sel_cv XXX
#' @param w XXX
#' @param w_cv XXX
#' @param bio XXX
#' @param bio_cv XXX
#'
cat_age.dat <- function(filename_age, n_fleets, pf, pm,
                        sel, sel_cv, w, w_cv, bio, bio_cv) {
  # Write the age-based srmsymc input file to 'filename_age'.
  #
  # sel/sel_cv and w/w_cv: matrices (ages in rows, fleets in columns) with
  # the selection pattern resp. catch weights plus their cv's.
  # bio/bio_cv: 3-column matrices (M, maturity, ssb weights) plus cv's.
  # pf/pm: proportion of fishing/natural mortality before spawning time.
  n <- 4  # number of digits to write
  tmpfile <- file(filename_age, open = 'w')
  # ROBUSTNESS FIX: close the connection even if one of the writes fails
  on.exit(close(tmpfile), add = TRUE)
  cat('#Header: Some nice description\n', file = tmpfile, append = TRUE)
  cat(n_fleets, '# fno: Number of fleets (nstocks)\n', file = tmpfile, append = TRUE)
  cat(1, '# sno: Fleets for yield per recruit stats (always 1)\n', file = tmpfile, append = TRUE)
  cat(pf, '# f: proportional fishing mortality before spawning time (pf)\n', file = tmpfile, append = TRUE)
  cat(pm, '# m: proportional natural mortality before spawning time (pm)\n', file = tmpfile, append = TRUE)
  cat('# Selection pattern\n', file = tmpfile, append = TRUE)
  for (i in 1:nrow(sel)) cat(format(round(sel[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# cv Selection pattern\n', file = tmpfile, append = TRUE)
  # CONSISTENCY FIX: the second format() argument was missing here although
  # every other row-writing call passes it, giving these rows odd padding
  for (i in 1:nrow(sel_cv)) cat(format(round(sel_cv[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# Weight at age\n', file = tmpfile, append = TRUE)
  for (i in 1:nrow(w)) cat(format(round(w[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# cv Weight at age\n', file = tmpfile, append = TRUE)
  for (i in 1:nrow(w_cv)) cat(format(round(w_cv[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# Biological data\n', file = tmpfile, append = TRUE)
  cat('# M, mat, wSSB\n', file = tmpfile, append = TRUE)
  for (i in 1:nrow(bio)) cat(format(round(bio[i, ], n), n), '\n', file = tmpfile, append = TRUE)
  cat('# cv Biological data\n', file = tmpfile, append = TRUE)
  cat('# cvM, cvmat, cvwSSB\n', file = tmpfile, append = TRUE)
  for (i in 1:nrow(bio_cv)) cat(format(round(bio_cv[i, ], n), n), '\n', file = tmpfile, append = TRUE)
}
#' plotMSY
#'
#'
#' @param senfilename is the name of the senfile, with a corresponding sumfile sharing the same name except replacing ".sen" with ".sum". Produces an interactive window if not specified
#' @param indexfilename is the name of an index file in the Lowestoft format pointing to the pf and pm files. If pfpm is specified, this is ignored, if neither specified an interactive window appears to choose index file
#' @param pfpm is a vector of two values; pm and pf (proportion of m and f before spawning
#' @param srweights is a vector of three values, giving relative weighting of the three stock recruit forms, or a combination of 0 and NA for automatic weighting.
#' @param trimming proportion of the simulations to trim before taking harmonic mean. NA causes no trimming, but a diagnostic plot
#' @param nits Number of iterations of bootstrapping - if 0, does only the deterministic fit
#' @param nhair number of lines to plot on 'hair' plots
#' @param varybiodata If TRUE, bootstraps the biological & fleet data (weight, maturity, mortality, selectivity) if FALSE, varies SR relationship only
#' @param stockname Display title for stock used in titles and output path
#' @param fpa Value of Fpa to be plotted on output (NA for no value to plot)
#' @param flim Value of Flim to be plotted on output (NA for no value to plot)
#' @param bpa Value of Bpa to be plotted on output (NA for no value to plot)
#' @param blim Value of Blim to be plotted on output (NA for no value to plot)
#' @param outputfolder Location for output files. Defaults to ".\\output\\[stockname]\\"
#' @param datfilename A pre-calculated dat file - if provided, senfilename, indexfilename, varybiodata, srconstrain and pfpm are ignored in preference to values in the dat file. Data from the sum file will be added to the plots if it can be found
#' @param silent Supresses the majority of the output to screen. Default is TRUE
#' @param onlyYPR Calculate only the yield per recruit reference points, for stocks where the SRR is unknown or uncertain. Default is FALSE
#' @return mean(5:7) ##Some function
#' @author Tim Earl \email{timothy.earl@@cefas.co.uk}
#' @export
srmsymc_convertsumsen = function(senfilename = NA,
indexfilename = NA,
pfpm = NA,
srweights=c(NA, NA, NA),
trimming = NA,
nits = 100,
nhair = 100,
varybiodata = TRUE,
stockname = "",
fpa=NA,
flim=NA,
bpa=NA,
blim=NA,
outputfolder="",
datfilename=NA,
silent=TRUE,
onlyYPR=FALSE)
{
# Validates the input combination (sen/sum file pair plus index file or pfpm
# vector, OR a pre-computed dat file), normalises the stock-recruit weights,
# prepares the output folder and converts the inputs via convertSumSen() /
# convertDat().
# NOTE(review): 'trimming', 'nhair', 'fpa', 'flim', 'bpa' and 'blim' are not
# used in this portion of the function - presumably consumed further down in
# the original plotMSY code; confirm against the upstream source.
if (onlyYPR) {
# yield-per-recruit only: no stock-recruit model is fitted
srname = c("None")
srsn = c("00")
sr = 0
} else {
srname = c("Ricker","Beverton-Holt","Smooth hockeystick")
srsn = c("Ri","BH","HS")
sr = 1:length(srname)
}
srconstrain = TRUE #Should a penalty be applied to keep alpha and beta positive, and hockeystick breakpoint within data.
#Validate input arguments and ask if none provided
if (is.na(datfilename))
{
#take input from sen and sum files
#senfilename = tolower(senfilename)
#indexfilename = tolower(indexfilename)
if (is.na(senfilename)) stop("sen file needs to be specified") #senfilename = tolower(choose.files("*.sen", "Choose SEN file",multi=FALSE))
if (!file.exists(senfilename)) stop("SEN file not found")
# the sum file must sit next to the sen file with the same basename
sumfilename = sub(".sen",".sum",senfilename,fixed=TRUE)
if (!file.exists(sumfilename)) stop("SUM file not found")
if (any(is.na(pfpm)))
{
# no pf/pm supplied -> locate them via the (Lowestoft format) index file
if (is.na(indexfilename)) indexfilename = tolower(choose.files("*.*", "Choose index file",multi=FALSE))
if (!file.exists(indexfilename)) stop("Index file not found")
} else {
# pf/pm supplied directly: must be two proportions in [0, 1]
if (length(pfpm)!=2) stop("pfpm must be a vector of length 2")
if (any(pfpm[1]>1,pfpm[1]<0)) stop("pf must be between 0 and 1")
if (any(pfpm[2]>1,pfpm[2]<0)) stop("pm must be between 0 and 1")
}
if (stockname=="") stockname = gsub(".sen", "", basename(senfilename), fixed=TRUE)
if (outputfolder=="") outputfolder = paste("output/", stockname, "/", sep="")
} else {
#datfilename provided
datfilename = tolower(datfilename)
if (!file.exists(datfilename)) stop("file not found:", datfilename)
if (stockname=="") stockname = scan(datfilename,"",comment.char='#',nlines=2,quiet=TRUE)[1]
if (outputfolder=="") outputfolder = paste(".\\output\\", stockname, "\\", sep="")
sumfilename = NA
if (!is.na(senfilename)) #This can be used for adding points and legends to the yield and SSB plots
{
senfilename = tolower(senfilename)
sumfilename = sub(".sen",".sum",senfilename,fixed=TRUE)
if (!file.exists(sumfilename))
{
sumfilename = NA
} else { #convert to space delimited if comma delimited; save as sumf.tmp
sumf = scan(sumfilename,"",sep="\n",quote=NULL,quiet=silent)
sumf = gsub(","," ",sumf)
cat(sumf,file = "sumf.tmp",sep="\n",quote=NULL)
sumfilename = "sumf.tmp"
}
}
}
# srweights: either three finite weights (normalised to sum to 1) or a mix of
# NA and 0 which triggers automatic weighting downstream
if (length(srweights)!=3) stop("srweights must be a vector of length 3")
if (any(is.na(srweights))) {
if(!all(range(0,srweights,na.rm=TRUE)==c(0,0))) stop("NAs in srweight can only be combined with zeroes")
srautoweights <- TRUE
} else {
srweights <- srweights/sum(srweights)
if(is.na(sum(srweights))) stop("srweights can't be normalised - possibly infinite values or all zeroes")
if(sum(srweights)!=1) stop("srweights can't be normalised - possibly infinite values or all zeroes")
srautoweights <- FALSE
}
#Start of plotMSY function proper
cat("Stock:", stockname, "\n")
graphics.off() #So that graphics output can be sent to files
dir.create(outputfolder, showWarnings=FALSE, recursive=TRUE)
outputfilename = paste(outputfolder, stockname, ".txt", sep="")
output = list()
noredlines = simdatadet = simdata = simy = simSSB = list()
#Create .dat, run srmsy and read in its output
#for (srtype in srname)
#{
#Create srmsy.dat and out.dat
# NOTE(review): the 'for (srtype in srname)' loop above is commented out, so
# 'srtype' is undefined here and this line will error at run time.  TODO:
# restore the loop (or define srtype) before this function can run.
srno = sr[srname[sr]==srtype]
if (is.na(datfilename))
{
sumsen = convertSumSen(senfilename, indexfilename, pfpm, nits, srno, varybiodata, stockname,silent=TRUE,srconstrain=srconstrain)
} else {
sumsen = convertDat(datfilename, srno)
}
} # eof
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE: these are auto-generated thin wrappers; the actual semantics live
# in the package's compiled C++ sources (src/). Regenerate with
# Rcpp::compileAttributes() rather than editing here.

# Presumably the Huber weight function w(x; cw) -- dispatches to C++; confirm in src/.
huber_weight <- function(x, cw) {
.Call('_RobustFPLM_huber_weight', PACKAGE = 'RobustFPLM', x, cw)
}
# Presumably the Huber rho (loss) function rho(x; cw) -- dispatches to C++; confirm in src/.
huber_rho <- function(x, cw) {
.Call('_RobustFPLM_huber_rho', PACKAGE = 'RobustFPLM', x, cw)
}
# Presumably iterative Huber regression estimates for design X, response y,
# start beta, tuning cw, convergence tol -- dispatches to C++; confirm in src/.
huber_estimates <- function(X, y, beta, cw, tol) {
.Call('_RobustFPLM_huber_estimates', PACKAGE = 'RobustFPLM', X, y, beta, cw, tol)
}
|
/R/RcppExports.R
|
no_license
|
ywbetter/RobustFPLM
|
R
| false
| false
| 473
|
r
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE: these are auto-generated thin wrappers; the actual semantics live
# in the package's compiled C++ sources (src/). Regenerate with
# Rcpp::compileAttributes() rather than editing here.

# Presumably the Huber weight function w(x; cw) -- dispatches to C++; confirm in src/.
huber_weight <- function(x, cw) {
.Call('_RobustFPLM_huber_weight', PACKAGE = 'RobustFPLM', x, cw)
}
# Presumably the Huber rho (loss) function rho(x; cw) -- dispatches to C++; confirm in src/.
huber_rho <- function(x, cw) {
.Call('_RobustFPLM_huber_rho', PACKAGE = 'RobustFPLM', x, cw)
}
# Presumably iterative Huber regression estimates for design X, response y,
# start beta, tuning cw, convergence tol -- dispatches to C++; confirm in src/.
huber_estimates <- function(X, y, beta, cw, tol) {
.Call('_RobustFPLM_huber_estimates', PACKAGE = 'RobustFPLM', X, y, beta, cw, tol)
}
|
# Violin plots of marine-mammal body weight by species and year.
library(dplyr)
library(ggplot2)  # was missing: ggplot()/geom_violin()/facet_grid() below need ggplot2

morph <- read.csv('input/morph_data.csv', header = TRUE)  # was header = T

## subsetting parameters ----
mammalsp <- c(1, 2, 5, 6)        # species codes kept (recoded to names below)
nafodiet <- c('2H', '2J', '3K')  # NAFO divisions of interest
dietby <- c('year', 'mmsp')

## look only at data from main stomach and nafo Divs ----
## NOTE(review): `diet` is used here but never read in this script -- it is
## presumably created by an earlier script in the pipeline; confirm.
diet <- diet[which(diet$digestivetractsection == 'Main Stomach'), ]
diet <- diet[which(diet$nafo %in% nafodiet), ]
diet <- diet[which(diet$codemmsp %in% mammalsp), ]

## merge datasets ----
## one row per animal (idsex) from diet, joined to morphometrics
md <- merge(diet[!duplicated(diet$idsex), c('idsex', 'nafo', 'year')],
            select(morph, -year),
            by = 'idsex')
md <- subset(md, year < 2007)

## recode species codes to names, then fix factor order for facet panels
md$mmsp <- ifelse(md$codemmsp == 1, 'Harp seal',
           ifelse(md$codemmsp == 2, 'Hooded seal',
           ifelse(md$codemmsp == 5, 'Ringed seal',
           ifelse(md$codemmsp == 6, 'Bearded seal', 'flag'))))
md <- transform(md,
                mmsp = factor(mmsp, levels = c(
                  "Harp seal",
                  "Ringed seal",
                  "Hooded seal",
                  "Bearded seal")))

## violin plot of weight by year, one panel per species
p <- ggplot(md, aes(factor(year), mmweight))
p <- p + geom_violin()
p <- p + facet_grid(mmsp ~ ., drop = TRUE, scales = 'free_y')
#p <- p + stat_summary(aes(factor(year)), fun.y = median, geom = "point", fill = "red", shape = 21, size = 1.5)
p <- p + theme_bw()
p
|
/analysis/mm_weight_dist.R
|
no_license
|
adbpatagonia/SealDietAnalysis
|
R
| false
| false
| 1,414
|
r
|
# Violin plots of marine-mammal body weight by species and year.
library(dplyr)
library(ggplot2)  # was missing: ggplot()/geom_violin()/facet_grid() below need ggplot2

morph <- read.csv('input/morph_data.csv', header = TRUE)  # was header = T

## subsetting parameters ----
mammalsp <- c(1, 2, 5, 6)        # species codes kept (recoded to names below)
nafodiet <- c('2H', '2J', '3K')  # NAFO divisions of interest
dietby <- c('year', 'mmsp')

## look only at data from main stomach and nafo Divs ----
## NOTE(review): `diet` is used here but never read in this script -- it is
## presumably created by an earlier script in the pipeline; confirm.
diet <- diet[which(diet$digestivetractsection == 'Main Stomach'), ]
diet <- diet[which(diet$nafo %in% nafodiet), ]
diet <- diet[which(diet$codemmsp %in% mammalsp), ]

## merge datasets ----
## one row per animal (idsex) from diet, joined to morphometrics
md <- merge(diet[!duplicated(diet$idsex), c('idsex', 'nafo', 'year')],
            select(morph, -year),
            by = 'idsex')
md <- subset(md, year < 2007)

## recode species codes to names, then fix factor order for facet panels
md$mmsp <- ifelse(md$codemmsp == 1, 'Harp seal',
           ifelse(md$codemmsp == 2, 'Hooded seal',
           ifelse(md$codemmsp == 5, 'Ringed seal',
           ifelse(md$codemmsp == 6, 'Bearded seal', 'flag'))))
md <- transform(md,
                mmsp = factor(mmsp, levels = c(
                  "Harp seal",
                  "Ringed seal",
                  "Hooded seal",
                  "Bearded seal")))

## violin plot of weight by year, one panel per species
p <- ggplot(md, aes(factor(year), mmweight))
p <- p + geom_violin()
p <- p + facet_grid(mmsp ~ ., drop = TRUE, scales = 'free_y')
#p <- p + stat_summary(aes(factor(year)), fun.y = median, geom = "point", fill = "red", shape = 21, size = 1.5)
p <- p + theme_bw()
p
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AccountBalance.R
\name{AccountBalance}
\alias{AccountBalance}
\alias{accountbalance}
\alias{getbalance}
\alias{get_account_balance}
\title{Retrieve MTurk account balance}
\usage{
AccountBalance()
}
\value{
Returns a list of length 2: \dQuote{AvailableBalance}, the balance of
the account in US Dollars, and \dQuote{RequestMetadata}, the metadata for the
request. Note: list is returned invisibly.
}
\description{
Retrieves the amount of money (in US Dollars) in your MTurk account.
}
\details{
\code{AccountBalance} takes no arguments.
\code{accountbalance()}, \code{get_account_balance()} and \code{getbalance()}
are aliases for \code{AccountBalance}.
}
\examples{
\dontrun{
AccountBalance()
}
}
\references{
\href{https://docs.aws.amazon.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_GetAccountBalanceOperation.html}{API
Reference}
\href{https://requester.mturk.com/pricing}{MTurk Pricing Structure}
}
\author{
Tyler Burleigh, Thomas J. Leeper
}
|
/man/AccountBalance.Rd
|
no_license
|
cloudyr/pyMTurkR
|
R
| false
| true
| 1,032
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AccountBalance.R
\name{AccountBalance}
\alias{AccountBalance}
\alias{accountbalance}
\alias{getbalance}
\alias{get_account_balance}
\title{Retrieve MTurk account balance}
\usage{
AccountBalance()
}
\value{
Returns a list of length 2: \dQuote{AvailableBalance}, the balance of
the account in US Dollars, and \dQuote{RequestMetadata}, the metadata for the
request. Note: list is returned invisibly.
}
\description{
Retrieves the amount of money (in US Dollars) in your MTurk account.
}
\details{
\code{AccountBalance} takes no arguments.
\code{accountbalance()}, \code{get_account_balance()} and \code{getbalance()}
are aliases for \code{AccountBalance}.
}
\examples{
\dontrun{
AccountBalance()
}
}
\references{
\href{https://docs.aws.amazon.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_GetAccountBalanceOperation.html}{API
Reference}
\href{https://requester.mturk.com/pricing}{MTurk Pricing Structure}
}
\author{
Tyler Burleigh, Thomas J. Leeper
}
|
library(DiffBind)
library(microbenchmark)

## Differential binding analysis of EZH2 across three ENCODE cell lines
## (HeLa-S3, HepG2, HUVEC) at 250bp resolution; run time is recorded.
bin = 250
method = 'DiffBind'
mark = 'EZH2'
cell = 'Encode_threecells'

## Replicate ChIP BAM files per cell line
chip1 = c(
  'wgEncodeBroadHistoneHelas3Ezh239875AlnRep1.markdup.q10.sorted.bam',
  'wgEncodeBroadHistoneHelas3Ezh239875AlnRep2.markdup.q10.sorted.bam'
)
chip2 = c(
  'wgEncodeBroadHistoneHepg2Ezh239875AlnRep1.markdup.q10.sorted.bam',
  'wgEncodeBroadHistoneHepg2Ezh239875AlnRep2.markdup.q10.sorted.bam'
)
chip3 = c('wgEncodeBroadHistoneHuvecEzh239875AlnRep1.markdup.q10.sorted.bam',
          'wgEncodeBroadHistoneHuvecEzh239875AlnRep2.markdup.q10.sorted.bam')
## Matching input-control BAM files per cell line
control1 = c(
  'wgEncodeBroadHistoneHelas3ControlStdAlnRep1.markdup.q10.sorted.bam',
  'wgEncodeBroadHistoneHelas3ControlStdAlnRep2.markdup.q10.sorted.bam'
)
control2 = c(
  'wgEncodeBroadHistoneHepg2ControlStdAlnRep1.markdup.q10.sorted.bam',
  'wgEncodeBroadHistoneHepg2ControlStdAlnRep2.markdup.q10.sorted.bam'
)
control3 = c('wgEncodeBroadHistoneHuvecControlStdAlnRep1.markdup.q10.sorted.bam',
             'wgEncodeBroadHistoneHuvecControlStdAlnRep2.markdup.q10.sorted.bam')
dirchip1 = '../../../../Data/Encode_helas3/'
dirchip2 = '../../../../Data/Encode_hepg2/'
dirchip3 = '../../../../Data/Encode_huvec/'

### Organizing other parameters
cptime = list()

## MACS2 peak files per cell line.
## NOTE(review): the original had two if-blocks with byte-identical bodies,
## one for c('H3K36me3','H3K27me3','EZH2') and one for
## c('H3K4me3','H3K27ac','CTCF'); they are merged here with no behavior change.
if (mark %in% c('H3K36me3', 'H3K27me3', 'EZH2',
                'H3K4me3', 'H3K27ac', 'CTCF')) {
  dirpeak1 = paste0('../../../MACS2/', mark, '/Helas3/Output/MACS2_',
                    mark, '_Helas3_Output_peaks.xls')
  dirpeak2 = paste0('../../../MACS2/', mark, '/Hepg2/Output/MACS2_',
                    mark, '_Hepg2_Output_peaks.xls')
  dirpeak3 = paste0('../../../MACS2/', mark, '/Huvec/Output/MACS2_',
                    mark, '_Huvec_Output_peaks.xls')
}

outdir = paste0('./Output', bin, '/')
## dir.create is portable and silent when the folder already exists;
## the original shelled out with system(paste('mkdir', outdir)).
dir.create(outdir, showWarnings = FALSE, recursive = TRUE)

### Input for DiffBind: sample sheet, one row per ChIP replicate
conf <- data.frame(
  SampleID = 1:6,
  Tissue = c('Helas3', 'Helas3', 'Hepg2', 'Hepg2', 'Huvec', 'Huvec'),
  Factor = rep(mark, 6),
  Condition = c('Helas3', 'Helas3', 'Hepg2', 'Hepg2', 'Huvec', 'Huvec'),
  bamReads = c(
    paste0(dirchip1, mark, '/', chip1[1]),
    paste0(dirchip1, mark, '/', chip1[2]),
    paste0(dirchip2, mark, '/', chip2[1]),
    paste0(dirchip2, mark, '/', chip2[2]),
    paste0(dirchip3, mark, '/', chip3[1]),
    paste0(dirchip3, mark, '/', chip3[2])
  ),
  ControlID = paste0(1:6, 'C'),
  bamControl = c(
    paste0(dirchip1, 'Control/', control1[1]),
    paste0(dirchip1, 'Control/', control1[2]),
    paste0(dirchip2, 'Control/', control2[1]),
    paste0(dirchip2, 'Control/', control2[2]),
    paste0(dirchip3, 'Control/', control3[1]),
    paste0(dirchip3, 'Control/', control3[2])
  ),
  Peaks = c(dirpeak1, dirpeak1, dirpeak2, dirpeak2, dirpeak3, dirpeak3),
  PeakCaller = rep('macs', 6)
)

### Running DiffBind, timing each bin size once with microbenchmark
for (bp in bin) {
  cptime[[paste(method, mark, cell, 'Output', bp, sep = '_')]] = microbenchmark({
    encode <- dba(sampleSheet = conf)
    encode <- dba.count(encode)
    encode <-
      dba.contrast(encode, categories = DBA_CONDITION, minMembers = 2)
    encode <- dba.analyze(encode, bBlacklist = FALSE, bGreylist = FALSE)
    # DiffBind throws an error if bBlacklist = TRUE:
    ## Blacklist error: Error in .normarg_seqlevels(seqnames): supplied 'seqlevels' cannot contain NAs or empty strings ("")
    # I manually ran the source code dba.blacklist without problems, so it looks like a bug from DiffBind
    # In any case, I will remove blacklists later for all methods when benchmarking the results for a fair comparison
    encode.DB <- dba.report(encode, th = 1)
    write.table(
      as.data.frame(encode.DB),
      file = paste0(
        outdir,
        paste(method, mark, cell, 'Output', paste0(bp, 'bp.txt'), sep = '_')
      ),
      quote = FALSE,     # was quote = F
      row.names = FALSE  # was row.names = F
    )
  }, times = 1)
}

### Saving computing time
save(cptime, file = paste0(outdir, paste(
  method, mark, cell, 'Time', paste0(bin, 'bp.RData'), sep = '_'
)))
|
/Public/DiffBind/EZH2/Encode_threecells/DiffBind_EZH2_Encode_threecells_250bp.R
|
permissive
|
plbaldoni/epigraHMMPaper
|
R
| false
| false
| 4,518
|
r
|
library(DiffBind)
library(microbenchmark)

## Differential binding analysis of EZH2 across three ENCODE cell lines
## (HeLa-S3, HepG2, HUVEC) at 250bp resolution; run time is recorded.
bin = 250
method = 'DiffBind'
mark = 'EZH2'
cell = 'Encode_threecells'

## Replicate ChIP BAM files per cell line
chip1 = c(
  'wgEncodeBroadHistoneHelas3Ezh239875AlnRep1.markdup.q10.sorted.bam',
  'wgEncodeBroadHistoneHelas3Ezh239875AlnRep2.markdup.q10.sorted.bam'
)
chip2 = c(
  'wgEncodeBroadHistoneHepg2Ezh239875AlnRep1.markdup.q10.sorted.bam',
  'wgEncodeBroadHistoneHepg2Ezh239875AlnRep2.markdup.q10.sorted.bam'
)
chip3 = c('wgEncodeBroadHistoneHuvecEzh239875AlnRep1.markdup.q10.sorted.bam',
          'wgEncodeBroadHistoneHuvecEzh239875AlnRep2.markdup.q10.sorted.bam')
## Matching input-control BAM files per cell line
control1 = c(
  'wgEncodeBroadHistoneHelas3ControlStdAlnRep1.markdup.q10.sorted.bam',
  'wgEncodeBroadHistoneHelas3ControlStdAlnRep2.markdup.q10.sorted.bam'
)
control2 = c(
  'wgEncodeBroadHistoneHepg2ControlStdAlnRep1.markdup.q10.sorted.bam',
  'wgEncodeBroadHistoneHepg2ControlStdAlnRep2.markdup.q10.sorted.bam'
)
control3 = c('wgEncodeBroadHistoneHuvecControlStdAlnRep1.markdup.q10.sorted.bam',
             'wgEncodeBroadHistoneHuvecControlStdAlnRep2.markdup.q10.sorted.bam')
dirchip1 = '../../../../Data/Encode_helas3/'
dirchip2 = '../../../../Data/Encode_hepg2/'
dirchip3 = '../../../../Data/Encode_huvec/'

### Organizing other parameters
cptime = list()

## MACS2 peak files per cell line.
## NOTE(review): the original had two if-blocks with byte-identical bodies,
## one for c('H3K36me3','H3K27me3','EZH2') and one for
## c('H3K4me3','H3K27ac','CTCF'); they are merged here with no behavior change.
if (mark %in% c('H3K36me3', 'H3K27me3', 'EZH2',
                'H3K4me3', 'H3K27ac', 'CTCF')) {
  dirpeak1 = paste0('../../../MACS2/', mark, '/Helas3/Output/MACS2_',
                    mark, '_Helas3_Output_peaks.xls')
  dirpeak2 = paste0('../../../MACS2/', mark, '/Hepg2/Output/MACS2_',
                    mark, '_Hepg2_Output_peaks.xls')
  dirpeak3 = paste0('../../../MACS2/', mark, '/Huvec/Output/MACS2_',
                    mark, '_Huvec_Output_peaks.xls')
}

outdir = paste0('./Output', bin, '/')
## dir.create is portable and silent when the folder already exists;
## the original shelled out with system(paste('mkdir', outdir)).
dir.create(outdir, showWarnings = FALSE, recursive = TRUE)

### Input for DiffBind: sample sheet, one row per ChIP replicate
conf <- data.frame(
  SampleID = 1:6,
  Tissue = c('Helas3', 'Helas3', 'Hepg2', 'Hepg2', 'Huvec', 'Huvec'),
  Factor = rep(mark, 6),
  Condition = c('Helas3', 'Helas3', 'Hepg2', 'Hepg2', 'Huvec', 'Huvec'),
  bamReads = c(
    paste0(dirchip1, mark, '/', chip1[1]),
    paste0(dirchip1, mark, '/', chip1[2]),
    paste0(dirchip2, mark, '/', chip2[1]),
    paste0(dirchip2, mark, '/', chip2[2]),
    paste0(dirchip3, mark, '/', chip3[1]),
    paste0(dirchip3, mark, '/', chip3[2])
  ),
  ControlID = paste0(1:6, 'C'),
  bamControl = c(
    paste0(dirchip1, 'Control/', control1[1]),
    paste0(dirchip1, 'Control/', control1[2]),
    paste0(dirchip2, 'Control/', control2[1]),
    paste0(dirchip2, 'Control/', control2[2]),
    paste0(dirchip3, 'Control/', control3[1]),
    paste0(dirchip3, 'Control/', control3[2])
  ),
  Peaks = c(dirpeak1, dirpeak1, dirpeak2, dirpeak2, dirpeak3, dirpeak3),
  PeakCaller = rep('macs', 6)
)

### Running DiffBind, timing each bin size once with microbenchmark
for (bp in bin) {
  cptime[[paste(method, mark, cell, 'Output', bp, sep = '_')]] = microbenchmark({
    encode <- dba(sampleSheet = conf)
    encode <- dba.count(encode)
    encode <-
      dba.contrast(encode, categories = DBA_CONDITION, minMembers = 2)
    encode <- dba.analyze(encode, bBlacklist = FALSE, bGreylist = FALSE)
    # DiffBind throws an error if bBlacklist = TRUE:
    ## Blacklist error: Error in .normarg_seqlevels(seqnames): supplied 'seqlevels' cannot contain NAs or empty strings ("")
    # I manually ran the source code dba.blacklist without problems, so it looks like a bug from DiffBind
    # In any case, I will remove blacklists later for all methods when benchmarking the results for a fair comparison
    encode.DB <- dba.report(encode, th = 1)
    write.table(
      as.data.frame(encode.DB),
      file = paste0(
        outdir,
        paste(method, mark, cell, 'Output', paste0(bp, 'bp.txt'), sep = '_')
      ),
      quote = FALSE,     # was quote = F
      row.names = FALSE  # was row.names = F
    )
  }, times = 1)
}

### Saving computing time
save(cptime, file = paste0(outdir, paste(
  method, mark, cell, 'Time', paste0(bin, 'bp.RData'), sep = '_'
)))
|
#' Connect to pelagic database
#'
#' Opens a PostgreSQL connection to the given database on
#' baseline.stanford.edu and returns the connection object.
#'
#' @param dbname Database name (default \code{"pelagic"}).
#' @return A live database connection as returned by \code{dbConnect}.
connect_to_pelagic = function(dbname = "pelagic"){
  # library() errors loudly if RpgSQL is unavailable; the original used
  # require(), which only returns FALSE and lets dbConnect() fail later
  # with a confusing message.
  library(RpgSQL)
  # it does not work from loval
  # SECURITY NOTE(review): credentials are hard-coded here; move the
  # password to an environment variable or config file outside version control.
  con <- dbConnect(pgSQL(), host = 'baseline.stanford.edu',
                   user = "postgres", password = "DELETED",
                   dbname = dbname) # this works in remote R
  con
  #iccat = dbSendQuery(con, statement = paste("select lat,lon,gearcode FROM iccatt2ce WHERE Eff1Type = 'NO.HOOKS' AND GearGrpCode = 'LL' and Lat <= 46 AND Lat>30 AND (QuadID = 1 OR (QuadID = 4 AND Lon>0 AND Lon<=5))",sep=""))
  #iccat<- fetch(iccat, n = -1)
}
|
/R/connect_to_pelagic.R
|
no_license
|
seananderson/sharkbase
|
R
| false
| false
| 575
|
r
|
#' Connect to pelagic database
#'
#' Opens a PostgreSQL connection to the given database on
#' baseline.stanford.edu and returns the connection object.
#'
#' @param dbname Database name (default \code{"pelagic"}).
#' @return A live database connection as returned by \code{dbConnect}.
connect_to_pelagic = function(dbname = "pelagic"){
  # library() errors loudly if RpgSQL is unavailable; the original used
  # require(), which only returns FALSE and lets dbConnect() fail later
  # with a confusing message.
  library(RpgSQL)
  # it does not work from loval
  # SECURITY NOTE(review): credentials are hard-coded here; move the
  # password to an environment variable or config file outside version control.
  con <- dbConnect(pgSQL(), host = 'baseline.stanford.edu',
                   user = "postgres", password = "DELETED",
                   dbname = dbname) # this works in remote R
  con
  #iccat = dbSendQuery(con, statement = paste("select lat,lon,gearcode FROM iccatt2ce WHERE Eff1Type = 'NO.HOOKS' AND GearGrpCode = 'LL' and Lat <= 46 AND Lat>30 AND (QuadID = 1 OR (QuadID = 4 AND Lon>0 AND Lon<=5))",sep=""))
  #iccat<- fetch(iccat, n = -1)
}
|
# ---------------------------------------------------------------------------------
# Load packages for handling large data
library(ff)
library(ffbase)
library(tidyverse)
# Download the SAEB 2019 microdata from the address below and save it in
# the bases_originais folder:
#   https://download.inep.gov.br/microdados/microdados_saeb_2019.zip
# Inside the DADOS folder, use the TS_ESCOLA table.
# The file was modified to drop several columns (NIVEL...) because
# read.csv.ffdf was failing on the original; hence enderecoBase points
# to TS_ESCOLA1.
enderecoBase <- "bases_originais/TS_ESCOLA1.csv"
# ---------------------------------------------------------------------------------

# ---------------------------------------------------------------------------------
#
# Timing the load of the SAEB 2019 TS_ESCOLA table (~20MB) -- illustrative
# only, since this file is not actually heavy.
#
system.time(saeb2019 <- read.csv.ffdf(file = enderecoBase))
class(saeb2019)              # object class
object.size(saeb2019) / 1024 # size in KB
summary(saeb2019)
head(saeb2019)
# ---------------------------------------------------------------------------------
mean(saeb2019$MEDIA_5EF_MT, na.rm = TRUE)     # was na.rm = T (T is reassignable)
mean(saeb2019$NU_PRESENTES_5EF, na.rm = TRUE) # was na.rm = T
# Computing the median was abandoned because it took too long.
regressao <- lm(MEDIA_5EF_MT ~ NU_PRESENTES_5EF, data = saeb2019)
summary(regressao)
# ---------------------------------------------------------------------------------
|
/scripts/06_large_data.R
|
no_license
|
marcosabmelo/eletiva_analise_dados
|
R
| false
| false
| 1,425
|
r
|
# ---------------------------------------------------------------------------------
# Load packages for handling large data
library(ff)
library(ffbase)
library(tidyverse)
# Download the SAEB 2019 microdata from the address below and save it in
# the bases_originais folder:
#   https://download.inep.gov.br/microdados/microdados_saeb_2019.zip
# Inside the DADOS folder, use the TS_ESCOLA table.
# The file was modified to drop several columns (NIVEL...) because
# read.csv.ffdf was failing on the original; hence enderecoBase points
# to TS_ESCOLA1.
enderecoBase <- "bases_originais/TS_ESCOLA1.csv"
# ---------------------------------------------------------------------------------

# ---------------------------------------------------------------------------------
#
# Timing the load of the SAEB 2019 TS_ESCOLA table (~20MB) -- illustrative
# only, since this file is not actually heavy.
#
system.time(saeb2019 <- read.csv.ffdf(file = enderecoBase))
class(saeb2019)              # object class
object.size(saeb2019) / 1024 # size in KB
summary(saeb2019)
head(saeb2019)
# ---------------------------------------------------------------------------------
mean(saeb2019$MEDIA_5EF_MT, na.rm = TRUE)     # was na.rm = T (T is reassignable)
mean(saeb2019$NU_PRESENTES_5EF, na.rm = TRUE) # was na.rm = T
# Computing the median was abandoned because it took too long.
regressao <- lm(MEDIA_5EF_MT ~ NU_PRESENTES_5EF, data = saeb2019)
summary(regressao)
# ---------------------------------------------------------------------------------
|
# YJL's code for making dotplots
# ggplot2 point plot (pp) with specified circle size
library(ggplot2)

# GO_BP
# ggplot2 point plot (pp) with specified circle size (5x5.5 inches)
# Version 1: axis limits hard-coded to a fixed set of clusters / GO terms.
data <- read.table("/Users/JulianKimura/Documents/Lab/Single_Cell_Seq/Post_Embryonic/RH_For_JK_URD_Data2/JK_embedding/HJ/filtered/GO_dotplots/dotplot_matrix.txt", sep = '\t', header = TRUE)  # was header = T
pp <- ggplot(data, aes(Cluster, GO.Terms))
pp <- pp + geom_point(aes(size = Counts, colour = -log(FDR)), alpha = 1.0) + scale_size(range = c(6, 12))
pp <- pp + scale_colour_gradientn(colours = c("royal blue", "light grey", "red"))
pp <- pp + scale_x_discrete(limits = c("C0", "C1", "C2", "C3", "C5"), breaks = c("C0", "C1", "C2", "C3", "C5"))
pp <- pp + scale_y_discrete(limits = c("lipid metabolic process", "muscle system process", "synaptic vesicle transport", "mRNA metabolic process", "cilium organization"))
pp <- pp + theme(panel.grid.major = element_line(colour = "light grey", size = 0.2), panel.grid.minor = element_blank(), axis.ticks = element_line(colour = "black"),
                 panel.background = element_blank(), axis.line = element_line(colour = "black"),
                 axis.text = element_text(colour = "black", size = 6), axis.title = element_text(size = 8))
pp <- pp + theme(panel.border = element_blank(), axis.line = element_line())
pp

# Version 2: same plot, but axis limits taken from the data columns
# themselves (supersedes the hard-coded version above when sourced).
data <- read.table("/Users/JulianKimura/Documents/Lab/Single_Cell_Seq/Post_Embryonic/RH_For_JK_URD_Data2/JK_embedding/HJ/filtered/GO_dotplots/dotplot_matrix.txt", sep = '\t', header = TRUE)  # was header = T
pp <- ggplot(data, aes(Cluster, GO.Terms))
pp <- pp + geom_point(aes(size = Counts, colour = -log(FDR)), alpha = 1.0) + scale_size(range = c(6, 12))
pp <- pp + scale_colour_gradientn(colours = c("royal blue", "light grey", "red"))
pp <- pp + scale_x_discrete(limits = data$Cluster, breaks = data$Cluster)
pp <- pp + scale_y_discrete(limits = data$GO.Terms)
pp <- pp + theme(panel.grid.major = element_line(colour = "light grey", size = 0.2), panel.grid.minor = element_blank(), axis.ticks = element_line(colour = "black"),
                 panel.background = element_blank(), axis.line = element_line(colour = "black"),
                 axis.text = element_text(colour = "black", size = 6), axis.title = element_text(size = 8))
pp <- pp + theme(panel.border = element_blank(), axis.line = element_line())
pp
|
/GO_dotplots.R
|
no_license
|
JulianKimura/Hulett_etal
|
R
| false
| false
| 2,277
|
r
|
# YJL's code for making dotplots
# ggplot2 point plot (pp) with specified circle size
library(ggplot2)

# GO_BP
# ggplot2 point plot (pp) with specified circle size (5x5.5 inches)
# Version 1: axis limits hard-coded to a fixed set of clusters / GO terms.
data <- read.table("/Users/JulianKimura/Documents/Lab/Single_Cell_Seq/Post_Embryonic/RH_For_JK_URD_Data2/JK_embedding/HJ/filtered/GO_dotplots/dotplot_matrix.txt", sep = '\t', header = TRUE)  # was header = T
pp <- ggplot(data, aes(Cluster, GO.Terms))
pp <- pp + geom_point(aes(size = Counts, colour = -log(FDR)), alpha = 1.0) + scale_size(range = c(6, 12))
pp <- pp + scale_colour_gradientn(colours = c("royal blue", "light grey", "red"))
pp <- pp + scale_x_discrete(limits = c("C0", "C1", "C2", "C3", "C5"), breaks = c("C0", "C1", "C2", "C3", "C5"))
pp <- pp + scale_y_discrete(limits = c("lipid metabolic process", "muscle system process", "synaptic vesicle transport", "mRNA metabolic process", "cilium organization"))
pp <- pp + theme(panel.grid.major = element_line(colour = "light grey", size = 0.2), panel.grid.minor = element_blank(), axis.ticks = element_line(colour = "black"),
                 panel.background = element_blank(), axis.line = element_line(colour = "black"),
                 axis.text = element_text(colour = "black", size = 6), axis.title = element_text(size = 8))
pp <- pp + theme(panel.border = element_blank(), axis.line = element_line())
pp

# Version 2: same plot, but axis limits taken from the data columns
# themselves (supersedes the hard-coded version above when sourced).
data <- read.table("/Users/JulianKimura/Documents/Lab/Single_Cell_Seq/Post_Embryonic/RH_For_JK_URD_Data2/JK_embedding/HJ/filtered/GO_dotplots/dotplot_matrix.txt", sep = '\t', header = TRUE)  # was header = T
pp <- ggplot(data, aes(Cluster, GO.Terms))
pp <- pp + geom_point(aes(size = Counts, colour = -log(FDR)), alpha = 1.0) + scale_size(range = c(6, 12))
pp <- pp + scale_colour_gradientn(colours = c("royal blue", "light grey", "red"))
pp <- pp + scale_x_discrete(limits = data$Cluster, breaks = data$Cluster)
pp <- pp + scale_y_discrete(limits = data$GO.Terms)
pp <- pp + theme(panel.grid.major = element_line(colour = "light grey", size = 0.2), panel.grid.minor = element_blank(), axis.ticks = element_line(colour = "black"),
                 panel.background = element_blank(), axis.line = element_line(colour = "black"),
                 axis.text = element_text(colour = "black", size = 6), axis.title = element_text(size = 8))
pp <- pp + theme(panel.border = element_blank(), axis.line = element_line())
pp
|
# rankall <- function(outcome, num = "best") {
# ## Read outcome data
# data <- read.csv("outcome-of-care-measures.csv",colClasses = "character")
#
# ## Check that state and outcome are valid
#
# outcomes <- c("heart attack","heart failure","pneumonia")
#
# if ((outcome %in% outcomes)==FALSE) {
# stop(princomp("invalid outcome"))
# }
#
# #create a list of states and char array to store hospital name
# ## For each state, find the hospital of the given rank
# state <- levels(factor(data[,7]))
# hospital <- vector(mode="character")
#
# for (i in seq(state)) {
# hospital[i] <- rankhospital(state[i],outcome,num)
# }
# ## Return a data frame with the hospital names and the
# ## (abbreviated) state name
# data.frame(hospital,state)
# }
rankall <- function(outcome, num = "best") {
  ## Rank hospitals within every state by 30-day mortality for one outcome.
  ##
  ## Args:
  ##   outcome: one of "heart attack", "heart failure", "pneumonia".
  ##   num: "best", "worst", or a numeric rank within each state.
  ## Returns:
  ##   data.frame with columns `hospital` and `state` (one row per state,
  ##   rownames = state); hospital is NA when a state has fewer than `num`
  ##   ranked hospitals.

  ## Validate arguments BEFORE any I/O. The original validated `outcome`
  ## only after read.csv() and after indexing fd[, outcome], so bad input
  ## failed late with the wrong error message.
  outcome_cols <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if (!outcome %in% names(outcome_cols)) {
    stop("invalid outcome")
  }
  if (!is.numeric(num) && !num %in% c("best", "worst")) {
    stop("invalid num")
  }

  ## Read outcome data; keep only hospital, state and the requested rate.
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  fd <- data.frame(hospital = data[, 2],
                   state = data[, 7],
                   rate = as.numeric(data[, outcome_cols[[outcome]]]),
                   stringsAsFactors = FALSE)

  ## One ranking rule shared by all three cases (the original duplicated
  ## this loop three times). Rate ties are always broken alphabetically by
  ## hospital name -- the original "worst" branch sorted names in reverse
  ## alphabetical order (decreasing = TRUE applied to both columns), which
  ## resolved ties the wrong way.
  rank_one <- function(st) {
    if (identical(num, "worst")) {
      st <- st[order(-st$rate, st$hospital), ]  # worst rate first
      idx <- 1
    } else {
      st <- st[order(st$rate, st$hospital), ]   # best rate first
      idx <- if (identical(num, "best")) 1 else num
    }
    c(hospital = st[idx, "hospital"], state = st$state[1])
  }

  by_state <- split(fd, fd$state)  # alphabetical by state, as before
  result <- do.call(rbind, lapply(by_state, rank_one))
  output <- as.data.frame(result, stringsAsFactors = FALSE)
  rownames(output) <- output$state
  output
}
|
/rankall.R
|
no_license
|
sonarshalaka/R-Programming-week4
|
R
| false
| false
| 3,289
|
r
|
# rankall <- function(outcome, num = "best") {
# ## Read outcome data
# data <- read.csv("outcome-of-care-measures.csv",colClasses = "character")
#
# ## Check that state and outcome are valid
#
# outcomes <- c("heart attack","heart failure","pneumonia")
#
# if ((outcome %in% outcomes)==FALSE) {
# stop(princomp("invalid outcome"))
# }
#
# #create a list of states and char array to store hospital name
# ## For each state, find the hospital of the given rank
# state <- levels(factor(data[,7]))
# hospital <- vector(mode="character")
#
# for (i in seq(state)) {
# hospital[i] <- rankhospital(state[i],outcome,num)
# }
# ## Return a data frame with the hospital names and the
# ## (abbreviated) state name
# data.frame(hospital,state)
# }
rankall <- function(outcome, num = "best") {
  ## Rank hospitals within every state by 30-day mortality for one outcome.
  ##
  ## Args:
  ##   outcome: one of "heart attack", "heart failure", "pneumonia".
  ##   num: "best", "worst", or a numeric rank within each state.
  ## Returns:
  ##   data.frame with columns `hospital` and `state` (one row per state,
  ##   rownames = state); hospital is NA when a state has fewer than `num`
  ##   ranked hospitals.

  ## Validate arguments BEFORE any I/O. The original validated `outcome`
  ## only after read.csv() and after indexing fd[, outcome], so bad input
  ## failed late with the wrong error message.
  outcome_cols <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if (!outcome %in% names(outcome_cols)) {
    stop("invalid outcome")
  }
  if (!is.numeric(num) && !num %in% c("best", "worst")) {
    stop("invalid num")
  }

  ## Read outcome data; keep only hospital, state and the requested rate.
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  fd <- data.frame(hospital = data[, 2],
                   state = data[, 7],
                   rate = as.numeric(data[, outcome_cols[[outcome]]]),
                   stringsAsFactors = FALSE)

  ## One ranking rule shared by all three cases (the original duplicated
  ## this loop three times). Rate ties are always broken alphabetically by
  ## hospital name -- the original "worst" branch sorted names in reverse
  ## alphabetical order (decreasing = TRUE applied to both columns), which
  ## resolved ties the wrong way.
  rank_one <- function(st) {
    if (identical(num, "worst")) {
      st <- st[order(-st$rate, st$hospital), ]  # worst rate first
      idx <- 1
    } else {
      st <- st[order(st$rate, st$hospital), ]   # best rate first
      idx <- if (identical(num, "best")) 1 else num
    }
    c(hospital = st[idx, "hospital"], state = st$state[1])
  }

  by_state <- split(fd, fd$state)  # alphabetical by state, as before
  result <- do.call(rbind, lapply(by_state, rank_one))
  output <- as.data.frame(result, stringsAsFactors = FALSE)
  rownames(output) <- output$state
  output
}
|
# Compare two matrix-multiplication implementations: mean times with
# half-SD error bars, plus cubic polynomial fits overlaid.
library(ggplot2)  # was missing: ggplot()/last_plot() below need ggplot2

# prepare data
results <- read.csv("times.csv", sep = ";")
avg_results <- aggregate(time_better ~ size, data = results, FUN = mean)
avg_results$time_naive <- aggregate(time_naive ~ size, data = results, FUN = mean)$time_naive
avg_results$sd_time_better <- aggregate(time_better ~ size, data = results, FUN = sd)$time_better
avg_results$sd_time_naive <- aggregate(time_naive ~ size, data = results, FUN = sd)$time_naive

# make plot (title typo fixed: was "Times of multyplying")
ggplot(avg_results, aes(size)) +
  geom_point(aes(y = time_better, colour = "darkblue")) +
  geom_point(aes(y = time_naive, colour = "red")) +
  geom_errorbar(data = avg_results,
                mapping = aes(x = size,
                              ymin = time_better - (sd_time_better / 2),
                              ymax = time_better + (sd_time_better / 2))) +
  geom_errorbar(data = avg_results,
                mapping = aes(x = size,
                              ymin = time_naive - (sd_time_naive / 2),
                              ymax = time_naive + (sd_time_naive / 2))) +
  labs(title = "Times of multiplying", x = "Sizes", y = "Times")

# cubic fit for the optimised implementation, drawn over the points
fit <- lm(time_better ~ poly(size, 3, raw = TRUE), data = avg_results)
newdata <- data.frame(size = seq(20, 380, length.out = 380))
newdata$time_better <- predict(fit, newdata)
last_plot() + geom_line(data = newdata, aes(size, time_better, colour = "darkblue"))

# cubic fit for the naive implementation, plus a shared colour legend
fit2 <- lm(time_naive ~ poly(size, 3, raw = TRUE), data = avg_results)
newdata$time_naive <- predict(fit2, newdata)
last_plot() +
  geom_line(data = newdata, aes(size, time_naive, colour = "red")) +
  scale_color_discrete(name = "Approximation", labels = c("time better", "time naive"))
|
/lab05/create_aggregate_data.R
|
no_license
|
mimagiera/mownit-agh
|
R
| false
| false
| 1,372
|
r
|
# Compare two matrix-multiplication implementations: mean times with
# half-SD error bars, plus cubic polynomial fits overlaid.
library(ggplot2)  # was missing: ggplot()/last_plot() below need ggplot2

# prepare data
results <- read.csv("times.csv", sep = ";")
avg_results <- aggregate(time_better ~ size, data = results, FUN = mean)
avg_results$time_naive <- aggregate(time_naive ~ size, data = results, FUN = mean)$time_naive
avg_results$sd_time_better <- aggregate(time_better ~ size, data = results, FUN = sd)$time_better
avg_results$sd_time_naive <- aggregate(time_naive ~ size, data = results, FUN = sd)$time_naive

# make plot (title typo fixed: was "Times of multyplying")
ggplot(avg_results, aes(size)) +
  geom_point(aes(y = time_better, colour = "darkblue")) +
  geom_point(aes(y = time_naive, colour = "red")) +
  geom_errorbar(data = avg_results,
                mapping = aes(x = size,
                              ymin = time_better - (sd_time_better / 2),
                              ymax = time_better + (sd_time_better / 2))) +
  geom_errorbar(data = avg_results,
                mapping = aes(x = size,
                              ymin = time_naive - (sd_time_naive / 2),
                              ymax = time_naive + (sd_time_naive / 2))) +
  labs(title = "Times of multiplying", x = "Sizes", y = "Times")

# cubic fit for the optimised implementation, drawn over the points
fit <- lm(time_better ~ poly(size, 3, raw = TRUE), data = avg_results)
newdata <- data.frame(size = seq(20, 380, length.out = 380))
newdata$time_better <- predict(fit, newdata)
last_plot() + geom_line(data = newdata, aes(size, time_better, colour = "darkblue"))

# cubic fit for the naive implementation, plus a shared colour legend
fit2 <- lm(time_naive ~ poly(size, 3, raw = TRUE), data = avg_results)
newdata$time_naive <- predict(fit2, newdata)
last_plot() +
  geom_line(data = newdata, aes(size, time_naive, colour = "red")) +
  scale_color_discrete(name = "Approximation", labels = c("time better", "time naive"))
|
# Plot 3: energy sub-metering time series for the household power data,
# with a colour legend, copied to plot3.png.
source('read_data.R')

# Base line plot of sub-meter 1, then overlay the other two meters.
plot(t$dateTime, t$Sub_metering_1, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
lines(t$dateTime, t$Sub_metering_2, col = 'Red')
lines(t$dateTime, t$Sub_metering_3, col = 'Blue')

legend("topright", col = c("black", "red", "blue"), lwd = c(1, 1, 1),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Copy the on-screen device to a 480x480 PNG and close it.
dev.copy(png, "plot3.png", width = 480, height = 480)
dev.off()
|
/plot3.R
|
no_license
|
xarus01/ExData_Plotting1
|
R
| false
| false
| 423
|
r
|
# Plot 3: energy sub-metering time series for the household power data,
# with a colour legend, copied to plot3.png.
source('read_data.R')

# Base line plot of sub-meter 1, then overlay the other two meters.
plot(t$dateTime, t$Sub_metering_1, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
lines(t$dateTime, t$Sub_metering_2, col = 'Red')
lines(t$dateTime, t$Sub_metering_3, col = 'Blue')

legend("topright", col = c("black", "red", "blue"), lwd = c(1, 1, 1),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Copy the on-screen device to a 480x480 PNG and close it.
dev.copy(png, "plot3.png", width = 480, height = 480)
dev.off()
|
#
# select_nesting.R, 16 Jan 20
# Data from:
# The New {C} Standard: {An} Economic and Cultural Commentary
# Derek M. Jones
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG C selection-statement_nesting source-code_C

source("ESEUR_config.r")

plot_layout(2, 1)
pal_col <- rainbow(2)

# Nesting depth of C selection-statements, and line-length distribution.
nest_counts <- read.csv(paste0(ESEUR_dir, "sourcecode/select_nesting.csv.xz"), as.is=TRUE)
line_counts <- read.csv(paste0(ESEUR_dir, "sourcecode/logicline.csv.xz"), as.is=TRUE)
# tl=read.csv(paste0(ESEUR_dir, "sourcecode/tokenonline.csv.xz"), as.is=TRUE)

# Restrict to C source files with fewer than 400 characters per line.
c_lines <- subset(line_counts, file_suff == ".c")
c_lines <- subset(c_lines, characters < 400)

plot(c_lines$characters, c_lines$occurrences, log="xy", col=point_col,
     xlab="Characters on line", ylab="Lines\n")
# plot(tl$tokens, tl$lines, log="y", col=point_col,
#      xlab="Tokens on line", ylab="Lines\n")

plot(nest_counts$nesting, nest_counts$occurrences, log="y", col=pal_col[1],
     xaxs="i",
     xlim=c(0, 25),
     xlab="Nesting level", ylab="Selection-statements\n")

# Log-linear (exponential) fit over nesting levels 2..13, drawn over the points.
mod_113 <- glm(log(occurrences) ~ nesting, data=nest_counts, subset=2:13)
pred <- predict(mod_113)
lines(1:12, exp(pred), col=pal_col[2])

# Embedded C data from Engblom <book Engblom_98>
#
# emb=data.frame(nesting=1:10,
#		occurrences=c(0.495, 0.196, 0.095, 0.067, 0.065,
#				0.063, 0.019, 0.014, 0.007, 0.008))
#
# points(emb$nesting, 1e5*emb$occurrences)
|
/sourcecode/select_nesting.R
|
no_license
|
Derek-Jones/ESEUR-code-data
|
R
| false
| false
| 1,350
|
r
|
#
# select_nesting.R, 16 Jan 20
# Data from:
# The New {C} Standard: {An} Economic and Cultural Commentary
# Derek M. Jones
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG C selection-statement_nesting source-code_C

source("ESEUR_config.r")

plot_layout(2, 1)
pal_col <- rainbow(2)

# Nesting depth of C selection-statements, and line-length distribution.
nest_counts <- read.csv(paste0(ESEUR_dir, "sourcecode/select_nesting.csv.xz"), as.is=TRUE)
line_counts <- read.csv(paste0(ESEUR_dir, "sourcecode/logicline.csv.xz"), as.is=TRUE)
# tl=read.csv(paste0(ESEUR_dir, "sourcecode/tokenonline.csv.xz"), as.is=TRUE)

# Restrict to C source files with fewer than 400 characters per line.
c_lines <- subset(line_counts, file_suff == ".c")
c_lines <- subset(c_lines, characters < 400)

plot(c_lines$characters, c_lines$occurrences, log="xy", col=point_col,
     xlab="Characters on line", ylab="Lines\n")
# plot(tl$tokens, tl$lines, log="y", col=point_col,
#      xlab="Tokens on line", ylab="Lines\n")

plot(nest_counts$nesting, nest_counts$occurrences, log="y", col=pal_col[1],
     xaxs="i",
     xlim=c(0, 25),
     xlab="Nesting level", ylab="Selection-statements\n")

# Log-linear (exponential) fit over nesting levels 2..13, drawn over the points.
mod_113 <- glm(log(occurrences) ~ nesting, data=nest_counts, subset=2:13)
pred <- predict(mod_113)
lines(1:12, exp(pred), col=pal_col[2])

# Embedded C data from Engblom <book Engblom_98>
#
# emb=data.frame(nesting=1:10,
#		occurrences=c(0.495, 0.196, 0.095, 0.067, 0.065,
#				0.063, 0.019, 0.014, 0.007, 0.008))
#
# points(emb$nesting, 1e5*emb$occurrences)
|
# Exploratory Data Analysis assignment, plot 4: a 2x2 panel of time-series
# plots (global active power, voltage, energy sub-metering, global
# reactive power) for 2007-02-01 and 2007-02-02, saved to plot4.png.

#Reading data
# '?' marks missing values in the raw file, hence na.strings="?".
data <- read.table("household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".",na.strings="?")
#subseting the data from the dates 2007-02-01 and 2007-02-02
# Dates in the file are d/m/Y strings, so match them literally.
reqData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#converting the dates to right format
# Combine the Date and Time columns into POSIXlt timestamps for the x-axis.
reqData$Datetime <- strptime(paste(reqData$Date, reqData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
#ploting data
png("plot4.png", width=480, height=480)
# 2x2 grid of panels, filled row by row.
par(mfrow = c(2, 2))
# Panel 1 (top-left): global active power over time.
plot(reqData$Datetime, reqData$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
# Panel 2 (top-right): voltage over time.
plot(reqData$Datetime, reqData$Voltage, type="l", xlab="datetime", ylab="Voltage")
# Panel 3 (bottom-left): the three sub-metering series overlaid.
plot(reqData$Datetime, reqData$Sub_metering_1, type="l", ylab="Energy Submetering", xlab="")
lines(reqData$Datetime, reqData$Sub_metering_2, type="l", col="red")
lines(reqData$Datetime, reqData$Sub_metering_3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
# Panel 4 (bottom-right): global reactive power over time.
plot(reqData$Datetime, reqData$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
arunbv123/ExploratoryDataAnalysis
|
R
| false
| false
| 1,166
|
r
|
# Exploratory Data Analysis assignment, plot 4: a 2x2 panel of time-series
# plots (global active power, voltage, energy sub-metering, global
# reactive power) for 2007-02-01 and 2007-02-02, saved to plot4.png.

#Reading data
# '?' marks missing values in the raw file, hence na.strings="?".
data <- read.table("household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".",na.strings="?")
#subseting the data from the dates 2007-02-01 and 2007-02-02
# Dates in the file are d/m/Y strings, so match them literally.
reqData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#converting the dates to right format
# Combine the Date and Time columns into POSIXlt timestamps for the x-axis.
reqData$Datetime <- strptime(paste(reqData$Date, reqData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
#ploting data
png("plot4.png", width=480, height=480)
# 2x2 grid of panels, filled row by row.
par(mfrow = c(2, 2))
# Panel 1 (top-left): global active power over time.
plot(reqData$Datetime, reqData$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
# Panel 2 (top-right): voltage over time.
plot(reqData$Datetime, reqData$Voltage, type="l", xlab="datetime", ylab="Voltage")
# Panel 3 (bottom-left): the three sub-metering series overlaid.
plot(reqData$Datetime, reqData$Sub_metering_1, type="l", ylab="Energy Submetering", xlab="")
lines(reqData$Datetime, reqData$Sub_metering_2, type="l", col="red")
lines(reqData$Datetime, reqData$Sub_metering_3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
# Panel 4 (bottom-right): global reactive power over time.
plot(reqData$Datetime, reqData$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
# Collapse duplicated gene symbols in a log2-TPM matrix: for each symbol
# that maps to more than one annotation row, keep only the most variable
# row, then save the de-duplicated matrix and annotation.

rm(list=ls())
options(stringsAsFactors = FALSE)
setwd("/net/wong05/home/liz86/Steffi/primary_vs_mets/")
## load data
load("Data_v2/tpm.RData")
# log2-transform TPM with a pseudocount of 1.
log2tpm <- log2(tpm + 1)
load("Data_v2/gene_annot_biomart.RData")
load("Data_v2/sample_annot.RData")
length(unique(gene_annot_biomart[,"external_gene_name_v2"])) #55644
# Gene symbols that appear in more than one annotation row (NA excluded).
dup_genes <- unique(gene_annot_biomart[duplicated(gene_annot_biomart[,"external_gene_name_v2"]), "external_gene_name_v2"])
dup_genes <- dup_genes[!is.na(dup_genes)]
length(dup_genes) # 142
# Rows whose symbol is unique and not NA are kept unchanged.
non_dup_gene_index <- which((!(gene_annot_biomart[,"external_gene_name_v2"] %in% dup_genes))
& (!is.na(gene_annot_biomart[,"external_gene_name_v2"])) )
length(non_dup_gene_index) # 55415
# For one duplicated symbol, return the row index of the copy with the
# largest spread across samples.
# NOTE(review): reads the global 'gene_annot_biomart' instead of taking
# the annotation as an argument -- rows of 'data' must parallel it.
# gene_name: symbol that has duplicate annotation rows
# counter:   progress counter, printed so long runs show activity
# data:      expression matrix (rows parallel to gene_annot_biomart)
# method:    "IQR" ranks rows by inter-quartile range; any other value
#            (e.g. "SD") ranks by standard deviation
select_dup_genes <- function(gene_name, counter, data, method){
candi_index <- which(gene_annot_biomart[,"external_gene_name_v2"]==gene_name)
sub_data <- data[candi_index, ]
if(method=="IQR"){
mea_vec <- apply(sub_data, 1, IQR)
}else{
mea_vec <- apply(sub_data, 1, sd)
}
print(counter)
return(candi_index[which.max(mea_vec)])
}
# For each duplicated symbol keep the row with the largest SD.
dup_gene_keep_index <- sapply(1:length(dup_genes), function(x)
select_dup_genes(dup_genes[x], x, log2tpm, method="SD"))
# Unique rows plus the selected representative of each duplicated symbol.
gene_keep_index <- c(non_dup_gene_index, dup_gene_keep_index)
gene_annot_biomart_unique <- gene_annot_biomart[gene_keep_index, ]
log2tpm_unique <- log2tpm[gene_keep_index, ]
dim(log2tpm_unique) #55557 105
save(log2tpm_unique, file="Data_v2/log2tpm_unique.RData")
save(gene_annot_biomart_unique, file="Data_v2/gene_annot_biomart_unique.RData")
|
/Code/convert_to_unique_genes.R
|
no_license
|
lizhu06/TILsComparison_PBTvsMET
|
R
| false
| false
| 1,507
|
r
|
# Collapse duplicated gene symbols in a log2-TPM matrix: for each symbol
# that maps to more than one annotation row, keep only the most variable
# row (see select_dup_genes() below).

rm(list=ls())
options(stringsAsFactors = FALSE)
setwd("/net/wong05/home/liz86/Steffi/primary_vs_mets/")
## load data
load("Data_v2/tpm.RData")
# log2-transform TPM with a pseudocount of 1.
log2tpm <- log2(tpm + 1)
load("Data_v2/gene_annot_biomart.RData")
load("Data_v2/sample_annot.RData")
length(unique(gene_annot_biomart[,"external_gene_name_v2"])) #55644
# Gene symbols that appear in more than one annotation row (NA excluded).
dup_genes <- unique(gene_annot_biomart[duplicated(gene_annot_biomart[,"external_gene_name_v2"]), "external_gene_name_v2"])
dup_genes <- dup_genes[!is.na(dup_genes)]
length(dup_genes) # 142
# Rows whose symbol is unique and not NA are kept unchanged.
non_dup_gene_index <- which((!(gene_annot_biomart[,"external_gene_name_v2"] %in% dup_genes))
& (!is.na(gene_annot_biomart[,"external_gene_name_v2"])) )
length(non_dup_gene_index) # 55415
# Among duplicated annotation rows sharing the same gene symbol, pick the
# row whose expression profile is most variable.
#
# gene_name: gene symbol (external_gene_name_v2) that has duplicate rows.
# counter:   progress counter, printed so long runs show activity.
# data:      expression matrix whose rows parallel 'annot'.
# method:    "IQR" ranks candidate rows by inter-quartile range; any other
#            value (e.g. "SD", as used by the caller) ranks by standard
#            deviation.
# annot:     annotation data frame with an "external_gene_name_v2" column.
#            Defaults to the global 'gene_annot_biomart' so existing calls
#            keep working, but can now be passed explicitly.
#
# Returns the row index (into 'data'/'annot') of the most variable copy.
select_dup_genes <- function(gene_name, counter, data, method,
                             annot = gene_annot_biomart){
  candi_index <- which(annot[, "external_gene_name_v2"] == gene_name)
  # drop = FALSE keeps a one-row result as a matrix; without it a single
  # candidate row collapses to a vector and apply() fails.
  sub_data <- data[candi_index, , drop = FALSE]
  if (method == "IQR") {
    mea_vec <- apply(sub_data, 1, IQR)
  } else {
    mea_vec <- apply(sub_data, 1, sd)
  }
  print(counter)  # progress indicator
  candi_index[which.max(mea_vec)]
}
# For each duplicated symbol keep the row with the largest SD
# (method="SD" selects the sd branch inside select_dup_genes()).
dup_gene_keep_index <- sapply(1:length(dup_genes), function(x)
select_dup_genes(dup_genes[x], x, log2tpm, method="SD"))
# Unique rows plus the selected representative of each duplicated symbol.
gene_keep_index <- c(non_dup_gene_index, dup_gene_keep_index)
gene_annot_biomart_unique <- gene_annot_biomart[gene_keep_index, ]
log2tpm_unique <- log2tpm[gene_keep_index, ]
dim(log2tpm_unique) #55557 105
# Persist the de-duplicated matrix and its matching annotation.
save(log2tpm_unique, file="Data_v2/log2tpm_unique.RData")
save(gene_annot_biomart_unique, file="Data_v2/gene_annot_biomart_unique.RData")
|
#######################################################################
# Text-mining exercise on former/current presidential speeches
#######################################################################

# BUG FIX: these packages were used below without being loaded.
library(KoNLP)         # extractNoun()
library(wordcloud)     # wordcloud()
library(RColorBrewer)  # brewer.pal()

# Read the speech (UTF-8), one line per vector element.
park <- file("data\\park.txt", encoding="UTF-8")
myline <- readLines(park)
close(park)  # BUG FIX: the connection was left open
myline

# Extract nouns from each line, then flatten to one character vector.
myword <- sapply(myline, extractNoun, USE.NAMES=FALSE)
myword
result <- unlist(myword)
head(result, 20)

# Keep nouns of at least 2 characters (single characters are mostly noise).
result2 <- Filter(function(x){ nchar(x) >=2 }, result)
head(result2, 20)
# Exploration only: exactly-3-character and 2-4 character nouns.
result3 <- Filter(function(x){nchar(x) ==3}, result)
head(result3, 20)
result4 <- Filter(function(x){nchar(x) >=2 & nchar(x) <= 4},result)
head(result4, 20)

# Strip stop words, digits, and punctuation artifacts.
# NOTE(review): gsub() removes these as substrings anywhere in a word,
# not just whole tokens -- confirm this is intended.
result2<- gsub("것","",result2)
result2<- gsub("저","",result2)
result2<- gsub("원","",result2)
result2<- gsub("\\n","",result2)
result2<- gsub("\\d","",result2)
result2<- gsub("\\.","",result2)
head(result2, 20)

# Round-trip through a file, then tabulate word frequencies.
write(unlist(result2),"myresult.txt")
myword <- read.table("myresult.txt")
nrow(myword)
wordcount <- table(myword)
head(sort(wordcount, decreasing=TRUE), 20)

# Word cloud of the noun frequencies.
palete <- brewer.pal(9, "Set1")
#x11()
wordcloud(
  names(wordcount),
  freq=wordcount,
  scale=c(5,1),
  rot.per=0.5,
  min.freq=4,
  random.order=FALSE,
  random.color=TRUE,
  colors=palete
)

# Pie chart of the 20 most frequent nouns.
a<-head(sort(wordcount, decreasing=TRUE), 20)
pie(a, col=rainbow(10), radius=1)
pct<- round(a/sum(a)*100, 1)
names(a)
lab <- paste(names(a), "\n", pct, "%")
# BUG FIX: 'mail' -> 'main' and 'lables' -> 'labels'; the misspelled
# argument names were silently swallowed by pie()'s '...', so the title
# and percentage labels never appeared.
pie(a, main="대통령연설문", col=rainbow(10), cex=0.8, labels=lab)
# Overlay a smaller white circle to turn the pie into a donut chart.
par(new=TRUE)
pie(a, radius=0.6, col="white", labels=NA, border=NA)
|
/text_mining_president.R
|
no_license
|
HappyBottle/Bottles
|
R
| false
| false
| 1,496
|
r
|
#######################################################################
# Text-mining exercise on former/current presidential speeches
#######################################################################

# BUG FIX: these packages were used below without being loaded.
library(KoNLP)         # extractNoun()
library(wordcloud)     # wordcloud()
library(RColorBrewer)  # brewer.pal()

# Read the speech (UTF-8), one line per vector element.
park <- file("data\\park.txt", encoding="UTF-8")
myline <- readLines(park)
close(park)  # BUG FIX: the connection was left open
myline

# Extract nouns from each line, then flatten to one character vector.
myword <- sapply(myline, extractNoun, USE.NAMES=FALSE)
myword
result <- unlist(myword)
head(result, 20)

# Keep nouns of at least 2 characters (single characters are mostly noise).
result2 <- Filter(function(x){ nchar(x) >=2 }, result)
head(result2, 20)
# Exploration only: exactly-3-character and 2-4 character nouns.
result3 <- Filter(function(x){nchar(x) ==3}, result)
head(result3, 20)
result4 <- Filter(function(x){nchar(x) >=2 & nchar(x) <= 4},result)
head(result4, 20)

# Strip stop words, digits, and punctuation artifacts.
# NOTE(review): gsub() removes these as substrings anywhere in a word,
# not just whole tokens -- confirm this is intended.
result2<- gsub("것","",result2)
result2<- gsub("저","",result2)
result2<- gsub("원","",result2)
result2<- gsub("\\n","",result2)
result2<- gsub("\\d","",result2)
result2<- gsub("\\.","",result2)
head(result2, 20)

# Round-trip through a file, then tabulate word frequencies.
write(unlist(result2),"myresult.txt")
myword <- read.table("myresult.txt")
nrow(myword)
wordcount <- table(myword)
head(sort(wordcount, decreasing=TRUE), 20)

# Word cloud of the noun frequencies.
palete <- brewer.pal(9, "Set1")
#x11()
wordcloud(
  names(wordcount),
  freq=wordcount,
  scale=c(5,1),
  rot.per=0.5,
  min.freq=4,
  random.order=FALSE,
  random.color=TRUE,
  colors=palete
)

# Pie chart of the 20 most frequent nouns.
a<-head(sort(wordcount, decreasing=TRUE), 20)
pie(a, col=rainbow(10), radius=1)
pct<- round(a/sum(a)*100, 1)
names(a)
lab <- paste(names(a), "\n", pct, "%")
# BUG FIX: 'mail' -> 'main' and 'lables' -> 'labels'; the misspelled
# argument names were silently swallowed by pie()'s '...', so the title
# and percentage labels never appeared.
pie(a, main="대통령연설문", col=rainbow(10), cex=0.8, labels=lab)
# Overlay a smaller white circle to turn the pie into a donut chart.
par(new=TRUE)
pie(a, radius=0.6, col="white", labels=NA, border=NA)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SBGN.to.SVG.R
\name{renderSbgn}
\alias{renderSbgn}
\title{Overlay omics data on a SBGN pathway graph and output image files.}
\usage{
renderSbgn(
input.sbgn,
output.file,
output.formats,
sbgn.id.attr,
glyphs.user = list(),
arcs.user = list(),
arcs.info = "straight",
compartment.layer.info = "original",
user.data = matrix("no.user.data", nrow = 1),
if.plot.svg = TRUE,
key.pos = "topright",
color.panel.scale = 1,
color.panel.n.grid = 21,
col.gene.low = "green",
col.gene.high = "red",
col.gene.mid = "gray",
col.cpd.low = "blue",
col.cpd.high = "yellow",
col.cpd.mid = "gray",
min.gene.value = -1,
max.gene.value = 1,
mid.gene.value = 0,
min.cpd.value = -1,
max.cpd.value = 1,
mid.cpd.value = 0,
pathway.name = "",
pathway.name.font.size = 1,
if.plot.cardinality = FALSE,
multimer.margin = 5,
compartment.opacity = 1,
auxiliary.opacity = 1,
if.plot.annotation.nodes = FALSE,
inhibition.edge.end.shift = 5,
edge.tip.size = 6,
if.use.number.for.long.label = FALSE,
label.spliting.string = c(" ", ":", "-", ";", "/", "_"),
complex.compartment.label.margin = 8,
if.write.shorter.label.mapping = TRUE,
font.size = 3,
logic.node.font.scale = 3,
status.node.font.scale = 3,
node.width.adjust.factor = 2,
font.size.scale.gene = 3,
font.size.scale.cpd = 3,
font.size.scale.complex = 1.1,
font.size.scale.compartment = 1.6,
if.scale.complex.font.size = FALSE,
if.scale.compartment.font.size = FALSE,
node.width.adjust.factor.compartment = 0.02,
node.width.adjust.factor.complex = 0.02,
text.length.factor = 2,
text.length.factor.macromolecule = 2,
text.length.factor.compartment = 2,
text.length.factor.complex = 2,
space.between.color.panel.and.entity = 100,
global.parameters.list = NULL
)
}
\arguments{
\item{input.sbgn}{A character string. The path to a local SBGN-ML file.}
\item{output.file, output.formats, sbgn.id.attr}{These parameters are the same as the ones in \code{\link{SBGNview}}. Please see \code{\link{SBGNview}} for more details.}
\item{glyphs.user}{A list, optional. Each element is a 'glyph' object. The element names are glyph IDs (attribute 'id' of XHTML element 'glyph'). Note this is not affected by parameter 'sbgn.id.attr'. The glyph elements contain glyph meta-data for plotting (e.g. text size, border width, border color etc.). Please see the \code{\link{glyph-class}} documentation for more information. By default, SBGNview will run without this argument and return a glyph list extracted from the SBGN file. User can then customize this glyph list and assign it to 'glyphs.user' in the next SBGNview run to update the graph.}
\item{arcs.user}{A list, optional. Each member is an 'arc' object. The element names are arc IDs (the value of 'id' attribute in XHTML element 'arc' or 'arc.spline' in the SBGN-ML file). Some SBGN-ML files have no arc IDs, in this case SBGNview will create an arc ID using 'source' and 'target' node IDs). The arc object contains arc meta-data for plotting (e.g. arc line width, line color etc.). Please see the \code{\link{arc-class}} documentation for more information. By default, SBGNview() will run without this argument and return an arc list. User can then customize this arc list and assign it to 'arcs.user' in the next SBGNview() run to update the arcs.}
\item{arcs.info}{A character string. It should be one of the following: 'parse splines', 'straight' or a string of svg code of arcs. If it is 'parse splines', this function will look for XML element 'arc.spline' in the SBGN-ML file and plot spline arcs. If it is 'straight', the function will look for element 'arc' and plot straight line arcs. If it is a string of svg code, it will write this code directly to the output svg file.}
\item{compartment.layer.info}{A character vector. It is a vector containing the IDs of all compartment glyphs. It determines the layer arrangement of compartments. Compartments will be drawn following their sequence in this vector. Therefore, a compartment that appears later in the vector will be on the front layer and covers the compartments that are before it in this vector. This is important. In some cases compartments overlap. This layer information ensures that a glyph lying in the overlapped region belongs to the compartment on the top layer.}
\item{user.data}{A list. It holds both gene/protein data and compound data. Names are gene or compounds, each element is a numeric vector of the omics data of each molecule.}
\item{if.plot.svg}{Logical. Default: T. Whether to generate svg code or only parse SBGN-ML file. This parameter is for internal use only.}
\item{key.pos}{A character string. The position of color panel. Default: 'topright'. Accepts one of 'bottomleft' , 'bottomright', 'topright', or 'topleft'. The ideal position for the color panel is 'topright' or 'bottomright'. If 'topleft' or 'bottomleft' are passed in, the key.pos location will default to 'topright'. If key.pos is set to 'none', the pathway name and color panel won't be plotted.}
\item{color.panel.scale}{Numeric. Default: 1. It controls the relative size of color scheme panel.}
\item{color.panel.n.grid}{Numeric. Default: 21. How many colors does the color scheme show.}
\item{col.gene.low}{A character string. Default: 'green'. Color panel's color representing low gene data values.}
\item{col.gene.high}{A character string. Default: 'red'. Color panel's color representing high gene data values.}
\item{col.gene.mid}{A character string. Default: 'gray'. Color panel's color representing mid range gene data values.}
\item{col.cpd.low}{A character string. Default: 'blue'. Color panel's color representing low compound data values.}
\item{col.cpd.high}{A character string. Default: 'yellow'. Color panel's color representing high compound data values.}
\item{col.cpd.mid}{A character string. Default: 'gray'. Color panel's color representing mid range compound data values.}
\item{min.gene.value}{Numeric. Default: -1. Color panel's min value for gene data. Values smaller than this will have the same color as min value.}
\item{max.gene.value}{Numeric. Default: 1. Color panel's max value for gene data. Values greater than this will have the same color as the max value.}
\item{mid.gene.value}{Numeric. Default: 0. Color panel's mid value for gene data.}
\item{min.cpd.value}{Numeric. Default: -1. Color panel's min value for compound data. Values smaller than this will have the same color as min value.}
\item{max.cpd.value}{Numeric. Default: 1. Color panel's max value for compound data. Values greater than this will have the same color as max value.}
\item{mid.cpd.value}{Numeric. Default: 0. Color panel's mid value for compound data.}
\item{pathway.name}{List containing two elements: 1. pathway name 2. stamp information. See argument description in \code{\link{SBGNview}} function to change/update pathway name displayed on graph.}
\item{pathway.name.font.size}{Numeric. Default: 1. When pathway names are plotted on graph, this parameter controls their font size.}
\item{if.plot.cardinality}{Logical. Default: F. If plot cardinality glyphs.}
\item{multimer.margin}{Numeric. Default: 5. For multimers, they are represented by two partly overlapped shapes (rectangle, ellipse etc.). This parameter controls how much the two shapes overlap.}
\item{compartment.opacity}{Numeric. Default: 1. Controls how transparent the compartments are.}
\item{auxiliary.opacity}{Numeric. Default: 1. Controls opacity of auxiliary glyphs.}
\item{if.plot.annotation.nodes}{Logical. Default: F. Some SBGN-ML files have 'annotation' glyphs. By default we don't plot them.}
\item{inhibition.edge.end.shift}{Numeric. Default: 5. The tip of 'inhibition' arcs is a line segment. Sometimes it overlaps with target glyph's border. We can shift it along the arc to prevent the overlap.}
\item{edge.tip.size}{Numeric. Default: 6. Control size of edge tips.}
\item{if.use.number.for.long.label}{Logical. Default: F. If the label is too long, we can create a shorter name for it. e.g. 'macromolecule_1'.}
\item{label.spliting.string}{A character vector. Default: c(' ','-',';','/','_'). When we split text into multiple lines, these characters will be used to split label (where a new line can be created).}
\item{complex.compartment.label.margin}{Numeric. Default: 8. Move the label text vertically for compartment and complex.}
\item{if.write.shorter.label.mapping}{Logical. Default: T. If if.use.number.for.long.label is 'T', we can write the mapping between shorter name and the original label to a text file.}
\item{font.size}{Numeric. Default: 3. Affects font size of all types of glyphs.}
\item{logic.node.font.scale}{Numeric. Default: 3. Controls the size of logical glyphs ('and', 'or', 'not' etc.).}
\item{status.node.font.scale}{Numeric. Default: 3. Scale the font size for status variable and unit of information nodes.}
\item{node.width.adjust.factor}{Numeric. Default: 2. Change font size according to the width of its glyph. If the glyph is too large (e.g. compartment), its label may look too small. Then we can enlarge the label in proportion to the width of the glyph. It affects all types of glyphs.}
\item{font.size.scale.gene}{Numeric. Default: 3. Scales font size according to the node's width for large compartments. Only affect font size of 'macromolecule' glyphs.}
\item{font.size.scale.cpd}{Numeric. Default: 3. Scales font size according to the node's width for large compartments. Only affects font size of 'simple chemical' glyphs.}
\item{font.size.scale.complex}{Numeric. Default: 1.1. Scale the font size of a complex.}
\item{font.size.scale.compartment}{Numeric. Default: 1.6. Scale the font size of a compartment.}
\item{if.scale.complex.font.size}{Logical. Default: F. Whether to scale complex font size according to its width. If set to 'T', the 'node.width.adjust.factor.complex' argument can be used to specify the scale factor.}
\item{if.scale.compartment.font.size}{Logical. Default: F. Whether to scale compartment font size according to its width. If set to 'T', the 'node.width.adjust.factor.compartment' argument can be used to specify the scale factor.}
\item{node.width.adjust.factor.compartment}{Numeric. Default: 0.02. How much the font size should change in proportion to the width of compartment. The font is scaled only if 'if.scale.compartment.font.size' is set to 'T'. To find the best scale factor that works for you, start with 0.02 (default) and incrementally increase that value.}
\item{node.width.adjust.factor.complex}{Numeric. Default: 0.02. How much the font size should change in proportion to the width of complex. The font is scaled only if 'if.scale.complex.font.size' is set to 'T'. To find the best scale factor that works for you, start with 0.02 (default) and incrementally increase that value.}
\item{text.length.factor}{Numeric. Default: 2. How wide the wrapped text should be. If text is longer than the width controlled by this parameter, the text is split into a new line, but only at characters in 'label.spliting.string'. Controls all glyphs.}
\item{text.length.factor.macromolecule}{Numeric. Default: 2. Used to determine label text wrapping based on number of characters, font size, and node width for macromolecule glyphs.}
\item{text.length.factor.compartment}{Numeric. Default: 2. Used to determine label text wrapping based on number of characters, font size, and node width for compartment glyphs.}
\item{text.length.factor.complex}{Numeric. Default: 2. Used to determine label text wrapping based on number of characters, font size, and node width for complex glyphs.}
\item{space.between.color.panel.and.entity}{Numeric. Default: 100. The minimum space between color panel and any other object in the graph. The function will always try to find a location of the color panel to minimize empty space on the whole graph. This parameter controls how close it can reach a glyph.}
\item{global.parameters.list}{List. A record of parameters fed to 'renderSbgn' for reuse. It will over-write other parameters. It is not designed to be used directly.}
}
\value{
A list of three elements: glyphs.list, arcs.list, global.parameters.list
}
\description{
This function is not intended to be used directly. Use SBGNview instead. Some input arguments can be better prepared by \code{\link{SBGNview}}.
}
\examples{
\dontrun{
data(pathways.info)
SBGNview.obj <- SBGNview(simulate.data = TRUE,
sbgn.dir = './',
input.sbgn = 'P00001',
output.file = './test.local.file',
output.formats = c('pdf'),
min.gene.value = -1,
max.gene.value = 1)
}
}
|
/man/renderSbgn.Rd
|
no_license
|
datapplab/SBGNview
|
R
| false
| true
| 12,827
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SBGN.to.SVG.R
\name{renderSbgn}
\alias{renderSbgn}
\title{Overlay omics data on a SBGN pathway graph and output image files.}
\usage{
renderSbgn(
input.sbgn,
output.file,
output.formats,
sbgn.id.attr,
glyphs.user = list(),
arcs.user = list(),
arcs.info = "straight",
compartment.layer.info = "original",
user.data = matrix("no.user.data", nrow = 1),
if.plot.svg = TRUE,
key.pos = "topright",
color.panel.scale = 1,
color.panel.n.grid = 21,
col.gene.low = "green",
col.gene.high = "red",
col.gene.mid = "gray",
col.cpd.low = "blue",
col.cpd.high = "yellow",
col.cpd.mid = "gray",
min.gene.value = -1,
max.gene.value = 1,
mid.gene.value = 0,
min.cpd.value = -1,
max.cpd.value = 1,
mid.cpd.value = 0,
pathway.name = "",
pathway.name.font.size = 1,
if.plot.cardinality = FALSE,
multimer.margin = 5,
compartment.opacity = 1,
auxiliary.opacity = 1,
if.plot.annotation.nodes = FALSE,
inhibition.edge.end.shift = 5,
edge.tip.size = 6,
if.use.number.for.long.label = FALSE,
label.spliting.string = c(" ", ":", "-", ";", "/", "_"),
complex.compartment.label.margin = 8,
if.write.shorter.label.mapping = TRUE,
font.size = 3,
logic.node.font.scale = 3,
status.node.font.scale = 3,
node.width.adjust.factor = 2,
font.size.scale.gene = 3,
font.size.scale.cpd = 3,
font.size.scale.complex = 1.1,
font.size.scale.compartment = 1.6,
if.scale.complex.font.size = FALSE,
if.scale.compartment.font.size = FALSE,
node.width.adjust.factor.compartment = 0.02,
node.width.adjust.factor.complex = 0.02,
text.length.factor = 2,
text.length.factor.macromolecule = 2,
text.length.factor.compartment = 2,
text.length.factor.complex = 2,
space.between.color.panel.and.entity = 100,
global.parameters.list = NULL
)
}
\arguments{
\item{input.sbgn}{A character string. The path to a local SBGN-ML file.}
\item{output.file, output.formats, sbgn.id.attr}{These parameters are the same as the ones in \code{\link{SBGNview}}. Please see \code{\link{SBGNview}} for more details.}
\item{glyphs.user}{A list, optional. Each element is a 'glyph' object. The element names are glyph IDs (attribute 'id' of XHTML element 'glyph'). Note this is not affected by parameter 'sbgn.id.attr'. The glyph elements contain glyph meta-data for plotting (e.g. text size, border width, border color etc.). Please see the \code{\link{glyph-class}} documentation for more information. By default, SBGNview will run without this argument and return a glyph list extracted from the SBGN file. User can then customize this glyph list and assign it to 'glyphs.user' in the next SBGNview run to update the graph.}
\item{arcs.user}{A list, optional. Each member is an 'arc' object. The element names are arc IDs (the value of 'id' attribute in XHTML element 'arc' or 'arc.spline' in the SBGN-ML file). Some SBGN-ML files have no arc IDs, in this case SBGNview will create an arc ID using 'source' and 'target' node IDs). The arc object contains arc meta-data for plotting (e.g. arc line width, line color etc.). Please see the \code{\link{arc-class}} documentation for more information. By default, SBGNview() will run without this argument and return an arc list. User can then customize this arc list and assign it to 'arcs.user' in the next SBGNview() run to update the arcs.}
\item{arcs.info}{A character string. It should be one of the following: 'parse splines', 'straight' or a string of svg code of arcs. If it is 'parse splines', this function will look for XML element 'arc.spline' in the SBGN-ML file and plot spline arcs. If it is 'straight', the function will look for element 'arc' and plot straight line arcs. If it is a string of svg code, it will write this code directly to the output svg file.}
\item{compartment.layer.info}{A character vector. It is a vector containing the IDs of all compartment glyphs. It determines the layer arrangement of compartments. Compartments will be drawn following their sequence in this vector. Therefore, a compartment that appears later in the vector will be on the front layer and covers the compartments that are before it in this vector. This is important. In some cases compartments overlap. This layer information ensures that a glyph lying in the overlapped region belongs to the compartment on the top layer.}
\item{user.data}{A list. It holds both gene/protein data and compound data. Names are gene or compounds, each element is a numeric vector of the omics data of each molecule.}
\item{if.plot.svg}{Logical. Default: T. Whether to generate svg code or only parse SBGN-ML file. This parameter is for internal use only.}
\item{key.pos}{A character string. The position of color panel. Default: 'topright'. Accepts one of 'bottomleft' , 'bottomright', 'topright', or 'topleft'. The ideal position for the color panel is 'topright' or 'bottomright'. If 'topleft' or 'bottomleft' are passed in, the key.pos location will default to 'topright'. If key.pos is set to 'none', the pathway name and color panel won't be plotted.}
\item{color.panel.scale}{Numeric. Default: 1. It controls the relative size of color scheme panel.}
\item{color.panel.n.grid}{Numeric. Default: 21. How many colors does the color scheme show.}
\item{col.gene.low}{A character string. Default: 'green'. Color panel's color representing low gene data values.}
\item{col.gene.high}{A character string. Default: 'red'. Color panel's color representing high gene data values.}
\item{col.gene.mid}{A character string. Default: 'gray'. Color panel's color representing mid range gene data values.}
\item{col.cpd.low}{A character string. Default: 'blue'. Color panel's color representing low compound data values.}
\item{col.cpd.high}{A character string. Default: 'yellow'. Color panel's color representing high compound data values.}
\item{col.cpd.mid}{A character string. Default: 'gray'. Color panel's color representing mid range compound data values.}
\item{min.gene.value}{Numeric. Default: -1. Color panel's min value for gene data. Values smaller than this will have the same color as min value.}
\item{max.gene.value}{Numeric. Default: 1. Color panel's max value for gene data. Values greater than this will have the same color as the max value.}
\item{mid.gene.value}{Numeric. Default: 0. Color panel's mid value for gene data.}
\item{min.cpd.value}{Numeric. Default: -1. Color panel's min value for compound data. Values smaller than this will have the same color as min value.}
\item{max.cpd.value}{Numeric. Default: 1. Color panel's max value for compound data. Values greater than this will have the same color as max value.}
\item{mid.cpd.value}{Numeric. Default: 0. Color panel's mid value for compound data.}
\item{pathway.name}{List containing two elements: 1. pathway name 2. stamp information. See argument description in \code{\link{SBGNview}} function to change/update pathway name displayed on graph.}
\item{pathway.name.font.size}{Numeric. Default: 1. When pathway names are plotted on graph, this parameter controls their font size.}
\item{if.plot.cardinality}{Logical. Default: F. If plot cardinality glyphs.}
\item{multimer.margin}{Numeric. Default: 5. For multimers, they are represented by two partly overlapped shapes (rectangle, ellipse etc.). This parameter controls how much the two shapes overlap.}
\item{compartment.opacity}{Numeric. Default: 1. Controls how transparent the compartments are.}
\item{auxiliary.opacity}{Numeric. Default: 1. Controls opacity of auxiliary glyphs.}
\item{if.plot.annotation.nodes}{Logical. Default: F. Some SBGN-ML files have 'annotation' glyphs. By default we don't plot them.}
\item{inhibition.edge.end.shift}{Numeric. Default: 5. The tip of 'inhibition' arcs is a line segment. Sometimes it overlaps with target glyph's border. We can shift it along the arc to prevent the overlap.}
\item{edge.tip.size}{Numeric. Default: 6. Control size of edge tips.}
\item{if.use.number.for.long.label}{Logical. Default: F. If the label is too long, we can create a shorter name for it. e.g. 'macromolecule_1'.}
\item{label.spliting.string}{A character vector. Default: c(' ','-',';','/','_'). When we split text into multiple lines, these characters will be used to split label (where a new line can be created).}
\item{complex.compartment.label.margin}{Numeric. Default: 8. Move the label text vertically for compartment and complex.}
\item{if.write.shorter.label.mapping}{Logical. Default: T. If if.use.number.for.long.label is 'T', we can write the mapping between shorter name and the original label to a text file.}
\item{font.size}{Numeric. Default: 3. Affects font size of all types of glyphs.}
\item{logic.node.font.scale}{Numeric. Default: 3. Controls the size of logical glyphs ('and', 'or', 'not' etc.).}
\item{status.node.font.scale}{Numeric. Default: 3. Scale the font size for status variable and unit of information nodes.}
\item{node.width.adjust.factor}{Numeric. Default: 2. Change font size according to the width of its glyph. If the glyph is too large (e.g. compartment), its label may look too small. Then we can enlarge the label in proportion to the width of the glyph. It affects all types of glyphs.}
\item{font.size.scale.gene}{Numeric. Default: 3. Scales font size according to the node's width for large compartments. Only affect font size of 'macromolecule' glyphs.}
\item{font.size.scale.cpd}{Numeric. Default: 3. Scales font size according to the node's width for large compartments. Only affects font size of 'simple chemical' glyphs.}
\item{font.size.scale.complex}{Numeric. Default: 1.1. Scale the font size of a complex.}
\item{font.size.scale.compartment}{Numeric. Default: 1.6. Scale the font size of a compartment.}
\item{if.scale.complex.font.size}{Logical. Default: F. Whether to scale complex font size according to its width. If set to 'T', the 'node.width.adjust.factor.complex' argument can be used to specify the scale factor.}
\item{if.scale.compartment.font.size}{Logical. Default: F. Whether to scale compartment font size according to its width. If set to 'T', the 'node.width.adjust.factor.compartment' argument can be used to specify the scale factor.}
\item{node.width.adjust.factor.compartment}{Numeric. Default: 0.02. How much the font size should change in proportion to the width of compartment. The font is scaled only if 'if.scale.compartment.font.size' is set to 'T'. To find the best scale factor that works for you, start with 0.02 (default) and incrementally increase that value.}
\item{node.width.adjust.factor.complex}{Numeric. Default: 0.02. How much the font size should change in proportion to the width of complex. The font is scaled only if 'if.scale.complex.font.size' is set to 'T'. To find the best scale factor that works for you, start with 0.02 (default) and incrementally increase that value.}
\item{text.length.factor}{Numeric. Default: 2. How wide the wrapped text should be. If text is longer than the width controlled by this parameter, the text is split into a new line, but only at characters in 'label.spliting.string'. Controls all glyphs.}
\item{text.length.factor.macromolecule}{Numeric. Default: 2. Used to determine label text wrapping based on number of characters, font size, and node width for macromolecule glyphs.}
\item{text.length.factor.compartment}{Numeric. Default: 2. Used to determine label text wrapping based on number of characters, font size, and node width for compartment glyphs.}
\item{text.length.factor.complex}{Numeric. Default: 2. Used to determine label text wrapping based on number of characters, font size, and node width for complex glyphs.}
\item{space.between.color.panel.and.entity}{Numeric. Default: 100. The minimum space between color panel and any other object in the graph. The function will always try to find a location of the color panel to minimize empty space on the whole graph. This parameter controls how close it can reach a glyph.}
\item{global.parameters.list}{List. A record of parameters fed to 'renderSbgn' for reuse. It will over-write other parameters. It is not designed to be used directly.}
}
\value{
A list of three elements: glyphs.list, arcs.list, global.parameters.list
}
\description{
This function is not intended to be used directly. Use SBGNview instead. Some input arguments can be better prepared by \code{\link{SBGNview}}.
}
\examples{
\dontrun{
data(pathways.info)
SBGNview.obj <- SBGNview(simulate.data = TRUE,
sbgn.dir = './',
input.sbgn = 'P00001',
output.file = './test.local.file',
output.formats = c('pdf'),
min.gene.value = -1,
max.gene.value = 1)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objectSetOfTimePoints.R
\docType{class}
\name{SetOfTimePoints-class}
\alias{SetOfTimePoints-class}
\alias{SetOfTimePoints}
\alias{setOfTimePoints}
\title{S4 class SetOfTimePoints representing a set of designs with given time points}
\description{
S4 class SetOfTimePoints representing a set of designs with given time points
}
\section{Slots}{
\describe{
\item{\code{.Data}}{a numeric array of 2 dimensions (nTimePointChoices x nTimePointsSelect) containing, per time point choice, the selected time points in hours}
\item{\code{fullTimePoints}}{numeric vector of all time points one is willing to consider}
\item{\code{nFullTimePoints}}{number of all time points one is willing to consider}
\item{\code{nTimePointsSelect}}{number of time points selected from the fullTimePoints}
\item{\code{nTimePointOptions}}{number of possible timePoint choices}
\item{\code{ranking}}{is a data.frame which is the rank of the timePointChoices according to a specific criterion.}
}}
\author{
Adriaan Blommaert
}
|
/man/SetOfTimePoints-class.Rd
|
no_license
|
cran/microsamplingDesign
|
R
| false
| true
| 1,082
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objectSetOfTimePoints.R
\docType{class}
\name{SetOfTimePoints-class}
\alias{SetOfTimePoints-class}
\alias{SetOfTimePoints}
\alias{setOfTimePoints}
\title{S4 class SetOfTimePoints representing a set of designs with given time points}
\description{
S4 class SetOfTimePoints representing a set of designs with given time points
}
\section{Slots}{
\describe{
\item{\code{.Data}}{a numeric array of 2 dimensions (nTimePointChoices x nTimePointsSelect) containing, per time point choice, the selected time points in hours}
\item{\code{fullTimePoints}}{numeric vector of all time points one is willing to consider}
\item{\code{nFullTimePoints}}{number of all time points one is willing to consider}
\item{\code{nTimePointsSelect}}{number of time points selected from the fullTimePoints}
\item{\code{nTimePointOptions}}{number of possible timePoint choices}
\item{\code{ranking}}{is a data.frame which is the rank of the timePointChoices according to a specific criterion.}
}}
\author{
Adriaan Blommaert
}
|
# Install the packages required to run SCENIC.
# Bootstrap BiocManager if it is not yet available, then report the
# Bioconductor version in use (SCENIC expects Bioconductor >= 3.9; for
# older versions see the section below).
if (!requireNamespace("BiocManager", quietly = TRUE)) {
  install.packages("BiocManager")
}
BiocManager::version()

# Core dependencies (required).
required_pkgs <- c("AUCell", "RcisTarget")
BiocManager::install(required_pkgs)

# GENIE3 is optional and can be replaced by GRNBoost.
BiocManager::install("GENIE3")

# Optional but highly recommended extras:
# - scoring the network on cells (i.e. running AUCell):
BiocManager::install(c("zoo", "mixtools", "rbokeh"))
# - various visualizations and t-SNE computation:
BiocManager::install(c("DT", "NMF", "pheatmap", "R2HTML", "Rtsne"))
# - parallel execution (not available on Windows):
BiocManager::install(c("doMC", "doRNG"))

# SCopeLoomR enables export/visualization at http://scope.aertslab.org.
if (!requireNamespace("devtools", quietly = TRUE)) {
  install.packages("devtools")
}
devtools::install_github("aertslab/SCopeLoomR", build_vignettes = TRUE)
|
/for_install_SCENIC_reqiured_packages.r
|
no_license
|
WinterFor/Telomerase.top
|
R
| false
| false
| 894
|
r
|
# Install the packages required to run SCENIC.
# Bootstrap BiocManager if it is not yet available, then report the
# Bioconductor version in use (SCENIC expects Bioconductor >= 3.9; for
# older versions see the section below).
if (!requireNamespace("BiocManager", quietly = TRUE)) {
  install.packages("BiocManager")
}
BiocManager::version()

# Core dependencies (required).
required_pkgs <- c("AUCell", "RcisTarget")
BiocManager::install(required_pkgs)

# GENIE3 is optional and can be replaced by GRNBoost.
BiocManager::install("GENIE3")

# Optional but highly recommended extras:
# - scoring the network on cells (i.e. running AUCell):
BiocManager::install(c("zoo", "mixtools", "rbokeh"))
# - various visualizations and t-SNE computation:
BiocManager::install(c("DT", "NMF", "pheatmap", "R2HTML", "Rtsne"))
# - parallel execution (not available on Windows):
BiocManager::install(c("doMC", "doRNG"))

# SCopeLoomR enables export/visualization at http://scope.aertslab.org.
if (!requireNamespace("devtools", quietly = TRUE)) {
  install.packages("devtools")
}
devtools::install_github("aertslab/SCopeLoomR", build_vignettes = TRUE)
|
# Data wrangling in R
# Created by jdegen on Sep 17, 2016
# Modified by jdegen on May 11, 2018

library(tidyverse)
# FIX: lmer() is provided by lme4, which library(tidyverse) does NOT attach.
# Without this the model fit below fails with "could not find function 'lmer'".
library(lme4)

# Load datasets. R will automatically read the contents of these files into data.frames.
wide = read.csv("data/lexdec_wide.csv")
head(wide)
wordinfo = read.csv("data/wordinfo.csv")
head(wordinfo)

# If your data isn't comma-separated, you can use read.table() instead.
wordinfo = read.table("data/wordinfo.csv", sep=",", header=T)
head(wordinfo)

# In order to conduct our regression analysis, we need to
# a) get wide into long format
# b) add word info (frequency, family size).

# We can easily switch between long and wide format using the gather() and spread() functions from the tidyr package.
# (gather()/spread() are superseded by pivot_longer()/pivot_wider() in current
# tidyr, but remain available and are kept here to match the original tutorial.)
long = wide %>%
  gather(Word,RT,-Subject,-Sex,-NativeLanguage) %>%
  arrange(Subject)
head(long)

# 1. We just sorted the resulting long format by Subject. Sort it by Word instead.
long = wide %>%
  gather(Word,RT,-Subject,-Sex,-NativeLanguage) %>%
  arrange(Word)
head(long)

# We can add word level information to the long format using merge()
lexdec = merge(long,wordinfo,by=c("Word"),all.x=T)
head(lexdec)

# Are we sure the data.frames got merged correctly? Let's inspect a few cases.
wordinfo[wordinfo$Word == "almond",]

# 2. Convince yourself that the information got correctly added by testing a few more cases.
wordinfo[wordinfo$Word == "melon",]
wordinfo[wordinfo$Word == "walnut",]

# Success! We are ready to run our mixed effects models.
m = lmer(RT ~ Frequency*NativeLanguage + FamilySize + (1 + Frequency + FamilySize | Subject) + (1 + NativeLanguage | Word), data=lexdec)
summary(m)

# Often, we'll want to summarize information by groups. For example, if we want to compute RT means by subject:
subj_means = lexdec %>%
  group_by(Subject) %>%
  summarize(Mean = mean(RT), SD = sd(RT), Max = max(RT))

# 3. Compute RT means by Word instead.
subj_means = lexdec %>%
  group_by(Word) %>%
  summarize(Mean = mean(RT), SD = sd(RT), Max = max(RT))

# Sometimes we want to save data.frames or R console output to a file. For example, we might want to save our newly created lexdec dataset:
write.csv(lexdec,file="data/lexdec_long.csv")

# 4. Using the R help, figure out how to suppress the row.names and the quotes in the output and re-write the file.
#    (Renumbered from the duplicate "3." in the original.)
write.csv(lexdec,file="data/lexdec_long.csv",row.names=F,quote=F)

# We can also save the console output to a file (for example, if you've run a particularly time-consuming regression analysis you may want to save the model results).
out = capture.output(summary(m))
out
cat("My awesome results", out, file="data/modeloutput.txt", sep="\n")

# If you want to save the model directly for future use:
save(m,file="data/mymodel.RData")
# You can later reload the model using load()
load("data/mymodel.RData")

# 5. Why was "mymodel.RData" a poorly chosen name for the file? Choose a better name.
#    (Renumbered from "4." in the original.)
# Because load() loads the object with its original variable name. If you load an object for the first time months after you saved it, you may not remember what you called it. For that reason it's advisable to save models in files that are named after the model, e.g.:
save(m,file="data/m.RData")
|
/code_sheets/3_reformatting_data.R
|
permissive
|
willclapp/Replication245B
|
R
| false
| false
| 3,174
|
r
|
# Data wrangling in R
# Created by jdegen on Sep 17, 2016
# Modified by jdegen on May 11, 2018

library(tidyverse)
# FIX: lmer() is provided by lme4, which library(tidyverse) does NOT attach.
# Without this the model fit below fails with "could not find function 'lmer'".
library(lme4)

# Load datasets. R will automatically read the contents of these files into data.frames.
wide = read.csv("data/lexdec_wide.csv")
head(wide)
wordinfo = read.csv("data/wordinfo.csv")
head(wordinfo)

# If your data isn't comma-separated, you can use read.table() instead.
wordinfo = read.table("data/wordinfo.csv", sep=",", header=T)
head(wordinfo)

# In order to conduct our regression analysis, we need to
# a) get wide into long format
# b) add word info (frequency, family size).

# We can easily switch between long and wide format using the gather() and spread() functions from the tidyr package.
# (gather()/spread() are superseded by pivot_longer()/pivot_wider() in current
# tidyr, but remain available and are kept here to match the original tutorial.)
long = wide %>%
  gather(Word,RT,-Subject,-Sex,-NativeLanguage) %>%
  arrange(Subject)
head(long)

# 1. We just sorted the resulting long format by Subject. Sort it by Word instead.
long = wide %>%
  gather(Word,RT,-Subject,-Sex,-NativeLanguage) %>%
  arrange(Word)
head(long)

# We can add word level information to the long format using merge()
lexdec = merge(long,wordinfo,by=c("Word"),all.x=T)
head(lexdec)

# Are we sure the data.frames got merged correctly? Let's inspect a few cases.
wordinfo[wordinfo$Word == "almond",]

# 2. Convince yourself that the information got correctly added by testing a few more cases.
wordinfo[wordinfo$Word == "melon",]
wordinfo[wordinfo$Word == "walnut",]

# Success! We are ready to run our mixed effects models.
m = lmer(RT ~ Frequency*NativeLanguage + FamilySize + (1 + Frequency + FamilySize | Subject) + (1 + NativeLanguage | Word), data=lexdec)
summary(m)

# Often, we'll want to summarize information by groups. For example, if we want to compute RT means by subject:
subj_means = lexdec %>%
  group_by(Subject) %>%
  summarize(Mean = mean(RT), SD = sd(RT), Max = max(RT))

# 3. Compute RT means by Word instead.
subj_means = lexdec %>%
  group_by(Word) %>%
  summarize(Mean = mean(RT), SD = sd(RT), Max = max(RT))

# Sometimes we want to save data.frames or R console output to a file. For example, we might want to save our newly created lexdec dataset:
write.csv(lexdec,file="data/lexdec_long.csv")

# 4. Using the R help, figure out how to suppress the row.names and the quotes in the output and re-write the file.
#    (Renumbered from the duplicate "3." in the original.)
write.csv(lexdec,file="data/lexdec_long.csv",row.names=F,quote=F)

# We can also save the console output to a file (for example, if you've run a particularly time-consuming regression analysis you may want to save the model results).
out = capture.output(summary(m))
out
cat("My awesome results", out, file="data/modeloutput.txt", sep="\n")

# If you want to save the model directly for future use:
save(m,file="data/mymodel.RData")
# You can later reload the model using load()
load("data/mymodel.RData")

# 5. Why was "mymodel.RData" a poorly chosen name for the file? Choose a better name.
#    (Renumbered from "4." in the original.)
# Because load() loads the object with its original variable name. If you load an object for the first time months after you saved it, you may not remember what you called it. For that reason it's advisable to save models in files that are named after the model, e.g.:
save(m,file="data/m.RData")
|
#Script to analyze environmental factors, and their significances, in shaping trends in zeta diversity.
# NOTE(review): flat analysis script; it depends on three local CSV inputs
# (CAGISBioData.csv, CSCI.csv, metadata.csv) under the working directory set
# below, and must be run top to bottom -- statement order matters.
library("plyr")
library(dplyr)
# NOTE(review): plyr is attached before dplyr deliberately: dplyr then masks
# plyr's count/filter/arrange (the versions used below), while join() still
# resolves to plyr::join. Changing this load order changes behavior.
library("ggplot2")
library(lubridate)
library("ape")
library("vegan")
library("microbiome")
library(data.table)
library(tidyr)
library(MASS)
library(zetadiv)
library(magrittr)
library(stats)
library(CINNA)
library(fitdistrplus)
#Check for co-occurrence frequencies by watershed in the SCCWRP data set.
# NOTE(review): setwd() makes the script machine-specific; adjust the path
# when running elsewhere.
setwd("~/Desktop/SCCWRP")
#Read in site data containing biological counts, water chemistry, and land usage values.
GISBioData <- read.table("CAGISBioData.csv", header=TRUE, sep=",",as.is=T,skip=0,fill=TRUE,check.names=FALSE)
#Filter out to taxonomic groups of interest.
GISBioData <- subset(GISBioData, MeasurementType == "benthic macroinvertebrate relative abundance")
#Remove duplicate measures.
GISBioData <- GISBioData[!duplicated(GISBioData[,c("UniqueID","FinalID","Count")]),]
#Read in sample metadata.
SCCWRP <- read.table("CSCI.csv", header=TRUE, sep=",",as.is=T,skip=0,fill=TRUE,check.names=FALSE)
#Read in functional feeding group for each taxon.
#Abbreviations used in denoting functional feeding groups are as follows ( http://www.safit.org/Docs/CABW_std_taxonomic_effort.pdf ):
#P= predator MH= macrophyte herbivore OM= omnivore
#PA= parasite PH= piercer herbivore XY= xylophage (wood eater)
#CG= collector-gatherer SC= scraper
#CF= collector filterer SH= shredder
FFG <- read.table("metadata.csv", header=TRUE, sep=",",as.is=T,skip=0,fill=TRUE,check.names=FALSE)
# Filter data so only known functional feeding groups are kept.
FFG <- subset(FFG, FunctionalFeedingGroup != "")
# Generate functional feeding group data frame.
FFG <- FFG[,c("FinalID","LifeStageCode","FunctionalFeedingGroup")]
FFG <- subset(FFG,LifeStageCode=="L" | LifeStageCode=="X" | FinalID=="Hydrophilidae" | FinalID=="Hydraenidae")
#Merge in functional feeding groups into sample data.
GISBioData <- join(GISBioData,FFG[,c("FinalID","FunctionalFeedingGroup")],by=c("FinalID"))
FFGCounts <- na.omit(as.data.frame(unique(GISBioData$FunctionalFeedingGroup)))
colnames(FFGCounts) <- c("FunctionalFeedingGroups")
#How many samples per watershed?
groupNum=20
#Select watersheds with a large enough set of samples for analysis.
watersheds <- as.data.frame(table(SCCWRP$Watershed))
colnames(watersheds) <- c("Watershed","Samples")
GISBioData <- join(GISBioData,watersheds,by=c("Watershed"))
#Get samples only found in more heavily sampled watersheds.
GISBioDataLargeWS <- subset(GISBioData,Samples>=groupNum)
#Add the number of genera per sample
taxaBySample <- count(GISBioDataLargeWS,UniqueID)
colnames(taxaBySample) <- c("UniqueID","nTaxa")
GISBioDataLargeWS <- join(GISBioDataLargeWS,taxaBySample,by=c("UniqueID"))
#Initialize data frames to compute zeta diversity for genera and functional feeding groups
selected <- GISBioDataLargeWS
selected <- arrange(selected,Year,UniqueID)
#Get zeta diversity decay parameters for taxonomic diversity for the same set of samples within a given land use band.
eLSAInput <- as.data.frame(unique(selected$FinalID))
colnames(eLSAInput) <- c("FinalID")
eLSAInput <- as.data.frame(eLSAInput[order(as.character(eLSAInput$FinalID)),])
colnames(eLSAInput) <- c("FinalID")
taxa <- eLSAInput
eLSAInputRand <- eLSAInput
selected <- selected[order(selected$Year,selected$UniqueID,selected$FinalID),]
#Get zeta diversity decay parameters for functional feeding group diversity for the same set of samples within a given land use band.
FFGInput <- as.data.frame(unique(selected$FunctionalFeedingGroup))
colnames(FFGInput) <- c("FunctionalFeedingGroup")
FFGInput <- as.data.frame(FFGInput[order(as.character(FFGInput$FunctionalFeedingGroup)),])
colnames(FFGInput) <- c("FunctionalFeedingGroup")
FFGInput <- na.omit(FFGInput)
FFGrand <- FFGInput
FFgroups <- FFGInput
#Generate presence/absence matrices for genera and functional feeding groups by stream sample.
# NOTE(review): eLSAInput/FFGInput/FFGrand/eLSAInputRand each grow by cbind()
# inside this loop (one column per sample), copying the frame every iteration
# (quadratic); acceptable at this data size but slow for many samples.
# NOTE(review): sample() below is not seeded, so the "random" null matrices
# differ between runs; call set.seed() first for reproducibility.
for(ID in unique(selected$UniqueID)){
#Add the relative taxa abundances by column to a new dataframe.
#The rows are the unique taxa in a given subset of data.
tmp <- filter(selected, UniqueID == ID)[,c("FinalID","Measurement","UniqueID")]
tmp <- as.data.frame(tmp[order(tmp$FinalID),])
tmp <- tmp[-c(3)]
colnames(tmp) <- c("FinalID",ID)
tmp <- tmp %>% group_by(FinalID) %>% summarise_if(is.numeric,mean,na.rm=TRUE)
tmp <- join(tmp,taxa,type="full",by=c("FinalID"))
tmp <- as.data.frame(tmp[order(tmp$FinalID),])
eLSAInput <- cbind(eLSAInput,tmp)
eLSAInput <- eLSAInput[,!duplicated(colnames(eLSAInput))]
#Compute functional feeding group diversity by sample and sample grouping.
tmp2 <- filter(selected, UniqueID == ID)[,c("FunctionalFeedingGroup","Count","UniqueID")]
tmp2 <- as.data.frame(tmp2[order(tmp2$FunctionalFeedingGroup),])
tmp2 <- tmp2[-c(3)]
colnames(tmp2) <- c("FunctionalFeedingGroup",ID)
tmp2 <- tmp2 %>% group_by(FunctionalFeedingGroup) %>% summarise_if(is.numeric,sum,na.rm=TRUE)
tmp2 <- join(tmp2,FFgroups,type="full",by=c("FunctionalFeedingGroup"))
tmp2 <- as.data.frame(tmp2[order(tmp2$FunctionalFeedingGroup),])
tmp2 <- tmp2[!is.na(tmp2$FunctionalFeedingGroup),]
FFGInput <- cbind(FFGInput,tmp2)
FFGInput <- FFGInput[,!duplicated(colnames(FFGInput))]
#Randomly assign functional feeding groups to their sample counts to eventually test how
#far from random their relative abundances are.
tmp3 <- tmp2[sample(nrow(tmp2)),]
tmp3$FunctionalFeedingGroup <- tmp2$FunctionalFeedingGroup
colnames(tmp3) <- c("FunctionalFeedingGroup",ID)
FFGrand <- cbind(FFGrand,tmp3)
FFGrand <- FFGrand[,!duplicated(colnames(FFGrand))]
#Randomly assign genera to their sample counts to eventually test how
#far from random their relative abundances are.
tmp4 <- tmp[sample(nrow(tmp)),]
tmp4$FinalID <- tmp$FinalID
colnames(tmp4) <- c("FinalID",ID)
eLSAInputRand <- cbind(eLSAInputRand,tmp4)
eLSAInputRand <- eLSAInputRand[,!duplicated(colnames(eLSAInputRand))]
}
#Generate a presence/absence dataframe for zeta diversity analysis of taxa.
#Rows for samples, columns for taxa IDs.
eLSAInput[is.na(eLSAInput)] <- 0
eLSANames <- eLSAInput$FinalID
data.SCCWRP <- as.data.frame(t(eLSAInput[,-c(1)]))
colnames(data.SCCWRP) <- eLSANames
data.SCCWRP[data.SCCWRP > 0] <- 1
#Generate a presence/absence dataframe for zeta diversity analysis of functional feeding groups.
#Rows for samples, columns for functional feeding group types.
FFGInput[is.na(FFGInput)] <- 0
FFGNames <- FFGInput$FunctionalFeedingGroup
ffg.SCCWRP <- as.data.frame(t(FFGInput[,-c(1)]))
colnames(ffg.SCCWRP) <- FFGNames
ffg.SCCWRP[ffg.SCCWRP > 0] <- 1
#Generate a presence/absence dataframe for zeta diversity analysis of randomly assigned functional feeding groups.
#Rows for samples, columns for functional feeding group types.
FFGrand[is.na(FFGrand)] <- 0
FFGrandNames <- FFGrand$FunctionalFeedingGroup
ffg.rand.SCCWRP <- as.data.frame(t(FFGrand[,-c(1)]))
colnames(ffg.rand.SCCWRP) <- FFGrandNames
ffg.rand.SCCWRP[ffg.rand.SCCWRP > 0] <- 1
#Generate a presence/absence dataframe for zeta diversity analysis of randomly assigned genera.
#Rows for samples, columns for genera.
eLSAInputRand[is.na(eLSAInputRand)] <- 0
eLSAInputRandRames <- eLSAInputRand$FinalID
data.rand.SCCWRP <- as.data.frame(t(eLSAInputRand[,-c(1)]))
colnames(data.rand.SCCWRP)<- eLSAInputRandRames
data.rand.SCCWRP[data.rand.SCCWRP > 0] <- 1
#Subset environmental factor data.
env.SCCWRP <- join(GISBioDataLargeWS,SCCWRP,by=c("UniqueID"))
env.SCCWRP <- env.SCCWRP[,c("UniqueID","Watershed","LU_2000_5K","altitude","Year")]
env.SCCWRP <- env.SCCWRP[!duplicated(env.SCCWRP[c("UniqueID")]),]
env.SCCWRP <- env.SCCWRP[,c("Watershed","LU_2000_5K","altitude","Year")]
env.SCCWRP$Watershed <- as.factor(env.SCCWRP$Watershed)
env.SCCWRP$Year <- as.numeric(env.SCCWRP$Year)
env.SCCWRP$altitude <- as.numeric(env.SCCWRP$altitude)
#Zeta diversity with respect to environmental variables.
# order=2 requests second-order (pairwise) zeta diversity; sam=nrow(env.SCCWRP)
# appears intended to use every site -- TODO confirm argument semantics
# against the zetadiv::Zeta.msgdm documentation.
zetaTest <- Zeta.msgdm(data.spec=data.SCCWRP,data.env=env.SCCWRP,xy=NULL,sam=nrow(env.SCCWRP),order=2,rescale=FALSE)
|
/ZetaFactors.R
|
no_license
|
levisimons/SCCWRP
|
R
| false
| false
| 8,010
|
r
|
#Script to analyze environmental factors, and their significances, in shaping trends in zeta diversity.
# NOTE(review): flat analysis script; it depends on three local CSV inputs
# (CAGISBioData.csv, CSCI.csv, metadata.csv) under the working directory set
# below, and must be run top to bottom -- statement order matters.
library("plyr")
library(dplyr)
# NOTE(review): plyr is attached before dplyr deliberately: dplyr then masks
# plyr's count/filter/arrange (the versions used below), while join() still
# resolves to plyr::join. Changing this load order changes behavior.
library("ggplot2")
library(lubridate)
library("ape")
library("vegan")
library("microbiome")
library(data.table)
library(tidyr)
library(MASS)
library(zetadiv)
library(magrittr)
library(stats)
library(CINNA)
library(fitdistrplus)
#Check for co-occurrence frequencies by watershed in the SCCWRP data set.
# NOTE(review): setwd() makes the script machine-specific; adjust the path
# when running elsewhere.
setwd("~/Desktop/SCCWRP")
#Read in site data containing biological counts, water chemistry, and land usage values.
GISBioData <- read.table("CAGISBioData.csv", header=TRUE, sep=",",as.is=T,skip=0,fill=TRUE,check.names=FALSE)
#Filter out to taxonomic groups of interest.
GISBioData <- subset(GISBioData, MeasurementType == "benthic macroinvertebrate relative abundance")
#Remove duplicate measures.
GISBioData <- GISBioData[!duplicated(GISBioData[,c("UniqueID","FinalID","Count")]),]
#Read in sample metadata.
SCCWRP <- read.table("CSCI.csv", header=TRUE, sep=",",as.is=T,skip=0,fill=TRUE,check.names=FALSE)
#Read in functional feeding group for each taxon.
#Abbreviations used in denoting functional feeding groups are as follows ( http://www.safit.org/Docs/CABW_std_taxonomic_effort.pdf ):
#P= predator MH= macrophyte herbivore OM= omnivore
#PA= parasite PH= piercer herbivore XY= xylophage (wood eater)
#CG= collector-gatherer SC= scraper
#CF= collector filterer SH= shredder
FFG <- read.table("metadata.csv", header=TRUE, sep=",",as.is=T,skip=0,fill=TRUE,check.names=FALSE)
# Filter data so only known functional feeding groups are kept.
FFG <- subset(FFG, FunctionalFeedingGroup != "")
# Generate functional feeding group data frame.
FFG <- FFG[,c("FinalID","LifeStageCode","FunctionalFeedingGroup")]
FFG <- subset(FFG,LifeStageCode=="L" | LifeStageCode=="X" | FinalID=="Hydrophilidae" | FinalID=="Hydraenidae")
#Merge in functional feeding groups into sample data.
GISBioData <- join(GISBioData,FFG[,c("FinalID","FunctionalFeedingGroup")],by=c("FinalID"))
FFGCounts <- na.omit(as.data.frame(unique(GISBioData$FunctionalFeedingGroup)))
colnames(FFGCounts) <- c("FunctionalFeedingGroups")
#How many samples per watershed?
groupNum=20
#Select watersheds with a large enough set of samples for analysis.
watersheds <- as.data.frame(table(SCCWRP$Watershed))
colnames(watersheds) <- c("Watershed","Samples")
GISBioData <- join(GISBioData,watersheds,by=c("Watershed"))
#Get samples only found in more heavily sampled watersheds.
GISBioDataLargeWS <- subset(GISBioData,Samples>=groupNum)
#Add the number of genera per sample
taxaBySample <- count(GISBioDataLargeWS,UniqueID)
colnames(taxaBySample) <- c("UniqueID","nTaxa")
GISBioDataLargeWS <- join(GISBioDataLargeWS,taxaBySample,by=c("UniqueID"))
#Initialize data frames to compute zeta diversity for genera and functional feeding groups
selected <- GISBioDataLargeWS
selected <- arrange(selected,Year,UniqueID)
#Get zeta diversity decay parameters for taxonomic diversity for the same set of samples within a given land use band.
eLSAInput <- as.data.frame(unique(selected$FinalID))
colnames(eLSAInput) <- c("FinalID")
eLSAInput <- as.data.frame(eLSAInput[order(as.character(eLSAInput$FinalID)),])
colnames(eLSAInput) <- c("FinalID")
taxa <- eLSAInput
eLSAInputRand <- eLSAInput
selected <- selected[order(selected$Year,selected$UniqueID,selected$FinalID),]
#Get zeta diversity decay parameters for functional feeding group diversity for the same set of samples within a given land use band.
FFGInput <- as.data.frame(unique(selected$FunctionalFeedingGroup))
colnames(FFGInput) <- c("FunctionalFeedingGroup")
FFGInput <- as.data.frame(FFGInput[order(as.character(FFGInput$FunctionalFeedingGroup)),])
colnames(FFGInput) <- c("FunctionalFeedingGroup")
FFGInput <- na.omit(FFGInput)
FFGrand <- FFGInput
FFgroups <- FFGInput
#Generate presence/absence matrices for genera and functional feeding groups by stream sample.
# NOTE(review): eLSAInput/FFGInput/FFGrand/eLSAInputRand each grow by cbind()
# inside this loop (one column per sample), copying the frame every iteration
# (quadratic); acceptable at this data size but slow for many samples.
# NOTE(review): sample() below is not seeded, so the "random" null matrices
# differ between runs; call set.seed() first for reproducibility.
for(ID in unique(selected$UniqueID)){
#Add the relative taxa abundances by column to a new dataframe.
#The rows are the unique taxa in a given subset of data.
tmp <- filter(selected, UniqueID == ID)[,c("FinalID","Measurement","UniqueID")]
tmp <- as.data.frame(tmp[order(tmp$FinalID),])
tmp <- tmp[-c(3)]
colnames(tmp) <- c("FinalID",ID)
tmp <- tmp %>% group_by(FinalID) %>% summarise_if(is.numeric,mean,na.rm=TRUE)
tmp <- join(tmp,taxa,type="full",by=c("FinalID"))
tmp <- as.data.frame(tmp[order(tmp$FinalID),])
eLSAInput <- cbind(eLSAInput,tmp)
eLSAInput <- eLSAInput[,!duplicated(colnames(eLSAInput))]
#Compute functional feeding group diversity by sample and sample grouping.
tmp2 <- filter(selected, UniqueID == ID)[,c("FunctionalFeedingGroup","Count","UniqueID")]
tmp2 <- as.data.frame(tmp2[order(tmp2$FunctionalFeedingGroup),])
tmp2 <- tmp2[-c(3)]
colnames(tmp2) <- c("FunctionalFeedingGroup",ID)
tmp2 <- tmp2 %>% group_by(FunctionalFeedingGroup) %>% summarise_if(is.numeric,sum,na.rm=TRUE)
tmp2 <- join(tmp2,FFgroups,type="full",by=c("FunctionalFeedingGroup"))
tmp2 <- as.data.frame(tmp2[order(tmp2$FunctionalFeedingGroup),])
tmp2 <- tmp2[!is.na(tmp2$FunctionalFeedingGroup),]
FFGInput <- cbind(FFGInput,tmp2)
FFGInput <- FFGInput[,!duplicated(colnames(FFGInput))]
#Randomly assign functional feeding groups to their sample counts to eventually test how
#far from random their relative abundances are.
tmp3 <- tmp2[sample(nrow(tmp2)),]
tmp3$FunctionalFeedingGroup <- tmp2$FunctionalFeedingGroup
colnames(tmp3) <- c("FunctionalFeedingGroup",ID)
FFGrand <- cbind(FFGrand,tmp3)
FFGrand <- FFGrand[,!duplicated(colnames(FFGrand))]
#Randomly assign genera to their sample counts to eventually test how
#far from random their relative abundances are.
tmp4 <- tmp[sample(nrow(tmp)),]
tmp4$FinalID <- tmp$FinalID
colnames(tmp4) <- c("FinalID",ID)
eLSAInputRand <- cbind(eLSAInputRand,tmp4)
eLSAInputRand <- eLSAInputRand[,!duplicated(colnames(eLSAInputRand))]
}
#Generate a presence/absence dataframe for zeta diversity analysis of taxa.
#Rows for samples, columns for taxa IDs.
eLSAInput[is.na(eLSAInput)] <- 0
eLSANames <- eLSAInput$FinalID
data.SCCWRP <- as.data.frame(t(eLSAInput[,-c(1)]))
colnames(data.SCCWRP) <- eLSANames
data.SCCWRP[data.SCCWRP > 0] <- 1
#Generate a presence/absence dataframe for zeta diversity analysis of functional feeding groups.
#Rows for samples, columns for functional feeding group types.
FFGInput[is.na(FFGInput)] <- 0
FFGNames <- FFGInput$FunctionalFeedingGroup
ffg.SCCWRP <- as.data.frame(t(FFGInput[,-c(1)]))
colnames(ffg.SCCWRP) <- FFGNames
ffg.SCCWRP[ffg.SCCWRP > 0] <- 1
#Generate a presence/absence dataframe for zeta diversity analysis of randomly assigned functional feeding groups.
#Rows for samples, columns for functional feeding group types.
FFGrand[is.na(FFGrand)] <- 0
FFGrandNames <- FFGrand$FunctionalFeedingGroup
ffg.rand.SCCWRP <- as.data.frame(t(FFGrand[,-c(1)]))
colnames(ffg.rand.SCCWRP) <- FFGrandNames
ffg.rand.SCCWRP[ffg.rand.SCCWRP > 0] <- 1
#Generate a presence/absence dataframe for zeta diversity analysis of randomly assigned genera.
#Rows for samples, columns for genera.
eLSAInputRand[is.na(eLSAInputRand)] <- 0
eLSAInputRandRames <- eLSAInputRand$FinalID
data.rand.SCCWRP <- as.data.frame(t(eLSAInputRand[,-c(1)]))
colnames(data.rand.SCCWRP)<- eLSAInputRandRames
data.rand.SCCWRP[data.rand.SCCWRP > 0] <- 1
#Subset environmental factor data.
env.SCCWRP <- join(GISBioDataLargeWS,SCCWRP,by=c("UniqueID"))
env.SCCWRP <- env.SCCWRP[,c("UniqueID","Watershed","LU_2000_5K","altitude","Year")]
env.SCCWRP <- env.SCCWRP[!duplicated(env.SCCWRP[c("UniqueID")]),]
env.SCCWRP <- env.SCCWRP[,c("Watershed","LU_2000_5K","altitude","Year")]
env.SCCWRP$Watershed <- as.factor(env.SCCWRP$Watershed)
env.SCCWRP$Year <- as.numeric(env.SCCWRP$Year)
env.SCCWRP$altitude <- as.numeric(env.SCCWRP$altitude)
#Zeta diversity with respect to environmental variables.
# order=2 requests second-order (pairwise) zeta diversity; sam=nrow(env.SCCWRP)
# appears intended to use every site -- TODO confirm argument semantics
# against the zetadiv::Zeta.msgdm documentation.
zetaTest <- Zeta.msgdm(data.spec=data.SCCWRP,data.env=env.SCCWRP,xy=NULL,sam=nrow(env.SCCWRP),order=2,rescale=FALSE)
|
# Empirical Macro Model of my THESIS
#
# INIT ####
options(max.print=3000)
# set working Directory
# NOTE(review): two setwd() calls for two machines; the second call wins, so
# comment out whichever path does not exist on the current machine.
setwd("C:/Users/Ferdi/Documents/R/C5")
setwd("C:/Users/fouwe/Documents/R/C5")
library(tseries)
library(vars)
library(lmtest)
library(urca)
library(ardl)
library(outliers)
library(strucchange)
## library(gvlma)
# Data --------------------------------------------------------------------
# Read data
# NOTE(review): 'head =' below relies on partial matching of read.csv's
# 'header' argument; spell it out as 'header =' if editing these calls.
invraw5 <- read.csv("INV5_RealGrossPrivateDomesticInvestment.csv",head = TRUE, sep=",")
profraw1 <- read.csv("ProfitsAfterTax.csv", head = TRUE, sep=",")
uraw1 <- read.csv("Capacity1_Utilization_Manuf.csv", head = TRUE, sep=",")
fininvraw <- read.csv("FinInv.csv", skip = 1,head = TRUE, sep=",")
#All FinAsset (including UNIDENTIFIED)
finInvIndeed <- read.csv("PURGED_FININV.csv", skip = 2,head = TRUE, sep=",")
#ONLY Identified Financial assets
intanginv <- read.csv("IntangibleInv.csv", skip = 1,head = TRUE, sep=",")
#UnIdentified Financial assets
prodinvraw <- read.csv("FinInv2.csv", skip = 1,head = TRUE, sep=",")
DebtToNw <- read.csv("Z1_NFCBusiness_creditMarket_Debt_asPercentageof_NetWorth.csv",
head = TRUE, sep=",")
DebtToEq <- read.csv("Z1_NFCBusiness_CreditMarketDebtAsPercentageOfMarketValueOfCorpEquities.csv",
head = TRUE, sep=",")
DebtTot <- read.csv("NFCDEBT.csv",
head = TRUE, sep=",")
#
# Make Time Series of data
inv5 <- ts(invraw5$GPDIC1, start = c(1947,1),end = c(2016,4),frequency = 4)
profit1 <- ts(profraw1$NFCPATAX, start = c(1947,1),end = c(2016,4),frequency = 4)
capu1 <- ts(uraw1$CAPUTLB00004SQ, start = c(1948,1),end = c(2016,4),frequency = 4)
FinInv <- ts(finInvIndeed$FININDEED, start = c(1951,4),
end = c(2015,1),frequency = 4)
IntInv <- ts(intanginv$intinv, start = c(1951,4),
end = c(2015,1),frequency = 4)
#INTANGIBLE INVESTMENT SERIES
ProInv <- ts(prodinvraw$physasset, start = c(1951,4),end = c(2016,4),frequency = 4)
#PRODUCTIVE INVESTMENT SERIES
FinInvHistRatio <- ts(fininvraw$hfininvratio, start = c(1951,4),end = c(2016,4),frequency = 4)
FinInvRatio <- ts(fininvraw$fininvratio, start = c(1951,4),frequency = 4)
AssetTot <- ts(fininvraw$totinv, start = c(1951,4),frequency = 4)
dbtnw <- ts(DebtToNw$NCBCMDPNWMV, start = c(1951,4),end = c(2016,4),frequency = 4)
dbteq <- ts(DebtToEq$NCBCMDPMVCE, start = c(1951,4),frequency = 4)
dbtot <- ts(DebtTot$CRDQUSANABIS, start = c(1952,1),frequency = 4)
# Regime dummies: the series start in 1958Q1, so d_ante = 1 for the first
# 104 quarters (1958Q1-1983Q4) and 0 afterwards; d_post is its complement.
d_1<- c(rep(1,104),rep(0,124))
d_ante<- ts(d_1, start = c(1958,1),frequency = 4)
d_2<- c(rep(0,104),rep(1,124))
d_post<- ts(d_2, start = c(1958,1),frequency = 4)
#
# DataSET ----------------------------------------------------------------
#Data sets arangement (as a DATAFRAME)
#Create LIST of 6 variables (i-u-r-Fi-Ii-D)
#After Peter's comment, I change 3 variables (Inv, Profit, FinInv)
# Columns: 1 = log of total assets (productive+intangible+financial),
# 2 = capacity utilisation, 3 = profit rate (profits / total assets),
# 4 = total debt / total assets, 5 = financialization share
# ((financial+intangible) / total assets), 6 = debt / net worth.
data_list<- ts.intersect(log(ProInv+IntInv+FinInv),
(capu1),
((profit1/(ProInv+IntInv+FinInv))),
dbtot/(ProInv+IntInv+FinInv),
((FinInv+IntInv)/(ProInv+IntInv+FinInv)),
#log(IntInv),
#log(FinInv),
#LogRgdp, log(inv5),
#log(dbtot),
(dbtnw))
# Restrict to the 1984Q1-2015Q1 estimation window.
data_list_w <- window(data_list,start=c(1984,1), end=c(2015,1), frequency=4)
ardl_data <- data.frame(gtot = (data_list_w[,1]),
u = data_list_w[,2],
r=(data_list_w[,3]),
d = data_list_w[,4],
etha = data_list_w[,5],
#ii = data_list_w[,6],
#fi = data_list_w[,7],
#gdp = data_list_w[,8],
#inv = data_list_w[,9],
#lgd = data_list_w[,10],
dtonw = data_list_w[,6])
#
plot.ts(ardl_data[,1:6])
# NOTE(review): the capacity-utilisation patches below are applied to
# data_list_w AFTER ardl_data was built, so ardl_data$u never receives them
# (only the r patch targets ardl_data directly). Likely the data.frame
# should be rebuilt after patching -- confirm the intended behavior.
data_list_w[101:102,2]<-data_list_w[100,2]
data_list_w[103:105,2]<-data_list_w[106,2]
ardl_data[76,"r"]<-ardl_data[75,"r"]
# ERS unit-root test on the profit rate; the result is not assigned, so it
# is only visible when running interactively.
ur.ers(ardl_data[,"r"], model="const")
outlier(ardl_data)
#PLOTS
# NOTE(review): the objects plotted here (gdpts, G_RATE, moymob, malevel,
# Ygdp_RATE, GCAP_RATE, LogRgdp, Ygdp, gdpCAP) are not defined anywhere in
# this section of the file -- they presumably come from code outside this
# chunk; verify they exist before running this block.
ts.plot(gdpts, ylab="Q-RGDP (Level)")
ts.plot(G_RATE, ylab="Q-Growth (Level)")
abline(h=0)
abline(v=2003.75, col="grey50")
abline(h=0.0125, col="red")
ts.plot(moymob, type = "h", ylab=paste0("SMA-",malevel," :Q-Growth (Level)"))
plot.default(G_RATE, type = "h")
plot.default(Ygdp_RATE, type = "h")
plot.default(GCAP_RATE, type = "h")
abline(v=1983.75, col="grey50")
ts.plot(LogRgdp)
ts.plot(Ygdp)
#LongRun growth
ts.plot(gdpCAP)
abline(v =1984)
abline(v =1945)
abline(v =1880)
abline(v =1930)
# Plot levels and first differences of log RGDP and debt/net-worth in a 2x2
# panel. The assignments inside ts.plot() also create the series objects
# (LogRgdp, dnw, gd, dnwd) that the cointegration tests below rely on.
par(mfrow=c(2,2))
# FIX: 'ylabflibra' was a typo for the 'ylab' graphical parameter.
ts.plot(LogRgdp<-vniveau[,1], ylab="RGDP (log)")
ts.plot(dnw<-vniveau[,2], ylab="Debt/net worth (level)")
# The plot of the differenced debt level is informative of the
# transformation in debt dynamics occurring from the mid 1980's.
ts.plot(gd<-vdiff_niveau[,1], ylab="RGDP (diff.)")
ts.plot(dnwd<-vdiff_niveau[,2], ylab="Debt/net worth (diff.)")
### COINTEGRAT? #####
# Johansen and Phillips-Ouliaris cointegration tests between log RGDP and
# debt-to-net-worth, with sub-sample and structural-break robustness checks.
#Johansen test
#Ho: no cointegrat? (r=0 against r>0 , then r<=1 against r>1 etc..)
# A rank r>0 implies a cointegrating relationship
# between two or possibly more time series
#MAX
jojolevel<-ca.jo(cbind(LogRgdp,dnw),ecdet="const")
summary(jojolevel)
## RESULTS: Cointegration
#TRACE
jojolevTrace<-ca.jo(cbind(LogRgdp,dnw),ecdet="const",type="trace")
summary(jojolevTrace)
## RESULTS: Cointegration
#Test for "wrongly accept COINT" for struct. Break
#(Pfaff §8.2 AND Lütkepohl, H., Saikkonen, P. and Trenkler, C. (2004), )
jojoStruct <- cajolst(cbind(LogRgdp,dnw))
summary(jojoStruct)
slot(jojoStruct, "bp")
slot(jojoStruct, "x")
slot(jojoStruct, "x")[126] # corrsponding to 1983
## RESULTS: NO Cointegration once break accounted for (1983,1)
# i.e there maybe coint just becausz of struct shift
#TEST 3 separated periods FOR FINANCIALIZATION ACCOUNT
# NOTE(review): `vniveau` is not created in this file — presumably it holds
# cbind(LogRgdp, dnw); confirm against the companion script.
vniveaupostBpt <- window(vniveau,start=1983,end=2016)
vniveauante <- window(vniveau,start=1951,end=1983)
vniveaubtwn <- window(vniveau,start=1985,end=2007)
#RESAMPLE
#POST
#JOHANSEN
jojopostBpt <- ca.jo(vniveaupostBpt[,(1:2)],ecdet="trend",type="trace") #,type="trace")
summary(jojopostBpt)
##RESULTS: COINT at 1% from 1983 on !!!
# i.e one may estimate a VECM for G&D
# goto SVAR section
##ANTE FIN°
#Johansen TRACE
jojoAnteTrace<-ca.jo(vniveauante[,(1:2)],ecdet="trend",type="trace") #,type="trace")
summary(jojoAnteTrace)
#Johansen MAX
jojoAnteMax<-ca.jo(vniveau[,(1:2)],ecdet="trend") #,type="trace")
summary(jojoAnteMax)
###RESULTS: NO COINT Neither way
#Phillips Ouliaris test # Ho: no cointegrat?
po.test(vniveauante[,(1:2)], demean = T, lshort = T) # No COINT
po.test(vniveauante[,2:1], demean = T, lshort = T) # neither the other way round
###RESULTS: Confirms NO COINT
#Test the 1983-2016 period for "wrongly accept COINT" while struct. Break
#Pfaff §8.2 AND Lütkepohl, H., Saikkonen, P. and Trenkler, C. (2004), )
jojoStr2007 <- cajolst(vniveaupostBpt[,1:2])
summary(jojoStr2007)
slot(jojoStr2007, "bp")
slot(jojoStr2007, "x")
slot(jojoStr2007, "x")[101] # corrsponding to 2008:1
## RESULTS: Cointegration is confirmed after data readjustment for break (2008,1)
# i.e no effect of 2008 financial crisis on D&G comovement
library(strucchange)
### STRUCT. BREAK TESTS -------------------------------
# Structural-break tests via strucchange empirical fluctuation processes.
#1- StrBrk_1 : EFP
# EFP = empirical fluct° process
# Ho: no struct. break (Kleiber p.171)
## GROWTH ##
#DIFF-rgdp - EFP Type= MOsum - ALL DATA RANGE
efpMo_rgdp <- efp(diff(gdpts) ~ 1, type = "Rec-MOSUM",h=0.3)
# 0.3 --> 21 years trimming
plot(efpMo_rgdp)
abline(v=1984.86, col="grey40")
abline(v=1983.5, col="grey62")
###RESULTS:RGDP BREAK in 1984 ##
# --- (review) Resolved git merge conflict (was <<<<<<< HEAD ... >>>>>>> 080a319):
# both branches contained the Fstat/growth section; the HEAD side is the
# superset (it also has the DEBT EFP test) and is kept. The discarded branch
# additionally had a `btsctest` typo for `sctest`.
## DEBT ##
### type= MOSUM
efpMo_d <- efp(dnw ~ 1, type = "Rec-MOSUM",h=0.053)#1980's break whithin 0.05-->0.07
# 0.053 --> 3 years
plot(efpMo_d)
abline(v=1965.75, col="grey50")
abline(v=1984.75, col="grey50")
###RESULTS: BREAK in 1965 then 1984 for dnw #
#Out of conf. interv.
# Ho: No Struct. Break
sctest(efpMo_rgdp)
# sctest(efpCum_g73)   # (review) disabled: `efpCum_g73` is never defined in this script and this call would error
sctest(efpMo_d)
#2- StrBrk_2 : Fstat
## GROWTH ##
#DIFF-rgdp
# F stat
fs.growth <- Fstats(diff(gdpts) ~ 1)
sctest(fs.growth, type = "supF",asymptotic = T)
sctest(fs.growth, type = "aveF",asymptotic = T)
sctest(fs.growth, type = "expF")
# Fitted models
fs.growth <- Fstats(diff(gdpts) ~ 1)
plot(fs.growth)
breakpoints(fs.growth)
bp.growth <- breakpoints(diff(gdpts) ~ 1,breaks = 1)
summary(bp.growth)
fmg0 <- lm(diff(gdpts) ~ 1)
fmgf <- lm(diff(gdpts) ~ breakfactor(bp.growth))
plot(diff(gdpts), ylab="diff.RGDP")
lines(ts(fitted(fmg0), start=c(1947.25)), col = 3)
lines(ts(fitted(fmgf), start = c(1947.25), frequency = 4), col = 4)
lines(bp.growth)
###RESULTS: BREAK in 1982:4 for LogRgdp #
#BIC of many breakpoints
# Breakpoint-count selection by BIC, long-run per-capita break search, and
# DEBT break tests (EFP + F-stats + fitted segment means).
bp.growth2 <- breakpoints(diff(gdpts) ~ 1)
summary(bp.growth2)
x <- c(3141, 3119, 3120, 3126, 3134, 3144)   # BIC values transcribed by hand from summary(bp.growth2)
plot(c(0,1,2,3,4,5), x, xlab="Number of break points", ylab="BIC", type="l")
points(c(0,1,2,3,4,5), x,type = "p")
#Out of conf. interv.
# Ho: No Struct. Break
sctest(efpMo_rgdp)
sctest(fs.growth)
## Long Term: Rgdp Per CAPITA 1800-2016 ##
fs.gpercap <- Fstats(gdpCAP ~ 1)
##Modulating the number of BP...
bp.gpercap <- breakpoints(gdpCAP ~ 1,breaks = 5)
summary(bp.gpercap)
fmg0 <- lm(gdpCAP ~ 1)
fmgf <- lm(gdpCAP ~ breakfactor(bp.gpercap))#,breaks = 1))
plot(gdpCAP)
lines(ts(fitted(fmgf), start = c(1800,1), frequency = 1), col = 4)
lines(bp.gpercap)
###RESULTS: BREAK in 1984 for gdpCAP whenever BrkPts > 1 #
## DEBT ##
# EFP process
#type= MOSUM
efpMo_d <- efp(diff(dnw) ~ 1, type = "Rec-MOSUM",h=0.145)#1980's break whithin 0.05-->0.07
# 0.053 --> 3 years
plot(efpMo_d)
abline(v=1999.5, col="grey50")
abline(v=1985.0, col="grey50")
###RESULTS: BREAK in 1985 then 1999 for dnw #
#type= ME
efpCu_d <- efp(diff(dnw) ~ 1, type = "ME")
# 0.053 --> 3 years
plot(efpCu_d)
abline(v=1999.5, col="grey50")
abline(v=1985.0, col="grey50")
###RESULTS: BREAK in 1985 then 1999 for dnw #
#Fstat
#p.vavule of F-stat
fs.debt <- Fstats(diff(dnw) ~ 1)
sctest(fs.debt, type = "supF",asymptotic = T)
sctest(fs.debt, type = "aveF",asymptotic = T)
sctest(fs.debt, type = "expF")
#BIC of many breakpoints
bp.debt1 <- breakpoints(diff(dnw) ~ 1)
summary(bp.debt1)
x <- c(570.4, 567.3, 564.1, 566.9, 569.6, 579.8)   # BIC values transcribed from summary(bp.debt1)
plot(c(0,1,2,3,4,5), x, xlab="Number of break points", ylab="BIC", type="l")
points(c(0,1,2,3,4,5), x,type = "p")
# Fitted models
fs.debt2 <- Fstats(diff(dnw) ~ 1)
breakpoints(fs.debt2)
bp.debt2 <- breakpoints(diff(dnw) ~ 1,breaks = 2)
summary(bp.debt2)
fmdnw0 <- lm(diff(dnw) ~ 1)
fmdnwf <- lm(diff(dnw) ~ breakfactor(bp.debt2))
plot(diff(dnw))
lines(ts(fitted(fmdnw0), start=c(1951)), col = 3)
lines(ts(fitted(fmdnwf), start = c(1951,4), frequency = 4), col = 4)
lines(bp.debt2)
#Stats
sctest(efpMo_d)
sctest(efpCu_d)
sctest(fs.debt2)
# ZIVOT & ANDREWS test
# Unit-root tests allowing one endogenous structural break (urca::ur.za).
# NOTE(review): the result object `za.dnw` is reused for BOTH the RGDP and the
# debt series — only the last fit survives; rename if results must be kept.
#RGDP - Level
za.dnw <- ur.za(gdpts, lag= 9, model = "intercept")
summary(za.dnw)
plot(za.dnw)
za.dnw <- ur.za(gdpts, lag= 9, model = "trend")
summary(za.dnw)
plot(za.dnw)
# result: non-signif BP in 1980:50
za.dnw <- ur.za(gdpts, lag= 9, model = "both")
summary(za.dnw)
plot(za.dnw)
#DEBT - Level
za.dnw <- ur.za(dnw, lag= 9, model = "intercept")
summary(za.dnw)
plot(za.dnw)
za.dnw <- ur.za(dnw, lag= 1, model = "trend")
summary(za.dnw)
plot(za.dnw)
# result: non-signif BP in 1998:50
za.dnw <- ur.za(dnw, lag= 9, model = "both")
summary(za.dnw)
plot(za.dnw)
#BREAK in the cointegration
# Stability of the error-correction model: the lagged static-regression
# residual is used as error-correction term, then OLS-CUSUM / ME fluctuation
# tests and sup/ave-F tests are applied to the ECM.
data_coint <- ts.intersect(gdpts, dnw, diff(gdpts),diff(dnw))
ci_dat <- data.frame(g = (data_coint[,1]), d = data_coint[,2],
dg= data_coint[,3], dd= data_coint[,4])
#ci_dat <- window(ci_dat2, start= c(1952, 1)) #, end= c(2016,4),frequency = 4)
coint.res <- residuals(lm(g ~ d, data = ci_dat))   # Engle-Granger static regression residual
coint.res <- lag(ts(coint.res, start = c(1953, 1), freq = 4), k = -1)   # one-quarter lag as ECT
data_coint <- ts.intersect(gdpts, dnw, diff(gdpts),diff(dnw),coint.res)
ci_dat <- data.frame(g = (data_coint[,1]), d = data_coint[,2],
dg= data_coint[,3], dd= data_coint[,4],
cir= data_coint[,5])
#ci_dat <- cbind(ci_dat, coint.res)
#ci_dat2 <- cbind(ci_dat2, diff(ci_dat2[,"g"]), coint.res)
ecm.model <- dg ~ cir + dd   # ECM: growth change on ECT and debt change
#EFP
ocus <- efp(ecm.model, type = "OLS-CUSUM", data = ci_dat)
me <- efp(ecm.model, type = "ME", data = ci_dat, h = 0.2)
bound.ocus <- boundary(ocus, alpha = 0.01)
plot(ocus, boundary = FALSE)
lines(bound.ocus, col = "red")
lines(-bound.ocus, col = "red")
plot(me, functional = NULL)
plot(ocus, functional = "meanL2")
sctest(ocus)
#F-stat tests
fs <- Fstats(ecm.model, from = c(1955, 1),
to = c(1990, 1), data = ci_dat)
plot(fs, alpha = 0.01)
plot(fs, aveF=T, alpha = 0.01)
# U.S RATE OF GROWTH- STRUCT BREAK ----------------------------------------
# Same break machinery applied to the quarterly RATE of growth (G_RATE).
#Non-Significant Results
#1- StrBrk_1 : EFP
# EFP = empirical fluct° process
# Ho: no struct. break (Kleiber p.171)
## RATE OF GROWTH ##
#DIFF-rgdp - EFP Type= MOsum - ALL DATA RANGE
efpMo_rgdp <- efp(G_RATE ~ 1, type = "Rec-MOSUM",h=0.05)
# 0.05 --> 21 years trimming
plot(efpMo_rgdp)
abline(v=1984.86, col="grey40")
abline(v=1983.5, col="grey62")
###RESULTS:RGDP BREAK in 1984 ##
# --- (review) Resolved orphaned git merge-conflict markers ("=======" /
# ">>>>>>> 080a319"): the second branch duplicated this Fstat section and
# contained a `btsctest` typo for `sctest`; the clean first version is kept.
#2- StrBrk_2 : Fstat
## RATE OF GROWTH ##
#G_RATE
# F stat
fs.growth <- Fstats(G_RATE ~ 1)
sctest(fs.growth, type = "supF",asymptotic = T)
sctest(fs.growth, type = "aveF",asymptotic = T)
sctest(fs.growth, type = "expF")
# Fitted models
fs.growth <- Fstats(G_RATE ~ 1)
plot(fs.growth)
breakpoints(fs.growth)
bp.growth <- breakpoints(G_RATE ~ 1,breaks = 1)
summary(bp.growth)
fmg0 <- lm(G_RATE ~ 1)
fmgf <- lm(G_RATE ~ breakfactor(bp.growth))
plot(G_RATE, ylab="diff.RGDP")
lines(ts(fitted(fmg0), start=c(1947.25)), col = 3)
lines(ts(fitted(fmgf), start = c(1947.25), frequency = 4), col = 4)
lines(bp.growth)
###RESULTS: BREAK in 1982:4 for LogRgdp #
#BIC of many breakpoints
# BIC over 0..5 breakpoints for the rate-of-growth series.
bp.growth2 <- breakpoints(G_RATE ~ 1)
summary(bp.growth2)
x <- c(3141, 3119, 3120, 3126, 3134, 3144)   # NOTE(review): identical BIC values as the diff(gdpts) section above — confirm they were re-transcribed for G_RATE
plot(c(0,1,2,3,4,5), x, xlab="Number of break points", ylab="BIC", type="l")
points(c(0,1,2,3,4,5), x,type = "p")
#Out of conf. interv.
# Ho: No Struct. Break
sctest(efpMo_rgdp)
sctest(fs.growth)
# Reduced-VAR -------------------------------------------------------------
# ---- Reduced Form VAR ----------------------------- #
# Unrestricted VAR for (growth, debt) with lag selection, stability check and
# residual diagnostics.
# NOTE(review): `var_data` and `myvar` are not created anywhere in this file —
# presumably built by a companion script; confirm before running.
## Choose optimal length for unrestricted VAR
VARselect(var_data, lag.max = 6, type = "both")
# SC & HQ --> 2 lags
# AIC & FPE --> 3 lags
## Order the 2 variables
DcG <- myvar[, c("debt","growth")]   # NOTE(review): DcG / GcD are built but never used below
GcD <- myvar[, c("growth","debt")]
## Estimate the VAR (for lag length 2 then 3)
## Here "both" means we include a constant and a time trend
GvarD <- VAR(var_data, p = 2, type = "both")   # p=2 fit is immediately overwritten by the p=3 fit
GvarD <- VAR(var_data, p = 3, type = "both")
## See results (for now, no restrictions on PRIORITY. both RFVAR are symetric)
# DvarG   # (review) disabled: `DvarG` is never assigned in this script; the bare reference would error
GvarD
summary(GvarD)
## See results for any equation in detail.
summary(GvarD, equation = "growth")
# Stability: see the roots of the companion matrix for the VAR
# The moduli of the roots should all lie within the unit circle for the VAR to be stable
# A stable VAR is stationary.
roots(GvarD)
# Two roots are close to unity.
##### Residuals' Diagnostic tests
#SERIAL: Portmanteau- and Breusch-Godfrey test for serially correlated errors
serial.test(GvarD,lags.pt = 16,type = "PT.asymptotic")
serial.test(GvarD,lags.pt = 16,type = "PT.adjusted")
#JB: Jarque-Bera tests and multivariate skewness
# and kurtosis tests for the residuals of a VAR(p) or of a VECM in levels.
normality.test(GvarD)
# Norm. OK
#ARCH:
arch.test(GvarD,lags.multi = 5)
#Heteroscedastic resid.
### VECM: (G,D) ####
# VECM for (growth, debt) with manual alpha/beta t-statistics (Pfaff-style
# slot algebra), then an identified SVECM with bootstrap and IRFs.
# NOTE(review): the successive ca.jo() calls overwrite `vecm`; only the LAST
# specification (var_data, K=3) is actually used downstream — confirm intended.
vecm <- ca.jo(cbind(dnw,LogRgdp),ecdet="trend",K=2)
vecm <- ca.jo(var_data,ecdet="trend",K=3)
vecm.r1<-cajorls(vecm,r=1)
alpha<-coef(vecm.r1$rlm)[1,]   # loading coefficients (speed of adjustment)
beta<-vecm.r1$beta             # cointegrating vector (first element normalized)
resids<-resid(vecm.r1$rlm)
N<-nrow(resids)
sigma<-crossprod(resids)/N     # residual covariance estimate
#alpha t-stats
alpha.se<-sqrt(solve(crossprod(cbind(vecm@ZK %*% beta,
vecm@Z1))) [1,1]*diag(sigma))
alpha.t<-alpha/alpha.se
#beta t-stats
beta.se<-sqrt(diag(kronecker(solve(crossprod(vecm@RK [,-1])),
solve(t(alpha) %*% solve(sigma) %*% alpha))))
beta.t<-c(NA,beta[-1]/beta.se)   # normalized first beta carries no t-stat
#Display alpha & beta (with respect. t-stat)
alpha
alpha.t
beta
beta.t
# SVECM: Growth --> Debt ---------------------------------------------------
#SVECM
# NOTE(review): again three ca.jo() calls in a row — only the last
# (var_data, K=3) feeds SVEC() below.
vecm <- ca.jo(cbind(LogRgdp,dnw),ecdet="trend",K=2)
vecm <- ca.jo(vniveaupostBpt[,(1:2)],ecdet="trend",K=2)
vecm <- ca.jo(var_data,ecdet="trend",K=3)
SR<-matrix(NA,nrow = 2,ncol = 2)   # short-run restriction matrix (NA = unrestricted)
LR<-matrix(NA,nrow = 2,ncol = 2)   # long-run restriction matrix
LR[1:2,2]<-0   # identifying restriction: second shock has no long-run effect
SR
LR
svecm<-SVEC(vecm,LR=LR,SR=SR,r=1,lrtest=F,boot = T,runs = 100)
svecm
svecm$SR
#t-stat
svecm$SR / svecm$SRse
svecm$LR
svecm$LR / svecm$LRse
svecm.irf<-irf(svecm, n.ahead = 48)
svecm.irf
plot(svecm.irf)
# SVECM : Y=(rgdp, ii, d, r) --------------------------------
# Four-variable system: VAR lag selection and diagnostics, VECM estimation
# with manual alpha/beta t-stats, then an identified SVECM with IRFs and FEVD.
# NOTE(review): `vecm_data` is not created in this file — presumably built by
# a companion script; confirm before running.
# VAR Lag Order
VARselect(vecm_data,lag.max = 8, type = "both")
# VAR estimat° (p=1, 2 & 7)
p1<-VAR(vecm_data, p=3, type = "both")   # NOTE(review): names say p=1,2,7 but the orders fitted are 3,4,5
p2<-VAR(vecm_data, p=4, type = "both")
p7<-VAR(vecm_data, p=5, type = "both")
# VAR diagnostic tests
#SERIAL: Portmanteau- and Breusch-Godfrey test for serially correlated errors
serial.test(p1,lags.pt = 16,type = "PT.asymptotic")
serial.test(p1,lags.pt = 16,type = "PT.adjusted")
serial.test(p2,lags.pt = 16,type = "PT.asymptotic")
serial.test(p2,lags.pt = 16,type = "PT.adjusted")
serial.test(p7,lags.pt = 16,type = "PT.asymptotic")
serial.test(p7,lags.pt = 16,type = "PT.adjusted")
#JB: Jarque-Bera tests and multivariate skewness
# and kurtosis tests for the residuals of a VAR(p) or of a VECM in levels.
normality.test(p1)
# Non-norm.
normality.test(p2)
# Non-norm.
normality.test(p7)
# Non-norm.
#ARCH:
arch.test(p1,lags.multi = 5)
#Heteroscedastic resid.
arch.test(p2,lags.multi = 5)
#Heteroscedastic resid.
arch.test(p7,lags.multi = 5)
#Heteroscedastic resid.
#Stability : Recursive CUMSUM
plot(stability(p1),nc=2)
plot(stability(p2),nc=2)
plot(stability(p7),nc=2)
#
#VECM - Y=gdp,ii,d,inv
#reorder data set for debt priority
vecm_data <- vecm_data[ , c("d","gdp","fii","inv")]
vecm <- ca.jo(vecm_data,ecdet="trend",K=5) #Alternative specif° #1 pass 1 coint. relat° at 5%
summary(vecm)
vecm.r1<-cajorls(vecm,r=1)
alpha<-coef(vecm.r1$rlm)[1,]   # loading coefficients
beta<-vecm.r1$beta             # cointegrating vector
resids<-resid(vecm.r1$rlm)
N<-nrow(resids)
sigma<-crossprod(resids)/N
#alpha t-stats
alpha.se<-sqrt(solve(crossprod(cbind(vecm@ZK %*% beta,
vecm@Z1))) [1,1]*diag(sigma))
alpha.t<-alpha/alpha.se
#beta t-stats
beta.se<-sqrt(diag(kronecker(solve(crossprod(vecm@RK [,-1])),
solve(t(alpha) %*% solve(sigma) %*% alpha))))
beta.t<-c(NA,beta[-1]/beta.se)
#Display alpha & beta (with respect. t-stat)
alpha
alpha.t
beta
beta.t
#SVECM
vecm <- ca.jo(vecm_data,ecdet="trend",K=5)
SR<-matrix(NA,nrow = 4,ncol = 4)   # short-run restrictions (NA = free)
LR<-matrix(NA,nrow = 4,ncol = 4)   # long-run restrictions
LR[1:4,1]<-0   # first shock: no long-run effect on any variable
SR[3,2]<-0
SR[3,4]<-0
LR[3,4]<-0
#SR[4,3]<-0
SR
LR
svecm<-SVEC(vecm,LR=LR,SR=SR,r=1,lrtest=F,boot = T,runs = 100)
svecm
svecm$SR
#t-stat
svecm$SR / svecm$SRse
svecm$LR
svecm$LR / svecm$LRse
svecm.irf<-irf(svecm,n.ahead = 144)
svecm.irf
plot(svecm.irf)
fevd.d <- fevd(svecm, n.ahead = 148)$dbtnw   # NOTE(review): vecm_data columns are d/gdp/fii/inv — confirm "$dbtnw" matches an actual equation name
fevd.d
# Stationarity ------------------------------------------------------------
# ADF / KPSS unit-root battery on levels and first differences of the ARDL
# variables; conclusion recorded below is all series I(1).
#1- ADF: Ho=non-stat. H1= diff-stat.
#2-KPSS: Ho=stat.
#LEVELS
adf.test(ardl_data[,"gtot"])
kpss.test(ardl_data[,"gtot"])
adf.test(ardl_data[,"u"])
kpss.test(ardl_data[,"u"])
adf.test(ardl_data[,"r"])
kpss.test(ardl_data[,"r"])
adf.test(ardl_data[,"d"])
kpss.test(ardl_data[,"d"])
# 1st. DIFF
adf.test(diff(ardl_data[,"gtot"]))
kpss.test(diff(ardl_data[,"gtot"]))
adf.test(diff(ardl_data[,"u"]))
kpss.test(diff(ardl_data[,"u"]))
adf.test(diff(ardl_data[,"r"]))
kpss.test(diff(ardl_data[,"r"]))
adf.test(diff(ardl_data[,"d"]))
kpss.test(diff(ardl_data[,"d"]))
# all I(1)
# Coint -------------------------------------------------------------------
# Johansen tests over the full sample and sub-samples.
# NOTE(review): `ecmeq`, `ecmeqante`, `ecmeqpost`, `ecmeqbtwn` are not created
# in this file — presumably built by a companion script; confirm.
coint52_2016 <- ca.jo(cbind(ecmeq[,1:5])) #,ecdet="const",type="trace")
summary(coint52_2016)
# as Ratio of TotalAsset -> Coint Rank= 1
coint52_85 <- ca.jo(cbind(ecmeqante[,1:5])) #,ecdet="const",type="trace")
summary(coint52_85)
# Coint Rank=2
coint85_2016 <- ca.jo(cbind(ecmeqpost[,1:5])) #,ecdet="const",type="trace")
summary(coint85_2016)
# as Ratio of TotalAsset -> Coint Rank=1
coint85_2007 <- ca.jo(cbind(ecmeqbtwn[,1:5])) #,ecdet="const",type="trace")
summary(coint85_2007)
# as Ratio of TotalAsset -> Coint Rank=1
# test for structural breack btw 85 & 2016 (2007 crisis)
cointbreak07 <- cajolst(ecmeqpost[,1:5])
summary(cointbreak07)
slot(cointbreak07,"bp")
# COINT rank 1 CONFIRMED even Break=2008:Q2
# Lag selection -----------------------------------------------------------
# Build lagged and lag-differenced regressors, then run VARselect() on dy
# with all candidate ARDL terms as exogenous regressors.
dy <- diff(data_list_w[,1])
y1 <- lag(data_list_w[,1])
u1 <- lag(data_list_w[,2])
r1 <- lag(data_list_w[,3])
d1 <- lag(data_list_w[,4])
# (review) removed a broken `for(i in 1:2) du[i] <- diff(data_list_w[,2], i)`
# loop here: `du` was never initialised (the loop would error with
# "object 'du' not found"), and the explicit du1..du4 assignments below
# already build the same lag-differences.
du0<-data_list_w[,2]
du1<-diff(data_list_w[,2],1)
du2<-diff(data_list_w[,2],2)
du3<-diff(data_list_w[,2],3)
du4<-diff(data_list_w[,2],4)
dr0<-data_list_w[,3]
dr1<-diff(data_list_w[,3],1)
dr2<-diff(data_list_w[,3],2)
dr3<-diff(data_list_w[,3],3)
dr4<-diff(data_list_w[,3],4)
dd0<-data_list_w[,4]
dd1<-diff(data_list_w[,4],1)
dd2<-diff(data_list_w[,4],2)
dd3<-diff(data_list_w[,4],3)
dd4<-diff(data_list_w[,4],4)
dd5<-diff(data_list_w[,4],5)
dd6<-diff(data_list_w[,4],6)
dd7<-diff(data_list_w[,4],7)
dd8<-diff(data_list_w[,4],8)
#
s <- ts.intersect(dy,
du0,dr0,dd0,
du1,du2,du3,du4,
dr1,dr2,
dd1,dd2,dd3,dd4,dd5,dd6,dd7,dd8,
y1,u1,r1,d1)
VARselect(s[,"dy"],lag.max = 18, type = "both",
exogen = cbind(s[,"du0"],s[,"dr0"],s[,"dd0"],
s[,"du1"],s[,"du2"],s[,"du3"],s[,"du4"],
s[,"dr1"],s[,"dr2"],   # (review) was s[,"dr1"] listed twice; dr2 is clearly intended (it is built in `s` and otherwise unused)
s[,"dd1"],s[,"dd2"],s[,"dd3"],s[,"dd4"],
s[,"dd5"],s[,"dd6"],s[,"dd7"],s[,"dd8"],
s[,"y1"],s[,"u1"],s[,"r1"],s[,"d1"]))
# 2 methods (ARDL Chapter p.54)
# 1- Informat° Criteria
# 2- iid residuals
# Ardl ------------------------------------------------------------
# ARDL bounds-testing estimation of total-investment growth on (u, r, d),
# followed by residual diagnostics.
#Merging Fi & ii into Fii=total intangibles(financial+goodwill)
#CROISS=Fii
# NOTE(review): the first ardl() fit (ylag=16, case 5) is immediately
# overwritten by the second (ylag=8, case 3); only the second is diagnosed.
Mod_sos<-ardl::ardl(gtot ~ u+r+d, data=ardl_data, ylag=16,
xlag=c(4,2,8), case = 5)
Mod_sos<-ardl::ardl(gtot ~ u + r +d, data=ardl_data, ylag=8,
xlag=c(4,8,8), case = 3)
summary(Mod_sos)
######## WALD TEST OK --> long run relationship btw i~u.r.fi+DEBT ###
bounds.test(Mod_sos)
coint(Mod_sos)
plot(Mod_sos)
# I.I.D TESTS
Box.test(Mod_sos$residuals,lag = 9, type="Ljung-Box",fitdf=4)
#Ho:INDEPENDANT
shapiro.test(Mod_sos$residuals) #Royston (1995) to be adequate for p.value < 0.1.
#Ho:nORMALITY
car::ncvTest(Mod_sos)
#Ho:constant error variance
qqnorm(Mod_sos$residuals)
qqline(Mod_sos$residuals)
bgtest(Mod_sos$residuals)   # NOTE(review): lmtest::bgtest expects a formula/lm object, not a residual vector — confirm this call works as intended
boxplot(Mod_sos$residuals)
hist(Mod_sos$residuals)
shapiro.test(Mod_sos$residuals) #Royston (1995) to be adequate for p.value < 0.1.
# Gtot --------------------------------------------------------------------
data_list<- ts.intersect(log(ProInv+IntInv+FinInv),
(capu1),
((profit1/(ProInv+IntInv+FinInv))),
dbtot/(ProInv+IntInv+FinInv),
((FinInv+IntInv)/(ProInv+IntInv+FinInv)),
log(IntInv),
log(FinInv),
LogRgdp, log(inv5),
log(dbtot),
(dbtnw),
d_ante, d_post)
data_list_w <- window(data_list,start=c(1958,1), end=c(2015,1), frequency=4)
ardl_data <- data.frame(gtot = (data_list_w[,1]),
u = data_list_w[,2],
r=(data_list_w[,3]),
d = data_list_w[,4],
etha = data_list_w[,5],
ii = data_list_w[,6],
fi = data_list_w[,7],
gdp = data_list_w[,8],
inv = data_list_w[,9],
lgd = data_list_w[,10],
dtonw = data_list_w[,11],
dA = data_list_w[,12] , dP = data_list_w[,13])
Alt1select1 <- ardl::auto.ardl(gtot~u+r+d|dP, data=ardl_data, ymax=18,
xmax=c(8,8,16),case=(3),verbose = T,ic = "aic")
Mod_sos<-ardl::ardl(gtot~u+r+d|dA , data=ardl_data, ylag=8,
xlag=c(4,8,9), case = 3)
# Ratio gama(ii) calculation
ratio_memb<- ts.intersect(log(IntInv+FinInv) , log((FinInv+IntInv)/(ProInv+IntInv+FinInv) ) )
#ratio_memb<- ts.intersect(log(ProInv) , log((ProInv)/(ProInv+IntInv+FinInv) ) )
gamma_ratio <- ratio_memb[,1] - ratio_memb[,2]
print(gamma_ratio)
ts.plot(gamma_ratio)
#
# Prod-Inv ----------------------------------------------------------------
# ARDL dataset and model for productive investment (Pinv) with
# financialization dummies.
data_list<- ts.intersect(log(ProInv),
(capu1),
((profit1/(ProInv+IntInv+FinInv))),
# (review) fixed operator precedence above: original read
# (profit1/ProInv+IntInv+FinInv), i.e. (profit1/ProInv)+IntInv+FinInv;
# every other section of this file computes the profit rate as
# profit1 / (ProInv+IntInv+FinInv).
dbtot/(ProInv+IntInv+FinInv),
((FinInv+IntInv)/(ProInv+IntInv+FinInv)),
log(IntInv),
log(FinInv),
LogRgdp, log(inv5),
log(dbtot),
(dbtnw),
d_ante , d_post,
log(FinInv+IntInv)
# (d_post*log(FinInv+IntInv))
)
data_list_w <- window(data_list,start=c(1958,1), end=c(2014,4), frequency=4)
ardl_data <- data.frame(Pinv = (data_list_w[,1]),
u = data_list_w[,2],
r=(data_list_w[,3]),
d = data_list_w[,4],
etha = data_list_w[,5],
ii = data_list_w[,6],
fi = data_list_w[,7],
gdp = data_list_w[,8],
inv = data_list_w[,9],
lgd = data_list_w[,10],
dtonw = data_list_w[,11],
dA = data_list_w[,12] , dP = data_list_w[,13],
iit = data_list_w[,14])
Alt1select1 <- ardl::auto.ardl(inv~u+r|dA, data=ardl_data, ymax=18,
xmax=c(8,8),case=(1),verbose = T,ic = "aic")
Mod_sos<-ardl::ardl(inv~u+r|dA , data=ardl_data, ylag=6,
xlag=c(6,9), case = 1)
# Ratio gama(ii) calculation
ratio_memb<- ts.intersect(IntInv+FinInv , dbtnw ) # dbtot/(ProInv+IntInv+FinInv))
gamma_ratio <- ratio_memb[,2] / ratio_memb[,1]   # debt/net-worth per unit of financial+intangible investment
print(gamma_ratio)
ts.plot(gamma_ratio)
#
# Intangible-Inv ----------------------------------------------------------
# ARDL models for intangible investment (ii) and for Minskyan debt dynamics
# (d on inv, ii, r), plus diagnostics for the last fitted model.
data_list<- ts.intersect(log(ProInv+IntInv+FinInv),
(capu1),
((profit1/(ProInv+IntInv+FinInv))),
dbtot/(ProInv+IntInv+FinInv),
((FinInv+IntInv)/(ProInv+IntInv+FinInv)),
log(IntInv),
log(FinInv),
LogRgdp, log(inv5),
log(dbtot),
(dbtnw),
d_ante, d_post)
# NOTE(review): the first window() is immediately overwritten — only the
# 1952-2015 full sample is actually used below.
data_list_w <- window(data_list,start=c(1984,1), end=c(2015,1), frequency=4)
data_list_w <- window(data_list,start=c(1952,1), end=c(2015,1), frequency=4)
ardl_data <- data.frame(gtot = (data_list_w[,1]),
u = data_list_w[,2],
r=(data_list_w[,3]),
d = data_list_w[,4],
etha = data_list_w[,5],
ii = data_list_w[,6],
fi = data_list_w[,7],
gdp = data_list_w[,8],
inv = data_list_w[,9],
lgd = data_list_w[,10],
dtonw = data_list_w[,11])
Alt1select1 <- ardl::auto.ardl(ii~u+r+d, data=ardl_data, ymax=18,
xmax=c(8,8,8),case=(1),verbose = T,ic = "aic")
Mod_sos<-ardl::ardl(ii~u+r+d , data=ardl_data, ylag=1,
xlag=c(0,1,4), case = 1)
Alt1select1 <- auto.ardl(ii~d, data=ardl_data, ymax=18,
xmax=c(8,8,8),case=(1),verbose = T,ic = "ll")
Mod_sos<-ardl(ii~d , data=ardl_data, ylag=5,
xlag=c(5), case = 1)
#test d=inv+ii (for Minky dynamics)
Alt1select1 <- ardl::auto.ardl(d ~ inv + ii , data=ardl_data, ymax=18,
xmax=c(8,8),case=1,verbose = T,ic = "ll")
Mod_sos<-ardl::ardl(d ~ inv + ii , data=ardl_data, ylag=5,
xlag=c(5,5), case = 1)
#test d=inv+ii+r (for d=inv-sRE and Minky dynamics)
Alt1select1 <- ardl::auto.ardl(d ~ inv + ii + r , data=ardl_data, ymax=18,
xmax=c(8,8,8),case=1,verbose = T,ic = "ll")
Mod_sos<-ardl::ardl(d ~ inv + ii + r , data=ardl_data, ylag=5,
xlag=c(5,5,5), case = 1)
# Diag --------------------------------------------------------------------
# Diagnostics apply to whichever Mod_sos was fitted LAST above.
summary(Mod_sos)
bounds.test(Mod_sos)
coint(Mod_sos)
Box.test(Mod_sos$residuals,lag = 9, type="Ljung-Box",fitdf=4) # I.I.D TESTS #Ho:INDEPENDANT
shapiro.test(Mod_sos$residuals) #Ho:nORMALITY
car::ncvTest(Mod_sos) #Ho:constant error variance
#
|
/Recap.R
|
no_license
|
Fouwed/C5
|
R
| false
| false
| 33,544
|
r
|
# Empirical Macro Model of my THESIS
#
# INIT ####
# Load packages and read raw FRED/Z1 csv extracts into quarterly ts objects.
options(max.print=3000)
# set working Directory
# NOTE(review): two hard-coded setwd() calls for two different machines — the
# one whose path does not exist on the current machine will error; keep only
# the relevant one (or use a relative path / here::here()).
setwd("C:/Users/Ferdi/Documents/R/C5")
setwd("C:/Users/fouwe/Documents/R/C5")
library(tseries)
library(vars)
library(lmtest)
library(urca)
library(ardl)
library(outliers)
library(strucchange)
## library(gvlma)
# Data --------------------------------------------------------------------
# Read data
invraw5 <- read.csv("INV5_RealGrossPrivateDomesticInvestment.csv",head = TRUE, sep=",")
profraw1 <- read.csv("ProfitsAfterTax.csv", head = TRUE, sep=",")
uraw1 <- read.csv("Capacity1_Utilization_Manuf.csv", head = TRUE, sep=",")
fininvraw <- read.csv("FinInv.csv", skip = 1,head = TRUE, sep=",")
#All FinAsset (including UNIDENTIFIED)
finInvIndeed <- read.csv("PURGED_FININV.csv", skip = 2,head = TRUE, sep=",")
#ONLY Identified Financial assets
intanginv <- read.csv("IntangibleInv.csv", skip = 1,head = TRUE, sep=",")
#UnIdentified Financial assets
prodinvraw <- read.csv("FinInv2.csv", skip = 1,head = TRUE, sep=",")
DebtToNw <- read.csv("Z1_NFCBusiness_creditMarket_Debt_asPercentageof_NetWorth.csv",
head = TRUE, sep=",")
DebtToEq <- read.csv("Z1_NFCBusiness_CreditMarketDebtAsPercentageOfMarketValueOfCorpEquities.csv",
head = TRUE, sep=",")
DebtTot <- read.csv("NFCDEBT.csv",
head = TRUE, sep=",")
#
# Make Time Series of data
inv5 <- ts(invraw5$GPDIC1, start = c(1947,1),end = c(2016,4),frequency = 4)
profit1 <- ts(profraw1$NFCPATAX, start = c(1947,1),end = c(2016,4),frequency = 4)
capu1 <- ts(uraw1$CAPUTLB00004SQ, start = c(1948,1),end = c(2016,4),frequency = 4)
FinInv <- ts(finInvIndeed$FININDEED, start = c(1951,4),
end = c(2015,1),frequency = 4)
IntInv <- ts(intanginv$intinv, start = c(1951,4),
end = c(2015,1),frequency = 4)
#INTANGIBLE INVESTMENT SERIES
ProInv <- ts(prodinvraw$physasset, start = c(1951,4),end = c(2016,4),frequency = 4)
#PRODUCTIVE INVESTMENT SERIES
FinInvHistRatio <- ts(fininvraw$hfininvratio, start = c(1951,4),end = c(2016,4),frequency = 4)
FinInvRatio <- ts(fininvraw$fininvratio, start = c(1951,4),frequency = 4)
AssetTot <- ts(fininvraw$totinv, start = c(1951,4),frequency = 4)
dbtnw <- ts(DebtToNw$NCBCMDPNWMV, start = c(1951,4),end = c(2016,4),frequency = 4)
dbteq <- ts(DebtToEq$NCBCMDPMVCE, start = c(1951,4),frequency = 4)
dbtot <- ts(DebtTot$CRDQUSANABIS, start = c(1952,1),frequency = 4)
d_1<- c(rep(1,104),rep(0,124))   # pre-1984 step dummy (1 for 104 qtrs from 1958:1)
d_ante<- ts(d_1, start = c(1958,1),frequency = 4)
d_2<- c(rep(0,104),rep(1,124))   # post-1984 step dummy (complement of d_ante)
d_post<- ts(d_2, start = c(1958,1),frequency = 4)
# DataSET ----------------------------------------------------------------
# NOTE(review): from here the file repeats the DataSET/PLOTS sections that
# already appear earlier — this file seems to contain the script twice;
# consider de-duplicating.
#Data sets arangement (as a DATAFRAME)
#Create LIST of 6 variables (i-u-r-Fi-Ii-D)
#After Peter's comment, I change 3 variables (Inv, Profit, FinInv)
data_list<- ts.intersect(log(ProInv+IntInv+FinInv),
(capu1),
((profit1/(ProInv+IntInv+FinInv))),
dbtot/(ProInv+IntInv+FinInv),
((FinInv+IntInv)/(ProInv+IntInv+FinInv)),
#log(IntInv),
#log(FinInv),
#LogRgdp, log(inv5),
#log(dbtot),
(dbtnw))
data_list_w <- window(data_list,start=c(1984,1), end=c(2015,1), frequency=4)
ardl_data <- data.frame(gtot = (data_list_w[,1]),
u = data_list_w[,2],
r=(data_list_w[,3]),
d = data_list_w[,4],
etha = data_list_w[,5],
#ii = data_list_w[,6],
#fi = data_list_w[,7],
#gdp = data_list_w[,8],
#inv = data_list_w[,9],
#lgd = data_list_w[,10],
dtonw = data_list_w[,6])
#
plot.ts(ardl_data[,1:6])
# Manual outlier patches (see the earlier copy of this section for caveats).
data_list_w[101:102,2]<-data_list_w[100,2]
data_list_w[103:105,2]<-data_list_w[106,2]
ardl_data[76,"r"]<-ardl_data[75,"r"]
ur.ers(ardl_data[,"r"], model="const")
outlier(ardl_data)
#PLOTS
ts.plot(gdpts, ylab="Q-RGDP (Level)")
ts.plot(G_RATE, ylab="Q-Growth (Level)")
abline(h=0)
abline(v=2003.75, col="grey50")
abline(h=0.0125, col="red")
ts.plot(moymob, type = "h", ylab=paste0("SMA-",malevel," :Q-Growth (Level)"))
plot.default(G_RATE, type = "h")
plot.default(Ygdp_RATE, type = "h")
plot.default(GCAP_RATE, type = "h")
abline(v=1983.75, col="grey50")
ts.plot(LogRgdp)
ts.plot(Ygdp)
#LongRun growth
ts.plot(gdpCAP)
abline(v =1984)
abline(v =1945)
abline(v =1880)
abline(v =1930)
# plot level & Diff-lev
# 2x2 panel: levels and first differences of RGDP and debt/net-worth.
# NOTE(review): `vniveau` / `vdiff_niveau` are never defined in this script.
par(mfrow=c(2,2))
ts.plot(LogRgdp<-vniveau[,1], ylab="RGDP (log)")   # (review) fixed arg name: was `ylabflibra`, not a valid graphical parameter
ts.plot(dnw<-vniveau[,2], ylab="Debt/net worth (level)")
# plot of diff_Debt_level is informative of
# the transformation in debt dynamics occuring from mid 1980's
ts.plot(gd<-vdiff_niveau[,1], ylab="RGDP (diff.)")
ts.plot(dnwd<-vdiff_niveau[,2], ylab="Debt/net worth (diff.)")
### COINTEGRAT? #####
# NOTE(review): duplicate of the cointegration / EFP sections earlier in this
# file — see those copies for detailed annotations.
#Johansen test
#Ho: no cointegrat? (r=0 against r>0 , then r<=1 against r>1 etc..)
# A rank r>0 implies a cointegrating relationship
# between two or possibly more time series
#MAX
jojolevel<-ca.jo(cbind(LogRgdp,dnw),ecdet="const")
summary(jojolevel)
## RESULTS: Cointegration
#TRACE
jojolevTrace<-ca.jo(cbind(LogRgdp,dnw),ecdet="const",type="trace")
summary(jojolevTrace)
## RESULTS: Cointegration
#Test for "wrongly accept COINT" for struct. Break
#(Pfaff §8.2 AND Lütkepohl, H., Saikkonen, P. and Trenkler, C. (2004), )
jojoStruct <- cajolst(cbind(LogRgdp,dnw))
summary(jojoStruct)
slot(jojoStruct, "bp")
slot(jojoStruct, "x")
slot(jojoStruct, "x")[126] # corrsponding to 1983
## RESULTS: NO Cointegration once break accounted for (1983,1)
# i.e there maybe coint just becausz of struct shift
#TEST 3 separated periods FOR FINANCIALIZATION ACCOUNT
vniveaupostBpt <- window(vniveau,start=1983,end=2016)
vniveauante <- window(vniveau,start=1951,end=1983)
vniveaubtwn <- window(vniveau,start=1985,end=2007)
#RESAMPLE
#POST
#JOHANSEN
jojopostBpt <- ca.jo(vniveaupostBpt[,(1:2)],ecdet="trend",type="trace") #,type="trace")
summary(jojopostBpt)
##RESULTS: COINT at 1% from 1983 on !!!
# i.e one may estimate a VECM for G&D
# goto SVAR section
##ANTE FIN°
#Johansen TRACE
jojoAnteTrace<-ca.jo(vniveauante[,(1:2)],ecdet="trend",type="trace") #,type="trace")
summary(jojoAnteTrace)
#Johansen MAX
jojoAnteMax<-ca.jo(vniveau[,(1:2)],ecdet="trend") #,type="trace")
summary(jojoAnteMax)
###RESULTS: NO COINT Neither way
#Phillips Ouliaris test # Ho: no cointegrat?
po.test(vniveauante[,(1:2)], demean = T, lshort = T) # No COINT
po.test(vniveauante[,2:1], demean = T, lshort = T) # neither the other way round
###RESULTS: Confirms NO COINT
#Test the 1983-2016 period for "wrongly accept COINT" while struct. Break
#Pfaff §8.2 AND Lütkepohl, H., Saikkonen, P. and Trenkler, C. (2004), )
jojoStr2007 <- cajolst(vniveaupostBpt[,1:2])
summary(jojoStr2007)
slot(jojoStr2007, "bp")
slot(jojoStr2007, "x")
slot(jojoStr2007, "x")[101] # corrsponding to 2008:1
## RESULTS: Cointegration is confirmed after data readjustment for break (2008,1)
# i.e no effect of 2008 financial crisis on D&G comovement
library(strucchange)
### STRUCT. BREAK TESTS -------------------------------
#1- StrBrk_1 : EFP
# EFP = empirical fluct° process
# Ho: no struct. break (Kleiber p.171)
## GROWTH ##
#DIFF-rgdp - EFP Type= MOsum - ALL DATA RANGE
efpMo_rgdp <- efp(diff(gdpts) ~ 1, type = "Rec-MOSUM",h=0.3)
# 0.3 --> 21 years trimming
plot(efpMo_rgdp)
abline(v=1984.86, col="grey40")
abline(v=1983.5, col="grey62")
###RESULTS:RGDP BREAK in 1984 ##
# --- (review) Resolved git merge conflict (was <<<<<<< HEAD ... >>>>>>> 080a319),
# same conflict as in the earlier copy of this section: the HEAD side is the
# superset (includes the DEBT EFP test) and is kept; the discarded branch had
# a `btsctest` typo for `sctest`.
## DEBT ##
### type= MOSUM
efpMo_d <- efp(dnw ~ 1, type = "Rec-MOSUM",h=0.053)#1980's break whithin 0.05-->0.07
# 0.053 --> 3 years
plot(efpMo_d)
abline(v=1965.75, col="grey50")
abline(v=1984.75, col="grey50")
###RESULTS: BREAK in 1965 then 1984 for dnw #
#Out of conf. interv.
# Ho: No Struct. Break
sctest(efpMo_rgdp)
# sctest(efpCum_g73)   # (review) disabled: `efpCum_g73` is never defined in this script and this call would error
sctest(efpMo_d)
#2- StrBrk_2 : Fstat
## GROWTH ##
#DIFF-rgdp
# F stat
fs.growth <- Fstats(diff(gdpts) ~ 1)
sctest(fs.growth, type = "supF",asymptotic = T)
sctest(fs.growth, type = "aveF",asymptotic = T)
sctest(fs.growth, type = "expF")
# Fitted models
fs.growth <- Fstats(diff(gdpts) ~ 1)
plot(fs.growth)
breakpoints(fs.growth)
bp.growth <- breakpoints(diff(gdpts) ~ 1,breaks = 1)
summary(bp.growth)
fmg0 <- lm(diff(gdpts) ~ 1)
fmgf <- lm(diff(gdpts) ~ breakfactor(bp.growth))
plot(diff(gdpts), ylab="diff.RGDP")
lines(ts(fitted(fmg0), start=c(1947.25)), col = 3)
lines(ts(fitted(fmgf), start = c(1947.25), frequency = 4), col = 4)
lines(bp.growth)
###RESULTS: BREAK in 1982:4 for LogRgdp #
#BIC of many breakpoints
bp.growth2 <- breakpoints(diff(gdpts) ~ 1)
summary(bp.growth2)
x <- c(3141, 3119, 3120, 3126, 3134, 3144)
plot(c(0,1,2,3,4,5), x, xlab="Number of break points", ylab="BIC", type="l")
points(c(0,1,2,3,4,5), x,type = "p")
#Out of conf. interv.
# Ho: No Struct. Break
sctest(efpMo_rgdp)
sctest(fs.growth)
## Long Term: Rgdp Per CAPITA 1800-2016 ##
# F-statistics and up-to-5-break segmentation of annual GDP per capita.
fs.gpercap <- Fstats(gdpCAP ~ 1)
##Modulating the number of BP...
bp.gpercap <- breakpoints(gdpCAP ~ 1,breaks = 5)
summary(bp.gpercap)
fmg0 <- lm(gdpCAP ~ 1)
fmgf <- lm(gdpCAP ~ breakfactor(bp.gpercap))#,breaks = 1))
plot(gdpCAP)
lines(ts(fitted(fmgf), start = c(1800,1), frequency = 1), col = 4)
lines(bp.gpercap)
###RESULTS: BREAK in 1984 for gdpCAP whenever BrkPts > 1 #
## DEBT ##
# EFP process on differenced debt-to-net-worth.
#type= MOSUM
efpMo_d <- efp(diff(dnw) ~ 1, type = "Rec-MOSUM",h=0.145)#1980's break within 0.05-->0.07
# NOTE(review): bandwidth here is 0.145, not 0.053 as the next comment says.
# 0.053 --> 3 years
plot(efpMo_d)
abline(v=1999.5, col="grey50")
abline(v=1985.0, col="grey50")
###RESULTS: BREAK in 1985 then 1999 for dnw #
#type= ME (moving estimates process on the same series)
efpCu_d <- efp(diff(dnw) ~ 1, type = "ME")
# 0.053 --> 3 years
plot(efpCu_d)
abline(v=1999.5, col="grey50")
abline(v=1985.0, col="grey50")
###RESULTS: BREAK in 1985 then 1999 for dnw #
#Fstat
#p.value of F-stat for differenced debt-to-net-worth
fs.debt <- Fstats(diff(dnw) ~ 1)
sctest(fs.debt, type = "supF",asymptotic = T)
sctest(fs.debt, type = "aveF",asymptotic = T)
sctest(fs.debt, type = "expF")
#BIC of many breakpoints
bp.debt1 <- breakpoints(diff(dnw) ~ 1)
summary(bp.debt1)
# NOTE(review): hard-coded BIC values from a previous summary(bp.debt1) run.
x <- c(570.4, 567.3, 564.1, 566.9, 569.6, 579.8)
plot(c(0,1,2,3,4,5), x, xlab="Number of break points", ylab="BIC", type="l")
points(c(0,1,2,3,4,5), x,type = "p")
# Fitted models: constant-only vs. two-break segmented fit.
fs.debt2 <- Fstats(diff(dnw) ~ 1)
breakpoints(fs.debt2)
bp.debt2 <- breakpoints(diff(dnw) ~ 1,breaks = 2)
summary(bp.debt2)
fmdnw0 <- lm(diff(dnw) ~ 1)
fmdnwf <- lm(diff(dnw) ~ breakfactor(bp.debt2))
plot(diff(dnw))
lines(ts(fitted(fmdnw0), start=c(1951)), col = 3)
lines(ts(fitted(fmdnwf), start = c(1951,4), frequency = 4), col = 4)
lines(bp.debt2)
#Stats
sctest(efpMo_d)
sctest(efpCu_d)
sctest(fs.debt2)
# ZIVOT & ANDREWS test (unit root allowing for one endogenous break)
# NOTE(review): the result object is named `za.dnw` throughout, even for the
# models estimated on `gdpts`; each call overwrites the previous one.
#RGDP - Level
za.dnw <- ur.za(gdpts, lag= 9, model = "intercept")
summary(za.dnw)
plot(za.dnw)
za.dnw <- ur.za(gdpts, lag= 9, model = "trend")
summary(za.dnw)
plot(za.dnw)
# result: non-signif BP in 1980:50
za.dnw <- ur.za(gdpts, lag= 9, model = "both")
summary(za.dnw)
plot(za.dnw)
#DEBT - Level
za.dnw <- ur.za(dnw, lag= 9, model = "intercept")
summary(za.dnw)
plot(za.dnw)
# NOTE(review): lag = 1 here, unlike the other ur.za calls which use lag = 9.
za.dnw <- ur.za(dnw, lag= 1, model = "trend")
summary(za.dnw)
plot(za.dnw)
# result: non-signif BP in 1998:50
za.dnw <- ur.za(dnw, lag= 9, model = "both")
summary(za.dnw)
plot(za.dnw)
#BREAK in the cointegration
# Build an ECM (error-correction model) data set: levels, differences and
# the lagged cointegrating residual, then test the ECM for stability.
data_coint <- ts.intersect(gdpts, dnw, diff(gdpts),diff(dnw))
ci_dat <- data.frame(g = (data_coint[,1]), d = data_coint[,2],
dg= data_coint[,3], dd= data_coint[,4])
#ci_dat <- window(ci_dat2, start= c(1952, 1)) #, end= c(2016,4),frequency = 4)
# Cointegrating residual from the static regression g ~ d, lagged one quarter.
coint.res <- residuals(lm(g ~ d, data = ci_dat))
coint.res <- lag(ts(coint.res, start = c(1953, 1), freq = 4), k = -1)
data_coint <- ts.intersect(gdpts, dnw, diff(gdpts),diff(dnw),coint.res)
ci_dat <- data.frame(g = (data_coint[,1]), d = data_coint[,2],
dg= data_coint[,3], dd= data_coint[,4],
cir= data_coint[,5])
#ci_dat <- cbind(ci_dat, coint.res)
#ci_dat2 <- cbind(ci_dat2, diff(ci_dat2[,"g"]), coint.res)
ecm.model <- dg ~ cir + dd
#EFP stability tests on the ECM equation
ocus <- efp(ecm.model, type = "OLS-CUSUM", data = ci_dat)
me <- efp(ecm.model, type = "ME", data = ci_dat, h = 0.2)
bound.ocus <- boundary(ocus, alpha = 0.01)
plot(ocus, boundary = FALSE)
lines(bound.ocus, col = "red")
lines(-bound.ocus, col = "red")
plot(me, functional = NULL)
plot(ocus, functional = "meanL2")
sctest(ocus)
#F-stat tests over the 1955:1-1990:1 window
fs <- Fstats(ecm.model, from = c(1955, 1),
to = c(1990, 1), data = ci_dat)
plot(fs, alpha = 0.01)
plot(fs, aveF=T, alpha = 0.01)
# U.S RATE OF GROWTH- STRUCT BREAK ----------------------------------------
#Non-Significant Results
#1- StrBrk_1 : EFP
# EFP = empirical fluct° process
# Ho: no struct. break (Kleiber p.171)
## RATE OF GROWTH ##
#DIFF-rgdp - EFP Type= MOsum - ALL DATA RANGE
efpMo_rgdp <- efp(G_RATE ~ 1, type = "Rec-MOSUM",h=0.05)
# 0.05 --> 21 years trimming
plot(efpMo_rgdp)
abline(v=1984.86, col="grey40")
abline(v=1983.5, col="grey62")
###RESULTS:RGDP BREAK in 1984 ##
#2- StrBrk_2 : Fstat
## RATE OF GROWTH ##
#G_RATE
# F stat
fs.growth <- Fstats(G_RATE ~ 1)
sctest(fs.growth, type = "supF",asymptotic = T)
sctest(fs.growth, type = "aveF",asymptotic = T)
sctest(fs.growth, type = "expF")
# Fitted models: constant-only fit vs. one-break segmented fit.
fs.growth <- Fstats(G_RATE ~ 1)
plot(fs.growth)
breakpoints(fs.growth)
bp.growth <- breakpoints(G_RATE ~ 1,breaks = 1)
summary(bp.growth)
fmg0 <- lm(G_RATE ~ 1)
fmgf <- lm(G_RATE ~ breakfactor(bp.growth))
plot(G_RATE, ylab="diff.RGDP")
lines(ts(fitted(fmg0), start=c(1947.25)), col = 3)
lines(ts(fitted(fmgf), start = c(1947.25), frequency = 4), col = 4)
lines(bp.growth)
###RESULTS: BREAK in 1982:4 for LogRgdp #
# FIX(review): an unresolved git merge conflict (======= / >>>>>>> 080a319...)
# was removed here; the markers are syntax errors in R. The dropped side was
# a duplicate of the section above except for a `btsctest` typo (`sctest`).
#BIC of many breakpoints
bp.growth2 <- breakpoints(G_RATE ~ 1)
summary(bp.growth2)
# NOTE(review): hard-coded BIC values from a previous run, not recomputed.
x <- c(3141, 3119, 3120, 3126, 3134, 3144)
plot(c(0,1,2,3,4,5), x, xlab="Number of break points", ylab="BIC", type="l")
points(c(0,1,2,3,4,5), x,type = "p")
#Out of conf. interv.
# Ho: No Struct. Break
sctest(efpMo_rgdp)
sctest(fs.growth)
# Reduced-VAR -------------------------------------------------------------
# ---- Reduced Form VAR ----------------------------- #
## Choose optimal length for unrestricted VAR
VARselect(var_data, lag.max = 6, type = "both")
# SC & HQ --> 2 lags
# AIC & FPE --> 3 lags
## Order the 2 variables
# NOTE(review): DcG / GcD are built here but never used below (the VARs are
# estimated on var_data instead) - confirm whether they are still needed.
DcG <- myvar[, c("debt","growth")]
GcD <- myvar[, c("growth","debt")]
## Estimate the VAR (for lag length 2 then 3)
## Here "both" means we include a constant and a time trend
# NOTE(review): the p = 2 fit is immediately overwritten by the p = 3 fit.
GvarD <- VAR(var_data, p = 2, type = "both")
GvarD <- VAR(var_data, p = 3, type = "both")
## See results (for now, no restrictions on PRIORITY. both RFVAR are symetric)
# NOTE(review): DvarG is printed but not assigned in this section - it must
# be defined earlier in the file, or this line errors. Verify.
DvarG
GvarD
summary(GvarD)
## See results for any equation in detail.
summary(GvarD, equation = "growth")
# Stability: see the roots of the companion matrix for the VAR
# The moduli of the roots should all lie within the unit circle for the VAR to be stable
# A stable VAR is stationary.
roots(GvarD)
# Two roots are close to unity.
##### Residuals' Diagnostic tests
#SERIAL: Portmanteau- and Breusch-Godfrey test for serially correlated errors
serial.test(GvarD,lags.pt = 16,type = "PT.asymptotic")
serial.test(GvarD,lags.pt = 16,type = "PT.adjusted")
#JB: Jarque-Bera tests and multivariate skewness
# and kurtosis tests for the residuals of a VAR(p) or of a VECM in levels.
normality.test(GvarD)
# Norm. OK
#ARCH:
arch.test(GvarD,lags.multi = 5)
#Heteroscedastic resid.
### VECM: (G,D) ####
# Johansen cointegration; the second ca.jo call overwrites the first.
vecm <- ca.jo(cbind(dnw,LogRgdp),ecdet="trend",K=2)
vecm <- ca.jo(var_data,ecdet="trend",K=3)
vecm.r1<-cajorls(vecm,r=1)
alpha<-coef(vecm.r1$rlm)[1,]
beta<-vecm.r1$beta
resids<-resid(vecm.r1$rlm)
N<-nrow(resids)
sigma<-crossprod(resids)/N
#alpha t-stats (standard errors from the restricted-VECM regressors)
alpha.se<-sqrt(solve(crossprod(cbind(vecm@ZK %*% beta,
vecm@Z1))) [1,1]*diag(sigma))
alpha.t<-alpha/alpha.se
#beta t-stats
beta.se<-sqrt(diag(kronecker(solve(crossprod(vecm@RK [,-1])),
solve(t(alpha) %*% solve(sigma) %*% alpha))))
beta.t<-c(NA,beta[-1]/beta.se)
#Display alpha & beta (with respect. t-stat)
alpha
alpha.t
beta
beta.t
# SVECM: Growth --> Debt ---------------------------------------------------
#SVECM
# NOTE(review): the first two ca.jo calls are overwritten; only the
# var_data / K=3 specification is actually used below.
vecm <- ca.jo(cbind(LogRgdp,dnw),ecdet="trend",K=2)
vecm <- ca.jo(vniveaupostBpt[,(1:2)],ecdet="trend",K=2)
vecm <- ca.jo(var_data,ecdet="trend",K=3)
# Identification: NA = unrestricted, 0 = excluded. LR[1:2,2] = 0 imposes
# that the second shock has no long-run effect on either variable.
SR<-matrix(NA,nrow = 2,ncol = 2)
LR<-matrix(NA,nrow = 2,ncol = 2)
LR[1:2,2]<-0
SR
LR
svecm<-SVEC(vecm,LR=LR,SR=SR,r=1,lrtest=F,boot = T,runs = 100)
svecm
svecm$SR
#t-stat (bootstrap standard errors from runs = 100)
svecm$SR / svecm$SRse
svecm$LR
svecm$LR / svecm$LRse
# Impulse responses over 48 quarters (12 years).
svecm.irf<-irf(svecm, n.ahead = 48)
svecm.irf
plot(svecm.irf)
# SVECM : Y=(rgdp, ii, d, r) --------------------------------
# VAR Lag Order
VARselect(vecm_data,lag.max = 8, type = "both")
# VAR estimation - NOTE(review): the comment originally said p=1, 2 & 7,
# but the fitted orders are p = 3, 4, 5 (names p1/p2/p7 are stale).
p1<-VAR(vecm_data, p=3, type = "both")
p2<-VAR(vecm_data, p=4, type = "both")
p7<-VAR(vecm_data, p=5, type = "both")
# VAR diagnostic tests
#SERIAL: Portmanteau- and Breusch-Godfrey test for serially correlated errors
serial.test(p1,lags.pt = 16,type = "PT.asymptotic")
serial.test(p1,lags.pt = 16,type = "PT.adjusted")
serial.test(p2,lags.pt = 16,type = "PT.asymptotic")
serial.test(p2,lags.pt = 16,type = "PT.adjusted")
serial.test(p7,lags.pt = 16,type = "PT.asymptotic")
serial.test(p7,lags.pt = 16,type = "PT.adjusted")
#JB: Jarque-Bera tests and multivariate skewness
# and kurtosis tests for the residuals of a VAR(p) or of a VECM in levels.
normality.test(p1)
# Non-norm.
normality.test(p2)
# Non-norm.
normality.test(p7)
# Non-norm.
#ARCH:
arch.test(p1,lags.multi = 5)
#Heteroscedastic resid.
arch.test(p2,lags.multi = 5)
#Heteroscedastic resid.
arch.test(p7,lags.multi = 5)
#Heteroscedastic resid.
#Stability : Recursive CUMSUM
plot(stability(p1),nc=2)
plot(stability(p2),nc=2)
plot(stability(p7),nc=2)
#
#VECM - Y=gdp,ii,d,inv
#reorder data set for debt priority
vecm_data <- vecm_data[ , c("d","gdp","fii","inv")]
vecm <- ca.jo(vecm_data,ecdet="trend",K=5) #Alternative specif° #1 pass 1 coint. relat° at 5%
summary(vecm)
# Restricted VECM with cointegration rank r = 1.
vecm.r1<-cajorls(vecm,r=1)
alpha<-coef(vecm.r1$rlm)[1,]
beta<-vecm.r1$beta
resids<-resid(vecm.r1$rlm)
N<-nrow(resids)
sigma<-crossprod(resids)/N
#alpha t-stats (standard errors built from the ca.jo slots)
alpha.se<-sqrt(solve(crossprod(cbind(vecm@ZK %*% beta,
vecm@Z1))) [1,1]*diag(sigma))
alpha.t<-alpha/alpha.se
#beta t-stats (first element normalised, hence NA)
beta.se<-sqrt(diag(kronecker(solve(crossprod(vecm@RK [,-1])),
solve(t(alpha) %*% solve(sigma) %*% alpha))))
beta.t<-c(NA,beta[-1]/beta.se)
#Display alpha & beta (with respect. t-stat)
alpha
alpha.t
beta
beta.t
#SVECM
vecm <- ca.jo(vecm_data,ecdet="trend",K=5)
# Identification: NA = unrestricted, 0 = excluded.
# LR[1:4,1] = 0: first shock has no long-run effect on any variable;
# SR[3,2], SR[3,4], LR[3,4]: contemporaneous / long-run zero restrictions
# on the third equation.
SR<-matrix(NA,nrow = 4,ncol = 4)
LR<-matrix(NA,nrow = 4,ncol = 4)
LR[1:4,1]<-0
SR[3,2]<-0
SR[3,4]<-0
LR[3,4]<-0
#SR[4,3]<-0
SR
LR
svecm<-SVEC(vecm,LR=LR,SR=SR,r=1,lrtest=F,boot = T,runs = 100)
svecm
svecm$SR
#t-stat (bootstrap standard errors)
svecm$SR / svecm$SRse
svecm$LR
svecm$LR / svecm$LRse
# Impulse responses over 144 quarters (36 years).
svecm.irf<-irf(svecm,n.ahead = 144)
svecm.irf
plot(svecm.irf)
# Forecast error variance decomposition for the debt-to-net-worth equation.
fevd.d <- fevd(svecm, n.ahead = 148)$dbtnw
fevd.d
# Stationarity ------------------------------------------------------------
# Each series is tested in levels and first differences. The calls are kept
# as top-level expressions (not looped) so every test result auto-prints.
#1- ADF: Ho=non-stat. H1= diff-stat.
#2-KPSS: Ho=stat.
#LEVELS
adf.test(ardl_data[,"gtot"])
kpss.test(ardl_data[,"gtot"])
adf.test(ardl_data[,"u"])
kpss.test(ardl_data[,"u"])
adf.test(ardl_data[,"r"])
kpss.test(ardl_data[,"r"])
adf.test(ardl_data[,"d"])
kpss.test(ardl_data[,"d"])
# 1st. DIFF
adf.test(diff(ardl_data[,"gtot"]))
kpss.test(diff(ardl_data[,"gtot"]))
adf.test(diff(ardl_data[,"u"]))
kpss.test(diff(ardl_data[,"u"]))
adf.test(diff(ardl_data[,"r"]))
kpss.test(diff(ardl_data[,"r"]))
adf.test(diff(ardl_data[,"d"]))
kpss.test(diff(ardl_data[,"d"]))
# all I(1)
# Coint -------------------------------------------------------------------
# Johansen tests on sub-periods of the 5-variable ECM data set.
coint52_2016 <- ca.jo(cbind(ecmeq[,1:5])) #,ecdet="const",type="trace")
summary(coint52_2016)
# as Ratio of TotalAsset -> Coint Rank= 1
coint52_85 <- ca.jo(cbind(ecmeqante[,1:5])) #,ecdet="const",type="trace")
summary(coint52_85)
# Coint Rank=2
coint85_2016 <- ca.jo(cbind(ecmeqpost[,1:5])) #,ecdet="const",type="trace")
summary(coint85_2016)
# as Ratio of TotalAsset -> Coint Rank=1
coint85_2007 <- ca.jo(cbind(ecmeqbtwn[,1:5])) #,ecdet="const",type="trace")
summary(coint85_2007)
# as Ratio of TotalAsset -> Coint Rank=1
# test for structural break btw 85 & 2016 (2007 crisis)
# cajolst: Johansen test allowing for a level shift at an unknown time.
cointbreak07 <- cajolst(ecmeqpost[,1:5])
summary(cointbreak07)
slot(cointbreak07,"bp")
# COINT rank 1 CONFIRMED even Break=2008:Q2
# Lag selection -----------------------------------------------------------
# Build the dependent variable (dy), one-period lags of the levels
# (y1, u1, r1, d1) and differenced regressors at several lag orders, then
# run VARselect on dy with everything else as exogenous regressors.
dy <- diff(data_list_w[,1])
y1 <- lag(data_list_w[,1])
u1 <- lag(data_list_w[,2])
r1 <- lag(data_list_w[,3])
d1 <- lag(data_list_w[,4])
# FIX(review): removed the original `for(i in 1:2) du[i] <- diff(data_list_w[,2], i)`
# loop. `du` was never initialised, so the loop aborted the script with
# "object 'du' not found", and it attempted to store whole series into scalar
# slots anyway. The explicit du0..du4 assignments below supersede it.
du0<-data_list_w[,2]
du1<-diff(data_list_w[,2],1)
du2<-diff(data_list_w[,2],2)
du3<-diff(data_list_w[,2],3)
du4<-diff(data_list_w[,2],4)
dr0<-data_list_w[,3]
dr1<-diff(data_list_w[,3],1)
dr2<-diff(data_list_w[,3],2)
dr3<-diff(data_list_w[,3],3)
dr4<-diff(data_list_w[,3],4)
dd0<-data_list_w[,4]
dd1<-diff(data_list_w[,4],1)
dd2<-diff(data_list_w[,4],2)
dd3<-diff(data_list_w[,4],3)
dd4<-diff(data_list_w[,4],4)
dd5<-diff(data_list_w[,4],5)
dd6<-diff(data_list_w[,4],6)
dd7<-diff(data_list_w[,4],7)
dd8<-diff(data_list_w[,4],8)
# Align all series on their common sample.
s <- ts.intersect(dy,
                  du0,dr0,dd0,
                  du1,du2,du3,du4,
                  dr1,dr2,
                  dd1,dd2,dd3,dd4,dd5,dd6,dd7,dd8,
                  y1,u1,r1,d1)
# FIX(review): the exogen matrix originally passed s[,"dr1"] twice; the second
# occurrence was replaced with s[,"dr2"], which is built in `s` above but was
# never used - consistent with the du1..du4 / dd1..dd8 pattern.
VARselect(s[,"dy"],lag.max = 18, type = "both",
          exogen = cbind(s[,"du0"],s[,"dr0"],s[,"dd0"],
                         s[,"du1"],s[,"du2"],s[,"du3"],s[,"du4"],
                         s[,"dr1"],s[,"dr2"],
                         s[,"dd1"],s[,"dd2"],s[,"dd3"],s[,"dd4"],
                         s[,"dd5"],s[,"dd6"],s[,"dd7"],s[,"dd8"],
                         s[,"y1"],s[,"u1"],s[,"r1"],s[,"d1"]))
# 2 methods (ARDL Chapter p.54)
# 1- Informat° Criteria
# 2- iid residuals
# Ardl ------------------------------------------------------------
#Merging Fi & ii into Fii=total intangibles(financial+goodwill)
#CROISS=Fii
# NOTE(review): the first ardl() fit is immediately overwritten by the second.
Mod_sos<-ardl::ardl(gtot ~ u+r+d, data=ardl_data, ylag=16,
xlag=c(4,2,8), case = 5)
Mod_sos<-ardl::ardl(gtot ~ u + r +d, data=ardl_data, ylag=8,
xlag=c(4,8,8), case = 3)
summary(Mod_sos)
######## WALD TEST OK --> long run relationship btw i~u.r.fi+DEBT ###
bounds.test(Mod_sos)
coint(Mod_sos)
plot(Mod_sos)
# I.I.D TESTS on the ARDL residuals
Box.test(Mod_sos$residuals,lag = 9, type="Ljung-Box",fitdf=4)
#Ho:INDEPENDANT
shapiro.test(Mod_sos$residuals) #Royston (1995) to be adequate for p.value < 0.1.
#Ho:nORMALITY
car::ncvTest(Mod_sos)
#Ho:constant error variance
qqnorm(Mod_sos$residuals)
qqline(Mod_sos$residuals)
bgtest(Mod_sos$residuals)
boxplot(Mod_sos$residuals)
hist(Mod_sos$residuals)
shapiro.test(Mod_sos$residuals) #Royston (1995) to be adequate for p.value < 0.1.
# ardl SERANN -------------------------------------------------------------
# Automatic ARDL lag selection by AIC.
Alt1select1 <- ardl::auto.ardl(gtot~u+r+d, data=ardl_data, ymax=18,
xmax=c(8,8,8),case=3,verbose = T,ic = "aic")
# Gtot --------------------------------------------------------------------
# Assemble the ARDL data set: total investment (log), capacity utilisation,
# profit rate, debt ratio, intangibles share, plus auxiliary series.
data_list<- ts.intersect(log(ProInv+IntInv+FinInv),
(capu1),
((profit1/(ProInv+IntInv+FinInv))),
dbtot/(ProInv+IntInv+FinInv),
((FinInv+IntInv)/(ProInv+IntInv+FinInv)),
log(IntInv),
log(FinInv),
LogRgdp, log(inv5),
log(dbtot),
(dbtnw),
d_ante, d_post)
data_list_w <- window(data_list,start=c(1958,1), end=c(2015,1), frequency=4)
ardl_data <- data.frame(gtot = (data_list_w[,1]),
u = data_list_w[,2],
r=(data_list_w[,3]),
d = data_list_w[,4],
etha = data_list_w[,5],
ii = data_list_w[,6],
fi = data_list_w[,7],
gdp = data_list_w[,8],
inv = data_list_w[,9],
lgd = data_list_w[,10],
dtonw = data_list_w[,11],
dA = data_list_w[,12] , dP = data_list_w[,13])
# ARDL with period dummies (dP = post-break, dA = ante-break) as fixed regressors.
Alt1select1 <- ardl::auto.ardl(gtot~u+r+d|dP, data=ardl_data, ymax=18,
xmax=c(8,8,16),case=(3),verbose = T,ic = "aic")
Mod_sos<-ardl::ardl(gtot~u+r+d|dA , data=ardl_data, ylag=8,
xlag=c(4,8,9), case = 3)
# Ratio gama(ii) calculation
ratio_memb<- ts.intersect(log(IntInv+FinInv) , log((FinInv+IntInv)/(ProInv+IntInv+FinInv) ) )
#ratio_memb<- ts.intersect(log(ProInv) , log((ProInv)/(ProInv+IntInv+FinInv) ) )
gamma_ratio <- ratio_memb[,1] - ratio_memb[,2]
print(gamma_ratio)
ts.plot(gamma_ratio)
#
# Prod-Inv ----------------------------------------------------------------
# Same data construction as the "Gtot" section above, but the dependent
# variable is productive investment only.
data_list<- ts.intersect(log(ProInv),
                         (capu1),
                         # FIX(review): was (profit1/ProInv+IntInv+FinInv), which by
                         # operator precedence computed profit1/ProInv + IntInv + FinInv.
                         # Parenthesised to match the identical profit-rate construction
                         # used in the Gtot and Intangible-Inv sections.
                         ((profit1/(ProInv+IntInv+FinInv))),
                         dbtot/(ProInv+IntInv+FinInv),
                         ((FinInv+IntInv)/(ProInv+IntInv+FinInv)),
                         log(IntInv),
                         log(FinInv),
                         LogRgdp, log(inv5),
                         log(dbtot),
                         (dbtnw),
                         d_ante , d_post,
                         log(FinInv+IntInv)
                         # (d_post*log(FinInv+IntInv))
)
data_list_w <- window(data_list,start=c(1958,1), end=c(2014,4), frequency=4)
ardl_data <- data.frame(Pinv = (data_list_w[,1]),
                        u = data_list_w[,2],
                        r=(data_list_w[,3]),
                        d = data_list_w[,4],
                        etha = data_list_w[,5],
                        ii = data_list_w[,6],
                        fi = data_list_w[,7],
                        gdp = data_list_w[,8],
                        inv = data_list_w[,9],
                        lgd = data_list_w[,10],
                        dtonw = data_list_w[,11],
                        dA = data_list_w[,12] , dP = data_list_w[,13],
                        iit = data_list_w[,14])
# ARDL of investment on utilisation and profit rate, with the ante-break dummy.
Alt1select1 <- ardl::auto.ardl(inv~u+r|dA, data=ardl_data, ymax=18,
                               xmax=c(8,8),case=(1),verbose = T,ic = "aic")
Mod_sos<-ardl::ardl(inv~u+r|dA , data=ardl_data, ylag=6,
                    xlag=c(6,9), case = 1)
# Ratio gama(ii) calculation
ratio_memb<- ts.intersect(IntInv+FinInv , dbtnw ) # dbtot/(ProInv+IntInv+FinInv))
gamma_ratio <- ratio_memb[,2] / ratio_memb[,1]
print(gamma_ratio)
ts.plot(gamma_ratio)
#
# Intangible-Inv ----------------------------------------------------------
data_list<- ts.intersect(log(ProInv+IntInv+FinInv),
(capu1),
((profit1/(ProInv+IntInv+FinInv))),
dbtot/(ProInv+IntInv+FinInv),
((FinInv+IntInv)/(ProInv+IntInv+FinInv)),
log(IntInv),
log(FinInv),
LogRgdp, log(inv5),
log(dbtot),
(dbtnw),
d_ante, d_post)
# NOTE(review): the second window() call overrides the first, so the sample
# actually used is 1952:1-2015:1, not 1984:1-2015:1.
data_list_w <- window(data_list,start=c(1984,1), end=c(2015,1), frequency=4)
data_list_w <- window(data_list,start=c(1952,1), end=c(2015,1), frequency=4)
ardl_data <- data.frame(gtot = (data_list_w[,1]),
u = data_list_w[,2],
r=(data_list_w[,3]),
d = data_list_w[,4],
etha = data_list_w[,5],
ii = data_list_w[,6],
fi = data_list_w[,7],
gdp = data_list_w[,8],
inv = data_list_w[,9],
lgd = data_list_w[,10],
dtonw = data_list_w[,11])
# ARDL of intangible investment on utilisation, profit rate and debt.
# Each auto.ardl / ardl pair overwrites the previous Alt1select1 / Mod_sos;
# the Diag section below therefore tests only the last fitted model.
Alt1select1 <- ardl::auto.ardl(ii~u+r+d, data=ardl_data, ymax=18,
xmax=c(8,8,8),case=(1),verbose = T,ic = "aic")
Mod_sos<-ardl::ardl(ii~u+r+d , data=ardl_data, ylag=1,
xlag=c(0,1,4), case = 1)
Alt1select1 <- auto.ardl(ii~d, data=ardl_data, ymax=18,
xmax=c(8,8,8),case=(1),verbose = T,ic = "ll")
Mod_sos<-ardl(ii~d , data=ardl_data, ylag=5,
xlag=c(5), case = 1)
#test d=inv+ii (for Minky dynamics)
Alt1select1 <- ardl::auto.ardl(d ~ inv + ii , data=ardl_data, ymax=18,
xmax=c(8,8),case=1,verbose = T,ic = "ll")
Mod_sos<-ardl::ardl(d ~ inv + ii , data=ardl_data, ylag=5,
xlag=c(5,5), case = 1)
#test d=inv+ii+r (for d=inv-sRE and Minky dynamics)
Alt1select1 <- ardl::auto.ardl(d ~ inv + ii + r , data=ardl_data, ymax=18,
xmax=c(8,8,8),case=1,verbose = T,ic = "ll")
Mod_sos<-ardl::ardl(d ~ inv + ii + r , data=ardl_data, ylag=5,
xlag=c(5,5,5), case = 1)
# Diag --------------------------------------------------------------------
summary(Mod_sos)
bounds.test(Mod_sos)
coint(Mod_sos)
Box.test(Mod_sos$residuals,lag = 9, type="Ljung-Box",fitdf=4) # I.I.D TESTS #Ho:INDEPENDANT
shapiro.test(Mod_sos$residuals) #Ho:nORMALITY
car::ncvTest(Mod_sos) #Ho:constant error variance
#
|
# Download classifying and cleaning tweets
# Script setup: load helpers, set batch size, read emoji/emoticon dictionaries.
# NOTE(review): rm(list = ls()) wipes the session; `size` is set to "small"
# but the output paths below use ./data/medium/ - confirm which is intended.
rm(list = ls())
source("twitterAuthorization.R")
source("dataRetriever.R")
source("preprocessing.R")
source("corpusBuilding.R")
source("emojiRetriever.R")
num <- 500
size <- "small"
emojiDict <- readEmojiDictionary("./data/emoji_dictionary.csv")
emoteDict <- readEmoticonDictionary("./data/emote_dictionary.csv")
# Download up to `num` English tweets for each search term, enrich and clean
# the text, and return one labelled data frame.
#
# Args:
#   num       - max tweets requested per search term.
#   search    - character vector of search strings (e.g. hashtags).
#   class     - emotion label attached to every returned row (as factor).
#   emojiDict - optional emoji dictionary; when given, emoji keywords are
#               appended to the tweet text before cleaning.
#   emoteDict - optional ASCII-emoticon dictionary; when given, emoticon
#               words are appended to the tweet text before cleaning.
#
# Returns: data.frame with text, ctext/chtext (cleaned), hashtags, emojis
# and class columns, one batch per search term stacked with rbind.
downloadTweets <- function(num, search, class, emojiDict = NULL, emoteDict = NULL)
{
  tweets <- data.frame()
  # FIX: seq_along() instead of 1:length(search), which iterates over
  # c(1, 0) when `search` is empty.
  for(i in seq_along(search))
  {
    message("\nSearch: ", search[i])
    s <- searchTwitter(search[i], n=num, lang = "en")
    message("Transformation...")
    s <- tweetsToDF(s)
    if(!is.null(emoteDict))
    {
      message("Dealing with ASCII emotes...")
      emoteWord <- getEmoticonsWord(s$text, emoteDict)
      s$text <- paste(s$text, emoteWord, sep = " ")
    }
    message("Parsing hashtags...")
    s$hashtags <- getHashtags(s$text)
    # FIX: was "Parsin emojis..." (typo in progress message).
    message("Parsing emojis...")
    s$emojis <- getEmojis(s$text)
    if(!is.null(emojiDict))
    {
      message("Adding emoji keywords to text...")
      emoKWs <- getEmojiKeywords(s$emojis, emojiDict)
      s$text <- paste(s$text, emoKWs)
    }
    message("Cleaning...")
    s$ctext <- cleanTexts(s$text)
    s$chtext <- cleanTexts(s$text, keepHashtags = FALSE)
    s$class <- as.factor(class)
    tweets <- rbind(tweets, s)
  }
  message("Done! Tweets: ", dim(tweets)[1], " variables: ", dim(tweets)[2])
  gc()
  return(tweets)
}
# Collect at least enough unique tweets per emotion.
#
# collect_tweets() repeats the download until the stop condition is met,
# sleeping 3 minutes between rounds to respect the Twitter API rate limit.
# It factors out the six copy-pasted repeat-loops of the original script;
# each emotion keeps exactly the same behaviour and progress messages.
#
# Args:
#   syns  - character vector of hashtag synonyms for the emotion.
#   label - emotion label (used for classification and messages).
# Returns: data.frame of unique labelled tweets.
#
# NOTE(review): the stop condition `nrow(acc) * length(syns) < num` is kept
# verbatim from the original loops; it looks like it may have been intended
# as `nrow(acc) < num * length(syns)` - confirm with the author.
collect_tweets <- function(syns, label)
{
  acc <- data.frame()
  repeat
  {
    batch <- downloadTweets(num, syns, label, emojiDict, emoteDict)
    acc <- unique(rbind(acc, batch))
    message("Current #", label, " unique tweets: ", nrow(acc))
    if(nrow(acc)*length(syns) < num)
    {
      Sys.sleep(180)
    }
    else
    {
      break
    }
  }
  return(acc)
}
#happy
happySyn <- c("#happy", "#joy", "#bliss", "#happiness")
happy <- collect_tweets(happySyn, "happy")
#sad
sadSyn <- c("#sad","#unhappy","#depressed", "#bitter", "#heartbroken", "#dismay")
sad <- collect_tweets(sadSyn, "sad")
#surprised
surprisedSyn <- c("#surprised","#shocked","#amazed", "#astonished", "#omg")
surprised <- collect_tweets(surprisedSyn, "surprised")
#afraid
afraidSyn <- c("#afraid","#scared","#worried", "#fear", "#angst", "#horror")
afraid <- collect_tweets(afraidSyn, "afraid")
#angry
angrySyn <- c("#angry","#mad","#furious", "#annoyed", "#rage", "#jealous", "#jelly", "#frustrated", "#outrage", "#grumpy", "#anger")
angry <- collect_tweets(angrySyn, "angry")
#disgusted
disgustedSyn <- c("#disgusted","#sickened","#offended", "#sick", "#weary", "#wtf")
disgusted <- collect_tweets(disgustedSyn, "disgusted")
# Persist the combined corpus and the per-emotion subsets.
# NOTE(review): files go to ./data/medium/ although `size` is "small" at the
# top of the script - confirm which is intended.
tweets <- rbind(happy, sad, surprised, afraid, angry, disgusted)
write.csv(tweets, "./data/medium/tweets.csv")
write.csv(happy, "./data/medium/happy.csv")
write.csv(sad, "./data/medium/sad.csv")
write.csv(surprised, "./data/medium/surprised.csv")
write.csv(afraid, "./data/medium/afraid.csv")
write.csv(angry, "./data/medium/angry.csv")
write.csv(disgusted, "./data/medium/disgusted.csv")
|
/downloadTweets.R
|
no_license
|
hucara/eanalysis_rio
|
R
| false
| false
| 4,416
|
r
|
# Download classifying and cleaning tweets
# (duplicate copy of the script in this dump)
# Script setup: load helpers, set batch size, read emoji/emoticon dictionaries.
# NOTE(review): rm(list = ls()) wipes the session; `size` is set to "small"
# but the output paths below use ./data/medium/ - confirm which is intended.
rm(list = ls())
source("twitterAuthorization.R")
source("dataRetriever.R")
source("preprocessing.R")
source("corpusBuilding.R")
source("emojiRetriever.R")
num <- 500
size <- "small"
emojiDict <- readEmojiDictionary("./data/emoji_dictionary.csv")
emoteDict <- readEmoticonDictionary("./data/emote_dictionary.csv")
# Download up to `num` English tweets for each search term, enrich and clean
# the text, and return one labelled data frame.
#
# Args:
#   num       - max tweets requested per search term.
#   search    - character vector of search strings (e.g. hashtags).
#   class     - emotion label attached to every returned row (as factor).
#   emojiDict - optional emoji dictionary; when given, emoji keywords are
#               appended to the tweet text before cleaning.
#   emoteDict - optional ASCII-emoticon dictionary; when given, emoticon
#               words are appended to the tweet text before cleaning.
#
# Returns: data.frame with text, ctext/chtext (cleaned), hashtags, emojis
# and class columns, one batch per search term stacked with rbind.
downloadTweets <- function(num, search, class, emojiDict = NULL, emoteDict = NULL)
{
  tweets <- data.frame()
  # FIX: seq_along() instead of 1:length(search), which iterates over
  # c(1, 0) when `search` is empty.
  for(i in seq_along(search))
  {
    message("\nSearch: ", search[i])
    s <- searchTwitter(search[i], n=num, lang = "en")
    message("Transformation...")
    s <- tweetsToDF(s)
    if(!is.null(emoteDict))
    {
      message("Dealing with ASCII emotes...")
      emoteWord <- getEmoticonsWord(s$text, emoteDict)
      s$text <- paste(s$text, emoteWord, sep = " ")
    }
    message("Parsing hashtags...")
    s$hashtags <- getHashtags(s$text)
    # FIX: was "Parsin emojis..." (typo in progress message).
    message("Parsing emojis...")
    s$emojis <- getEmojis(s$text)
    if(!is.null(emojiDict))
    {
      message("Adding emoji keywords to text...")
      emoKWs <- getEmojiKeywords(s$emojis, emojiDict)
      s$text <- paste(s$text, emoKWs)
    }
    message("Cleaning...")
    s$ctext <- cleanTexts(s$text)
    s$chtext <- cleanTexts(s$text, keepHashtags = FALSE)
    s$class <- as.factor(class)
    tweets <- rbind(tweets, s)
  }
  message("Done! Tweets: ", dim(tweets)[1], " variables: ", dim(tweets)[2])
  gc()
  return(tweets)
}
# Collect at least enough unique tweets per emotion.
#
# collect_tweets() repeats the download until the stop condition is met,
# sleeping 3 minutes between rounds to respect the Twitter API rate limit.
# It factors out the six copy-pasted repeat-loops of the original script;
# each emotion keeps exactly the same behaviour and progress messages.
#
# Args:
#   syns  - character vector of hashtag synonyms for the emotion.
#   label - emotion label (used for classification and messages).
# Returns: data.frame of unique labelled tweets.
#
# NOTE(review): the stop condition `nrow(acc) * length(syns) < num` is kept
# verbatim from the original loops; it looks like it may have been intended
# as `nrow(acc) < num * length(syns)` - confirm with the author.
collect_tweets <- function(syns, label)
{
  acc <- data.frame()
  repeat
  {
    batch <- downloadTweets(num, syns, label, emojiDict, emoteDict)
    acc <- unique(rbind(acc, batch))
    message("Current #", label, " unique tweets: ", nrow(acc))
    if(nrow(acc)*length(syns) < num)
    {
      Sys.sleep(180)
    }
    else
    {
      break
    }
  }
  return(acc)
}
#happy
happySyn <- c("#happy", "#joy", "#bliss", "#happiness")
happy <- collect_tweets(happySyn, "happy")
#sad
sadSyn <- c("#sad","#unhappy","#depressed", "#bitter", "#heartbroken", "#dismay")
sad <- collect_tweets(sadSyn, "sad")
#surprised
surprisedSyn <- c("#surprised","#shocked","#amazed", "#astonished", "#omg")
surprised <- collect_tweets(surprisedSyn, "surprised")
#afraid
afraidSyn <- c("#afraid","#scared","#worried", "#fear", "#angst", "#horror")
afraid <- collect_tweets(afraidSyn, "afraid")
#angry
angrySyn <- c("#angry","#mad","#furious", "#annoyed", "#rage", "#jealous", "#jelly", "#frustrated", "#outrage", "#grumpy", "#anger")
angry <- collect_tweets(angrySyn, "angry")
#disgusted
disgustedSyn <- c("#disgusted","#sickened","#offended", "#sick", "#weary", "#wtf")
disgusted <- collect_tweets(disgustedSyn, "disgusted")
tweets <- rbind(happy, sad, surprised, afraid, angry, disgusted)
write.csv(tweets, "./data/medium/tweets.csv")
write.csv(happy, "./data/medium/happy.csv")
write.csv(sad, "./data/medium/sad.csv")
write.csv(surprised, "./data/medium/surprised.csv")
write.csv(afraid, "./data/medium/afraid.csv")
write.csv(angry, "./data/medium/angry.csv")
write.csv(disgusted, "./data/medium/disgusted.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ecg_beats}
\alias{ecg_beats}
\title{Killer whale heart beats recorded by ECG}
\format{
An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 1075 rows and 3 columns.
}
\usage{
ecg_beats
}
\description{
Killer whale heart beats recorded by ECG
}
\keyword{datasets}
|
/man/ecg_beats.Rd
|
permissive
|
FlukeAndFeather/cetaceanbcg
|
R
| false
| true
| 400
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ecg_beats}
\alias{ecg_beats}
\title{Killer whale heart beats recorded by ECG}
\format{
An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 1075 rows and 3 columns.
}
\usage{
ecg_beats
}
\description{
Killer whale heart beats recorded by ECG
}
\keyword{datasets}
|
## ------- ptmScan.R -------------- ##
# #
# p.scan #
# ac.scan #
# me.scan #
# ub.scan #
# su.scan #
# gl.scan #
# sni.scan #
# ni.scan #
# ptm.scan #
# reg.scan #
# dis.scan #
# #
## -------------------------------- ##
## ---------------------------------------------------------------- ##
#            p.scan <- function(up_id, db = 'all')                   #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of Phosphosites
#' @description Scans the indicated protein in search of phosphosites.
#' @usage p.scan(up_id, db = 'all')
#' @param up_id a character string corresponding to the UniProt ID.
#' @param db the database where to search. It should be one among 'PSP', 'dbPTM','dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all'.
#' @details If db = 'all' has been selected, it may happen that the same residue appears in several rows if it is present in different databases.
#' @return Returns a dataframe where each row corresponds to a phosphorylatable residue, or a character message when no sites are found.
#' @author Juan Carlos Aledo
#' @examples \dontrun{p.scan('P01009', db = 'PSP')}
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @references Ullah et al. Sci. Rep. 2016 6:23534, (PMID: 27010073).
#' @references Durek et al. Nucleic Acids Res.2010 38:D828-D834, (PMID: 19880383).
#' @references Dinkel et al. Nucleic Acids Res. 2011 39:D261-D567 (PMID: 21062810).
#' @seealso meto.scan(), ac.scan(), me.scan(), ub.scan(), su.scan(), gl.scan(), sni.scan(), ni.scan(), ptm.scan(), reg.scan(), dis.scan()
#' @export
p.scan <- function(up_id, db = 'all'){
  if (! db %in% c('PSP', 'dbPTM','dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all')){
    stop("Please, select an appropriate database!")
  }
  p_db <- NULL  # placeholder; load() below binds the real p_db when the remote file exists
  baseUrl <- "https://github.com/jcaledo/p_db/blob/master/"
  call <- paste(baseUrl, "p_db_", up_id, ".Rda?raw=true", sep = "")
  ## The per-protein table lives in a remote .Rda; a failed download/load
  ## means no phosphosites are recorded for this protein.
  resp <- try(load(url(call)), silent = TRUE)
  if (inherits(resp, "try-error")) {
    text <- "Sorry, no modification sites were found for this protein"
    return(text)
  }
  if (db != 'all'){
    p_db <- p_db[which(p_db$database == db),]  # keep only the requested source database
  }
  return(p_db)
}
## ---------------------------------------------------------------- ##
#           ac.scan <- function(up_id, db = 'all')                   #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of Acetylation Sites
#' @description Scans the indicated protein in search of acetylation sites.
#' @usage ac.scan(up_id, db = 'all')
#' @param up_id a character string corresponding to the UniProt ID.
#' @param db the database where to search. It should be one among 'PSP', 'dbPTM', 'all'.
#' @details If db = 'all' has been selected, it may happen that the same residue appears in several rows if it is present in different databases.
#' @return Returns a dataframe where each row corresponds to an acetylable residue, or a character message when no sites are found.
#' @author Juan Carlos Aledo
#' @examples ac.scan('P01009', db = 'PSP')
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @seealso meto.scan(), p.scan(), me.scan(), ub.scan(), su.scan(), gl.scan(), sni.scan(), ni.scan(), ptm.scan(), reg.scan(), dis.scan()
#' @export
ac.scan <- function(up_id, db = 'all'){
  ## NOTE(review): the documentation lists only 'PSP', 'dbPTM' and 'all' for
  ## 'db', but the check below also admits the phospho-specific databases;
  ## confirm whether that is intentional before tightening it.
  if (! db %in% c('PSP', 'dbPTM','dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all')){
    stop("Please, select an appropriate database!")
  }
  ac_db <- NULL  # placeholder; load() below binds the real ac_db when the remote file exists
  baseUrl <- "https://github.com/jcaledo/ac_db/blob/master/"
  call <- paste(baseUrl, "ac_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)  # remote per-protein table
  if (inherits(resp, "try-error")) {
    text <- "Sorry, no modification sites were found for this protein"
    return(text)
  }
  if (db != 'all'){
    ac_db <- ac_db[which(ac_db$database == db),]  # keep only the requested source database
  }
  return(ac_db)
}
## ---------------------------------------------------------------- ##
#           me.scan <- function(up_id, db = 'all')                   #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of Methylation Sites
#' @description Scans the indicated protein in search of methylation sites.
#' @usage me.scan(up_id, db = 'all')
#' @param up_id a character string corresponding to the UniProt ID.
#' @param db the database where to search. It should be one among 'PSP', 'dbPTM', 'all'.
#' @details If db = 'all' has been selected, it may happen that the same residue appears in several rows if it is present in different databases.
#' @return Returns a dataframe where each row corresponds to a modifiable residue, or a character message when no sites are found.
#' @author Juan Carlos Aledo
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @examples me.scan('Q16695', db = 'PSP')
#' @seealso meto.scan(), ac.scan(), p.scan(), ub.scan(), su.scan(), gl.scan(), sni.scan(), ni.scan(), ptm.scan(), reg.scan(), dis.scan()
#' @export
me.scan <- function(up_id, db = 'all'){
  ## NOTE(review): documented 'db' choices are 'PSP', 'dbPTM', 'all', yet the
  ## check also admits phospho-specific databases — confirm intent.
  if (! db %in% c('PSP', 'dbPTM','dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all')){
    stop("Please, select an appropriate database!")
  }
  me_db <- NULL  # placeholder; load() below binds the real me_db when the remote file exists
  baseUrl <- "https://github.com/jcaledo/me_db/blob/master/"
  call <- paste(baseUrl, "me_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)  # remote per-protein table
  if (inherits(resp, "try-error")) {
    text <- "Sorry, no modification sites were found for this protein"
    return(text)
  }
  if (db != 'all'){
    me_db <- me_db[which(me_db$database == db),]  # keep only the requested source database
  }
  return(me_db)
}
## ---------------------------------------------------------------- ##
#           ub.scan <- function(up_id, db = 'all')                   #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of Ubiquitination Sites
#' @description Scans the indicated protein in search of ubiquitination sites.
#' @usage ub.scan(up_id, db = 'all')
#' @param up_id a character string corresponding to the UniProt ID.
#' @param db the database where to search. It should be one among 'PSP', 'dbPTM', 'all'.
#' @details If db = 'all' has been selected, it may happen that the same residue appears in several rows if it is present in different databases.
#' @return Returns a dataframe where each row corresponds to a modifiable residue, or a character message when no sites are found.
#' @author Juan Carlos Aledo
#' @examples ub.scan('Q16695', db = 'PSP')
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @seealso meto.scan(), ac.scan(), me.scan(), p.scan(), su.scan(), gl.scan(), sni.scan(), ni.scan(), ptm.scan(), reg.scan(), dis.scan()
#' @export
ub.scan <- function(up_id, db = 'all'){
  ## NOTE(review): documented 'db' choices are 'PSP', 'dbPTM', 'all', yet the
  ## check also admits phospho-specific databases — confirm intent.
  if (! db %in% c('PSP', 'dbPTM','dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all')){
    stop("Please, select an appropriate database!")
  }
  ub_db <- NULL  # placeholder; load() below binds the real ub_db when the remote file exists
  baseUrl <- "https://github.com/jcaledo/ub_db/blob/master/"
  call <- paste(baseUrl, "ub_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)  # remote per-protein table
  if (inherits(resp, "try-error")) {
    text <- "Sorry, no modification sites were found for this protein"
    return(text)
  }
  if (db != 'all'){
    ub_db <- ub_db[which(ub_db$database == db),]  # keep only the requested source database
  }
  return(ub_db)
}
## ---------------------------------------------------------------- ##
#           su.scan <- function(up_id, db = 'all')                   #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of Sumoylation Sites
#' @description Scans the indicated protein in search of sumoylation sites.
#' @usage su.scan(up_id, db = 'all')
#' @param up_id a character string corresponding to the UniProt ID.
#' @param db the database where to search. It should be one among 'PSP', 'dbPTM', 'all'.
#' @details If db = 'all' has been selected, it may happen that the same residue appears in several rows if it is present in different databases.
#' @return Returns a dataframe where each row corresponds to a modifiable residue, or a character message when no sites are found.
#' @author Juan Carlos Aledo
#' @examples su.scan('Q16695', db = 'PSP')
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @seealso meto.scan(), ac.scan(), me.scan(), ub.scan(), p.scan(), gl.scan(), sni.scan(), ni.scan(), ptm.scan(), reg.scan(), dis.scan()
#' @export
su.scan <- function(up_id, db = 'all'){
  ## NOTE(review): documented 'db' choices are 'PSP', 'dbPTM', 'all', yet the
  ## check also admits phospho-specific databases — confirm intent.
  if (! db %in% c('PSP', 'dbPTM','dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all')){
    stop("Please, select an appropriate database!")
  }
  su_db <- NULL  # placeholder; load() below binds the real su_db when the remote file exists
  baseUrl <- "https://github.com/jcaledo/su_db/blob/master/"
  call <- paste(baseUrl, "su_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)  # remote per-protein table
  if (inherits(resp, "try-error")) {
    text <- "Sorry, no modification sites were found for this protein"
    return(text)
  }
  if (db != 'all'){
    su_db <- su_db[which(su_db$database == db),]  # keep only the requested source database
  }
  return(su_db)
}
## ---------------------------------------------------------------- ##
#           gl.scan <- function(up_id, db = 'all')                   #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of OGlcNAc Sites
#' @description Scans the indicated protein in search of glycosylation sites.
#' @usage gl.scan(up_id, db = 'all')
#' @param up_id a character string corresponding to the UniProt ID.
#' @param db the database where to search. It should be one among 'PSP', 'dbPTM', 'all'.
#' @details If db = 'all' has been selected, it may happen that the same residue appears in several rows if it is present in different databases.
#' @return Returns a dataframe where each row corresponds to a modifiable residue, or a character message when no sites are found.
#' @author Juan Carlos Aledo
#' @examples gl.scan('P08670', db = 'PSP')
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @seealso meto.scan(), ac.scan(), me.scan(), ub.scan(), su.scan(), p.scan(), sni.scan(), ni.scan(), ptm.scan(), reg.scan(), dis.scan()
#' @export
gl.scan <- function(up_id, db = 'all'){
  ## NOTE(review): documented 'db' choices are 'PSP', 'dbPTM', 'all', yet the
  ## check also admits phospho-specific databases — confirm intent.
  if (! db %in% c('PSP', 'dbPTM','dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all')){
    stop("Please, select an appropriate database!")
  }
  gl_db <- NULL  # placeholder; load() below binds the real gl_db when the remote file exists
  baseUrl <- "https://github.com/jcaledo/gl_db/blob/master/"
  call <- paste(baseUrl, "gl_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)  # remote per-protein table
  if (inherits(resp, "try-error")) {
    text <- "Sorry, no modification sites were found for this protein"
    return(text)
  }
  if (db != 'all'){
    gl_db <- gl_db[which(gl_db$database == db),]  # keep only the requested source database
  }
  return(gl_db)
}
## ---------------------------------------------------------------- ##
#           sni.scan <- function(up_id, db = 'all')                  #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of S-nitrosylation Sites
#' @description Scans the indicated protein in search of S-nitrosylation sites.
#' @usage sni.scan(up_id, db = 'all')
#' @param up_id a character string corresponding to the UniProt ID.
#' @param db the database where to search. It should be one among 'PSP', 'dbPTM', 'all'.
#' @return Returns a dataframe where each row corresponds to a modifiable residue, or a character message when no sites are found.
#' @author Juan Carlos Aledo
#' @examples sni.scan('P01009')
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @seealso meto.scan(), ac.scan(), me.scan(), ub.scan(), su.scan(), gl.scan(), p.scan(), ni.scan(), ptm.scan(), reg.scan(), dis.scan()
#' @export
sni.scan <- function(up_id, db = 'all'){
  ## NOTE(review): documented 'db' choices are 'PSP', 'dbPTM', 'all', yet the
  ## check also admits phospho-specific databases — confirm intent.
  if (! db %in% c('PSP', 'dbPTM','dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all')){
    stop("Please, select an appropriate database!")
  }
  sni_db <- NULL  # placeholder; load() below binds the real sni_db when the remote file exists
  baseUrl <- "https://github.com/jcaledo/sni_db/blob/master/"
  call <- paste(baseUrl, "sni_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)  # remote per-protein table
  if (inherits(resp, "try-error")) {
    text <- "Sorry, no modification sites were found for this protein"
    return(text)
  }
  if (db != 'all'){
    sni_db <- sni_db[which(sni_db$database == db),]  # keep only the requested source database
  }
  return(sni_db)
}
## ---------------------------------------------------------------- ##
#           ni.scan <- function(up_id, db = 'all')                   #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of Nitration Sites
#' @description Scans the indicated protein in search of nitration sites.
#' @usage ni.scan(up_id, db = 'all')
#' @param up_id a character string corresponding to the UniProt ID.
#' @param db the database where to search. It should be one among 'PSP', 'dbPTM', 'all'.
#' @return Returns a dataframe where each row corresponds to a modified residue, or a character message when no sites are found.
#' @author Juan Carlos Aledo
#' @examples ni.scan('P05202')
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @seealso meto.scan(), ac.scan(), me.scan(), ub.scan(), su.scan(), gl.scan(), sni.scan(), p.scan(), ptm.scan(), reg.scan(), dis.scan()
#' @export
ni.scan <- function(up_id, db = 'all'){
  ## NOTE(review): documented 'db' choices are 'PSP', 'dbPTM', 'all', yet the
  ## check also admits phospho-specific databases — confirm intent.
  if (! db %in% c('PSP', 'dbPTM','dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all')){
    stop("Please, select an appropriate database!")
  }
  ni_db <- NULL  # placeholder; load() below binds the real ni_db when the remote file exists
  baseUrl <- "https://github.com/jcaledo/ni_db/blob/master/"
  call <- paste(baseUrl, "ni_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)  # remote per-protein table
  if (inherits(resp, "try-error")) {
    text <- "Sorry, no modification sites were found for this protein"
    return(text)
  }
  if (db != 'all'){
    ni_db <- ni_db[which(ni_db$database == db),]  # keep only the requested source database
  }
  return(ni_db)
}
## ---------------------------------------------------------------- ##
#        ptm.scan <- function(up_id, renumerate = TRUE)              #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of PTM Sites
#' @description Scans the indicated protein in search of PTM sites.
#' @usage ptm.scan(up_id, renumerate = TRUE)
#' @param up_id a character string corresponding to the UniProt ID.
#' @param renumerate logical, when TRUE the sequence numeration of MetO sites is that given by Uniprot, which may not coincide with that from MetOSite.
#' @details The numerations of the sequences given by UniProt and MetOSite may or may not match. Sometimes one of the sequences corresponds to the precursor protein and the other to the processed mature protein.
#' @return Returns a dataframe where each row corresponds to a residue, and the columns inform about the modifications.
#' @author Juan Carlos Aledo
#' @examples \dontrun{ptm.scan('P01009', renumerate = TRUE)}
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @references Ullah et al. Sci. Rep. 2016 6:23534, (PMID: 27010073).
#' @references Durek et al. Nucleic Acids Res.2010 38:D828-D834, (PMID: 19880383).
#' @references Dinkel et al. Nucleic Acids Res. 2011 39:D261-D567 (PMID: 21062810).
#' @seealso meto.scan(), ac.scan(), me.scan(), ub.scan(), su.scan(), gl.scan(), sni.scan(), ni.scan(), p.scan(), reg.scan(), dis.scan()
#' @export
ptm.scan <- function(up_id, renumerate = TRUE){
  seq <- get.seq(up_id, as.string = FALSE)[[1]]
  ## One row per residue; the annotation columns start as NA and are switched
  ## to TRUE when the corresponding modification is reported at that position.
  output <- as.data.frame(matrix(rep(NA, length(seq)*14), ncol = 14))
  names(output) <- c('id','n', 'aa', 'meto', 'p', 'ac', 'me', 'ub', 'su', 'gl', 'sni', 'ni', 'reg', 'dis')
  output$id <- up_id
  output$n <- seq_len(nrow(output))  # seq_len() is safe even for empty sequences
  output$aa <- seq

  ## Extract residue positions from modification labels such as "S15-p":
  ## drop the "-p" suffix first, then the leading amino acid letter.
  positions_of <- function(mods){
    unlist(lapply(unique(mods), function(m){
      t <- strsplit(m, split = "-")[[1]][-2]
      as.numeric(substring(t, 2))
    }))
  }
  ## Flag the residues reported by one scan in the given output column.
  ## The *.scan() helpers return an apology string when nothing is found.
  mark <- function(scan_result, column){
    if (!grepl("Sorry", scan_result[1]) && nrow(scan_result) != 0){
      output[positions_of(scan_result$modification), column] <<- TRUE
    }
  }

  ## ----- Sulfoxidation -------- ##
  ## MetOSite positions may use the mature-protein numeration, so they are
  ## optionally renumerated to the UniProt frame.
  meto <- meto.scan(up_id)[[1]]
  if (!is.null(nrow(meto))){ # if there is any meto site
    for (i in seq_len(nrow(meto))){
      t <- as.numeric(meto$met_pos[i])
      if (renumerate){
        t <- renum(up_id, t, from = 'metosite', to = 'uniprot')
      }
      output$meto[t] <- TRUE
    }
  }
  ## ----- Remaining modification types -------- ##
  mark(p.scan(up_id),   'p')    # phosphorylation
  mark(ac.scan(up_id),  'ac')   # acetylation
  mark(me.scan(up_id),  'me')   # methylation
  mark(ub.scan(up_id),  'ub')   # ubiquitination
  mark(su.scan(up_id),  'su')   # sumoylation
  mark(gl.scan(up_id),  'gl')   # OGlcNAc glycosylation
  mark(sni.scan(up_id), 'sni')  # S-nitrosylation
  mark(ni.scan(up_id),  'ni')   # nitration
  ## ----- Regulation -------- ##
  ## Regulatory MetO sites come in MetOSite numeration and may need
  ## renumeration, so this scan cannot use the generic mark() helper.
  reg <- reg.scan(up_id)
  if (!grepl("Sorry", reg[1]) && nrow(reg) != 0){
    for (m in unique(reg$modification)){
      t <- strsplit(m, split = "-")[[1]][-2]
      aa <- substr(t, 1, 1)
      pos <- as.numeric(substring(t, 2))
      if (aa == 'M' && renumerate){
        pos <- renum(up_id, pos, from = 'metosite', to = 'uniprot')
      }
      output$reg[pos] <- TRUE
    }
  }
  ## ----- Disease -------- ##
  mark(dis.scan(up_id), 'dis')
  ## Keep only residues carrying at least one annotation (columns 4:14).
  output <- output[rowSums(is.na(output[, 4:14])) != 11,]
  o <- as.matrix(output)
  ## 'multi': number of distinct modification types (columns 4:12) per residue.
  output$multi <- NA
  for (i in seq_len(nrow(o))){  # seq_len() avoids the 1:0 trap when no row survives
    output$multi[i] <- sum(as.logical(o[i,4:12]), na.rm = TRUE)
  }
  attr(output, 'prot_id') <- up_id
  attr(output, 'prot_length') <- length(seq)
  return(output)
}
## ---------------------------------------------------------------- ##
#                 reg.scan <- function(up_id)                        #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of Regulatory PTM Sites
#' @description Scans the indicated protein in search of regulatory PTM sites.
#' @usage reg.scan(up_id)
#' @param up_id a character string corresponding to the UniProt ID.
#' @return Returns a dataframe where each row corresponds to a residue, and the columns inform about the regulatory modifications. A character message is returned when no sites are found.
#' @author Juan Carlos Aledo
#' @examples reg.scan('P01009')
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @seealso meto.scan(), ac.scan(), me.scan(), ub.scan(), su.scan(), gl.scan(), sni.scan(), ni.scan(), ptm.scan(), p.scan(), dis.scan()
#' @export
reg.scan <- function(up_id){
  reg_db <- NULL  # placeholder; load() below binds the real reg_db when the remote file exists
  baseUrl <- "https://github.com/jcaledo/reg_db/blob/master/"
  call <- paste(baseUrl, "reg_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)  # remote per-protein table
  if (inherits(resp, "try-error")) {
    text <- "Sorry, no modification sites were found for this protein"
    return(text)
  }
  t <- reg_db[which(reg_db$up_id == up_id),]
  ## Append regulatory methionine sulfoxidation sites (reg_id > 2) taken from
  ## MetOSite. Reuse the already downloaded 'm' rather than calling
  ## meto.scan() a second time: one network request instead of two, and the
  ## row indices are guaranteed to refer to the same table being subset.
  m <- meto.scan(up_id, report = 2)
  tt <- m$Metosite[which(m$Metosite$reg_id > 2),]
  if (nrow(tt) > 0){
    meto <- data.frame(up_id        = up_id,
                       organism     = m$prot_sp,
                       modification = paste("M", tt$met_pos, "-ox", sep = ""),
                       database     = 'MetOSite',
                       stringsAsFactors = FALSE)
    if (nrow(t) > 0){
      t <- rbind(t, meto)
    } else {
      t <- meto
    }
  }
  return(t)
}
## ---------------------------------------------------------------- ##
#                 dis.scan <- function(up_id)                        #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of Disease-Related PTM Sites
#' @description Scans the indicated protein in search of disease-related PTM sites.
#' @usage dis.scan(up_id)
#' @param up_id a character string corresponding to the UniProt ID.
#' @return Returns a dataframe where each row corresponds to a residue, and the columns inform about the disease-related modifications. A character message is returned when no sites are found.
#' @author Juan Carlos Aledo
#' @examples dis.scan('P31749')
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @seealso meto.scan(), ac.scan(), me.scan(), ub.scan(), su.scan(), gl.scan(), sni.scan(), ni.scan(), ptm.scan(), reg.scan(), p.scan()
#' @export
dis.scan <- function(up_id){
  dis_db <- NULL  # placeholder; load() below binds the real dis_db when the remote file exists
  baseUrl <- "https://github.com/jcaledo/dis_db/blob/master/"
  call <- paste(baseUrl, "dis_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)  # remote per-protein table
  if (inherits(resp, "try-error")) {
    text <- "Sorry, no modification sites were found for this protein"
    return(text)
  }
  t <- dis_db[which(dis_db$up_id == up_id),]  # defensive filter by UniProt ID
  return(t)
}
|
/ptm_0.1.0/R/ptmScan.R
|
no_license
|
jcaledo/ptm
|
R
| false
| false
| 23,680
|
r
|
## ------- ptmScan.R -------------- ##
# #
# p.scan #
# ac.scan #
# me.scan #
# ub.scan #
# su.scan #
# gl.scan #
# sni.scan #
# ni.scan #
# ptm.scan #
# reg.scan #
# dis.scan #
# #
## -------------------------------- ##
## ---------------------------------------------------------------- ##
#            p.scan <- function(up_id, db = 'all')                   #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of Phosphosites
#' @description Scans the indicated protein in search of phosphosites.
#' @usage p.scan(up_id, db = 'all')
#' @param up_id a character string corresponding to the UniProt ID.
#' @param db the database where to search. It should be one among 'PSP', 'dbPTM','dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all'.
#' @details If db = 'all' has been selected, it may happen that the same residue appears in several rows if it is present in different databases.
#' @return Returns a dataframe where each row corresponds to a phosphorylatable residue, or a character message when no sites are found.
#' @author Juan Carlos Aledo
#' @examples \dontrun{p.scan('P01009', db = 'PSP')}
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @references Ullah et al. Sci. Rep. 2016 6:23534, (PMID: 27010073).
#' @references Durek et al. Nucleic Acids Res.2010 38:D828-D834, (PMID: 19880383).
#' @references Dinkel et al. Nucleic Acids Res. 2011 39:D261-D567 (PMID: 21062810).
#' @seealso meto.scan(), ac.scan(), me.scan(), ub.scan(), su.scan(), gl.scan(), sni.scan(), ni.scan(), ptm.scan(), reg.scan(), dis.scan()
#' @export
p.scan <- function(up_id, db = 'all'){
  if (! db %in% c('PSP', 'dbPTM','dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all')){
    stop("Please, select an appropriate database!")
  }
  p_db <- NULL  # placeholder; load() below binds the real p_db when the remote file exists
  baseUrl <- "https://github.com/jcaledo/p_db/blob/master/"
  call <- paste(baseUrl, "p_db_", up_id, ".Rda?raw=true", sep = "")
  ## The per-protein table lives in a remote .Rda; a failed download/load
  ## means no phosphosites are recorded for this protein.
  resp <- try(load(url(call)), silent = TRUE)
  if (inherits(resp, "try-error")) {
    text <- "Sorry, no modification sites were found for this protein"
    return(text)
  }
  if (db != 'all'){
    p_db <- p_db[which(p_db$database == db),]  # keep only the requested source database
  }
  return(p_db)
}
## ---------------------------------------------------------------- ##
#           ac.scan <- function(up_id, db = 'all')                   #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of Acetylation Sites
#' @description Scans the indicated protein in search of acetylation sites.
#' @usage ac.scan(up_id, db = 'all')
#' @param up_id a character string corresponding to the UniProt ID.
#' @param db the database where to search. It should be one among 'PSP', 'dbPTM', 'all'.
#' @details If db = 'all' has been selected, it may happen that the same residue appears in several rows if it is present in different databases.
#' @return Returns a dataframe where each row corresponds to an acetylable residue, or a character message when no sites are found.
#' @author Juan Carlos Aledo
#' @examples ac.scan('P01009', db = 'PSP')
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @seealso meto.scan(), p.scan(), me.scan(), ub.scan(), su.scan(), gl.scan(), sni.scan(), ni.scan(), ptm.scan(), reg.scan(), dis.scan()
#' @export
ac.scan <- function(up_id, db = 'all'){
  ## NOTE(review): the documentation lists only 'PSP', 'dbPTM' and 'all' for
  ## 'db', but the check below also admits the phospho-specific databases;
  ## confirm whether that is intentional before tightening it.
  if (! db %in% c('PSP', 'dbPTM','dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all')){
    stop("Please, select an appropriate database!")
  }
  ac_db <- NULL  # placeholder; load() below binds the real ac_db when the remote file exists
  baseUrl <- "https://github.com/jcaledo/ac_db/blob/master/"
  call <- paste(baseUrl, "ac_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)  # remote per-protein table
  if (inherits(resp, "try-error")) {
    text <- "Sorry, no modification sites were found for this protein"
    return(text)
  }
  if (db != 'all'){
    ac_db <- ac_db[which(ac_db$database == db),]  # keep only the requested source database
  }
  return(ac_db)
}
## ---------------------------------------------------------------- ##
#           me.scan <- function(up_id, db = 'all')                   #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of Methylation Sites
#' @description Scans the indicated protein in search of methylation sites.
#' @usage me.scan(up_id, db = 'all')
#' @param up_id a character string corresponding to the UniProt ID.
#' @param db the database where to search. It should be one among 'PSP', 'dbPTM', 'all'.
#' @details If db = 'all' has been selected, it may happen that the same residue appears in several rows if it is present in different databases.
#' @return Returns a dataframe where each row corresponds to a modifiable residue, or a character message when no sites are found.
#' @author Juan Carlos Aledo
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @examples me.scan('Q16695', db = 'PSP')
#' @seealso meto.scan(), ac.scan(), p.scan(), ub.scan(), su.scan(), gl.scan(), sni.scan(), ni.scan(), ptm.scan(), reg.scan(), dis.scan()
#' @export
me.scan <- function(up_id, db = 'all'){
  ## NOTE(review): documented 'db' choices are 'PSP', 'dbPTM', 'all', yet the
  ## check also admits phospho-specific databases — confirm intent.
  if (! db %in% c('PSP', 'dbPTM','dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all')){
    stop("Please, select an appropriate database!")
  }
  me_db <- NULL  # placeholder; load() below binds the real me_db when the remote file exists
  baseUrl <- "https://github.com/jcaledo/me_db/blob/master/"
  call <- paste(baseUrl, "me_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)  # remote per-protein table
  if (inherits(resp, "try-error")) {
    text <- "Sorry, no modification sites were found for this protein"
    return(text)
  }
  if (db != 'all'){
    me_db <- me_db[which(me_db$database == db),]  # keep only the requested source database
  }
  return(me_db)
}
## ---------------------------------------------------------------- ##
#           ub.scan <- function(up_id, db = 'all')                   #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of Ubiquitination Sites
#' @description Scans the indicated protein in search of ubiquitination sites.
#' @usage ub.scan(up_id, db = 'all')
#' @param up_id a character string corresponding to the UniProt ID.
#' @param db the database where to search. It should be one among 'PSP', 'dbPTM', 'all'.
#' @details If db = 'all' has been selected, it may happen that the same residue appears in several rows if it is present in different databases.
#' @return Returns a dataframe where each row corresponds to a modifiable residue, or a character message when no sites are found.
#' @author Juan Carlos Aledo
#' @examples ub.scan('Q16695', db = 'PSP')
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @seealso meto.scan(), ac.scan(), me.scan(), p.scan(), su.scan(), gl.scan(), sni.scan(), ni.scan(), ptm.scan(), reg.scan(), dis.scan()
#' @export
ub.scan <- function(up_id, db = 'all'){
  ## NOTE(review): documented 'db' choices are 'PSP', 'dbPTM', 'all', yet the
  ## check also admits phospho-specific databases — confirm intent.
  if (! db %in% c('PSP', 'dbPTM','dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all')){
    stop("Please, select an appropriate database!")
  }
  ub_db <- NULL  # placeholder; load() below binds the real ub_db when the remote file exists
  baseUrl <- "https://github.com/jcaledo/ub_db/blob/master/"
  call <- paste(baseUrl, "ub_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)  # remote per-protein table
  if (inherits(resp, "try-error")) {
    text <- "Sorry, no modification sites were found for this protein"
    return(text)
  }
  if (db != 'all'){
    ub_db <- ub_db[which(ub_db$database == db),]  # keep only the requested source database
  }
  return(ub_db)
}
## ---------------------------------------------------------------- ##
#           su.scan <- function(up_id, db = 'all')                   #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of Sumoylation Sites
#' @description Scans the indicated protein in search of sumoylation sites.
#' @usage su.scan(up_id, db = 'all')
#' @param up_id a character string corresponding to the UniProt ID.
#' @param db the database where to search. It should be one among 'PSP', 'dbPTM', 'all'.
#' @details If db = 'all' has been selected, it may happen that the same residue appears in several rows if it is present in different databases.
#' @return Returns a dataframe where each row corresponds to a modifiable residue, or a character message when no sites are found.
#' @author Juan Carlos Aledo
#' @examples su.scan('Q16695', db = 'PSP')
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @seealso meto.scan(), ac.scan(), me.scan(), ub.scan(), p.scan(), gl.scan(), sni.scan(), ni.scan(), ptm.scan(), reg.scan(), dis.scan()
#' @export
su.scan <- function(up_id, db = 'all'){
  ## NOTE(review): documented 'db' choices are 'PSP', 'dbPTM', 'all', yet the
  ## check also admits phospho-specific databases — confirm intent.
  if (! db %in% c('PSP', 'dbPTM','dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all')){
    stop("Please, select an appropriate database!")
  }
  su_db <- NULL  # placeholder; load() below binds the real su_db when the remote file exists
  baseUrl <- "https://github.com/jcaledo/su_db/blob/master/"
  call <- paste(baseUrl, "su_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)  # remote per-protein table
  if (inherits(resp, "try-error")) {
    text <- "Sorry, no modification sites were found for this protein"
    return(text)
  }
  if (db != 'all'){
    su_db <- su_db[which(su_db$database == db),]  # keep only the requested source database
  }
  return(su_db)
}
## ---------------------------------------------------------------- ##
# gl.scan <- function(up_id, db = 'all') #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of OGlcNAc Sites
#' @description Scans the indicated protein in search of glycosylation sites.
#' @usage gl.scan(up_id, db = 'all')
#' @param up_id a character string corresponding to the UniProt ID.
#' @param db the database where to search. It should be one among 'PSP', 'dbPTM', 'all'.
#' @details If db = 'all' has been selected, it may happen that the same residue appears in several rows if it is present in different databases.
#' @return Returns a dataframe where each row corresponds to a modifiable residue.
#' @author Juan Carlos Aledo
#' @examples gl.scan('P08670', db = 'PSP')
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @seealso meto.scan(), ac.scan(), me.scan(), ub.scan(), su.scan(), p.scan(), sni.scan(), ni.scan(), ptm.scan(), reg.scan(), dis.scan()
#' @export
gl.scan <- function(up_id, db = 'all'){
  ## Reject unknown database keywords up front, before any download is
  ## attempted. (The accepted set is shared with the other *.scan()
  ## helpers; per the docs only 'PSP', 'dbPTM' and 'all' are meaningful
  ## for glycosylation.)
  if (!db %in% c('PSP', 'dbPTM', 'dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all')){
    stop("Please, select an appropriate database!")
  }
  gl_db <- NULL # overwritten by load() below; pre-declared to avoid a global
  ## Per-protein site tables are hosted as .Rda files on GitHub; a missing
  ## file means no glycosylation site is known for this protein.
  baseUrl <- "https://github.com/jcaledo/gl_db/blob/master/"
  call <- paste(baseUrl, "gl_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)
  if (inherits(resp, "try-error")) {
    return("Sorry, no modification sites were found for this protein")
  }
  ## Optionally keep only the rows contributed by the requested database.
  if (db != 'all'){
    gl_db <- gl_db[which(gl_db$database == db), ]
  }
  return(gl_db)
}
## ---------------------------------------------------------------- ##
# sni.scan <- function(up_id", db = 'all') #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of S-nitrosylation Sites
#' @description Scans the indicated protein in search of S-nitrosylation sites.
#' @usage sni.scan(up_id, db = 'all')
#' @param up_id a character string corresponding to the UniProt ID.
#' @param db the database where to search. It should be one among 'PSP', 'dbPTM', 'all'.
#' @return Returns a dataframe where each row corresponds to a modifiable residue.
#' @author Juan Carlos Aledo
#' @examples sni.scan('P01009')
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @seealso meto.scan(), ac.scan(), me.scan(), ub.scan(), su.scan(), gl.scan(), p.scan(), ni.scan(), ptm.scan(), reg.scan(), dis.scan()
#' @export
sni.scan <- function(up_id, db = 'all'){
  ## Reject unknown database keywords up front, before any download is
  ## attempted. (The accepted set is shared with the other *.scan()
  ## helpers; per the docs only 'PSP', 'dbPTM' and 'all' are meaningful
  ## for S-nitrosylation.)
  if (!db %in% c('PSP', 'dbPTM', 'dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all')){
    stop("Please, select an appropriate database!")
  }
  sni_db <- NULL # overwritten by load() below; pre-declared to avoid a global
  ## Per-protein site tables are hosted as .Rda files on GitHub; a missing
  ## file means no S-nitrosylation site is known for this protein.
  baseUrl <- "https://github.com/jcaledo/sni_db/blob/master/"
  call <- paste(baseUrl, "sni_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)
  if (inherits(resp, "try-error")) {
    return("Sorry, no modification sites were found for this protein")
  }
  ## Optionally keep only the rows contributed by the requested database.
  if (db != 'all'){
    sni_db <- sni_db[which(sni_db$database == db), ]
  }
  return(sni_db)
}
## ---------------------------------------------------------------- ##
# ni.scan <- function(up_id, db = 'all') #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of Nitration Sites
#' @description Scans the indicated protein in search of nitration sites.
#' @usage ni.scan(up_id, db = 'all')
#' @param up_id a character string corresponding to the UniProt ID.
#' @param db the database where to search. It should be one among 'PSP', 'dbPTM', 'all'.
#' @return Returns a dataframe where each row corresponds to a modified residue.
#' @author Juan Carlos Aledo
#' @examples ni.scan('P05202')
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @seealso meto.scan(), ac.scan(), me.scan(), ub.scan(), su.scan(), gl.scan(), sni.scan(), p.scan(), ptm.scan(), reg.scan(), dis.scan()
#' @export
ni.scan <- function(up_id, db = 'all'){
  ## Reject unknown database keywords up front, before any download is
  ## attempted. (The accepted set is shared with the other *.scan()
  ## helpers; per the docs only 'PSP', 'dbPTM' and 'all' are meaningful
  ## for nitration.)
  if (!db %in% c('PSP', 'dbPTM', 'dbPAF', 'PhosPhAt', 'Phospho.ELM', 'all')){
    stop("Please, select an appropriate database!")
  }
  ni_db <- NULL # overwritten by load() below; pre-declared to avoid a global
  ## Per-protein site tables are hosted as .Rda files on GitHub; a missing
  ## file means no nitration site is known for this protein.
  baseUrl <- "https://github.com/jcaledo/ni_db/blob/master/"
  call <- paste(baseUrl, "ni_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)
  if (inherits(resp, "try-error")) {
    return("Sorry, no modification sites were found for this protein")
  }
  ## Optionally keep only the rows contributed by the requested database.
  if (db != 'all'){
    ni_db <- ni_db[which(ni_db$database == db), ]
  }
  return(ni_db)
}
## ---------------------------------------------------------------- ##
# ptm.scan <- function(up_id, renumerate = TRUE) #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of PTM Sites
#' @description Scans the indicated protein in search of PTM sites.
#' @usage ptm.scan(up_id, renumerate = TRUE)
#' @param up_id a character string corresponding to the UniProt ID.
#' @param renumerate logical, when TRUE the sequence numeration of MetO sites is that given by Uniprot, which may not coincide with that from MetOSite.
#' @details The numerations of the sequences given by UniProt and MetOSite may or may not match. Sometimes one of the sequences corresponds to the precursor protein and the other to the processed mature protein.
#' @return Returns a dataframe where each row corresponds to a residue, and the colums inform about the modifications.
#' @author Juan Carlos Aledo
#' @examples \dontrun{ptm.scan('P01009', renumerate = TRUE)}
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @references Huang et al. Nucleic Acids Res. 2019 47:D298-D308, (PMID: 30418626).
#' @references Ullah et al. Sci. Rep. 2016 6:23534, (PMID: 27010073).
#' @references Durek et al. Nucleic Acids Res.2010 38:D828-D834, (PMID: 19880383).
#' @references Dinkel et al. Nucleic Acids Res. 2011 39:D261-D567 (PMID: 21062810).
#' @seealso meto.scan(), ac.scan(), me.scan(), ub.scan(), su.scan(), gl.scan(), sni.scan(), ni.scan(), p.scan(), reg.scan(), dis.scan()
#' @export
ptm.scan <- function(up_id, renumerate = TRUE){
  ## Flag in column `column` of `output` the residue positions found in
  ## `scan_res`. A scan result is either a dataframe whose 'modification'
  ## entries look like "K48-ub" (residue letter, position, "-" suffix),
  ## or a "Sorry ..." string when no site is known for the protein.
  flag_sites <- function(output, scan_res, column){
    if (!grepl("Sorry", scan_res[1])){
      if (nrow(scan_res) != 0){ # if there is any site
        u <- unique(scan_res$modification)
        for (i in seq_along(u)){
          t <- strsplit(u[i], split = "-")[[1]][-2] # drop the "-mod" suffix
          t <- as.numeric(substring(t, 2))          # drop the residue letter
          output[[column]][t] <- TRUE
        }
      }
    }
    output
  }
  ## One row per residue; one column per modification type plus the
  ## regulatory ('reg') and disease ('dis') annotation columns.
  seq <- get.seq(up_id, as.string = FALSE)[[1]]
  output <- as.data.frame(matrix(rep(NA, length(seq)*14), ncol = 14))
  names(output) <- c('id','n', 'aa', 'meto', 'p', 'ac', 'me', 'ub', 'su', 'gl', 'sni', 'ni', 'reg', 'dis')
  output$id <- up_id
  output$n <- seq_len(nrow(output)) # seq_len(): safe even for empty output
  output$aa <- seq
  ## ----- Sulfoxidation -------- ##
  ## Handled apart: MetOSite positions may need re-mapping to UniProt
  ## numbering (see @details in the roxygen header).
  meto <- meto.scan(up_id)[[1]]
  if (!is.null(nrow(meto))){ # if there is any meto site
    for (i in seq_len(nrow(meto))){ # seq_len(): robust to 0-row results
      t <- as.numeric(meto$met_pos[i])
      if (renumerate){
        t <- renum(up_id, t, from = 'metosite', to = 'uniprot')
      }
      output$meto[t] <- TRUE
    }
  }
  ## ----- PTM types sharing the common "X123-mod" format -------- ##
  output <- flag_sites(output, p.scan(up_id),   'p')   # phosphorylation
  output <- flag_sites(output, ac.scan(up_id),  'ac')  # acetylation
  output <- flag_sites(output, me.scan(up_id),  'me')  # methylation
  output <- flag_sites(output, ub.scan(up_id),  'ub')  # ubiquitination
  output <- flag_sites(output, su.scan(up_id),  'su')  # sumoylation
  output <- flag_sites(output, gl.scan(up_id),  'gl')  # OGlcNAc
  output <- flag_sites(output, sni.scan(up_id), 'sni') # S-nitrosylation
  output <- flag_sites(output, ni.scan(up_id),  'ni')  # nitration
  ## ----- Regulation -------- ##
  ## Handled apart from flag_sites(): methionine entries originate from
  ## MetOSite and may need renumeration as above.
  reg <- reg.scan(up_id)
  if (!grepl("Sorry", reg[1])){
    if (nrow(reg) != 0){ # if there is any site
      u <- unique(reg$modification)
      for (i in seq_along(u)){
        t <- strsplit(u[i], split = "-")[[1]][-2]
        aa <- substr(t, 1, 1)
        t <- as.numeric(substring(t, 2))
        if (aa == 'M' & renumerate){
          t <- renum(up_id, t, from = 'metosite', to = 'uniprot')
        }
        output$reg[t] <- TRUE
      }
    }
  }
  ## ----- Disease -------- ##
  output <- flag_sites(output, dis.scan(up_id), 'dis')
  ## Keep only the residues that carry at least one annotation
  ## (columns 4:14 are the 11 modification/annotation columns).
  output <- output[rowSums(is.na(output[, 4:14])) != 11,]
  ## 'multi': number of distinct PTM types per residue. Columns 4:12
  ## (meto ... ni) only -- 'reg' and 'dis' are annotations, not PTM types.
  o <- as.matrix(output)
  output$multi <- NA
  for (i in seq_len(nrow(o))){ # seq_len(): robust to an empty result
    output$multi[i] <- sum(as.logical(o[i,4:12]), na.rm = TRUE)
  }
  attr(output, 'prot_id') <- up_id
  attr(output, 'prot_length') <- length(seq)
  return(output)
}
## ---------------------------------------------------------------- ##
# reg.scan <- function(up_id) #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of Regulatory PTM Sites
#' @description Scans the indicated protein in search of regulatory PTM sites.
#' @usage reg.scan(up_id)
#' @param up_id a character string corresponding to the UniProt ID.
#' @return Returns a dataframe where each row corresponds to a residue, and the colums inform about the regulatory modifications.
#' @author Juan Carlos Aledo
#' @examples reg.scan('P01009')
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @seealso meto.scan(), ac.scan(), me.scan(), ub.scan(), su.scan(), gl.scan(), sni.scan(), ni.scan(), ptm.scan(), p.scan(), dis.scan()
#' @export
reg.scan <- function(up_id){
  reg_db <- NULL # overwritten by load() below; pre-declared to avoid a global
  ## Per-protein tables of regulatory sites are hosted as .Rda files.
  baseUrl <- "https://github.com/jcaledo/reg_db/blob/master/"
  call <- paste(baseUrl, "reg_db_", up_id, ".Rda?raw=true", sep = "")
  resp <- try(load(url(call)), silent = TRUE)
  if (inherits(resp, "try-error")) {
    return("Sorry, no modification sites were found for this protein")
  }
  sites <- reg_db[which(reg_db$up_id == up_id), ]
  ## Append the methionine sulfoxidation sites that MetOSite flags as
  ## regulatory (reg_id > 2 is used here as the regulatory marker).
  ## Reuse the object already fetched by meto.scan() rather than querying
  ## the service a second time; this also guarantees the row indices are
  ## taken from the same table they are applied to.
  m <- meto.scan(up_id, report = 2)
  tt <- m$Metosite[which(m$Metosite$reg_id > 2), ]
  if (nrow(tt) > 0){
    meto <- as.data.frame(matrix(rep(NA, 4*nrow(tt)), ncol = 4))
    names(meto) <- c('up_id', 'organism', 'modification', 'database')
    for (i in seq_len(nrow(tt))){
      meto$up_id[i] <- up_id
      meto$organism[i] <- m$prot_sp
      ## Encode in the shared "X123-mod" format, e.g. "M35-ox".
      meto$modification[i] <- paste("M", tt$met_pos[i], "-ox", sep = "")
      meto$database[i] <- 'MetOSite'
    }
    sites <- if (nrow(sites) > 0) rbind(sites, meto) else meto
  }
  return(sites)
}
## ---------------------------------------------------------------- ##
# dis.scan <- function(up_id) #
## ---------------------------------------------------------------- ##
#' Scan a Protein in Search of Disease-Related PTM Sites
#' @description Scans the indicated protein in search of disease-related PTM sites.
#' @usage dis.scan(up_id)
#' @param up_id a character string corresponding to the UniProt ID.
#' @return Returns a dataframe where each row corresponds to a residue, and the colums inform about the disease-related modifications.
#' @author Juan Carlos Aledo
#' @examples dis.scan('P31749')
#' @references Hornbeck et al. Nucleic Acids Res. 2019 47:D433-D441, (PMID: 30445427).
#' @seealso meto.scan(), ac.scan(), me.scan(), ub.scan(), su.scan(), gl.scan(), sni.scan(), ni.scan(), ptm.scan(), reg.scan(), p.scan()
#' @export
dis.scan <- function(up_id){
  # The per-protein table of disease-related sites lives on GitHub as an
  # .Rda file; load() binds it in this frame as 'dis_db'.
  dis_db <- NULL
  repo <- "https://github.com/jcaledo/dis_db/blob/master/"
  target <- paste0(repo, "dis_db_", up_id, ".Rda?raw=true")
  loaded <- try(load(url(target)), silent = TRUE)
  # A failed download means no sites are recorded for this protein.
  if (inherits(loaded, "try-error")) {
    return("Sorry, no modification sites were found for this protein")
  }
  dis_db[which(dis_db$up_id == up_id), ]
}
|
plot.AcrossTic <- function (x, X.values, y, grp.cols = c(2, 4), grp.pch = c(16, 17), ...)
{
  # Plot an AcrossTic object: a scatter of the (two-column) X values,
  # colored by the two-level y, with segments joining matched pairs
  # (solid = across-group pairing, dashed = within-group pairing).
  #
  # x        : AcrossTic object (its $X, $y and $matches entries are used).
  # X.values : optional matrix of point coordinates overriding x$X.
  # y        : optional two-level grouping vector overriding x$y.
  # grp.cols : colors for the two groups.
  # grp.pch  : plotting symbols for the two groups.
  # ...      : further arguments passed on to plot().
  #
  # Resolve the coordinate matrix: explicit argument first, then the
  # object's own "X" component; neither present is an error.
  if (missing(X.values)) {
    if (!any(names(x) == "X"))
      stop("Can't plot an AcrossTic object with no X",
           call. = FALSE)
    X <- x$X
  }
  else {
    X <- X.values
  }
  # Resolve the grouping vector the same way.
  if (missing (y)) {
    y <- x$y
    if (is.null(y))
      stop("This AcrossTic object has no 'y' entry", call. = FALSE)
  }
  # Only two-group problems can be drawn.
  if (length(unique(y)) != 2)
    stop("This function only plots 'y' entries with two values",
         call. = FALSE)
  y <- c(0, 1)[as.numeric(as.factor(y))] # recode the two levels as 0/1
  if (ncol(X) == 1)
    stop("One-column X object?", call. = FALSE)
  if (ncol(X) > 2)
    warning("Plotting two columns of a >2-col X object")
  # Scatter plot, with a little extra margin so the legend fits.
  # (The unused `N <- x$nrow.X` of the original was removed.)
  M <- x$matches
  plot(X[, 1], X[, 2], col = grp.cols[y + 1], pch = grp.pch[y + 1],
       xlim = c(min(X[, 1]) - 0.5, max(X[, 1]) + 0.5),
       ylim = c(min(X[, 2]) - 0.5, max(X[, 2]) + 0.2), ...)
  legend("topleft",
         c("Group 1", "Group 2", "Within-group pairing", "Across-group pairing"),
         lty = c(0, 0, 2, 1), col = c(grp.cols, 1, 1),
         pch = c(grp.pch, NA, NA))
  # Draw lines to connect matched pairs: solid/thick for across-group
  # matches, dashed for within-group matches.
  x.from <- X[M[, 1], 1]
  y.from <- X[M[, 1], 2]
  x.to <- X[M[, 2], 1]
  y.to <- X[M[, 2], 2]
  cross.match <- y[M[, 1]] != y[M[, 2]]
  solid.inds <- which(cross.match)
  dashed.inds <- which(!cross.match)
  segments(x.from[solid.inds], y.from[solid.inds],
           x.to[solid.inds], y.to[solid.inds], lwd = 2)
  segments(x.from[dashed.inds], y.from[dashed.inds],
           x.to[dashed.inds], y.to[dashed.inds], lty = 2)
}
|
/R/plot.AcrossTic.R
|
no_license
|
cran/AcrossTic
|
R
| false
| false
| 2,135
|
r
|
plot.AcrossTic <- function (x, X.values, y, grp.cols = c(2, 4), grp.pch = c(16, 17), ...)
{
  # Draw an AcrossTic pairing: points from a two-column X, colored by a
  # two-level y, with segments joining matched observations
  # (solid = across-group, dashed = within-group).
  pts <- if (missing(X.values)) {
    if (!any(names(x) == "X"))
      stop("Can't plot an AcrossTic object with no X", call. = FALSE)
    x$X
  } else X.values
  # Grouping vector: explicit argument wins over the object's own entry.
  if (missing(y)) {
    y <- x$y
    if (is.null(y))
      stop("This AcrossTic object has no 'y' entry", call. = FALSE)
  }
  if (length(unique(y)) != 2)
    stop("This function only plots 'y' entries with two values", call. = FALSE)
  grp <- c(0, 1)[as.numeric(as.factor(y))] # recode the two levels as 0/1
  if (ncol(pts) == 1)
    stop("One-column X object?", call. = FALSE)
  if (ncol(pts) > 2)
    warning("Plotting two columns of a >2-col X object")
  pairing <- x$matches
  N <- x$nrow.X
  # Scatter plot with a little margin so the legend has room.
  plot(pts[, 1], pts[, 2],
       col = grp.cols[grp + 1], pch = grp.pch[grp + 1],
       xlim = range(pts[, 1]) + c(-0.5, 0.5),
       ylim = c(min(pts[, 2]) - 0.5, max(pts[, 2]) + 0.2), ...)
  legend("topleft",
         c("Group 1", "Group 2", "Within-group pairing", "Across-group pairing"),
         lty = c(0, 0, 2, 1), col = c(grp.cols, 1, 1), pch = c(grp.pch, NA, NA))
  # Connect each matched pair with a segment.
  from <- pairing[, 1]
  to <- pairing[, 2]
  crosses <- grp[from] != grp[to]
  draw_links <- function(keep, ...) {
    segments(pts[from[keep], 1], pts[from[keep], 2],
             pts[to[keep], 1], pts[to[keep], 2], ...)
  }
  draw_links(which(crosses), lwd = 2)   # across-group: solid, thick
  draw_links(which(!crosses), lty = 2)  # within-group: dashed
}
|
#' @useDynLib parglm
#' @importFrom Rcpp sourceCpp
NULL
#' @name parglm
#' @title Fitting Generalized Linear Models in Parallel
#'
#' @description Function like \code{\link{glm}} which can make the computation
#' in parallel. The function supports most families listed in \code{\link{family}}.
#' See "\code{vignette("parglm", "parglm")}" for run time examples.
#'
#' @param formula an object of class \code{\link{formula}}.
#' @param family a \code{\link{family}} object.
#' @param data an optional data frame, list or environment containing the variables
#' in the model.
#' @param weights an optional vector of 'prior weights' to be used in the fitting process. Should
#' be \code{NULL} or a numeric vector.
#' @param subset an optional vector specifying a subset of observations to be used in
#' the fitting process.
#' @param na.action a function which indicates what should happen when the data contain \code{NA}s.
#' @param start starting values for the parameters in the linear predictor.
#' @param etastart starting values for the linear predictor. Not supported.
#' @param mustart starting values for the vector of means. Not supported.
#' @param offset this can be used to specify an a priori known component to be
#' included in the linear predictor during fitting.
#' @param control a list of parameters for controlling the fitting process.
#' For parglm.fit this is passed to \code{\link{parglm.control}}.
#' @param model a logical value indicating whether model frame should be included
#' as a component of the returned value.
#' @param x,y For \code{parglm}: logical values indicating whether the response vector
#' and model matrix used in the fitting process should be returned as components of the
#' returned value.
#'
#' For \code{parglm.fit}: \code{x} is a design matrix of dimension \code{n * p}, and
#' \code{y} is a vector of observations of length \code{n}.
#' @param contrasts an optional list. See the \code{contrasts.arg} of
#' \code{\link{model.matrix.default}}.
#' @param intercept logical. Should an intercept be included in the null model?
#' @param ... For \code{parglm}: arguments to be used to form the default \code{control} argument
#' if it is not supplied directly.
#'
#' For \code{parglm.fit}: unused.
#'
#' @return
#' \code{glm} object as returned by \code{\link{glm}} but differs mainly by the \code{qr}
#' element. The \code{qr} element in the object returned by \code{parglm}(\code{.fit}) only has the \eqn{R}
#' matrix from the QR decomposition.
#'
#' @details
#' The current implementation uses \code{min(as.integer(n / p), nthreads)}
#' threads where \code{n} is the number of observations, \code{p} is the
#' number of covariates, and \code{nthreads} is the \code{nthreads} element of
#' the list
#' returned by \code{\link{parglm.control}}. Thus, there is likely little (if
#' any) reduction in computation time if \code{p} is almost equal to \code{n}.
#' The current implementation cannot handle \code{p > n}.
#'
#' @examples
#' # small example from `help('glm')`. Fitting this model in parallel does
#' # not matter as the data set is small
#' clotting <- data.frame(
#' u = c(5,10,15,20,30,40,60,80,100),
#' lot1 = c(118,58,42,35,27,25,21,19,18),
#' lot2 = c(69,35,26,21,18,16,13,12,12))
#' f1 <- glm (lot1 ~ log(u), data = clotting, family = Gamma)
#' f2 <- parglm(lot1 ~ log(u), data = clotting, family = Gamma,
#' control = parglm.control(nthreads = 2L))
#' all.equal(coef(f1), coef(f2))
#'
#' @importFrom stats glm
#' @export
parglm <- function(
  formula, family = gaussian, data, weights, subset,
  na.action, start = NULL, offset, control = list(...),
  contrasts = NULL, model = TRUE, x = FALSE, y = TRUE, ...){
  # Capture the user's call and re-dispatch it to stats::glm with the
  # parallel IRLS routine plugged in as the fitting `method`.
  mc <- match.call()
  mc[[1L]] <- quote(glm)
  mc[["method"]] <- quote(parglm::parglm.fit)
  # Guard for R versions whose glm() lacks a 'singular.ok' argument;
  # when available it is switched off before dispatch.
  if ("singular.ok" %in% names(formals(glm)))
    mc[["singular.ok"]] <- FALSE
  # Evaluate in the caller's frame so `data`, `weights`, etc. resolve there.
  eval(mc, parent.frame())
}
#' @title Auxiliary for Controlling GLM Fitting in Parallel
#'
#' @description
#' Auxiliary function for \code{\link{parglm}} fitting.
#'
#' @param epsilon positive convergence tolerance.
#' @param maxit integer giving the maximal number of IWLS iterations.
#' @param trace logical indicating if output should be produced doing estimation.
#' @param nthreads number of cores to use. You may get the best performance by
#' using your number of physical cores if your data set is sufficiently large.
#' Using the number of physical CPUs/cores may yield the best performance
#' (check your number e.g., by calling \code{parallel::detectCores(logical = FALSE)}).
#' @param block_size number of observation to include in each parallel block.
#' @param method string specifying which method to use. Either \code{"LINPACK"},
#' \code{"LAPACK"}, or \code{"FAST"}.
#'
#' @details
#' The \code{LINPACK} method uses the same QR method as \code{\link{glm.fit}} for the final QR decomposition.
#' This is the \code{dqrdc2} method described in \code{\link[base]{qr}}. All other QR
#' decompositions but the last are made with \code{DGEQP3} from \code{LAPACK}.
#' See Wood, Goude, and Shaw (2015) for details on the QR method.
#'
#' The \code{FAST} method computes the Fisher information and then solves the normal
#' equation. This is faster but less numerically stable.
#'
#' @references
#' Wood, S.N., Goude, Y. & Shaw S. (2015) Generalized additive models for
#' large datasets. Journal of the Royal Statistical Society, Series C
#' 64(1): 139-155.
#'
#' @return
#' A list with components named as the arguments.
#'
#' @examples
#' # use one core
#'clotting <- data.frame(
#' u = c(5,10,15,20,30,40,60,80,100),
#' lot1 = c(118,58,42,35,27,25,21,19,18),
#' lot2 = c(69,35,26,21,18,16,13,12,12))
#' f1 <- parglm(lot1 ~ log(u), data = clotting, family = Gamma,
#' control = parglm.control(nthreads = 1L))
#'
#' # use two cores
#' f2 <- parglm(lot1 ~ log(u), data = clotting, family = Gamma,
#' control = parglm.control(nthreads = 2L))
#' all.equal(coef(f1), coef(f2))
#'
#' @export
parglm.control <- function(
  epsilon = 1e-08, maxit = 25, trace = FALSE, nthreads = 1L,
  block_size = NULL, method = "LINPACK")
{
  # Check every tuning parameter up front so that bad values fail here
  # rather than deep inside the fitting routine.
  if (!is.numeric(epsilon) || epsilon <= 0)
    stop("value of 'epsilon' must be > 0")
  if (!is.numeric(maxit) || maxit <= 0)
    stop("maximum number of iterations must be > 0")
  stopifnot(
    is.numeric(nthreads) && nthreads >= 1,
    is.null(block_size) || (is.numeric(block_size) && block_size >= 1),
    method %in% c("LAPACK", "LINPACK", "FAST"))
  # Gather the (validated) arguments into a named list, in the order of
  # the formals.
  mget(c("epsilon", "maxit", "trace", "nthreads", "block_size", "method"))
}
#' @rdname parglm
#' @importFrom stats gaussian binomial Gamma inverse.gaussian poisson
#' @export
parglm.fit <- function(
  x, y, weights = rep(1, NROW(x)), start = NULL, etastart = NULL,
  mustart = NULL, offset = rep(0, NROW(x)), family = gaussian(),
  control = list(), intercept = TRUE, ...){
  ## ---- input validation -------------------------------------------------
  .check_fam(family)
  stopifnot(nrow(x) == length(y))
  if(NCOL(x) > NROW(x))
    stop("not implemented with more variables than observations")
  ## `etastart`/`mustart` exist only for glm.fit drop-in compatibility;
  ## the backend cannot use them.
  if(!is.null(mustart))
    warning(sQuote("mustart"), " will not be used")
  if(!is.null(etastart))
    warning(sQuote("etastart"), " will not be used")
  #####
  # like in `glm.fit`
  control <- do.call("parglm.control", control)
  x <- as.matrix(x)
  xnames <- dimnames(x)[[2L]]
  ynames <- if(is.matrix(y)) rownames(y) else names(y)
  nobs <- NROW(y)
  nvars <- ncol(x)
  if(nvars == 0)
    stop("not implemented for empty model")
  if(NCOL(y) > 1L)
    stop("Multi column ", sQuote("y"), " is not supported")
  if (is.null(weights))
    weights <- rep.int(1, nobs)
  if (is.null(offset))
    offset <- rep.int(0, nobs)
  ## ---- thread count and block size --------------------------------------
  ## Cap the thread count so that every thread gets at least
  ## `n_min_per_thread` observations to work on.
  n_min_per_thread <- 16L
  n_per_thread <- nrow(x) / control$nthreads
  if(n_per_thread < n_min_per_thread){
    nthreads_new <- max(nrow(x) %/% n_min_per_thread, 1L)
    if(control$nthreads != nthreads_new)
      warning(
        "Too few observation compared to the number of threads. ",
        nthreads_new, " thread(s) will be used instead of ",
        control$nthreads, ".")
    control$nthreads <- nthreads_new
  }
  if(!is.null(control$block_size)){
    block_size <- control$block_size
  } else if(control$nthreads > 1L){
    block_size <- max(nrow(x) / control$nthreads, control$nthreads)
  } else {
    block_size <- nrow(x)
  }
  block_size <- max(block_size, NCOL(x)) # a block must hold at least p rows
  ## ---- fit: parallel IWLS in the compiled backend ------------------------
  use_start <- !is.null(start)
  fit <- parallelglm(
    X = x, Ys = y, family = paste0(family$family, "_", family$link),
    start = if(use_start) start else numeric(ncol(x)), weights = weights,
    offsets = offset, tol = control$epsilon, nthreads = control$nthreads,
    it_max = control$maxit, trace = control$trace, block_size = block_size,
    use_start = use_start, method = control$method)
  #####
  # compute objects as in `glm.fit`
  coef <- drop(fit$coefficients)
  names(coef) <- xnames
  coef_dot <- ifelse(is.na(coef), 0, coef) # dropped terms contribute zero
  eta <- drop(x %*% coef_dot) + offset
  mu <- family$linkinv(eta)
  mu.eta.val <- family$mu.eta(eta)
  good <- (weights > 0) & (mu.eta.val != 0)
  w <- sqrt((weights[good] * mu.eta.val[good]^2) / family$variance(mu)[good])
  wt <- rep.int(0, nobs)
  wt[good] <- w^2
  residuals <- (y - mu) / mu.eta.val # working residuals
  dev <- drop(fit$dev) # should maybe be re-computed here...
  conv <- fit$conv
  iter <- fit$n_iter
  boundary <- FALSE # TODO: not as in `glm.fit`
  Rmat <- fit$R
  dimnames(Rmat) <- list(xnames, xnames)
  names(residuals) <- names(mu) <- names(eta) <- names(wt) <- names(weights) <-
    names(y) <- ynames
  ## Rank-deficiency warning; tolerance as in `Matrix::rankMatrix`. The
  ## check is skipped for LINPACK, which uses the pivoting `dqrdc2`
  ## routine (see the parglm.control docs).
  rtol <- max(dim(x)) * .Machine$double.eps
  rank <- fit$rank
  rdiag <- abs(diag(fit$R))
  if(control$method != "LINPACK" && any(rdiag <= rtol * max(rdiag)))
    warning("Non-full rank problem. Output may not be reliable.")
  #####
  # do roughly as in `glm.fit`
  if (!conv)
    warning("parglm.fit: algorithm did not converge", call. = FALSE)
  wtdmu <-
    if (intercept) sum(weights * y)/sum(weights) else family$linkinv(offset)
  nulldev <- sum(family$dev.resids(y, wtdmu, weights))
  n.ok <- nobs - sum(weights==0)
  nulldf <- n.ok - as.integer(intercept)
  resdf <- n.ok - rank
  #-----------------------------------------------------------------------------
  # calculate AIC
  # we need to initialize n if the family is `binomial`. As of 11/11/2018 two
  # column ys are not allowed so this is easy
  n <- rep(1, nobs)
  aic.model <- family$aic(y, n, mu, weights, dev) + 2*rank
  #-----------------------------------------------------------------------------
  list(coefficients = coef, residuals = residuals, fitted.values = mu,
       # effects = fit$effects, # TODO: add
       R = Rmat, rank = rank,
       qr = structure(c(fit, list(qr = fit$R)), class = "parglmqr"),
       family = family,
       linear.predictors = eta, deviance = dev, aic = aic.model,
       null.deviance = nulldev, iter = iter, weights = wt,
       prior.weights = weights, df.residual = resdf, df.null = nulldf,
       y = y, converged = conv, boundary = boundary)
}
.check_fam <- function(family){
  ## Validate that `family` is a stats::family object whose family/link
  ## combination is implemented by the backend; otherwise stop with an
  ## informative message instead of stopifnot()'s generic one.
  stopifnot(inherits(family, "family"))
  supported <- vapply(parglm_supported(), function(f)
    paste(f$family, f$link), FUN.VALUE = character(1L))
  if(!paste(family$family, family$link) %in% supported)
    stop("family ", sQuote(family$family), " with link ",
         sQuote(family$link), " is not supported", call. = FALSE)
}
parglm_supported <- function()
  # Enumerate every family/link pair the fitting backend implements,
  # as a flat list of stats::family objects.
  c(
    lapply(c("identity", "log", "inverse"), gaussian),
    lapply(c("logit", "probit", "cauchit", "log", "cloglog"), binomial),
    lapply(c("inverse", "identity", "log"), Gamma),
    lapply(c("log", "identity", "sqrt"), poisson),
    lapply(c("1/mu^2", "inverse", "identity", "log"), inverse.gaussian))
#' @importFrom Matrix qr.R
#' @export
qr.R.parglmqr <- function(x, ...){
  # The parglm fit stores only the R factor of the QR decomposition, so
  # this method simply extracts it; extra arguments are ignored.
  x[["R"]]
}
|
/R/parglm.R
|
no_license
|
boennecd/parglm
|
R
| false
| false
| 11,843
|
r
|
#' @useDynLib parglm
#' @importFrom Rcpp sourceCpp
NULL
#' @name parglm
#' @title Fitting Generalized Linear Models in Parallel
#'
#' @description Function like \code{\link{glm}} which can make the computation
#' in parallel. The function supports most families listed in \code{\link{family}}.
#' See "\code{vignette("parglm", "parglm")}" for run time examples.
#'
#' @param formula an object of class \code{\link{formula}}.
#' @param family a \code{\link{family}} object.
#' @param data an optional data frame, list or environment containing the variables
#' in the model.
#' @param weights an optional vector of 'prior weights' to be used in the fitting process. Should
#' be \code{NULL} or a numeric vector.
#' @param subset an optional vector specifying a subset of observations to be used in
#' the fitting process.
#' @param na.action a function which indicates what should happen when the data contain \code{NA}s.
#' @param start starting values for the parameters in the linear predictor.
#' @param etastart starting values for the linear predictor. Not supported.
#' @param mustart starting values for the vector of means. Not supported.
#' @param offset this can be used to specify an a priori known component to be
#' included in the linear predictor during fitting.
#' @param control a list of parameters for controlling the fitting process.
#' For parglm.fit this is passed to \code{\link{parglm.control}}.
#' @param model a logical value indicating whether model frame should be included
#' as a component of the returned value.
#' @param x,y For \code{parglm}: logical values indicating whether the response vector
#' and model matrix used in the fitting process should be returned as components of the
#' returned value.
#'
#' For \code{parglm.fit}: \code{x} is a design matrix of dimension \code{n * p}, and
#' \code{y} is a vector of observations of length \code{n}.
#' @param contrasts an optional list. See the \code{contrasts.arg} of
#' \code{\link{model.matrix.default}}.
#' @param intercept logical. Should an intercept be included in the null model?
#' @param ... For \code{parglm}: arguments to be used to form the default \code{control} argument
#' if it is not supplied directly.
#'
#' For \code{parglm.fit}: unused.
#'
#' @return
#' \code{glm} object as returned by \code{\link{glm}} but differs mainly by the \code{qr}
#' element. The \code{qr} element in the object returned by \code{parglm}(\code{.fit}) only has the \eqn{R}
#' matrix from the QR decomposition.
#'
#' @details
#' The current implementation uses \code{min(as.integer(n / p), nthreads)}
#' threads where \code{n} is the number of observations, \code{p} is the
#' number of covariates, and \code{nthreads} is the \code{nthreads} element of
#' the list
#' returned by \code{\link{parglm.control}}. Thus, there is likely little (if
#' any) reduction in computation time if \code{p} is almost equal to \code{n}.
#' The current implementation cannot handle \code{p > n}.
#'
#' @examples
#' # small example from `help('glm')`. Fitting this model in parallel does
#' # not matter as the data set is small
#' clotting <- data.frame(
#' u = c(5,10,15,20,30,40,60,80,100),
#' lot1 = c(118,58,42,35,27,25,21,19,18),
#' lot2 = c(69,35,26,21,18,16,13,12,12))
#' f1 <- glm (lot1 ~ log(u), data = clotting, family = Gamma)
#' f2 <- parglm(lot1 ~ log(u), data = clotting, family = Gamma,
#' control = parglm.control(nthreads = 2L))
#' all.equal(coef(f1), coef(f2))
#'
#' @importFrom stats glm
#' @export
parglm <- function(
  formula, family = gaussian, data, weights, subset,
  na.action, start = NULL, offset, control = list(...),
  contrasts = NULL, model = TRUE, x = FALSE, y = TRUE, ...){
  # Capture the user's call and re-dispatch it to stats::glm with the
  # parallel IRLS routine plugged in as the fitting `method`.
  cl <- match.call()
  cl[[1L]] <- quote(glm)
  cl["method"] <- list(quote(parglm::parglm.fit))
  # Guard for R versions whose glm() lacks a 'singular.ok' argument.
  # NOTE(review): the rationale for forcing FALSE is not evident from
  # this file -- presumably parglm.fit handles rank deficiency itself;
  # confirm against glm()'s documentation.
  if("singular.ok" %in% names(formals(glm)))
    cl["singular.ok"] <- FALSE
  # Evaluate in the caller's frame so `data`, `weights`, etc. resolve there.
  eval(cl, parent.frame())
}
#' @title Auxiliary for Controlling GLM Fitting in Parallel
#'
#' @description
#' Auxiliary function for \code{\link{parglm}} fitting.
#'
#' @param epsilon positive convergence tolerance.
#' @param maxit integer giving the maximal number of IWLS iterations.
#' @param trace logical indicating if output should be produced doing estimation.
#' @param nthreads number of cores to use. You may get the best performance by
#' using your number of physical cores if your data set is sufficiently large.
#' Using the number of physical CPUs/cores may yield the best performance
#' (check your number e.g., by calling \code{parallel::detectCores(logical = FALSE)}).
#' @param block_size number of observation to include in each parallel block.
#' @param method string specifying which method to use. Either \code{"LINPACK"},
#' \code{"LAPACK"}, or \code{"FAST"}.
#'
#' @details
#' The \code{LINPACK} method uses the same QR method as \code{\link{glm.fit}} for the final QR decomposition.
#' This is the \code{dqrdc2} method described in \code{\link[base]{qr}}. All other QR
#' decompositions but the last are made with \code{DGEQP3} from \code{LAPACK}.
#' See Wood, Goude, and Shaw (2015) for details on the QR method.
#'
#' The \code{FAST} method computes the Fisher information and then solves the normal
#' equation. This is faster but less numerically stable.
#'
#' @references
#' Wood, S.N., Goude, Y. & Shaw S. (2015) Generalized additive models for
#' large datasets. Journal of the Royal Statistical Society, Series C
#' 64(1): 139-155.
#'
#' @return
#' A list with components named as the arguments.
#'
#' @examples
#' # use one core
#'clotting <- data.frame(
#' u = c(5,10,15,20,30,40,60,80,100),
#' lot1 = c(118,58,42,35,27,25,21,19,18),
#' lot2 = c(69,35,26,21,18,16,13,12,12))
#' f1 <- parglm(lot1 ~ log(u), data = clotting, family = Gamma,
#' control = parglm.control(nthreads = 1L))
#'
#' # use two cores
#' f2 <- parglm(lot1 ~ log(u), data = clotting, family = Gamma,
#' control = parglm.control(nthreads = 2L))
#' all.equal(coef(f1), coef(f2))
#'
#' @export
parglm.control <- function(
  epsilon = 1e-08, maxit = 25, trace = FALSE, nthreads = 1L,
  block_size = NULL, method = "LINPACK")
{
  # Validate the IWLS tuning parameters up front, mirroring
  # stats::glm.control, and return them as a named list.
  if (!is.numeric(epsilon) || epsilon <= 0)
    stop("value of 'epsilon' must be > 0")
  if (!is.numeric(maxit) || maxit <= 0)
    stop("maximum number of iterations must be > 0")
  # FIX: require `method` to be a single string. Previously a vector such as
  # c("LAPACK", "FAST") slipped through because `%in%` vectorises and
  # stopifnot() saw an all-TRUE vector, causing confusing failures later
  # when `control$method` was compared as a scalar.
  stopifnot(
    is.numeric(nthreads) && nthreads >= 1,
    is.null(block_size) || (is.numeric(block_size) && block_size >= 1),
    length(method) == 1L && method %in% c("LAPACK", "LINPACK", "FAST"))
  list(epsilon = epsilon, maxit = maxit, trace = trace, nthreads = nthreads,
       block_size = block_size, method = method)
}
#' @rdname parglm
#' @importFrom stats gaussian binomial Gamma inverse.gaussian poisson
#' @export
parglm.fit <- function(
  x, y, weights = rep(1, NROW(x)), start = NULL, etastart = NULL,
  mustart = NULL, offset = rep(0, NROW(x)), family = gaussian(),
  control = list(), intercept = TRUE, ...){
  # Workhorse behind parglm(): mirrors stats::glm.fit but delegates the
  # IWLS iterations to the compiled `parallelglm` routine and then rebuilds
  # the usual glm-style result list from its output.
  .check_fam(family)
  stopifnot(nrow(x) == length(y))
  if(NCOL(x) > NROW(x))
    stop("not implemented with more variables than observations")
  # glm.fit would use these to initialise the IWLS; the parallel backend
  # derives its own starting values, so they are ignored with a warning.
  if(!is.null(mustart))
    warning(sQuote("mustart"), " will not be used")
  if(!is.null(etastart))
    warning(sQuote("etastart"), " will not be used")
  #####
  # like in `glm.fit`
  control <- do.call("parglm.control", control)
  x <- as.matrix(x)
  xnames <- dimnames(x)[[2L]]
  ynames <- if(is.matrix(y)) rownames(y) else names(y)
  conv <- FALSE
  nobs <- NROW(y)
  nvars <- ncol(x)
  EMPTY <- nvars == 0
  if(EMPTY)
    stop("not implemented for empty model")
  if(NCOL(y) > 1L)
    stop("Multi column ", sQuote("y"), " is not supported")
  if (is.null(weights))
    weights <- rep.int(1, nobs)
  if (is.null(offset))
    offset <- rep.int(0, nobs)
  # Cap the thread count so each thread handles at least `n_min_per_thread`
  # observations; tiny per-thread chunks would only add overhead.
  n_min_per_thread <- 16L
  n_per_thread <- nrow(x) / control$nthreads
  if(n_per_thread < n_min_per_thread){
    nthreads_new <- nrow(x) %/% n_min_per_thread
    if(nthreads_new < 1L)
      nthreads_new <- 1L
    if(control$nthreads != nthreads_new)
      warning(
        "Too few observation compared to the number of threads. ",
        nthreads_new, " thread(s) will be used instead of ",
        control$nthreads, ".")
    control$nthreads <- nthreads_new
  }
  # Chunk size for the blocked QR: explicit user override wins, otherwise
  # split evenly across threads (single-threaded runs use one block), and
  # never let a block be narrower than the number of covariates.
  block_size <- if(!is.null(control$block_size))
    control$block_size else
    if(control$nthreads > 1L)
      max(nrow(x) / control$nthreads, control$nthreads) else
      nrow(x)
  block_size <- max(block_size, NCOL(x))
  use_start <- !is.null(start)
  # Compiled IWLS fitter; the family/link pair is matched by string id.
  fit <- parallelglm(
    X = x, Ys = y, family = paste0(family$family, "_", family$link),
    start = if(use_start) start else numeric(ncol(x)), weights = weights,
    offsets = offset, tol = control$epsilon, nthreads = control$nthreads,
    it_max = control$maxit, trace = control$trace, block_size = block_size,
    use_start = use_start, method = control$method)
  #####
  # compute objects as in `glm.fit`
  coef <- drop(fit$coefficients)
  names(coef) <- xnames
  # treat NA coefficients as zero when forming the linear predictor
  coef_dot <- ifelse(is.na(coef), 0, coef)
  eta <- drop(x %*% coef_dot) + offset
  good <- weights > 0
  mu <- family$linkinv(eta)
  mu.eta.val <- family$mu.eta(eta)
  good <- (weights > 0) & (mu.eta.val != 0)
  # working weights on the link scale, as in the IWLS update of glm.fit
  w <- sqrt((weights[good] * mu.eta.val[good]^2) / family$variance(mu)[good])
  wt <- rep.int(0, nobs)
  wt[good] <- w^2
  # working residuals
  residuals <- (y - mu) / mu.eta.val
  dev <- drop(fit$dev) # should maybe re-compute...
  conv <- fit$conv
  iter <- fit$n_iter
  boundary <- FALSE # TODO: not as in `glm.fit`
  Rmat <- fit$R
  dimnames(Rmat) <- list(xnames, xnames)
  names(residuals) <- names(mu) <- names(eta) <- names(wt) <- names(weights) <-
    names(y) <- ynames
  # rank-deficiency detection: do as in `Matrix::rankMatrix`
  rtol <- max(dim(x)) * .Machine$double.eps
  fit$rank <- rank <- fit$rank
  rdiag <- abs(diag(fit$R))
  # LINPACK pivots rank-deficient columns out itself; for the other methods a
  # near-zero diagonal of R signals that the solution may be unreliable.
  if(control$method != "LINPACK" && any(rdiag <= rtol * max(rdiag)))
    warning("Non-full rank problem. Output may not be reliable.")
  #####
  # do roughly as in `glm.fit`
  if (!conv)
    warning("parglm.fit: algorithm did not converge", call. = FALSE)
  # null model mean: weighted grand mean with an intercept, otherwise the
  # offset mapped through the inverse link
  wtdmu <-
    if (intercept) sum(weights * y)/sum(weights) else family$linkinv(offset)
  nulldev <- sum(family$dev.resids(y, wtdmu, weights))
  n.ok <- nobs - sum(weights==0)
  nulldf <- n.ok - as.integer(intercept)
  rank <- fit$rank
  resdf <- n.ok - rank
  #-----------------------------------------------------------------------------
  # calculate AIC
  # we need to initialize n if the family is `binomial`. As of 11/11/2018 two
  # column ys are not allowed so this is easy
  n <- rep(1, nobs)
  aic.model <- family$aic(y, n, mu, weights, dev) + 2*rank
  #-----------------------------------------------------------------------------
  # Assemble a glm-like fit object. Only the R factor of the QR decomposition
  # is kept; the "parglmqr" class lets Matrix::qr.R() dispatch on it.
  list(coefficients = coef, residuals = residuals, fitted.values = mu,
       # effects = fit$effects, # TODO: add
       R = Rmat, rank = rank,
       qr = structure(c(fit, list(qr = fit$R)), class = "parglmqr"),
       family = family,
       linear.predictors = eta, deviance = dev, aic = aic.model,
       null.deviance = nulldev, iter = iter, weights = wt,
       prior.weights = weights, df.residual = resdf, df.null = nulldf,
       y = y, converged = conv, boundary = boundary)
}
.check_fam <- function(family){
  # Internal guard: `family` must be a proper family object and its
  # (family, link) pair must be one the compiled fitter implements.
  supported <- vapply(parglm_supported(),
                      function(fam) paste(fam$family, fam$link),
                      character(1L))
  stopifnot(
    inherits(family, "family"),
    paste(family$family, family$link) %in% supported)
}
parglm_supported <- function(){
  # Enumerate every family/link combination implemented by the C++ fitting
  # routine; .check_fam() matches candidate families against this list.
  out <- list(
    gaussian(link = "identity"), gaussian(link = "log"),
    gaussian(link = "inverse"),
    binomial(link = "logit"), binomial(link = "probit"),
    binomial(link = "cauchit"), binomial(link = "log"),
    binomial(link = "cloglog"),
    Gamma(link = "inverse"), Gamma(link = "identity"), Gamma(link = "log"),
    poisson(link = "log"), poisson(link = "identity"), poisson(link = "sqrt"),
    inverse.gaussian(link = "1/mu^2"), inverse.gaussian(link = "inverse"),
    inverse.gaussian(link = "identity"), inverse.gaussian(link = "log"))
  out
}
#' @importFrom Matrix qr.R
#' @export
qr.R.parglmqr <- function(x, ...){
  # S3 method for Matrix::qr.R on "parglmqr" objects: the parallel fit only
  # retains the R factor of the QR decomposition, so hand it back directly.
  Rmat <- x$R
  Rmat
}
|
#' Find PWS within the cities
#'
#' @description Find PWS within the cities
#'
#' @param myKey key to access Wunderground API
#' @param nearbyCities a vector of city names formatted as {state}/{city} for US cities and {country}/{city} for cities in other countries
#' @param maxPerCity a numeric scalar that sets a limit on the maximum number of PWSs returned for a city that had data returned in the wunderground call
#' @param sleeptime a numeric scalar that specifies how many seconds to wait between calls of retrieving PWS information
#'
#' @return a tibble describing all PWS, which will include an id, latitude, longitude, etc.
#'
#' @export
#' @importFrom jsonlite fromJSON
#' @importFrom tibble as_tibble
#' @importFrom dplyr union_all distinct n
#'
#'
#' @examples
#' findPWS('407e6151ab7de146', c("CA/San_Francisco","CA/Daly_City"),sleeptime=1)
##
## begin mcaldwel code
##
findPWS <- function(myKey, nearbyCities, maxPerCity=5, sleeptime=10) {
  # --- argument validation ---------------------------------------------------
  if( missing(myKey) ) {
    stop("PLEASE PROVIDE YOUR KEY...")
  }
  # FIX: use scalar `||`/`&&` instead of vectorised `|`/`&` so the checks
  # short-circuit and a length != 1 argument never reaches if() as a vector
  # condition (an error in modern R, hiding the intended message).
  if(
    length(maxPerCity) != 1
    || ( !is.na(maxPerCity) && !is.numeric(maxPerCity) )
  ){
    stop("maxPerCity MUST BE A SCALAR OF LENGTH 1 EVEN IF NA, & NUMERIC SCALAR IF NOT NA")
  }
  if(
    length(sleeptime) != 1
    || is.na(sleeptime)
    || !is.numeric(sleeptime)
  ){
    stop("sleeptime MUST BE A NUMERIC SCALAR OF LENGTH 1 AND NOT NA")
  }
  wuApiPrefix <- "http://api.wunderground.com/api/"
  wuFormat <- ".json"
  allPws <- NULL
  # silence R CMD check NOTE; the citseq column is created within the loop
  citseq <- NULL
  # FIX: seq_along() instead of 1:length() so an empty city vector iterates
  # zero times rather than producing c(1, 0).
  for (i in seq_along(nearbyCities)) {
    callAddress <-
      paste0(wuApiPrefix,
             myKey,
             '/geolookup/q/',
             nearbyCities[i],
             wuFormat)
    callData <- jsonlite::fromJSON(callAddress)
    # throttle between API calls to respect rate limits
    Sys.sleep(sleeptime)
    # only capture results when the nested pws element is actually present;
    # some calls will not return good data
    if(
      exists("location", where = callData)
      && exists("nearby_weather_stations", where = callData$location)
      && exists("pws", where = callData$location$nearby_weather_stations)
      && exists("station", where = callData$location$nearby_weather_stations$pws)
    ) {
      callDataPws <-
        tibble::as_tibble(callData$location$nearby_weather_stations$pws$station)
      # keep the original city sequence in case we want to check back against it
      callDataPws$citseq <- i
      if( is.null(allPws) ){
        allPws <- callDataPws
      }
      else{
        allPws <- dplyr::union_all(allPws, callDataPws)
      }
    }
  }
  # FIX: if no city returned usable data, return NULL with a warning instead
  # of failing below with a cryptic subscript error on NULL.
  if( is.null(allPws) ){
    warning("No PWS data returned for any of the requested cities.")
    return(NULL)
  }
  # NOTE(review): columns 8:9 are assumed to be the latitude/longitude columns
  # of the wunderground payload -- confirm against the API response layout.
  colNum <- c(8:9)
  allPws[colNum] <- lapply(allPws[colNum], as.numeric)
  #reduce to distinct pws due to multiple returned in each city
  allPws <- dplyr::distinct(allPws, id, .keep_all = TRUE)
  #now get a true remaining sequence on pws within each city
  allPws <- dplyr::mutate(
    dplyr::group_by(allPws, citseq)
    ,pwsNo = seq(n())
  )
  #limit if specified
  if( !is.na(maxPerCity) ){
    allPws <- base::subset(allPws,pwsNo <= maxPerCity)
  }
  allPws
}
|
/weatherProject/R/findPWS.R
|
no_license
|
DrRoad/Wunderground-Weather-Project
|
R
| false
| false
| 3,113
|
r
|
#' Find PWS within the cities
#'
#' @description Find PWS within the cities
#'
#' @param myKey key to access Wunderground API
#' @param nearbyCities a vector of city names formatted as {state}/{city} for US cities and {country}/{city} for cities in other countries
#' @param maxPerCity a numeric scalar that sets a limit on the maximum number of PWSs returned for a city that had data returned in the wunderground call
#' @param sleeptime a numeric scalar that specifies how many seconds to wait between calls of retrieving PWS information
#'
#' @return a tibble describing all PWS, which will include an id, latitude, longitude, etc.
#'
#' @export
#' @importFrom jsonlite fromJSON
#' @importFrom tibble as_tibble
#' @importFrom dplyr union_all distinct n
#'
#'
#' @examples
#' findPWS('407e6151ab7de146', c("CA/San_Francisco","CA/Daly_City"),sleeptime=1)
##
## begin mcaldwel code
##
findPWS <- function(myKey, nearbyCities, maxPerCity=5, sleeptime=10) {
  # --- argument validation ---------------------------------------------------
  if( missing(myKey) ) {
    stop("PLEASE PROVIDE YOUR KEY...")
  }
  # FIX: use scalar `||`/`&&` instead of vectorised `|`/`&` so the checks
  # short-circuit and a length != 1 argument never reaches if() as a vector
  # condition (an error in modern R, hiding the intended message).
  if(
    length(maxPerCity) != 1
    || ( !is.na(maxPerCity) && !is.numeric(maxPerCity) )
  ){
    stop("maxPerCity MUST BE A SCALAR OF LENGTH 1 EVEN IF NA, & NUMERIC SCALAR IF NOT NA")
  }
  if(
    length(sleeptime) != 1
    || is.na(sleeptime)
    || !is.numeric(sleeptime)
  ){
    stop("sleeptime MUST BE A NUMERIC SCALAR OF LENGTH 1 AND NOT NA")
  }
  wuApiPrefix <- "http://api.wunderground.com/api/"
  wuFormat <- ".json"
  allPws <- NULL
  # silence R CMD check NOTE; the citseq column is created within the loop
  citseq <- NULL
  # FIX: seq_along() instead of 1:length() so an empty city vector iterates
  # zero times rather than producing c(1, 0).
  for (i in seq_along(nearbyCities)) {
    callAddress <-
      paste0(wuApiPrefix,
             myKey,
             '/geolookup/q/',
             nearbyCities[i],
             wuFormat)
    callData <- jsonlite::fromJSON(callAddress)
    # throttle between API calls to respect rate limits
    Sys.sleep(sleeptime)
    # only capture results when the nested pws element is actually present;
    # some calls will not return good data
    if(
      exists("location", where = callData)
      && exists("nearby_weather_stations", where = callData$location)
      && exists("pws", where = callData$location$nearby_weather_stations)
      && exists("station", where = callData$location$nearby_weather_stations$pws)
    ) {
      callDataPws <-
        tibble::as_tibble(callData$location$nearby_weather_stations$pws$station)
      # keep the original city sequence in case we want to check back against it
      callDataPws$citseq <- i
      if( is.null(allPws) ){
        allPws <- callDataPws
      }
      else{
        allPws <- dplyr::union_all(allPws, callDataPws)
      }
    }
  }
  # FIX: if no city returned usable data, return NULL with a warning instead
  # of failing below with a cryptic subscript error on NULL.
  if( is.null(allPws) ){
    warning("No PWS data returned for any of the requested cities.")
    return(NULL)
  }
  # NOTE(review): columns 8:9 are assumed to be the latitude/longitude columns
  # of the wunderground payload -- confirm against the API response layout.
  colNum <- c(8:9)
  allPws[colNum] <- lapply(allPws[colNum], as.numeric)
  #reduce to distinct pws due to multiple returned in each city
  allPws <- dplyr::distinct(allPws, id, .keep_all = TRUE)
  #now get a true remaining sequence on pws within each city
  allPws <- dplyr::mutate(
    dplyr::group_by(allPws, citseq)
    ,pwsNo = seq(n())
  )
  #limit if specified
  if( !is.na(maxPerCity) ){
    allPws <- base::subset(allPws,pwsNo <= maxPerCity)
  }
  allPws
}
|
## File Name: frm_formula_extract_terms.R
## File Version: 0.10
frm_formula_extract_terms <- function(formula)
{
    # Decompose `formula` into its response, right-hand side, variable names
    # and terms object; flag whether the RHS contains transformed terms.
    dv_form <- formula[[2]]
    iv_form <- formula[[3]]
    h1 <- all.vars(formula)
    h2 <- stats::terms(formula)
    terms_formula_transform <- FALSE
    X_factors <- colnames( attr(h2,"factors") )
    if ( length(X_factors) > 0 ){
        # FIX: the original assigned to the misspelled dead local
        # `terms_fomula_transform`, so the flag could never change. When some
        # term label is not a raw variable name (e.g. log(x), I(x^2)), the
        # formula contains transformed terms, so set the flag to TRUE.
        if ( mean( X_factors %in% h1) < 1 ){
            terms_formula_transform <- TRUE
        }
    } else {
        X_factors <- NULL
    }
    res <- list( dv_form=dv_form, iv_form=iv_form, all_vars=h1,
                terms_formula=h2,
                terms_formula_transform=terms_formula_transform,
                X_factors=X_factors )
    # first variable is the dependent variable, the rest are covariates
    res$dv_vars <- h1[1]
    res$iv_vars <- h1[-1]
    return(res)
}
|
/R/frm_formula_extract_terms.R
|
no_license
|
cran/mdmb
|
R
| false
| false
| 790
|
r
|
## File Name: frm_formula_extract_terms.R
## File Version: 0.10
frm_formula_extract_terms <- function(formula)
{
    # Decompose `formula` into its response, right-hand side, variable names
    # and terms object; flag whether the RHS contains transformed terms.
    dv_form <- formula[[2]]
    iv_form <- formula[[3]]
    h1 <- all.vars(formula)
    h2 <- stats::terms(formula)
    terms_formula_transform <- FALSE
    X_factors <- colnames( attr(h2,"factors") )
    if ( length(X_factors) > 0 ){
        # FIX: the original assigned to the misspelled dead local
        # `terms_fomula_transform`, so the flag could never change. When some
        # term label is not a raw variable name (e.g. log(x), I(x^2)), the
        # formula contains transformed terms, so set the flag to TRUE.
        if ( mean( X_factors %in% h1) < 1 ){
            terms_formula_transform <- TRUE
        }
    } else {
        X_factors <- NULL
    }
    res <- list( dv_form=dv_form, iv_form=iv_form, all_vars=h1,
                terms_formula=h2,
                terms_formula_transform=terms_formula_transform,
                X_factors=X_factors )
    # first variable is the dependent variable, the rest are covariates
    res$dv_vars <- h1[1]
    res$iv_vars <- h1[-1]
    return(res)
}
|
\name{syn.survctree}
\alias{syn.survctree}
%\alias{syn.survctree.proper}
\title{Synthesis of survival time by classification and regression trees (CART)}
\description{
Generates synthetic event indicator and time to event data using
classification and regression trees (without or with bootstrap).
}
\usage{
syn.survctree(y, yevent, x, xp, proper = FALSE, minbucket = 5, ...)
}
\arguments{
\item{y}{a vector of length \code{n} with original time data.}
\item{yevent}{a vector of length \code{n} with original event indicator data.}
\item{x}{a matrix (\code{n} x \code{p}) of original covariates.}
\item{xp}{a matrix (\code{k} x \code{p}) of synthesised covariates.}
\item{proper}{for proper synthesis (\code{proper = TRUE}) a CART
model is fitted to a bootstrapped sample of the original data.}
\item{minbucket}{the minimum number of observations in
any terminal node. See \code{\link{ctree_control}}
for details.}
\item{\dots}{additional parameters passed to \code{\link{ctree}}.}
}
\details{
The procedure for synthesis by a CART model is as follows:
\enumerate{\item Fit a tree-structured survival model by
binary recursive partitioning (the terminal nodes include
Kaplan-Meier estimates of the survival time).
\item For each \code{xp} find the terminal node. \item Randomly
draw a donor from the members of the node and take the observed
value of \code{yevent} and \code{y} from that draw as the
synthetic values.}
NOTE that when the function is called by setting elements of method in
\code{syn()} to \code{"survctree"}, the parameter \code{minbucket}
can be supplied to \code{syn()} as e.g. \code{survctree.minbucket}.
}
\value{
A list with the following components:
\item{syn.time}{a vector of length \code{k} with synthetic time values.}
\item{syn.event}{a vector of length \code{k} with synthetic event indicator values.}
\item{fit}{the fitted model which is an item of class \code{ctree.object}.}
}
%references{...}
\seealso{
\code{\link{syn}}, \code{\link{syn.ctree}}
}
\keyword{datagen}
|
/man/syn.survctree.Rd
|
no_license
|
xfang-cloud/SynthpopDebug
|
R
| false
| false
| 2,134
|
rd
|
\name{syn.survctree}
\alias{syn.survctree}
%\alias{syn.survctree.proper}
\title{Synthesis of survival time by classification and regression trees (CART)}
\description{
Generates synthetic event indicator and time to event data using
classification and regression trees (without or with bootstrap).
}
\usage{
syn.survctree(y, yevent, x, xp, proper = FALSE, minbucket = 5, ...)
}
\arguments{
\item{y}{a vector of length \code{n} with original time data.}
\item{yevent}{a vector of length \code{n} with original event indicator data.}
\item{x}{a matrix (\code{n} x \code{p}) of original covariates.}
\item{xp}{a matrix (\code{k} x \code{p}) of synthesised covariates.}
\item{proper}{for proper synthesis (\code{proper = TRUE}) a CART
model is fitted to a bootstrapped sample of the original data.}
\item{minbucket}{the minimum number of observations in
any terminal node. See \code{\link{ctree_control}}
for details.}
\item{\dots}{additional parameters passed to \code{\link{ctree}}.}
}
\details{
The procedure for synthesis by a CART model is as follows:
\enumerate{\item Fit a tree-structured survival model by
binary recursive partitioning (the terminal nodes include
Kaplan-Meier estimates of the survival time).
\item For each \code{xp} find the terminal node. \item Randomly
draw a donor from the members of the node and take the observed
value of \code{yevent} and \code{y} from that draw as the
synthetic values.}
NOTE that when the function is called by setting elements of method in
\code{syn()} to \code{"survctree"}, the parameter \code{minbucket}
can be supplied to \code{syn()} as e.g. \code{survctree.minbucket}.
}
\value{
A list with the following components:
\item{syn.time}{a vector of length \code{k} with synthetic time values.}
\item{syn.event}{a vector of length \code{k} with synthetic event indicator values.}
\item{fit}{the fitted model which is an item of class \code{ctree.object}.}
}
%references{...}
\seealso{
\code{\link{syn}}, \code{\link{syn.ctree}}
}
\keyword{datagen}
|
###############################################################################
# TANOVA: test.R
#
# TODO: Add comment
#
# Author: Weihong
# Mar 20, 2009, 2009
###############################################################################
# Build the list of row-index groups defined by crossing the levels of the
# two grouping vectors f1 and f2. Element (i-1)*n2 + j holds the indices
# where f1 == i and f2 == j.
# NOTE(review): the comparison `f1 == i` matches *values*, not factor levels,
# so the groups are only correct when f1/f2 are coded as integers 1..n --
# confirm that callers supply them in that form.
group.ix <- function(f1, f2){
  n1 <- nlevels(as.factor(f1))
  n2 <- nlevels(as.factor(f2))
  # preallocate the full list instead of growing it element by element
  ix <- vector("list", n1 * n2)
  k <- 1
  # seq_len() instead of 1:n: iterates zero times when a grouping is empty
  for (i in seq_len(n1)){
    for (j in seq_len(n2)){
      ix[[k]] <- which(f1 == i & f2 == j)
      k <- k + 1
    }
  }
  return(ix)
}
|
/TANOVA/R/group.ix.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 463
|
r
|
###############################################################################
# TANOVA: test.R
#
# TODO: Add comment
#
# Author: Weihong
# Mar 20, 2009, 2009
###############################################################################
# Build the list of row-index groups defined by crossing the levels of the
# two grouping vectors f1 and f2. Element (i-1)*n2 + j holds the indices
# where f1 == i and f2 == j.
# NOTE(review): the comparison `f1 == i` matches *values*, not factor levels,
# so the groups are only correct when f1/f2 are coded as integers 1..n --
# confirm that callers supply them in that form.
group.ix <- function(f1, f2){
  n1 <- nlevels(as.factor(f1))
  n2 <- nlevels(as.factor(f2))
  # preallocate the full list instead of growing it element by element
  ix <- vector("list", n1 * n2)
  k <- 1
  # seq_len() instead of 1:n: iterates zero times when a grouping is empty
  for (i in seq_len(n1)){
    for (j in seq_len(n2)){
      ix[[k]] <- which(f1 == i & f2 == j)
      k <- k + 1
    }
  }
  return(ix)
}
|
library(magrittr)
library(tidyverse)
# Import -------------
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace; fine for
# a batch report script, risky when sourced interactively.
rm(list = ls())
# analysis cohort: a nested tibble with one row per patient; the list-columns
# data.biop / data.fhx hold per-patient biopsy and family-history sub-tables
nestdb <- readRDS("rdsobj/nestdb.RDS")
# record the checksum of the input so results can be tied to a data version
nestdb_md5 <- tools::md5sum("rdsobj/nestdb.RDS")
coln <- read.csv('reference/col_names.csv', header = FALSE)
# Analysis -------------------
## Age(mean,sd)
age <- nestdb %>%
  summarise(
    mean = mean(AgeDx,na.rm = TRUE),
    sd = sd(AgeDx,na.rm = TRUE)
  )
## Ethnicity(n,%)
eth <- nestdb %>%
  group_by(Eth) %>%
  summarise(n = n()) %>%
  mutate(pct = n/sum(n) * 100)
## First degree fam-relative(n,%)
# flag patients with at least one first-degree relative in the family history
# (NOTE(review): `T`/`F` literals kept as-is; prefer TRUE/FALSE)
fmhx <- nestdb %>%
  mutate(famcount = data.fhx %>%
           map_int(~ .x$FamHx %in% c("son","sibling","brother","parent","father") %>% sum()) %>% {ifelse(. > 0,T,F)}) %>%
  group_by(famcount) %>%
  summarise(n = n()) %>%
  mutate(pct = n/sum(n) * 100)
## First PSA (n, %)
# median and IQR bounds of each patient's earliest biopsy PSA
psa <- nestdb %>%
  mutate(firstpsa = data.biop %>%
           map_dbl(~ .x %>% slice(which.min(Biopsy.Dat)) %>% .$Biopsy.PSA)) %>%
  summarise(
    med = median(firstpsa, na.rm=TRUE),
    firstq = quantile(firstpsa, na.rm = TRUE) %>% .[[2]],
    thirdq = quantile(firstpsa, na.rm = TRUE) %>% .[[4]]
  )
##GGG on first biopsy (n,%)
# Drop leading rows whose Biopsy.GGG is NA so that the downstream
# slice(which.min(Biopsy.Dat)) never picks a biopsy without a recorded grade.
# FIX: guard on nrow() -- the original looped forever when every row had an
# NA Biopsy.GGG, because df[1, "Biopsy.GGG"] of an empty frame is NA and the
# condition could never turn FALSE. drop = FALSE keeps the result a data
# frame even for single-column inputs.
# NOTE(review): this masks tidyr::replace_na() for the rest of the script;
# consider renaming.
replace_na <- function(df){
  while (nrow(df) > 0 && is.na(df[1, "Biopsy.GGG"])) {
    df <- df[-1, , drop = FALSE]
  }
  return(df)
}
# Grade group (GGG) on each patient's first biopsy with a recorded grade
gggfb <- nestdb %>%
  mutate(firstggg = data.biop %>%
           map_int(~ .x %>% replace_na() %>% slice(which.min(Biopsy.Dat)) %>% .$Biopsy.GGG)) %>%
  group_by(firstggg) %>%
  summarise(n = n()) %>%
  mutate(pct = n/sum(n) * 100)
## Of positive cores (n,%)
cores <- nestdb %>%
  mutate(firstc = data.biop %>%
           map_int(~ .x %>% slice(which.min(Biopsy.Dat)) %>% .$Biopsy.PosCores)) %>%
  group_by(firstc) %>%
  summarise(n = n()) %>%
  mutate(pct = n/sum(n) * 100)
## PSA Density
# PSA density = PSA / prostate volume at first biopsy, dichotomised at 0.15
dens <- nestdb %>%
  mutate(firstden = data.biop %>%
           map_dbl(~ .x %>% mutate(Biopsy.PSADens = Biopsy.PSA/Biopsy.Vol) %>% slice(which.min(Biopsy.Dat)) %>% .$Biopsy.PSADens)) %>%
  mutate(gate = case_when(
    firstden < 0.15 ~ "low",
    firstden >= 0.15 ~ "high"
  )) %>%
  group_by(gate) %>%
  summarise(n = n()) %>%
  mutate(pct = n/sum(n) * 100)
# Create a list of all the subtables ---------------------
# FIX: corrected the user-facing label typo "postivie" -> "positive"
subtbl_bl <- list("Age at diagnosis" = age,
                  "Ethnicity" = eth,
                  "Family Hx" = fmhx,
                  "First PSA" = psa,
                  "GGG on first biopsy" = gggfb,
                  "Number of positive cores on first biopsy" = cores,
                  "PSA density at time of first biopsy" = dens) %>%
  map(~ .x %>% mutate_if(is.numeric, ~ round(.,2)))
print(subtbl_bl)
|
/BLDescr.R
|
no_license
|
kalsajana/prostatecancer
|
R
| false
| false
| 2,604
|
r
|
library(magrittr)
library(tidyverse)
# Import -------------
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace; fine for
# a batch report script, risky when sourced interactively.
rm(list = ls())
# analysis cohort: a nested tibble with one row per patient; the list-columns
# data.biop / data.fhx hold per-patient biopsy and family-history sub-tables
nestdb <- readRDS("rdsobj/nestdb.RDS")
# record the checksum of the input so results can be tied to a data version
nestdb_md5 <- tools::md5sum("rdsobj/nestdb.RDS")
coln <- read.csv('reference/col_names.csv', header = FALSE)
# Analysis -------------------
## Age(mean,sd)
age <- nestdb %>%
  summarise(
    mean = mean(AgeDx,na.rm = TRUE),
    sd = sd(AgeDx,na.rm = TRUE)
  )
## Ethnicity(n,%)
eth <- nestdb %>%
  group_by(Eth) %>%
  summarise(n = n()) %>%
  mutate(pct = n/sum(n) * 100)
## First degree fam-relative(n,%)
# flag patients with at least one first-degree relative in the family history
# (NOTE(review): `T`/`F` literals kept as-is; prefer TRUE/FALSE)
fmhx <- nestdb %>%
  mutate(famcount = data.fhx %>%
           map_int(~ .x$FamHx %in% c("son","sibling","brother","parent","father") %>% sum()) %>% {ifelse(. > 0,T,F)}) %>%
  group_by(famcount) %>%
  summarise(n = n()) %>%
  mutate(pct = n/sum(n) * 100)
## First PSA (n, %)
# median and IQR bounds of each patient's earliest biopsy PSA
psa <- nestdb %>%
  mutate(firstpsa = data.biop %>%
           map_dbl(~ .x %>% slice(which.min(Biopsy.Dat)) %>% .$Biopsy.PSA)) %>%
  summarise(
    med = median(firstpsa, na.rm=TRUE),
    firstq = quantile(firstpsa, na.rm = TRUE) %>% .[[2]],
    thirdq = quantile(firstpsa, na.rm = TRUE) %>% .[[4]]
  )
##GGG on first biopsy (n,%)
# Drop leading rows whose Biopsy.GGG is NA so that the downstream
# slice(which.min(Biopsy.Dat)) never picks a biopsy without a recorded grade.
# FIX: guard on nrow() -- the original looped forever when every row had an
# NA Biopsy.GGG, because df[1, "Biopsy.GGG"] of an empty frame is NA and the
# condition could never turn FALSE. drop = FALSE keeps the result a data
# frame even for single-column inputs.
# NOTE(review): this masks tidyr::replace_na() for the rest of the script;
# consider renaming.
replace_na <- function(df){
  while (nrow(df) > 0 && is.na(df[1, "Biopsy.GGG"])) {
    df <- df[-1, , drop = FALSE]
  }
  return(df)
}
# Grade group (GGG) on each patient's first biopsy with a recorded grade
gggfb <- nestdb %>%
  mutate(firstggg = data.biop %>%
           map_int(~ .x %>% replace_na() %>% slice(which.min(Biopsy.Dat)) %>% .$Biopsy.GGG)) %>%
  group_by(firstggg) %>%
  summarise(n = n()) %>%
  mutate(pct = n/sum(n) * 100)
## Of positive cores (n,%)
cores <- nestdb %>%
  mutate(firstc = data.biop %>%
           map_int(~ .x %>% slice(which.min(Biopsy.Dat)) %>% .$Biopsy.PosCores)) %>%
  group_by(firstc) %>%
  summarise(n = n()) %>%
  mutate(pct = n/sum(n) * 100)
## PSA Density
# PSA density = PSA / prostate volume at first biopsy, dichotomised at 0.15
dens <- nestdb %>%
  mutate(firstden = data.biop %>%
           map_dbl(~ .x %>% mutate(Biopsy.PSADens = Biopsy.PSA/Biopsy.Vol) %>% slice(which.min(Biopsy.Dat)) %>% .$Biopsy.PSADens)) %>%
  mutate(gate = case_when(
    firstden < 0.15 ~ "low",
    firstden >= 0.15 ~ "high"
  )) %>%
  group_by(gate) %>%
  summarise(n = n()) %>%
  mutate(pct = n/sum(n) * 100)
# Create a list of all the subtables ---------------------
# FIX: corrected the user-facing label typo "postivie" -> "positive"
subtbl_bl <- list("Age at diagnosis" = age,
                  "Ethnicity" = eth,
                  "Family Hx" = fmhx,
                  "First PSA" = psa,
                  "GGG on first biopsy" = gggfb,
                  "Number of positive cores on first biopsy" = cores,
                  "PSA density at time of first biopsy" = dens) %>%
  map(~ .x %>% mutate_if(is.numeric, ~ round(.,2)))
print(subtbl_bl)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lgb.unloader.R
\name{lgb.unloader}
\alias{lgb.unloader}
\title{Remove lightgbm and its objects from an environment}
\usage{
lgb.unloader(restore = TRUE, wipe = FALSE, envir = .GlobalEnv)
}
\arguments{
\item{restore}{Whether to reload \code{LightGBM} immediately after detaching from R.
Defaults to \code{TRUE} which means automatically reload \code{LightGBM} once
unloading is performed.}
\item{wipe}{Whether to wipe all \code{lgb.Dataset} and \code{lgb.Booster} from the global
environment. Defaults to \code{FALSE} which means to not remove them.}
\item{envir}{The environment to perform wiping on if \code{wipe == TRUE}. Defaults to
\code{.GlobalEnv} which is the global environment.}
}
\value{
NULL invisibly.
}
\description{
Attempts to unload LightGBM packages so you can remove objects cleanly without
having to restart R. This is useful for instance if an object becomes stuck for no
apparent reason and you do not want to restart R to fix the lost object.
}
\examples{
data(agaricus.train, package = "lightgbm")
train <- agaricus.train
dtrain <- lgb.Dataset(train$data, label = train$label)
data(agaricus.test, package = "lightgbm")
test <- agaricus.test
dtest <- lgb.Dataset.create.valid(dtrain, test$data, label = test$label)
params <- list(objective = "regression", metric = "l2")
valids <- list(test = dtest)
model <- lgb.train(
params = params
, data = dtrain
, nrounds = 5L
, valids = valids
, min_data = 1L
, learning_rate = 1.0
)
\dontrun{
lgb.unloader(restore = FALSE, wipe = FALSE, envir = .GlobalEnv)
rm(model, dtrain, dtest) # Not needed if wipe = TRUE
gc() # Not needed if wipe = TRUE
library(lightgbm)
# Do whatever you want again with LightGBM without object clashing
}
}
|
/R-package/man/lgb.unloader.Rd
|
permissive
|
edwarchou/LightGBM
|
R
| false
| true
| 1,817
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lgb.unloader.R
\name{lgb.unloader}
\alias{lgb.unloader}
\title{Remove lightgbm and its objects from an environment}
\usage{
lgb.unloader(restore = TRUE, wipe = FALSE, envir = .GlobalEnv)
}
\arguments{
\item{restore}{Whether to reload \code{LightGBM} immediately after detaching from R.
Defaults to \code{TRUE} which means automatically reload \code{LightGBM} once
unloading is performed.}
\item{wipe}{Whether to wipe all \code{lgb.Dataset} and \code{lgb.Booster} from the global
environment. Defaults to \code{FALSE} which means to not remove them.}
\item{envir}{The environment to perform wiping on if \code{wipe == TRUE}. Defaults to
\code{.GlobalEnv} which is the global environment.}
}
\value{
NULL invisibly.
}
\description{
Attempts to unload LightGBM packages so you can remove objects cleanly without
having to restart R. This is useful for instance if an object becomes stuck for no
apparent reason and you do not want to restart R to fix the lost object.
}
\examples{
data(agaricus.train, package = "lightgbm")
train <- agaricus.train
dtrain <- lgb.Dataset(train$data, label = train$label)
data(agaricus.test, package = "lightgbm")
test <- agaricus.test
dtest <- lgb.Dataset.create.valid(dtrain, test$data, label = test$label)
params <- list(objective = "regression", metric = "l2")
valids <- list(test = dtest)
model <- lgb.train(
params = params
, data = dtrain
, nrounds = 5L
, valids = valids
, min_data = 1L
, learning_rate = 1.0
)
\dontrun{
lgb.unloader(restore = FALSE, wipe = FALSE, envir = .GlobalEnv)
rm(model, dtrain, dtest) # Not needed if wipe = TRUE
gc() # Not needed if wipe = TRUE
library(lightgbm)
# Do whatever you want again with LightGBM without object clashing
}
}
|
# Attach variable mnemonics and sector codes to the long-format database.
#
# `fulldbmelted`: a data.table in long (melted) form with at least the
#   columns `code`, `variable` and `geography`.
# `srcDir`: directory holding the NAICS lookup workbook.
# Returns the table with a new `mnemonic` column; helper columns removed.
#
# NOTE(review): relies on `macrovars`, `read_excel` (readxl), data.table and
# stringr being available in the calling environment -- confirm the project
# loads them before this function runs.
AttachMnems <- function(fulldbmelted,srcDir){
  codedir <- "Directory-namestonaicscodes-2.xlsx"
  smerge <- "SeriesMerge"
  # NOTE(review): eval() of a plain string just returns the string; kept
  # as-is for behaviour parity but it could simply be `codedir`.
  naicsdir <- paste(srcDir, eval(codedir), sep = "")
  # lookup sheets: final mnemonics, sector codes, and the series merge map
  mnems <- as.data.table(read_excel(naicsdir, na = "NA", sheet = "FinalMnem", col_names = TRUE))
  sectors <- as.data.table(read_excel(naicsdir, na = "NA", sheet = "sectorcode", col_names = TRUE))
  merge <- as.data.table(read_excel(naicsdir, na = "NA", sheet = eval(smerge), col_names = TRUE))
  # keep macro variables only at the national ("NAT") level, and
  # non-macro variables only at the sub-national levels
  fulldbmelted <- fulldbmelted[ !(code == "NAT" & !(variable %in% macrovars))]
  fulldbmelted <- fulldbmelted[ !(code != "NAT" & (variable %in% macrovars))]
  # Mnemonics
  # normalise whitespace before any of the lookups below
  fulldbmelted[, `:=`
               (
                 variable = str_trim(variable, side = "both"),
                 geography = str_trim(geography, side = "both")
               )]
  matchbase <- merge[!is.na(VarNeumonic)]
  # map each row onto its variable/price/sector symbols via match() lookups
  fulldbmelted[, `:=`
               (
                 varsymbol = matchbase$VarNeumonic[match(fulldbmelted$variable, matchbase$newvar, nomatch = NA)],
                 pricesymbol = matchbase$PriceSymbol[match(fulldbmelted$variable, matchbase$newvar, nomatch = NA)],
                 sectorcode = sectors$code[match(fulldbmelted$geography, sectors$geography, nomatch = NA)],
                 sectormnems = mnems$mnem[match(fulldbmelted$code, mnems$code, nomatch = NA)],
                 leveltag = mnems$Tag[match(fulldbmelted$code, mnems$code, nomatch = NA)]
               )]
  # get rid of any extra sectors
  fulldbmelted <- fulldbmelted[!is.na(sectormnems) | code == 1 | code == "NAT"]
  # get rid of top downs and aggregates in the delta and avhr series
  # fulldbmelted<-fulldbmelted[!(leveltag %in% c("aggregate","topdown") & grepl("^K|DELTA|AVHR",varsymbol))]
  # get rid of other assorted vars
  fulldbmelted <- fulldbmelted[!(geography == "Canada" & grepl("consolidated provincial-territorial and local governments-3850042", variable))]
  # get rid of any variables that we don't actually need (ie dont have mnemonics)
  fulldbmelted <- fulldbmelted[!is.na(varsymbol)]
  # blank out unmatched pieces so paste() below never produces "NA" text
  fulldbmelted[is.na(varsymbol), varsymbol := ""]
  fulldbmelted[is.na(pricesymbol), pricesymbol := ""]
  fulldbmelted[is.na(sectorcode), sectorcode := ""]
  fulldbmelted[is.na(sectormnems), sectormnems := ""]
  # final mnemonic = variable symbol + sector mnemonic + price symbol;
  # the helper columns are dropped in the chained call
  fulldbmelted[, `:=`
               (mnemonic = paste(varsymbol, sectormnems, pricesymbol, sep = "")) ][, c("pricesymbol", "varsymbol", "sectormnems","leveltag") := NULL]
}
|
/code/functions/AttachMnemonics.R
|
no_license
|
fpalacio-OE/eccc
|
R
| false
| false
| 2,482
|
r
|
# Attach variable mnemonics and sector codes to the long-format database.
#
# `fulldbmelted`: a data.table in long (melted) form with at least the
#   columns `code`, `variable` and `geography`.
# `srcDir`: directory holding the NAICS lookup workbook.
# Returns the table with a new `mnemonic` column; helper columns removed.
#
# NOTE(review): relies on `macrovars`, `read_excel` (readxl), data.table and
# stringr being available in the calling environment -- confirm the project
# loads them before this function runs.
AttachMnems <- function(fulldbmelted,srcDir){
  codedir <- "Directory-namestonaicscodes-2.xlsx"
  smerge <- "SeriesMerge"
  # NOTE(review): eval() of a plain string just returns the string; kept
  # as-is for behaviour parity but it could simply be `codedir`.
  naicsdir <- paste(srcDir, eval(codedir), sep = "")
  # lookup sheets: final mnemonics, sector codes, and the series merge map
  mnems <- as.data.table(read_excel(naicsdir, na = "NA", sheet = "FinalMnem", col_names = TRUE))
  sectors <- as.data.table(read_excel(naicsdir, na = "NA", sheet = "sectorcode", col_names = TRUE))
  merge <- as.data.table(read_excel(naicsdir, na = "NA", sheet = eval(smerge), col_names = TRUE))
  # keep macro variables only at the national ("NAT") level, and
  # non-macro variables only at the sub-national levels
  fulldbmelted <- fulldbmelted[ !(code == "NAT" & !(variable %in% macrovars))]
  fulldbmelted <- fulldbmelted[ !(code != "NAT" & (variable %in% macrovars))]
  # Mnemonics
  # normalise whitespace before any of the lookups below
  fulldbmelted[, `:=`
               (
                 variable = str_trim(variable, side = "both"),
                 geography = str_trim(geography, side = "both")
               )]
  matchbase <- merge[!is.na(VarNeumonic)]
  # map each row onto its variable/price/sector symbols via match() lookups
  fulldbmelted[, `:=`
               (
                 varsymbol = matchbase$VarNeumonic[match(fulldbmelted$variable, matchbase$newvar, nomatch = NA)],
                 pricesymbol = matchbase$PriceSymbol[match(fulldbmelted$variable, matchbase$newvar, nomatch = NA)],
                 sectorcode = sectors$code[match(fulldbmelted$geography, sectors$geography, nomatch = NA)],
                 sectormnems = mnems$mnem[match(fulldbmelted$code, mnems$code, nomatch = NA)],
                 leveltag = mnems$Tag[match(fulldbmelted$code, mnems$code, nomatch = NA)]
               )]
  # get rid of any extra sectors
  fulldbmelted <- fulldbmelted[!is.na(sectormnems) | code == 1 | code == "NAT"]
  # get rid of top downs and aggregates in the delta and avhr series
  # fulldbmelted<-fulldbmelted[!(leveltag %in% c("aggregate","topdown") & grepl("^K|DELTA|AVHR",varsymbol))]
  # get rid of other assorted vars
  fulldbmelted <- fulldbmelted[!(geography == "Canada" & grepl("consolidated provincial-territorial and local governments-3850042", variable))]
  # get rid of any variables that we don't actually need (ie dont have mnemonics)
  fulldbmelted <- fulldbmelted[!is.na(varsymbol)]
  # blank out unmatched pieces so paste() below never produces "NA" text
  fulldbmelted[is.na(varsymbol), varsymbol := ""]
  fulldbmelted[is.na(pricesymbol), pricesymbol := ""]
  fulldbmelted[is.na(sectorcode), sectorcode := ""]
  fulldbmelted[is.na(sectormnems), sectormnems := ""]
  # final mnemonic = variable symbol + sector mnemonic + price symbol;
  # the helper columns are dropped in the chained call
  fulldbmelted[, `:=`
               (mnemonic = paste(varsymbol, sectormnems, pricesymbol, sep = "")) ][, c("pricesymbol", "varsymbol", "sectormnems","leveltag") := NULL]
}
|
############################################################################
# plot1.R -- Exploratory Data Analysis course, plot 1.
# Downloads the UCI "Individual household electric power consumption" data,
# subsets it to 2007-02-01/2007-02-02, and draws a histogram of
# Global_active_power to plot1.png.
#
# Step 1: Download data, unzip, and create a list of all files in the zip.
# Steps that do not need to be repeated more than once are commented out.
# NOTE(review): hard-coded absolute setwd() paths make this script
# machine-specific; prefer relative paths so others can run it.
setwd("C:\\Users\\Diane\\Desktop\\workingDirectory")
file="https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
destfile="dat.zip"
download.file(file, destfile)
zipList=unzip(destfile, list=TRUE)  # list = TRUE returns archive contents without extracting
unzip(destfile)                     # extract household_power_consumption.txt
############################################################################
# Step 2: Load data, subset to the two target days, and build a datetime.
# NOTE(review): the subset matches Date as raw "d/m/yyyy" strings BEFORE any
# date conversion; round-tripping through power.txt is unnecessary but
# harmless (kept to avoid re-reading the large raw file interactively).
power=read.table("household_power_consumption.txt", header=TRUE, sep=";")
power2=power[power$Date %in% c("1/2/2007", "2/2/2007"),]
write.table(power2, "power.txt",sep=",")
pow=read.table("power.txt", sep=",",header=TRUE)
pow$datetime=as.POSIXct(paste(pow$Date, pow$Time), format="%d/%m/%Y %H:%M:%S")
############################################################################
# Step 3: Produce the 480x480 PNG histogram required by the assignment.
setwd("C:\\Users\\Diane\\Documents\\gitRepo\\ExData_Plotting1")
png(filename="plot1.png", width=480, height=480)
hist(pow$Global_active_power, xlab="Global Active Power (kilowatts)", col="red",
     breaks=16, main="Global Active Power")
dev.off()
|
/plot1.R
|
no_license
|
dmenuz/ExData_Plotting1
|
R
| false
| false
| 1,253
|
r
|
############################################################################
# plot1.R -- Exploratory Data Analysis course, plot 1.
# Downloads the UCI "Individual household electric power consumption" data,
# subsets it to 2007-02-01/2007-02-02, and draws a histogram of
# Global_active_power to plot1.png.
#
# Step 1: Download data, unzip, and create a list of all files in the zip.
# Steps that do not need to be repeated more than once are commented out.
# NOTE(review): hard-coded absolute setwd() paths make this script
# machine-specific; prefer relative paths so others can run it.
setwd("C:\\Users\\Diane\\Desktop\\workingDirectory")
file="https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
destfile="dat.zip"
download.file(file, destfile)
zipList=unzip(destfile, list=TRUE)  # list = TRUE returns archive contents without extracting
unzip(destfile)                     # extract household_power_consumption.txt
############################################################################
# Step 2: Load data, subset to the two target days, and build a datetime.
# NOTE(review): the subset matches Date as raw "d/m/yyyy" strings BEFORE any
# date conversion; round-tripping through power.txt is unnecessary but
# harmless (kept to avoid re-reading the large raw file interactively).
power=read.table("household_power_consumption.txt", header=TRUE, sep=";")
power2=power[power$Date %in% c("1/2/2007", "2/2/2007"),]
write.table(power2, "power.txt",sep=",")
pow=read.table("power.txt", sep=",",header=TRUE)
pow$datetime=as.POSIXct(paste(pow$Date, pow$Time), format="%d/%m/%Y %H:%M:%S")
############################################################################
# Step 3: Produce the 480x480 PNG histogram required by the assignment.
setwd("C:\\Users\\Diane\\Documents\\gitRepo\\ExData_Plotting1")
png(filename="plot1.png", width=480, height=480)
hist(pow$Global_active_power, xlab="Global Active Power (kilowatts)", col="red",
     breaks=16, main="Global Active Power")
dev.off()
|
# Create combo tables that break down the multiple-choice answers:
# each *_options___N column is one checkbox option, and table() on the
# assembled data frame cross-tabulates every combination of options.
# `mysurveys` and `myvarstub` are expected to be defined by the caller.

# Locus-of-activity options: column-name suffix keyed by output label.
locus_option_cols <- c(
  Informatics    = '_locus_options___1',
  ResearchOffice = '_locus_options___2',
  IT             = '_locus_options___3',
  Other          = '_locus_options___4'
)
mycombolocus <- as.data.frame(
  lapply(locus_option_cols, function(suffix) mysurveys[, paste0(myvarstub, suffix)])
)
mytabulatelocus <- as.data.frame(table(mycombolocus))
write.csv(mytabulatelocus, paste0('output/', myvarstub, '_locus.csv'))

# Sustainability options: same breakdown, written to its own CSV.
sust_option_cols <- c(
  Institutional = '_sust_options___1',
  Fees          = '_sust_options___2',
  Grants        = '_sust_options___3',
  Other         = '_sust_options___4'
)
mycombosustain <- as.data.frame(
  lapply(sust_option_cols, function(suffix) mysurveys[, paste0(myvarstub, suffix)])
)
mytabulatesustain <- as.data.frame(table(mycombosustain))
write.csv(mytabulatesustain, paste0('output/', myvarstub, '_sustain.csv'))
|
/answercombo.R
|
no_license
|
firaswehbe/ctsa-sustainability
|
R
| false
| false
| 890
|
r
|
# Create a combo table to show breakdown of multiple choice answers
#
# `mysurveys` and `myvarstub` must be defined by the calling script --
# presumably a survey export data frame and a question-name prefix
# (TODO confirm against the caller). Each *_options___N column is one
# checkbox option; table() on the assembled data frame cross-tabulates
# every combination of the four options, and the counts are written out.
mycombolocus = data.frame(
  Informatics = mysurveys[,paste0(myvarstub,'_locus_options___1')],
  ResearchOffice = mysurveys[,paste0(myvarstub,'_locus_options___2')],
  IT = mysurveys[,paste0(myvarstub,'_locus_options___3')],
  Other = mysurveys[,paste0(myvarstub,'_locus_options___4')]
)
# Frequency of every locus answer combination -> output/<stub>_locus.csv
mytabulatelocus = as.data.frame(table(mycombolocus))
write.csv(mytabulatelocus,paste0('output/',myvarstub,'_locus.csv'))
# Same breakdown for the sustainability options.
mycombosustain = data.frame(
  Institutional = mysurveys[,paste0(myvarstub,'_sust_options___1')],
  Fees = mysurveys[,paste0(myvarstub,'_sust_options___2')],
  Grants = mysurveys[,paste0(myvarstub,'_sust_options___3')],
  Other = mysurveys[,paste0(myvarstub,'_sust_options___4')]
)
mytabulatesustain = as.data.frame(table(mycombosustain))
write.csv(mytabulatesustain,paste0('output/',myvarstub,'_sustain.csv'))
|
# Caching of expensive disk reads.
library(memoise)

# Whitelist of valid NPS categories (UI label -> value used in file names).
# Plain `<-` replaces the original `<<-`: at the top level of a sourced
# global script both assign into the global environment.
nps_cats <- list("promoter" = "promoter", "detractor" = "detractor")

# Memoised loader for the pre-computed igraph backbone of an NPS category.
#
# `nps_cat` originates from user-controlled UI state, so it is validated
# against the `nps_cats` whitelist BEFORE being interpolated into a file
# name. This closes the path-injection hole the original comment warned
# about (e.g. nps_cat = "../secret" could previously reach readRDS()).
# Returns the object stored in "<nps_cat>.rda"; results are cached per
# category by memoise().
getIgraph <- memoise(function(nps_cat) {
  nps_cat <- match.arg(nps_cat, choices = unlist(nps_cats, use.names = FALSE))
  readRDS(file = sprintf("%s.rda", nps_cat))
})
|
/dashboard/global.R
|
no_license
|
jamiewan1989/LDA-topicmodel
|
R
| false
| false
| 289
|
r
|
# Caching of expensive disk reads.
library(memoise)

# Whitelist of valid NPS categories (UI label -> value used in file names).
# Plain `<-` replaces the original `<<-`: at the top level of a sourced
# global script both assign into the global environment.
nps_cats <- list("promoter" = "promoter", "detractor" = "detractor")

# Memoised loader for the pre-computed igraph backbone of an NPS category.
#
# `nps_cat` originates from user-controlled UI state, so it is validated
# against the `nps_cats` whitelist BEFORE being interpolated into a file
# name. This closes the path-injection hole the original comment warned
# about (e.g. nps_cat = "../secret" could previously reach readRDS()).
# Returns the object stored in "<nps_cat>.rda"; results are cached per
# category by memoise().
getIgraph <- memoise(function(nps_cat) {
  nps_cat <- match.arg(nps_cat, choices = unlist(nps_cats, use.names = FALSE))
  readRDS(file = sprintf("%s.rda", nps_cat))
})
|
## Test draw() methods
## load packages
library("testthat")
library("gratia")
library("mgcv")
library("ggplot2")
library("vdiffr")
## NOTE(review): context() is deprecated in testthat 3e -- kept as-is here.
context("draw-methods")
## Fit models shared by the tests below.
## m1: Gaussian additive model with four univariate smooths.
set.seed(1)
dat1 <- gamSim(1, n = 400, dist = "normal", scale = 2, verbose = FALSE)
m1 <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat1, method = "REML")
## m2: single isotropic 2-d smooth of x and z.
set.seed(1)
dat2 <- gamSim(2, n = 4000, dist = "normal", scale = 1, verbose = FALSE)
m2 <- gam(y ~ s(x, z, k = 40), data = dat2$data, method = "REML")
## m3: factor by-variable smooth of x2 plus a common smooth of x0.
set.seed(1)
dat3 <- gamSim(4, verbose = FALSE)
m3 <- gam(y ~ fac + s(x2, by = fac) + s(x0), data = dat3)
test_that("draw.evaluated_1d_smooth() plots the smooth", {
sm <- evaluate_smooth(m1, "s(x2)")
plt <- draw(sm)
expect_doppelganger("draw 1d smooth for selected smooth", plt)
})
test_that("draw.gam works with numeric select", {
plt <- draw(m1, select = 2)
expect_doppelganger("draw gam smooth for selected smooth numeric", plt)
plt <- draw(m1, select = c(1,2))
expect_doppelganger("draw gam smooth for two selected smooths numeric", plt)
})
test_that("draw.gam fails with bad select", {
expect_error(draw(m1, select = 8),
"One or more indices in 'select' > than the number of smooths in the model.",
fixed = TRUE)
expect_error(draw(m1, select = c(1,3,5,6)),
"One or more indices in 'select' > than the number of smooths in the model.",
fixed = TRUE)
expect_error(draw(m1, select = c(1,2,3,4,5)),
"Trying to select more smooths than are in the model.",
fixed = TRUE)
expect_error(draw(m1, select = TRUE),
"When 'select' is a logical vector, 'length(select)' must equal
the number of smooths in the model.", fixed = TRUE)
})
test_that("draw.gam works with character select", {
plt <- draw(m1, select = "s(x1)")
expect_doppelganger("draw gam smooth for selected smooth character", plt)
plt <- draw(m1, select = c("s(x0)", "s(x1)"))
expect_doppelganger("draw gam smooth for two selected smooths character", plt)
})
test_that("draw.gam works with logical select", {
plt <- draw(m1, select = c(TRUE, rep(FALSE, 3)))
expect_doppelganger("draw gam smooth for selected smooth logical", plt)
plt <- draw(m1, select = rep(c(TRUE, FALSE), each = 2))
expect_doppelganger("draw gam smooth for two selected smooths logical", plt)
})
test_that("draw.gam works with partial_match", {
plt <- draw(m3, select = 's(x2)', partial_match = TRUE)
expect_doppelganger("draw gam with partial match TRUE", plt)
expect_message(draw(m3, select = 's(x2)', partial_match = FALSE),
"Unable to draw any of the model terms.",
fixed = TRUE)
})
test_that("draw.gam works with select and parametric", {
plt <- draw(m3, select = 's(x2)', partial_match = TRUE)
expect_doppelganger("draw gam with select and parametric is NULL", plt)
plt <- draw(m3, select = 's(x2)', partial_match = TRUE, parametric = FALSE)
expect_doppelganger("draw gam with select and parametric is FALSE", plt)
plt <- draw(m3, select = 's(x2)', partial_match = TRUE, parametric = TRUE)
expect_doppelganger("draw gam with select and parametric is TRUE", plt)
plt <- draw(m3, parametric = TRUE)
expect_doppelganger("draw gam without select and parametric is TRUE", plt)
plt <- draw(m3, parametric = FALSE)
expect_doppelganger("draw gam without select and parametric is FALSE", plt)
})
test_that("draw.evaluated_2d_smooth() plots the smooth", {
skip_on_os("mac")
sm <- evaluate_smooth(m2, "s(x,z)", n = 100)
plt <- draw(sm)
expect_doppelganger("draw 2d smooth", plt)
plt <- draw(sm, contour_col = "red")
expect_doppelganger("draw 2d smooth diff contour colour", plt)
})
test_that("draw.evaluated_2d_smooth() plots the smooth without contours", {
skip_on_os("mac")
sm <- evaluate_smooth(m2, "s(x,z)", n = 100)
plt <- draw(sm, contour = FALSE)
expect_doppelganger("draw 2d smooth without contours", plt)
})
test_that("draw.evaluated_2d_smooth() plots the smooth with different contour bins", {
skip_on_os("mac")
sm <- evaluate_smooth(m2, "s(x,z)", n = 100)
plt <- draw(sm, n_contour = 5)
expect_doppelganger("draw 2d smooth with 5 contour bins", plt)
plt <- draw(sm, n_contour = 20)
expect_doppelganger("draw 2d smooth with 20 contour bins", plt)
})
test_that("draw.evaluated_2d_smooth() plots the SE", {
skip_on_os("mac")
sm <- evaluate_smooth(m2, "s(x,z)", n = 100)
plt <- draw(sm, show = "se")
expect_doppelganger("draw std error of 2d smooth", plt)
})
test_that("draw.gam() plots a simple multi-smooth AM", {
plt <- draw(m1)
expect_doppelganger("draw simple multi-smooth AM", plt)
plt <- draw(m1, scales = "fixed")
expect_doppelganger("draw simple multi-smooth AM with fixed scales", plt)
})
test_that("draw.gam() can draw partial residuals", {
plt <- draw(m1, residuals = TRUE)
expect_doppelganger("draw simple partial residuals", plt)
plt <- draw(m1, residuals = TRUE, scales = "fixed")
expect_doppelganger("draw simple partial residuals with fixed scales", plt)
})
test_that("draw.gam() plots an AM with a single 2d smooth", {
skip_on_os("mac")
plt <- draw(m2)
expect_doppelganger("draw AM with 2d smooth", plt)
sm <- evaluate_smooth(m2, smooth = "s(x,z)")
plt <- draw(sm, show = "se")
expect_doppelganger("draw evaulated 2d smooth standard errors", plt)
})
test_that("draw.gam() plots an AM with a single factor by-variable smooth", {
plt <- draw(m3)
expect_doppelganger("draw AM with factor by-variable smooth", plt)
plt <- draw(m3, scales = "fixed")
expect_doppelganger("draw AM with factor by-variable smooth with fixed scales", plt)
})
## simulate date from y = f(x2)*x1 + error
dat <- gamSim(3, n = 400, verbose = FALSE)
mod <- gam(y ~ s(x2, by = x1), data = dat)
test_that("draw() works with continuous by", {
plt <- draw(mod)
expect_doppelganger("draw AM with continuous by-variable smooth", plt)
})
test_that("draw() works with continuous by and fixed scales", {
plt <- draw(mod, scales = "fixed")
expect_doppelganger("draw AM with continuous by-var fixed scale", plt)
})
test_that("draw() works with random effect smooths (bs = 're')", {
## simulate example... from ?mgcv::random.effects
dat <- gamSim(1, n = 400, scale = 2, verbose = FALSE) ## simulate 4 term additive truth
fac <- as.factor(sample(1:20, 400, replace = TRUE))
dat$X <- model.matrix(~ fac - 1)
b <- rnorm(20) * 0.5
dat <- transform(dat, y = y + X %*% b)
rm1 <- gam(y ~ s(fac, bs = "re") + s(x0) + s(x1) + s(x2) +
s(x3), data = dat, method = "ML")
sm <- evaluate_smooth(rm1, "s(fac)")
expect_s3_class(sm, "evaluated_re_smooth")
p1 <- draw(sm)
expect_doppelganger("draw.evaluated_re_smooth", p1)
p2 <- draw(rm1, ncol = 3)
expect_doppelganger("draw.gam model with ranef smooth", p2)
p3 <- draw(rm1, ncol = 3, scales = "fixed")
expect_doppelganger("draw.gam model with ranef smooth fixed scales", p3)
})
test_that("draw() with random effect smooths (bs = 're') & factor by variable ", {
## simulate example...
set.seed(1)
df <- gamSim(4, n = 400, scale = 2, verbose = FALSE) ## simulate 4 term additive truth
## random effects
ranef <- as.factor(sample(1:20, 400, replace = TRUE))
df$X <- model.matrix(~ ranef - 1)
b <- rnorm(20) * 0.5
## NOTE(review): the transformed data is assigned to `da1` but never used;
## the model below is fitted to the untransformed `df`, so the simulated
## random-effect signal added to `y` here is discarded. `data = da1` was
## presumably intended -- but changing it would alter the fitted model and
## invalidate the stored doppelganger baselines, so it is only flagged.
da1 <- transform(df, y = y + X %*% b)
## fit model
rm2 <- gam(y ~ fac + s(ranef, bs = "re", by = fac) + s(x0) + s(x1) + s(x2),
           data = df, method = "ML")
sm <- evaluate_smooth(rm2, "s(ranef)")
expect_s3_class(sm, "evaluated_re_smooth")
p1 <- draw(sm)
expect_doppelganger("draw.evaluated_re_smooth with factor by", p1)
p2 <- draw(rm2, ncol = 3)
expect_doppelganger("draw.gam model with ranef smooth factor by", p2)
p3 <- draw(rm2, ncol = 3, scales = "fixed")
expect_doppelganger("draw.gam model with ranef smooth factor by fixed scales", p3)
})
test_that("draw() can handle non-standard names -- a function call as a name", {
df <- data.frame(y = c(0.15,0.17,0.07,0.17,0.01,0.15,0.18,0.04,-0.06,-0.08,
0, 0.03,-0.27,-0.93,0.04,0.12,0.08,0.15,0.04,0.15,
0.03,0.09,0.11,0.13,-0.11,-0.32,-0.7,-0.78,0.07,0.04,
0.06,0.12,-0.15,0.05,-0.08,0.14,-0.02,-0.14,-0.24,
-0.32,-0.78,-0.81,-0.04,-0.25,-0.09,0.02,-0.13,-0.2,
-0.04,0,0.02,-0.05,-0.19,-0.37,-0.57,-0.81),
time = rep(2^c(-1, 0, 1, 1.58,2, 2.58, 3, 3.32, 3.58, 4.17,
4.58, 5.58, 6.17, 7.39), 4))
## the smooth is of `log2(time)` but this needs special handling
## in the `ggplot()` to avoid `ggplot()` looking incorrectly for `time` and
## not the correct `log2(time)`
fit <- gam(y ~ s(log2(time)), data = df, method = "REML")
p1 <- draw(fit)
expect_doppelganger("draw.gam model with non-standard names", p1)
})
## draw() support for factor-smooth interaction bases (bs = "fs").
test_that("draw() works with factor-smooth interactions (bs = 'fs')", {
    ## simulate example... from ?mgcv::factor.smooth.interaction
    set.seed(0)
    ## simulate data...
    f0 <- function(x) 2 * sin(pi * x)
    f1 <- function(x, a = 2, b = -1) exp(a * x) + b
    f2 <- function(x) 0.2 * x^11 * (10 * (1 - x))^6 + 10 *
        (10 * x)^3 * (1 - x)^10
    n <- 500
    nf <- 10
    fac <- sample(1:nf, n, replace = TRUE)
    x0 <- runif(n)
    x1 <- runif(n)
    x2 <- runif(n)
    a <- rnorm(nf) * .2 + 2
    b <- rnorm(nf) * .5
    f <- f0(x0) + f1(x1, a[fac], b[fac]) + f2(x2)
    fac <- factor(fac)
    y <- f + rnorm(n) * 2
    df <- data.frame(y = y, x0 = x0, x1 = x1, x2 = x2, fac = fac)
    ## FIX: pass `data = df` explicitly. Previously no data argument was
    ## supplied, so gam() silently resolved y/x0/x1/x2/fac from the
    ## enclosing environment and `df` was dead code. The values are
    ## identical either way, so the stored doppelganger baselines are
    ## unaffected.
    mod <- gam(y ~ s(x0) + s(x1, fac, bs = "fs", k = 5) + s(x2, k = 20),
               data = df, method = "ML")
    sm <- evaluate_smooth(mod, "s(x1,fac)")
    expect_s3_class(sm, "evaluated_fs_smooth")
    p1 <- draw(sm)
    expect_doppelganger("draw.evaluated_fs_smooth", p1)
    p2 <- draw(mod, ncol = 2)
    expect_doppelganger("draw.gam model with fs smooth", p2)
    p3 <- draw(mod, ncol = 2, scales = "fixed")
    expect_doppelganger("draw.gam model with fs smooth fixed scales", p3)
})
test_that("draw() works with parametric terms", {
set.seed(0)
## fake some data...
f1 <- function(x) {exp(2 * x)}
f2 <- function(x) {
0.2*x^11*(10*(1-x))^6+10*(10*x)^3*(1-x)^10
}
f3 <- function(x) {x*0}
n <- 200
sig2 <- 4
x0 <- rep(1:4,50)
x1 <- runif(n, 0, 1)
x2 <- runif(n, 0, 1)
x3 <- runif(n, 0, 1)
e <- rnorm(n, 0, sqrt(sig2))
y <- 2*x0 + f1(x1) + f2(x2) + f3(x3) + e
df <- data.frame(x0 = x0, x1 = x1, x2 = x2, x3 = x3, y = y)
## fit
mod <- gam(y ~ x0 + s(x1) + s(x2) + s(x3), data = df)
## evaluate parametric terms directly
e1 <- evaluate_parametric_term(mod, term = "x0")
expect_s3_class(e1, "evaluated_parametric_term")
expect_equal(ncol(e1), 5L)
expect_named(e1, c("term", "type", "value", "partial", "se"))
p1 <- draw(e1)
expect_doppelganger("draw.evaluated_parametric_term with linear parametric term", p1)
## check evaluate_parametric_term works
p2 <- draw(mod)
expect_doppelganger("draw.gam with linear parametric term", p2)
## factor parametric terms
x0 <- factor(x0)
df <- data.frame(x0 = x0, x1 = x1, x2 = x2, x3 = x3, y = y)
## fit
mod <- gam(y ~ x0 + s(x1) + s(x2) + s(x3), data = df)
## check evaluate_parametric_term works
p3 <- draw(mod)
expect_doppelganger("draw.gam with factor parametric term", p3)
## evaluate parametric terms directly
e2 <- evaluate_parametric_term(mod, term = "x0")
expect_s3_class(e2, "evaluated_parametric_term")
expect_error(evaluate_parametric_term(mod, term = "x1"),
"Term is not in the parametric part of model: <x1>",
fixed = TRUE)
expect_warning(evaluate_parametric_term(mod, term = c('x0', 'x1')),
"More than one `term` requested; using the first <x0>",
fixed = TRUE)
})
test_that("component-wise CIs work without seWithMean", {
sm <- evaluate_smooth(m1, "s(x3)", overall_uncertainty = FALSE)
plt <- draw(sm)
expect_doppelganger("draw 1d smooth for selected smooth with overall_uncertainty false", plt)
plt <- draw(m1, overall_uncertainty = FALSE)
expect_doppelganger("draw gam with overall_uncertainty false", plt)
})
test_that("draw.derivates() plots derivatives for a GAM", {
d1 <- derivatives(m1)
plt <- draw(d1)
expect_doppelganger("draw derivatives for a GAM", plt)
plt <- draw(d1, scales = "fixed")
expect_doppelganger("draw derivatives for a GAM with fixed scales", plt)
})
## test that issue 39 stays fixed
test_that("draw.gam doesn't create empty plots with multiple parametric terms", {
set.seed(42)
dat <- gamSim(4, n = 300, verbose = FALSE)
dat <- transform(dat, fac = factor(fac), fac2 = factor(fac)) # second factor
## GAM with 2 factors and 2 numeric terms
m2f <- gam(y ~ s(x0) + s(x1) + fac + fac2, data = dat,
family = gaussian(link = "identity"))
plt <- draw(m2f)
expect_doppelganger("draw issue 39 empty plots", plt)
})
test_that("draw.mgcv_smooth() can plot basic smooth bases", {
set.seed(42)
dat <- gamSim(1, n = 400, verbose = FALSE)
bs <- basis(s(x0), data = dat)
plt <- draw(bs)
expect_doppelganger("draw basic tprs basis", plt)
})
test_that("draw.mgcv_smooth() can plot by factor basis smooth bases", {
set.seed(42)
dat <- gamSim(4, n = 400, verbose = FALSE)
bs <- basis(s(x2, by = fac), data = dat)
plt <- draw(bs)
expect_doppelganger("draw by factor basis", plt)
})
test_that("draw() works with a ziplss models; issue #45", {
## simulate some data...
f0 <- function(x) 2 * sin(pi * x); f1 <- function(x) exp(2 * x)
f2 <- function(x) 0.2 * x^11 * (10 * (1 - x))^6 + 10 *
(10 * x)^3 * (1 - x)^10
n <- 500
set.seed(5)
x0 <- runif(n)
x1 <- runif(n)
x2 <- runif(n)
x3 <- runif(n)
## Simulate probability of potential presence...
eta1 <- f0(x0) + f1(x1) - 3
p <- binomial()$linkinv(eta1)
y <- as.numeric(runif(n) < p) ## 1 for presence, 0 for absence
## Simulate y given potentially present (not exactly model fitted!)...
ind <- y > 0
eta2 <- f2(x2[ind])/3
y[ind] <- rpois(exp(eta2), exp(eta2))
df <- data.frame(y, x0, x1, x2, x3)
b1 <- gam(list(y ~ s(x2) + x3,
~ s(x0) + x1), family = ziplss(), data = df)
plt <- draw(b1)
vdiffr::expect_doppelganger("draw ziplss parametric terms issue 45", plt)
})
## draw() support for posterior samples of smooths (smooth_samples()).
## FIX: `expect_doppelganger()` is exported by vdiffr, so use `::` rather
## than `:::` (reaching into internals via `:::` is flagged by R CMD check
## and breaks if the unexported copy is removed); also matches the
## `vdiffr::` usage elsewhere in this file.
test_that("draw works for sample_smooths objects", {
    sm1 <- smooth_samples(m1, n = 15, seed = 23478)
    plt <- draw(sm1, alpha = 0.7)
    vdiffr::expect_doppelganger("draw smooth_samples for GAM m1", plt)
    sm2 <- smooth_samples(m2, n = 4, seed = 23478)
    ## FIXME #71
    ##plt <- draw(sm2, alpha = 0.7)
    ##vdiffr::expect_doppelganger("draw smooth_samples for GAM m2", plt)
    sm3 <- smooth_samples(m3, n = 15, seed = 23478)
    plt <- draw(sm3, alpha = 0.7)
    vdiffr::expect_doppelganger("draw smooth_samples for GAM m3", plt)
})

test_that("draw works for sample_smooths objects with user specified smooth", {
    sm3 <- smooth_samples(m3, n = 15, seed = 23478)
    plt <- draw(sm3, select = "s(x0)", alpha = 0.7)
    vdiffr::expect_doppelganger("draw selected smooth_samples for GAM m3", plt)
    plt <- draw(sm3, select = "s(x2)", alpha = 0.7, partial_match = TRUE)
    vdiffr::expect_doppelganger("draw selected factor by smooth_samples for GAM m3", plt)
})
## Issue #22
test_that("draw() can handle a mixture of numeric and factor random effects", {
    df <- data_sim("eg4", seed = 42)
    m <- gam(y ~ s(x2, fac, bs = "re"), data = df, method = "REML")
    plt <- draw(m)
    ## FIX: expect_doppelganger() is exported -- use `::`, not `:::`.
    vdiffr::expect_doppelganger("issue 22 draw with mixed random effects", plt)
})
|
/tests/testthat/test-draw-methods.R
|
permissive
|
romainfrancois/gratia
|
R
| false
| false
| 16,161
|
r
|
## Test draw() methods
## load packages
library("testthat")
library("gratia")
library("mgcv")
library("ggplot2")
library("vdiffr")
context("draw-methods")
## Fit models
set.seed(1)
dat1 <- gamSim(1, n = 400, dist = "normal", scale = 2, verbose = FALSE)
m1 <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat1, method = "REML")
set.seed(1)
dat2 <- gamSim(2, n = 4000, dist = "normal", scale = 1, verbose = FALSE)
m2 <- gam(y ~ s(x, z, k = 40), data = dat2$data, method = "REML")
set.seed(1)
dat3 <- gamSim(4, verbose = FALSE)
m3 <- gam(y ~ fac + s(x2, by = fac) + s(x0), data = dat3)
test_that("draw.evaluated_1d_smooth() plots the smooth", {
sm <- evaluate_smooth(m1, "s(x2)")
plt <- draw(sm)
expect_doppelganger("draw 1d smooth for selected smooth", plt)
})
test_that("draw.gam works with numeric select", {
plt <- draw(m1, select = 2)
expect_doppelganger("draw gam smooth for selected smooth numeric", plt)
plt <- draw(m1, select = c(1,2))
expect_doppelganger("draw gam smooth for two selected smooths numeric", plt)
})
test_that("draw.gam fails with bad select", {
expect_error(draw(m1, select = 8),
"One or more indices in 'select' > than the number of smooths in the model.",
fixed = TRUE)
expect_error(draw(m1, select = c(1,3,5,6)),
"One or more indices in 'select' > than the number of smooths in the model.",
fixed = TRUE)
expect_error(draw(m1, select = c(1,2,3,4,5)),
"Trying to select more smooths than are in the model.",
fixed = TRUE)
expect_error(draw(m1, select = TRUE),
"When 'select' is a logical vector, 'length(select)' must equal
the number of smooths in the model.", fixed = TRUE)
})
test_that("draw.gam works with character select", {
plt <- draw(m1, select = "s(x1)")
expect_doppelganger("draw gam smooth for selected smooth character", plt)
plt <- draw(m1, select = c("s(x0)", "s(x1)"))
expect_doppelganger("draw gam smooth for two selected smooths character", plt)
})
test_that("draw.gam works with logical select", {
plt <- draw(m1, select = c(TRUE, rep(FALSE, 3)))
expect_doppelganger("draw gam smooth for selected smooth logical", plt)
plt <- draw(m1, select = rep(c(TRUE, FALSE), each = 2))
expect_doppelganger("draw gam smooth for two selected smooths logical", plt)
})
test_that("draw.gam works with partial_match", {
plt <- draw(m3, select = 's(x2)', partial_match = TRUE)
expect_doppelganger("draw gam with partial match TRUE", plt)
expect_message(draw(m3, select = 's(x2)', partial_match = FALSE),
"Unable to draw any of the model terms.",
fixed = TRUE)
})
test_that("draw.gam works with select and parametric", {
plt <- draw(m3, select = 's(x2)', partial_match = TRUE)
expect_doppelganger("draw gam with select and parametric is NULL", plt)
plt <- draw(m3, select = 's(x2)', partial_match = TRUE, parametric = FALSE)
expect_doppelganger("draw gam with select and parametric is FALSE", plt)
plt <- draw(m3, select = 's(x2)', partial_match = TRUE, parametric = TRUE)
expect_doppelganger("draw gam with select and parametric is TRUE", plt)
plt <- draw(m3, parametric = TRUE)
expect_doppelganger("draw gam without select and parametric is TRUE", plt)
plt <- draw(m3, parametric = FALSE)
expect_doppelganger("draw gam without select and parametric is FALSE", plt)
})
test_that("draw.evaluated_2d_smooth() plots the smooth", {
skip_on_os("mac")
sm <- evaluate_smooth(m2, "s(x,z)", n = 100)
plt <- draw(sm)
expect_doppelganger("draw 2d smooth", plt)
plt <- draw(sm, contour_col = "red")
expect_doppelganger("draw 2d smooth diff contour colour", plt)
})
test_that("draw.evaluated_2d_smooth() plots the smooth without contours", {
skip_on_os("mac")
sm <- evaluate_smooth(m2, "s(x,z)", n = 100)
plt <- draw(sm, contour = FALSE)
expect_doppelganger("draw 2d smooth without contours", plt)
})
test_that("draw.evaluated_2d_smooth() plots the smooth with different contour bins", {
skip_on_os("mac")
sm <- evaluate_smooth(m2, "s(x,z)", n = 100)
plt <- draw(sm, n_contour = 5)
expect_doppelganger("draw 2d smooth with 5 contour bins", plt)
plt <- draw(sm, n_contour = 20)
expect_doppelganger("draw 2d smooth with 20 contour bins", plt)
})
test_that("draw.evaluated_2d_smooth() plots the SE", {
skip_on_os("mac")
sm <- evaluate_smooth(m2, "s(x,z)", n = 100)
plt <- draw(sm, show = "se")
expect_doppelganger("draw std error of 2d smooth", plt)
})
test_that("draw.gam() plots a simple multi-smooth AM", {
plt <- draw(m1)
expect_doppelganger("draw simple multi-smooth AM", plt)
plt <- draw(m1, scales = "fixed")
expect_doppelganger("draw simple multi-smooth AM with fixed scales", plt)
})
test_that("draw.gam() can draw partial residuals", {
plt <- draw(m1, residuals = TRUE)
expect_doppelganger("draw simple partial residuals", plt)
plt <- draw(m1, residuals = TRUE, scales = "fixed")
expect_doppelganger("draw simple partial residuals with fixed scales", plt)
})
test_that("draw.gam() plots an AM with a single 2d smooth", {
skip_on_os("mac")
plt <- draw(m2)
expect_doppelganger("draw AM with 2d smooth", plt)
sm <- evaluate_smooth(m2, smooth = "s(x,z)")
plt <- draw(sm, show = "se")
expect_doppelganger("draw evaulated 2d smooth standard errors", plt)
})
test_that("draw.gam() plots an AM with a single factor by-variable smooth", {
plt <- draw(m3)
expect_doppelganger("draw AM with factor by-variable smooth", plt)
plt <- draw(m3, scales = "fixed")
expect_doppelganger("draw AM with factor by-variable smooth with fixed scales", plt)
})
## simulate date from y = f(x2)*x1 + error
dat <- gamSim(3, n = 400, verbose = FALSE)
mod <- gam(y ~ s(x2, by = x1), data = dat)
test_that("draw() works with continuous by", {
plt <- draw(mod)
expect_doppelganger("draw AM with continuous by-variable smooth", plt)
})
test_that("draw() works with continuous by and fixed scales", {
plt <- draw(mod, scales = "fixed")
expect_doppelganger("draw AM with continuous by-var fixed scale", plt)
})
test_that("draw() works with random effect smooths (bs = 're')", {
## simulate example... from ?mgcv::random.effects
dat <- gamSim(1, n = 400, scale = 2, verbose = FALSE) ## simulate 4 term additive truth
fac <- as.factor(sample(1:20, 400, replace = TRUE))
dat$X <- model.matrix(~ fac - 1)
b <- rnorm(20) * 0.5
dat <- transform(dat, y = y + X %*% b)
rm1 <- gam(y ~ s(fac, bs = "re") + s(x0) + s(x1) + s(x2) +
s(x3), data = dat, method = "ML")
sm <- evaluate_smooth(rm1, "s(fac)")
expect_s3_class(sm, "evaluated_re_smooth")
p1 <- draw(sm)
expect_doppelganger("draw.evaluated_re_smooth", p1)
p2 <- draw(rm1, ncol = 3)
expect_doppelganger("draw.gam model with ranef smooth", p2)
p3 <- draw(rm1, ncol = 3, scales = "fixed")
expect_doppelganger("draw.gam model with ranef smooth fixed scales", p3)
})
test_that("draw() with random effect smooths (bs = 're') & factor by variable ", {
## simulate example...
set.seed(1)
df <- gamSim(4, n = 400, scale = 2, verbose = FALSE) ## simulate 4 term additive truth
## random effects
ranef <- as.factor(sample(1:20, 400, replace = TRUE))
df$X <- model.matrix(~ ranef - 1)
b <- rnorm(20) * 0.5
## NOTE(review): the transformed data is assigned to `da1` but never used;
## the model below is fitted to the untransformed `df`, so the simulated
## random-effect signal added to `y` here is discarded. `data = da1` was
## presumably intended -- but changing it would alter the fitted model and
## invalidate the stored doppelganger baselines, so it is only flagged.
da1 <- transform(df, y = y + X %*% b)
## fit model
rm2 <- gam(y ~ fac + s(ranef, bs = "re", by = fac) + s(x0) + s(x1) + s(x2),
           data = df, method = "ML")
sm <- evaluate_smooth(rm2, "s(ranef)")
expect_s3_class(sm, "evaluated_re_smooth")
p1 <- draw(sm)
expect_doppelganger("draw.evaluated_re_smooth with factor by", p1)
p2 <- draw(rm2, ncol = 3)
expect_doppelganger("draw.gam model with ranef smooth factor by", p2)
p3 <- draw(rm2, ncol = 3, scales = "fixed")
expect_doppelganger("draw.gam model with ranef smooth factor by fixed scales", p3)
})
test_that("draw() can handle non-standard names -- a function call as a name", {
df <- data.frame(y = c(0.15,0.17,0.07,0.17,0.01,0.15,0.18,0.04,-0.06,-0.08,
0, 0.03,-0.27,-0.93,0.04,0.12,0.08,0.15,0.04,0.15,
0.03,0.09,0.11,0.13,-0.11,-0.32,-0.7,-0.78,0.07,0.04,
0.06,0.12,-0.15,0.05,-0.08,0.14,-0.02,-0.14,-0.24,
-0.32,-0.78,-0.81,-0.04,-0.25,-0.09,0.02,-0.13,-0.2,
-0.04,0,0.02,-0.05,-0.19,-0.37,-0.57,-0.81),
time = rep(2^c(-1, 0, 1, 1.58,2, 2.58, 3, 3.32, 3.58, 4.17,
4.58, 5.58, 6.17, 7.39), 4))
## the smooth is of `log2(time)` but this needs special handling
## in the `ggplot()` to avoid `ggplot()` looking incorrectly for `time` and
## not the correct `log2(time)`
fit <- gam(y ~ s(log2(time)), data = df, method = "REML")
p1 <- draw(fit)
expect_doppelganger("draw.gam model with non-standard names", p1)
})
## draw() support for factor-smooth interaction bases (bs = "fs").
test_that("draw() works with factor-smooth interactions (bs = 'fs')", {
    ## simulate example... from ?mgcv::factor.smooth.interaction
    set.seed(0)
    ## simulate data...
    f0 <- function(x) 2 * sin(pi * x)
    f1 <- function(x, a = 2, b = -1) exp(a * x) + b
    f2 <- function(x) 0.2 * x^11 * (10 * (1 - x))^6 + 10 *
        (10 * x)^3 * (1 - x)^10
    n <- 500
    nf <- 10
    fac <- sample(1:nf, n, replace = TRUE)
    x0 <- runif(n)
    x1 <- runif(n)
    x2 <- runif(n)
    a <- rnorm(nf) * .2 + 2
    b <- rnorm(nf) * .5
    f <- f0(x0) + f1(x1, a[fac], b[fac]) + f2(x2)
    fac <- factor(fac)
    y <- f + rnorm(n) * 2
    df <- data.frame(y = y, x0 = x0, x1 = x1, x2 = x2, fac = fac)
    ## FIX: pass `data = df` explicitly. Previously no data argument was
    ## supplied, so gam() silently resolved y/x0/x1/x2/fac from the
    ## enclosing environment and `df` was dead code. The values are
    ## identical either way, so the stored doppelganger baselines are
    ## unaffected.
    mod <- gam(y ~ s(x0) + s(x1, fac, bs = "fs", k = 5) + s(x2, k = 20),
               data = df, method = "ML")
    sm <- evaluate_smooth(mod, "s(x1,fac)")
    expect_s3_class(sm, "evaluated_fs_smooth")
    p1 <- draw(sm)
    expect_doppelganger("draw.evaluated_fs_smooth", p1)
    p2 <- draw(mod, ncol = 2)
    expect_doppelganger("draw.gam model with fs smooth", p2)
    p3 <- draw(mod, ncol = 2, scales = "fixed")
    expect_doppelganger("draw.gam model with fs smooth fixed scales", p3)
})
test_that("draw() works with parametric terms", {
set.seed(0)
## fake some data...
f1 <- function(x) {exp(2 * x)}
f2 <- function(x) {
0.2*x^11*(10*(1-x))^6+10*(10*x)^3*(1-x)^10
}
f3 <- function(x) {x*0}
n <- 200
sig2 <- 4
x0 <- rep(1:4,50)
x1 <- runif(n, 0, 1)
x2 <- runif(n, 0, 1)
x3 <- runif(n, 0, 1)
e <- rnorm(n, 0, sqrt(sig2))
y <- 2*x0 + f1(x1) + f2(x2) + f3(x3) + e
df <- data.frame(x0 = x0, x1 = x1, x2 = x2, x3 = x3, y = y)
## fit
mod <- gam(y ~ x0 + s(x1) + s(x2) + s(x3), data = df)
## evaluate parametric terms directly
e1 <- evaluate_parametric_term(mod, term = "x0")
expect_s3_class(e1, "evaluated_parametric_term")
expect_equal(ncol(e1), 5L)
expect_named(e1, c("term", "type", "value", "partial", "se"))
p1 <- draw(e1)
expect_doppelganger("draw.evaluated_parametric_term with linear parametric term", p1)
## check evaluate_parametric_term works
p2 <- draw(mod)
expect_doppelganger("draw.gam with linear parametric term", p2)
## factor parametric terms
x0 <- factor(x0)
df <- data.frame(x0 = x0, x1 = x1, x2 = x2, x3 = x3, y = y)
## fit
mod <- gam(y ~ x0 + s(x1) + s(x2) + s(x3), data = df)
## check evaluate_parametric_term works
p3 <- draw(mod)
expect_doppelganger("draw.gam with factor parametric term", p3)
## evaluate parametric terms directly
e2 <- evaluate_parametric_term(mod, term = "x0")
expect_s3_class(e2, "evaluated_parametric_term")
expect_error(evaluate_parametric_term(mod, term = "x1"),
"Term is not in the parametric part of model: <x1>",
fixed = TRUE)
expect_warning(evaluate_parametric_term(mod, term = c('x0', 'x1')),
"More than one `term` requested; using the first <x0>",
fixed = TRUE)
})
test_that("component-wise CIs work without seWithMean", {
sm <- evaluate_smooth(m1, "s(x3)", overall_uncertainty = FALSE)
plt <- draw(sm)
expect_doppelganger("draw 1d smooth for selected smooth with overall_uncertainty false", plt)
plt <- draw(m1, overall_uncertainty = FALSE)
expect_doppelganger("draw gam with overall_uncertainty false", plt)
})
test_that("draw.derivates() plots derivatives for a GAM", {
d1 <- derivatives(m1)
plt <- draw(d1)
expect_doppelganger("draw derivatives for a GAM", plt)
plt <- draw(d1, scales = "fixed")
expect_doppelganger("draw derivatives for a GAM with fixed scales", plt)
})
## test that issue 39 stays fixed
test_that("draw.gam doesn't create empty plots with multiple parametric terms", {
set.seed(42)
dat <- gamSim(4, n = 300, verbose = FALSE)
dat <- transform(dat, fac = factor(fac), fac2 = factor(fac)) # second factor
## GAM with 2 factors and 2 numeric terms
m2f <- gam(y ~ s(x0) + s(x1) + fac + fac2, data = dat,
family = gaussian(link = "identity"))
plt <- draw(m2f)
expect_doppelganger("draw issue 39 empty plots", plt)
})
test_that("draw.mgcv_smooth() can plot basic smooth bases", {
set.seed(42)
dat <- gamSim(1, n = 400, verbose = FALSE)
bs <- basis(s(x0), data = dat)
plt <- draw(bs)
expect_doppelganger("draw basic tprs basis", plt)
})
test_that("draw.mgcv_smooth() can plot by factor basis smooth bases", {
set.seed(42)
dat <- gamSim(4, n = 400, verbose = FALSE)
bs <- basis(s(x2, by = fac), data = dat)
plt <- draw(bs)
expect_doppelganger("draw by factor basis", plt)
})
test_that("draw() works with a ziplss models; issue #45", {
## simulate some data...
f0 <- function(x) 2 * sin(pi * x); f1 <- function(x) exp(2 * x)
f2 <- function(x) 0.2 * x^11 * (10 * (1 - x))^6 + 10 *
(10 * x)^3 * (1 - x)^10
n <- 500
set.seed(5)
x0 <- runif(n)
x1 <- runif(n)
x2 <- runif(n)
x3 <- runif(n)
## Simulate probability of potential presence...
eta1 <- f0(x0) + f1(x1) - 3
p <- binomial()$linkinv(eta1)
y <- as.numeric(runif(n) < p) ## 1 for presence, 0 for absence
## Simulate y given potentially present (not exactly model fitted!)...
ind <- y > 0
eta2 <- f2(x2[ind])/3
y[ind] <- rpois(exp(eta2), exp(eta2))
df <- data.frame(y, x0, x1, x2, x3)
b1 <- gam(list(y ~ s(x2) + x3,
~ s(x0) + x1), family = ziplss(), data = df)
plt <- draw(b1)
vdiffr::expect_doppelganger("draw ziplss parametric terms issue 45", plt)
})
test_that("draw works for sample_smooths objects", {
sm1 <- smooth_samples(m1, n = 15, seed = 23478)
plt <- draw(sm1, alpha = 0.7)
vdiffr:::expect_doppelganger("draw smooth_samples for GAM m1", plt)
sm2 <- smooth_samples(m2, n = 4, seed = 23478)
## FIXME #71
##plt <- draw(sm2, alpha = 0.7)
##vdiffr:::expect_doppelganger("draw smooth_samples for GAM m2", plt)
sm3 <- smooth_samples(m3, n = 15, seed = 23478)
plt <- draw(sm3, alpha = 0.7)
vdiffr:::expect_doppelganger("draw smooth_samples for GAM m3", plt)
})
test_that("draw works for sample_smooths objects with user specified smooth", {
sm3 <- smooth_samples(m3, n = 15, seed = 23478)
plt <- draw(sm3, select = "s(x0)", alpha = 0.7)
vdiffr:::expect_doppelganger("draw selected smooth_samples for GAM m3", plt)
plt <- draw(sm3, select = "s(x2)", alpha = 0.7, partial_match = TRUE)
vdiffr:::expect_doppelganger("draw selected factor by smooth_samples for GAM m3", plt)
})
## Issue #22
test_that("draw() can handle a mixture of numeric and factor random effects", {
  df <- data_sim("eg4", seed = 42)
  ## random-effect "smooth" over a numeric (x2) and a factor (fac) jointly
  m <- gam(y ~ s(x2, fac, bs = "re"), data = df, method = "REML")
  plt <- draw(m)
  ## `::` rather than `:::`: expect_doppelganger() is exported from vdiffr
  vdiffr::expect_doppelganger("issue 22 draw with mixed random effects", plt)
})
|
# Auto-generated fuzzing/regression input (libFuzzer via RcppDeepState) for
# ggforce:::enclose_points(). `x` deliberately mixes extreme magnitudes,
# denormals and NaN; `id` is empty and `y` is all zeros, to probe the C++
# routine's handling of degenerate input.
testlist <- list(id = integer(0), x = c(1.90359856625529e+185, 7.29111993354965e-304, NaN, 2.52467545024877e-321, 0, 0, 0, 9.61236224620517e+281, 3.96573944649364e-317, 0, 4.53801546776667e+279, 9.80104716176339e+281, 2.88109526018606e+284, 7.06327445644536e-304, 7.1071553048134e-15, 6.8181059126092e-322, 0, -3.27585619210153e+221, 2.12186639171417e-314, 7.52893732228503e-313, 6.05127749546858e-307, 4.66602416025939e-299, -5.48612406879369e+303, 1.55498017282131e-57, 5.42744864273861e-315, 7.29112021326053e-304, 3.22526053605166e-319, 9.61276090537297e+281, 1.54909342597064e-319, 0, 9.43907217295468e+281, NaN, 9.01350868401674e-313, 6.35269607422675e-320, 2.78134225999478e-309, 1.01521386764412e-314, 0, 4.53819723496227e+279, -5.48612406879377e+303, 2.03711628245548e-312, 1.39064994193288e-309, 2.11370674490681e-314, NaN, 3.409471078095e-304, 5.10527172876216e+279, 9.28935029746543e-310, 2.59898715796066e-312), y = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# NOTE(review): `:::` reaches an unexported function -- acceptable in a
# fuzzing harness, but not in package code.
result <- do.call(ggforce:::enclose_points,testlist)
str(result)
|
/ggforce/inst/testfiles/enclose_points/libFuzzer_enclose_points/enclose_points_valgrind_files/1609955684-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 1,280
|
r
|
# Auto-generated fuzzing/regression input (libFuzzer via RcppDeepState) for
# ggforce:::enclose_points(). `x` deliberately mixes extreme magnitudes,
# denormals and NaN; `id` is empty and `y` is all zeros, to probe the C++
# routine's handling of degenerate input.
testlist <- list(id = integer(0), x = c(1.90359856625529e+185, 7.29111993354965e-304, NaN, 2.52467545024877e-321, 0, 0, 0, 9.61236224620517e+281, 3.96573944649364e-317, 0, 4.53801546776667e+279, 9.80104716176339e+281, 2.88109526018606e+284, 7.06327445644536e-304, 7.1071553048134e-15, 6.8181059126092e-322, 0, -3.27585619210153e+221, 2.12186639171417e-314, 7.52893732228503e-313, 6.05127749546858e-307, 4.66602416025939e-299, -5.48612406879369e+303, 1.55498017282131e-57, 5.42744864273861e-315, 7.29112021326053e-304, 3.22526053605166e-319, 9.61276090537297e+281, 1.54909342597064e-319, 0, 9.43907217295468e+281, NaN, 9.01350868401674e-313, 6.35269607422675e-320, 2.78134225999478e-309, 1.01521386764412e-314, 0, 4.53819723496227e+279, -5.48612406879377e+303, 2.03711628245548e-312, 1.39064994193288e-309, 2.11370674490681e-314, NaN, 3.409471078095e-304, 5.10527172876216e+279, 9.28935029746543e-310, 2.59898715796066e-312), y = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# NOTE(review): `:::` reaches an unexported function -- acceptable in a
# fuzzing harness, but not in package code.
result <- do.call(ggforce:::enclose_points,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats.R
\name{stats}
\alias{stats}
\title{Classical estimates for tables}
\usage{
stats(x, margins = NULL, statistics = c("phi", "cramer", "chisq",
"yates"), maggr = mean)
}
\arguments{
\item{x}{a data.frame, matrix or table}
\item{margins}{margins}
\item{statistics}{statistics of interest}
\item{maggr}{a function for calculating the mean margins of a table, default is the arithmetic mean}
}
\value{
List containing all statistics
}
\description{
Some standard/classical (non-compositional) statistics
}
\details{
The statistic \sQuote{phi} gives the values of the table divided by the product of the margins. \sQuote{cramer} normalizes these values according to the dimension of the table. \sQuote{chisq} gives the expected values according to Pearson, while \sQuote{yates} gives them according to Yates.
For the \code{maggr} function argument, arithmetic means (\code{mean}) should be chosen to obtain the classical results. Any other user-provided function should be used with care, since the classical estimations rely on the arithmetic mean.
}
\examples{
data(precipitation)
tab1 <- indTab(precipitation)
stats(precipitation)
stats(precipitation, statistics = "cramer")
stats(precipitation, statistics = "chisq")
stats(precipitation, statistics = "yates")
## take with care
## (the provided statistics are not designed for that case):
stats(precipitation, statistics = "chisq", maggr = gmean)
}
\references{
Juan Jose Egozcue, Vera Pawlowsky-Glahn, Matthias Templ, Karel Hron (2015)
Independence in Contingency Tables Using Simplicial Geometry.
\emph{Communications in Statistics - Theory and Methods}, Vol. 44 (18), 3978--3996.
DOI:10.1080/03610926.2013.824980
}
\author{
Matthias Templ
}
|
/man/stats.Rd
|
no_license
|
hronkare/robCompositions
|
R
| false
| true
| 1,764
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats.R
\name{stats}
\alias{stats}
\title{Classical estimates for tables}
\usage{
stats(x, margins = NULL, statistics = c("phi", "cramer", "chisq",
"yates"), maggr = mean)
}
\arguments{
\item{x}{a data.frame, matrix or table}
\item{margins}{margins}
\item{statistics}{statistics of interest}
\item{maggr}{a function for calculating the mean margins of a table, default is the arithmetic mean}
}
\value{
List containing all statistics
}
\description{
Some standard/classical (non-compositional) statistics
}
\details{
The statistic \sQuote{phi} gives the values of the table divided by the product of the margins. \sQuote{cramer} normalizes these values according to the dimension of the table. \sQuote{chisq} gives the expected values according to Pearson, while \sQuote{yates} gives them according to Yates.
For the \code{maggr} function argument, arithmetic means (\code{mean}) should be chosen to obtain the classical results. Any other user-provided function should be used with care, since the classical estimations rely on the arithmetic mean.
}
\examples{
data(precipitation)
tab1 <- indTab(precipitation)
stats(precipitation)
stats(precipitation, statistics = "cramer")
stats(precipitation, statistics = "chisq")
stats(precipitation, statistics = "yates")
## take with care
## (the provided statistics are not designed for that case):
stats(precipitation, statistics = "chisq", maggr = gmean)
}
\references{
Juan Jose Egozcue, Vera Pawlowsky-Glahn, Matthias Templ, Karel Hron (2015)
Independence in Contingency Tables Using Simplicial Geometry.
\emph{Communications in Statistics - Theory and Methods}, Vol. 44 (18), 3978--3996.
DOI:10.1080/03610926.2013.824980
}
\author{
Matthias Templ
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shift_pickr.R
\name{shift_pickr}
\alias{shift_pickr}
\title{Chemical Shift Picking}
\usage{
shift_pickr(x, p, sh, pm = 0.005)
}
\arguments{
\item{x}{The spectrum of which you want to calculate the total area}
\item{p}{The matched ppm variable to x}
\item{sh}{Takes arrangements of values:
\enumerate{
\item The first is two values where the first is the lower ppm value and the second is the upper ppm value. This is then parsed to \code{get_idx()} to find the idx which is then parsed to the x variable.
\item The second arrangement is an array of numbers that are interpreted as the already calculated idx values and are parsed straight to the x variable
}}
\item{pm}{The plus/minus value you want to add or subtract from the peak. Default = 0.005}
}
\value{
An array of values mapped to defined peak
}
\description{
\code{shift_pickr()} is programmed to search a given ppm region for a maximum value (i.e., the apex of a metabolite peak) and return the chemical shift that best encapsulates said peak. It is most useful when picking peaks to estimate their area.
}
\details{
\code{shift_pickr()} takes chemical shift region in the \strong{sh} parameter and searches for the largest value in x in that chemical shift.
From there, the value of pm is added and subtracted from the ppm where the maximum x values resides to give a shift that best encapsulates the peak which is then returned by the function.
}
\examples{
data(x,p)
idx <- shift_pickr(x, p, sh = c(5,5.5), pm = 0.01)
}
\author{
Kyle Bario \email{kylebario1@gmail.com}
}
|
/man/shift_pickr.Rd
|
permissive
|
kbario/NMRadjustr
|
R
| false
| true
| 1,612
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shift_pickr.R
\name{shift_pickr}
\alias{shift_pickr}
\title{Chemical Shift Picking}
\usage{
shift_pickr(x, p, sh, pm = 0.005)
}
\arguments{
\item{x}{The spectrum of which you want to calculate the total area}
\item{p}{The matched ppm variable to x}
\item{sh}{Takes arrangements of values:
\enumerate{
\item The first is two values where the first is the lower ppm value and the second is the upper ppm value. This is then parsed to \code{get_idx()} to find the idx which is then parsed to the x variable.
\item The second arrangement is an array of numbers that are interpreted as the already calculated idx values and are parsed straight to the x variable
}}
\item{pm}{The plus/minus value you want to add or subtract from the peak. Default = 0.005}
}
\value{
An array of values mapped to defined peak
}
\description{
\code{shift_pickr()} is programmed to search a given ppm region for a maximum value (i.e., the apex of a metabolite peak) and return the chemical shift that best encapsulates said peak. It is most useful when picking peaks to estimate their area.
}
\details{
\code{shift_pickr()} takes chemical shift region in the \strong{sh} parameter and searches for the largest value in x in that chemical shift.
From there, the value of pm is added and subtracted from the ppm where the maximum x values resides to give a shift that best encapsulates the peak which is then returned by the function.
}
\examples{
data(x,p)
idx <- shift_pickr(x, p, sh = c(5,5.5), pm = 0.01)
}
\author{
Kyle Bario \email{kylebario1@gmail.com}
}
|
# Extracted example code for the `math` dataset bundled with stratifyR.
library(stratifyR)
### Name: math
### Title: Mathematics Marks for First-year University Students
### Aliases: math
### Keywords: datasets
### ** Examples
data(math)
# Smallest and largest recorded final mark
min(math$final_marks); max(math$final_marks)
# Visualise the distribution of final marks
hist(math$final_marks)
boxplot(math$final_marks)
|
/data/genthat_extracted_code/stratifyR/examples/math.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 268
|
r
|
# Extracted example code for the `math` dataset bundled with stratifyR.
library(stratifyR)
### Name: math
### Title: Mathematics Marks for First-year University Students
### Aliases: math
### Keywords: datasets
### ** Examples
data(math)
# Smallest and largest recorded final mark
min(math$final_marks); max(math$final_marks)
# Visualise the distribution of final marks
hist(math$final_marks)
boxplot(math$final_marks)
|
# Generate a sequence of Gaussian random numbers and
# convert the sequence into a time-series object
noise <- ts(rnorm(200, mean = 0, sd = 1))

# Histogram of the simulated noise with the fitted normal density overlaid.
# `freq = FALSE` already requests a density-scale histogram, so the
# redundant `prob = T` (which also used the discouraged `T` shorthand for
# TRUE) has been dropped.
hist(noise, freq = FALSE, ylim = c(0, 0.5), xlim = c(-5, 5), col = "red")
mu <- mean(noise)
sigma <- sd(noise)
# Grid for the theoretical density; spell out `length.out` instead of
# relying on partial argument matching (`length=`)
x <- seq(-5, 5, length.out = 100)
y <- dnorm(x, mu, sigma)
lines(x, y, lwd = 2, col = "blue")

# QQ plot against the standard normal; points close to the y = x reference
# line indicate the sample is consistent with white noise
qqnorm(noise)
abline(0, 1, col = "red")
|
/study/White noise test_Time Series.R
|
no_license
|
kanupriyasaxena/datascience
|
R
| false
| false
| 411
|
r
|
# Generate a sequence of Gaussian random numbers and
# convert the sequence into a time-series object
noise <- ts(rnorm(200, mean = 0, sd = 1))

# Histogram of the simulated noise with the fitted normal density overlaid.
# `freq = FALSE` already requests a density-scale histogram, so the
# redundant `prob = T` (which also used the discouraged `T` shorthand for
# TRUE) has been dropped.
hist(noise, freq = FALSE, ylim = c(0, 0.5), xlim = c(-5, 5), col = "red")
mu <- mean(noise)
sigma <- sd(noise)
# Grid for the theoretical density; spell out `length.out` instead of
# relying on partial argument matching (`length=`)
x <- seq(-5, 5, length.out = 100)
y <- dnorm(x, mu, sigma)
lines(x, y, lwd = 2, col = "blue")

# QQ plot against the standard normal; points close to the y = x reference
# line indicate the sample is consistent with white noise
qqnorm(noise)
abline(0, 1, col = "red")
|
#' Retrieve structured posts data from news articles, blog posts and online discussions
#'
#' @md
#' @param query A string query containing the filters that define which posts will be returned.
#' @param sort By default the results are sorted by relevancy. Acceptable values are
#'        "`relevancy`", "`social.facebook.likes`", "`social.facebook.shares`",
#'        "`social.facebook.comments`", "`social.gplus.shares`", "`social.pinterest.shares`",
#'        "`social.linkedin.shares`", "`social.stumbledupon.shares`", "`social.vk.shares`",
#'        "`replies_count`", "`participants_count`", "`spam_score`", "`performance_score`",
#'        "`published`", "`thread.published`", "`domain_rank`", "`ord_in_thread`",
#'        "`rating`".
#' @param ts A timestamp to start the search from. If a `POSIXct` is passed in, it will
#'        be converted to the necessary value in milliseconds. Default is previous 3 days.
#' @param order `asc` (ascending) or `desc` (descending, default) sort order for results
#' @param size Total number of posts returned per request, ranges between `1:100`
#'        (default is `100`).
#' @param accuracy_confidence `NULL` or `high`. If `high`, return only posts with high
#'        extraction accuracy, but removes about 30% of the total matching posts (with
#'        lower confidence).
#' @param highlight `FALSE` or `TRUE`. Return the fragments in the post that matched the
#'        textual boolean query. The matched keywords will be surrounded by `<em/>` tags.
#'        Default: `FALSE`
#' @param from Paging parameter (starting record number). Default is `0`.
#' @param quiet By default, calls in interactive sessions will return updates during fetching.
#'        Use `TRUE` to suppress these messages.
#' @param token Your private access token. You get a unique access token when you sign up.
#'        Store it in an environment variable `WEBHOSE_TOKEN` (usually in `~/.Renviron`)
#'        or provide it directly.
#' @param ... other parameters passed on to `httr::GET()`
#' @return a `list` with these fields:
#' * `totalResults`: The total number of posts matching your query (numeric)
#' * `moreResultsAvailable`: How many more results are available (numeric)
#' * `next`: A URL to get the next batch of posts matching your query. (character)
#' * `requestsLeft`: How many more requests are available in your current subscription plan. (numeric)
#' @references [webhose API](https://docs.webhose.io/docs/get-parameters)
#' @export
#' @examples \dontrun{
#' res <- filter_posts("(China AND United) language:english site_type:news site:bloomberg.com",
#'                     ts = 1213456)
#' }
filter_posts <- function(query, sort = "relevancy",
                         ts = (Sys.time() - (3 * 24 * 60 * 60)),
                         order = "desc", size = 100,
                         accuracy_confidence = NULL, highlight = FALSE,
                         from = 0, quiet = !interactive(),
                         token = Sys.getenv("WEBHOSE_TOKEN"), ...) {

  # NOTE(review): the roxygen docs say the timestamp is converted to
  # milliseconds, but as.numeric() on a POSIXct yields *seconds* since the
  # epoch -- confirm against the webhose API before changing either side.
  if (inherits(ts, "POSIXct")) ts <- as.numeric(ts)

  # `sort_params` is a package-internal vector of the accepted sort keys
  sort <- match.arg(tolower(sort[1]), sort_params)
  order <- match.arg(tolower(order[1]), c("asc", "desc"))

  # Query-string parameters for the API call
  params <- list(
    token = token,
    format = "json",
    q = query,
    sort = sort,
    order = order,
    size = size,
    ts = ts,
    from = from,
    highlight = highlight
  )

  if (!is.null(accuracy_confidence)) {
    accuracy_confidence <- match.arg(accuracy_confidence, "high")
    # idiomatic `<-` instead of `=` for assignment
    params$accuracy_confidence <- accuracy_confidence
  }

  # Left-assignment instead of the original right-assign (`... -> res`),
  # for consistency with the rest of the function.
  res <- httr::GET(
    url = "https://webhose.io/filterWebContent",
    query = params,
    ...
  )

  httr::stop_for_status(res)

  res <- httr::content(res, as = "text", encoding = "UTF-8")
  res <- jsonlite::fromJSON(res, flatten = TRUE)

  # `comma()` is a package-internal number formatter
  if (!quiet) message(sprintf("You have %s API calls remaining on your plan",
                              comma(res$requestsLeft)))

  res

}
|
/R/filter_posts.R
|
no_license
|
hrbrmstr/webhose
|
R
| false
| false
| 3,949
|
r
|
#' Retrieve structured posts data from news articles, blog posts and online discussions
#'
#' @md
#' @param query A string query containing the filters that define which posts will be returned.
#' @param sort By default the results are sorted by relevancy. Acceptable values are
#'        "`relevancy`", "`social.facebook.likes`", "`social.facebook.shares`",
#'        "`social.facebook.comments`", "`social.gplus.shares`", "`social.pinterest.shares`",
#'        "`social.linkedin.shares`", "`social.stumbledupon.shares`", "`social.vk.shares`",
#'        "`replies_count`", "`participants_count`", "`spam_score`", "`performance_score`",
#'        "`published`", "`thread.published`", "`domain_rank`", "`ord_in_thread`",
#'        "`rating`".
#' @param ts A timestamp to start the search from. If a `POSIXct` is passed in, it will
#'        be converted to the necessary value in milliseconds. Default is previous 3 days.
#' @param order `asc` (ascending) or `desc` (descending, default) sort order for results
#' @param size Total number of posts returned per request, ranges between `1:100`
#'        (default is `100`).
#' @param accuracy_confidence `NULL` or `high`. If `high`, return only posts with high
#'        extraction accuracy, but removes about 30% of the total matching posts (with
#'        lower confidence).
#' @param highlight `FALSE` or `TRUE`. Return the fragments in the post that matched the
#'        textual boolean query. The matched keywords will be surrounded by `<em/>` tags.
#'        Default: `FALSE`
#' @param from Paging parameter (starting record number). Default is `0`.
#' @param quiet By default, calls in interactive sessions will return updates during fetching.
#'        Use `TRUE` to suppress these messages.
#' @param token Your private access token. You get a unique access token when you sign up.
#'        Store it in an environment variable `WEBHOSE_TOKEN` (usually in `~/.Renviron`)
#'        or provide it directly.
#' @param ... other parameters passed on to `httr::GET()`
#' @return a `list` with these fields:
#' * `totalResults`: The total number of posts matching your query (numeric)
#' * `moreResultsAvailable`: How many more results are available (numeric)
#' * `next`: A URL to get the next batch of posts matching your query. (character)
#' * `requestsLeft`: How many more requests are available in your current subscription plan. (numeric)
#' @references [webhose API](https://docs.webhose.io/docs/get-parameters)
#' @export
#' @examples \dontrun{
#' res <- filter_posts("(China AND United) language:english site_type:news site:bloomberg.com",
#'                     ts = 1213456)
#' }
filter_posts <- function(query, sort = "relevancy",
                         ts = (Sys.time() - (3 * 24 * 60 * 60)),
                         order = "desc", size = 100,
                         accuracy_confidence = NULL, highlight = FALSE,
                         from = 0, quiet = !interactive(),
                         token = Sys.getenv("WEBHOSE_TOKEN"), ...) {

  # NOTE(review): the roxygen docs say the timestamp is converted to
  # milliseconds, but as.numeric() on a POSIXct yields *seconds* since the
  # epoch -- confirm against the webhose API before changing either side.
  if (inherits(ts, "POSIXct")) ts <- as.numeric(ts)

  # `sort_params` is a package-internal vector of the accepted sort keys
  sort <- match.arg(tolower(sort[1]), sort_params)
  order <- match.arg(tolower(order[1]), c("asc", "desc"))

  # Query-string parameters for the API call
  params <- list(
    token = token,
    format = "json",
    q = query,
    sort = sort,
    order = order,
    size = size,
    ts = ts,
    from = from,
    highlight = highlight
  )

  if (!is.null(accuracy_confidence)) {
    accuracy_confidence <- match.arg(accuracy_confidence, "high")
    # idiomatic `<-` instead of `=` for assignment
    params$accuracy_confidence <- accuracy_confidence
  }

  # Left-assignment instead of the original right-assign (`... -> res`),
  # for consistency with the rest of the function.
  res <- httr::GET(
    url = "https://webhose.io/filterWebContent",
    query = params,
    ...
  )

  httr::stop_for_status(res)

  res <- httr::content(res, as = "text", encoding = "UTF-8")
  res <- jsonlite::fromJSON(res, flatten = TRUE)

  # `comma()` is a package-internal number formatter
  if (!quiet) message(sprintf("You have %s API calls remaining on your plan",
                              comma(res$requestsLeft)))

  res

}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeLine.R
\name{makeLine}
\alias{makeLine}
\title{Create a linear patch (beta version).}
\usage{
makeLine(context, size, direction = NULL, convol = 0.5, spt = NULL,
bgr = 0, edge = FALSE, rast = FALSE, val = 1)
}
\arguments{
\item{context}{Raster object or matrix, an empty landscape raster or a mask indicating where the patch cannot be generated (see bgr below).}
\item{size}{integer. Size of the patch to be generated, as number of raster cells.}
\item{direction}{numeric. The direction towards which the patch will point and grow, expressed in degrees between 0 and 360.}
\item{convol}{numeric. Level of convolution to be assigned to the patch (default is \code{convol=0.5}). A value
between 0 and 1, where 0 is no convolution at all (basically a straight line) and 1 is maximum convolution (i.e. tends to form a circular patch).}
\item{spt}{integer or matrix. The seed point location around which the patch is generated (a random point is given by default). It can be an integer, as index of the cell in the raster, or a two columns matrix indicating x and y coordinates (an integer vector of length 2 is accepted too).}
\item{bgr}{integer. A single value of background cells, where a patch can be generated (default is zero). Cells/classes which cannot be changed must have a different value.}
\item{edge}{logical. Should the vector of edge cells of the patch be returned?}
\item{rast}{logical. If TRUE returns a Raster object, otherwise a vector of cell numbers where the patch occurs}
\item{val}{integer. The value to be assigned to patch cells, when \code{rast=TRUE}}
}
\value{
A vector of raster cell numbers, or a RasterLayer object if \code{rast=TRUE}. If \code{edge=TRUE} a
list of two vectors is returned: one for the inner raster cells and the second for cells at the edge of the patch.
}
\description{
Create a linear patch, setting direction and convolution. The higher the convolution degree, the weaker the
linear shape (and direction).
% ADD set length instead of size
% FIX values of direction at 0 and 360
% FIX values of convol of 1 and 0
}
\details{
For any values of \code{convol} > 0.8, no big differences are observed. Also, direction is progressively lost
as convolution increases.
}
\examples{
library(raster)
r <- matrix(0,33,33)
r <- raster(r, xmn=0, xmx=10, ymn=0, ymx=10)
plot(makeLine(r, size=50, spt = 545, direction=45, convol=0.05, rast=TRUE))
}
|
/man/makeLine.Rd
|
no_license
|
dariomasante/landscapeR
|
R
| false
| true
| 2,478
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeLine.R
\name{makeLine}
\alias{makeLine}
\title{Create a linear patch (beta version).}
\usage{
makeLine(context, size, direction = NULL, convol = 0.5, spt = NULL,
bgr = 0, edge = FALSE, rast = FALSE, val = 1)
}
\arguments{
\item{context}{Raster object or matrix, an empty landscape raster or a mask indicating where the patch cannot be generated (see bgr below).}
\item{size}{integer. Size of the patch to be generated, as number of raster cells.}
\item{direction}{numeric. The direction towards which the patch will point and grow, expressed in degrees between 0 and 360.}
\item{convol}{numeric. Level of convolution to be assigned to the patch (default is \code{convol=0.5}). A value
between 0 and 1, where 0 is no convolution at all (basically a straight line) and 1 is maximum convolution (i.e. tends to form a circular patch).}
\item{spt}{integer or matrix. The seed point location around which the patch is generated (a random point is given by default). It can be an integer, as index of the cell in the raster, or a two columns matrix indicating x and y coordinates (an integer vector of length 2 is accepted too).}
\item{bgr}{integer. A single value of background cells, where a patch can be generated (default is zero). Cells/classes which cannot be changed must have a different value.}
\item{edge}{logical. Should the vector of edge cells of the patch be returned?}
\item{rast}{logical. If TRUE returns a Raster object, otherwise a vector of cell numbers where the patch occurs}
\item{val}{integer. The value to be assigned to patch cells, when \code{rast=TRUE}}
}
\value{
A vector of raster cell numbers, or a RasterLayer object if \code{rast=TRUE}. If \code{edge=TRUE} a
list of two vectors is returned: one for the inner raster cells and the second for cells at the edge of the patch.
}
\description{
Create a linear patch, setting direction and convolution. The higher the convolution degree, the weaker the
linear shape (and direction).
% ADD set length instead of size
% FIX values of direction at 0 and 360
% FIX values of convol of 1 and 0
}
\details{
For any values of \code{convol} > 0.8, no big differences are observed. Also, direction is progressively lost
as convolution increases.
}
\examples{
library(raster)
r <- matrix(0,33,33)
r <- raster(r, xmn=0, xmx=10, ymn=0, ymx=10)
plot(makeLine(r, size=50, spt = 545, direction=45, convol=0.05, rast=TRUE))
}
|
plot4 <- function() {
  # Plot total PM2.5 emissions from coal-combustion sources, 1999-2008.
  # Reads the NEI summary and source-classification tables from the working
  # directory and writes the bar chart to plot4.png.
  library(data.table)  # library() errors on failure, unlike require()

  ## This first line will likely take a few seconds. Be patient!
  NEI <- readRDS("summarySCC_PM25.rds")
  SCC <- readRDS("Source_Classification_Code.rds")

  # Convert NEI to a data.table for fast aggregation.
  # (The original comment about subsetting Baltimore / fips == "24510" was a
  # leftover from another plot; no geographic subsetting happens here.)
  NEI.dt <- data.table(NEI)

  # Find all SCC codes whose Short.Name mentions "Coal" and then "Comb"
  SCC.coal <- SCC[grep("Coal", ignore.case = TRUE, SCC$Short.Name), ]
  SCC.coal.comb <- as.character(
    SCC.coal[grep("Comb", ignore.case = TRUE, SCC.coal$Short.Name), 1])

  # Keep only NEI records whose SCC code matches coal combustion
  NEI.coal <- subset(NEI.dt, NEI.dt$SCC %in% SCC.coal.comb)

  # Sum emissions by year
  x <- NEI.coal[, list(Emissions = sum(Emissions)), by = 'year']

  png(filename = "plot4.png", width = 480, height = 480)
  # Close the device even if barplot() fails part-way through
  on.exit(dev.off(), add = TRUE)
  barplot(x$Emissions,
          ylab = "Emissions",
          xlab = "Year",
          main = "Total Emissions by Coal Combustion",
          names.arg = c("1999", "2002", "2005", "2008"),
          ylim = c(0, 600000),
          axis.lty = 1)
}
|
/plot4.R
|
no_license
|
avo1d/ExData_Project2
|
R
| false
| false
| 1,151
|
r
|
plot4 <- function() {
  # Plot total PM2.5 emissions from coal-combustion sources, 1999-2008.
  # Reads the NEI summary and source-classification tables from the working
  # directory and writes the bar chart to plot4.png.
  library(data.table)  # library() errors on failure, unlike require()

  ## This first line will likely take a few seconds. Be patient!
  NEI <- readRDS("summarySCC_PM25.rds")
  SCC <- readRDS("Source_Classification_Code.rds")

  # Convert NEI to a data.table for fast aggregation.
  # (The original comment about subsetting Baltimore / fips == "24510" was a
  # leftover from another plot; no geographic subsetting happens here.)
  NEI.dt <- data.table(NEI)

  # Find all SCC codes whose Short.Name mentions "Coal" and then "Comb"
  SCC.coal <- SCC[grep("Coal", ignore.case = TRUE, SCC$Short.Name), ]
  SCC.coal.comb <- as.character(
    SCC.coal[grep("Comb", ignore.case = TRUE, SCC.coal$Short.Name), 1])

  # Keep only NEI records whose SCC code matches coal combustion
  NEI.coal <- subset(NEI.dt, NEI.dt$SCC %in% SCC.coal.comb)

  # Sum emissions by year
  x <- NEI.coal[, list(Emissions = sum(Emissions)), by = 'year']

  png(filename = "plot4.png", width = 480, height = 480)
  # Close the device even if barplot() fails part-way through
  on.exit(dev.off(), add = TRUE)
  barplot(x$Emissions,
          ylab = "Emissions",
          xlab = "Year",
          main = "Total Emissions by Coal Combustion",
          names.arg = c("1999", "2002", "2005", "2008"),
          ylim = c(0, 600000),
          axis.lty = 1)
}
|
### S3 print method for objects of class 'llgpcptab' (a modified simplex
### tableau used in lexicographic linear goal programming). Writes every
### section of the tableau to the console in fixed-width columns.
print.llgpcptab <- function( x, ... )
{
###
### This function prints out the augmented tableau at the k-th priority level
###
### Parameters
### x = an object of class 'llgpcptab' that is the modified simplex tableau
### ... = other arguments as they may apply to the generic S3 print function
###
tab <- x
### fixed-width scientific (%14.6e) and string (%14s) formats keep all the
### printed sections column-aligned
fmt.e <- paste( "%", 14, ".", 6, "e", sep="" )
fmt.s <- paste( "%", 14, "s", sep="" )
cat( "\n" )
cat( "Iteration Number: ", tab$iter, "\n" )
cat( "Priority Level: ", tab$level, "\n" )
###
### create, format and print a data frame for the top stub
### rows are printed in reverse priority order (P<levels> down to P1),
### hence the tab$levels-i+1 index below
### NOTE(review): the 1:tab$levels loops assume levels/nonbasics >= 1
###
cat( "\n" )
cat( "Top Stub\n" )
tw.frame <- data.frame( matrix( nrow=tab$levels, ncol=tab$nonbasics ) )
for ( i in 1:tab$levels ) {
for ( j in 1:tab$nonbasics ) {
tw.frame[i,j] <- sprintf( fmt.e, tab$tw[tab$levels-i+1,j] )
}
}
names( tw.frame ) <- sprintf( fmt.s, tab$col.headings )
row.names( tw.frame ) <- paste( "P", seq( tab$levels, 1, -1 ), sep="" )
print( tw.frame )
###
### create, format and print a data frame for the center stub
### (objective rows in natural order)
###
cat( "\n" )
cat( "Center Stub\n" )
te.frame <- data.frame( matrix( nrow=tab$objectives, ncol=tab$nonbasics ) )
for ( i in 1:tab$objectives ) {
for ( j in 1:tab$nonbasics ) {
te.frame[i,j] <- sprintf( fmt.e, tab$te[i,j] )
}
}
names( te.frame ) <- sprintf( fmt.s, tab$col.headings )
row.names( te.frame ) <- tab$row.headings
print( te.frame )
###
### create, format and print a data frame for the left stub
### columns are printed in reverse priority order, mirroring the top stub
###
cat( "\n" )
cat( "Left Stub\n" )
tu.frame <- data.frame( matrix( nrow=tab$objectives, ncol=tab$levels ) )
for ( i in 1:tab$objectives ) {
for ( j in 1:tab$levels ) {
tu.frame[i,j] <- sprintf( fmt.e, tab$tu[i,tab$levels-j+1] )
}
}
names( tu.frame ) <- sprintf( fmt.s, paste( "P", seq( tab$levels, 1, -1 ), sep="" ) )
row.names( tu.frame ) <- tab$row.headings
print( tu.frame )
###
### create, format and print a data frame for the index rows
### (printed in natural priority order P1..P<levels>)
###
cat( "\n" )
cat( "Index Rows\n" )
ti.frame <- data.frame( matrix( nrow=tab$levels, ncol=tab$nonbasics ) )
for ( i in 1:tab$levels ) {
for ( j in 1:tab$nonbasics ) {
ti.frame[i,j] <- sprintf( fmt.e, tab$ti[i,j] )
}
}
names( ti.frame ) <- sprintf( fmt.s, tab$col.headings )
row.names( ti.frame ) <- paste( "P", 1:tab$levels, sep="" )
print( ti.frame )
###
### create, format and print a data frame for the current solution
### (single column: one value per objective row)
###
cat( "\n" )
cat( "Current Solution\n" )
tb.frame <- data.frame( matrix( nrow=tab$objectives, ncol=1 ) )
for ( i in 1:tab$objectives ) {
tb.frame[i,1] <- sprintf( fmt.e, tab$tb[i] )
}
names( tb.frame ) <- sprintf( fmt.s, c( "Value" ) )
row.names( tb.frame ) <- tab$row.headings
print( tb.frame )
###
### create, format and print a data frame for the achievement function
### (single column: one value per priority level)
###
cat( "\n" )
cat( "Achievement Function\n" )
ta.frame <- data.frame( matrix( nrow=tab$levels, ncol=1 ) )
for ( i in 1:tab$levels ) {
ta.frame[i,1] <- sprintf( fmt.e, tab$ta[i] )
}
names( ta.frame ) <- sprintf( fmt.s, c( "Value" ) )
row.names( ta.frame ) <- paste( "P", 1:tab$levels, sep="" )
print( ta.frame )
###
### print the variable classes
###
cat( "\nVariable Classes\n" )
print( tab$variable.classes )
}
|
/goalprog/R/print.llgpcptab.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 3,526
|
r
|
### S3 print method for objects of class 'llgpcptab' (a modified simplex
### tableau used in lexicographic linear goal programming). Writes every
### section of the tableau to the console in fixed-width columns.
print.llgpcptab <- function( x, ... )
{
###
### This function prints out the augmented tableau at the k-th priority level
###
### Parameters
### x = an object of class 'llgpcptab' that is the modified simplex tableau
### ... = other arguments as they may apply to the generic S3 print function
###
tab <- x
### fixed-width scientific (%14.6e) and string (%14s) formats keep all the
### printed sections column-aligned
fmt.e <- paste( "%", 14, ".", 6, "e", sep="" )
fmt.s <- paste( "%", 14, "s", sep="" )
cat( "\n" )
cat( "Iteration Number: ", tab$iter, "\n" )
cat( "Priority Level: ", tab$level, "\n" )
###
### create, format and print a data frame for the top stub
### rows are printed in reverse priority order (P<levels> down to P1),
### hence the tab$levels-i+1 index below
### NOTE(review): the 1:tab$levels loops assume levels/nonbasics >= 1
###
cat( "\n" )
cat( "Top Stub\n" )
tw.frame <- data.frame( matrix( nrow=tab$levels, ncol=tab$nonbasics ) )
for ( i in 1:tab$levels ) {
for ( j in 1:tab$nonbasics ) {
tw.frame[i,j] <- sprintf( fmt.e, tab$tw[tab$levels-i+1,j] )
}
}
names( tw.frame ) <- sprintf( fmt.s, tab$col.headings )
row.names( tw.frame ) <- paste( "P", seq( tab$levels, 1, -1 ), sep="" )
print( tw.frame )
###
### create, format and print a data frame for the center stub
### (objective rows in natural order)
###
cat( "\n" )
cat( "Center Stub\n" )
te.frame <- data.frame( matrix( nrow=tab$objectives, ncol=tab$nonbasics ) )
for ( i in 1:tab$objectives ) {
for ( j in 1:tab$nonbasics ) {
te.frame[i,j] <- sprintf( fmt.e, tab$te[i,j] )
}
}
names( te.frame ) <- sprintf( fmt.s, tab$col.headings )
row.names( te.frame ) <- tab$row.headings
print( te.frame )
###
### create, format and print a data frame for the left stub
### columns are printed in reverse priority order, mirroring the top stub
###
cat( "\n" )
cat( "Left Stub\n" )
tu.frame <- data.frame( matrix( nrow=tab$objectives, ncol=tab$levels ) )
for ( i in 1:tab$objectives ) {
for ( j in 1:tab$levels ) {
tu.frame[i,j] <- sprintf( fmt.e, tab$tu[i,tab$levels-j+1] )
}
}
names( tu.frame ) <- sprintf( fmt.s, paste( "P", seq( tab$levels, 1, -1 ), sep="" ) )
row.names( tu.frame ) <- tab$row.headings
print( tu.frame )
###
### create, format and print a data frame for the index rows
### (printed in natural priority order P1..P<levels>)
###
cat( "\n" )
cat( "Index Rows\n" )
ti.frame <- data.frame( matrix( nrow=tab$levels, ncol=tab$nonbasics ) )
for ( i in 1:tab$levels ) {
for ( j in 1:tab$nonbasics ) {
ti.frame[i,j] <- sprintf( fmt.e, tab$ti[i,j] )
}
}
names( ti.frame ) <- sprintf( fmt.s, tab$col.headings )
row.names( ti.frame ) <- paste( "P", 1:tab$levels, sep="" )
print( ti.frame )
###
### create, format and print a data frame for the current solution
### (single column: one value per objective row)
###
cat( "\n" )
cat( "Current Solution\n" )
tb.frame <- data.frame( matrix( nrow=tab$objectives, ncol=1 ) )
for ( i in 1:tab$objectives ) {
tb.frame[i,1] <- sprintf( fmt.e, tab$tb[i] )
}
names( tb.frame ) <- sprintf( fmt.s, c( "Value" ) )
row.names( tb.frame ) <- tab$row.headings
print( tb.frame )
###
### create, format and print a data frame for the achievement function
### (single column: one value per priority level)
###
cat( "\n" )
cat( "Achievement Function\n" )
ta.frame <- data.frame( matrix( nrow=tab$levels, ncol=1 ) )
for ( i in 1:tab$levels ) {
ta.frame[i,1] <- sprintf( fmt.e, tab$ta[i] )
}
names( ta.frame ) <- sprintf( fmt.s, c( "Value" ) )
row.names( ta.frame ) <- paste( "P", 1:tab$levels, sep="" )
print( ta.frame )
###
### print the variable classes
###
cat( "\nVariable Classes\n" )
print( tab$variable.classes )
}
|
# Data munging for all letter distortion experiment data.
#
# Reads the raw trial data and the threshold (sensitivity) summaries for
# experiments 1 and 2, cleans up factor levels (the raw CSV fields carry a
# leading space, hence the " bex"-style keys below), and exports the tidied
# tables both as a single .RData bundle and as individual CSV files.
library(plyr)
library(dplyr)
# NOTE(review): assumes the script is run from the project root -- confirm
top_dir <- getwd()
# top_dir <- '~/Dropbox/Projects/letter-distortion-detection'
out_dir <- file.path(top_dir, "results", "r-analysis-final-paper")
# raw data experiment 1 --------------------------------------------------------------
fname <- file.path(top_dir, "results", "experiment_1", "all_data.csv")
raw_dat_1 <- read.csv(fname)
# distortion type codes -> publication labels (" bex" / " rf" include the
# leading space present in the raw file)
raw_dat_1$distortion <- revalue(raw_dat_1$distortion,
                                c(" bex" = "BPN",
                                  " rf" = "RF"))
# subject numbers -> observer initials
raw_dat_1$subject <- factor(raw_dat_1$subject)
raw_dat_1$subject <- revalue(raw_dat_1$subject,
                             c("2" = "TW",
                               "5" = "ST",
                               "7" = "AM",
                               "8" = "RM",
                               "9" = "MF"))
# strip the leading space from target letter codes
raw_dat_1$targ_letter <- revalue(raw_dat_1$targ_letter,
                                 c(" D" = "D",
                                   " H" = "H",
                                   " K" = "K",
                                   " N" = "N"))
# set display/analysis order of the target letters
raw_dat_1$targ_letter <- factor(raw_dat_1$targ_letter,
                                levels = c("K", "H", "D", "N"))
# Threshold data expt 1 ----------------------------------------------------------
fname <- file.path(top_dir, "results", "saskia_analysis", "sensitivitydata", "alldatasensexp1+2.csv")
threshold_dat <- read.csv(fname, sep='\t')
# rename the 'distortiontype' column to 'distortion'
threshold_dat$distortion <- threshold_dat$distortiontype
threshold_dat$distortiontype <- NULL
threshold_dat$distortion <- revalue(threshold_dat$distortion,
                                    c(" Bex" = "BPN",
                                      " RF" = "RF"))
# natural log of distortion frequency, used as a covariate downstream
threshold_dat$log_freq <- log(threshold_dat$freq)
# remove space:
threshold_dat$flanked <- revalue(threshold_dat$flanked,
                                 c(" flanked" = "flanked",
                                   " unflanked" = "unflanked"))
# reorder:
threshold_dat$flanked <- factor(threshold_dat$flanked,
                                levels = c("unflanked", "flanked"))
threshold_dat_1 <- threshold_dat
# raw data experiment 2 --------------------------------------------------------------
fname <- file.path(top_dir, "results", "experiment_2", "all_data.csv")
raw_dat_2 <- read.csv(fname)
raw_dat_2$distortion <- revalue(raw_dat_2$distortion,
                                c(" bex" = "BPN",
                                  " rf" = "RF"))
raw_dat_2$subject <- factor(raw_dat_2$subject)
raw_dat_2$subject <- revalue(raw_dat_2$subject,
                             c("2" = "TW",
                               "5" = "ST",
                               "7" = "AM"))
# expt 2 thresholds ------------------------------------------------------------------
fname <- file.path(top_dir, "results", "saskia_analysis", "sensitivitydata", "alldatasensexp3c.csv")
threshold_dat <- read.csv(fname, sep='\t')
# rename columns: distortiontype -> distortion, distflanker -> n_dist_flanks,
# exp3 -> experiment
threshold_dat$distortion <- threshold_dat$distortiontype
threshold_dat$distortiontype <- NULL
threshold_dat$n_dist_flanks <- threshold_dat$distflanker
threshold_dat$distflanker <- NULL
threshold_dat$experiment <- threshold_dat$exp3
threshold_dat$exp3 <- NULL
threshold_dat$distortion <- revalue(threshold_dat$distortion,
                                    c(" Bex" = "BPN",
                                      " RF" = "RF"))
# strip leading spaces from the experiment sub-condition labels
threshold_dat$experiment <- revalue(threshold_dat$experiment,
                                    c(" a" = "a",
                                      " b" = "b",
                                      " c" = "c"))
threshold_dat_2 <- threshold_dat
# export data -------------------------------------------------------------
save(threshold_dat_1, raw_dat_1, threshold_dat_2, raw_dat_2,
     file = file.path(out_dir, "all_data.RData"))
# export to csv too:
# NOTE(review): write.csv adds a row-number column because row.names is not
# set to FALSE -- confirm downstream readers expect that column
write.csv(threshold_dat_1,
          file = file.path(out_dir, "expt_1_thresholds.csv"))
write.csv(raw_dat_1,
          file = file.path(out_dir, "expt_1_raw.csv"))
write.csv(threshold_dat_2,
          file = file.path(out_dir, "expt_2_thresholds.csv"))
write.csv(raw_dat_2,
          file = file.path(out_dir, "expt_2_raw.csv"))
|
/code/analysis/data_munging_all.R
|
no_license
|
tomwallis/letter_distortion_detection
|
R
| false
| false
| 4,197
|
r
|
# Data munging for all letter distortion experiment data.
#
# Reads the raw trial data and the threshold (sensitivity) summaries for
# experiments 1 and 2, cleans up factor levels (the raw CSV fields carry a
# leading space, hence the " bex"-style keys below), and exports the tidied
# tables both as a single .RData bundle and as individual CSV files.
library(plyr)
library(dplyr)
# NOTE(review): assumes the script is run from the project root -- confirm
top_dir <- getwd()
# top_dir <- '~/Dropbox/Projects/letter-distortion-detection'
out_dir <- file.path(top_dir, "results", "r-analysis-final-paper")
# raw data experiment 1 --------------------------------------------------------------
fname <- file.path(top_dir, "results", "experiment_1", "all_data.csv")
raw_dat_1 <- read.csv(fname)
# distortion type codes -> publication labels (" bex" / " rf" include the
# leading space present in the raw file)
raw_dat_1$distortion <- revalue(raw_dat_1$distortion,
                                c(" bex" = "BPN",
                                  " rf" = "RF"))
# subject numbers -> observer initials
raw_dat_1$subject <- factor(raw_dat_1$subject)
raw_dat_1$subject <- revalue(raw_dat_1$subject,
                             c("2" = "TW",
                               "5" = "ST",
                               "7" = "AM",
                               "8" = "RM",
                               "9" = "MF"))
# strip the leading space from target letter codes
raw_dat_1$targ_letter <- revalue(raw_dat_1$targ_letter,
                                 c(" D" = "D",
                                   " H" = "H",
                                   " K" = "K",
                                   " N" = "N"))
# set display/analysis order of the target letters
raw_dat_1$targ_letter <- factor(raw_dat_1$targ_letter,
                                levels = c("K", "H", "D", "N"))
# Threshold data expt 1 ----------------------------------------------------------
fname <- file.path(top_dir, "results", "saskia_analysis", "sensitivitydata", "alldatasensexp1+2.csv")
threshold_dat <- read.csv(fname, sep='\t')
# rename the 'distortiontype' column to 'distortion'
threshold_dat$distortion <- threshold_dat$distortiontype
threshold_dat$distortiontype <- NULL
threshold_dat$distortion <- revalue(threshold_dat$distortion,
                                    c(" Bex" = "BPN",
                                      " RF" = "RF"))
# natural log of distortion frequency, used as a covariate downstream
threshold_dat$log_freq <- log(threshold_dat$freq)
# remove space:
threshold_dat$flanked <- revalue(threshold_dat$flanked,
                                 c(" flanked" = "flanked",
                                   " unflanked" = "unflanked"))
# reorder:
threshold_dat$flanked <- factor(threshold_dat$flanked,
                                levels = c("unflanked", "flanked"))
threshold_dat_1 <- threshold_dat
# raw data experiment 2 --------------------------------------------------------------
fname <- file.path(top_dir, "results", "experiment_2", "all_data.csv")
raw_dat_2 <- read.csv(fname)
raw_dat_2$distortion <- revalue(raw_dat_2$distortion,
                                c(" bex" = "BPN",
                                  " rf" = "RF"))
raw_dat_2$subject <- factor(raw_dat_2$subject)
raw_dat_2$subject <- revalue(raw_dat_2$subject,
                             c("2" = "TW",
                               "5" = "ST",
                               "7" = "AM"))
# expt 2 thresholds ------------------------------------------------------------------
fname <- file.path(top_dir, "results", "saskia_analysis", "sensitivitydata", "alldatasensexp3c.csv")
threshold_dat <- read.csv(fname, sep='\t')
# rename columns: distortiontype -> distortion, distflanker -> n_dist_flanks,
# exp3 -> experiment
threshold_dat$distortion <- threshold_dat$distortiontype
threshold_dat$distortiontype <- NULL
threshold_dat$n_dist_flanks <- threshold_dat$distflanker
threshold_dat$distflanker <- NULL
threshold_dat$experiment <- threshold_dat$exp3
threshold_dat$exp3 <- NULL
threshold_dat$distortion <- revalue(threshold_dat$distortion,
                                    c(" Bex" = "BPN",
                                      " RF" = "RF"))
# strip leading spaces from the experiment sub-condition labels
threshold_dat$experiment <- revalue(threshold_dat$experiment,
                                    c(" a" = "a",
                                      " b" = "b",
                                      " c" = "c"))
threshold_dat_2 <- threshold_dat
# export data -------------------------------------------------------------
save(threshold_dat_1, raw_dat_1, threshold_dat_2, raw_dat_2,
     file = file.path(out_dir, "all_data.RData"))
# export to csv too:
# NOTE(review): write.csv adds a row-number column because row.names is not
# set to FALSE -- confirm downstream readers expect that column
write.csv(threshold_dat_1,
          file = file.path(out_dir, "expt_1_thresholds.csv"))
write.csv(raw_dat_1,
          file = file.path(out_dir, "expt_1_raw.csv"))
write.csv(threshold_dat_2,
          file = file.path(out_dir, "expt_2_thresholds.csv"))
write.csv(raw_dat_2,
          file = file.path(out_dir, "expt_2_raw.csv"))
|
# LT 19/10/2020
# plot of proportion in OMIM vs metric decile
# (figure e9a in flagship paper)
# Load the OMIM gene-discovery-year table and collapse it to one row per
# Ensembl gene: the earliest post-1990 discovery year and whether any of
# the gene's discoveries used NGS.
# Returns a tibble with columns Ensembl.Gene.ID, year, discoverybyNGS.
load_omim_by_year_data <- function() {
  discoveries <- read_delim('data/forKonrad_cleaned_gene_discovery_years_2018-10-09.tsv', delim = '\t')
  recent <- filter(discoveries, yearDiscovered > 1990)
  # exploratory histogram of discoveries per year, kept for reference:
  # recent %>%
  #   count(yearDiscovered) %>%
  #   ggplot + aes(x = yearDiscovered, y = n) + geom_bar(stat='identity')
  recent %>%
    group_by(Ensembl.Gene.ID) %>%
    summarize(year = min(yearDiscovered),
              discoverybyNGS = any(discoverybyNGS))
}
# Plot the proportion of genes present in OMIM per LOEUF (oe_lof_upper)
# decile, with normal-approximation 95% confidence intervals
# (figure e9a in the flagship paper).
#
# save_plot: if TRUE, also write the figure to 'e9_proportion_in_omim.pdf'
#            in the working directory.
# Returns the ggplot object.
#
# NOTE(review): depends on globals defined elsewhere in the analysis --
# gene_data, omim_data, oe_x_axis, and percent_format (scales) -- confirm
# they are in scope before calling.
proportion_in_omim = function(save_plot=F) {
  #omim_data = load_omim_by_year_data()
  # flag each gene as in/out of OMIM by Ensembl gene ID
  gene_omim_data = gene_data %>%
    mutate(in_omim = gene_id %in% omim_data$Ensembl.Gene.ID)
  # NOTE(review): the two model summaries below are computed but their
  # results are discarded inside the function (not assigned or printed);
  # they only show output when these lines are run interactively
  gene_omim_data %>%
    summarize(ttest = list(t.test(oe_lof_upper ~ in_omim))) %>%
    tidy(ttest)
  gene_omim_data %>%
    summarize(ttest = list(glm(in_omim ~ oe_lof_upper + cds_length, family='binomial'))) %>%
    tidy(ttest)
  # binomial proportion per decile bin with +/- 1.96 * SEM error bars
  p = gene_omim_data %>%
    group_by(oe_lof_upper_bin) %>%
    summarize(num_in_omim = sum(in_omim, na.rm=T), n=n(),
              prop_in_omim = num_in_omim / n, sem = sqrt(prop_in_omim * (1 - prop_in_omim) / n),
              prop_upper = prop_in_omim + 1.96 * sem, prop_lower = prop_in_omim - 1.96 * sem) %>%
    ggplot + aes(x = oe_lof_upper_bin, y = prop_in_omim, ymin = prop_lower, ymax = prop_upper) +
    geom_pointrange() + scale_y_continuous(labels=percent_format(accuracy=1)) +
    theme_classic() + oe_x_axis + ylab('Proportion in OMIM')
  if (save_plot) {
    pdf('e9_proportion_in_omim.pdf', height=3, width=4)
    print(p)
    dev.off()
  }
  return(p)
}
# Default constraint metric used by the plotting functions below.
metric <- 'oe_lof_upper'
# Map an internal metric/variable name onto its human-readable axis label;
# names without a known label are returned unchanged.
variable_label <- function(variable) {
  label_map <- c(
    'oe_lof_upper' = 'LOEUF',
    'psfs' = 'powerSFS',
    'cds_length' = 'CDS length'
  )
  if (variable %in% names(label_map)) {
    label_map[[variable]]
  } else {
    variable
  }
}
# Plot the proportion of genes present in ClinVar (pathogenic / likely
# pathogenic) per decile of the chosen constraint metric, with
# normal-approximation 95% confidence intervals.
#
# metric    : column prefix in gene_data; '<metric>_bin' must hold decile
#             bins coded 0..9 (labelled at midpoints 5%..95%)
# save_plot : if TRUE, also write the figure to a PDF in the working
#             directory
# Returns the ggplot object.
#
# NOTE(review): depends on the global gene_data and on ggplot2/dplyr/readr
# being attached -- confirm they are in scope before calling.
proportion_in_clinvar = function(metric='oe_lof_upper', save_plot=F) {
  oe_x_label = paste(variable_label(metric), 'decile')
  # decile bin k is labelled with its midpoint percentile (10k + 5);
  # text is shown only on every other tick to avoid crowding
  label_function = function(x) {
    y = 10 * x + 5
    ifelse(y %% 20 == 0, paste0(y, '%'), "")
  }
  oe_x_axis = list(xlab(oe_x_label),
                   scale_x_continuous(labels=label_function, breaks=seq(-0.5, 9.5, 1), limits=c(-0.5, 9.5)))
  clinvar_data = read_delim('gene_lists/lists/clinvar_path_likelypath.tsv', delim='\t', col_names=c('gene'))
  gene_clinvar_data = gene_data %>% mutate(in_clinvar = gene %in% clinvar_data$gene)
  # binomial proportion per decile bin with +/- 1.96 * SEM error bars
  p = gene_clinvar_data %>%
    group_by(.data[[paste0(metric,'_bin')]]) %>%
    summarize(num_in_clinvar = sum(in_clinvar, na.rm=T), n=n(),
              prop_in_clinvar = num_in_clinvar / n, sem = sqrt(prop_in_clinvar * (1 - prop_in_clinvar) / n),
              prop_upper = prop_in_clinvar + 1.96 * sem, prop_lower = prop_in_clinvar - 1.96 * sem) %>%
    ggplot + aes(x = .data[[paste0(metric,'_bin')]], y = prop_in_clinvar, ymin = prop_lower, ymax = prop_upper) +
    geom_pointrange() + scale_y_continuous() + coord_cartesian(ylim=c(0,.25)) +
    theme_classic() + oe_x_axis + ylab('Proportion in ClinVar')
  if (save_plot) {
    # bug fix: previously wrote to 'e9_proportion_in_omim.pdf', which
    # clobbered the OMIM figure produced by proportion_in_omim()
    pdf('e9_proportion_in_clinvar.pdf', height=3, width=4)
    print(p)
    dev.off()
  }
  return(p)
}
|
/Oct2020/Plot.OMIMperdecile.R
|
no_license
|
tiboloic/optiRVIS
|
R
| false
| false
| 3,185
|
r
|
# LT 19/10/2020
# plot of proportion in OMIM vs metric decile
# (figure e9a in flagship paper)
# Load the OMIM gene-discovery-year table and collapse it to one row per
# Ensembl gene: the earliest post-1990 discovery year and whether any of
# the gene's discoveries used NGS.
# Returns a tibble with columns Ensembl.Gene.ID, year, discoverybyNGS.
load_omim_by_year_data <- function() {
  discoveries <- read_delim('data/forKonrad_cleaned_gene_discovery_years_2018-10-09.tsv', delim = '\t')
  recent <- filter(discoveries, yearDiscovered > 1990)
  # exploratory histogram of discoveries per year, kept for reference:
  # recent %>%
  #   count(yearDiscovered) %>%
  #   ggplot + aes(x = yearDiscovered, y = n) + geom_bar(stat='identity')
  recent %>%
    group_by(Ensembl.Gene.ID) %>%
    summarize(year = min(yearDiscovered),
              discoverybyNGS = any(discoverybyNGS))
}
# Plot the proportion of genes present in OMIM per LOEUF (oe_lof_upper)
# decile, with normal-approximation 95% confidence intervals
# (figure e9a in the flagship paper).
#
# save_plot: if TRUE, also write the figure to 'e9_proportion_in_omim.pdf'
#            in the working directory.
# Returns the ggplot object.
#
# NOTE(review): depends on globals defined elsewhere in the analysis --
# gene_data, omim_data, oe_x_axis, and percent_format (scales) -- confirm
# they are in scope before calling.
proportion_in_omim = function(save_plot=F) {
  #omim_data = load_omim_by_year_data()
  # flag each gene as in/out of OMIM by Ensembl gene ID
  gene_omim_data = gene_data %>%
    mutate(in_omim = gene_id %in% omim_data$Ensembl.Gene.ID)
  # NOTE(review): the two model summaries below are computed but their
  # results are discarded inside the function (not assigned or printed);
  # they only show output when these lines are run interactively
  gene_omim_data %>%
    summarize(ttest = list(t.test(oe_lof_upper ~ in_omim))) %>%
    tidy(ttest)
  gene_omim_data %>%
    summarize(ttest = list(glm(in_omim ~ oe_lof_upper + cds_length, family='binomial'))) %>%
    tidy(ttest)
  # binomial proportion per decile bin with +/- 1.96 * SEM error bars
  p = gene_omim_data %>%
    group_by(oe_lof_upper_bin) %>%
    summarize(num_in_omim = sum(in_omim, na.rm=T), n=n(),
              prop_in_omim = num_in_omim / n, sem = sqrt(prop_in_omim * (1 - prop_in_omim) / n),
              prop_upper = prop_in_omim + 1.96 * sem, prop_lower = prop_in_omim - 1.96 * sem) %>%
    ggplot + aes(x = oe_lof_upper_bin, y = prop_in_omim, ymin = prop_lower, ymax = prop_upper) +
    geom_pointrange() + scale_y_continuous(labels=percent_format(accuracy=1)) +
    theme_classic() + oe_x_axis + ylab('Proportion in OMIM')
  if (save_plot) {
    pdf('e9_proportion_in_omim.pdf', height=3, width=4)
    print(p)
    dev.off()
  }
  return(p)
}
# Default constraint metric used by the plotting functions below.
metric <- 'oe_lof_upper'
# Map an internal metric/variable name onto its human-readable axis label;
# names without a known label are returned unchanged.
variable_label <- function(variable) {
  label_map <- c(
    'oe_lof_upper' = 'LOEUF',
    'psfs' = 'powerSFS',
    'cds_length' = 'CDS length'
  )
  if (variable %in% names(label_map)) {
    label_map[[variable]]
  } else {
    variable
  }
}
# Plot the proportion of genes present in ClinVar (pathogenic / likely
# pathogenic) per decile of the chosen constraint metric, with
# normal-approximation 95% confidence intervals.
#
# metric    : column prefix in gene_data; '<metric>_bin' must hold decile
#             bins coded 0..9 (labelled at midpoints 5%..95%)
# save_plot : if TRUE, also write the figure to a PDF in the working
#             directory
# Returns the ggplot object.
#
# NOTE(review): depends on the global gene_data and on ggplot2/dplyr/readr
# being attached -- confirm they are in scope before calling.
proportion_in_clinvar = function(metric='oe_lof_upper', save_plot=F) {
  oe_x_label = paste(variable_label(metric), 'decile')
  # decile bin k is labelled with its midpoint percentile (10k + 5);
  # text is shown only on every other tick to avoid crowding
  label_function = function(x) {
    y = 10 * x + 5
    ifelse(y %% 20 == 0, paste0(y, '%'), "")
  }
  oe_x_axis = list(xlab(oe_x_label),
                   scale_x_continuous(labels=label_function, breaks=seq(-0.5, 9.5, 1), limits=c(-0.5, 9.5)))
  clinvar_data = read_delim('gene_lists/lists/clinvar_path_likelypath.tsv', delim='\t', col_names=c('gene'))
  gene_clinvar_data = gene_data %>% mutate(in_clinvar = gene %in% clinvar_data$gene)
  # binomial proportion per decile bin with +/- 1.96 * SEM error bars
  p = gene_clinvar_data %>%
    group_by(.data[[paste0(metric,'_bin')]]) %>%
    summarize(num_in_clinvar = sum(in_clinvar, na.rm=T), n=n(),
              prop_in_clinvar = num_in_clinvar / n, sem = sqrt(prop_in_clinvar * (1 - prop_in_clinvar) / n),
              prop_upper = prop_in_clinvar + 1.96 * sem, prop_lower = prop_in_clinvar - 1.96 * sem) %>%
    ggplot + aes(x = .data[[paste0(metric,'_bin')]], y = prop_in_clinvar, ymin = prop_lower, ymax = prop_upper) +
    geom_pointrange() + scale_y_continuous() + coord_cartesian(ylim=c(0,.25)) +
    theme_classic() + oe_x_axis + ylab('Proportion in ClinVar')
  if (save_plot) {
    # bug fix: previously wrote to 'e9_proportion_in_omim.pdf', which
    # clobbered the OMIM figure produced by proportion_in_omim()
    pdf('e9_proportion_in_clinvar.pdf', height=3, width=4)
    print(p)
    dev.off()
  }
  return(p)
}
|
#' Plot selectivity
#'
#' Plot selectivity, including retention and other quantities, with additional
#' plots for time-varying selectivity.
#'
#'
#' @template replist
#' @template fleets
#' @param infotable Optional table of information controlling appearance of
#' plot and legend. Is produced as output and can be modified and entered as
#' input.
#' @template fleetnames
#' @param sizefactors Which elements of the factors column of SIZE_SELEX should
#' be included in plot of selectivity across multiple fleets?
#' @param agefactors Which elements of the factors column of AGE_SELEX should
#' be included in plot of selectivity across multiple fleets?
#' @param years Which years for selectivity are shown in multi-line plot
#' (default = last year of model).
#' @param minyr optional input for minimum year to show in plots
#' @param maxyr optional input for maximum year to show in plots
#' @param season Which season (if seasonal model) for selectivity shown in
#' multi-line plot (default = 1).
#' @param sexes Optional vector to subset sexes for which to make plots
#' (1=females, 2=males)
#' @param selexlines Vector selecting which lines get plotted. Values are:
#'   1 = selectivity, 2 = retention, 3 = discard mortality, 4 = keep.
#' @param subplots Vector controlling which subplots to create.
#' Numbering of subplots is as follows,
#'
#' *Plots with all fleets grouped together*
#' \itemize{
#' \item 1 selectivity at length in end year for all fleets shown together
#' \item 2 selectivity at age in end year for all fleets shown together
#' (this includes both age-based selectivity "Asel" and age values derived
#' from length-based, "Asel2". You can choose only one using
#' "agefactors" if needed.)
#' }
#'
#' *Plots of time-varying length-based selectivity*
#' \itemize{
#' \item 3 selectivity at length time-varying surface
#' \item 4 selectivity at length time-varying contour
#' \item 5 retention at length time-varying surface
#' \item 6 retention at length time-varying surface
#' \item 7 discard mortality time-varying surface
#' \item 8 discard mortality time-varying contour
#' }
#'
#' *Selectivity at length in end year by fleet*
#' \itemize{
#' \item 9 selectivity, retention, and discard mortality at length in ending year
#' }
#'
#' *Plots of time-varying age-based selectivity*
#' \itemize{
#' \item 11 selectivity at age time-varying surface
#' \item 12 selectivity at age time-varying contour
#' }
#'
#' *Selectivity at age in end year by fleet*
#' \itemize{
#' \item 13 selectivity at age in ending year if time-varying
#' \item 14 selectivity at age in ending year if NOT time-varying
#' \item 15 matrix of selectivity deviations for semi-parametric selectivity
#' }
#'
#' *Selectivity for both/either age or length*
#' \itemize{
#' \item 21 selectivity at age and length contour with overlaid growth curve
#' \item 22 selectivity with uncertainty if requested at end of control file
#' }
#' @param subplot Deprecated. Use subplots instead.
#' @param skipAgeSelex10 Exclude plots for age selectivity type 10 (selectivity
#' = 1.0 for all ages beginning at age 1)?
#' @template lwd
#' @param spacepoints number of years between points shown on top of lines (for
#' long timeseries, points every year get mashed together)
#' @param staggerpoints number of years to stagger the first point (if
#' `spacepoints > 1`) for each line (so that adjacent lines have points in
#' different years)
#' @template legendloc
#' @template pwidth
#' @template pheight
#' @template punits
#' @template ptsize
#' @template res
#' @template plot
#' @template print
#' @param add Add to existing plot (not yet implemented)
#' @template labels
#' @param col1 color for female growth curve
#' @param col2 color for male growth curve
#' @template cex.main
#' @template mainTitle
#' @template mar
#' @template plotdir
#' @template verbose
#' @author Ian Stewart, Ian Taylor
#' @export
#' @seealso [SS_plots()], [SS_output()]
SSplotSelex <-
function(replist, infotable = NULL,
fleets = "all", fleetnames = "default",
sizefactors = c("Lsel"),
agefactors = c("Asel", "Asel2"),
years = "endyr",
minyr = -Inf,
maxyr = Inf,
season = 1,
sexes = "all",
selexlines = 1:6,
subplots = 1:25,
skipAgeSelex10 = TRUE,
plot = TRUE, print = FALSE, add = FALSE,
labels = c(
"Length (cm)", # 1
"Age (yr)", # 2
"Year", # 3
"Selectivity", # 4
"Retention", # 5
"Discard mortality"
), # 6
col1 = "red", col2 = "blue", lwd = 2,
spacepoints = 5,
staggerpoints = 1,
legendloc = "bottomright",
pwidth = 6.5, pheight = 5.0, punits = "in",
res = 300, ptsize = 10,
cex.main = 1,
mainTitle = TRUE,
mar = NULL,
plotdir = "default",
verbose = TRUE, subplot = lifecycle::deprecated()) {
# Warning about deprecated arguments. Should be removed after 1 release.
if (lifecycle::is_present(subplot)) {
lifecycle::deprecate_warn(
when = "1.45.1",
what = "SSplotSelex(subplot)",
details = "Please use subplots instead. Assigning subplot to subplots."
)
subplots <- subplot
}
# empty table into which information on line types etc. might be copied
infotable2 <- NULL
nsexes <- replist[["nsexes"]]
nseasons <- replist[["nseasons"]]
nfleets <- replist[["nfleets"]]
lbinspop <- replist[["lbinspop"]]
nlbinspop <- replist[["nlbinspop"]]
sizeselex <- replist[["sizeselex"]]
ageselex <- replist[["ageselex"]]
accuage <- replist[["accuage"]]
startyr <- replist[["startyr"]]
endyr <- replist[["endyr"]]
FleetNames <- replist[["FleetNames"]]
growdat <- replist[["endgrowth"]]
growthCVtype <- replist[["growthCVtype"]]
mainmorphs <- replist[["mainmorphs"]]
nareas <- replist[["nareas"]]
ngpatterns <- replist[["ngpatterns"]]
derived_quants <- replist[["derived_quants"]]
# message about skipping plots
if (is.null(ageselex)) {
message("Skipping age-based selectivity plots: no output available")
}
if (is.null(sizeselex)) {
message("Skipping length-based selectivity plots: no output available")
}
# table to store information on each plot
plotinfo <- NULL
if (plotdir == "default") {
plotdir <- replist[["inputs"]][["dir"]]
}
ians_blues <- c(
"white", "grey", "lightblue", "skyblue", "steelblue1",
"slateblue", topo.colors(6), "blue", "blue2", "blue3",
"blue4", "black"
)
if (fleets[1] == "all") {
fleets <- 1:nfleets
} else {
if (length(intersect(fleets, 1:nfleets)) != length(fleets)) {
return("Input 'fleets' should be 'all' or a vector of values between 1 and nfleets.")
}
}
# note lower-case value is the one used below (either equal to vector from replist, or input by user)
if (fleetnames[1] == "default") fleetnames <- FleetNames
if (sexes[1] == "all") {
sexes <- 1:nsexes
} else {
if (length(intersect(sexes, 1:nsexes)) != length(sexes)) {
return("Input 'sexes' should be 'all' or a vector of values between 1 and nsexes.")
}
}
if (years[1] == "endyr") years <- endyr
# set default plot margins
if (is.null(mar)) {
if (mainTitle) {
mar <- c(5, 4, 4, 2) + 0.1
} else {
mar <- c(5, 4, 2, 2) + 0.1
}
}
################################################################################
### make plot of selectivity at length for all fleets together
# make plots
plotAllSel <- function(factor = "Lsel") {
# first subset values
if (factor %in% unique(sizeselex[["Factor"]])) {
agebased <- FALSE
allselex <- sizeselex[sizeselex[["Factor"]] == factor &
sizeselex[["Fleet"]] %in% fleets &
sizeselex[["Sex"]] %in% sexes, ]
}
if (factor %in% unique(ageselex[["Factor"]])) {
agebased <- TRUE
allselex <- ageselex[ageselex[["Factor"]] == factor &
ageselex[["Seas"]] == season &
ageselex[["Fleet"]] %in% fleets &
ageselex[["Sex"]] %in% sexes, ]
}
if (!factor %in% unique(c(sizeselex[["Factor"]], ageselex[["Factor"]]))) {
stop(
"Factor '", factor, "' not found in age- or length-based selectivity. ",
"This may be due to having 'detailed age-structured reports' ",
"turned off in the starter file."
)
}
if (nrow(allselex) == 0) {
stop("Combination of season, fleets, & sexes didn't produce any results")
}
# figure out which fleets have time-varying qunatities
time <- rep(FALSE, nfleets)
for (ifleet in fleets) {
time[ifleet] <- any(apply(
allselex[allselex[["Fleet"]] == ifleet &
allselex[["Yr"]] %in% (startyr:endyr), ],
2, function(x) {
any(x != x[1])
}
))
}
if (any(time)) {
if (length(years) > 1 & length(fleets) > 1) {
message("plot not yet configured to work well with multiple years and multiple fleets")
}
# do a bunch of tedious filtering to get unique year ranges
inputyears <- years
years <- NULL
years2 <- NULL
year_ranges <- NULL
for (i in 1:length(inputyears)) {
if (inputyears[i] >= startyr) {
newyear <- min(endyr, allselex[["Yr"]][allselex[["Yr"]] >= inputyears[i]])
newyear2 <- max(startyr, allselex[["Yr"]][allselex[["Yr"]] <= inputyears[i]])
if (newyear2 <= newyear) {
newyear_range <- paste(newyear2, "-", newyear, sep = "")
if (newyear == newyear2 & newyear > startyr - 3) newyear_range <- newyear
if (!newyear_range %in% year_ranges) {
years <- c(years, newyear)
years2 <- c(years2, newyear2)
year_ranges <- c(year_ranges, newyear_range)
}
}
}
}
if (all(years2 == startyr & years == endyr)) {
years <- endyr
years2 <- startyr
year_ranges <- paste(startyr, "-", endyr, sep = "")
}
bad <- rep(FALSE, length(years))
for (i in 1:length(years)) {
y <- years[i]
y2 <- years2[i]
if (sum(years == y) > 1) bad[years == y & years2 == y] <- TRUE
if (sum(years2 == y2) > 1) bad[years == y2 & years2 == y2] <- TRUE
}
years <- years[!bad]
years2 <- years2[!bad]
year_ranges <- year_ranges[!bad]
if ((startyr - 3) %in% inputyears) {
years <- c(years, startyr - 3)
year_ranges <- c(year_ranges, "Benchmarks")
}
} else {
year_ranges <- ""
}
allselex <- allselex2 <- allselex[allselex[["Yr"]] %in% years, ]
if (nrow(allselex) == 0) {
stop("No values found for this combination of years and factor")
}
# do some processing
Sex <- allselex[["Sex"]]
if (!agebased) {
allselex <- allselex[, -(1:5)]
xlab <- labels[1]
}
if (agebased) {
allselex <- allselex[, -(1:7)]
xlab <- labels[2]
}
if (!is.null(infotable)) {
infotable2 <- infotable
good <- Sex %in% infotable[["Sex"]]
allselex <- allselex[good, ]
allselex2 <- allselex2[good, ]
if (nrow(infotable2) != nrow(allselex)) {
stop("Problem with input 'infotable'. Number of rows doesn't match.")
}
} else {
# make table of info for each row (unless it is supplied already)
infotable2 <- allselex2[c("Fleet", "Sex", "Yr")]
infotable2[["ifleet"]] <- NA
infotable2[["FleetName"]] <- fleetnames[infotable2[["Fleet"]]]
infotable2[["longname"]] <- infotable2[["FleetName"]]
for (i in 1:nrow(infotable2)) {
infotable2[["Yr_range"]][i] <- year_ranges[years == infotable2[["Yr"]][i]]
}
if (length(unique(infotable2[["Yr"]])) > 1) {
infotable2[["longname"]] <- paste(infotable2[["FleetName"]], infotable2[["Yr_range"]])
}
# check for whether there are differences between males and females
twosex <- all(1:2 %in% infotable2[["Sex"]]) &&
any(allselex[infotable2[["Sex"]] == 1, ] != allselex[infotable2[["Sex"]] == 2, ])
if (!twosex) { # show only sex with lowest number if no differences between sexes
good <- infotable2[["Sex"]] == min(infotable2[["Sex"]])
allselex <- allselex[good, ]
allselex2 <- allselex2[good, ]
infotable2 <- infotable2[good, ]
} else {
infotable2[["longname"]] <- paste(infotable2[["longname"]], c("(f)", "(m)")[infotable2[["Sex"]]])
}
# add index from 1 up to number of fleets plotted
allfleets <- sort(unique(infotable2[["Fleet"]]))
for (ifleet in 1:length(allfleets)) {
infotable2[["ifleet"]][infotable2[["Fleet"]] == allfleets[ifleet]] <- ifleet
}
# choose colors
colvec <- rich.colors.short(length(allfleets))
infotable2[["col"]] <- colvec[infotable2[["ifleet"]]]
# choose line types
infotable2[["lty"]] <- 1
# either line by Sex
infotable2[["lwd"]] <- lwd
if (twosex) infotable2[["lty"]] <- infotable2[["Sex"]]
# or line by year (with line width by Sex)
allyears <- sort(unique(infotable2[["Yr"]]))
if (length(allyears) > 1) {
for (iyear in 1:length(allyears)) {
infotable2[["lty"]][infotable2[["Yr"]] == allyears[iyear]] <- iyear
}
if (twosex) infotable2[["lwd"]][infotable2[["Sex"]] == 2] <- lwd / 2
}
# choose plot characters
infotable2[["pch"]] <- infotable2[["ifleet"]] %% 25
}
main <- factor
if (factor == "Lsel") main <- paste("Length-based selectivity")
if (factor == "Asel") main <- paste("Age-based selectivity")
if (factor == "Asel2") main <- paste("Derived age-based from length-based selectivity")
if (factor == "Ret") main <- paste("Retention")
if (length(fleets) > 1) main <- paste(main, "by fleet")
if (length(fleets) == 1) main <- paste(main, "for", fleetnames[fleets])
if (length(unique(infotable2[["Yr"]])) == 1) {
main <- paste(main, "in", unique(infotable2[["Yr"]]))
}
bins <- as.numeric(names(allselex))
# make empty plot
if (!add) {
par(mar = mar)
plot(0,
xlim = range(bins), ylim = c(0, 1), type = "n",
main = ifelse(mainTitle, main, ""), cex.main = cex.main,
xlab = xlab, ylab = labels[4]
)
}
# add grey lines
abline(h = 0, col = "grey")
abline(h = 1, col = "grey")
# add lines for selectivities
matplot(
x = bins, y = t(allselex), col = infotable2[["col"]],
lty = infotable2[["lty"]], lwd = infotable2[["lwd"]], type = "l", add = TRUE
)
# add points on top of lines (first subsetting to optionally show fewer points)
allselex2 <- allselex
if (spacepoints > 0) {
for (iline in 1:nrow(allselex)) {
allselex2[iline, (1:ncol(allselex)) %%
spacepoints != (staggerpoints * iline) %% spacepoints] <- NA
}
matplot(
x = bins, y = t(allselex2), col = infotable2[["col"]],
lwd = infotable2[["lwd"]], pch = infotable2[["pch"]], type = "p", add = TRUE
)
} else {
# if 'spacepoints' is less than or equal to 0, don't show points
infotable2[["pch"]] <- NA
}
# add legend
if (nrow(infotable2) > 1) {
legend(legendloc,
inset = c(0, 0.05),
legend = infotable2[["longname"]],
col = infotable2[["col"]],
seg.len = 4,
lty = infotable2[["lty"]],
pch = infotable2[["pch"]],
lwd = infotable2[["lwd"]],
bty = "n"
)
}
return(infotable2)
}
if (1 %in% subplots & !is.null(sizeselex)) {
for (ifactor in 1:length(sizefactors)) {
if (plot) {
infotable2 <- plotAllSel(factor = sizefactors[ifactor])
}
if (print) {
file <- paste("sel01_multiple_fleets_length", ifactor, ".png", sep = "")
caption <- "Selectivity at length for multiple fleets."
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
infotable2 <- plotAllSel(factor = "Lsel")
dev.off()
}
}
}
if (2 %in% subplots & !is.null(ageselex)) {
# remove factor == "Asel" if all age-based selectivity == 1
if ("Asel" %in% agefactors &&
all(ageselex[
ageselex[["Factor"]] == "Asel",
paste(0:replist[["accuage"]])
] == 1)) {
agefactors <- setdiff(agefactors, "Asel")
message("Skipping plot of age-based selectivity as all values = 1.0")
}
if (length(agefactors) > 0) {
for (ifactor in 1:length(agefactors)) {
factor <- agefactors[ifactor]
if (plot) {
infotable2 <- plotAllSel(factor = factor)
}
if (print) {
file <- paste("sel02_multiple_fleets_age", ifactor, ".png", sep = "")
caption <- "Selectivity at age for multiple fleets."
if (factor == "Asel2") {
caption <- paste(
"Selectivity at age derived from selectivity at",
"length for multiple fleets."
)
}
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
infotable2 <- plotAllSel(factor = factor)
dev.off()
}
} # end loop over age factors
} # end check for any agefactors
}
################################################################################
### loop over fleets and sexes to make individual plot of length-based patterns
# first check if any of these plots are requested
if (any(3:9 %in% subplots) & !is.null(sizeselex)) {
# selex and retention
for (i in fleets)
{
for (m in sexes)
{
if (m == 1 & nsexes == 1) sextitle1 <- "Time-"
if (m == 1 & nsexes == 2) sextitle1 <- "Female time-"
if (m == 2) sextitle1 <- "Male time-"
if (m == 1 & nsexes == 1) sextitle2 <- "Ending"
if (m == 1 & nsexes == 2) sextitle2 <- "Female ending"
if (m == 2) sextitle2 <- "Male ending"
intret <- sizeselex[sizeselex[["Factor"]] == "Ret" &
sizeselex[["Yr"]] != startyr - 3 &
sizeselex[["Sex"]] == m, ]
intmort <- sizeselex[sizeselex[["Factor"]] == "Mort" &
sizeselex[["Yr"]] != startyr - 3 &
sizeselex[["Sex"]] == m, ]
intkeep <- sizeselex[sizeselex[["Factor"]] == "Keep" &
sizeselex[["Yr"]] != startyr - 3 &
sizeselex[["Sex"]] == m, ]
intdead <- sizeselex[sizeselex[["Factor"]] == "Dead" &
sizeselex[["Yr"]] != startyr - 3 &
sizeselex[["Sex"]] == m, ]
intselex <- sizeselex[sizeselex[["Factor"]] == "Lsel" &
sizeselex[["Yr"]] != startyr - 3 &
sizeselex[["Sex"]] == m, ]
plotselex <- intselex[intselex[["Fleet"]] == i, ]
plotret <- intret[intret[["Fleet"]] == i, ]
plotmort <- intmort[intmort[["Fleet"]] == i, ]
# test for time-varying length selectivity
time <- any(apply(plotselex[-c(1, nrow(plotselex)), -(1:5)], 2, function(x) {
any(x != x[1])
}))
if (time) {
x <- lbinspop
subset <- plotselex[["Yr"]] >= minyr & plotselex[["Yr"]] <= maxyr
y <- plotselex[["Yr"]][subset]
z <- plotselex[subset, -(1:5)]
z <- matrix(as.numeric(as.matrix(z)), ncol = ncol(z))
z <- t(z)
main <- paste(sextitle1, "varying selectivity for ", fleetnames[i], sep = "")
if (plot) {
if (3 %in% subplots) {
persp(x, y, z,
col = "white", xlab = labels[1],
ylab = labels[3], zlab = labels[4], expand = 0.5,
box = TRUE, main = ifelse(mainTitle, main, ""),
cex.main = cex.main, ticktype = "detailed",
phi = 35, theta = -10
)
}
if (4 %in% subplots) {
contour(x, y, z,
nlevels = 5, xlab = labels[1],
ylab = labels[3], main = ifelse(mainTitle, main, ""),
cex.main = cex.main, col = ians_blues, lwd = lwd
)
}
}
if (print) {
if (3 %in% subplots) {
file <- paste("sel03_len_timevary_surf_flt", i, "sex", m, ".png", sep = "")
caption <- paste("Surface plot of", main)
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
persp(x, y, z,
col = "white", xlab = labels[1], ylab = labels[3],
zlab = labels[4], expand = 0.5, box = TRUE,
main = ifelse(mainTitle, main, ""), cex.main = cex.main,
ticktype = "detailed", phi = 35, theta = -10
)
dev.off()
}
if (4 %in% subplots) {
file <- paste("sel04_len_timevary_contour_flt", i, "sex", m, ".png", sep = "")
caption <- paste("Countour plot of", main)
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
contour(x, y, z,
nlevels = 5, xlab = labels[1], ylab = labels[3],
main = ifelse(mainTitle, main, ""), cex.main = cex.main,
col = ians_blues, lwd = lwd
)
dev.off()
}
}
}
# test for time-varying length retention
time2 <- any(apply(plotret[-nrow(plotret), -(1:5)], 2, function(x) {
any(x != x[1])
}))
if (time2) {
x <- lbinspop
subset <- intret[["Yr"]] >= minyr & intret[["Yr"]] <= maxyr
y <- intret[["Yr"]][subset & intret[["Fleet"]] == i]
z <- intret[subset & intret[["Fleet"]] == i, -(1:5)]
z <- matrix(as.numeric(as.matrix(z)), ncol = ncol(z))
z <- t(z)
main <- paste(sextitle1, "varying retention for ", fleetnames[i], sep = "")
if (plot) {
if (5 %in% subplots) {
persp(x, y, z,
col = "white", xlab = labels[1],
ylab = labels[3], zlab = labels[5], expand = 0.5,
box = TRUE, main = ifelse(mainTitle, main, ""),
cex.main = cex.main, ticktype = "detailed",
phi = 35, theta = -10
)
}
if (6 %in% subplots) {
contour(x, y, z,
nlevels = 5, xlab = labels[1],
ylab = labels[3], main = ifelse(mainTitle, main, ""),
cex.main = cex.main, col = ians_blues, lwd = lwd
)
}
}
if (print) {
if (5 %in% subplots) {
file <- paste("sel05_timevary_ret_surf_flt", i, "sex", m, ".png", sep = "")
caption <- paste("Surface plot of", main)
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
persp(x, y, z,
col = "white", xlab = labels[1],
ylab = labels[3], zlab = labels[5], expand = 0.5,
box = TRUE, main = ifelse(mainTitle, main, ""),
cex.main = cex.main, ticktype = "detailed",
phi = 35, theta = -10
)
dev.off()
}
if (6 %in% subplots) {
file <- paste("sel06_timevary_ret_contour_flt", i, "sex", m, ".png", sep = "")
caption <- paste("Countour plot of", main)
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
contour(x, y, z,
nlevels = 5, xlab = labels[1],
ylab = labels[3], main = ifelse(mainTitle, main, ""),
cex.main = cex.main, col = ians_blues, lwd = lwd
)
dev.off()
}
}
}
# test for time-varying discard mortality rates
time3 <- any(apply(plotmort[-nrow(plotmort), -(1:5)], 2, function(x) {
any(x != x[1])
}))
if (time3) {
x <- lbinspop
subset <- intmort[["Yr"]] >= minyr & intmort[["Yr"]] <= maxyr
y <- intmort[["Yr"]][subset & intmort[["Fleet"]] == i]
z <- intmort[subset & intmort[["Fleet"]] == i, -(1:5)]
z <- matrix(as.numeric(as.matrix(z)), ncol = ncol(z))
z <- t(z)
main <- paste(sextitle1, "varying discard mortality for ", fleetnames[i], sep = "")
if (plot) {
if (7 %in% subplots) {
persp(x, y, z,
col = "white", xlab = labels[1], ylab = labels[3], zlab = labels[6],
expand = 0.5, box = TRUE,
main = ifelse(mainTitle, main, ""),
cex.main = cex.main, ticktype = "detailed",
phi = 35, theta = -10, zlim = c(0, max(z))
)
}
if (8 %in% subplots) {
contour(x, y, z,
nlevels = 5, xlab = labels[1], ylab = labels[3],
main = ifelse(mainTitle, main, ""),
cex.main = cex.main, col = ians_blues, lwd = lwd
)
}
}
if (print) {
if (7 %in% subplots) {
file <- paste("sel07_timevary_mort_surf_flt", i, "sex", m, ".png", sep = "")
caption <- paste("Surface plot of", main)
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
persp(x, y, z,
col = "white", xlab = labels[1],
ylab = labels[3], zlab = labels[6], expand = 0.5,
box = TRUE, main = ifelse(mainTitle, main, ""),
cex.main = cex.main, ticktype = "detailed", phi = 35, theta = -10
)
dev.off()
}
if (8 %in% subplots) {
file <- paste("sel08_timevary_mort_contour_flt", i, "sex", m, ".png", sep = "")
caption <- paste("Surface plot of", main)
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
contour(x, y, z,
nlevels = 5, xlab = labels[1],
ylab = labels[3], main = ifelse(mainTitle, main, ""),
cex.main = cex.main, col = ians_blues, lwd = lwd
)
dev.off()
}
}
}
# make plot of end year selectivity (with retention and discard mortality if used)
endselex <- plotselex[plotselex[["Yr"]] == endyr, -(1:5)]
plotret <- plotret[nrow(plotret), -(1:5)] # final year only
ylab <- labels[4]
bins <- as.numeric(names(endselex))
vals <- as.numeric(paste(endselex))
retvals <- as.numeric(plotret)
main <- paste(sextitle2, " year selectivity for ", fleetnames[i], sep = "")
        # Plot ending-year selectivity at length for one fleet and sex, with
        # optional retention / discard-mortality / keep / dead / discard lines
        # chosen via 'selexlines'. All data (bins, vals, intret, intkeep,
        # intmort, intdead) and graphics settings are captured from the
        # enclosing environment; called for its plotting side effects only.
        selfunc <- function() {
          # determine whether retention was used
          intret2 <- intret[intret[["Fleet"]] == i, ]
          retchecktemp <- as.vector(unlist(intret2[1, ]))
          # columns 6+ hold the retention-at-length values; 1-5 are metadata
          retcheck <- as.numeric(retchecktemp[6:length(retchecktemp)])
          if (is.na(sum(retcheck))) retcheckuse <- 0
          # if minimum retention is less than 1, show additional stuff in plot
          if (!is.na(sum(retcheck))) retcheckuse <- 1 - min(retcheck)
          # make plot
          if (!add) {
            par(mar = mar)
            plot(bins, vals,
              xlab = labels[1], ylim = c(0, 1),
              main = ifelse(mainTitle, main, ""),
              cex.main = cex.main, ylab = "", type = "n"
            )
          }
          abline(h = 0, col = "grey")
          abline(h = 1, col = "grey")
          if (1 %in% selexlines) lines(bins, vals, type = "o", col = col2, cex = 1.1)
          if (retcheckuse > 0) {
            # if retention, then add additional lines & legend
            useret <- intret[intret[["Fleet"]] == i, ]
            usekeep <- intkeep[intkeep[["Fleet"]] == i, ]
            usemort <- intmort[intmort[["Fleet"]] == i, ]
            usedead <- intdead[intdead[["Fleet"]] == i, ]
            # use the ending year if present, otherwise the latest available
            if (endyr %in% as.numeric(useret[["Yr"]])) {
              useyr <- endyr
            } else {
              useyr <- max(as.numeric(useret[["Yr"]]))
            }
            plotret <- useret[useret[["Yr"]] == useyr, ]
            plotkeep <- usekeep[usekeep[["Yr"]] == useyr, ]
            plotmort <- usemort[usemort[["Yr"]] == useyr, ]
            plotdead <- usedead[usedead[["Yr"]] == useyr, ]
            # compute discard as function of size: selectivity*(1 - retention)
            plotdisc <- plotret
            plotdisc[-(1:5)] <- vals * (1 - plotret[, -(1:5)])
            # add additional lines if requested
            if (2 %in% selexlines) {
              lines(as.numeric(as.vector(names(plotret)[-(1:5)])),
                as.numeric(as.character(plotret[1, -(1:5)])),
                col = "red", type = "o", pch = 3, cex = .9
              )
              # extend the local y-axis label to list the added curve;
              # the final label is written by mtext() below
              ylab <- paste(ylab, ", Retention", sep = "")
            }
            if (3 %in% selexlines) {
              lines(as.numeric(as.vector(names(plotmort)[-(1:5)])),
                as.numeric(as.character(plotmort[1, -(1:5)])),
                col = "orange", type = "o", pch = 4, cex = .9
              )
              ylab <- paste(ylab, ", Mortality", sep = "")
            }
            if (4 %in% selexlines) {
              lines(as.numeric(as.vector(names(plotkeep)[-(1:5)])),
                as.numeric(as.character(plotkeep[1, -(1:5)])),
                col = "purple", type = "o", pch = 2, cex = .9
              )
            }
            if (5 %in% selexlines) {
              lines(as.numeric(as.vector(names(plotdead)[-(1:5)])),
                as.numeric(as.character(plotdead[1, -(1:5)])),
                col = "green3", type = "o", pch = 5, cex = .9
              )
            }
            if (6 %in% selexlines) {
              # x values come from plotdead's bin names, y values from
              # plotdisc; both share the same length-bin columns
              lines(as.numeric(as.vector(names(plotdead)[-(1:5)])),
                as.numeric(as.character(plotdisc[1, -(1:5)])),
                col = "grey50", type = "o", pch = 6, cex = .9
              )
            }
            # add legend (entries/colors/symbols subset by selexlines)
            legend(legendloc,
              inset = c(0, 0.05), bty = "n",
              c(
                labels[4], labels[5], labels[6], "Keep = Sel*Ret",
                "Dead = Sel*(Ret+(1-Ret)*Mort)", "Discard = Sel*(1-Ret)"
              )[selexlines],
              lty = 1, col = c("blue", "red", "orange", "purple", "green3", "grey50")[selexlines],
              pch = c(1, 3, 4, 2, 5, 6)[selexlines], pt.cex = c(1.1, .9, .9, .9, .9, .9)[selexlines]
            )
          }
          mtext(ylab, side = 2, line = 3)
        }
# make plot if selectivity is not constant at 0 or 1 for all bins
if ((min(vals) < 1 & max(vals) > 0) |
(!is.na(diff(range(retvals))) && diff(range(retvals)) != 0)) {
if (9 %in% subplots) {
if (plot) selfunc()
if (print) {
file <- paste("sel09_len_flt", i, "sex", m, ".png", sep = "")
caption <- main
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
selfunc()
dev.off()
}
}
}
} # sexes
} # fleets
} # check for any of the plots in this section requested
################################################################################
### loop over fleets and sexes to make individual plot of age-based patterns
if (any(11:14 %in% subplots) & !is.null(ageselex)) {
# Age based selex
ylab <- labels[4]
for (facnum in 1) {
factor <- c("Asel", "Asel2")[facnum]
for (i in fleets) {
for (m in sexes) {
if (m == 1 & nsexes == 1) sextitle1 <- "Time-"
if (m == 1 & nsexes == 2) sextitle1 <- "Female time-"
if (m == 2) sextitle1 <- "Male time-"
if (m == 1 & nsexes == 1) sextitle2 <- "Ending"
if (m == 1 & nsexes == 2) sextitle2 <- "Female ending"
if (m == 2) sextitle2 <- "Male ending"
ageselexcols <- (1:ncol(ageselex))[names(ageselex) %in% as.character(0:accuage)]
plotageselex <- ageselex[ageselex[["Factor"]] == factor & ageselex[["Fleet"]] == i &
ageselex[["Yr"]] != startyr - 3 & ageselex[["Sex"]] == m, ]
# test for time-varying age selectivity
time <- any(apply(
plotageselex[-c(1, nrow(plotageselex)), ageselexcols],
2, function(x) {
any(x != x[1])
}
))
if (time) {
if ((min(as.numeric(as.vector(t(plotageselex[, -(1:7)])))) < 1)) {
subset <- as.numeric(plotageselex[["Yr"]]) >= minyr &
as.numeric(plotageselex[["Yr"]]) <= maxyr
x <- seq(0, accuage, by = 1)
y <- as.numeric(plotageselex[["Yr"]])[subset]
z <- plotageselex[subset, -(1:7)]
z <- matrix(as.numeric(as.matrix(z)), ncol = ncol(z))
z <- t(z)
main <- paste(sextitle1, "varying selectivity for ", fleetnames[i], sep = "")
if (plot) {
if (11 %in% subplots) {
persp(x, y, z,
col = "white", xlab = labels[2],
ylab = labels[3], zlab = ylab, expand = 0.5,
box = TRUE, main = ifelse(mainTitle, main, ""), cex.main = cex.main,
ticktype = "detailed", phi = 35, theta = -10
)
}
if (12 %in% subplots) {
contour(x, y, z,
nlevels = 5, xlab = labels[2],
main = ifelse(mainTitle, main, ""),
cex.main = cex.main, col = ians_blues, lwd = lwd
)
}
}
if (print) {
if (11 %in% subplots) {
file <- paste("sel11_timevary_surf_flt", i, "sex", m, ".png", sep = "")
caption <- main
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
persp(x, y, z,
col = "white", xlab = labels[2],
ylab = labels[3], zlab = ylab, expand = 0.5,
box = TRUE, main = ifelse(mainTitle, main, ""),
cex.main = cex.main, ticktype = "detailed",
phi = 35, theta = -10
)
dev.off()
}
if (12 %in% subplots) {
file <- paste("sel12_timevary_contour_flt", i, "sex", m, ".png", sep = "")
caption <- main
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
contour(x, y, z,
nlevels = 5, xlab = labels[2],
main = ifelse(mainTitle, main, ""),
cex.main = cex.main, col = ians_blues, lwd = lwd
)
dev.off()
}
}
plotageselex2 <- plotageselex[plotageselex[["Yr"]] %in% c(max(as.numeric(plotageselex[["Yr"]]))), ]
plotageselex2 <- plotageselex2[, -(1:7)]
main <- paste(sextitle2, " year selectivity for ", fleetnames[i], sep = "")
endselfunc <- function() {
if (!add) {
par(mar = mar)
plot(as.numeric(names(plotageselex2)),
as.numeric(paste(c(plotageselex2))),
xlab = labels[2], ylim = c(0, 1), main = ifelse(mainTitle, main, ""),
cex.main = cex.main, ylab = ylab,
type = "n", col = col2, cex = 1.1
)
}
lines(as.numeric(names(plotageselex2)),
as.numeric(paste(c(plotageselex2))),
type = "o", col = col2, cex = 1.1
)
abline(h = 0, col = "grey")
}
if (13 %in% subplots) {
if (plot) {
endselfunc()
}
if (print) {
file <- paste("sel13_age_flt", i, "sex", m, ".png", sep = "")
caption <- main
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
endselfunc()
dev.off()
}
}
}
}
if (!time) {
plotageselex <- plotageselex[plotageselex[["Yr"]] == max(plotageselex[["Yr"]]), ]
plotageselex <- plotageselex[, -(1:7)]
vals <- as.numeric(paste(c(plotageselex)))
# test for constant across all values
doplot <- nrow(plotageselex) > 0 && diff(range(vals)) != 0
if (doplot & skipAgeSelex10) {
# skip if selectivity type 10 is used (0.0 for age-0, 1.0 for other ages)
doplot <- !(vals[1] == 0 & all(vals[-1] == 1))
}
if (doplot) {
main <- paste(sextitle2, " year selectivity for ", fleetnames[i], sep = "")
endselfunc2 <- function() {
if (!add) {
par(mar = mar)
plot((as.numeric(names(plotageselex))), vals,
xlab = labels[2], ylim = c(0, 1),
main = ifelse(mainTitle, main, ""), cex.main = cex.main,
ylab = ylab, type = "n"
)
}
lines((as.numeric(names(plotageselex))), vals, type = "o", col = col2, cex = 1.1)
abline(h = 0, col = "grey")
}
if (14 %in% subplots) {
if (plot) endselfunc2()
if (print) {
file <- paste("sel14_age_flt", i, "sex", m, ".png", sep = "")
caption <- main
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
endselfunc2()
dev.off()
}
}
} # end if
} # end if not time varying
} # sexes
} # fleets
flush.console()
} # factor (Asel vs. Asel2)
} # check for any of the plots in this section requested
################################################################################
### Matrix of selectivity deviations for semi-parametric (2D-AR1) selectivity
if (15 %in% subplots & !is.null(replist[["seldev_matrix"]])) {
seldev_pars <- replist[["seldev_pars"]]
seldev_matrix <- replist[["seldev_matrix"]]
# define color palette
devcol.fn <- colorRampPalette(colors = c("red", "white", "blue"))
# define function to make an image plot
seldev_func <- function(m, mar = c(4.1, 4.1, 1, 1)) {
bins <- as.numeric(colnames(m))
years <- as.numeric(rownames(m))
par(mar = mar)
image(
x = bins, y = years, z = t(m), col = devcol.fn(10),
xlab = names(dimnames(m))[2],
ylab = names(dimnames(m))[1],
axes = FALSE,
ylim = rev(range(years) + c(-0.5, 0.5))
)
axis(1, at = bins)
axis(2, at = years, las = 1)
box()
}
for (imatrix in 1:length(seldev_matrix)) {
label <- names(seldev_matrix)[imatrix]
main <- gsub(pattern = "_", replacement = " ", x = label)
main <- gsub(pattern = "seldevs", replacement = "selectivity deviations", x = main)
if (plot) {
seldev_func(m = seldev_matrix[[imatrix]], mar = c(5, 4, 4, 1) + 0.1)
title(main = ifelse(mainTitle, main, ""))
}
if (print) {
file <- paste("sel15_", label, ".png", sep = "")
caption <- gsub(pattern = "selectivity ", replacement = "", x = main)
caption <- paste0(
caption,
" for semi-parametric (2D-AR1) selectivity. ",
"Blue value are positive deviations and red values negative. ",
"The matrix of values is available in the list created by ",
"<code>SS_output()</code> as <code>$seldev_matrix</code> which ",
"is a list with an element for each combination of fleet and length or ",
"age which uses the semi-parametric selectivity."
)
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
seldev_func(m = seldev_matrix[[imatrix]])
dev.off()
}
} # end loop over matrices
} # end subplots
################################################################################
### Selectivity contours over age and length shown with growth curve
if (21 %in% subplots &
!is.null(ngpatterns) &&
ngpatterns == 1 &
!is.null(growdat) &
!is.null(sizeselex) &
!is.null(ageselex) &
all(!is.na(lbinspop))
) { # need to connect growth patterns to fleets in future
# subsetting for one season only. This could be replaced
# by info on the growth within the season when each fleet operates.
growdat <- growdat[growdat[["Seas"]] == season, ]
if (nseasons > 1) {
message(
"Warning: plots showing growth curve with selectivity are using season ",
season, " growth, which may not match the timing of the fishery."
)
}
# Mid year mean length at age with 95% range of lengths (by sex if applicable)
growdatF <- growdat[growdat[["Sex"]] == 1 & growdat[["Morph"]] == mainmorphs[1], ]
growdatF[["Sd_Size"]] <- growdatF[["SD_Mid"]]
if (growthCVtype == "logSD=f(A)") { # lognormal distribution of length at age
growdatF[["high"]] <- qlnorm(0.975, meanlog = log(growdatF[["Len_Mid"]]), sdlog = growdatF[["Sd_Size"]])
growdatF[["low"]] <- qlnorm(0.025, meanlog = log(growdatF[["Len_Mid"]]), sdlog = growdatF[["Sd_Size"]])
} else { # normal distribution of length at age
growdatF[["high"]] <- qnorm(0.975, mean = growdatF[["Len_Mid"]], sd = growdatF[["Sd_Size"]])
growdatF[["low"]] <- qnorm(0.025, mean = growdatF[["Len_Mid"]], sd = growdatF[["Sd_Size"]])
}
if (nsexes > 1) {
growdatM <- growdat[growdat[["Sex"]] == 2 & growdat[["Morph"]] == mainmorphs[2], ]
growdatM[["Sd_Size"]] <- growdatM[["SD_Mid"]]
if (growthCVtype == "logSD=f(A)") { # lognormal distribution of length at age
growdatM[["high"]] <- qlnorm(0.975, meanlog = log(growdatM[["Len_Mid"]]), sdlog = growdatM[["Sd_Size"]])
growdatM[["low"]] <- qlnorm(0.025, meanlog = log(growdatM[["Len_Mid"]]), sdlog = growdatM[["Sd_Size"]])
} else { # normal distribution of length at age
growdatM[["high"]] <- qnorm(0.975, mean = growdatM[["Len_Mid"]], sd = growdatM[["Sd_Size"]])
growdatM[["low"]] <- qnorm(0.025, mean = growdatM[["Len_Mid"]], sd = growdatM[["Sd_Size"]])
}
}
xlab <- labels[2]
ylab <- labels[1]
zlab <- labels[4]
for (i in fleets)
{
for (m in sexes)
{
if (m == 1 & nsexes == 1) sextitle2 <- "Ending"
if (m == 1 & nsexes == 2) sextitle2 <- "Female ending"
if (m == 2) sextitle2 <- "Male ending"
plotlenselex <- as.numeric(sizeselex[sizeselex[["Factor"]] == "Lsel" &
sizeselex[["Yr"]] == endyr &
sizeselex[["Fleet"]] == i &
sizeselex[["Sex"]] == m, -(1:5)])
# test if there is any length-based selectivity (otherwise plot is uninformative)
if (any(plotlenselex != 1)) {
plotageselex <- as.numeric(ageselex[ageselex[["Factor"]] == "Asel" &
ageselex[["Yr"]] == endyr &
ageselex[["Fleet"]] == i &
ageselex[["Sex"]] == m, -(1:7)])
# x here should probably be replaced by $Age_Mid or some more informative value
x <- seq(0, accuage, by = 1)
y <- lbinspop
z <- plotageselex %o% plotlenselex # outer product of age- and length-selectivity
main <- paste(sextitle2, " year selectivity and growth for ", fleetnames[i], sep = "")
          # Contour plot of the combined age- and length-based selectivity
          # surface z (outer product computed above) with the growth curve
          # for the current sex m overlaid (mean plus dashed 95% interval).
          # Captures x, y, z, growdatF/growdatM, and plot settings from the
          # enclosing environment; called for its plotting side effects only.
          agelenselcontour <- function() {
            contour(x, y, z,
              nlevels = 5, xlab = xlab, ylab = ylab,
              main = ifelse(mainTitle, main, ""), cex.main = cex.main, col = ians_blues, lwd = lwd
            )
            # each curve is drawn twice: a wide white underlay first so the
            # colored line stays visible on top of the contour lines
            if (m == 1) {
              lines(x, growdatF[["Len_Mid"]], col = "white", lwd = 5)
              lines(x, growdatF[["Len_Mid"]], col = col1, lwd = 3)
              lines(x, growdatF[["high"]], col = "white", lwd = 1, lty = 1)
              lines(x, growdatF[["high"]], col = col1, lwd = 1, lty = "dashed")
              lines(x, growdatF[["low"]], col = "white", lwd = 1, lty = 1)
              lines(x, growdatF[["low"]], col = col1, lwd = 1, lty = "dashed")
            }
            if (m == 2) {
              lines(x, growdatM[["Len_Mid"]], col = "white", lwd = 5)
              lines(x, growdatM[["Len_Mid"]], col = col2, lwd = 3)
              lines(x, growdatM[["high"]], col = "white", lwd = 1, lty = 1)
              lines(x, growdatM[["high"]], col = col2, lwd = 1, lty = "dashed")
              lines(x, growdatM[["low"]], col = "white", lwd = 1, lty = 1)
              lines(x, growdatM[["low"]], col = col2, lwd = 1, lty = "dashed")
            }
          }
if (plot) {
if (21 %in% subplots) agelenselcontour()
}
if (print) {
if (21 %in% subplots) {
file <- paste("sel21_agelen_contour_flt", i, "sex", m, ".png", sep = "")
caption <- main
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
agelenselcontour()
dev.off()
}
}
} # if there is any length-based selectivity
} # sexes
} # fleets
} # end subplots
################################################################################
### Plot selectivity with uncertainty if "Extra SD reporting" requested in control file
if (22 %in% subplots) {
# get values from Extra SD reporting if created by request at bottom of control file
rows <- grep("Selex_std", derived_quants[["Label"]])
if (length(rows) > 0) {
sel <- derived_quants[rows, ]
names <- sel[["Label"]]
splitnames <- strsplit(names, "_")
namesDF <- as.data.frame(matrix(unlist(strsplit(names, "_")), ncol = 6, byrow = T))
sel[["Fleet"]] <- as.numeric(as.character(namesDF[["V3"]]))
sel[["Sex"]] <- as.character(namesDF[["V4"]])
sel[["agelen"]] <- as.character(namesDF[["V5"]])
sel[["bin"]] <- as.numeric(as.character(namesDF[["V6"]]))
sel[["lower"]] <- pmax(qnorm(0.025, mean = sel[["Value"]], sd = sel[["StdDev"]]), 0) # trim at 0
sel[["upper"]] <- pmin(qnorm(0.975, mean = sel[["Value"]], sd = sel[["StdDev"]]), 1) # trim at 1
i <- sel[["Fleet"]][1]
agelen <- sel[["agelen"]][1]
xlab <- labels[1:2][1 + (sel[["agelen"]][1] == "A")] # decide label between length and age
for (m in intersect(unique(sel[["Sex"]]), c("Fem", "Mal")[sexes])) {
seltemp <- sel[sel[["Sex"]] == m, ]
if (m == "Fem" & nsexes == 1) sextitle3 <- ""
if (m == "Fem" & nsexes == 2) sextitle3 <- "females"
if (m == "Mal") sextitle3 <- "males"
main <- paste("Uncertainty in selectivity for", fleetnames[i], sextitle3)
no0 <- seltemp[["StdDev"]] > 0.001
if (FALSE) {
# Ian T.: this is the beginning of code to add the full selectivity line,
# including bins for which no uncertainty was requested
if (agelen == "L") {
plotselex <- sizeselex[sizeselex[["Factor"]] == "Lsel" &
ageselex[["Fleet"]] == i &
sizeselex[["Sex"]] == m, ]
}
if (agelen == "A") {
plotselex <- ageselex[ageselex[["Factor"]] == "Asel" &
ageselex[["Fleet"]] == i &
ageselex[["Sex"]] == m, ]
}
}
plot_extra_selex_SD <- function() {
if (!add) {
par(mar = mar)
plot(seltemp[["bin"]], seltemp[["Value"]],
xlab = xlab, ylim = c(0, 1),
main = ifelse(mainTitle, main, ""), cex.main = cex.main,
ylab = labels[4], type = "n", col = col2, cex = 1.1,
xlim = c(0, max(seltemp[["bin"]])),
)
}
lines(seltemp[["bin"]], seltemp[["Value"]],
xlab = xlab, ylim = c(0, 1),
main = ifelse(mainTitle, main, ""), cex.main = cex.main,
ylab = labels[4], type = "o", col = col2, cex = 1.1,
xlim = c(0, max(seltemp[["bin"]]))
)
arrows(
x0 = seltemp[["bin"]][no0], y0 = seltemp[["lower"]][no0],
x1 = seltemp[["bin"]][no0], y1 = seltemp[["upper"]][no0],
length = 0.01, angle = 90, code = 3, col = col2
)
abline(h = 0, col = "grey")
}
if (plot) {
plot_extra_selex_SD()
}
if (print) {
file <- paste("sel22_uncertainty", "sex", m, ".png", sep = "")
caption <- main
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
plot_extra_selex_SD()
dev.off()
}
}
} # end test for presence of selectivity uncertainty output
} # check check for subplots in list
# return info on any PNG files created
if (!is.null(plotinfo)) plotinfo[["category"]] <- "Sel"
return(invisible(list(infotable = infotable2, plotinfo = plotinfo)))
}
|
/R/SSplotSelex.R
|
no_license
|
yukio-takeuchi/r4ss
|
R
| false
| false
| 54,253
|
r
|
#' Plot selectivity
#'
#' Plot selectivity, including retention and other quantities, with additional
#' plots for time-varying selectivity.
#'
#'
#' @template replist
#' @template fleets
#' @param infotable Optional table of information controlling appearance of
#' plot and legend. Is produced as output and can be modified and entered as
#' input.
#' @template fleetnames
#' @param sizefactors Which elements of the factors column of SIZE_SELEX should
#' be included in plot of selectivity across multiple fleets?
#' @param agefactors Which elements of the factors column of AGE_SELEX should
#' be included in plot of selectivity across multiple fleets?
#' @param years Which years for selectivity are shown in multi-line plot
#' (default = last year of model).
#' @param minyr optional input for minimum year to show in plots
#' @param maxyr optional input for maximum year to show in plots
#' @param season Which season (if seasonal model) for selectivity shown in
#' multi-line plot (default = 1).
#' @param sexes Optional vector to subset sexes for which to make plots
#' (1=females, 2=males)
#' @param selexlines Vector to select which lines get plotted. Values are
#' 1. Selectivity, 2. Retention, 3. Discard mortality, 4. Keep = Sel*Ret,
#' 5. Dead = Sel*(Ret+(1-Ret)*Mort), 6. Discard = Sel*(1-Ret).
#' @param subplots Vector controlling which subplots to create.
#' Numbering of subplots is as follows,
#'
#' *Plots with all fleets grouped together*
#' \itemize{
#' \item 1 selectivity at length in end year for all fleets shown together
#' \item 2 selectivity at age in end year for all fleets shown together
#' (this includes both age-based selectivity "Asel" and age values derived
#' from length-based, "Asel2". You can choose only one using
#' "agefactors" if needed.)
#' }
#'
#' *Plots of time-varying length-based selectivity*
#' \itemize{
#' \item 3 selectivity at length time-varying surface
#' \item 4 selectivity at length time-varying contour
#' \item 5 retention at length time-varying surface
#'   \item 6 retention at length time-varying contour
#' \item 7 discard mortality time-varying surface
#' \item 8 discard mortality time-varying contour
#' }
#'
#' *Selectivity at length in end year by fleet*
#' \itemize{
#' \item 9 selectivity, retention, and discard mortality at length in ending year
#' }
#'
#' *Plots of time-varying age-based selectivity*
#' \itemize{
#' \item 11 selectivity at age time-varying surface
#' \item 12 selectivity at age time-varying contour
#' }
#'
#' *Selectivity at age in end year by fleet*
#' \itemize{
#' \item 13 selectivity at age in ending year if time-varying
#' \item 14 selectivity at age in ending year if NOT time-varying
#' \item 15 matrix of selectivity deviations for semi-parametric selectivity
#' }
#'
#' *Selectivity for both/either age or length*
#' \itemize{
#' \item 21 selectivity at age and length contour with overlaid growth curve
#' \item 22 selectivity with uncertainty if requested at end of control file
#' }
#' @param subplot Deprecated. Use subplots instead.
#' @param skipAgeSelex10 Exclude plots for age selectivity type 10 (selectivity
#' = 1.0 for all ages beginning at age 1)?
#' @template lwd
#' @param spacepoints number of years between points shown on top of lines (for
#' long timeseries, points every year get mashed together)
#' @param staggerpoints number of years to stagger the first point (if
#' `spacepoints > 1`) for each line (so that adjacent lines have points in
#' different years)
#' @template legendloc
#' @template pwidth
#' @template pheight
#' @template punits
#' @template ptsize
#' @template res
#' @template plot
#' @template print
#' @param add Add to existing plot (not yet implemented)
#' @template labels
#' @param col1 color for female growth curve
#' @param col2 color for male growth curve
#' @template cex.main
#' @template mainTitle
#' @template mar
#' @template plotdir
#' @template verbose
#' @author Ian Stewart, Ian Taylor
#' @export
#' @seealso [SS_plots()], [SS_output()]
SSplotSelex <-
function(replist, infotable = NULL,
fleets = "all", fleetnames = "default",
sizefactors = c("Lsel"),
agefactors = c("Asel", "Asel2"),
years = "endyr",
minyr = -Inf,
maxyr = Inf,
season = 1,
sexes = "all",
selexlines = 1:6,
subplots = 1:25,
skipAgeSelex10 = TRUE,
plot = TRUE, print = FALSE, add = FALSE,
labels = c(
"Length (cm)", # 1
"Age (yr)", # 2
"Year", # 3
"Selectivity", # 4
"Retention", # 5
"Discard mortality"
), # 6
col1 = "red", col2 = "blue", lwd = 2,
spacepoints = 5,
staggerpoints = 1,
legendloc = "bottomright",
pwidth = 6.5, pheight = 5.0, punits = "in",
res = 300, ptsize = 10,
cex.main = 1,
mainTitle = TRUE,
mar = NULL,
plotdir = "default",
verbose = TRUE, subplot = lifecycle::deprecated()) {
# Warning about deprecated arguments. Should be removed after 1 release.
if (lifecycle::is_present(subplot)) {
lifecycle::deprecate_warn(
when = "1.45.1",
what = "SSplotSelex(subplot)",
details = "Please use subplots instead. Assigning subplot to subplots."
)
subplots <- subplot
}
# empty table into which information on line types etc. might be copied
infotable2 <- NULL
nsexes <- replist[["nsexes"]]
nseasons <- replist[["nseasons"]]
nfleets <- replist[["nfleets"]]
lbinspop <- replist[["lbinspop"]]
nlbinspop <- replist[["nlbinspop"]]
sizeselex <- replist[["sizeselex"]]
ageselex <- replist[["ageselex"]]
accuage <- replist[["accuage"]]
startyr <- replist[["startyr"]]
endyr <- replist[["endyr"]]
FleetNames <- replist[["FleetNames"]]
growdat <- replist[["endgrowth"]]
growthCVtype <- replist[["growthCVtype"]]
mainmorphs <- replist[["mainmorphs"]]
nareas <- replist[["nareas"]]
ngpatterns <- replist[["ngpatterns"]]
derived_quants <- replist[["derived_quants"]]
# message about skipping plots
if (is.null(ageselex)) {
message("Skipping age-based selectivity plots: no output available")
}
if (is.null(sizeselex)) {
message("Skipping length-based selectivity plots: no output available")
}
# table to store information on each plot
plotinfo <- NULL
if (plotdir == "default") {
plotdir <- replist[["inputs"]][["dir"]]
}
ians_blues <- c(
"white", "grey", "lightblue", "skyblue", "steelblue1",
"slateblue", topo.colors(6), "blue", "blue2", "blue3",
"blue4", "black"
)
if (fleets[1] == "all") {
fleets <- 1:nfleets
} else {
if (length(intersect(fleets, 1:nfleets)) != length(fleets)) {
return("Input 'fleets' should be 'all' or a vector of values between 1 and nfleets.")
}
}
# note lower-case value is the one used below (either equal to vector from replist, or input by user)
if (fleetnames[1] == "default") fleetnames <- FleetNames
if (sexes[1] == "all") {
sexes <- 1:nsexes
} else {
if (length(intersect(sexes, 1:nsexes)) != length(sexes)) {
return("Input 'sexes' should be 'all' or a vector of values between 1 and nsexes.")
}
}
if (years[1] == "endyr") years <- endyr
# set default plot margins
if (is.null(mar)) {
if (mainTitle) {
mar <- c(5, 4, 4, 2) + 0.1
} else {
mar <- c(5, 4, 2, 2) + 0.1
}
}
################################################################################
### make plot of selectivity at length for all fleets together
# make plots
  # Plot selectivity-type curves for all requested fleets/sexes on one panel.
  #
  # Relies on objects from the enclosing function's environment: sizeselex,
  # ageselex, fleets, sexes, season, years, startyr, endyr, nfleets,
  # fleetnames, infotable, labels, lwd, legendloc, mar, add, mainTitle,
  # cex.main, spacepoints, staggerpoints.
  #
  # @param factor String matched against the Factor column of sizeselex
  #   (length-based, e.g. "Lsel", "Ret") or ageselex (age-based, e.g. "Asel",
  #   "Asel2") to choose which quantity to plot.
  # @return A data frame ("infotable2") describing each plotted line: fleet,
  #   sex, year range, color, line type, line width, and plot character.
  plotAllSel <- function(factor = "Lsel") {
    # first subset values; determine whether the factor is length- or age-based
    if (factor %in% unique(sizeselex[["Factor"]])) {
      agebased <- FALSE
      allselex <- sizeselex[sizeselex[["Factor"]] == factor &
        sizeselex[["Fleet"]] %in% fleets &
        sizeselex[["Sex"]] %in% sexes, ]
    }
    if (factor %in% unique(ageselex[["Factor"]])) {
      agebased <- TRUE
      allselex <- ageselex[ageselex[["Factor"]] == factor &
        ageselex[["Seas"]] == season &
        ageselex[["Fleet"]] %in% fleets &
        ageselex[["Sex"]] %in% sexes, ]
    }
    if (!factor %in% unique(c(sizeselex[["Factor"]], ageselex[["Factor"]]))) {
      stop(
        "Factor '", factor, "' not found in age- or length-based selectivity. ",
        "This may be due to having 'detailed age-structured reports' ",
        "turned off in the starter file."
      )
    }
    if (nrow(allselex) == 0) {
      stop("Combination of season, fleets, & sexes didn't produce any results")
    }
    # figure out which fleets have time-varying qunatities
    # (TRUE if any column changes across years within startyr:endyr)
    time <- rep(FALSE, nfleets)
    for (ifleet in fleets) {
      time[ifleet] <- any(apply(
        allselex[allselex[["Fleet"]] == ifleet &
          allselex[["Yr"]] %in% (startyr:endyr), ],
        2, function(x) {
          any(x != x[1])
        }
      ))
    }
    if (any(time)) {
      if (length(years) > 1 & length(fleets) > 1) {
        message("plot not yet configured to work well with multiple years and multiple fleets")
      }
      # do a bunch of tedious filtering to get unique year ranges:
      # snap each requested year to the nearest reported years on either side
      # and build a "YYYY-YYYY" label for the resulting range
      inputyears <- years
      years <- NULL
      years2 <- NULL
      year_ranges <- NULL
      for (i in 1:length(inputyears)) {
        if (inputyears[i] >= startyr) {
          newyear <- min(endyr, allselex[["Yr"]][allselex[["Yr"]] >= inputyears[i]])
          newyear2 <- max(startyr, allselex[["Yr"]][allselex[["Yr"]] <= inputyears[i]])
          if (newyear2 <= newyear) {
            newyear_range <- paste(newyear2, "-", newyear, sep = "")
            if (newyear == newyear2 & newyear > startyr - 3) newyear_range <- newyear
            if (!newyear_range %in% year_ranges) {
              years <- c(years, newyear)
              years2 <- c(years2, newyear2)
              year_ranges <- c(year_ranges, newyear_range)
            }
          }
        }
      }
      # collapse to a single full-range label when everything spans the model period
      if (all(years2 == startyr & years == endyr)) {
        years <- endyr
        years2 <- startyr
        year_ranges <- paste(startyr, "-", endyr, sep = "")
      }
      # drop duplicate single-year ranges
      bad <- rep(FALSE, length(years))
      for (i in 1:length(years)) {
        y <- years[i]
        y2 <- years2[i]
        if (sum(years == y) > 1) bad[years == y & years2 == y] <- TRUE
        if (sum(years2 == y2) > 1) bad[years == y2 & years2 == y2] <- TRUE
      }
      years <- years[!bad]
      years2 <- years2[!bad]
      year_ranges <- year_ranges[!bad]
      # startyr - 3 is the special "Benchmarks" year in SS output
      if ((startyr - 3) %in% inputyears) {
        years <- c(years, startyr - 3)
        year_ranges <- c(year_ranges, "Benchmarks")
      }
    } else {
      year_ranges <- ""
    }
    allselex <- allselex2 <- allselex[allselex[["Yr"]] %in% years, ]
    if (nrow(allselex) == 0) {
      stop("No values found for this combination of years and factor")
    }
    # do some processing: strip the metadata columns, keeping only bin columns
    # (length-based tables have 5 leading columns, age-based tables have 7)
    Sex <- allselex[["Sex"]]
    if (!agebased) {
      allselex <- allselex[, -(1:5)]
      xlab <- labels[1]
    }
    if (agebased) {
      allselex <- allselex[, -(1:7)]
      xlab <- labels[2]
    }
    if (!is.null(infotable)) {
      # user-supplied table controlling colors/line types; subset to its sexes
      infotable2 <- infotable
      good <- Sex %in% infotable[["Sex"]]
      allselex <- allselex[good, ]
      allselex2 <- allselex2[good, ]
      if (nrow(infotable2) != nrow(allselex)) {
        stop("Problem with input 'infotable'. Number of rows doesn't match.")
      }
    } else {
      # make table of info for each row (unless it is supplied already)
      infotable2 <- allselex2[c("Fleet", "Sex", "Yr")]
      infotable2[["ifleet"]] <- NA
      infotable2[["FleetName"]] <- fleetnames[infotable2[["Fleet"]]]
      infotable2[["longname"]] <- infotable2[["FleetName"]]
      for (i in 1:nrow(infotable2)) {
        infotable2[["Yr_range"]][i] <- year_ranges[years == infotable2[["Yr"]][i]]
      }
      # append year range to legend labels only when more than one year is shown
      if (length(unique(infotable2[["Yr"]])) > 1) {
        infotable2[["longname"]] <- paste(infotable2[["FleetName"]], infotable2[["Yr_range"]])
      }
      # check for whether there are differences between males and females
      twosex <- all(1:2 %in% infotable2[["Sex"]]) &&
        any(allselex[infotable2[["Sex"]] == 1, ] != allselex[infotable2[["Sex"]] == 2, ])
      if (!twosex) { # show only sex with lowest number if no differences between sexes
        good <- infotable2[["Sex"]] == min(infotable2[["Sex"]])
        allselex <- allselex[good, ]
        allselex2 <- allselex2[good, ]
        infotable2 <- infotable2[good, ]
      } else {
        infotable2[["longname"]] <- paste(infotable2[["longname"]], c("(f)", "(m)")[infotable2[["Sex"]]])
      }
      # add index from 1 up to number of fleets plotted
      allfleets <- sort(unique(infotable2[["Fleet"]]))
      for (ifleet in 1:length(allfleets)) {
        infotable2[["ifleet"]][infotable2[["Fleet"]] == allfleets[ifleet]] <- ifleet
      }
      # choose colors
      colvec <- rich.colors.short(length(allfleets))
      infotable2[["col"]] <- colvec[infotable2[["ifleet"]]]
      # choose line types
      infotable2[["lty"]] <- 1
      # either line by Sex
      infotable2[["lwd"]] <- lwd
      if (twosex) infotable2[["lty"]] <- infotable2[["Sex"]]
      # or line by year (with line width by Sex)
      allyears <- sort(unique(infotable2[["Yr"]]))
      if (length(allyears) > 1) {
        for (iyear in 1:length(allyears)) {
          infotable2[["lty"]][infotable2[["Yr"]] == allyears[iyear]] <- iyear
        }
        if (twosex) infotable2[["lwd"]][infotable2[["Sex"]] == 2] <- lwd / 2
      }
      # choose plot characters (%% 25 keeps pch within the valid symbol range)
      infotable2[["pch"]] <- infotable2[["ifleet"]] %% 25
    }
    # build the plot title from the factor and the fleet/year selection
    main <- factor
    if (factor == "Lsel") main <- paste("Length-based selectivity")
    if (factor == "Asel") main <- paste("Age-based selectivity")
    if (factor == "Asel2") main <- paste("Derived age-based from length-based selectivity")
    if (factor == "Ret") main <- paste("Retention")
    if (length(fleets) > 1) main <- paste(main, "by fleet")
    if (length(fleets) == 1) main <- paste(main, "for", fleetnames[fleets])
    if (length(unique(infotable2[["Yr"]])) == 1) {
      main <- paste(main, "in", unique(infotable2[["Yr"]]))
    }
    bins <- as.numeric(names(allselex))
    # make empty plot
    if (!add) {
      par(mar = mar)
      plot(0,
        xlim = range(bins), ylim = c(0, 1), type = "n",
        main = ifelse(mainTitle, main, ""), cex.main = cex.main,
        xlab = xlab, ylab = labels[4]
      )
    }
    # add grey lines
    abline(h = 0, col = "grey")
    abline(h = 1, col = "grey")
    # add lines for selectivities
    matplot(
      x = bins, y = t(allselex), col = infotable2[["col"]],
      lty = infotable2[["lty"]], lwd = infotable2[["lwd"]], type = "l", add = TRUE
    )
    # add points on top of lines (first subsetting to optionally show fewer points)
    allselex2 <- allselex
    if (spacepoints > 0) {
      # blank out all but every spacepoints-th bin, staggered by line index
      # so points on overlapping curves don't sit on top of each other
      for (iline in 1:nrow(allselex)) {
        allselex2[iline, (1:ncol(allselex)) %%
          spacepoints != (staggerpoints * iline) %% spacepoints] <- NA
      }
      matplot(
        x = bins, y = t(allselex2), col = infotable2[["col"]],
        lwd = infotable2[["lwd"]], pch = infotable2[["pch"]], type = "p", add = TRUE
      )
    } else {
      # if 'spacepoints' is less than or equal to 0, don't show points
      infotable2[["pch"]] <- NA
    }
    # add legend
    if (nrow(infotable2) > 1) {
      legend(legendloc,
        inset = c(0, 0.05),
        legend = infotable2[["longname"]],
        col = infotable2[["col"]],
        seg.len = 4,
        lty = infotable2[["lty"]],
        pch = infotable2[["pch"]],
        lwd = infotable2[["lwd"]],
        bty = "n"
      )
    }
    return(infotable2)
  }
if (1 %in% subplots & !is.null(sizeselex)) {
for (ifactor in 1:length(sizefactors)) {
if (plot) {
infotable2 <- plotAllSel(factor = sizefactors[ifactor])
}
if (print) {
file <- paste("sel01_multiple_fleets_length", ifactor, ".png", sep = "")
caption <- "Selectivity at length for multiple fleets."
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
infotable2 <- plotAllSel(factor = "Lsel")
dev.off()
}
}
}
  # Subplot 2: selectivity at age for all fleets, one plot per age factor.
  if (2 %in% subplots & !is.null(ageselex)) {
    # remove factor == "Asel" if all age-based selectivity == 1
    # (plotting a flat line at 1.0 for every fleet is uninformative)
    if ("Asel" %in% agefactors &&
      all(ageselex[
        ageselex[["Factor"]] == "Asel",
        paste(0:replist[["accuage"]])
      ] == 1)) {
      agefactors <- setdiff(agefactors, "Asel")
      message("Skipping plot of age-based selectivity as all values = 1.0")
    }
    if (length(agefactors) > 0) {
      for (ifactor in 1:length(agefactors)) {
        factor <- agefactors[ifactor]
        if (plot) {
          infotable2 <- plotAllSel(factor = factor)
        }
        if (print) {
          file <- paste("sel02_multiple_fleets_age", ifactor, ".png", sep = "")
          caption <- "Selectivity at age for multiple fleets."
          # "Asel2" is age selectivity derived from the length-based pattern
          if (factor == "Asel2") {
            caption <- paste(
              "Selectivity at age derived from selectivity at",
              "length for multiple fleets."
            )
          }
          plotinfo <- save_png(
            plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
            pheight = pheight, punits = punits, res = res, ptsize = ptsize,
            caption = caption
          )
          infotable2 <- plotAllSel(factor = factor)
          dev.off()
        }
      } # end loop over age factors
    } # end check for any agefactors
  }
################################################################################
### loop over fleets and sexes to make individual plot of length-based patterns
# first check if any of these plots are requested
if (any(3:9 %in% subplots) & !is.null(sizeselex)) {
# selex and retention
for (i in fleets)
{
for (m in sexes)
{
if (m == 1 & nsexes == 1) sextitle1 <- "Time-"
if (m == 1 & nsexes == 2) sextitle1 <- "Female time-"
if (m == 2) sextitle1 <- "Male time-"
if (m == 1 & nsexes == 1) sextitle2 <- "Ending"
if (m == 1 & nsexes == 2) sextitle2 <- "Female ending"
if (m == 2) sextitle2 <- "Male ending"
intret <- sizeselex[sizeselex[["Factor"]] == "Ret" &
sizeselex[["Yr"]] != startyr - 3 &
sizeselex[["Sex"]] == m, ]
intmort <- sizeselex[sizeselex[["Factor"]] == "Mort" &
sizeselex[["Yr"]] != startyr - 3 &
sizeselex[["Sex"]] == m, ]
intkeep <- sizeselex[sizeselex[["Factor"]] == "Keep" &
sizeselex[["Yr"]] != startyr - 3 &
sizeselex[["Sex"]] == m, ]
intdead <- sizeselex[sizeselex[["Factor"]] == "Dead" &
sizeselex[["Yr"]] != startyr - 3 &
sizeselex[["Sex"]] == m, ]
intselex <- sizeselex[sizeselex[["Factor"]] == "Lsel" &
sizeselex[["Yr"]] != startyr - 3 &
sizeselex[["Sex"]] == m, ]
plotselex <- intselex[intselex[["Fleet"]] == i, ]
plotret <- intret[intret[["Fleet"]] == i, ]
plotmort <- intmort[intmort[["Fleet"]] == i, ]
# test for time-varying length selectivity
time <- any(apply(plotselex[-c(1, nrow(plotselex)), -(1:5)], 2, function(x) {
any(x != x[1])
}))
if (time) {
x <- lbinspop
subset <- plotselex[["Yr"]] >= minyr & plotselex[["Yr"]] <= maxyr
y <- plotselex[["Yr"]][subset]
z <- plotselex[subset, -(1:5)]
z <- matrix(as.numeric(as.matrix(z)), ncol = ncol(z))
z <- t(z)
main <- paste(sextitle1, "varying selectivity for ", fleetnames[i], sep = "")
if (plot) {
if (3 %in% subplots) {
persp(x, y, z,
col = "white", xlab = labels[1],
ylab = labels[3], zlab = labels[4], expand = 0.5,
box = TRUE, main = ifelse(mainTitle, main, ""),
cex.main = cex.main, ticktype = "detailed",
phi = 35, theta = -10
)
}
if (4 %in% subplots) {
contour(x, y, z,
nlevels = 5, xlab = labels[1],
ylab = labels[3], main = ifelse(mainTitle, main, ""),
cex.main = cex.main, col = ians_blues, lwd = lwd
)
}
}
if (print) {
if (3 %in% subplots) {
file <- paste("sel03_len_timevary_surf_flt", i, "sex", m, ".png", sep = "")
caption <- paste("Surface plot of", main)
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
persp(x, y, z,
col = "white", xlab = labels[1], ylab = labels[3],
zlab = labels[4], expand = 0.5, box = TRUE,
main = ifelse(mainTitle, main, ""), cex.main = cex.main,
ticktype = "detailed", phi = 35, theta = -10
)
dev.off()
}
if (4 %in% subplots) {
file <- paste("sel04_len_timevary_contour_flt", i, "sex", m, ".png", sep = "")
caption <- paste("Countour plot of", main)
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
contour(x, y, z,
nlevels = 5, xlab = labels[1], ylab = labels[3],
main = ifelse(mainTitle, main, ""), cex.main = cex.main,
col = ians_blues, lwd = lwd
)
dev.off()
}
}
}
# test for time-varying length retention
time2 <- any(apply(plotret[-nrow(plotret), -(1:5)], 2, function(x) {
any(x != x[1])
}))
if (time2) {
x <- lbinspop
subset <- intret[["Yr"]] >= minyr & intret[["Yr"]] <= maxyr
y <- intret[["Yr"]][subset & intret[["Fleet"]] == i]
z <- intret[subset & intret[["Fleet"]] == i, -(1:5)]
z <- matrix(as.numeric(as.matrix(z)), ncol = ncol(z))
z <- t(z)
main <- paste(sextitle1, "varying retention for ", fleetnames[i], sep = "")
if (plot) {
if (5 %in% subplots) {
persp(x, y, z,
col = "white", xlab = labels[1],
ylab = labels[3], zlab = labels[5], expand = 0.5,
box = TRUE, main = ifelse(mainTitle, main, ""),
cex.main = cex.main, ticktype = "detailed",
phi = 35, theta = -10
)
}
if (6 %in% subplots) {
contour(x, y, z,
nlevels = 5, xlab = labels[1],
ylab = labels[3], main = ifelse(mainTitle, main, ""),
cex.main = cex.main, col = ians_blues, lwd = lwd
)
}
}
if (print) {
if (5 %in% subplots) {
file <- paste("sel05_timevary_ret_surf_flt", i, "sex", m, ".png", sep = "")
caption <- paste("Surface plot of", main)
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
persp(x, y, z,
col = "white", xlab = labels[1],
ylab = labels[3], zlab = labels[5], expand = 0.5,
box = TRUE, main = ifelse(mainTitle, main, ""),
cex.main = cex.main, ticktype = "detailed",
phi = 35, theta = -10
)
dev.off()
}
if (6 %in% subplots) {
file <- paste("sel06_timevary_ret_contour_flt", i, "sex", m, ".png", sep = "")
caption <- paste("Countour plot of", main)
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
contour(x, y, z,
nlevels = 5, xlab = labels[1],
ylab = labels[3], main = ifelse(mainTitle, main, ""),
cex.main = cex.main, col = ians_blues, lwd = lwd
)
dev.off()
}
}
}
# test for time-varying discard mortality rates
time3 <- any(apply(plotmort[-nrow(plotmort), -(1:5)], 2, function(x) {
any(x != x[1])
}))
if (time3) {
x <- lbinspop
subset <- intmort[["Yr"]] >= minyr & intmort[["Yr"]] <= maxyr
y <- intmort[["Yr"]][subset & intmort[["Fleet"]] == i]
z <- intmort[subset & intmort[["Fleet"]] == i, -(1:5)]
z <- matrix(as.numeric(as.matrix(z)), ncol = ncol(z))
z <- t(z)
main <- paste(sextitle1, "varying discard mortality for ", fleetnames[i], sep = "")
if (plot) {
if (7 %in% subplots) {
persp(x, y, z,
col = "white", xlab = labels[1], ylab = labels[3], zlab = labels[6],
expand = 0.5, box = TRUE,
main = ifelse(mainTitle, main, ""),
cex.main = cex.main, ticktype = "detailed",
phi = 35, theta = -10, zlim = c(0, max(z))
)
}
if (8 %in% subplots) {
contour(x, y, z,
nlevels = 5, xlab = labels[1], ylab = labels[3],
main = ifelse(mainTitle, main, ""),
cex.main = cex.main, col = ians_blues, lwd = lwd
)
}
}
if (print) {
if (7 %in% subplots) {
file <- paste("sel07_timevary_mort_surf_flt", i, "sex", m, ".png", sep = "")
caption <- paste("Surface plot of", main)
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
persp(x, y, z,
col = "white", xlab = labels[1],
ylab = labels[3], zlab = labels[6], expand = 0.5,
box = TRUE, main = ifelse(mainTitle, main, ""),
cex.main = cex.main, ticktype = "detailed", phi = 35, theta = -10
)
dev.off()
}
if (8 %in% subplots) {
file <- paste("sel08_timevary_mort_contour_flt", i, "sex", m, ".png", sep = "")
caption <- paste("Surface plot of", main)
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
contour(x, y, z,
nlevels = 5, xlab = labels[1],
ylab = labels[3], main = ifelse(mainTitle, main, ""),
cex.main = cex.main, col = ians_blues, lwd = lwd
)
dev.off()
}
}
}
# make plot of end year selectivity (with retention and discard mortality if used)
endselex <- plotselex[plotselex[["Yr"]] == endyr, -(1:5)]
plotret <- plotret[nrow(plotret), -(1:5)] # final year only
ylab <- labels[4]
bins <- as.numeric(names(endselex))
vals <- as.numeric(paste(endselex))
retvals <- as.numeric(plotret)
main <- paste(sextitle2, " year selectivity for ", fleetnames[i], sep = "")
selfunc <- function() {
# determine whether retention was used
intret2 <- intret[intret[["Fleet"]] == i, ]
retchecktemp <- as.vector(unlist(intret2[1, ]))
retcheck <- as.numeric(retchecktemp[6:length(retchecktemp)])
if (is.na(sum(retcheck))) retcheckuse <- 0
# if minimum retention is less than 1, show additional stuff in plot
if (!is.na(sum(retcheck))) retcheckuse <- 1 - min(retcheck)
# make plot
if (!add) {
par(mar = mar)
plot(bins, vals,
xlab = labels[1], ylim = c(0, 1),
main = ifelse(mainTitle, main, ""),
cex.main = cex.main, ylab = "", type = "n"
)
}
abline(h = 0, col = "grey")
abline(h = 1, col = "grey")
if (1 %in% selexlines) lines(bins, vals, type = "o", col = col2, cex = 1.1)
if (retcheckuse > 0) {
# if retention, then add additional lines & legend
useret <- intret[intret[["Fleet"]] == i, ]
usekeep <- intkeep[intkeep[["Fleet"]] == i, ]
usemort <- intmort[intmort[["Fleet"]] == i, ]
usedead <- intdead[intdead[["Fleet"]] == i, ]
if (endyr %in% as.numeric(useret[["Yr"]])) {
useyr <- endyr
} else {
useyr <- max(as.numeric(useret[["Yr"]]))
}
plotret <- useret[useret[["Yr"]] == useyr, ]
plotkeep <- usekeep[usekeep[["Yr"]] == useyr, ]
plotmort <- usemort[usemort[["Yr"]] == useyr, ]
plotdead <- usedead[usedead[["Yr"]] == useyr, ]
# compute discard as function of size: selectivity*(1 - retention)
plotdisc <- plotret
plotdisc[-(1:5)] <- vals * (1 - plotret[, -(1:5)])
# add additional lines if requested
if (2 %in% selexlines) {
lines(as.numeric(as.vector(names(plotret)[-(1:5)])),
as.numeric(as.character(plotret[1, -(1:5)])),
col = "red", type = "o", pch = 3, cex = .9
)
ylab <- paste(ylab, ", Retention", sep = "")
}
if (3 %in% selexlines) {
lines(as.numeric(as.vector(names(plotmort)[-(1:5)])),
as.numeric(as.character(plotmort[1, -(1:5)])),
col = "orange", type = "o", pch = 4, cex = .9
)
ylab <- paste(ylab, ", Mortality", sep = "")
}
if (4 %in% selexlines) {
lines(as.numeric(as.vector(names(plotkeep)[-(1:5)])),
as.numeric(as.character(plotkeep[1, -(1:5)])),
col = "purple", type = "o", pch = 2, cex = .9
)
}
if (5 %in% selexlines) {
lines(as.numeric(as.vector(names(plotdead)[-(1:5)])),
as.numeric(as.character(plotdead[1, -(1:5)])),
col = "green3", type = "o", pch = 5, cex = .9
)
}
if (6 %in% selexlines) {
lines(as.numeric(as.vector(names(plotdead)[-(1:5)])),
as.numeric(as.character(plotdisc[1, -(1:5)])),
col = "grey50", type = "o", pch = 6, cex = .9
)
}
# add legend
legend(legendloc,
inset = c(0, 0.05), bty = "n",
c(
labels[4], labels[5], labels[6], "Keep = Sel*Ret",
"Dead = Sel*(Ret+(1-Ret)*Mort)", "Discard = Sel*(1-Ret)"
)[selexlines],
lty = 1, col = c("blue", "red", "orange", "purple", "green3", "grey50")[selexlines],
pch = c(1, 3, 4, 2, 5, 6)[selexlines], pt.cex = c(1.1, .9, .9, .9, .9, .9)[selexlines]
)
}
mtext(ylab, side = 2, line = 3)
}
# make plot if selectivity is not constant at 0 or 1 for all bins
if ((min(vals) < 1 & max(vals) > 0) |
(!is.na(diff(range(retvals))) && diff(range(retvals)) != 0)) {
if (9 %in% subplots) {
if (plot) selfunc()
if (print) {
file <- paste("sel09_len_flt", i, "sex", m, ".png", sep = "")
caption <- main
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
selfunc()
dev.off()
}
}
}
} # sexes
} # fleets
} # check for any of the plots in this section requested
################################################################################
### loop over fleets and sexes to make individual plot of age-based patterns
  # Subplots 11-14: per-fleet, per-sex age-based selectivity -- time-varying
  # surface (11) and contour (12) plots, ending-year curve when time-varying
  # (13), and ending-year curve when constant over time (14).
  if (any(11:14 %in% subplots) & !is.null(ageselex)) {
    # Age based selex
    ylab <- labels[4]
    # note: only facnum = 1 ("Asel") is currently plotted here, although the
    # structure allows for "Asel2" as a second factor
    for (facnum in 1) {
      factor <- c("Asel", "Asel2")[facnum]
      for (i in fleets) {
        for (m in sexes) {
          if (m == 1 & nsexes == 1) sextitle1 <- "Time-"
          if (m == 1 & nsexes == 2) sextitle1 <- "Female time-"
          if (m == 2) sextitle1 <- "Male time-"
          if (m == 1 & nsexes == 1) sextitle2 <- "Ending"
          if (m == 1 & nsexes == 2) sextitle2 <- "Female ending"
          if (m == 2) sextitle2 <- "Male ending"
          # columns of ageselex holding selectivity at ages 0:accuage
          ageselexcols <- (1:ncol(ageselex))[names(ageselex) %in% as.character(0:accuage)]
          plotageselex <- ageselex[ageselex[["Factor"]] == factor & ageselex[["Fleet"]] == i &
            ageselex[["Yr"]] != startyr - 3 & ageselex[["Sex"]] == m, ]
          # test for time-varying age selectivity
          time <- any(apply(
            plotageselex[-c(1, nrow(plotageselex)), ageselexcols],
            2, function(x) {
              any(x != x[1])
            }
          ))
          if (time) {
            # only plot if selectivity is not 1.0 everywhere
            if ((min(as.numeric(as.vector(t(plotageselex[, -(1:7)])))) < 1)) {
              subset <- as.numeric(plotageselex[["Yr"]]) >= minyr &
                as.numeric(plotageselex[["Yr"]]) <= maxyr
              x <- seq(0, accuage, by = 1)
              y <- as.numeric(plotageselex[["Yr"]])[subset]
              z <- plotageselex[subset, -(1:7)]
              z <- matrix(as.numeric(as.matrix(z)), ncol = ncol(z))
              z <- t(z)
              main <- paste(sextitle1, "varying selectivity for ", fleetnames[i], sep = "")
              if (plot) {
                if (11 %in% subplots) {
                  persp(x, y, z,
                    col = "white", xlab = labels[2],
                    ylab = labels[3], zlab = ylab, expand = 0.5,
                    box = TRUE, main = ifelse(mainTitle, main, ""), cex.main = cex.main,
                    ticktype = "detailed", phi = 35, theta = -10
                  )
                }
                if (12 %in% subplots) {
                  contour(x, y, z,
                    nlevels = 5, xlab = labels[2],
                    main = ifelse(mainTitle, main, ""),
                    cex.main = cex.main, col = ians_blues, lwd = lwd
                  )
                }
              }
              if (print) {
                if (11 %in% subplots) {
                  file <- paste("sel11_timevary_surf_flt", i, "sex", m, ".png", sep = "")
                  caption <- main
                  plotinfo <- save_png(
                    plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
                    pheight = pheight, punits = punits, res = res, ptsize = ptsize,
                    caption = caption
                  )
                  persp(x, y, z,
                    col = "white", xlab = labels[2],
                    ylab = labels[3], zlab = ylab, expand = 0.5,
                    box = TRUE, main = ifelse(mainTitle, main, ""),
                    cex.main = cex.main, ticktype = "detailed",
                    phi = 35, theta = -10
                  )
                  dev.off()
                }
                if (12 %in% subplots) {
                  file <- paste("sel12_timevary_contour_flt", i, "sex", m, ".png", sep = "")
                  caption <- main
                  plotinfo <- save_png(
                    plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
                    pheight = pheight, punits = punits, res = res, ptsize = ptsize,
                    caption = caption
                  )
                  contour(x, y, z,
                    nlevels = 5, xlab = labels[2],
                    main = ifelse(mainTitle, main, ""),
                    cex.main = cex.main, col = ians_blues, lwd = lwd
                  )
                  dev.off()
                }
              }
              # subplot 13: final-year curve for time-varying selectivity
              plotageselex2 <- plotageselex[plotageselex[["Yr"]] %in% c(max(as.numeric(plotageselex[["Yr"]]))), ]
              plotageselex2 <- plotageselex2[, -(1:7)]
              main <- paste(sextitle2, " year selectivity for ", fleetnames[i], sep = "")
              endselfunc <- function() {
                if (!add) {
                  par(mar = mar)
                  plot(as.numeric(names(plotageselex2)),
                    as.numeric(paste(c(plotageselex2))),
                    xlab = labels[2], ylim = c(0, 1), main = ifelse(mainTitle, main, ""),
                    cex.main = cex.main, ylab = ylab,
                    type = "n", col = col2, cex = 1.1
                  )
                }
                lines(as.numeric(names(plotageselex2)),
                  as.numeric(paste(c(plotageselex2))),
                  type = "o", col = col2, cex = 1.1
                )
                abline(h = 0, col = "grey")
              }
              if (13 %in% subplots) {
                if (plot) {
                  endselfunc()
                }
                if (print) {
                  file <- paste("sel13_age_flt", i, "sex", m, ".png", sep = "")
                  caption <- main
                  plotinfo <- save_png(
                    plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
                    pheight = pheight, punits = punits, res = res, ptsize = ptsize,
                    caption = caption
                  )
                  endselfunc()
                  dev.off()
                }
              }
            }
          }
          if (!time) {
            # subplot 14: constant-over-time selectivity; plot most recent year
            plotageselex <- plotageselex[plotageselex[["Yr"]] == max(plotageselex[["Yr"]]), ]
            plotageselex <- plotageselex[, -(1:7)]
            vals <- as.numeric(paste(c(plotageselex)))
            # test for constant across all values
            doplot <- nrow(plotageselex) > 0 && diff(range(vals)) != 0
            if (doplot & skipAgeSelex10) {
              # skip if selectivity type 10 is used (0.0 for age-0, 1.0 for other ages)
              doplot <- !(vals[1] == 0 & all(vals[-1] == 1))
            }
            if (doplot) {
              main <- paste(sextitle2, " year selectivity for ", fleetnames[i], sep = "")
              endselfunc2 <- function() {
                if (!add) {
                  par(mar = mar)
                  plot((as.numeric(names(plotageselex))), vals,
                    xlab = labels[2], ylim = c(0, 1),
                    main = ifelse(mainTitle, main, ""), cex.main = cex.main,
                    ylab = ylab, type = "n"
                  )
                }
                lines((as.numeric(names(plotageselex))), vals, type = "o", col = col2, cex = 1.1)
                abline(h = 0, col = "grey")
              }
              if (14 %in% subplots) {
                if (plot) endselfunc2()
                if (print) {
                  file <- paste("sel14_age_flt", i, "sex", m, ".png", sep = "")
                  caption <- main
                  plotinfo <- save_png(
                    plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
                    pheight = pheight, punits = punits, res = res, ptsize = ptsize,
                    caption = caption
                  )
                  endselfunc2()
                  dev.off()
                }
              }
            } # end if
          } # end if not time varying
        } # sexes
      } # fleets
      flush.console()
    } # factor (Asel vs. Asel2)
  } # check for any of the plots in this section requested
################################################################################
### Matrix of selectivity deviations for semi-parametric (2D-AR1) selectivity
  # Subplot 15: image plots of selectivity deviation matrices for
  # semi-parametric (2D-AR1) selectivity, one matrix per fleet/axis.
  if (15 %in% subplots & !is.null(replist[["seldev_matrix"]])) {
    seldev_pars <- replist[["seldev_pars"]]
    seldev_matrix <- replist[["seldev_matrix"]]
    # define color palette (red = negative, white = zero, blue = positive)
    devcol.fn <- colorRampPalette(colors = c("red", "white", "blue"))
    # define function to make an image plot of one deviation matrix;
    # rows are years (plotted top-down via the reversed ylim), columns are bins
    seldev_func <- function(m, mar = c(4.1, 4.1, 1, 1)) {
      bins <- as.numeric(colnames(m))
      years <- as.numeric(rownames(m))
      par(mar = mar)
      image(
        x = bins, y = years, z = t(m), col = devcol.fn(10),
        xlab = names(dimnames(m))[2],
        ylab = names(dimnames(m))[1],
        axes = FALSE,
        ylim = rev(range(years) + c(-0.5, 0.5))
      )
      axis(1, at = bins)
      axis(2, at = years, las = 1)
      box()
    }
    for (imatrix in 1:length(seldev_matrix)) {
      # build a human-readable title from the list-element name
      label <- names(seldev_matrix)[imatrix]
      main <- gsub(pattern = "_", replacement = " ", x = label)
      main <- gsub(pattern = "seldevs", replacement = "selectivity deviations", x = main)
      if (plot) {
        seldev_func(m = seldev_matrix[[imatrix]], mar = c(5, 4, 4, 1) + 0.1)
        title(main = ifelse(mainTitle, main, ""))
      }
      if (print) {
        file <- paste("sel15_", label, ".png", sep = "")
        caption <- gsub(pattern = "selectivity ", replacement = "", x = main)
        caption <- paste0(
          caption,
          " for semi-parametric (2D-AR1) selectivity. ",
          "Blue value are positive deviations and red values negative. ",
          "The matrix of values is available in the list created by ",
          "<code>SS_output()</code> as <code>$seldev_matrix</code> which ",
          "is a list with an element for each combination of fleet and length or ",
          "age which uses the semi-parametric selectivity."
        )
        plotinfo <- save_png(
          plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
          pheight = pheight, punits = punits, res = res, ptsize = ptsize,
          caption = caption
        )
        seldev_func(m = seldev_matrix[[imatrix]])
        dev.off()
      }
    } # end loop over matrices
  } # end subplots
################################################################################
### Selectivity contours over age and length shown with growth curve
  # Subplot 21: contour of combined age*length selectivity overlaid with the
  # growth curve (mean length at age with 95% interval).  Only produced for
  # single-growth-pattern models with valid population length bins.
  if (21 %in% subplots &
    !is.null(ngpatterns) &&
    ngpatterns == 1 &
    !is.null(growdat) &
    !is.null(sizeselex) &
    !is.null(ageselex) &
    all(!is.na(lbinspop))
  ) { # need to connect growth patterns to fleets in future
    # subsetting for one season only. This could be replaced
    # by info on the growth within the season when each fleet operates.
    growdat <- growdat[growdat[["Seas"]] == season, ]
    if (nseasons > 1) {
      message(
        "Warning: plots showing growth curve with selectivity are using season ",
        season, " growth, which may not match the timing of the fishery."
      )
    }
    # Mid year mean length at age with 95% range of lengths (by sex if applicable)
    growdatF <- growdat[growdat[["Sex"]] == 1 & growdat[["Morph"]] == mainmorphs[1], ]
    growdatF[["Sd_Size"]] <- growdatF[["SD_Mid"]]
    if (growthCVtype == "logSD=f(A)") { # lognormal distribution of length at age
      growdatF[["high"]] <- qlnorm(0.975, meanlog = log(growdatF[["Len_Mid"]]), sdlog = growdatF[["Sd_Size"]])
      growdatF[["low"]] <- qlnorm(0.025, meanlog = log(growdatF[["Len_Mid"]]), sdlog = growdatF[["Sd_Size"]])
    } else { # normal distribution of length at age
      growdatF[["high"]] <- qnorm(0.975, mean = growdatF[["Len_Mid"]], sd = growdatF[["Sd_Size"]])
      growdatF[["low"]] <- qnorm(0.025, mean = growdatF[["Len_Mid"]], sd = growdatF[["Sd_Size"]])
    }
    if (nsexes > 1) {
      # repeat interval calculation for males when the model has two sexes
      growdatM <- growdat[growdat[["Sex"]] == 2 & growdat[["Morph"]] == mainmorphs[2], ]
      growdatM[["Sd_Size"]] <- growdatM[["SD_Mid"]]
      if (growthCVtype == "logSD=f(A)") { # lognormal distribution of length at age
        growdatM[["high"]] <- qlnorm(0.975, meanlog = log(growdatM[["Len_Mid"]]), sdlog = growdatM[["Sd_Size"]])
        growdatM[["low"]] <- qlnorm(0.025, meanlog = log(growdatM[["Len_Mid"]]), sdlog = growdatM[["Sd_Size"]])
      } else { # normal distribution of length at age
        growdatM[["high"]] <- qnorm(0.975, mean = growdatM[["Len_Mid"]], sd = growdatM[["Sd_Size"]])
        growdatM[["low"]] <- qnorm(0.025, mean = growdatM[["Len_Mid"]], sd = growdatM[["Sd_Size"]])
      }
    }
    xlab <- labels[2]
    ylab <- labels[1]
    zlab <- labels[4]
    for (i in fleets)
    {
      for (m in sexes)
      {
        if (m == 1 & nsexes == 1) sextitle2 <- "Ending"
        if (m == 1 & nsexes == 2) sextitle2 <- "Female ending"
        if (m == 2) sextitle2 <- "Male ending"
        plotlenselex <- as.numeric(sizeselex[sizeselex[["Factor"]] == "Lsel" &
          sizeselex[["Yr"]] == endyr &
          sizeselex[["Fleet"]] == i &
          sizeselex[["Sex"]] == m, -(1:5)])
        # test if there is any length-based selectivity (otherwise plot is uninformative)
        if (any(plotlenselex != 1)) {
          plotageselex <- as.numeric(ageselex[ageselex[["Factor"]] == "Asel" &
            ageselex[["Yr"]] == endyr &
            ageselex[["Fleet"]] == i &
            ageselex[["Sex"]] == m, -(1:7)])
          # x here should probably be replaced by $Age_Mid or some more informative value
          x <- seq(0, accuage, by = 1)
          y <- lbinspop
          z <- plotageselex %o% plotlenselex # outer product of age- and length-selectivity
          main <- paste(sextitle2, " year selectivity and growth for ", fleetnames[i], sep = "")
          # contour of combined selectivity with the growth curve drawn on top
          # (white under-lines make the curve visible over dark contours)
          agelenselcontour <- function() {
            contour(x, y, z,
              nlevels = 5, xlab = xlab, ylab = ylab,
              main = ifelse(mainTitle, main, ""), cex.main = cex.main, col = ians_blues, lwd = lwd
            )
            if (m == 1) {
              lines(x, growdatF[["Len_Mid"]], col = "white", lwd = 5)
              lines(x, growdatF[["Len_Mid"]], col = col1, lwd = 3)
              lines(x, growdatF[["high"]], col = "white", lwd = 1, lty = 1)
              lines(x, growdatF[["high"]], col = col1, lwd = 1, lty = "dashed")
              lines(x, growdatF[["low"]], col = "white", lwd = 1, lty = 1)
              lines(x, growdatF[["low"]], col = col1, lwd = 1, lty = "dashed")
            }
            if (m == 2) {
              lines(x, growdatM[["Len_Mid"]], col = "white", lwd = 5)
              lines(x, growdatM[["Len_Mid"]], col = col2, lwd = 3)
              lines(x, growdatM[["high"]], col = "white", lwd = 1, lty = 1)
              lines(x, growdatM[["high"]], col = col2, lwd = 1, lty = "dashed")
              lines(x, growdatM[["low"]], col = "white", lwd = 1, lty = 1)
              lines(x, growdatM[["low"]], col = col2, lwd = 1, lty = "dashed")
            }
          }
          if (plot) {
            if (21 %in% subplots) agelenselcontour()
          }
          if (print) {
            if (21 %in% subplots) {
              file <- paste("sel21_agelen_contour_flt", i, "sex", m, ".png", sep = "")
              caption <- main
              plotinfo <- save_png(
                plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
                pheight = pheight, punits = punits, res = res, ptsize = ptsize,
                caption = caption
              )
              agelenselcontour()
              dev.off()
            }
          }
        } # if there is any length-based selectivity
      } # sexes
    } # fleets
  } # end subplots
################################################################################
### Plot selectivity with uncertainty if "Extra SD reporting" requested in control file
if (22 %in% subplots) {
# get values from Extra SD reporting if created by request at bottom of control file
rows <- grep("Selex_std", derived_quants[["Label"]])
if (length(rows) > 0) {
sel <- derived_quants[rows, ]
names <- sel[["Label"]]
splitnames <- strsplit(names, "_")
namesDF <- as.data.frame(matrix(unlist(strsplit(names, "_")), ncol = 6, byrow = T))
sel[["Fleet"]] <- as.numeric(as.character(namesDF[["V3"]]))
sel[["Sex"]] <- as.character(namesDF[["V4"]])
sel[["agelen"]] <- as.character(namesDF[["V5"]])
sel[["bin"]] <- as.numeric(as.character(namesDF[["V6"]]))
sel[["lower"]] <- pmax(qnorm(0.025, mean = sel[["Value"]], sd = sel[["StdDev"]]), 0) # trim at 0
sel[["upper"]] <- pmin(qnorm(0.975, mean = sel[["Value"]], sd = sel[["StdDev"]]), 1) # trim at 1
i <- sel[["Fleet"]][1]
agelen <- sel[["agelen"]][1]
xlab <- labels[1:2][1 + (sel[["agelen"]][1] == "A")] # decide label between length and age
for (m in intersect(unique(sel[["Sex"]]), c("Fem", "Mal")[sexes])) {
seltemp <- sel[sel[["Sex"]] == m, ]
if (m == "Fem" & nsexes == 1) sextitle3 <- ""
if (m == "Fem" & nsexes == 2) sextitle3 <- "females"
if (m == "Mal") sextitle3 <- "males"
main <- paste("Uncertainty in selectivity for", fleetnames[i], sextitle3)
no0 <- seltemp[["StdDev"]] > 0.001
if (FALSE) {
# Ian T.: this is the beginning of code to add the full selectivity line,
# including bins for which no uncertainty was requested
if (agelen == "L") {
plotselex <- sizeselex[sizeselex[["Factor"]] == "Lsel" &
ageselex[["Fleet"]] == i &
sizeselex[["Sex"]] == m, ]
}
if (agelen == "A") {
plotselex <- ageselex[ageselex[["Factor"]] == "Asel" &
ageselex[["Fleet"]] == i &
ageselex[["Sex"]] == m, ]
}
}
plot_extra_selex_SD <- function() {
if (!add) {
par(mar = mar)
plot(seltemp[["bin"]], seltemp[["Value"]],
xlab = xlab, ylim = c(0, 1),
main = ifelse(mainTitle, main, ""), cex.main = cex.main,
ylab = labels[4], type = "n", col = col2, cex = 1.1,
xlim = c(0, max(seltemp[["bin"]])),
)
}
lines(seltemp[["bin"]], seltemp[["Value"]],
xlab = xlab, ylim = c(0, 1),
main = ifelse(mainTitle, main, ""), cex.main = cex.main,
ylab = labels[4], type = "o", col = col2, cex = 1.1,
xlim = c(0, max(seltemp[["bin"]]))
)
arrows(
x0 = seltemp[["bin"]][no0], y0 = seltemp[["lower"]][no0],
x1 = seltemp[["bin"]][no0], y1 = seltemp[["upper"]][no0],
length = 0.01, angle = 90, code = 3, col = col2
)
abline(h = 0, col = "grey")
}
if (plot) {
plot_extra_selex_SD()
}
if (print) {
file <- paste("sel22_uncertainty", "sex", m, ".png", sep = "")
caption <- main
plotinfo <- save_png(
plotinfo = plotinfo, file = file, plotdir = plotdir, pwidth = pwidth,
pheight = pheight, punits = punits, res = res, ptsize = ptsize,
caption = caption
)
plot_extra_selex_SD()
dev.off()
}
}
} # end test for presence of selectivity uncertainty output
} # check check for subplots in list
  # return info on any PNG files created, tagged with the "Sel" category,
  # along with the infotable describing the plotted lines
  if (!is.null(plotinfo)) plotinfo[["category"]] <- "Sel"
  return(invisible(list(infotable = infotable2, plotinfo = plotinfo)))
}
|
ucipBound <- function(RT, CR, stopping.rule=c("AND", "OR"), bound=c("upper", "lower"), Cspace =FALSE) {
  # Unlimited-capacity, independent, parallel (UCIP) race-model bounds on the
  # redundant-target response-time distribution, constructed from the two
  # single-target conditions.
  #
  # RT, CR: lists whose elements 2 and 3 are the two single-target conditions
  #   (response times and correct-response indicators, respectively); element
  #   1 is the redundant-target condition (used only to extend the time grid).
  # stopping.rule: which stopping rules to bound ("AND", "OR", or both).
  # bound: which bounds to return ("upper", "lower", or both).
  # Cspace: if TRUE, return the bound transformed to capacity-coefficient
  #   space (currently implemented only for the upper OR bound).
  # Returns a named list of step functions containing the requested subset of
  #   "upper.or", "upper.and", "lower.or", "lower.and".
  tvec <- c(0, sort(unique(unlist(RT))))
  # Correct-trial CDFs of the two single-target conditions on the joint grid.
  F1 <- ecdf(RT[[2]][CR[[2]] == 1])(tvec)
  F2 <- ecdf(RT[[3]][CR[[3]] == 1])(tvec)
  # agrep() returns integer(0) when nothing matches, which is an error when
  # used directly as an if() condition (the original crashed whenever a
  # requested option was absent, e.g. bound = "lower" alone); wrap it in a
  # length test instead.
  wants <- function(pattern, x) {
    length(agrep(pattern, x, ignore.case = TRUE)) > 0
  }
  # Build a right-continuous step function equal to vals[i] on
  # [tvec[i], tvec[i+1]).  stepfun(x, y) uses y[i+1] on that interval, so the
  # knot values must be prefixed with the pre-data value (as ecdf() itself
  # does); the original appended a trailing constant instead, which shifted
  # the whole function one knot to the left.
  make_step <- function(vals, call_label) {
    fn <- stepfun(tvec, c(vals[1], vals))
    attr(fn, "call") <- call_label
    fn
  }
  returnlist <- vector("list", 0)
  if (wants("upper", bound)) {
    if (wants("or", stopping.rule)) {
      if (Cspace) {
        # capacity-space form: log(S1 + S2 - 1) / log(S1 * S2)
        numer <- (1 - F1) + (1 - F2) - 1
        denom <- (1 - F1) * (1 - F2)
        fn <- make_step(
          log(numer) / log(denom),
          "log(S1(t) + S2(t)-1)/log(S1(t)S2(t)"
        )
      } else {
        fn <- make_step(F1 + F2, "F1(t) + F2(t)")
      }
      returnlist <- c(returnlist, upper.or = fn)
    }
    if (wants("and", stopping.rule)) {
      if (Cspace) {
        # not implemented; the original fell through and appended a stale fn
        warning("Capacity-space upper AND bound is not implemented; skipping.")
      } else {
        fn <- make_step(pmin(F1, F2), "MIN(F1(t), F2(t))")
        returnlist <- c(returnlist, upper.and = fn)
      }
    }
  }
  if (wants("lower", bound)) {
    if (wants("or", stopping.rule)) {
      if (Cspace) {
        warning("Capacity-space lower OR bound is not implemented; skipping.")
      } else {
        fn <- make_step(pmax(F1, F2), "MAX(F1(t), F2(t))")
        returnlist <- c(returnlist, lower.or = fn)
      }
    }
    if (wants("and", stopping.rule)) {
      if (Cspace) {
        warning("Capacity-space lower AND bound is not implemented; skipping.")
      } else {
        fn <- make_step(F1 + F2 - 1, "F1(t) + F2(t)-1")
        returnlist <- c(returnlist, lower.and = fn)
      }
    }
  }
  return(returnlist)
}
|
/R/capacitySpace.R
|
no_license
|
jhoupt/sft
|
R
| false
| false
| 1,952
|
r
|
# Compute UCIP (unlimited-capacity, independent, parallel) bounds on the
# redundant-target response-time distribution, from the two single-target
# conditions' empirical CDFs.
#
# Args:
#   RT: list of response-time vectors; only RT[[2]] and RT[[3]] (the two
#       single-target conditions) are used here. RT[[1]] is not read by this
#       function but contributes its times to the evaluation grid.
#   CR: list of correct-response indicators aligned with RT; only correct
#       trials (CR == 1) enter the empirical CDFs.
#   stopping.rule: which stopping rule(s) to compute bounds for ("AND", "OR",
#       or both); matched approximately and case-insensitively.
#   bound: which bound(s) to compute ("upper", "lower", or both).
#   Cspace: if TRUE, return the bound in capacity-coefficient space (only
#       implemented for the OR upper bound; other Cspace forms are skipped).
#
# Returns: a (possibly empty) named list of step functions with names among
#   "upper.or", "upper.and", "lower.or", "lower.and"; each carries a "call"
#   attribute recording its formula.
ucipBound <- function(RT, CR, stopping.rule = c("AND", "OR"),
                      bound = c("upper", "lower"), Cspace = FALSE) {
  # Evaluation grid: 0 plus every observed response time.
  tvec <- c(0, sort(unique(unlist(RT))))
  returnlist <- vector("list", 0)

  # Approximate, case-insensitive membership test. The original code used
  # `if (agrep(...))` directly, which errors with "argument is of length
  # zero" whenever there is no match (e.g. bound = "lower" never fuzzy-
  # matches "upper"); testing the match-vector length makes the guard safe.
  wanted <- function(target, choices) {
    length(agrep(target, choices, ignore.case = TRUE)) > 0
  }

  # Single-target empirical CDFs of correct-trial RTs, evaluated on the grid.
  F1 <- ecdf(RT[[2]][CR[[2]] == 1])(tvec)
  F2 <- ecdf(RT[[3]][CR[[3]] == 1])(tvec)

  if (wanted("upper", bound)) {
    if (wanted("or", stopping.rule)) {
      if (Cspace) {
        # Capacity-space form of the OR upper bound.
        numer <- (1 - F1) + (1 - F2) - 1
        denom <- (1 - F1) * (1 - F2)
        fn <- stepfun(tvec, c(log(numer) / log(denom), NA))
        # NEED TO FIGURE OUT PLOTTING LIMITS
        attr(fn, "call") <- "log(S1(t) + S2(t)-1)/log(S1(t)S2(t)"
      } else {
        fn <- stepfun(tvec, c(F1 + F2, 2))
        attr(fn, "call") <- "F1(t) + F2(t)"
      }
      returnlist <- c(returnlist, upper.or = fn)
    }
    if (wanted("and", stopping.rule)) {
      # Capacity-space version not implemented. (Previously the empty Cspace
      # branch fell through and appended a stale `fn` from an earlier branch;
      # now the bound is simply skipped.)
      if (!Cspace) {
        fn <- stepfun(tvec, c(pmin(F1, F2), 1))
        attr(fn, "call") <- "MIN(F1(t), F2(t))"
        returnlist <- c(returnlist, upper.and = fn)
      }
    }
  }

  if (wanted("lower", bound)) {
    if (wanted("or", stopping.rule)) {
      if (!Cspace) {
        fn <- stepfun(tvec, c(pmax(F1, F2), 1))
        attr(fn, "call") <- "MAX(F1(t), F2(t))"
        returnlist <- c(returnlist, lower.or = fn)
      }
    }
    if (wanted("and", stopping.rule)) {
      if (!Cspace) {
        fn <- stepfun(tvec, c(F1 + F2 - 1, 1))
        attr(fn, "call") <- "F1(t) + F2(t)-1"
        returnlist <- c(returnlist, lower.and = fn)
      }
    }
  }

  return(returnlist)
}
|
#' Extract GitHub RSS feed
#'
#' @param feed a list containing two elements: a username and a RSS atom link
#'
#' @return a data.frame of the RSS feed contents
#' @export
extract_feed <- function(feed) {
  # Fetch the Atom document and isolate the <entry> nodes.
  page <- xml2::read_html(httr::GET(feed))
  entries <- rvest::html_nodes(page, "entry")

  # Event id: strip the "tag:...:" prefix and the trailing "Event..." text.
  raw_id <- rvest::html_text(rvest::html_nodes(entries, "id"))
  raw_id <- gsub("^.*:", "", raw_id)
  event_id <- gsub("Event.*$", "", raw_id)

  # Entry titles look like "<user> <action words> <target>".
  titles <- rvest::html_text(rvest::html_nodes(entries, "title"))
  feed_user <- sub("^(.*?) .*", "\\1", titles)
  feed_action <- sub("^(.*?) (.*) .*$", "\\2", titles)
  feed_target <- sub("^.* (.*?)$", "\\1", titles)

  published_at <- rvest::html_text(rvest::html_nodes(entries, "published"))
  entry_links <- rvest::html_attr(rvest::html_nodes(entries, "link"), "href")
  thumbs <- rvest::html_attr(rvest::html_nodes(entries, "thumbnail"), "url")

  # Assemble one row per feed entry (column names match the original output).
  data.frame(
    thumb = thumbs, user = feed_user, action = feed_action,
    target = feed_target, published = published_at, id = event_id,
    links = entry_links, stringsAsFactors = FALSE
  )
}
#' Pipe operator
#'
#' Re-export of the magrittr forward pipe so package code can use `%>%`.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
NULL
# Register "." (the magrittr placeholder pronoun used above) as a known
# global so R CMD check does not flag it as an undefined variable.
globalVariables(".")
|
/R/process.R
|
no_license
|
jonocarroll/starryeyes
|
R
| false
| false
| 1,244
|
r
|
#' Extract GitHub RSS feed
#'
#' @param feed a list containing two elements: a username and a RSS atom link
#'
#' @return a data.frame of the RSS feed contents
#' @export
extract_feed <- function(feed) {
  # Fetch the Atom document and isolate the <entry> nodes.
  page <- xml2::read_html(httr::GET(feed))
  entries <- rvest::html_nodes(page, "entry")

  # Event id: strip the "tag:...:" prefix and the trailing "Event..." text.
  raw_id <- rvest::html_text(rvest::html_nodes(entries, "id"))
  raw_id <- gsub("^.*:", "", raw_id)
  event_id <- gsub("Event.*$", "", raw_id)

  # Entry titles look like "<user> <action words> <target>".
  titles <- rvest::html_text(rvest::html_nodes(entries, "title"))
  feed_user <- sub("^(.*?) .*", "\\1", titles)
  feed_action <- sub("^(.*?) (.*) .*$", "\\2", titles)
  feed_target <- sub("^.* (.*?)$", "\\1", titles)

  published_at <- rvest::html_text(rvest::html_nodes(entries, "published"))
  entry_links <- rvest::html_attr(rvest::html_nodes(entries, "link"), "href")
  thumbs <- rvest::html_attr(rvest::html_nodes(entries, "thumbnail"), "url")

  # Assemble one row per feed entry (column names match the original output).
  data.frame(
    thumb = thumbs, user = feed_user, action = feed_action,
    target = feed_target, published = published_at, id = event_id,
    links = entry_links, stringsAsFactors = FALSE
  )
}
#' Pipe operator
#'
#' Re-export of the magrittr forward pipe so package code can use `%>%`.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
NULL
# Register "." (the magrittr placeholder pronoun used above) as a known
# global so R CMD check does not flag it as an undefined variable.
globalVariables(".")
|
#!/usr/bin/env Rscript
# ==========
# Boxplot summary of per-inference-method statistics, faceted metric x dataset.
# ==========
library(ggplot2)
library(grid)
library(scales)

# Other inputs this script has been run on:
#   denews-25-set1.denews.10, ap-50-set{1,2,3}.denews.10, 20news-10.20news.10,
#   kos-50-set{1,2}.denews.10, kos-50-set{1,2}.20news.10,
#   nips-150-15K-50-1500-500.denews.10, nips-150-15K-50-1500-500.20news.10
file_name <- "statistics.20news.10"

# Project root (alternative: "/windows/d/Workspace/PyHDP/").
project_home <- "/Users/student/Workspace/PyHDP/"
input_directory <- paste(project_home, "result/", sep = "")
output_directory <- paste(project_home, "figure/", sep = "")
input_file <- paste(input_directory, file_name, ".csv", sep = "")
output_file <- paste(output_directory, file_name, ".pdf", sep = "")

pdf(width = 8, height = 5)

# Load the per-run statistics (columns include inference, value, metric,
# dataset -- inferred from the aesthetics below; confirm against the CSV).
input_data <- read.csv(input_file)

# (An earlier variant drew mean +/- sd bars via stat_summary; removed.)
plot_pic <- ggplot(data = input_data,
                   aes(x = factor(inference), y = value,
                       lower = "25%", middle = "50%", upper = "75%")) +
  geom_boxplot() +
  facet_grid(metric ~ dataset, scales = "free") +
  labs(x = "", y = "") +
  theme(legend.margin = unit(0, "line")) +
  theme(legend.key.height = unit(1.5, "line")) +
  theme(legend.position = "bottom") +
  theme(legend.title = element_text(size = 12, angle = 0),
        legend.text = element_text(size = 15)) +
  guides(colour = guide_legend(nrow = 1)) +
  theme(axis.text.x = element_text(size = 15, colour = "black")) +
  theme(axis.text.y = element_text(size = 15, colour = "black")) +
  theme(axis.title.x = element_text(size = 15),
        axis.title.y = element_text(size = 15, angle = 90)) +
  theme(strip.text.x = element_text(size = 15),
        strip.text.y = element_text(size = 15, angle = -90))

ggsave(plot_pic, filename = output_file)
|
/src/util/plot_statistics.R
|
no_license
|
kzhai/PyHDPOld
|
R
| false
| false
| 2,673
|
r
|
#!/usr/bin/env Rscript
# ==========
# Boxplot summary of per-inference-method statistics, faceted metric x dataset.
# ==========
library(ggplot2)
library(grid)
library(scales)

# Other inputs this script has been run on:
#   denews-25-set1.denews.10, ap-50-set{1,2,3}.denews.10, 20news-10.20news.10,
#   kos-50-set{1,2}.denews.10, kos-50-set{1,2}.20news.10,
#   nips-150-15K-50-1500-500.denews.10, nips-150-15K-50-1500-500.20news.10
file_name <- "statistics.20news.10"

# Project root (alternative: "/windows/d/Workspace/PyHDP/").
project_home <- "/Users/student/Workspace/PyHDP/"
input_directory <- paste(project_home, "result/", sep = "")
output_directory <- paste(project_home, "figure/", sep = "")
input_file <- paste(input_directory, file_name, ".csv", sep = "")
output_file <- paste(output_directory, file_name, ".pdf", sep = "")

pdf(width = 8, height = 5)

# Load the per-run statistics (columns include inference, value, metric,
# dataset -- inferred from the aesthetics below; confirm against the CSV).
input_data <- read.csv(input_file)

# (An earlier variant drew mean +/- sd bars via stat_summary; removed.)
plot_pic <- ggplot(data = input_data,
                   aes(x = factor(inference), y = value,
                       lower = "25%", middle = "50%", upper = "75%")) +
  geom_boxplot() +
  facet_grid(metric ~ dataset, scales = "free") +
  labs(x = "", y = "") +
  theme(legend.margin = unit(0, "line")) +
  theme(legend.key.height = unit(1.5, "line")) +
  theme(legend.position = "bottom") +
  theme(legend.title = element_text(size = 12, angle = 0),
        legend.text = element_text(size = 15)) +
  guides(colour = guide_legend(nrow = 1)) +
  theme(axis.text.x = element_text(size = 15, colour = "black")) +
  theme(axis.text.y = element_text(size = 15, colour = "black")) +
  theme(axis.title.x = element_text(size = 15),
        axis.title.y = element_text(size = 15, angle = 90)) +
  theme(strip.text.x = element_text(size = 15),
        strip.text.y = element_text(size = 15, angle = -90))

ggsave(plot_pic, filename = output_file)
|
# Format a dollar amount as a compact human-readable string, e.g. "$500K" or
# "$2.5M". Uses the "$" symbol with K/M suffixes; dot-separator locales are
# not yet supported.
currency <- function(amount) {
  # Round to three significant figures, then scale by the appropriate unit.
  if (amount < 1e6) {
    paste0("$", signif(amount, digits = 3) / 1e3, "K")
  } else {
    paste0("$", signif(amount, digits = 3) / 1e6, "M")
  }
}
|
/housing/currency.R
|
no_license
|
bhattsachin/datascience
|
R
| false
| false
| 326
|
r
|
# Format a dollar amount as a compact human-readable string, e.g. "$500K" or
# "$2.5M". Uses the "$" symbol with K/M suffixes; dot-separator locales are
# not yet supported.
currency <- function(amount) {
  # Round to three significant figures, then scale by the appropriate unit.
  if (amount < 1e6) {
    paste0("$", signif(amount, digits = 3) / 1e3, "K")
  } else {
    paste0("$", signif(amount, digits = 3) / 1e6, "M")
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.CBFM.R
\name{print.CBFM}
\alias{print.CBFM}
\alias{print.CBFM_hurdle}
\title{Print a (hurdle) CBFM object}
\usage{
\method{print}{CBFM}(x, ...)
\method{print}{CBFM_hurdle}(x, ...)
}
\arguments{
\item{x}{An object of class \code{CBFM} or \code{CBFM_hurdle}.}
\item{...}{Not used.}
}
\description{
`r lifecycle::badge("stable")`
The default print method for a \code{CBFM} or \code{CBFM_hurdle} object.
}
\details{
Print out details such as the function call, assumed family/response distribution, number of observational units and species, response-environment relationship fitted as given by the formula, and which sets of basis functions are used.
For a hurdle CBFM, details are provided for both of the component CBFMs separately.
}
\examples{
\dontrun{
# Please see the examples in the help files for the CBFM and makeahurdle functions.
}
}
\seealso{
\code{\link[=CBFM]{CBFM()}} for fitting CBFMs and \code{\link[=makeahurdle]{makeahurdle()}} for forming a hurdle CBFM based on two component CBFMs (a presence-absence component and a zero-truncated count component).
}
\author{
Francis K.C. Hui \href{mailto:fhui28@gmail.com}{fhui28@gmail.com}, Chris Haak
}
|
/man/print.CBFM.Rd
|
no_license
|
fhui28/CBFM
|
R
| false
| true
| 1,248
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.CBFM.R
\name{print.CBFM}
\alias{print.CBFM}
\alias{print.CBFM_hurdle}
\title{Print a (hurdle) CBFM object}
\usage{
\method{print}{CBFM}(x, ...)
\method{print}{CBFM_hurdle}(x, ...)
}
\arguments{
\item{x}{An object of class \code{CBFM} or \code{CBFM_hurdle}.}
\item{...}{Not used.}
}
\description{
`r lifecycle::badge("stable")`
The default print method for a \code{CBFM} or \code{CBFM_hurdle} object.
}
\details{
Print out details such as the function call, assumed family/response distribution, number of observational units and species, response-environment relationship fitted as given by the formula, and which sets of basis functions are used.
For a hurdle CBFM, details are provided for both of the component CBFMs separately.
}
\examples{
\dontrun{
# Please see the examples in the help files for the CBFM and makeahurdle functions.
}
}
\seealso{
\code{\link[=CBFM]{CBFM()}} for fitting CBFMs and \code{\link[=makeahurdle]{makeahurdle()}} for forming a hurdle CBFM based on two component CBFMs (a presence-absence component and a zero-truncated count component).
}
\author{
Francis K.C. Hui \href{mailto:fhui28@gmail.com}{fhui28@gmail.com}, Chris Haak
}
|
library(haven)
library(stringr)
library(reshape)
library(dplyr)
library(plyr)
library(data.table)
library(splitstackshape)
library(doBy)
library(ggplot2)
library(foreign)

#setwd("T:/Practice ClickStream/NewApp")
#NewData <- read_csv("data/NewData.csv",col_types = cols(X1 = col_skip()))

# Load the Coursera clickstream export and normalise the video names; drop
# rows with no timecode.
NewData <- read_sav("data/Data.sav")
NewData$video_name <- str_trim(NewData$video_name, side = "both")
NewData2 <- data.frame(NewData)
NewData2 <- subset(NewData2, !is.na(NewData2$timecode))

video_names <- as.data.frame(unique(NewData2$video_name))
group <- c("First Group", "Second Group", "Third Group")

# Count, per (user, video), how many events of one key type occurred.
# Replaces the previous attach()/detach() block, which mutated the global
# search path and repeated the same aggregate() call eleven times.
count_key_events <- function(key_name) {
  aggregate(
    NewData2$key == key_name,
    by = list(NewData2$illinois_user_id, NewData2$video_name),
    sum
  )
}

# NOTE(review): "dowbkiad_subtitle" is kept verbatim from the original code;
# it looks like a typo for "download_subtitle" -- confirm against the export.
key_names <- c(
  "dowbkiad_subtitle", "end", "heartbeat", "pause", "play",
  "playback_rate_change", "seek", "start", "subtitle_change",
  "volume_change", "wait"
)

# The first aggregate keeps the grouping columns; the others contribute only
# their count column (column 3). Row order is identical across calls because
# the grouping is identical, so a plain cbind aligns correctly.
first_counts <- count_key_events(key_names[1])
other_counts <- lapply(key_names[-1], function(k) count_key_events(k)[, 3])
KeyForNewData2 <- do.call(cbind, c(list(first_counts), other_counts))
colnames(KeyForNewData2) <- c(
  "UserID", "Video", "Delete", "end", "heartbeat", "pause", "play",
  "playback_rate_change", "seek", "start", "subtitle_change",
  "volume_change", "wait"
)

# Drop the unused "Delete" column and keep the tidy set of counters.
KeyForNewData2 <- subset(KeyForNewData2, select = c(
  "UserID", "Video", "end", "heartbeat", "pause", "play",
  "playback_rate_change", "seek", "start", "subtitle_change",
  "volume_change", "wait"
))

# Approximate seconds watched -- presumably heartbeat events fire every 5
# seconds; confirm against the Coursera export documentation.
KeyForNewData2$Secs <- KeyForNewData2$heartbeat * 5
library(shiny)

# UI definition: a sidebar holding the video/group selectors and a tabset of
# diagnostic plots in the main panel.
ui <- fluidPage(
  titlePanel("Clickstream Data Analysis"),
  sidebarLayout(
    sidebarPanel(
      selectInput("video", "Video:", choices = video_names),
      selectInput("group", "User Group: (10 users per user group)", choices = c(1, 2, 3)),
      hr(),
      helpText("Data from Coursera")
    ),
    mainPanel(
      tabsetPanel(
        type = "tabs",
        tabPanel("Histogram", plotOutput("histplot"), verbatimTextOutput("basic")),
        tabPanel("Cumulative", plotOutput("cumulative")),
        tabPanel("Density", plotOutput("ggplot")),
        tabPanel("BoxPlot", plotOutput("BoxPlot"), plotOutput("BoxPlot2")),
        # tabPanel("Density plot per user", plotOutput("ggplot2")),
        tabPanel("Graph", plotOutput("graph"), plotOutput("graph2"), plotOutput("graph3"))
      )
    )
  )
)
# Define server logic: every output filters the raw clickstream (NewData2) or
# the per-user key counts (KeyForNewData2) to the selected video, then plots.
server <- function(input, output) {
  # Clickstream rows for the currently selected video.
  selected_events <- reactive({
    subset(NewData2, NewData2$video_name == input$video)
  })
  # Per-user click counts for the currently selected video.
  selected_keys <- reactive({
    subset(KeyForNewData2, KeyForNewData2$Video == input$video)
  })

  # Scatter of one click-count column against seconds watched (one point per
  # user), with mean reference lines and point-index labels.
  plot_click_scatter <- function(keys, column, main, ylab, color) {
    clicks <- keys[[column]]
    plot(keys$Secs, clicks,
      main = main,
      xlim = c(0, max(keys$Secs) + 100),
      ylim = c(0, max(clicks) + 10),
      xlab = "Seconds",
      ylab = ylab,
      pch = 18, col = color
    )
    abline(v = mean(keys$Secs))
    abline(h = mean(clicks))
    text(keys$Secs, clicks, pos = 3, cex = 0.6)
  }

  output$histplot <- renderPlot({
    events <- selected_events()
    # NOTE(review): ylim is driven by the maximum timecode rather than by bin
    # counts (kept from the original); confirm this is the intended scaling.
    hist(events$timecode,
      main = "Histogram of the time user spent by video",
      xlim = range(events$timecode),
      ylim = c(0, max(events$timecode)),
      xlab = "Seconds", breaks = 50
    )
  })

  output$basic <- renderPrint({
    summary(selected_events()$timecode)
  })

  output$cumulative <- renderPlot({
    plot(ecdf(selected_events()$timecode),
      main = "Empirical Cumulative Distribution of the time by video",
      xlab = "time by seconds"
    )
  })

  output$ggplot <- renderPlot({
    # Red curve: all videos; black curve: the selected video.
    plot(density(NewData2$timecode),
      col = "red", xlim = c(0, 2500), ylim = c(0, 0.0035),
      xlab = "time in seconds",
      main = "Density plot of the time user spent, Red is for all videos, black is for the selected video"
    )
    lines(density(selected_events()$timecode), col = "black")
  })

  output$BoxPlot <- renderPlot({
    boxplot(selected_events()$timecode, outline = TRUE, col = "bisque")
    title("Boxplot for the duration of the video")
  })

  output$BoxPlot2 <- renderPlot({
    boxplot(timecode ~ key, data = selected_events(), outline = TRUE, col = "bisque")
    title("Comparing boxplot()s by different keys")
  })

  output$graph <- renderPlot({
    plot_click_scatter(selected_keys(), "pause",
      main = "Pause clicks per students per video",
      ylab = "Number of Pause Clicks", color = "red"
    )
  })

  output$graph2 <- renderPlot({
    plot_click_scatter(selected_keys(), "wait",
      main = "wait clicks per students per video",
      ylab = "Number of wait Clicks", color = "Green"
    )
  })

  output$graph3 <- renderPlot({
    # Bug fix: the original labelled points with KeySub$Seek (capital S), a
    # nonexistent column, so the labels were silently dropped; the seek
    # counts live in the "seek" column.
    plot_click_scatter(selected_keys(), "seek",
      main = "Seek clicks per students per video",
      ylab = "Number of seek Clicks", color = "blue"
    )
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
/app.R
|
no_license
|
ATLAS-CITLDataAnalyticsServices/Coursera-ClickStream-ShinyApp
|
R
| false
| false
| 7,854
|
r
|
library(haven)
library(stringr)
library(reshape)
library(dplyr)
library(plyr)
library(data.table)
library(splitstackshape)
library(doBy)
library(ggplot2)
library(foreign)

#setwd("T:/Practice ClickStream/NewApp")
#NewData <- read_csv("data/NewData.csv",col_types = cols(X1 = col_skip()))

# Load the Coursera clickstream export and normalise the video names; drop
# rows with no timecode.
NewData <- read_sav("data/Data.sav")
NewData$video_name <- str_trim(NewData$video_name, side = "both")
NewData2 <- data.frame(NewData)
NewData2 <- subset(NewData2, !is.na(NewData2$timecode))

video_names <- as.data.frame(unique(NewData2$video_name))
group <- c("First Group", "Second Group", "Third Group")

# Count, per (user, video), how many events of one key type occurred.
# Replaces the previous attach()/detach() block, which mutated the global
# search path and repeated the same aggregate() call eleven times.
count_key_events <- function(key_name) {
  aggregate(
    NewData2$key == key_name,
    by = list(NewData2$illinois_user_id, NewData2$video_name),
    sum
  )
}

# NOTE(review): "dowbkiad_subtitle" is kept verbatim from the original code;
# it looks like a typo for "download_subtitle" -- confirm against the export.
key_names <- c(
  "dowbkiad_subtitle", "end", "heartbeat", "pause", "play",
  "playback_rate_change", "seek", "start", "subtitle_change",
  "volume_change", "wait"
)

# The first aggregate keeps the grouping columns; the others contribute only
# their count column (column 3). Row order is identical across calls because
# the grouping is identical, so a plain cbind aligns correctly.
first_counts <- count_key_events(key_names[1])
other_counts <- lapply(key_names[-1], function(k) count_key_events(k)[, 3])
KeyForNewData2 <- do.call(cbind, c(list(first_counts), other_counts))
colnames(KeyForNewData2) <- c(
  "UserID", "Video", "Delete", "end", "heartbeat", "pause", "play",
  "playback_rate_change", "seek", "start", "subtitle_change",
  "volume_change", "wait"
)

# Drop the unused "Delete" column and keep the tidy set of counters.
KeyForNewData2 <- subset(KeyForNewData2, select = c(
  "UserID", "Video", "end", "heartbeat", "pause", "play",
  "playback_rate_change", "seek", "start", "subtitle_change",
  "volume_change", "wait"
))

# Approximate seconds watched -- presumably heartbeat events fire every 5
# seconds; confirm against the Coursera export documentation.
KeyForNewData2$Secs <- KeyForNewData2$heartbeat * 5
library(shiny)

# UI definition: a sidebar holding the video/group selectors and a tabset of
# diagnostic plots in the main panel.
ui <- fluidPage(
  titlePanel("Clickstream Data Analysis"),
  sidebarLayout(
    sidebarPanel(
      selectInput("video", "Video:", choices = video_names),
      selectInput("group", "User Group: (10 users per user group)", choices = c(1, 2, 3)),
      hr(),
      helpText("Data from Coursera")
    ),
    mainPanel(
      tabsetPanel(
        type = "tabs",
        tabPanel("Histogram", plotOutput("histplot"), verbatimTextOutput("basic")),
        tabPanel("Cumulative", plotOutput("cumulative")),
        tabPanel("Density", plotOutput("ggplot")),
        tabPanel("BoxPlot", plotOutput("BoxPlot"), plotOutput("BoxPlot2")),
        # tabPanel("Density plot per user", plotOutput("ggplot2")),
        tabPanel("Graph", plotOutput("graph"), plotOutput("graph2"), plotOutput("graph3"))
      )
    )
  )
)
# Define server logic: every output filters the raw clickstream (NewData2) or
# the per-user key counts (KeyForNewData2) to the selected video, then plots.
server <- function(input, output) {
  # Clickstream rows for the currently selected video.
  selected_events <- reactive({
    subset(NewData2, NewData2$video_name == input$video)
  })
  # Per-user click counts for the currently selected video.
  selected_keys <- reactive({
    subset(KeyForNewData2, KeyForNewData2$Video == input$video)
  })

  # Scatter of one click-count column against seconds watched (one point per
  # user), with mean reference lines and point-index labels.
  plot_click_scatter <- function(keys, column, main, ylab, color) {
    clicks <- keys[[column]]
    plot(keys$Secs, clicks,
      main = main,
      xlim = c(0, max(keys$Secs) + 100),
      ylim = c(0, max(clicks) + 10),
      xlab = "Seconds",
      ylab = ylab,
      pch = 18, col = color
    )
    abline(v = mean(keys$Secs))
    abline(h = mean(clicks))
    text(keys$Secs, clicks, pos = 3, cex = 0.6)
  }

  output$histplot <- renderPlot({
    events <- selected_events()
    # NOTE(review): ylim is driven by the maximum timecode rather than by bin
    # counts (kept from the original); confirm this is the intended scaling.
    hist(events$timecode,
      main = "Histogram of the time user spent by video",
      xlim = range(events$timecode),
      ylim = c(0, max(events$timecode)),
      xlab = "Seconds", breaks = 50
    )
  })

  output$basic <- renderPrint({
    summary(selected_events()$timecode)
  })

  output$cumulative <- renderPlot({
    plot(ecdf(selected_events()$timecode),
      main = "Empirical Cumulative Distribution of the time by video",
      xlab = "time by seconds"
    )
  })

  output$ggplot <- renderPlot({
    # Red curve: all videos; black curve: the selected video.
    plot(density(NewData2$timecode),
      col = "red", xlim = c(0, 2500), ylim = c(0, 0.0035),
      xlab = "time in seconds",
      main = "Density plot of the time user spent, Red is for all videos, black is for the selected video"
    )
    lines(density(selected_events()$timecode), col = "black")
  })

  output$BoxPlot <- renderPlot({
    boxplot(selected_events()$timecode, outline = TRUE, col = "bisque")
    title("Boxplot for the duration of the video")
  })

  output$BoxPlot2 <- renderPlot({
    boxplot(timecode ~ key, data = selected_events(), outline = TRUE, col = "bisque")
    title("Comparing boxplot()s by different keys")
  })

  output$graph <- renderPlot({
    plot_click_scatter(selected_keys(), "pause",
      main = "Pause clicks per students per video",
      ylab = "Number of Pause Clicks", color = "red"
    )
  })

  output$graph2 <- renderPlot({
    plot_click_scatter(selected_keys(), "wait",
      main = "wait clicks per students per video",
      ylab = "Number of wait Clicks", color = "Green"
    )
  })

  output$graph3 <- renderPlot({
    # Bug fix: the original labelled points with KeySub$Seek (capital S), a
    # nonexistent column, so the labels were silently dropped; the seek
    # counts live in the "seek" column.
    plot_click_scatter(selected_keys(), "seek",
      main = "Seek clicks per students per video",
      ylab = "Number of seek Clicks", color = "blue"
    )
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/badge.R
\name{badge_bioc_download}
\alias{badge_bioc_download}
\title{badge_bioc_download}
\usage{
badge_bioc_download(pkg = NULL, by, color, type = "distinct")
}
\arguments{
\item{pkg}{package. If \code{NULL} (the default) the package
is determined via the DESCRIPTION file.}
\item{by}{one of total or month}
\item{color}{badge color}
\item{type}{one of distinct and total}
}
\value{
badge in markdown syntax
}
\description{
Badge reporting the Bioconductor download count
}
\author{
Guangchuang Yu
}
|
/man/badge_bioc_download.Rd
|
permissive
|
GuangchuangYu/badger
|
R
| false
| true
| 573
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/badge.R
\name{badge_bioc_download}
\alias{badge_bioc_download}
\title{badge_bioc_download}
\usage{
badge_bioc_download(pkg = NULL, by, color, type = "distinct")
}
\arguments{
\item{pkg}{package. If \code{NULL} (the default) the package
is determined via the DESCRIPTION file.}
\item{by}{one of total or month}
\item{color}{badge color}
\item{type}{one of distinct and total}
}
\value{
badge in markdown syntax
}
\description{
Badge reporting the Bioconductor download count
}
\author{
Guangchuang Yu
}
|
library(MASS)

## File handling for Random Jungle.
rjungleExe <- file.path("rjungle")
rjungleInFile <- file.path("input/finaldatainput.scrambled")
rjungleOutFile <- file.path("output/rjungle.scrambled")

## One seed per row; one Random Jungle run per seed.
random_seed <- read.table(paste("input/random_seed"), header = FALSE)

system("module load randomjungle")

n <- dim(random_seed)[1] # 500
# n=3
# NOTE(review): the loop starts at i = 2, so the first seed is skipped --
# presumably that run was done separately; confirm before relying on this.
for (i in 2:n) {
  ## Assemble and run one permutation-importance Random Jungle call.
  rjunglePermCMD <- paste(
    rjungleExe,
    "-f", rjungleInFile,
    "-v",                 ## show processing
    "-i2",                ## chose permutation-Importance
    "-m 19",
    "-t 1000",            ## 1000 trees
    "-D SCID_AgeOnset",   ## response variable name
    "-z", random_seed[i, ],
    "-o", paste(rjungleOutFile, ".", i, sep = "")
  ) ## out file path
  try(system(rjunglePermCMD))
}
|
/workingset_lisa/process_rjungle.scrambled.R
|
no_license
|
iqbalrosiadi/intern_igmm
|
R
| false
| false
| 721
|
r
|
library(MASS)

## File handling for Random Jungle.
rjungleExe <- file.path("rjungle")
rjungleInFile <- file.path("input/finaldatainput.scrambled")
rjungleOutFile <- file.path("output/rjungle.scrambled")

## One seed per row; one Random Jungle run per seed.
random_seed <- read.table(paste("input/random_seed"), header = FALSE)

system("module load randomjungle")

n <- dim(random_seed)[1] # 500
# n=3
# NOTE(review): the loop starts at i = 2, so the first seed is skipped --
# presumably that run was done separately; confirm before relying on this.
for (i in 2:n) {
  ## Assemble and run one permutation-importance Random Jungle call.
  rjunglePermCMD <- paste(
    rjungleExe,
    "-f", rjungleInFile,
    "-v",                 ## show processing
    "-i2",                ## chose permutation-Importance
    "-m 19",
    "-t 1000",            ## 1000 trees
    "-D SCID_AgeOnset",   ## response variable name
    "-z", random_seed[i, ],
    "-o", paste(rjungleOutFile, ".", i, sep = "")
  ) ## out file path
  try(system(rjunglePermCMD))
}
|
#' Given the matrix X below
X <- cbind(x1 = 1, x2 = 1:10, x3 = sin(1:10))
#' and a length-3 coefficient vector beta,
beta <- c(0.5, -1, 4.3)
#' we call `X[,1]` x1, `X[,2]` x2 and `X[,3]` x3.
#' The vector y is x1 * beta[1] + x2 * beta[2] + x3 * beta[3].
#' Compute y with the matrix product `%*%`.
#' ps. class(y) should be "matrix" and dim(y) should be c(10, 1).
y <- X %*% beta
#' epsilon is a randomly generated noise vector
epsilon <- c(-1.24462014500259, 0.146172987456978, 1.56426869006839, -0.856920339050681,
             -1.15277300953772, 0.717919832604741, -0.270623615316431, -1.66281578024014,
             -1.15557078461633, -0.730253254897595)
#' Contaminate y with the noise.
y <- y + epsilon
#' Suppose we observe only X and y, not epsilon or beta. How can beta be
#' recovered from X and y? This is a standard regression problem.
#' Using the ordinary-least-squares estimator described at
#' <https://en.wikipedia.org/wiki/Ordinary_least_squares#Estimation>
#' together with the matrix operations from this chapter, compute the
#' estimate of beta. You may write several lines of code, but store the
#' result in the variable beta.hat.
#' ps. class(beta.hat) should be "matrix", dim(beta.hat) should be c(3, 1),
#'     and rownames(beta.hat) should be c("x1", "x2", "x3").
beta.hat <- solve(t(X) %*% X) %*% t(X) %*% y
#' Compare beta.hat with beta to see how well least squares recovers it.
|
/hw2/RBasic-05-HW.R
|
no_license
|
behappycc/Data-Science
|
R
| false
| false
| 1,402
|
r
|
#' Given the matrix X below
X <- cbind(x1 = 1, x2 = 1:10, x3 = sin(1:10))
#' and a length-3 coefficient vector beta,
beta <- c(0.5, -1, 4.3)
#' we call `X[,1]` x1, `X[,2]` x2 and `X[,3]` x3.
#' The vector y is x1 * beta[1] + x2 * beta[2] + x3 * beta[3].
#' Compute y with the matrix product `%*%`.
#' ps. class(y) should be "matrix" and dim(y) should be c(10, 1).
y <- X %*% beta
#' epsilon is a randomly generated noise vector
epsilon <- c(-1.24462014500259, 0.146172987456978, 1.56426869006839, -0.856920339050681,
             -1.15277300953772, 0.717919832604741, -0.270623615316431, -1.66281578024014,
             -1.15557078461633, -0.730253254897595)
#' Contaminate y with the noise.
y <- y + epsilon
#' Suppose we observe only X and y, not epsilon or beta. How can beta be
#' recovered from X and y? This is a standard regression problem.
#' Using the ordinary-least-squares estimator described at
#' <https://en.wikipedia.org/wiki/Ordinary_least_squares#Estimation>
#' together with the matrix operations from this chapter, compute the
#' estimate of beta. You may write several lines of code, but store the
#' result in the variable beta.hat.
#' ps. class(beta.hat) should be "matrix", dim(beta.hat) should be c(3, 1),
#'     and rownames(beta.hat) should be c("x1", "x2", "x3").
beta.hat <- solve(t(X) %*% X) %*% t(X) %*% y
#' Compare beta.hat with beta to see how well least squares recovers it.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{cells_stub_grand_summary}
\alias{cells_stub_grand_summary}
\title{Location helper for targeting the stub cells in a grand summary}
\usage{
cells_stub_grand_summary(rows = everything())
}
\arguments{
\item{rows}{\emph{Rows to target}
\verb{<row-targeting expression>} // \emph{default:} \code{everything()}
We can specify which rows should be targeted. The default \code{\link[=everything]{everything()}}
results in all rows in \code{columns} being formatted. Alternatively, we can
supply a vector of row captions within \code{\link[=c]{c()}}, a vector of row indices, or a
select helper function. Examples of select helper functions include
\code{\link[=starts_with]{starts_with()}}, \code{\link[=ends_with]{ends_with()}}, \code{\link[=contains]{contains()}}, \code{\link[=matches]{matches()}}, \code{\link[=one_of]{one_of()}},
\code{\link[=num_range]{num_range()}}, and \code{\link[=everything]{everything()}}. We can also use expressions to filter
down to the rows we need (e.g., \verb{[colname_1] > 100 & [colname_2] < 50}).}
}
\value{
A list object with the classes \code{cells_stub_grand_summary} and
\code{location_cells}.
}
\description{
The \code{cells_stub_grand_summary()} function is used to target the stub cells of
a grand summary and it is useful when applying a footnote with
\code{\link[=tab_footnote]{tab_footnote()}} or adding custom styles with \code{\link[=tab_style]{tab_style()}}. The function is
expressly used in each of those functions' \code{locations} argument. The
'stub_grand_summary' location is generated by the \code{\link[=grand_summary_rows]{grand_summary_rows()}}
function.
}
\section{Targeting grand summary stub cells with \code{rows}}{
Targeting the stub cells of a grand summary row is done through the \code{rows}
argument. Grand summary cells in the stub will have ID values that can be
used much like column names in the \code{columns}-targeting scenario. We can use
simpler \strong{tidyselect}-style expressions (the select helpers should work well
here) and we can use quoted row identifiers in \code{c()}. It's also possible to
use row indices (e.g., \code{c(3, 5, 6)}) that correspond to the row number of a
grand summary row.
}
\section{Overview of location helper functions}{
Location helper functions can be used to target cells with virtually any
function that has a \code{locations} argument. Here is a listing of all of the
location helper functions, with locations corresponding roughly from top to
bottom of a table:
\itemize{
\item \code{\link[=cells_title]{cells_title()}}: targets the table title or the table subtitle depending on
the value given to the \code{groups} argument (\code{"title"} or \code{"subtitle"}).
\item \code{\link[=cells_stubhead]{cells_stubhead()}}: targets the stubhead location, a cell of which is only
available when there is a stub; a label in that location can be created by
using the \code{\link[=tab_stubhead]{tab_stubhead()}} function.
\item \code{\link[=cells_column_spanners]{cells_column_spanners()}}: targets the spanner column labels with the
\code{spanners} argument; spanner column labels appear above the column labels.
\item \code{\link[=cells_column_labels]{cells_column_labels()}}: targets the column labels with its \code{columns}
argument.
\item \code{\link[=cells_row_groups]{cells_row_groups()}}: targets the row group labels in any available row
groups using the \code{groups} argument.
\item \code{\link[=cells_stub]{cells_stub()}}: targets row labels in the table stub using the \code{rows}
argument.
\item \code{\link[=cells_body]{cells_body()}}: targets data cells in the table body using intersections of
\code{columns} and \code{rows}.
\item \code{\link[=cells_summary]{cells_summary()}}: targets summary cells in the table body using the
\code{groups} argument and intersections of \code{columns} and \code{rows}.
\item \code{\link[=cells_grand_summary]{cells_grand_summary()}}: targets cells of the table's grand summary using
intersections of \code{columns} and \code{rows}.
\item \code{\link[=cells_stub_summary]{cells_stub_summary()}}: targets summary row labels in the table stub using
the \code{groups} and \code{rows} arguments.
\item \code{\link[=cells_stub_grand_summary]{cells_stub_grand_summary()}}: targets grand summary row labels in the table
stub using the \code{rows} argument.
\item \code{\link[=cells_footnotes]{cells_footnotes()}}: targets all footnotes in the table footer (cannot be
used with \code{\link[=tab_footnote]{tab_footnote()}}).
\item \code{\link[=cells_source_notes]{cells_source_notes()}}: targets all source notes in the table footer
(cannot be used with \code{\link[=tab_footnote]{tab_footnote()}}).
}
When using any of the location helper functions with an appropriate function
that has a \code{locations} argument (e.g., \code{\link[=tab_style]{tab_style()}}), multiple locations
can be targeted by enclosing several \verb{cells_*()} helper functions in a
\code{list()} (e.g., \code{list(cells_body(), cells_grand_summary())}).
}
\section{Examples}{
Use a portion of the \code{\link{countrypops}} dataset to create a \strong{gt} table. Add
some styling to a grand summary stub cell with the \code{\link[=tab_style]{tab_style()}} function and
using \code{cells_stub_grand_summary()} in the \code{locations} argument.
\if{html}{\out{<div class="sourceCode r">}}\preformatted{countrypops |>
dplyr::filter(country_name == "Spain", year < 1970) |>
dplyr::select(-contains("country")) |>
gt(rowname_col = "year") |>
fmt_number(
columns = population,
decimals = 0
) |>
grand_summary_rows(
columns = population,
fns = list(change = ~max(.) - min(.)),
fmt = ~ fmt_integer(.)
) |>
tab_style(
style = cell_text(weight = "bold", transform = "uppercase"),
locations = cells_stub_grand_summary(rows = "change")
)
}\if{html}{\out{</div>}}
\if{html}{\out{
<img src="https://raw.githubusercontent.com/rstudio/gt/master/images/man_cells_stub_grand_summary_1.png" alt="This image of a table was generated from the first code example in the `cells_stub_grand_summary()` help file." style="width:100\%;">
}}
}
\section{Function ID}{
8-20
}
\section{Function Introduced}{
\code{v0.3.0} (May 12, 2021)
}
\seealso{
Other helper functions:
\code{\link{adjust_luminance}()},
\code{\link{cell_borders}()},
\code{\link{cell_fill}()},
\code{\link{cell_text}()},
\code{\link{cells_body}()},
\code{\link{cells_column_labels}()},
\code{\link{cells_column_spanners}()},
\code{\link{cells_footnotes}()},
\code{\link{cells_grand_summary}()},
\code{\link{cells_row_groups}()},
\code{\link{cells_source_notes}()},
\code{\link{cells_stub_summary}()},
\code{\link{cells_stubhead}()},
\code{\link{cells_stub}()},
\code{\link{cells_summary}()},
\code{\link{cells_title}()},
\code{\link{currency}()},
\code{\link{default_fonts}()},
\code{\link{define_units}()},
\code{\link{escape_latex}()},
\code{\link{from_column}()},
\code{\link{google_font}()},
\code{\link{gt_latex_dependencies}()},
\code{\link{html}()},
\code{\link{md}()},
\code{\link{pct}()},
\code{\link{px}()},
\code{\link{random_id}()},
\code{\link{stub}()},
\code{\link{system_fonts}()}
}
\concept{helper functions}
|
/man/cells_stub_grand_summary.Rd
|
permissive
|
rstudio/gt
|
R
| false
| true
| 7,242
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{cells_stub_grand_summary}
\alias{cells_stub_grand_summary}
\title{Location helper for targeting the stub cells in a grand summary}
\usage{
cells_stub_grand_summary(rows = everything())
}
\arguments{
\item{rows}{\emph{Rows to target}
\verb{<row-targeting expression>} // \emph{default:} \code{everything()}
We can specify which rows should be targeted. The default \code{\link[=everything]{everything()}}
results in all rows in \code{columns} being formatted. Alternatively, we can
supply a vector of row captions within \code{\link[=c]{c()}}, a vector of row indices, or a
select helper function. Examples of select helper functions include
\code{\link[=starts_with]{starts_with()}}, \code{\link[=ends_with]{ends_with()}}, \code{\link[=contains]{contains()}}, \code{\link[=matches]{matches()}}, \code{\link[=one_of]{one_of()}},
\code{\link[=num_range]{num_range()}}, and \code{\link[=everything]{everything()}}. We can also use expressions to filter
down to the rows we need (e.g., \verb{[colname_1] > 100 & [colname_2] < 50}).}
}
\value{
A list object with the classes \code{cells_stub_grand_summary} and
\code{location_cells}.
}
\description{
The \code{cells_stub_grand_summary()} function is used to target the stub cells of
a grand summary and it is useful when applying a footnote with
\code{\link[=tab_footnote]{tab_footnote()}} or adding custom styles with \code{\link[=tab_style]{tab_style()}}. The function is
expressly used in each of those functions' \code{locations} argument. The
'stub_grand_summary' location is generated by the \code{\link[=grand_summary_rows]{grand_summary_rows()}}
function.
}
\section{Targeting grand summary stub cells with \code{rows}}{
Targeting the stub cells of a grand summary row is done through the \code{rows}
argument. Grand summary cells in the stub will have ID values that can be
used much like column names in the \code{columns}-targeting scenario. We can use
simpler \strong{tidyselect}-style expressions (the select helpers should work well
here) and we can use quoted row identifiers in \code{c()}. It's also possible to
use row indices (e.g., \code{c(3, 5, 6)}) that correspond to the row number of a
grand summary row.
}
\section{Overview of location helper functions}{
Location helper functions can be used to target cells with virtually any
function that has a \code{locations} argument. Here is a listing of all of the
location helper functions, with locations corresponding roughly from top to
bottom of a table:
\itemize{
\item \code{\link[=cells_title]{cells_title()}}: targets the table title or the table subtitle depending on
the value given to the \code{groups} argument (\code{"title"} or \code{"subtitle"}).
\item \code{\link[=cells_stubhead]{cells_stubhead()}}: targets the stubhead location, a cell of which is only
available when there is a stub; a label in that location can be created by
using the \code{\link[=tab_stubhead]{tab_stubhead()}} function.
\item \code{\link[=cells_column_spanners]{cells_column_spanners()}}: targets the spanner column labels with the
\code{spanners} argument; spanner column labels appear above the column labels.
\item \code{\link[=cells_column_labels]{cells_column_labels()}}: targets the column labels with its \code{columns}
argument.
\item \code{\link[=cells_row_groups]{cells_row_groups()}}: targets the row group labels in any available row
groups using the \code{groups} argument.
\item \code{\link[=cells_stub]{cells_stub()}}: targets row labels in the table stub using the \code{rows}
argument.
\item \code{\link[=cells_body]{cells_body()}}: targets data cells in the table body using intersections of
\code{columns} and \code{rows}.
\item \code{\link[=cells_summary]{cells_summary()}}: targets summary cells in the table body using the
\code{groups} argument and intersections of \code{columns} and \code{rows}.
\item \code{\link[=cells_grand_summary]{cells_grand_summary()}}: targets cells of the table's grand summary using
intersections of \code{columns} and \code{rows}.
\item \code{\link[=cells_stub_summary]{cells_stub_summary()}}: targets summary row labels in the table stub using
the \code{groups} and \code{rows} arguments.
\item \code{\link[=cells_stub_grand_summary]{cells_stub_grand_summary()}}: targets grand summary row labels in the table
stub using the \code{rows} argument.
\item \code{\link[=cells_footnotes]{cells_footnotes()}}: targets all footnotes in the table footer (cannot be
used with \code{\link[=tab_footnote]{tab_footnote()}}).
\item \code{\link[=cells_source_notes]{cells_source_notes()}}: targets all source notes in the table footer
(cannot be used with \code{\link[=tab_footnote]{tab_footnote()}}).
}
When using any of the location helper functions with an appropriate function
that has a \code{locations} argument (e.g., \code{\link[=tab_style]{tab_style()}}), multiple locations
can be targeted by enclosing several \verb{cells_*()} helper functions in a
\code{list()} (e.g., \code{list(cells_body(), cells_grand_summary())}).
}
\section{Examples}{
Use a portion of the \code{\link{countrypops}} dataset to create a \strong{gt} table. Add
some styling to a grand summary stub cell with the \code{\link[=tab_style]{tab_style()}} function and
using \code{cells_stub_grand_summary()} in the \code{locations} argument.
\if{html}{\out{<div class="sourceCode r">}}\preformatted{countrypops |>
dplyr::filter(country_name == "Spain", year < 1970) |>
dplyr::select(-contains("country")) |>
gt(rowname_col = "year") |>
fmt_number(
columns = population,
decimals = 0
) |>
grand_summary_rows(
columns = population,
fns = list(change = ~max(.) - min(.)),
fmt = ~ fmt_integer(.)
) |>
tab_style(
style = cell_text(weight = "bold", transform = "uppercase"),
locations = cells_stub_grand_summary(rows = "change")
)
}\if{html}{\out{</div>}}
\if{html}{\out{
<img src="https://raw.githubusercontent.com/rstudio/gt/master/images/man_cells_stub_grand_summary_1.png" alt="This image of a table was generated from the first code example in the `cells_stub_grand_summary()` help file." style="width:100\%;">
}}
}
\section{Function ID}{
8-20
}
\section{Function Introduced}{
\code{v0.3.0} (May 12, 2021)
}
\seealso{
Other helper functions:
\code{\link{adjust_luminance}()},
\code{\link{cell_borders}()},
\code{\link{cell_fill}()},
\code{\link{cell_text}()},
\code{\link{cells_body}()},
\code{\link{cells_column_labels}()},
\code{\link{cells_column_spanners}()},
\code{\link{cells_footnotes}()},
\code{\link{cells_grand_summary}()},
\code{\link{cells_row_groups}()},
\code{\link{cells_source_notes}()},
\code{\link{cells_stub_summary}()},
\code{\link{cells_stubhead}()},
\code{\link{cells_stub}()},
\code{\link{cells_summary}()},
\code{\link{cells_title}()},
\code{\link{currency}()},
\code{\link{default_fonts}()},
\code{\link{define_units}()},
\code{\link{escape_latex}()},
\code{\link{from_column}()},
\code{\link{google_font}()},
\code{\link{gt_latex_dependencies}()},
\code{\link{html}()},
\code{\link{md}()},
\code{\link{pct}()},
\code{\link{px}()},
\code{\link{random_id}()},
\code{\link{stub}()},
\code{\link{system_fonts}()}
}
\concept{helper functions}
|
context("each_of")

test_that("each_of", {
  expected <- letters[1:10]
  completed <- character()
  order_seen <- integer()

  # Fire one tiny delayed promise per letter; each resolution callback
  # records which element finished and at which original position.
  promises <- lapply(seq_along(expected), function(k) {
    delay(1 / 1000)$then(function(value) {
      completed <<- c(completed, expected[[k]])
      order_seen <<- c(order_seen, k)
    })
  })

  # Once every promise has resolved, all ten letters must have been seen
  # exactly once (completion order itself is not guaranteed, hence sort()).
  dx <- when_all(.list = promises)$then(function(value) {
    expect_identical(sort(order_seen), seq_along(expected))
    expect_identical(sort(completed), sort(expected))
  })
})
|
/tests/testthat/test-each-of.R
|
permissive
|
strategist922/async-2
|
R
| false
| false
| 438
|
r
|
context("each_of")

test_that("each_of", {
  expected <- letters[1:10]
  completed <- character()
  order_seen <- integer()

  # Fire one tiny delayed promise per letter; each resolution callback
  # records which element finished and at which original position.
  promises <- lapply(seq_along(expected), function(k) {
    delay(1 / 1000)$then(function(value) {
      completed <<- c(completed, expected[[k]])
      order_seen <<- c(order_seen, k)
    })
  })

  # Once every promise has resolved, all ten letters must have been seen
  # exactly once (completion order itself is not guaranteed, hence sort()).
  dx <- when_all(.list = promises)$then(function(value) {
    expect_identical(sort(order_seen), seq_along(expected))
    expect_identical(sort(completed), sort(expected))
  })
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/factorial_function.R
\name{factorial_function}
\alias{factorial_function}
\title{A Factorial Function}
\usage{
factorial_function(n)
}
\arguments{
\item{n}{The number whose factorial should be computed.}
}
\description{
This function computes the factorial of \code{n}, given a few conditions.
}
\examples{
factorial_function(5)
}
\keyword{factorial}
|
/man/factorial_function.Rd
|
no_license
|
James9669/R-Cats-Factorial
|
R
| false
| true
| 337
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/factorial_function.R
\name{factorial_function}
\alias{factorial_function}
\title{A Factorial Function}
\usage{
factorial_function(n)
}
\arguments{
\item{n}{The number whose factorial should be computed.}
}
\description{
This function computes the factorial of \code{n}, given a few conditions.
}
\examples{
factorial_function(5)
}
\keyword{factorial}
|
# -------------------------------------------------------------------------
# Pairwise Venn diagrams of sequence overlap between the two replicate
# libraries of each insect species (libraries already passed filter 2).
#   Input : <Species><rep>_filt2.fasta files found in `workingDir`
#   Output: one PNG per species under 5.3.Replicates_Filter/5.3.1.Filter_2
# NOTE(review): PNGs are written under the 5.3 directory tree while the
# FASTA input lives under 5.4 -- confirm that is intentional.
# NOTE(review): the twelve sections below differ only in file names, labels
# and label placement; consider factoring them into a helper function.
# Fix applied: category label typo "P.xylosetlla_2" -> "P.xylostella_2".
# -------------------------------------------------------------------------
library(Biostrings)
library(VennDiagram)
workingDir <- "E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.4.Insect_Libraries_Filtered/5.4.1.Filter_2/"
setwd(workingDir)
getwd()
# Wrap each label in italic() so species names render italicised on the plot.
make.italic <- function(x) as.expression(lapply(x, function(y) bquote(italic(.(y)))))
############## BLATTELLA GERMANICA ################
Bger1 <- readDNAStringSet("Bger1_filt2.fasta")
Bger1_seqs <- as.data.frame(Bger1)$x
Bger2 <- readDNAStringSet("Bger2_filt2.fasta")
Bger2_seqs <- as.data.frame(Bger2)$x
total_seq_Bger1 <- length(Bger1_seqs)
total_seq_Bger2 <- length(Bger2_seqs)
IS_Bger1_Bger2 <- sum(Bger1%in%Bger2)  # replicate-1 sequences also found in replicate 2
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/B.germanica_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Bger1,area2 = total_seq_Bger2,
    cross.area = IS_Bger1_Bger2, category = make.italic(c("B.germanica_1","B.germanica_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.dist = c(-0.42,-0.4), cat.pos = c(15, 350))
dev.off()
############## ONCOPELTUS FASCIATUS ################
Ofas1 <- readDNAStringSet("Ofas1_filt2.fasta")
Ofas1_seqs <- as.data.frame(Ofas1)$x
Ofas2 <- readDNAStringSet("Ofas2_filt2.fasta")
Ofas2_seqs <- as.data.frame(Ofas2)$x
total_seq_Ofas1 <- length(Ofas1_seqs)
total_seq_Ofas2 <- length(Ofas2_seqs)
IS_Ofas1_Ofas2 <- sum(Ofas1%in%Ofas2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/O.fasciatus_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Ofas1,area2 = total_seq_Ofas2,
    cross.area = IS_Ofas1_Ofas2, category = make.italic(c("O.fasciatus_1","O.fasciatus_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.dist = c(-0.42,-0.4), cat.pos = c(15, 350))
dev.off()
############## ACYRTHOSIPHON PISUM ################
Apisum1 <- readDNAStringSet("Apisum1_filt2.fasta")
Apisum1_seqs <- as.data.frame(Apisum1)$x
Apisum2 <- readDNAStringSet("Apisum2_filt2.fasta")
Apisum2_seqs <- as.data.frame(Apisum2)$x
total_seq_Apisum1 <- length(Apisum1_seqs)
total_seq_Apisum2 <- length(Apisum2_seqs)
IS_Apisum1_Apisum2 <- sum(Apisum1%in%Apisum2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/A.pisum_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Apisum1,area2 = total_seq_Apisum2,
    cross.area = IS_Apisum1_Apisum2, category = make.italic(c("A.pisum_1","A.pisum_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.dist = c(-0.43,-0.4), cat.pos = c(15, 350))
dev.off()
############## TRIBOLIUM CASTANEUM ################
Tcas1 <- readDNAStringSet("Tcas1_filt2.fasta")
Tcas1_seqs <- as.data.frame(Tcas1)$x
Tcas2 <- readDNAStringSet("Tcas2_filt2.fasta")
Tcas2_seqs <- as.data.frame(Tcas2)$x
total_seq_Tcas1 <- length(Tcas1_seqs)
total_seq_Tcas2 <- length(Tcas2_seqs)
IS_Tcas1_Tcas2 <- sum(Tcas1%in%Tcas2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/T.castaneum_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Tcas1,area2 = total_seq_Tcas2,
    cross.area = IS_Tcas1_Tcas2, category = make.italic(c("T.castaneum_1","T.castaneum_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.pos = c(350, 15), cat.dist = c(-0.425, -0.42), inverted = T)
dev.off()
############## DIABROTICA VIRGIFERA ################
Dvir1 <- readDNAStringSet("Dvir1_filt2.fasta")
Dvir1_seqs <- as.data.frame(Dvir1)$x
Dvir2 <- readDNAStringSet("Dvir2_filt2.fasta")
Dvir2_seqs <- as.data.frame(Dvir2)$x
total_seq_Dvir1 <- length(Dvir1_seqs)
total_seq_Dvir2 <- length(Dvir2_seqs)
IS_Dvir1_Dvir2 <- sum(Dvir1%in%Dvir2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/D.virgifera_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Dvir1,area2 = total_seq_Dvir2,
    cross.area = IS_Dvir1_Dvir2, category = make.italic(c("D.virgifera_1","D.virgifera_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.pos = c(350, 15), cat.dist = c(-0.425, -0.44), inverted = T)
dev.off()
############## APIS MELLIFERA ################
Amell1 <- readDNAStringSet("Amell1_filt2.fasta")
Amell1_seqs <- as.data.frame(Amell1)$x
Amell2 <- readDNAStringSet("Amell2_filt2.fasta")
Amell2_seqs <- as.data.frame(Amell2)$x
total_seq_Amell1 <- length(Amell1_seqs)
total_seq_Amell2 <- length(Amell2_seqs)
IS_Amell1_Amell2 <- sum(Amell1%in%Amell2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/A.mellifera_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Amell1,area2 = total_seq_Amell2,
    cross.area = IS_Amell1_Amell2, category = make.italic(c("A.mellifera_1","A.mellifera_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.pos = c(350, 15), cat.dist = c(-0.425, -0.43), inverted = T)
dev.off()
############## BOMBUS TERRESTRIS ################
Bterr1 <- readDNAStringSet("Bterr1_filt2.fasta")
Bterr1_seqs <- as.data.frame(Bterr1)$x
Bterr2 <- readDNAStringSet("Bterr2_filt2.fasta")
Bterr2_seqs <- as.data.frame(Bterr2)$x
total_seq_Bterr1 <- length(Bterr1_seqs)
total_seq_Bterr2 <- length(Bterr2_seqs)
IS_Bterr1_Bterr2 <- sum(Bterr1%in%Bterr2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/B.terrestris_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Bterr1,area2 = total_seq_Bterr2,
    cross.area = IS_Bterr1_Bterr2, category = make.italic(c("B.terrestris_1","B.terrestris_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.pos = c(5, 360), cat.dist = c(-0.425, -0.34))
dev.off()
############## PLUTELLA XYLOSTELLA ################
Pxyl1 <- readDNAStringSet("Pxyl1_filt2.fasta")
Pxyl1_seqs <- as.data.frame(Pxyl1)$x
Pxyl2 <- readDNAStringSet("Pxyl2_filt2.fasta")
Pxyl2_seqs <- as.data.frame(Pxyl2)$x
total_seq_Pxyl1 <- length(Pxyl1_seqs)
total_seq_Pxyl2 <- length(Pxyl2_seqs)
IS_Pxyl1_Pxyl2 <- sum(Pxyl1%in%Pxyl2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/P.xylostella_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Pxyl1,area2 = total_seq_Pxyl2,
    cross.area = IS_Pxyl1_Pxyl2, category = make.italic(c("P.xylostella_1","P.xylostella_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.pos = c(10, 340), cat.dist = c(-0.425, -0.425))
dev.off()
############## TRICHOPLUSIA NI ################
Tni1 <- readDNAStringSet("Tni1_filt2.fasta")
Tni1_seqs <- as.data.frame(Tni1)$x
Tni2 <- readDNAStringSet("Tni2_filt2.fasta")
Tni2_seqs <- as.data.frame(Tni2)$x
total_seq_Tni1 <- length(Tni1_seqs)
total_seq_Tni2 <- length(Tni2_seqs)
IS_Tni1_Tni2 <- sum(Tni1%in%Tni2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/T.ni_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Tni1,area2 = total_seq_Tni2,
    cross.area = IS_Tni1_Tni2, category = make.italic(c("T.ni_1","T.ni_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.pos = c(5, 360), cat.dist = c(-0.425, -0.395))
dev.off()
############## AEDES AEGYPTI ################
Aaeg1 <- readDNAStringSet("Aaeg1_filt2.fasta")
Aaeg1_seqs <- as.data.frame(Aaeg1)$x
Aaeg2 <- readDNAStringSet("Aaeg2_filt2.fasta")
Aaeg2_seqs <- as.data.frame(Aaeg2)$x
total_seq_Aaeg1 <- length(Aaeg1_seqs)
total_seq_Aaeg2 <- length(Aaeg2_seqs)
IS_Aaeg1_Aaeg2 <- sum(Aaeg1%in%Aaeg2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/A.aegypti_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Aaeg1,area2 = total_seq_Aaeg2,
    cross.area = IS_Aaeg1_Aaeg2, category = make.italic(c("A.aegypti_1","A.aegypti_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.pos = c(5, 360), cat.dist = c(-0.425, -0.34))
dev.off()
############## MUSCA DOMESTICA ################
Mdom1 <- readDNAStringSet("Mdom1_filt2.fasta")
Mdom1_seqs <- as.data.frame(Mdom1)$x
Mdom2 <- readDNAStringSet("Mdom2_filt2.fasta")
Mdom2_seqs <- as.data.frame(Mdom2)$x
total_seq_Mdom1 <- length(Mdom1_seqs)
total_seq_Mdom2 <- length(Mdom2_seqs)
IS_Mdom1_Mdom2 <- sum(Mdom1%in%Mdom2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/M.domestica_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Mdom1,area2 = total_seq_Mdom2,
    cross.area = IS_Mdom1_Mdom2, category = make.italic(c("M.domestica_1","M.domestica_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.pos = c(350, 15), cat.dist = c(-0.440, -0.45), inverted = T)
dev.off()
|
/Replicates_VennDiagram_Filter2.R
|
no_license
|
PaniPaniello/TFM
|
R
| false
| false
| 10,263
|
r
|
# -------------------------------------------------------------------------
# Pairwise Venn diagrams of sequence overlap between the two replicate
# libraries of each insect species (libraries already passed filter 2).
#   Input : <Species><rep>_filt2.fasta files found in `workingDir`
#   Output: one PNG per species under 5.3.Replicates_Filter/5.3.1.Filter_2
# NOTE(review): PNGs are written under the 5.3 directory tree while the
# FASTA input lives under 5.4 -- confirm that is intentional.
# NOTE(review): the twelve sections below differ only in file names, labels
# and label placement; consider factoring them into a helper function.
# Fix applied: category label typo "P.xylosetlla_2" -> "P.xylostella_2".
# -------------------------------------------------------------------------
library(Biostrings)
library(VennDiagram)
workingDir <- "E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.4.Insect_Libraries_Filtered/5.4.1.Filter_2/"
setwd(workingDir)
getwd()
# Wrap each label in italic() so species names render italicised on the plot.
make.italic <- function(x) as.expression(lapply(x, function(y) bquote(italic(.(y)))))
############## BLATTELLA GERMANICA ################
Bger1 <- readDNAStringSet("Bger1_filt2.fasta")
Bger1_seqs <- as.data.frame(Bger1)$x
Bger2 <- readDNAStringSet("Bger2_filt2.fasta")
Bger2_seqs <- as.data.frame(Bger2)$x
total_seq_Bger1 <- length(Bger1_seqs)
total_seq_Bger2 <- length(Bger2_seqs)
IS_Bger1_Bger2 <- sum(Bger1%in%Bger2)  # replicate-1 sequences also found in replicate 2
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/B.germanica_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Bger1,area2 = total_seq_Bger2,
    cross.area = IS_Bger1_Bger2, category = make.italic(c("B.germanica_1","B.germanica_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.dist = c(-0.42,-0.4), cat.pos = c(15, 350))
dev.off()
############## ONCOPELTUS FASCIATUS ################
Ofas1 <- readDNAStringSet("Ofas1_filt2.fasta")
Ofas1_seqs <- as.data.frame(Ofas1)$x
Ofas2 <- readDNAStringSet("Ofas2_filt2.fasta")
Ofas2_seqs <- as.data.frame(Ofas2)$x
total_seq_Ofas1 <- length(Ofas1_seqs)
total_seq_Ofas2 <- length(Ofas2_seqs)
IS_Ofas1_Ofas2 <- sum(Ofas1%in%Ofas2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/O.fasciatus_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Ofas1,area2 = total_seq_Ofas2,
    cross.area = IS_Ofas1_Ofas2, category = make.italic(c("O.fasciatus_1","O.fasciatus_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.dist = c(-0.42,-0.4), cat.pos = c(15, 350))
dev.off()
############## ACYRTHOSIPHON PISUM ################
Apisum1 <- readDNAStringSet("Apisum1_filt2.fasta")
Apisum1_seqs <- as.data.frame(Apisum1)$x
Apisum2 <- readDNAStringSet("Apisum2_filt2.fasta")
Apisum2_seqs <- as.data.frame(Apisum2)$x
total_seq_Apisum1 <- length(Apisum1_seqs)
total_seq_Apisum2 <- length(Apisum2_seqs)
IS_Apisum1_Apisum2 <- sum(Apisum1%in%Apisum2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/A.pisum_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Apisum1,area2 = total_seq_Apisum2,
    cross.area = IS_Apisum1_Apisum2, category = make.italic(c("A.pisum_1","A.pisum_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.dist = c(-0.43,-0.4), cat.pos = c(15, 350))
dev.off()
############## TRIBOLIUM CASTANEUM ################
Tcas1 <- readDNAStringSet("Tcas1_filt2.fasta")
Tcas1_seqs <- as.data.frame(Tcas1)$x
Tcas2 <- readDNAStringSet("Tcas2_filt2.fasta")
Tcas2_seqs <- as.data.frame(Tcas2)$x
total_seq_Tcas1 <- length(Tcas1_seqs)
total_seq_Tcas2 <- length(Tcas2_seqs)
IS_Tcas1_Tcas2 <- sum(Tcas1%in%Tcas2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/T.castaneum_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Tcas1,area2 = total_seq_Tcas2,
    cross.area = IS_Tcas1_Tcas2, category = make.italic(c("T.castaneum_1","T.castaneum_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.pos = c(350, 15), cat.dist = c(-0.425, -0.42), inverted = T)
dev.off()
############## DIABROTICA VIRGIFERA ################
Dvir1 <- readDNAStringSet("Dvir1_filt2.fasta")
Dvir1_seqs <- as.data.frame(Dvir1)$x
Dvir2 <- readDNAStringSet("Dvir2_filt2.fasta")
Dvir2_seqs <- as.data.frame(Dvir2)$x
total_seq_Dvir1 <- length(Dvir1_seqs)
total_seq_Dvir2 <- length(Dvir2_seqs)
IS_Dvir1_Dvir2 <- sum(Dvir1%in%Dvir2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/D.virgifera_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Dvir1,area2 = total_seq_Dvir2,
    cross.area = IS_Dvir1_Dvir2, category = make.italic(c("D.virgifera_1","D.virgifera_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.pos = c(350, 15), cat.dist = c(-0.425, -0.44), inverted = T)
dev.off()
############## APIS MELLIFERA ################
Amell1 <- readDNAStringSet("Amell1_filt2.fasta")
Amell1_seqs <- as.data.frame(Amell1)$x
Amell2 <- readDNAStringSet("Amell2_filt2.fasta")
Amell2_seqs <- as.data.frame(Amell2)$x
total_seq_Amell1 <- length(Amell1_seqs)
total_seq_Amell2 <- length(Amell2_seqs)
IS_Amell1_Amell2 <- sum(Amell1%in%Amell2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/A.mellifera_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Amell1,area2 = total_seq_Amell2,
    cross.area = IS_Amell1_Amell2, category = make.italic(c("A.mellifera_1","A.mellifera_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.pos = c(350, 15), cat.dist = c(-0.425, -0.43), inverted = T)
dev.off()
############## BOMBUS TERRESTRIS ################
Bterr1 <- readDNAStringSet("Bterr1_filt2.fasta")
Bterr1_seqs <- as.data.frame(Bterr1)$x
Bterr2 <- readDNAStringSet("Bterr2_filt2.fasta")
Bterr2_seqs <- as.data.frame(Bterr2)$x
total_seq_Bterr1 <- length(Bterr1_seqs)
total_seq_Bterr2 <- length(Bterr2_seqs)
IS_Bterr1_Bterr2 <- sum(Bterr1%in%Bterr2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/B.terrestris_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Bterr1,area2 = total_seq_Bterr2,
    cross.area = IS_Bterr1_Bterr2, category = make.italic(c("B.terrestris_1","B.terrestris_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.pos = c(5, 360), cat.dist = c(-0.425, -0.34))
dev.off()
############## PLUTELLA XYLOSTELLA ################
Pxyl1 <- readDNAStringSet("Pxyl1_filt2.fasta")
Pxyl1_seqs <- as.data.frame(Pxyl1)$x
Pxyl2 <- readDNAStringSet("Pxyl2_filt2.fasta")
Pxyl2_seqs <- as.data.frame(Pxyl2)$x
total_seq_Pxyl1 <- length(Pxyl1_seqs)
total_seq_Pxyl2 <- length(Pxyl2_seqs)
IS_Pxyl1_Pxyl2 <- sum(Pxyl1%in%Pxyl2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/P.xylostella_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Pxyl1,area2 = total_seq_Pxyl2,
    cross.area = IS_Pxyl1_Pxyl2, category = make.italic(c("P.xylostella_1","P.xylostella_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.pos = c(10, 340), cat.dist = c(-0.425, -0.425))
dev.off()
############## TRICHOPLUSIA NI ################
Tni1 <- readDNAStringSet("Tni1_filt2.fasta")
Tni1_seqs <- as.data.frame(Tni1)$x
Tni2 <- readDNAStringSet("Tni2_filt2.fasta")
Tni2_seqs <- as.data.frame(Tni2)$x
total_seq_Tni1 <- length(Tni1_seqs)
total_seq_Tni2 <- length(Tni2_seqs)
IS_Tni1_Tni2 <- sum(Tni1%in%Tni2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/T.ni_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Tni1,area2 = total_seq_Tni2,
    cross.area = IS_Tni1_Tni2, category = make.italic(c("T.ni_1","T.ni_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.pos = c(5, 360), cat.dist = c(-0.425, -0.395))
dev.off()
############## AEDES AEGYPTI ################
Aaeg1 <- readDNAStringSet("Aaeg1_filt2.fasta")
Aaeg1_seqs <- as.data.frame(Aaeg1)$x
Aaeg2 <- readDNAStringSet("Aaeg2_filt2.fasta")
Aaeg2_seqs <- as.data.frame(Aaeg2)$x
total_seq_Aaeg1 <- length(Aaeg1_seqs)
total_seq_Aaeg2 <- length(Aaeg2_seqs)
IS_Aaeg1_Aaeg2 <- sum(Aaeg1%in%Aaeg2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/A.aegypti_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Aaeg1,area2 = total_seq_Aaeg2,
    cross.area = IS_Aaeg1_Aaeg2, category = make.italic(c("A.aegypti_1","A.aegypti_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.pos = c(5, 360), cat.dist = c(-0.425, -0.34))
dev.off()
############## MUSCA DOMESTICA ################
Mdom1 <- readDNAStringSet("Mdom1_filt2.fasta")
Mdom1_seqs <- as.data.frame(Mdom1)$x
Mdom2 <- readDNAStringSet("Mdom2_filt2.fasta")
Mdom2_seqs <- as.data.frame(Mdom2)$x
total_seq_Mdom1 <- length(Mdom1_seqs)
total_seq_Mdom2 <- length(Mdom2_seqs)
IS_Mdom1_Mdom2 <- sum(Mdom1%in%Mdom2)
png("E:/oscar/Documents/TFM/5.Filter_and_ReplicateLibraries/5.3.Replicates_Filter/5.3.1.Filter_2/M.domestica_filtered_replicates_filt_2.png",width = 1500,height = 1500,res = 150)
draw.pairwise.venn(area1 = total_seq_Mdom1,area2 = total_seq_Mdom2,
    cross.area = IS_Mdom1_Mdom2, category = make.italic(c("M.domestica_1","M.domestica_2")),
    print.mode = c('raw', 'percent'), cex = 2.5, fill = c("skyblue","mediumorchid"), cat.cex = 2.5, cat.pos = c(350, 15), cat.dist = c(-0.440, -0.45), inverted = T)
dev.off()
|
random_algo <- function(no, donnees, x) {
  # Randomly allocate the rows of `donnees` into consecutive groups of 20
  # by sorting the rows on an auxiliary standard-normal draw.
  # NOTE(review): `no` and `x` are unused here; kept for call-site compatibility.
  n <- nrow(donnees)
  donnees$random_sorting_variable <- rnorm(n, 0, 1)
  donnees <- donnees[order(donnees$random_sorting_variable), ]
  # Generalised from the hard-coded 400 rows / 20 groups: works for any row
  # count that is a multiple of 20 (other counts errored in the original too).
  donnees$group_allocation <- rep(seq_len(n / 20), each = 20)
  # Drop the helper column with base R instead of dplyr::select(),
  # removing the implicit dplyr dependency.
  donnees$random_sorting_variable <- NULL
  # Side effect preserved: callers read the result from the global `donnees`.
  donnees <<- donnees
}
#random_algo(no, donnees, x)
|
/Functions/F_random_algo.R
|
no_license
|
hannahbull/peer_effects
|
R
| false
| false
| 319
|
r
|
# Randomly allocate the 400 rows of `donnees` to 20 groups of 20.
#
# NOTE(review): `no` and `x` are unused; the row count (400) and the 20x20
# grouping are hard-coded, so this only works for exactly 400 rows.
# `select()` is presumably dplyr::select -- confirm dplyr is attached.
random_algo <- function(no, donnees, x) {
  # Sorting on i.i.d. normal draws yields a uniformly random row order.
  donnees$random_sorting_variable <- rnorm(400,0,1)
  donnees <- donnees[order(donnees$random_sorting_variable),]
  # First 20 rows -> group 1, next 20 -> group 2, ..., last 20 -> group 20.
  donnees$group_allocation <- rep(c(1:20),each=20)
  donnees <- select(donnees, -random_sorting_variable)
  # Overwrites `donnees` in the enclosing/global environment (side effect).
  donnees <<- donnees
}
#random_algo(no, donnees, x)
|
###############################################################################
### Helper functions for computing the mean recurrence times of a Markov chain
ergodic_projector <- function(P, n){
  # Cesaro average (1/(n+1)) * sum_{i=0}^{n} P^i of a transition matrix P;
  # for large n this approximates the ergodic projector of the chain.
  #
  # Fixes two defects in the original:
  #   * `A[,,0] <- ...` is a silent no-op in R, so the P^0 (identity) term
  #     intended by `for (i in 0:n)` was dropped; powers now go in slices
  #     1..n+1 of the array.
  #   * powers are built incrementally with base `%*%` instead of the expm
  #     package's `%^%`, removing the external dependency and the repeated
  #     from-scratch exponentiations.
  A <- array(0, dim = c(nrow(P), ncol(P), n + 1))
  pw <- diag(nrow(P))        # P^0 = identity
  A[, , 1] <- pw
  for (i in seq_len(n)) {
    pw <- pw %*% P           # pw is now P^i
    A[, , i + 1] <- pw
  }
  # Element-wise mean over all stored powers.
  apply(A, c(1, 2), mean, na.rm = TRUE)
}
deviation_matrix <- function(P,n){
  # Deviation matrix D = (I - P + Pi)^{-1} - Pi, where Pi is the
  # (approximate) ergodic projector of P over n steps.
  # Uses base solve() instead of the external inv() helper.
  ep <- ergodic_projector(P, n)
  solve(diag(ncol(P)) - P + ep) - ep
}
MRT <- function(P,n){
  # Mean recurrence / first-passage time matrix, computed from the
  # deviation matrix D and the diagonal of the ergodic projector E:
  #   R = (I - D + 1 1' diag(D)) E^{-1}
  # Uses base solve() instead of the external inv() helper.
  c <- ncol(P)
  D <- deviation_matrix(P, n)
  E <- diag(diag(ergodic_projector(P, n)), c, c)
  ones <- as.matrix(rep(1, c))
  # ones %*% t(ones * diag(D)) places diag(D) on every row (rank-1 update).
  (diag(c) - D + ones %*% t(ones * diag(D))) %*% solve(E)
}
MRT_distance <- function(P,n){
  # Symmetrise the mean recurrence time matrix: entry (i, j) is the mean of
  # the i->j and j->i recurrence times. The vectorised (M + t(M)) / 2
  # replaces the original O(c^2) double loop with identical results.
  M <- MRT(P, n)
  (M + t(M)) / 2
}
|
/Mean_recurrence.R
|
no_license
|
khyejin1231/SocialNetworkAnalysis
|
R
| false
| false
| 1,016
|
r
|
###############################################################################
### Helper functions for computing the mean recurrence times of a Markov chain
ergodic_projector <- function(P, n){
  # Cesaro average (1/(n+1)) * sum_{i=0}^{n} P^i of a transition matrix P;
  # for large n this approximates the ergodic projector of the chain.
  #
  # Fixes two defects in the original:
  #   * `A[,,0] <- ...` is a silent no-op in R, so the P^0 (identity) term
  #     intended by `for (i in 0:n)` was dropped; powers now go in slices
  #     1..n+1 of the array.
  #   * powers are built incrementally with base `%*%` instead of the expm
  #     package's `%^%`, removing the external dependency and the repeated
  #     from-scratch exponentiations.
  A <- array(0, dim = c(nrow(P), ncol(P), n + 1))
  pw <- diag(nrow(P))        # P^0 = identity
  A[, , 1] <- pw
  for (i in seq_len(n)) {
    pw <- pw %*% P           # pw is now P^i
    A[, , i + 1] <- pw
  }
  # Element-wise mean over all stored powers.
  apply(A, c(1, 2), mean, na.rm = TRUE)
}
deviation_matrix <- function(P,n){
  # Deviation matrix D = (I - P + Pi)^{-1} - Pi, where Pi is the
  # (approximate) ergodic projector of P over n steps.
  # Uses base solve() instead of the external inv() helper.
  ep <- ergodic_projector(P, n)
  solve(diag(ncol(P)) - P + ep) - ep
}
MRT <- function(P,n){
  # Mean recurrence / first-passage time matrix, computed from the
  # deviation matrix D and the diagonal of the ergodic projector E:
  #   R = (I - D + 1 1' diag(D)) E^{-1}
  # Uses base solve() instead of the external inv() helper.
  c <- ncol(P)
  D <- deviation_matrix(P, n)
  E <- diag(diag(ergodic_projector(P, n)), c, c)
  ones <- as.matrix(rep(1, c))
  # ones %*% t(ones * diag(D)) places diag(D) on every row (rank-1 update).
  (diag(c) - D + ones %*% t(ones * diag(D))) %*% solve(E)
}
MRT_distance <- function(P,n){
  # Symmetrise the mean recurrence time matrix: entry (i, j) is the mean of
  # the i->j and j->i recurrence times. The vectorised (M + t(M)) / 2
  # replaces the original O(c^2) double loop with identical results.
  M <- MRT(P, n)
  (M + t(M)) / 2
}
|
# Simple closure-based store for per-request data.
#
# `local()` evaluates this block in a fresh environment and `environment()`
# at the end returns that environment, so `req` behaves like a singleton
# object whose accessor functions share the private `state` variable.
req <- local({
  # Private request state; NULL until init() is called.
  state = NULL;
  # Store the request data for subsequent accessor calls.
  init <- function(reqdata){
    state <<- reqdata;
  };
  # Clear the stored request data.
  reset <- function(){
    init(NULL);
  }
  # Look up one field of the request state; errors if init() was not called.
  getvalue <- function(name){
    if(is.null(state)){
      stop("req not initiated.")
    }
    return(state[[name]]);
  };
  # HTTP request method, e.g. "GET" or "POST".
  method <- function(){
    getvalue("METHOD");
  };
  # URI used for redirects; currently the absolute mount point.
  uri <- function(){
    #this will result in relative url redirects
    #return(mount())
    #this will result in absolute url redirects
    return(fullmount())
  };
  # Relative mount path of the application.
  mount <- function(){
    getvalue("MOUNT");
  };
  # Content-Type of the request body.
  ctype <- function(){
    getvalue("CTYPE");
  };
  # Accept header of the request.
  accept <- function(){
    getvalue("ACCEPT")
  }
  # Raw (unparsed) request body.
  rawbody <- function(){
    getvalue("RAW")$body
  }
  # Absolute mount URL of the application.
  fullmount <- function(){
    getvalue("FULLMOUNT");
  }
  # Path below the mount point.
  path_info <- function(){
    getvalue("PATH_INFO");
  };
  # POST parameters; empty list when none were sent.
  post <- function(){
    postvar = getvalue("POST");
    if(is.null(postvar)) {
      postvar = list();
    }
    return(postvar)
  };
  # GET (query string) parameters, each value run through parse_arg_prim();
  # empty list when none were sent.
  get <- function(){
    getvar = getvalue("GET");
    if(is.null(getvar)) {
      getvar = list();
    }
    return(lapply(getvar, parse_arg_prim))
  };
  # Request arguments: POST body for PUT/POST, query string otherwise.
  args <- function(){
    if(method() %in% c("PUT", "POST")){
      return(post());
    } else {
      return(get());
    }
  };
  # Uploaded files; empty list when none were sent.
  files <- function(){
    filevar = getvalue("FILES");
    if(is.null(filevar)) {
      filevar = list();
    }
    return(filevar)
  };
  # Return this environment as the public "object".
  environment();
});
|
/R/req.R
|
permissive
|
nagyistge/opencpu
|
R
| false
| false
| 1,482
|
r
|
# Simple closure-based store for per-request data.
#
# `local()` evaluates this block in a fresh environment and `environment()`
# at the end returns that environment, so `req` behaves like a singleton
# object whose accessor functions share the private `state` variable.
req <- local({
  # Private request state; NULL until init() is called.
  state = NULL;
  # Store the request data for subsequent accessor calls.
  init <- function(reqdata){
    state <<- reqdata;
  };
  # Clear the stored request data.
  reset <- function(){
    init(NULL);
  }
  # Look up one field of the request state; errors if init() was not called.
  getvalue <- function(name){
    if(is.null(state)){
      stop("req not initiated.")
    }
    return(state[[name]]);
  };
  # HTTP request method, e.g. "GET" or "POST".
  method <- function(){
    getvalue("METHOD");
  };
  # URI used for redirects; currently the absolute mount point.
  uri <- function(){
    #this will result in relative url redirects
    #return(mount())
    #this will result in absolute url redirects
    return(fullmount())
  };
  # Relative mount path of the application.
  mount <- function(){
    getvalue("MOUNT");
  };
  # Content-Type of the request body.
  ctype <- function(){
    getvalue("CTYPE");
  };
  # Accept header of the request.
  accept <- function(){
    getvalue("ACCEPT")
  }
  # Raw (unparsed) request body.
  rawbody <- function(){
    getvalue("RAW")$body
  }
  # Absolute mount URL of the application.
  fullmount <- function(){
    getvalue("FULLMOUNT");
  }
  # Path below the mount point.
  path_info <- function(){
    getvalue("PATH_INFO");
  };
  # POST parameters; empty list when none were sent.
  post <- function(){
    postvar = getvalue("POST");
    if(is.null(postvar)) {
      postvar = list();
    }
    return(postvar)
  };
  # GET (query string) parameters, each value run through parse_arg_prim();
  # empty list when none were sent.
  get <- function(){
    getvar = getvalue("GET");
    if(is.null(getvar)) {
      getvar = list();
    }
    return(lapply(getvar, parse_arg_prim))
  };
  # Request arguments: POST body for PUT/POST, query string otherwise.
  args <- function(){
    if(method() %in% c("PUT", "POST")){
      return(post());
    } else {
      return(get());
    }
  };
  # Uploaded files; empty list when none were sent.
  files <- function(){
    filevar = getvalue("FILES");
    if(is.null(filevar)) {
      filevar = list();
    }
    return(filevar)
  };
  # Return this environment as the public "object".
  environment();
});
\name{UpsideRisk}
\alias{UpsideRisk}
\title{upside risk, variance and potential of the return distribution}
\usage{
UpsideRisk(R, MAR = 0, method = c("full", "subset"), stat = c("risk",
"variance", "potential"), ...)
}
\arguments{
\item{R}{an xts, vector, matrix, data frame, timeSeries
or zoo object of asset returns}
\item{MAR}{Minimum Acceptable Return, in the same
periodicity as your returns}
\item{method}{one of "full" or "subset", indicating
whether to use the length of the full series or the
length of the subset of the series below the MAR as the
denominator, defaults to "full"}
\item{stat}{one of "risk", "variance" or "potential"
indicating whether to return the Upside risk, variance or
potential}
\item{\dots}{any other passthru parameters}
}
\description{
Upside Risk is the analogue of semideviation, taking the
returns above the Minimum Acceptable Return instead of using
the mean return or zero. To calculate it, we take the
subset of returns that are more than the target (or Minimum
Acceptable Returns (MAR)) returns and take the differences
of those to the target. We sum the squares and divide by
the total number of returns and return the square root.
}
\details{
\deqn{ UpsideRisk(R , MAR) = \sqrt{\sum^{n}_{t=1}\frac{
max[(R_{t} - MAR), 0]^2}{n}}}{UpsideRisk(R, MAR) = sqrt(1/n
* sum(t=1..n) ((max(R(t)-MAR, 0))^2))}
\deqn{ UpsideVariance(R, MAR) =
\sum^{n}_{t=1}\frac{max[(R_{t} - MAR), 0]^2}
{n}}{UpsideVariance(R, MAR) = 1/n *
sum(t=1..n)((max(R(t)-MAR, 0))^2)}
\deqn{UpsidePotential(R, MAR) =
\sum^{n}_{t=1}\frac{max[(R_{t} - MAR), 0]}
{n}}{UpsidePotential(R, MAR) = 1/n *
sum(t=1..n)(max(R(t)-MAR, 0))}
where \eqn{n} is either the number of observations of the
entire series or the number of observations in the subset
of the series exceeding the MAR.
}
\examples{
data(portfolio_bacon)
MAR = 0.005
print(UpsideRisk(portfolio_bacon[,1], MAR, stat="risk")) #expected 0.02937
print(UpsideRisk(portfolio_bacon[,1], MAR, stat="variance")) #expected 0.08628
print(UpsideRisk(portfolio_bacon[,1], MAR, stat="potential")) #expected 0.01771
MAR = 0
data(managers)
print(UpsideRisk(managers['1996'], MAR, stat="risk"))
print(UpsideRisk(managers['1996',1], MAR, stat="risk")) #expected 1.820
}
\author{
Matthieu Lestel
}
\references{
Carl Bacon, \emph{Practical portfolio performance
measurement and attribution}, second edition 2008
}
\keyword{distribution}
\keyword{models}
\keyword{multivariate}
\keyword{ts}
|
/man/UpsideRisk.Rd
|
no_license
|
guillermozbta/portafolio-master
|
R
| false
| false
| 2,473
|
rd
|
\name{UpsideRisk}
\alias{UpsideRisk}
\title{upside risk, variance and potential of the return distribution}
\usage{
UpsideRisk(R, MAR = 0, method = c("full", "subset"), stat = c("risk",
"variance", "potential"), ...)
}
\arguments{
\item{R}{an xts, vector, matrix, data frame, timeSeries
or zoo object of asset returns}
\item{MAR}{Minimum Acceptable Return, in the same
periodicity as your returns}
\item{method}{one of "full" or "subset", indicating
whether to use the length of the full series or the
length of the subset of the series below the MAR as the
denominator, defaults to "full"}
\item{stat}{one of "risk", "variance" or "potential"
indicating whether to return the Upside risk, variance or
potential}
\item{\dots}{any other passthru parameters}
}
\description{
Upside Risk is the analogue of semideviation, taking the
returns above the Minimum Acceptable Return instead of using
the mean return or zero. To calculate it, we take the
subset of returns that are more than the target (or Minimum
Acceptable Returns (MAR)) returns and take the differences
of those to the target. We sum the squares and divide by
the total number of returns and return the square root.
}
\details{
\deqn{ UpsideRisk(R , MAR) = \sqrt{\sum^{n}_{t=1}\frac{
max[(R_{t} - MAR), 0]^2}{n}}}{UpsideRisk(R, MAR) = sqrt(1/n
* sum(t=1..n) ((max(R(t)-MAR, 0))^2))}
\deqn{ UpsideVariance(R, MAR) =
\sum^{n}_{t=1}\frac{max[(R_{t} - MAR), 0]^2}
{n}}{UpsideVariance(R, MAR) = 1/n *
sum(t=1..n)((max(R(t)-MAR, 0))^2)}
\deqn{UpsidePotential(R, MAR) =
\sum^{n}_{t=1}\frac{max[(R_{t} - MAR), 0]}
{n}}{UpsidePotential(R, MAR) = 1/n *
sum(t=1..n)(max(R(t)-MAR, 0))}
where \eqn{n} is either the number of observations of the
entire series or the number of observations in the subset
of the series exceeding the MAR.
}
\examples{
data(portfolio_bacon)
MAR = 0.005
print(UpsideRisk(portfolio_bacon[,1], MAR, stat="risk")) #expected 0.02937
print(UpsideRisk(portfolio_bacon[,1], MAR, stat="variance")) #expected 0.08628
print(UpsideRisk(portfolio_bacon[,1], MAR, stat="potential")) #expected 0.01771
MAR = 0
data(managers)
print(UpsideRisk(managers['1996'], MAR, stat="risk"))
print(UpsideRisk(managers['1996',1], MAR, stat="risk")) #expected 1.820
}
\author{
Matthieu Lestel
}
\references{
Carl Bacon, \emph{Practical portfolio performance
measurement and attribution}, second edition 2008
}
\keyword{distribution}
\keyword{models}
\keyword{multivariate}
\keyword{ts}
|
#' Compose multiple cli functions
#'
#' `cli()` will record all `cli_*` calls in `expr`, and emit them together
#' in a single message. This is useful if you want to build a larger
#' piece of output from multiple `cli_*` calls.
#'
#' Use this function to build a more complex piece of CLI that would not
#' make sense to show in pieces.
#'
#' @param expr Expression that contains `cli_*` calls. Their output is
#'   collected and sent as a single message.
#' @return Nothing.
#'
#' @export
#' @examples
#' cli({
#'   cli_h1("Title")
#'   cli_h2("Subtitle")
#'   cli_ul(c("this", "that", "end"))
#' })
cli <- function(expr) {
  # Record every cli_* call made while evaluating `expr`, then emit the
  # whole recording as a single "meta" message condition.
  cond <- cli__message_create("meta", cli__rec(expr))
  cli__message_emit(cond)
  invisible()
}
cli__rec <- function(expr) {
id <- new_uuid()
cli_recorded[[id]] <- list()
on.exit(rm(list = id, envir = cli_recorded), add = TRUE)
old <- options(cli.record = id)
on.exit(options(old), add = TRUE)
expr
cli_recorded[[id]]
}
cli__fmt <- function(record, collapse = FALSE, strip_newline = FALSE,
app = NULL) {
app <- app %||% default_app() %||% start_app(.auto_close = FALSE)
old <- app$output
on.exit(app$output <- old, add = TRUE)
on.exit(app$signal <- NULL, add = TRUE)
out <- rawConnection(raw(1000), open = "w")
on.exit(close(out), add = TRUE)
app$output <- out
app$signal <- FALSE
for (msg in record) {
do.call(app[[msg$type]], msg$args)
}
txt <- rawToChar(rawConnectionValue(out))
if (!collapse) {
txt <- unlist(strsplit(txt, "\n", fixed = TRUE))
} else if (strip_newline) {
txt <- substr(txt, 1, nchar(txt) - 1L)
}
txt
}
# cli__rec + cli__fmt
fmt <- function(expr, collapse = FALSE, strip_newline = FALSE, app = NULL) {
  # Record the cli_* calls made in `expr`, then format the recording.
  recorded <- cli__rec(expr)
  cli__fmt(recorded, collapse = collapse, strip_newline = strip_newline,
           app = app)
}
#' CLI text
#'
#' It is wrapped to the screen width automatically. It may contain inline
#' markup. (See [inline-markup].)
#'
#' @param ... The text to show, in character vectors. They will be
#' concatenated into a single string. Newlines are _not_ preserved.
#' @param .envir Environment to evaluate the glue expressions in.
#'
#' @export
#' @examples
#' cli_text("Hello world!")
#' cli_text(packageDescription("cli")$Description)
#'
#' ## Arguments are concatenated
#' cli_text("this", "that")
#'
#' ## Command substitution
#' greeting <- "Hello"
#' subject <- "world"
#' cli_text("{greeting} {subject}!")
#'
#' ## Inline theming
#' cli_text("The {.fn cli_text} function in the {.pkg cli} package")
#'
#' ## Use within container elements
#' ul <- cli_ul()
#' cli_li()
#' cli_text("{.emph First} item")
#' cli_li()
#' cli_text("{.emph Second} item")
#' cli_end(ul)
cli_text <- function(..., .envir = parent.frame()) {
cli__message("text", list(text = glue_cmd(..., .envir = .envir)))
}
#' CLI verbatim text
#'
#' It is not wrapped, but printed as is.
#'
#' @param ... The text to show, in character vectors. Each element is
#' printed on a new line.
#' @param .envir Environment to evaluate the glue expressions in.
#'
#' @export
#' @examples
#' cli_verbatim("This has\nthree", "lines")
cli_verbatim <- function(..., .envir = parent.frame()) {
cli__message("verbatim", c(list(...), list(.envir = .envir)))
}
#' CLI headings
#'
#' @param text Text of the heading. It can contain inline markup.
#' @param id Id of the heading element, string. It can be used in themes.
#' @param class Class of the heading element, string. It can be used in
#' themes.
#' @param .envir Environment to evaluate the glue expressions in.
#'
#' @export
#' @examples
#' cli_h1("Main title")
#' cli_h2("Subtitle")
#' cli_text("And some regular text....")
cli_h1 <- function(text, id = NULL, class = NULL, .envir = parent.frame()) {
cli__message("h1", list(text = glue_cmd(text, .envir = .envir), id = id,
class = class))
}
#' @rdname cli_h1
#' @export
cli_h2 <- function(text, id = NULL, class = NULL, .envir = parent.frame()) {
cli__message("h2", list(text = glue_cmd(text, .envir = .envir), id = id,
class = class))
}
#' @rdname cli_h1
#' @export
cli_h3 <- function(text, id = NULL, class = NULL, .envir = parent.frame()) {
cli__message("h3", list(text = glue_cmd(text, .envir = .envir), id = id,
class = class))
}
#' Generic CLI container
#'
#' See [containers]. A `cli_div` container is special, because it may
#' add new themes, that are valid within the container.
#'
#' @param id Element id, a string. If `NULL`, then a new id is generated
#'   and returned.
#' @param class Class name, string. Can be used in themes.
#' @param theme A custom theme for the container. See [themes].
#' @param .auto_close Whether to close the container, when the calling
#'   function finishes (or `.envir` is removed, if specified).
#' @param .envir Environment to evaluate the glue expressions in. It is
#'   also used to auto-close the container if `.auto_close` is `TRUE`.
#' @return The id of the new container element, invisibly.
#'
#' @export
#' @examples
#' ## div with custom theme
#' d <- cli_div(theme = list(h1 = list(color = "blue",
#'                                     "font-weight" = "bold")))
#' cli_h1("Custom title")
#' cli_end(d)
#'
#' ## Close automatically
#' div <- function() {
#'   cli_div(class = "tmp", theme = list(.tmp = list(color = "yellow")))
#'   cli_text("This is yellow")
#' }
#' div()
#' cli_text("This is not yellow any more")
cli_div <- function(id = NULL, class = NULL, theme = NULL,
                    .auto_close = TRUE, .envir = parent.frame()) {
  # Emit a "div" message; cli__message fills in a fresh id when id is NULL
  # and registers the auto-close handler in `.envir`.
  cli__message("div", list(id = id, class = class, theme = theme),
               .auto_close = .auto_close, .envir = .envir)
}
#' CLI paragraph
#'
#' See [containers].
#'
#' @param id Element id, a string. If `NULL`, then a new id is generated
#'   and returned.
#' @param class Class name, string. Can be used in themes.
#' @inheritParams cli_div
#' @return The id of the new container element, invisibly.
#'
#' @export
#' @examples
#' id <- cli_par()
#' cli_text("First paragraph")
#' cli_end(id)
#' id <- cli_par()
#' cli_text("Second paragraph")
#' cli_end(id)
cli_par <- function(id = NULL, class = NULL, .auto_close = TRUE,
                    .envir = parent.frame()) {
  # Emit a "par" container message; id is generated by cli__message if NULL.
  cli__message("par", list(id = id, class = class),
               .auto_close = .auto_close, .envir = .envir)
}
#' Close a CLI container
#'
#' @param id Id of the container to close. If missing, the current
#' container is closed, if any.
#'
#' @export
#' @examples
#' ## If id is omitted
#' cli_par()
#' cli_text("First paragraph")
#' cli_end()
#' cli_par()
#' cli_text("Second paragraph")
#' cli_end()
cli_end <- function(id = NULL) {
cli__message("end", list(id = id %||% NA_character_))
}
#' Unordered CLI list
#'
#' An unordered list is a container, see [containers].
#'
#' @param items If not `NULL`, then a character vector. Each element of
#' the vector will be one list item, and the list container will be
#' closed by default (see the `.close` argument).
#' @param id Id of the list container. Can be used for closing it with
#' [cli_end()] or in themes. If `NULL`, then an id is generated and
#' returned invisibly.
#' @param class Class of the list container. Can be used in themes.
#' @param .close Whether to close the list container if the `items` were
#' specified. If `FALSE` then new items can be added to the list.
#' @inheritParams cli_div
#' @return The id of the new container element, invisibly.
#'
#' @export
#' @examples
#' ## Specifying the items at the beginning
#' cli_ul(c("one", "two", "three"))
#'
#' ## Adding items one by one
#' cli_ul()
#' cli_li("one")
#' cli_li("two")
#' cli_li("three")
#' cli_end()
#'
#' ## Complex item, added gradually.
#' cli_ul()
#' cli_li()
#' cli_verbatim("Beginning of the {.emph first} item")
#' cli_text("Still the first item")
#' cli_end()
#' cli_li("Second item")
#' cli_end()
cli_ul <- function(items = NULL, id = NULL, class = NULL,
.close = TRUE, .auto_close = TRUE,
.envir = parent.frame()) {
cli__message(
"ul",
list(
items = lapply(items, glue_cmd, .envir = .envir), id = id,
class = class, .close = .close),
.auto_close = .auto_close, .envir = .envir)
}
#' Ordered CLI list
#'
#' An ordered list is a container, see [containers].
#'
#' @inheritParams cli_ul
#' @return The id of the new container element, invisibly.
#'
#' @export
#' @examples
#' ## Specifying the items at the beginning
#' cli_ol(c("one", "two", "three"))
#'
#' ## Adding items one by one
#' cli_ol()
#' cli_li("one")
#' cli_li("two")
#' cli_li("three")
#' cli_end()
#'
#' ## Nested lists
#' cli_div(theme = list(ol = list("margin-left" = 2)))
#' cli_ul()
#' cli_li("one")
#' cli_ol(c("foo", "bar", "foobar"))
#' cli_li("two")
#' cli_end()
#' cli_end()
cli_ol <- function(items = NULL, id = NULL, class = NULL,
.close = TRUE, .auto_close = TRUE,
.envir = parent.frame()) {
cli__message(
"ol",
list(
items = lapply(items, glue_cmd, .envir = .envir), id = id,
class = class, .close = .close),
.auto_close = .auto_close, .envir = .envir)
}
#' Definition list
#'
#' A definition list is a container, see [containers].
#'
#' @param items Named character vector, or `NULL`. If not `NULL`, they
#' are used as list items.
#' @inheritParams cli_ul
#' @return The id of the new container element, invisibly.
#'
#' @export
#' @examples
#' ## Specifying the items at the beginning
#' cli_dl(c(foo = "one", bar = "two", baz = "three"))
#'
#' ## Adding items one by one
#' cli_dl()
#' cli_li(c(foo = "one"))
#' cli_li(c(bar = "two"))
#' cli_li(c(baz = "three"))
#' cli_end()
cli_dl <- function(items = NULL, id = NULL, class = NULL,
.close = TRUE, .auto_close = TRUE,
.envir = parent.frame()) {
cli__message(
"dl",
list(
items = lapply(items, glue_cmd, .envir = .envir), id = id,
class = class, .close = .close),
.auto_close = .auto_close, .envir = .envir)
}
#' CLI list item(s)
#'
#' A list item is a container, see [containers].
#'
#' @param items Character vector of items, or `NULL`.
#' @param id Id of the new container. Can be used for closing it with
#' [cli_end()] or in themes. If `NULL`, then an id is generated and
#' returned invisibly.
#' @param class Class of the item container. Can be used in themes.
#' @inheritParams cli_div
#' @return The id of the new container element, invisibly.
#'
#' @export
#' @examples
#' ## Adding items one by one
#' cli_ul()
#' cli_li("one")
#' cli_li("two")
#' cli_li("three")
#' cli_end()
#'
#' ## Complex item, added gradually.
#' cli_ul()
#' cli_li()
#' cli_verbatim("Beginning of the {.emph first} item")
#' cli_text("Still the first item")
#' cli_end()
#' cli_li("Second item")
#' cli_end()
cli_li <- function(items = NULL, id = NULL, class = NULL,
.auto_close = TRUE, .envir = parent.frame()) {
cli__message(
"li",
list(
items = lapply(items, glue_cmd, .envir = .envir), id = id,
class = class),
.auto_close = .auto_close, .envir = .envir)
}
#' CLI alerts
#'
#' Alerts are typically short status messages.
#'
#' @param text Text of the alert.
#' @param id Id of the alert element. Can be used in themes.
#' @param class Class of the alert element. Can be used in themes.
#' @param wrap Whether to auto-wrap the text of the alert.
#' @param .envir Environment to evaluate the glue expressions in.
#'
#' @export
#' @examples
#'
#' cli_alert("Cannot lock package library.")
#' cli_alert_success("Package {.pkg cli} installed successfully.")
#' cli_alert_danger("Could not download {.pkg cli}.")
#' cli_alert_warning("Internet seems to be unreachable.")
#' cli_alert_info("Downloaded 1.45MiB of data")
cli_alert <- function(text, id = NULL, class = NULL, wrap = FALSE,
                      .envir = parent.frame()) {
  # Emit a generic alert; glue interpolation of `text` happens in `.envir`.
  cli__message(
    "alert",
    list(
      text = glue_cmd(text, .envir = .envir),
      id = id,
      class = class,
      wrap = wrap
    )
  )
}
#' @rdname cli_alert
#' @export
cli_alert_success <- function(text, id = NULL, class = NULL, wrap = FALSE,
.envir = parent.frame()) {
cli__message(
"alert_success",
list(
text = glue_cmd(text, .envir = .envir),
id = id,
class = class,
wrap = wrap
)
)
}
#' @rdname cli_alert
#' @export
cli_alert_danger <- function(text, id = NULL, class = NULL, wrap = FALSE,
.envir = parent.frame()) {
cli__message(
"alert_danger",
list(
text = glue_cmd(text, .envir = .envir),
id = id,
class = class,
wrap = wrap
)
)
}
#' @rdname cli_alert
#' @export
cli_alert_warning <- function(text, id = NULL, class = NULL, wrap = FALSE,
.envir = parent.frame()) {
cli__message(
"alert_warning",
list(
text = glue_cmd(text, .envir = .envir),
id = id,
class = class,
wrap = wrap
)
)
}
#' @rdname cli_alert
#' @export
cli_alert_info <- function(text, id = NULL, class = NULL, wrap = FALSE,
.envir = parent.frame()) {
cli__message(
"alert_info",
list(
text = glue_cmd(text, .envir = .envir),
id = id,
class = class,
wrap = wrap
)
)
}
#' CLI horizontal rule
#'
#' It can be used to separate parts of the output. The line style of the
#' rule can be changed via the the `line-type` property. Possible values
#' are:
#'
#' * `"single"`: (same as `1`), a single line,
#' * `"double"`: (same as `2`), a double line,
#' * `"bar1"`, `"bar2"`, `"bar3"`, etc., `"bar8"` uses varying height bars.
#'
#' Colors and background colors can similarly changed via a theme, see
#' examples below.
#'
#' @param .envir Environment to evaluate the glue expressions in.
#' @inheritParams rule
#' @inheritParams cli_div
#'
#' @export
#' @examples
#' cli_rule()
#' cli_text(packageDescription("cli")$Description)
#' cli_rule()
#'
#' # Theming
#' d <- cli_div(theme = list(rule = list(
#' color = "blue",
#' "background-color" = "darkgrey",
#' "line-type" = "double")))
#' cli_rule("Left", right = "Right")
#' cli_end(d)
#'
#' # Interpolation
#' cli_rule(left = "One plus one is {1+1}")
#' cli_rule(left = "Package {.pkg mypackage}")
cli_rule <- function(left = "", center = "", right = "", id = NULL,
.envir = parent.frame()) {
cli__message("rule", list(left = glue_cmd(left, .envir = .envir),
center = glue_cmd(center, .envir = .envir),
right = glue_cmd(right, .envir = .envir),
id = id))
}
#' CLI block quote
#'
#' A section that is quoted from another source. It is typically indented.
#'
#' @export
#' @param quote Text of the quotation.
#' @param citation Source of the quotation, typically a link or the name
#' of a person.
#' @inheritParams cli_div
#' @examples
#' cli_blockquote(cli:::lorem_ipsum(), citation = "Nobody, ever")
cli_blockquote <- function(quote, citation = NULL, id = NULL,
class = NULL, .envir = parent.frame()) {
cli__message(
"blockquote",
list(
quote = glue_cmd(quote, .envir = .envir),
citation = glue_cmd(citation, .envir = .envir),
id = id,
class = class
)
)
}
#' A block of code
#'
#' A helper function that creates a `div` with class `code` and then calls
#' `cli_verbatim()` to output code lines. The builtin theme formats these
#' containers specially. In particular, it adds syntax highlighting to
#' valid R code.
#'
#' @param lines Character vector, each line will be a line of code, and
#'   newline characters also create new lines. Note that _no_ glue
#'   substitution is performed on the code.
#' @param ... More character vectors, they are appended to `lines`.
#' @param language Programming language. This is also added as a class,
#'   in addition to `code`.
#' @param .auto_close Passed to `cli_div()` when creating the container of
#'   the code. By default the code container is closed after emitting
#'   `lines` and `...` via `cli_verbatim()`. You can keep that container
#'   open with `.auto_close` and/or `.envir`, and then calling
#'   `cli_verbatim()` to add (more) code. Note that the code will be
#'   formatted and syntax highlighted separately for each `cli_verbatim()`
#'   call.
#' @param .envir Passed to `cli_div()` when creating the container of the
#'   code.
#' @return The id of the container that contains the code.
#'
#' @export
#' @examples
#' cli_code(format(cli::cli_blockquote))
cli_code <- function(lines = NULL, ..., language = "R",
                     .auto_close = TRUE, .envir = environment()) {
  # Merge `lines` with any extra character vectors passed via `...`.
  lines <- c(lines, unlist(list(...)))
  # The language is appended as a class so themes can target e.g. "code R".
  id <- cli_div(
    class = paste("code", language),
    .auto_close = .auto_close,
    .envir = .envir
  )
  cli_verbatim(lines)
  invisible(id)
}
cli_recorded <- new.env(parent = emptyenv())
cli__message <- function(type, args, .auto_close = TRUE, .envir = NULL,
record = getOption("cli.record")) {
if ("id" %in% names(args) && is.null(args$id)) args$id <- new_uuid()
if (.auto_close && !is.null(.envir) && !identical(.envir, .GlobalEnv)) {
if (type == "status") {
defer(cli_status_clear(id = args$id, result = args$auto_result),
envir = .envir, priority = "first")
} else {
defer(cli_end(id = args$id), envir = .envir, priority = "first")
}
}
cond <- cli__message_create(type, args)
if (is.null(record)) {
cli__message_emit(cond)
invisible(args$id)
} else {
cli_recorded[[record]] <- c(cli_recorded[[record]], list(cond))
invisible(cond)
}
}
cli__message_create <- function(type, args) {
cond <- list(message = paste("cli message", type),
type = type, args = args, pid = clienv$pid)
class(cond) <- c(
getOption("cli.message_class"),
"cli_message",
"condition"
)
cond
}
cli__message_emit <- function(cond) {
withRestarts(
{
signalCondition(cond)
cli__default_handler(cond)
},
cli_message_handled = function() NULL)
}
cli__default_handler <- function(msg) {
  # Honour a user-supplied handler set via the `cli.default_handler`
  # option; otherwise fall back to the built-in server handler.
  handler <- getOption("cli.default_handler")
  if (is.function(handler)) handler(msg) else cli_server_default(msg)
}
|
/R/cli.R
|
permissive
|
queilawithaQ/cli-1
|
R
| false
| false
| 18,370
|
r
|
#' Compose multiple cli functions
#'
#' `cli()` will record all `cli_*` calls in `expr`, and emit them together
#' in a single message. This is useful if you want to built a larger
#' piece of output from multiple `cli_*` calls.
#'
#' Use this function to build a more complex piece of CLI that would not
#' make sense to show in pieces.
#'
#' @param expr Expression that contains `cli_*` calls. Their output is
#' collected and sent as a single message.
#' @return Nothing.
#'
#' @export
#' @examples
#' cli({
#' cli_h1("Title")
#' cli_h2("Subtitle")
#' cli_ul(c("this", "that", "end"))
#' })
cli <- function(expr) {
cond <- cli__message_create("meta", cli__rec(expr))
cli__message_emit(cond)
invisible()
}
cli__rec <- function(expr) {
id <- new_uuid()
cli_recorded[[id]] <- list()
on.exit(rm(list = id, envir = cli_recorded), add = TRUE)
old <- options(cli.record = id)
on.exit(options(old), add = TRUE)
expr
cli_recorded[[id]]
}
cli__fmt <- function(record, collapse = FALSE, strip_newline = FALSE,
app = NULL) {
app <- app %||% default_app() %||% start_app(.auto_close = FALSE)
old <- app$output
on.exit(app$output <- old, add = TRUE)
on.exit(app$signal <- NULL, add = TRUE)
out <- rawConnection(raw(1000), open = "w")
on.exit(close(out), add = TRUE)
app$output <- out
app$signal <- FALSE
for (msg in record) {
do.call(app[[msg$type]], msg$args)
}
txt <- rawToChar(rawConnectionValue(out))
if (!collapse) {
txt <- unlist(strsplit(txt, "\n", fixed = TRUE))
} else if (strip_newline) {
txt <- substr(txt, 1, nchar(txt) - 1L)
}
txt
}
# cli__rec + cli__fmt
fmt <- function(expr, collapse = FALSE, strip_newline = FALSE, app = NULL) {
rec <- cli__rec(expr)
cli__fmt(rec, collapse, strip_newline, app)
}
#' CLI text
#'
#' It is wrapped to the screen width automatically. It may contain inline
#' markup. (See [inline-markup].)
#'
#' @param ... The text to show, in character vectors. They will be
#' concatenated into a single string. Newlines are _not_ preserved.
#' @param .envir Environment to evaluate the glue expressions in.
#'
#' @export
#' @examples
#' cli_text("Hello world!")
#' cli_text(packageDescription("cli")$Description)
#'
#' ## Arguments are concatenated
#' cli_text("this", "that")
#'
#' ## Command substitution
#' greeting <- "Hello"
#' subject <- "world"
#' cli_text("{greeting} {subject}!")
#'
#' ## Inline theming
#' cli_text("The {.fn cli_text} function in the {.pkg cli} package")
#'
#' ## Use within container elements
#' ul <- cli_ul()
#' cli_li()
#' cli_text("{.emph First} item")
#' cli_li()
#' cli_text("{.emph Second} item")
#' cli_end(ul)
cli_text <- function(..., .envir = parent.frame()) {
cli__message("text", list(text = glue_cmd(..., .envir = .envir)))
}
#' CLI verbatim text
#'
#' It is not wrapped, but printed as is.
#'
#' @param ... The text to show, in character vectors. Each element is
#' printed on a new line.
#' @param .envir Environment to evaluate the glue expressions in.
#'
#' @export
#' @examples
#' cli_verbatim("This has\nthree", "lines")
cli_verbatim <- function(..., .envir = parent.frame()) {
cli__message("verbatim", c(list(...), list(.envir = .envir)))
}
#' CLI headings
#'
#' @param text Text of the heading. It can contain inline markup.
#' @param id Id of the heading element, string. It can be used in themes.
#' @param class Class of the heading element, string. It can be used in
#' themes.
#' @param .envir Environment to evaluate the glue expressions in.
#'
#' @export
#' @examples
#' cli_h1("Main title")
#' cli_h2("Subtitle")
#' cli_text("And some regular text....")
cli_h1 <- function(text, id = NULL, class = NULL, .envir = parent.frame()) {
  # Level-1 heading: interpolate `text` and emit an "h1" element.
  payload <- list(text = glue_cmd(text, .envir = .envir), id = id, class = class)
  cli__message("h1", payload)
}
#' @rdname cli_h1
#' @export
cli_h2 <- function(text, id = NULL, class = NULL, .envir = parent.frame()) {
  # Level-2 heading; same contract as cli_h1().
  payload <- list(text = glue_cmd(text, .envir = .envir), id = id, class = class)
  cli__message("h2", payload)
}
#' @rdname cli_h1
#' @export
cli_h3 <- function(text, id = NULL, class = NULL, .envir = parent.frame()) {
  # Level-3 heading; same contract as cli_h1().
  payload <- list(text = glue_cmd(text, .envir = .envir), id = id, class = class)
  cli__message("h3", payload)
}
#' Generic CLI container
#'
#' See [containers]. A `cli_div` container is special, because it may
#' add new themes, that are valid within the container.
#'
#' @param id Element id, a string. If `NULL`, then a new id is generated
#' and returned.
#' @param class Class name, string. Can be used in themes.
#' @param theme A custom theme for the container. See [themes].
#' @param .auto_close Whether to close the container, when the calling
#' function finishes (or `.envir` is removed, if specified).
#' @param .envir Environment to evaluate the glue expressions in. It is
#' also used to auto-close the container if `.auto_close` is `TRUE`.
#' @return The id of the new container element, invisibly.
#'
#' @export
#' @examples
#' ## div with custom theme
#' d <- cli_div(theme = list(h1 = list(color = "blue",
#' "font-weight" = "bold")))
#' cli_h1("Custom title")
#' cli_end(d)
#'
#' ## Close automatically
#' div <- function() {
#' cli_div(class = "tmp", theme = list(.tmp = list(color = "yellow")))
#' cli_text("This is yellow")
#' }
#' div()
#' cli_text("This is not yellow any more")
cli_div <- function(id = NULL, class = NULL, theme = NULL,
                    .auto_close = TRUE, .envir = parent.frame()) {
  # A div is the generic container and may carry its own theme; with
  # `.auto_close`, the container's lifetime is tied to `.envir` (usually
  # the calling function's frame).
  payload <- list(id = id, class = class, theme = theme)
  cli__message("div", payload, .auto_close = .auto_close, .envir = .envir)
}
#' CLI paragraph
#'
#' See [containers].
#'
#' @param id Element id, a string. If `NULL`, then a new id is generated
#' and returned.
#' @param class Class name, string. Can be used in themes.
#' @inheritParams cli_div
#' @return The id of the new container element, invisibly.
#'
#' @export
#' @examples
#' id <- cli_par()
#' cli_text("First paragraph")
#' cli_end(id)
#' id <- cli_par()
#' cli_text("Second paragraph")
#' cli_end(id)
cli_par <- function(id = NULL, class = NULL, .auto_close = TRUE,
                    .envir = parent.frame()) {
  # Paragraph container; closed automatically when `.envir` exits unless
  # `.auto_close` is disabled.
  payload <- list(id = id, class = class)
  cli__message("par", payload, .auto_close = .auto_close, .envir = .envir)
}
#' Close a CLI container
#'
#' @param id Id of the container to close. If missing, the current
#' container is closed, if any.
#'
#' @export
#' @examples
#' ## If id is omitted
#' cli_par()
#' cli_text("First paragraph")
#' cli_end()
#' cli_par()
#' cli_text("Second paragraph")
#' cli_end()
cli_end <- function(id = NULL) {
  # An NA id tells the backend to close the most recently opened container.
  target <- if (is.null(id)) NA_character_ else id
  cli__message("end", list(id = target))
}
#' Unordered CLI list
#'
#' An unordered list is a container, see [containers].
#'
#' @param items If not `NULL`, then a character vector. Each element of
#' the vector will be one list item, and the list container will be
#' closed by default (see the `.close` argument).
#' @param id Id of the list container. Can be used for closing it with
#' [cli_end()] or in themes. If `NULL`, then an id is generated and
#' returned invisibly.
#' @param class Class of the list container. Can be used in themes.
#' @param .close Whether to close the list container if the `items` were
#' specified. If `FALSE` then new items can be added to the list.
#' @inheritParams cli_div
#' @return The id of the new container element, invisibly.
#'
#' @export
#' @examples
#' ## Specifying the items at the beginning
#' cli_ul(c("one", "two", "three"))
#'
#' ## Adding items one by one
#' cli_ul()
#' cli_li("one")
#' cli_li("two")
#' cli_li("three")
#' cli_end()
#'
#' ## Complex item, added gradually.
#' cli_ul()
#' cli_li()
#' cli_verbatim("Beginning of the {.emph first} item")
#' cli_text("Still the first item")
#' cli_end()
#' cli_li("Second item")
#' cli_end()
cli_ul <- function(items = NULL, id = NULL, class = NULL,
                   .close = TRUE, .auto_close = TRUE,
                   .envir = parent.frame()) {
  # Any supplied items are glue-interpolated up front; `.close` decides
  # whether the list container is closed immediately when items were given.
  payload <- list(
    items = lapply(items, glue_cmd, .envir = .envir),
    id = id,
    class = class,
    .close = .close
  )
  cli__message("ul", payload, .auto_close = .auto_close, .envir = .envir)
}
#' Ordered CLI list
#'
#' An ordered list is a container, see [containers].
#'
#' @inheritParams cli_ul
#' @return The id of the new container element, invisibly.
#'
#' @export
#' @examples
#' ## Specifying the items at the beginning
#' cli_ol(c("one", "two", "three"))
#'
#' ## Adding items one by one
#' cli_ol()
#' cli_li("one")
#' cli_li("two")
#' cli_li("three")
#' cli_end()
#'
#' ## Nested lists
#' cli_div(theme = list(ol = list("margin-left" = 2)))
#' cli_ul()
#' cli_li("one")
#' cli_ol(c("foo", "bar", "foobar"))
#' cli_li("two")
#' cli_end()
#' cli_end()
cli_ol <- function(items = NULL, id = NULL, class = NULL,
                   .close = TRUE, .auto_close = TRUE,
                   .envir = parent.frame()) {
  # Ordered counterpart of cli_ul(); items are interpolated eagerly and
  # `.close` controls immediate closing when items were supplied.
  payload <- list(
    items = lapply(items, glue_cmd, .envir = .envir),
    id = id,
    class = class,
    .close = .close
  )
  cli__message("ol", payload, .auto_close = .auto_close, .envir = .envir)
}
#' Definition list
#'
#' A definition list is a container, see [containers].
#'
#' @param items Named character vector, or `NULL`. If not `NULL`, they
#' are used as list items.
#' @inheritParams cli_ul
#' @return The id of the new container element, invisibly.
#'
#' @export
#' @examples
#' ## Specifying the items at the beginning
#' cli_dl(c(foo = "one", bar = "two", baz = "three"))
#'
#' ## Adding items one by one
#' cli_dl()
#' cli_li(c(foo = "one"))
#' cli_li(c(bar = "two"))
#' cli_li(c(baz = "three"))
#' cli_end()
cli_dl <- function(items = NULL, id = NULL, class = NULL,
                   .close = TRUE, .auto_close = TRUE,
                   .envir = parent.frame()) {
  # Definition list: `items` is a *named* character vector (name = term,
  # value = definition). Interpolation and closing behave as in cli_ul().
  payload <- list(
    items = lapply(items, glue_cmd, .envir = .envir),
    id = id,
    class = class,
    .close = .close
  )
  cli__message("dl", payload, .auto_close = .auto_close, .envir = .envir)
}
#' CLI list item(s)
#'
#' A list item is a container, see [containers].
#'
#' @param items Character vector of items, or `NULL`.
#' @param id Id of the new container. Can be used for closing it with
#' [cli_end()] or in themes. If `NULL`, then an id is generated and
#' returned invisibly.
#' @param class Class of the item container. Can be used in themes.
#' @inheritParams cli_div
#' @return The id of the new container element, invisibly.
#'
#' @export
#' @examples
#' ## Adding items one by one
#' cli_ul()
#' cli_li("one")
#' cli_li("two")
#' cli_li("three")
#' cli_end()
#'
#' ## Complex item, added gradually.
#' cli_ul()
#' cli_li()
#' cli_verbatim("Beginning of the {.emph first} item")
#' cli_text("Still the first item")
#' cli_end()
#' cli_li("Second item")
#' cli_end()
cli_li <- function(items = NULL, id = NULL, class = NULL,
                   .auto_close = TRUE, .envir = parent.frame()) {
  # One or more list items for the enclosing ul/ol/dl container; items are
  # glue-interpolated before being sent.
  payload <- list(
    items = lapply(items, glue_cmd, .envir = .envir),
    id = id,
    class = class
  )
  cli__message("li", payload, .auto_close = .auto_close, .envir = .envir)
}
#' CLI alerts
#'
#' Alerts are typically short status messages.
#'
#' @param text Text of the alert.
#' @param id Id of the alert element. Can be used in themes.
#' @param class Class of the alert element. Can be used in themes.
#' @param wrap Whether to auto-wrap the text of the alert.
#' @param .envir Environment to evaluate the glue expressions in.
#'
#' @export
#' @examples
#'
#' cli_alert("Cannot lock package library.")
#' cli_alert_success("Package {.pkg cli} installed successfully.")
#' cli_alert_danger("Could not download {.pkg cli}.")
#' cli_alert_warning("Internet seems to be unreachable.")
#' cli_alert_info("Downloaded 1.45MiB of data")
cli_alert <- function(text, id = NULL, class = NULL, wrap = FALSE,
                      .envir = parent.frame()) {
  # Generic (neutral) alert; `wrap` opts into auto-wrapping the text.
  payload <- list(text = glue_cmd(text, .envir = .envir), id = id,
                  class = class, wrap = wrap)
  cli__message("alert", payload)
}
#' @rdname cli_alert
#' @export
cli_alert_success <- function(text, id = NULL, class = NULL, wrap = FALSE,
                              .envir = parent.frame()) {
  # Success-flavored alert; same contract as cli_alert().
  payload <- list(text = glue_cmd(text, .envir = .envir), id = id,
                  class = class, wrap = wrap)
  cli__message("alert_success", payload)
}
#' @rdname cli_alert
#' @export
cli_alert_danger <- function(text, id = NULL, class = NULL, wrap = FALSE,
                             .envir = parent.frame()) {
  # Danger/failure-flavored alert; same contract as cli_alert().
  payload <- list(text = glue_cmd(text, .envir = .envir), id = id,
                  class = class, wrap = wrap)
  cli__message("alert_danger", payload)
}
#' @rdname cli_alert
#' @export
cli_alert_warning <- function(text, id = NULL, class = NULL, wrap = FALSE,
                              .envir = parent.frame()) {
  # Warning-flavored alert; same contract as cli_alert().
  payload <- list(text = glue_cmd(text, .envir = .envir), id = id,
                  class = class, wrap = wrap)
  cli__message("alert_warning", payload)
}
#' @rdname cli_alert
#' @export
cli_alert_info <- function(text, id = NULL, class = NULL, wrap = FALSE,
                           .envir = parent.frame()) {
  # Informational alert; same contract as cli_alert().
  payload <- list(text = glue_cmd(text, .envir = .envir), id = id,
                  class = class, wrap = wrap)
  cli__message("alert_info", payload)
}
#' CLI horizontal rule
#'
#' It can be used to separate parts of the output. The line style of the
#' rule can be changed via the the `line-type` property. Possible values
#' are:
#'
#' * `"single"`: (same as `1`), a single line,
#' * `"double"`: (same as `2`), a double line,
#' * `"bar1"`, `"bar2"`, `"bar3"`, etc., `"bar8"` uses varying height bars.
#'
#' Colors and background colors can similarly changed via a theme, see
#' examples below.
#'
#' @param .envir Environment to evaluate the glue expressions in.
#' @inheritParams rule
#' @inheritParams cli_div
#'
#' @export
#' @examples
#' cli_rule()
#' cli_text(packageDescription("cli")$Description)
#' cli_rule()
#'
#' # Theming
#' d <- cli_div(theme = list(rule = list(
#' color = "blue",
#' "background-color" = "darkgrey",
#' "line-type" = "double")))
#' cli_rule("Left", right = "Right")
#' cli_end(d)
#'
#' # Interpolation
#' cli_rule(left = "One plus one is {1+1}")
#' cli_rule(left = "Package {.pkg mypackage}")
cli_rule <- function(left = "", center = "", right = "", id = NULL,
                     .envir = parent.frame()) {
  # All three labels support glue interpolation and inline markup.
  payload <- list(
    left = glue_cmd(left, .envir = .envir),
    center = glue_cmd(center, .envir = .envir),
    right = glue_cmd(right, .envir = .envir),
    id = id
  )
  cli__message("rule", payload)
}
#' CLI block quote
#'
#' A section that is quoted from another source. It is typically indented.
#'
#' @export
#' @param quote Text of the quotation.
#' @param citation Source of the quotation, typically a link or the name
#' of a person.
#' @inheritParams cli_div
#' @examples
#' cli_blockquote(cli:::lorem_ipsum(), citation = "Nobody, ever")
cli_blockquote <- function(quote, citation = NULL, id = NULL,
                           class = NULL, .envir = parent.frame()) {
  # Both the quotation text and its citation are glue-interpolated.
  payload <- list(
    quote = glue_cmd(quote, .envir = .envir),
    citation = glue_cmd(citation, .envir = .envir),
    id = id,
    class = class
  )
  cli__message("blockquote", payload)
}
#' A block of code
#'
#' A helper function that creates a `div` with class `code` and then calls
#' `cli_verbatim()` to output code lines. The builtin theme formats these
#' containers specially. In particular, it adds syntax highlighting to
#' valid R code.
#'
#' @param lines Character vector, each line will be a line of code, and
#'   newline characters also create new lines. Note that _no_ glue
#' substitution is performed on the code.
#' @param ... More character vectors, they are appended to `lines`.
#' @param language Programming language. This is also added as a class,
#' in addition to `code`.
#' @param .auto_close Passed to `cli_div()` when creating the container of
#' the code. By default the code container is closed after emitting
#' `lines` and `...` via `cli_verbatim()`. You can keep that container
#' open with `.auto_close` and/or `.envir`, and then calling
#' `cli_verbatim()` to add (more) code. Note that the code will be
#' formatted and syntax highlighted separately for each `cli_verbatim()`
#' call.
#' @param .envir Passed to `cli_div()` when creating the container of the
#' code.
#' @return The id of the container that contains the code.
#'
#' @export
#' @examples
#' cli_code(format(cli::cli_blockquote))
cli_code <- function(lines = NULL, ..., language = "R",
                     .auto_close = TRUE, .envir = environment()) {
  # Gather all code lines (no glue substitution is done on code), open a
  # div classed "code <language>" so the theme can highlight it, then emit
  # the lines verbatim. Returns the container id invisibly so callers can
  # keep it open (via .auto_close/.envir) and append more code later.
  code_lines <- c(lines, unlist(list(...)))
  container_id <- cli_div(
    class = paste("code", language),
    .auto_close = .auto_close,
    .envir = .envir
  )
  cli_verbatim(code_lines)
  invisible(container_id)
}
cli_recorded <- new.env(parent = emptyenv())
# Create and emit (or record) a single cli element.
#
# `type` names the element ("text", "div", "alert", ...); `args` is its
# payload. If `args` has a NULL `id`, a fresh uuid is generated so the
# element can be targeted later (e.g. by cli_end()). When `.auto_close` is
# TRUE and a real frame is supplied in `.envir`, a cleanup handler is
# registered there so the container (or status bar) closes automatically
# when that frame exits. If the `cli.record` option is set, the message is
# appended to `cli_recorded[[record]]` instead of being signalled.
# Returns the element id invisibly (or the condition, when recording).
cli__message <- function(type, args, .auto_close = TRUE, .envir = NULL,
                      record = getOption("cli.record")) {
  # Containers need an id so they can be closed later; make one if absent.
  if ("id" %in% names(args) && is.null(args$id)) args$id <- new_uuid()
  # Never auto-close against the global environment: it does not "exit".
  if (.auto_close && !is.null(.envir) && !identical(.envir, .GlobalEnv)) {
    if (type == "status") {
      # Status bars are cleared (optionally reporting a final result)
      # rather than ended like ordinary containers.
      defer(cli_status_clear(id = args$id, result = args$auto_result),
            envir = .envir, priority = "first")
    } else {
      defer(cli_end(id = args$id), envir = .envir, priority = "first")
    }
  }
  cond <- cli__message_create(type, args)
  if (is.null(record)) {
    # Normal path: signal the condition so handlers/servers process it.
    cli__message_emit(cond)
    invisible(args$id)
  } else {
    # Recording path: stash the condition instead of emitting it.
    cli_recorded[[record]] <- c(cli_recorded[[record]], list(cond))
    invisible(cond)
  }
}
cli__message_create <- function(type, args) {
  # Build a classed condition object carrying the element type, its payload
  # and the emitting process id; extra classes can be injected through the
  # `cli.message_class` option.
  structure(
    list(
      message = paste("cli message", type),
      type = type,
      args = args,
      pid = clienv$pid
    ),
    class = c(getOption("cli.message_class"), "cli_message", "condition")
  )
}
# Signal a cli message condition. A calling handler may invoke the
# "cli_message_handled" restart to claim the message; if no handler does,
# signalCondition() returns and the default handler processes it.
cli__message_emit <- function(cond) {
  withRestarts(
    {
      signalCondition(cond)
      cli__default_handler(cond)
    },
    cli_message_handled = function() NULL)
}
cli__default_handler <- function(msg) {
  # The `cli.default_handler` option lets users override how unclaimed
  # messages are processed; otherwise the built-in cli server handles them.
  handler <- getOption("cli.default_handler")
  if (!is.function(handler)) {
    cli_server_default(msg)
  } else {
    handler(msg)
  }
}
|
# Build parcel/network lookup keys for the Schaefer-400 (7 networks) and
# Glasser MMP atlases, then write them to CSV for downstream scripts.
library(here)
library(dplyr)
library(data.table)
# `dir_atlas` (atlas root directory) is defined in the project constants file.
if (!exists("dir_atlas")) source(here("code", "_constants.R"))
## schaefer (L first, R second!) ---
fname_schaefer <- file.path(
  dir_atlas,
  "Schaefer2018_Parcellations", "HCP", "fslr32k", "cifti", "Schaefer2018_400Parcels_7Networks_order_info.txt"
)
# The order-info file alternates parcel-label lines with color lines
# (800 lines for 400 parcels); keep the labels and strip the "7Networks_"
# prefix. Read with readLines(path) directly: the previous version opened a
# connection manually and then called unlink() on the *connection object*
# (unlink() takes file paths, so that attempted to delete an unrelated file
# named after the connection's string coercion). Also fail loudly if the
# file is missing, instead of erroring later with an obscure
# "object 'key_schaefer' not found".
if (!file.exists(fname_schaefer)) {
  stop("Schaefer key file not found: ", fname_schaefer)
}
tmp <- readLines(fname_schaefer)
if (length(tmp) != 800) stop("not expected Schaefer key.")
tmp <- tmp[seq(from = 1, to = 800, by = 2)]  # every-other entry is a label
key_schaefer <- gsub("7Networks_", "", tmp)
key_schaefer <- data.table(
  parcel = key_schaefer,
  # Network name is the middle token of labels like "LH_Vis_1" / "RH_Default_12".
  network = gsub("^.H_(Vis|SomMot|Cont|Default|Limbic|SalVentAttn|DorsAttn)_.*", "\\1", key_schaefer)
)
## mmp (R first, L second!) ----
key_mmp <- fread(
  file.path(dir_atlas, "HCP-MMP", "Glasser_et_al_2016_HCP_MMP1.0_RVVG", "MMP360ParcelsKey.csv")
)
# Parcel labels as "<community>_<hemisphere>", ordered by parcel ID.
key_mmp <- paste0(gsub("_ROI", "", key_mmp$Community), "_", key_mmp$Hem)[order(key_mmp$ParcelID)]
## network assignments:
# Cole-Anticevic network partition fetched from GitHub — NOTE(review): this
# requires network access at run time; confirm the script only runs online.
coleanticevic <- RCurl::getURL(
  "https://raw.githubusercontent.com/ColeLab/ColeAnticevicNetPartition/master/CortexSubcortex_ColeAnticevic_NetPartition_wSubcorGSR_parcels_LR_LabelKey.txt"
)
coleanticevic <- data.table::fread(text = coleanticevic)
# Drop rows without a Glasser label (presumably subcortical — confirm) and
# keep only the network/label columns.
coleanticevic <- coleanticevic[!is.na(GLASSERLABELNAME), c("NETWORK", "GLASSERLABELNAME")]
# Reformat e.g. "L_V1_ROI" -> "V1_L" to match key_mmp's label style.
coleanticevic$GLASSERLABELNAME <- gsub("(^.)_(.*)_ROI", "\\2_\\1", coleanticevic$GLASSERLABELNAME)
coleanticevic <- dplyr::rename(coleanticevic, parcel = GLASSERLABELNAME, network = NETWORK)
# full_join matches on "parcel" implicitly; the left side preserves key_mmp's order.
key_mmp <- data.table(full_join(data.frame(parcel = key_mmp), coleanticevic)) ## match to order from key_mmp
## write ----
# Persist the lookup tables for downstream analysis scripts.
fwrite(key_schaefer, here("in", "atlas-key_schaefer400-07.csv"))
fwrite(key_mmp, here("in", "atlas-key_mmp.csv"))
|
/in/_write_atlas_keys.R
|
no_license
|
mcfreund/psychomet
|
R
| false
| false
| 1,825
|
r
|
library(here)
library(dplyr)
library(data.table)
if (!exists("dir_atlas")) source(here("code", "_constants.R"))
## schaefer (L first, R second!) ---
fname_schaefer <- file.path(
  dir_atlas,
  "Schaefer2018_Parcellations", "HCP", "fslr32k", "cifti", "Schaefer2018_400Parcels_7Networks_order_info.txt"
)
# The order-info file alternates parcel-label lines with color lines
# (800 lines for 400 parcels); keep the labels and strip the "7Networks_"
# prefix. Read with readLines(path) directly: the previous version opened a
# connection manually and then called unlink() on the *connection object*
# (unlink() takes file paths, so that attempted to delete an unrelated file
# named after the connection's string coercion). Also fail loudly if the
# file is missing, instead of erroring later with an obscure
# "object 'key_schaefer' not found".
if (!file.exists(fname_schaefer)) {
  stop("Schaefer key file not found: ", fname_schaefer)
}
tmp <- readLines(fname_schaefer)
if (length(tmp) != 800) stop("not expected Schaefer key.")
tmp <- tmp[seq(from = 1, to = 800, by = 2)]  # every-other entry is a label
key_schaefer <- gsub("7Networks_", "", tmp)
key_schaefer <- data.table(
  parcel = key_schaefer,
  # Network name is the middle token of labels like "LH_Vis_1" / "RH_Default_12".
  network = gsub("^.H_(Vis|SomMot|Cont|Default|Limbic|SalVentAttn|DorsAttn)_.*", "\\1", key_schaefer)
)
## mmp (R first, L second!) ----
key_mmp <- fread(
file.path(dir_atlas, "HCP-MMP", "Glasser_et_al_2016_HCP_MMP1.0_RVVG", "MMP360ParcelsKey.csv")
)
key_mmp <- paste0(gsub("_ROI", "", key_mmp$Community), "_", key_mmp$Hem)[order(key_mmp$ParcelID)]
## network assignments:
coleanticevic <- RCurl::getURL(
"https://raw.githubusercontent.com/ColeLab/ColeAnticevicNetPartition/master/CortexSubcortex_ColeAnticevic_NetPartition_wSubcorGSR_parcels_LR_LabelKey.txt"
)
coleanticevic <- data.table::fread(text = coleanticevic)
coleanticevic <- coleanticevic[!is.na(GLASSERLABELNAME), c("NETWORK", "GLASSERLABELNAME")]
coleanticevic$GLASSERLABELNAME <- gsub("(^.)_(.*)_ROI", "\\2_\\1", coleanticevic$GLASSERLABELNAME)
coleanticevic <- dplyr::rename(coleanticevic, parcel = GLASSERLABELNAME, network = NETWORK)
key_mmp <- data.table(full_join(data.frame(parcel = key_mmp), coleanticevic)) ## match to order from key_mmp
## write ----
fwrite(key_schaefer, here("in", "atlas-key_schaefer400-07.csv"))
fwrite(key_mmp, here("in", "atlas-key_mmp.csv"))
|
#-------------------------------------------------------------------------------------------------------------------------------------#
# In this code, we are running a simulation to model the effectiveness of various surveillance strategies in detecting
# local Zika virus transmission. The end result is a data table which can be used for analysis and visualization.
#
# Created by Steven Russell
# Last updated: September 5, 2017
#-------------------------------------------------------------------------------------------------------------------------------------#
# Optional memory management: restart R session (and remove all R objects)
# .rs.restartR()
# rm(list = ls())
# Loading the required packages. library() (rather than require()) errors
# immediately if a package is unavailable; require() would only warn and
# return FALSE, letting the script fail later with confusing messages.
library(dplyr)
library(tidyr)
library(data.table)
# Setting the seed (for reproducibility) and the number of Monte Carlo samples
set.seed(123)
n.samples <- 10000
# Creating a sequence of incidences (roughly log-spaced) and population totals
# that we are interested in
incidences = 10^c(seq(from = -5, to = -4.09, by = .08), -4, seq(from = -3.88, to = -3, by = .08))
pops = c(10000, 100000, 1000000)
# Creating a variable that lists the different types of surveillance systems:
# Pregnant women, blood bank donors, 31 specific symptom combinations, 3 general symptom combinations
types <- factor(c("pregnant", "blood", "arth", "conj", "fever", "head", "rash",
          "arth+conj", "arth+fever", "arth+head", "arth+rash", "conj+fever",
          "conj+head", "conj+rash", "fever+head", "fever+rash", "head+rash",
          "arth+conj+fever", "arth+conj+head", "arth+conj+rash", "arth+fever+head", "arth+fever+rash",
          "arth+head+rash", "conj+fever+head", "conj+fever+rash", "conj+head+rash", "fever+head+rash",
          "arth+conj+fever+head", "arth+fever+head+rash", "arth+conj+head+rash",
          "arth+conj+fever+rash", "conj+fever+head+rash",
          "arth+conj+fever+head+rash", "2 or more", "3 or more", "rash + 1"))
# Calculating the number of rows in the data table
dt.rows = length(incidences) * n.samples * length(pops) * length(types)
# Creating a data table with all the combinations of iteration, type, incidence and population
full <- data.table(expand.grid(iter=1:n.samples, type=types, incidence = incidences, population=pops))
# Adding general variables: one row per Monte Carlo iteration, with each
# uncertain model input drawn from a uniform distribution over its
# plausible range.
testing.vars <- data.table(
  iter = 1:n.samples,
  # Pregnancy variables
  sen.ELISA = runif(n.samples, 0.80, 0.99),       # sensitivity of IgM MAC ELISA
  spec.ELISA = runif(n.samples, 0.80, 0.95),      # specificity of IgM MAC ELISA
  detection.days.ab = runif(n.samples, 56, 112),  # days in which IgM antibodies are detectable
  preg.rate = runif(n.samples, .009, .017),       # 10-17 per 1,000 in a population per year
  # Blood variables
  sen.NAAT = runif(n.samples, 0.98, 1),           # sensitivity of NAAT test
  spec.NAAT = runif(n.samples, .9999, 1),         # specificity of NAAT test
  detection.days.v = runif(n.samples, 11, 17),    # days in which virus is detectable (in serum)
  blood.donation.rate = runif(n.samples, .043, .047), # 43 per 1,000 in a population per year
  # whole blood and apheresis red blood cell units
  p.asymptomatic = runif(n.samples, .6, .8),
  # Symptom variables
  sen.PCR = runif(n.samples, 0.80, 0.95),         # sensitivity of PCR
  spec.PCR = runif(n.samples, 0.99, 1),           # specificity of PCR
  p.z.symp.seek.er =                # What % of people are Zika infected have symptoms and seek care at an ED?
    runif(n.samples, .20, .40) *    # What % of people who are Zika infected have symptoms?
    runif(n.samples, .1, .5) *      # What % of people who are Zika infected w/ symptoms seek care?
    runif(n.samples, .05, .50),     # What % of people visit the emergency department in a given week?
  p.ed.visit = runif(n.samples, .007, .010)
)
# Adding data on surveillance system specific assumptions (join on iteration
# so every type/incidence/population row shares the same parameter draw).
full <- merge(full, testing.vars, by="iter")
# Adding data on emergency department use by syndrome (l95/u95 bounds per type)
ed.syndromes <- data.table(read.csv("CSV files/ED_Symptoms.csv"), key = "type")
# Adding data on the prevalence of symptoms among Zikv+ individuals who sought care
zika.symptoms <- data.table(read.csv("CSV files/Zika_Symptoms.csv"), key="type")
# Adding emergency department syndrome distributions to the dataset:
# each row draws its probability uniformly between the syndrome's bounds.
# suppressWarnings() is needed because l95/u95 are NA for the "pregnant"
# and "blood" rows.
full <- merge(full, ed.syndromes, by="type", all.x = TRUE)
full <- full[, p.ed.syndrome := suppressWarnings(runif(dt.rows, l95, u95))]
full <- full[, c("l95", "u95") := NULL ]
dim(filter(full, is.na(p.ed.syndrome) & !(type %in% c("pregnant", "blood"))))[1] # Should be 0
# Adding Zika symptom distributions to the dataset
full <- merge(full, zika.symptoms, by="type", all.x = TRUE)
full <- full[, p.z.symp.seek.er.syndrome := suppressWarnings(runif(dt.rows, l95, u95))]
full <- full[, c("l95", "u95") := NULL ]
# Fixed copy-paste bug: this sanity check previously re-tested
# p.ed.syndrome; it must validate the column added just above.
dim(filter(full, is.na(p.z.symp.seek.er.syndrome) & !(type %in% c("pregnant", "blood"))))[1] # Should be 0
# Creating variables where type == 'pregnant'. Three sequential `:=` calls
# are required because later columns reference columns created earlier.
full[type == "pregnant", `:=` (
  # Prevalence of pregnant women with IgM antibodies at a given time
  detectable.zika.prevalence = incidence * detection.days.ab / 7,
  # Number of new pregnancies in a week
  weekly.pregnancies = preg.rate / 52 * population
)]
full[type == "pregnant", `:=` (
  # Weekly tests on Zikv positive people (each woman is tested twice)
  weekly.zikv.ppl.tested = weekly.pregnancies*2*detectable.zika.prevalence,
  # Weekly tests on Zikv negative people
  weekly.notzikv.ppl.tested = weekly.pregnancies*2*(1-detectable.zika.prevalence)
)]
full[type == "pregnant", `:=` (
  # Probability of detecting Zika (in a given week) if testing all pregnant women twice
  prob.detect.week = 1-(1-sen.ELISA*weekly.zikv.ppl.tested/population)^population,
  num.zikv.ppl.detected = weekly.zikv.ppl.tested * sen.ELISA,
  perc.zikv.ppl.detected = weekly.zikv.ppl.tested * sen.ELISA / (population * incidence),
  num.zikv.per.week = (population * incidence),
  # Probability of false positive (in a given week) if testing all pregnant women twice
  prob.fp.week = 1 - spec.ELISA^weekly.notzikv.ppl.tested,
  # Number of IgM tests needed per week
  tests.per.week = weekly.pregnancies * 2,
  # Expected number of false positives (in a given week)
  # n = expected number of tests on ZIKAV- people , p = probability of false positive on a single test
  expected.false.positives = weekly.notzikv.ppl.tested * (1-spec.ELISA) #based on E(v) of binomial distribution)
)]
# Removing variables to conserve memory
full[, c("sen.ELISA", "spec.ELISA", "detection.days.ab", "preg.rate") := NULL]
#-------------------------------------------------------------------------------------------#
# Creating variables where type == 'blood'. As above, sequential `:=` calls
# because later columns depend on earlier ones.
full[type == "blood", `:=` (
  # Prevalence of people in blood bank with detectable virus at a given time
  # (only asymptomatic people are assumed to donate)
  detectable.zika.prevalence = incidence * (detection.days.v / 7) * p.asymptomatic ,
  # Number of blood donations in a week
  weekly.blood.donors = blood.donation.rate * population / 52,
  # Number of NAAT tests needed per week (every donation is screened)
  tests.per.week = blood.donation.rate * population / 52
)]
full[type == "blood", `:=` (
  # Weekly tests on Zikv positive people
  weekly.zikv.ppl.tested = weekly.blood.donors*detectable.zika.prevalence,
  # Weekly tests on Zikv negative people
  weekly.notzikv.ppl.tested = weekly.blood.donors*(1-detectable.zika.prevalence)
)]
full[type == "blood", `:=` (
  # Probability of detecting Zika (in a given week) if testing all blood donors
  prob.detect.week = 1-(1-sen.NAAT*weekly.zikv.ppl.tested/population)^population,
  num.zikv.ppl.detected = weekly.zikv.ppl.tested * sen.NAAT,
  perc.zikv.ppl.detected = (weekly.zikv.ppl.tested * sen.NAAT) / (population * incidence),
  num.zikv.per.week = population * incidence,
  # Probability of false positive (in a given week) if testing all blood donors
  prob.fp.week = 1 - spec.NAAT^weekly.notzikv.ppl.tested,
  # Expected number of false positives (in a given week)
  # n = weekly.notzikv.ppl.tested, #expected number of tests on ZIKAV- people
  # p = (1-spec.NAAT), #probability of false positive on a single test
  expected.false.positives = weekly.notzikv.ppl.tested * (1-spec.NAAT) # based on E(v) of binomial distribution
)]
# Removing variables to conserve memory
full[, c("sen.NAAT", "spec.NAAT", "detection.days.v", "blood.donation.rate", "p.asymptomatic") := NULL]
#-------------------------------------------------------------------------------------------#
# Creating variables for symptomatic types (every type that is not
# "pregnant" or "blood", i.e. the ED syndrome-based systems).
full[type != "blood" & type != "pregnant", `:=` (
  # Number of ZIKV infected people tested per week
  weekly.zikv.ppl.tested = population * incidence *
    p.z.symp.seek.er * p.z.symp.seek.er.syndrome,
  # Number of ZIKV negative people tested per week
  weekly.notzikv.ppl.tested = population * (1-incidence) * p.ed.visit * p.ed.syndrome
)]
# Removing variables to conserve memory
full[, c("p.z.symp.seek.er", "p.z.symp.seek.er.syndrome", "p.ed.visit", "p.ed.syndrome") := NULL]
# Creating variables for symptomatic types
full[type != "blood" & type != "pregnant", `:=` (
  # Probability of detecting Zika (in a given week) if testing all symptomatic people (w/ sympx)
  prob.detect.week = 1 - (1 -sen.PCR*weekly.zikv.ppl.tested/population)^population,
  num.zikv.ppl.detected = weekly.zikv.ppl.tested * sen.PCR,
  perc.zikv.ppl.detected = (weekly.zikv.ppl.tested * sen.PCR) / (population * incidence),
  num.zikv.per.week = (population * incidence),
  # Probability of false positive (in a given week) if testing all symptomatic people (w/ sympx)
  prob.fp.week = 1 - spec.PCR^weekly.notzikv.ppl.tested,
  # Expected number of false positives per week
  expected.false.positives = weekly.notzikv.ppl.tested * (1 - spec.PCR)
)]
# Removing variables to conserve memory
full[, c("sen.PCR", "spec.PCR") := NULL]
# Creating variables for symptomatic types: total tests = positives + negatives
full[type != "blood" & type != "pregnant", `:=` (
  tests.per.week = weekly.zikv.ppl.tested + weekly.notzikv.ppl.tested
)]
# Keeping important variables (drop intermediate columns)
full <- full[, list(type, population, incidence, prob.detect.week, prob.fp.week, tests.per.week,
                    expected.false.positives, num.zikv.ppl.detected , perc.zikv.ppl.detected, num.zikv.per.week)]
# Creating surveillance variable: collapse the 34 syndrome types into one
# "symptom" category for system-level comparisons
full[type == "pregnant", surveillance := "pregnant"]
full[type == "blood", surveillance := "blood"]
full[type != "pregnant" & type != "blood", surveillance := "symptom"]
# PPV: positive predictive value = true positives / all positives
full <- mutate(full, PPV = num.zikv.ppl.detected / (num.zikv.ppl.detected + expected.false.positives))
# Calculating probability of detection for each incidence, population, and type:
# medians plus 50% and 95% Monte Carlo intervals across the n.samples draws.
summary.stats <- full %>%
  group_by(incidence, population, type, surveillance) %>%
  summarise(p.detect.m = median(prob.detect.week),
            p.detect.l95 = quantile(prob.detect.week, .025),
            p.detect.u95 = quantile(prob.detect.week, .975),
            p.detect.l50 = quantile(prob.detect.week, .25),
            p.detect.u50 = quantile(prob.detect.week, .75),
            tests.m = median(tests.per.week),
            tests.l95 = quantile(tests.per.week, .025),
            tests.u95 = quantile(tests.per.week, .975),
            tests.l50 = quantile(tests.per.week, .25),
            tests.u50 = quantile(tests.per.week, .75),
            fp.m = median(expected.false.positives),
            fp.l95 = quantile(expected.false.positives, .025),
            fp.u95 = quantile(expected.false.positives, .975),
            fp.l50 = quantile(expected.false.positives, .25),
            fp.u50 = quantile(expected.false.positives, .75),
            perc.zikv.ppl.detected.m = quantile(perc.zikv.ppl.detected, .5),
            perc.zikv.ppl.detected.l95 = quantile(perc.zikv.ppl.detected, .025),
            perc.zikv.ppl.detected.u95 = quantile(perc.zikv.ppl.detected, .975),
            perc.zikv.ppl.detected.l50 = quantile(perc.zikv.ppl.detected, .25),
            perc.zikv.ppl.detected.u50 = quantile(perc.zikv.ppl.detected, .75),
            num.zikv.ppl.detected.m = quantile(num.zikv.ppl.detected, .5),
            num.zikv.ppl.detected.l95 = quantile(num.zikv.ppl.detected, .025),
            num.zikv.ppl.detected.u95 = quantile(num.zikv.ppl.detected, .975),
            num.zikv.ppl.detected.l50 = quantile(num.zikv.ppl.detected, .25),
            num.zikv.ppl.detected.u50 = quantile(num.zikv.ppl.detected, .75),
            num.zikv.per.week.m = quantile(num.zikv.per.week, .5),
            PPV.m = quantile(PPV, .5),
            PPV.l95 = quantile(PPV, .025),
            PPV.u95 = quantile(PPV, .975),
            PPV.l50 = quantile(PPV, .25),
            PPV.u50 = quantile(PPV, .75)
  ) %>%
  ungroup()
# log10 incidence is convenient for plotting on a log axis
summary.stats <- mutate(summary.stats,
                        log10.incidence = log10(incidence))
# Calculating probability of detection for each incidence, population, and
# syndrome (symptom-based systems only; pregnant/blood are excluded here)
summary.stats2 <- full %>%
  filter(type != "blood" & type != "pregnant") %>%
  group_by(incidence, population, type) %>%
  summarise(p.detect.m = median(prob.detect.week),
            p.detect.l95 = quantile(prob.detect.week, .025),
            p.detect.u95 = quantile(prob.detect.week, .975),
            p.detect.l50 = quantile(prob.detect.week, .25),
            p.detect.u50 = quantile(prob.detect.week, .75),
            tests.m = median(tests.per.week),
            tests.l95 = quantile(tests.per.week, .025),
            tests.u95 = quantile(tests.per.week, .975),
            tests.l50 = quantile(tests.per.week, .25),
            tests.u50 = quantile(tests.per.week, .75),
            fp.m = median(expected.false.positives),
            fp.l95 = quantile(expected.false.positives, .025),
            fp.u95 = quantile(expected.false.positives, .975),
            fp.l50 = quantile(expected.false.positives, .25),
            fp.u50 = quantile(expected.false.positives, .75),
            perc.zikv.ppl.detected.m = quantile(perc.zikv.ppl.detected, .5),
            num.zikv.ppl.detected.m = quantile(num.zikv.ppl.detected, .5)
  ) %>%
  ungroup()
# log10 incidence is convenient for plotting on a log axis
summary.stats2 <- mutate(summary.stats2,
                         log10.incidence = log10(incidence))
#------------------------------------- Optional: Save resulting datasets ------------------------------------------------------#
#save(full, file='')
#save(summary.stats, file='')
#save(summary.stats2, file='')
|
/Simulation Code.R
|
no_license
|
StevenRussell/Local_ZIKV_transmission
|
R
| false
| false
| 15,306
|
r
|
#-------------------------------------------------------------------------------------------------------------------------------------#
# In this code, we are running a simulation to model the effectiveness of various surveillance strategies in detecting
# local Zika virus transmission. The end result is a data table which can be used for analysis and visualization.
#
# Created by Steven Russell
# Last updated: September 5, 2017
#-------------------------------------------------------------------------------------------------------------------------------------#
# Optional memory management: restart R session (and remove all R objects)
# .rs.restartR()
# rm(list = ls())
# Load the required packages.
# library() (not require()) so a missing package fails loudly here,
# rather than require() returning FALSE and the script breaking later
# with a confusing "could not find function" error.
library(dplyr)
library(tidyr)
library(data.table)
# Setting the seed (reproducible runif() draws) and number of Monte Carlo samples
set.seed(123)
n.samples <- 10000
# Creating a sequence of incidences (weekly per-capita incidence on a log10 grid
# from 1e-5 to 1e-3) and the population totals that we are interested in
incidences = 10^c(seq(from = -5, to = -4.09, by = .08), -4, seq(from = -3.88, to = -3, by = .08))
pops = c(10000, 100000, 1000000)
# Creating a variable that lists the different types of surveillance systems:
# Pregnant women, blood bank donors, 31 specific symptom combinations, 3 general symptom combinations
types <- factor(c("pregnant", "blood", "arth", "conj", "fever", "head", "rash",
                  "arth+conj", "arth+fever", "arth+head", "arth+rash", "conj+fever",
                  "conj+head", "conj+rash", "fever+head", "fever+rash", "head+rash",
                  "arth+conj+fever", "arth+conj+head", "arth+conj+rash", "arth+fever+head", "arth+fever+rash",
                  "arth+head+rash", "conj+fever+head", "conj+fever+rash", "conj+head+rash", "fever+head+rash",
                  "arth+conj+fever+head", "arth+fever+head+rash", "arth+conj+head+rash",
                  "arth+conj+fever+rash", "conj+fever+head+rash",
                  "arth+conj+fever+head+rash", "2 or more", "3 or more", "rash + 1"))
# Calculating the number of rows in the data table (used below to size runif() draws)
dt.rows = length(incidences) * n.samples * length(pops) * length(types)
# Creating a data table with all the combinations of iteration, type, incidence and population
full <- data.table(expand.grid(iter=1:n.samples, type=types, incidence = incidences, population=pops))
# Adding general variables: one row of sampled test/behavior parameters per iteration
testing.vars <- data.table(
  iter = 1:n.samples,
  # Pregnancy variables
  sen.ELISA = runif(n.samples, 0.80, 0.99), # sensitivity of IgM MAC ELISA
  spec.ELISA = runif(n.samples, 0.80, 0.95), # specificity of IgM MAC ELISA
  detection.days.ab = runif(n.samples, 56, 112), # days in which IgM antibodies are detectible
  preg.rate = runif(n.samples, .009, .017), # 10-17 per 1,000 in a population per year
  # Blood variables
  sen.NAAT = runif(n.samples, 0.98, 1), # sensitivity of NAAT test
  spec.NAAT = runif(n.samples, .9999, 1), # specificity of NAAT test
  detection.days.v = runif(n.samples, 11, 17), # days in which virus is detectible (in serum)
  blood.donation.rate = runif(n.samples, .043, .047), # 43 per 1,000 in a population per year
  # whole blood and apheresis red blood cell units
  p.asymptomatic = runif(n.samples, .6, .8),
  # Symptom variables
  sen.PCR = runif(n.samples, 0.80, 0.95), # sensitivity of PCR
  spec.PCR = runif(n.samples, 0.99, 1), # specificity of PCR
  p.z.symp.seek.er = # What % of people are Zika infected have symptoms and seek care at an ED?
    runif(n.samples, .20, .40) * # What % of people who are Zika infected have symptoms?
    runif(n.samples, .1, .5) * # What % of people who are Zika infected w/ symptoms seek care?
    runif(n.samples, .05, .50), # What % of people visit the emergency department in a given week?
  p.ed.visit = runif(n.samples, .007, .010)
)
# Adding data on surveillance system specific assumptions
full <- merge(full, testing.vars, by="iter")
# Adding data on emergency department use by syndrome
ed.syndromes <- data.table(read.csv("CSV files/ED_Symptoms.csv"), key = "type")
# Adding data on the prevalence of symptoms among Zikv+ individuals who sought care
zika.symptoms <- data.table(read.csv("CSV files/Zika_Symptoms.csv"), key="type")
# Adding emergency department syndrome distributions to the dataset.
# Each row draws its syndrome probability uniformly from its (l95, u95) interval.
# suppressWarnings(): l95/u95 are NA for "pregnant"/"blood" rows (all.x join),
# so runif() warns and returns NA there by design.
full <- merge(full, ed.syndromes, by="type", all.x = TRUE)
full <- full[, p.ed.syndrome := suppressWarnings(runif(dt.rows, l95, u95))]
full <- full[, c("l95", "u95") := NULL ]
dim(filter(full, is.na(p.ed.syndrome) & !(type %in% c("pregnant", "blood"))))[1] # Sanity check: should be 0
# Adding Zika symptom distributions to the dataset (same draw-from-interval
# pattern as the ED syndrome merge above)
full <- merge(full, zika.symptoms, by="type", all.x = TRUE)
full <- full[, p.z.symp.seek.er.syndrome := suppressWarnings(runif(dt.rows, l95, u95))]
full <- full[, c("l95", "u95") := NULL ]
# Sanity check on the column just created. The original re-checked
# p.ed.syndrome (a copy-paste slip), which could mask a bad
# Zika_Symptoms.csv join.
dim(filter(full, is.na(p.z.symp.seek.er.syndrome) & !(type %in% c("pregnant", "blood"))))[1] # Should be 0
# Creating variables where type == 'pregnant'
# (data.table `:=` adds columns in place for just the matching rows; the three
# consecutive blocks are required because each depends on columns the previous
# one created)
full[type == "pregnant", `:=` (
  # Prevalence of pregnant women with IgM antibodies at a given time
  detectable.zika.prevalence = incidence * detection.days.ab / 7,
  # Number of new pregnancies in a week
  weekly.pregnancies = preg.rate / 52 * population
)]
full[type == "pregnant", `:=` (
  # Weekly tests on Zikv positive people (each woman tested twice per pregnancy)
  weekly.zikv.ppl.tested = weekly.pregnancies*2*detectable.zika.prevalence,
  # Weekly tests on Zikv negative people
  weekly.notzikv.ppl.tested = weekly.pregnancies*2*(1-detectable.zika.prevalence)
)]
full[type == "pregnant", `:=` (
  # Probability of detecting Zika (in a given week) if testing all pregnant women twice
  prob.detect.week = 1-(1-sen.ELISA*weekly.zikv.ppl.tested/population)^population,
  num.zikv.ppl.detected = weekly.zikv.ppl.tested * sen.ELISA,
  perc.zikv.ppl.detected = weekly.zikv.ppl.tested * sen.ELISA / (population * incidence),
  num.zikv.per.week = (population * incidence),
  # Probability of false positive (in a given week) if testing all pregnant women twice
  prob.fp.week = 1 - spec.ELISA^weekly.notzikv.ppl.tested,
  # Number of IgM tests needed per week
  tests.per.week = weekly.pregnancies * 2,
  # Expected number of false positives (in a given week)
  # n = expected number of tests on ZIKAV- people , p = probability of false positive on a single test
  expected.false.positives = weekly.notzikv.ppl.tested * (1-spec.ELISA) #based on E(v) of binomial distribution)
)]
# Removing variables to conserve memory (no longer needed after this point)
full[, c("sen.ELISA", "spec.ELISA", "detection.days.ab", "preg.rate") := NULL]
#-------------------------------------------------------------------------------------------#
# Creating variables where type == 'blood'
full[type == "blood", `:=` (
  # Prevalence of people in blood bank with detectable virus at a given time
  # (only asymptomatic infections donate, hence the p.asymptomatic factor)
  detectable.zika.prevalence = incidence * (detection.days.v / 7) * p.asymptomatic ,
  # Number of blood donations in a week
  weekly.blood.donors = blood.donation.rate * population / 52,
  # Number of NAAT tests needed per week
  tests.per.week = blood.donation.rate * population / 52
)]
full[type == "blood", `:=` (
  # Weekly tests on Zikv positive people
  weekly.zikv.ppl.tested = weekly.blood.donors*detectable.zika.prevalence,
  # Weekly tests on Zikv negative people
  weekly.notzikv.ppl.tested = weekly.blood.donors*(1-detectable.zika.prevalence)
)]
full[type == "blood", `:=` (
  # Probability of detecting Zika (in a given week) if testing all blood donors
  prob.detect.week = 1-(1-sen.NAAT*weekly.zikv.ppl.tested/population)^population,
  num.zikv.ppl.detected = weekly.zikv.ppl.tested * sen.NAAT,
  perc.zikv.ppl.detected = (weekly.zikv.ppl.tested * sen.NAAT) / (population * incidence),
  num.zikv.per.week = population * incidence,
  # Probability of false positive (in a given week) if testing all blood donors
  prob.fp.week = 1 - spec.NAAT^weekly.notzikv.ppl.tested,
  # Expected number of false positives (in a given week)
  # n = weekly.notzikv.ppl.tested, #expected number of tests on ZIKAV- people
  # p = (1-spec.NAAT), #probability of false positive on a single test
  expected.false.positives = weekly.notzikv.ppl.tested * (1-spec.NAAT) # based on E(v) of binomial distribution
)]
# Removing variables to conserve memory
full[, c("sen.NAAT", "spec.NAAT", "detection.days.v", "blood.donation.rate", "p.asymptomatic") := NULL]
#-------------------------------------------------------------------------------------------#
# Creating variables for symptomatic (syndrome-based) types
full[type != "blood" & type != "pregnant", `:=` (
  # Number of ZIKV infected people tested per week
  weekly.zikv.ppl.tested = population * incidence *
    p.z.symp.seek.er * p.z.symp.seek.er.syndrome,
  # Number of ZIKV negative people tested per week
  weekly.notzikv.ppl.tested = population * (1-incidence) * p.ed.visit * p.ed.syndrome
)]
# Removing variables to conserve memory
full[, c("p.z.symp.seek.er", "p.z.symp.seek.er.syndrome", "p.ed.visit", "p.ed.syndrome") := NULL]
# Creating detection/false-positive variables for symptomatic types
full[type != "blood" & type != "pregnant", `:=` (
  # Probability of detecting Zika (in a given week) if testing all symptomatic people (w/ sympx)
  prob.detect.week = 1 - (1 -sen.PCR*weekly.zikv.ppl.tested/population)^population,
  num.zikv.ppl.detected = weekly.zikv.ppl.tested * sen.PCR,
  perc.zikv.ppl.detected = (weekly.zikv.ppl.tested * sen.PCR) / (population * incidence),
  num.zikv.per.week = (population * incidence),
  # Probability of false positive (in a given week) if testing all symptomatic people (w/ sympx)
  prob.fp.week = 1 - spec.PCR^weekly.notzikv.ppl.tested,
  # Expected number of false positives per week
  expected.false.positives = weekly.notzikv.ppl.tested * (1 - spec.PCR)
)]
# Removing variables to conserve memory
full[, c("sen.PCR", "spec.PCR") := NULL]
# Total weekly testing volume for symptomatic types
full[type != "blood" & type != "pregnant", `:=` (
  tests.per.week = weekly.zikv.ppl.tested + weekly.notzikv.ppl.tested
)]
# Keeping important variables (drop the intermediate columns)
full <- full[, list(type, population, incidence, prob.detect.week, prob.fp.week, tests.per.week,
                    expected.false.positives, num.zikv.ppl.detected , perc.zikv.ppl.detected, num.zikv.per.week)]
# Creating surveillance variable (broad category used for grouping below)
full[type == "pregnant", surveillance := "pregnant"]
full[type == "blood", surveillance := "blood"]
full[type != "pregnant" & type != "blood", surveillance := "symptom"]
# PPV: positive predictive value of one week of testing
full <- mutate(full, PPV = num.zikv.ppl.detected / (num.zikv.ppl.detected + expected.false.positives))
# Calculating probability of detection for each incidence, population, and type:
# medians plus 50% and 95% simulation intervals across the n.samples draws
summary.stats <- full %>%
  group_by(incidence, population, type, surveillance) %>%
  summarise(p.detect.m = median(prob.detect.week),
            p.detect.l95 = quantile(prob.detect.week, .025),
            p.detect.u95 = quantile(prob.detect.week, .975),
            p.detect.l50 = quantile(prob.detect.week, .25),
            p.detect.u50 = quantile(prob.detect.week, .75),
            tests.m = median(tests.per.week),
            tests.l95 = quantile(tests.per.week, .025),
            tests.u95 = quantile(tests.per.week, .975),
            tests.l50 = quantile(tests.per.week, .25),
            tests.u50 = quantile(tests.per.week, .75),
            fp.m = median(expected.false.positives),
            fp.l95 = quantile(expected.false.positives, .025),
            fp.u95 = quantile(expected.false.positives, .975),
            fp.l50 = quantile(expected.false.positives, .25),
            fp.u50 = quantile(expected.false.positives, .75),
            perc.zikv.ppl.detected.m = quantile(perc.zikv.ppl.detected, .5),
            perc.zikv.ppl.detected.l95 = quantile(perc.zikv.ppl.detected, .025),
            perc.zikv.ppl.detected.u95 = quantile(perc.zikv.ppl.detected, .975),
            perc.zikv.ppl.detected.l50 = quantile(perc.zikv.ppl.detected, .25),
            perc.zikv.ppl.detected.u50 = quantile(perc.zikv.ppl.detected, .75),
            num.zikv.ppl.detected.m = quantile(num.zikv.ppl.detected, .5),
            num.zikv.ppl.detected.l95 = quantile(num.zikv.ppl.detected, .025),
            num.zikv.ppl.detected.u95 = quantile(num.zikv.ppl.detected, .975),
            num.zikv.ppl.detected.l50 = quantile(num.zikv.ppl.detected, .25),
            num.zikv.ppl.detected.u50 = quantile(num.zikv.ppl.detected, .75),
            num.zikv.per.week.m = quantile(num.zikv.per.week, .5),
            PPV.m = quantile(PPV, .5),
            PPV.l95 = quantile(PPV, .025),
            PPV.u95 = quantile(PPV, .975),
            PPV.l50 = quantile(PPV, .25),
            PPV.u50 = quantile(PPV, .75)
  ) %>%
  ungroup()
summary.stats <- mutate(summary.stats,
                        log10.incidence = log10(incidence))
# Calculating probability of detection for each incidence, population, and syndrome
# (symptom-based surveillance only)
summary.stats2 <- full %>%
  filter(type != "blood" & type != "pregnant") %>%
  group_by(incidence, population, type) %>%
  summarise(p.detect.m = median(prob.detect.week),
            p.detect.l95 = quantile(prob.detect.week, .025),
            p.detect.u95 = quantile(prob.detect.week, .975),
            p.detect.l50 = quantile(prob.detect.week, .25),
            p.detect.u50 = quantile(prob.detect.week, .75),
            tests.m = median(tests.per.week),
            tests.l95 = quantile(tests.per.week, .025),
            tests.u95 = quantile(tests.per.week, .975),
            tests.l50 = quantile(tests.per.week, .25),
            tests.u50 = quantile(tests.per.week, .75),
            fp.m = median(expected.false.positives),
            fp.l95 = quantile(expected.false.positives, .025),
            fp.u95 = quantile(expected.false.positives, .975),
            fp.l50 = quantile(expected.false.positives, .25),
            fp.u50 = quantile(expected.false.positives, .75),
            perc.zikv.ppl.detected.m = quantile(perc.zikv.ppl.detected, .5),
            num.zikv.ppl.detected.m = quantile(num.zikv.ppl.detected, .5)
  ) %>%
  ungroup()
summary.stats2 <- mutate(summary.stats2,
                         log10.incidence = log10(incidence))
#------------------------------------- Optional: Save resulting datasets ------------------------------------------------------#
#save(full, file='')
#save(summary.stats, file='')
#save(summary.stats2, file='')
|
# NOTE(review): setwd() with a machine-specific path and rm(list=ls()) in a
# shared script are fragile -- rm() wipes the caller's workspace and the path
# only works on one machine. Kept as-is to preserve the existing workflow.
setwd("~/Documents/7thSemester/dmp/corpus")
library("RSQLite")    # SQLite storage for text units
library("tokenizers") # sentence tokenization
library("dplyr")
library("textclean")  # replace_abbreviation()
library("stringr")
library("tm")
library(qdap)
rm(list=ls())
sec <- scan("rawTexts/detective/agatha-christie-the-secret-adversary.txt",what="character",sep="\n")
sec.start <- which(sec=="IT was 2 p.m. on the afternoon of May 7, 1915. The Lusitania had been struck by two torpedoes in succession and was sinking rapidly, while the boats were being launched with all possible speed. The women and children were being lined up awaiting their turn. Some still clung desperately to husbands and fathers; others clutched their children closely to their breasts. One girl stood alone, slightly apart from the rest. She was quite young, not more than eighteen. She did not seem afraid, and her grave, steadfast eyes looked straight ahead.")
sec.fin <- which(sec =="“And a damned good sport too,” said Tommy.")
sec<- sec[sec.start:sec.fin]
print(length(sec))
sec.paragraphs <- as.data.frame(sec, stringsAsFactors=FALSE)
colnames(sec.paragraphs) <- c("paras")
# Drop the "CHAPTER ..." heading lines
chaps <- grep('CHAPTER', sec.paragraphs$paras)
sec.paragraphs <- as.data.frame(sec.paragraphs[-c(chaps)], stringsAsFactors=FALSE)
colnames(sec.paragraphs) <- c("paras")
# Strip double quotes, asterisks, and the dots of spaced initials/acronyms
# (a '.' preceded by a capital and followed by a capital, another '.', or whitespace)
sec.paragraphs<- sec.paragraphs %>%
  transmute(paragraphs=gsub("\"|\\*|(?<=[A-Z])(\\.)(?=[A-Z]|\\.|\\s)", "", perl=TRUE, paras))
# Normalize "Mrs." / "Mr." so the sentence tokenizer does not split on them
sec.paragraphs <- sec.paragraphs %>%
  transmute(paras= gsub("Mrs\\.", "Mrs", paragraphs) )
sec.paragraphs <- sec.paragraphs %>%
  transmute(paragraphs= gsub("Mr\\.", "Mr", paras))
# Remove empty and whitespace-only paragraphs
sec.paragraphs <- sec.paragraphs %>%
  filter(paragraphs!="")
sec.paragraphs <- sec.paragraphs %>%
  filter(paragraphs!=" ")
colnames(sec.paragraphs)
# Expand abbreviations (e.g. "p.m." -> "PM") before sentence splitting
sec.paragraphs <- sec.paragraphs %>%
  transmute(paras = replace_abbreviation(paragraphs))
# sanity check on paragraph count
print(length(sec.paragraphs$paras))
sec.paragraphs$paras[1] <- "It was 2 PM on the afternoon of May 7, 1915. The Lusitania had been struck by two torpedoes in succession and was sinking rapidly, while the boats were being launched with all possible speed. The women and children were being lined up awaiting their turn. Some still clung desperately to husbands and fathers; others clutched their children closely to their breasts. One girl stood alone, slightly apart from the rest. She was quite young, not more than eighteen. She did not seem afraid, and her grave, steadfast eyes looked straight ahead."
# Paragraph-level rows for the database. Derive the count from the data
# instead of hard-coding 3240 so the script survives upstream cleaning changes.
n.paras <- nrow(sec.paragraphs)  # 3240 for this edition
sec.title <- rep("theSecretAdversary", n.paras)
sec.para.type <- rep("paragraph", n.paras)
sec.para.counter <- seq_len(n.paras)
sec.para.id <- paste0("THE_SECRET_ADVERSARY_", "PARAGRAPH_", sec.para.counter)
sec.label <- rep("0", n.paras)
print(length(sec.para.id))
sec.para.matrix <- cbind(sec.title, sec.para.type, sec.para.id, sec.paragraphs, sec.label)
sec.para.df <- as.data.frame(sec.para.matrix, stringsAsFactors = FALSE)
stock <- c("Title", "Type", "ID", "Unit", "Label")
colnames(sec.para.df) <- stock
# Append the paragraph units to the shared SQLite table and spot-check two rows
con <- dbConnect(RSQLite::SQLite(), ":memory:", dbname="textTable.sqlite")
dbWriteTable(con, "textTable", sec.para.df, append=TRUE, row.names=FALSE)
dbGetQuery(con, "SELECT Unit FROM textTable WHERE Type='paragraph' AND Title='theSecretAdversary' LIMIT 2")
dbDisconnect(con)
# sentences.
sec <- sec.paragraphs$paras
# Tokenize in two batches, then recombine
first_bite <- sec[1:2499]
second_bite <- sec[2500:3240]
sec.sents.first <- paste0(first_bite, collapse = "\n")
sec.sents.first <- unlist(tokenize_sentences(sec.sents.first))
sec.sents.second <- paste0(second_bite, collapse = "\n")
sec.sents.second <- unlist(tokenize_sentences(sec.sents.second))
sec.sents <- c(sec.sents.first, sec.sents.second)
sec.sents.df <- as.data.frame(sec.sents, stringsAsFactors = FALSE)
print(length(sec.sents.df$sec.sents))
# loops: re-join sentence fragments the tokenizer split after '?' or '!'.
# If a sentence ends with ?/! and the next fragment starts lowercase, the split
# was mid-utterance -- merge them. The sentence itself must start uppercase so
# runs like "ha! ha! ha!" are left alone.
bad_spots <- integer(0)
for (i in seq_along(sec.sents)) {
  last.char <- substr(sec.sents[i], nchar(sec.sents[i]), nchar(sec.sents[i]))
  next.first <- substr(sec.sents[i + 1], 1, 1)  # NA on the final iteration; %in% yields FALSE
  this.first <- substr(sec.sents[i], 1, 1)
  if (next.first %in% c(LETTERS, letters)) {
    if (last.char %in% c('?', '!') && next.first == tolower(next.first) && this.first != tolower(this.first)) {
      sec.sents[i] <- paste(sec.sents[i], sec.sents[i + 1])
      bad_spots <- append(bad_spots, i + 1)
    }
  }
}
# Drop the absorbed fragments. Guard the empty case: x[-integer(0)] returns an
# empty vector, which would silently wipe out every sentence when no merges occur.
if (length(bad_spots) > 0) sec.sents <- sec.sents[-bad_spots]
# Second pass: same merge for sentences ending in ?” or !” (closing smart quote
# after the punctuation, i.e. dialogue).
bad_spots <- integer(0)
for (i in seq_along(sec.sents)) {
  last.two <- substr(sec.sents[i], nchar(sec.sents[i]) - 1, nchar(sec.sents[i]))
  next.first <- substr(sec.sents[i + 1], 1, 1)
  if (next.first %in% c(LETTERS, letters)) {
    if (last.two %in% c('?”', '!”') && next.first == tolower(next.first)) {
      sec.sents[i] <- paste(sec.sents[i], sec.sents[i + 1])
      bad_spots <- append(bad_spots, i + 1)
    }
  }
}
sec.sents[bad_spots]  # eyeball the fragments being merged away
# Guard the empty case (x[-integer(0)] would empty the vector; the original's
# un-removed 0 seed made x[-0] empty it too)
if (length(bad_spots) > 0) sec.sents <- sec.sents[-bad_spots]
print(length(sec.sents))
sec.sents.df <- as.data.frame(sec.sents, stringsAsFactors = FALSE)
# Drop ellipsis-only and empty "sentences"
sec.sents <- sec.sents[sec.sents!="“..."]
sec.sents <- sec.sents[sec.sents!=""]
print(length(sec.sents))
# Sentence-level rows: derive the count (7809 for this edition) from the data
# instead of hard-coding it.
n.sents <- length(sec.sents)
sec.title <- rep("theSecretAdversary", n.sents)
sec.sents.type <- rep("sentence", n.sents)
sec.sents.counter <- seq_len(n.sents)
sec.sents.id <- paste0("THE_SECRET_ADVERSARY_", "SENT_", sec.sents.counter)
sec.label <- rep("0", n.sents)
print(length(sec.sents.id))
sec.sents.matrix <- cbind(sec.title, sec.sents.type, sec.sents.id, sec.sents, sec.label)
sec.sents.df <- as.data.frame(sec.sents.matrix, stringsAsFactors = FALSE)
stock <- c("Title", "Type", "ID", "Unit", "Label")
colnames(sec.sents.df) <- stock
# Append the sentence units to the shared SQLite table and spot-check two rows
con <- dbConnect(RSQLite::SQLite(), ":memory:", dbname="textTable.sqlite")
dbWriteTable(con, "textTable", sec.sents.df, append=TRUE, row.names=FALSE)
dbGetQuery(con, "SELECT Unit FROM textTable WHERE Type='sentence' AND Title='theSecretAdversary' LIMIT 2")
dbDisconnect(con)
# words.
sec.temp <- sec
sec.temp <- paste(sec.temp, collapse=" ")
sec.temp <- tolower(sec.temp)
# Split on anything that is not a word character or a right single quote,
# so contractions like "don’t" survive as single tokens. important!
sec.temp <- unlist(strsplit(sec.temp, "[^\\w’]", perl=TRUE))
sec.not.blanks <- which(sec.temp != "")
sec.words <- sec.temp[sec.not.blanks]
print(length(sec.words))
# Drop bare-apostrophe tokens. (The original also filtered against the literal
# string "^’", which can never match a token -- '^' is a split character above --
# so that no-op filter is removed.)
sec.words <- sec.words[which(sec.words != "’")]
print(length(sec.words))
sec.words[9999:10099]  # spot check a slice of tokens
# Word-level rows: derive the count (76046 for this edition) from the data
# instead of hard-coding it.
n.words <- length(sec.words)
sec.title <- rep("theSecretAdversary", n.words)
sec.words.type <- rep("word", n.words)
sec.words.counter <- seq_len(n.words)
sec.words.id <- paste0("THE_SECRET_ADVERSARY_", "WORD_", sec.words.counter)
sec.label <- rep("0", n.words)
sec.words.matrix <- cbind(sec.title, sec.words.type, sec.words.id, sec.words, sec.label)
sec.words.df <- as.data.frame(sec.words.matrix, stringsAsFactors = FALSE)
stock <- c("Title", "Type", "ID", "Unit", "Label")
colnames(sec.words.df) <- c("Title", "Type", "ID", "Unit", "Label")
# Append the word units to the shared SQLite table and spot-check ten rows
con <- dbConnect(RSQLite::SQLite(), ":memory:", dbname="textTable.sqlite")
dbWriteTable(con, "textTable", sec.words.df, append=TRUE, row.names=FALSE)
dbGetQuery(con, "SELECT * FROM textTable WHERE Type= 'word' AND Title='theSecretAdversary' LIMIT 10")
dbDisconnect(con)
# secret adversary finito.
|
/scriptsAndDatabase/detective_clean/ADVERSARYclean.R
|
no_license
|
timschott/dmp
|
R
| false
| false
| 7,322
|
r
|
setwd("~/Documents/7thSemester/dmp/corpus")
library("RSQLite")
library("tokenizers")
library("dplyr")
library("textclean")
library("stringr")
library("tm")
library(qdap)
rm(list=ls())
sec <- scan("rawTexts/detective/agatha-christie-the-secret-adversary.txt",what="character",sep="\n")
sec.start <- which(sec=="IT was 2 p.m. on the afternoon of May 7, 1915. The Lusitania had been struck by two torpedoes in succession and was sinking rapidly, while the boats were being launched with all possible speed. The women and children were being lined up awaiting their turn. Some still clung desperately to husbands and fathers; others clutched their children closely to their breasts. One girl stood alone, slightly apart from the rest. She was quite young, not more than eighteen. She did not seem afraid, and her grave, steadfast eyes looked straight ahead.")
sec.fin <- which(sec =="“And a damned good sport too,” said Tommy.")
sec<- sec[sec.start:sec.fin]
print(length(sec))
sec.paragraphs <- as.data.frame(sec, stringsAsFactors=FALSE)
colnames(sec.paragraphs) <- c("paras")
chaps <- grep('CHAPTER', sec.paragraphs$paras)
sec.paragraphs <- as.data.frame(sec.paragraphs[-c(chaps)], stringsAsFactors=FALSE)
colnames(sec.paragraphs) <- c("paras")
sec.paragraphs<- sec.paragraphs %>%
transmute(paragraphs=gsub("\"|\\*|(?<=[A-Z])(\\.)(?=[A-Z]|\\.|\\s)", "", perl=TRUE, paras))
sec.paragraphs <- sec.paragraphs %>%
transmute(paras= gsub("Mrs\\.", "Mrs", paragraphs) )
sec.paragraphs <- sec.paragraphs %>%
transmute(paragraphs= gsub("Mr\\.", "Mr", paras))
sec.paragraphs <- sec.paragraphs %>%
filter(paragraphs!="")
sec.paragraphs <- sec.paragraphs %>%
filter(paragraphs!=" ")
colnames(sec.paragraphs)
sec.paragraphs <- sec.paragraphs %>%
transmute(paras = replace_abbreviation(paragraphs))
##
print(length(sec.paragraphs$paras))
sec.paragraphs$paras[1] <- "It was 2 PM on the afternoon of May 7, 1915. The Lusitania had been struck by two torpedoes in succession and was sinking rapidly, while the boats were being launched with all possible speed. The women and children were being lined up awaiting their turn. Some still clung desperately to husbands and fathers; others clutched their children closely to their breasts. One girl stood alone, slightly apart from the rest. She was quite young, not more than eighteen. She did not seem afraid, and her grave, steadfast eyes looked straight ahead."
sec.title <- rep("theSecretAdversary", 3240)
sec.para.type <- rep("paragraph",3240)
sec.para.counter<-seq(1, 3240)
sec.para.id <- paste0("THE_SECRET_ADVERSARY_", "PARAGRAPH_", sec.para.counter)
sec.label <- rep("0", 3240)
print(length(sec.para.id))
sec.para.matrix <- cbind(sec.title, sec.para.type, sec.para.id, sec.paragraphs, sec.label)
sec.para.df <- as.data.frame(sec.para.matrix, stringsAsFactors = FALSE)
stock <- c("Title", "Type", "ID", "Unit", "Label")
colnames(sec.para.df) <- stock
con <- dbConnect(RSQLite::SQLite(), ":memory:", dbname="textTable.sqlite")
dbWriteTable(con, "textTable", sec.para.df, append=TRUE, row.names=FALSE)
dbGetQuery(con, "SELECT Unit FROM textTable WHERE Type='paragraph' AND Title='theSecretAdversary' LIMIT 2")
dbDisconnect(con)
# sentences.
sec <- sec.paragraphs$paras
first_bite <- sec[1:2499]
second_bite <- sec[2500:3240]
sec.sents.first <- paste0(first_bite, collapse = "\n")
sec.sents.first <- unlist(tokenize_sentences(sec.sents.first))
sec.sents.second <- paste0(second_bite, collapse = "\n")
sec.sents.second <- unlist(tokenize_sentences(sec.sents.second))
sec.sents <- c(sec.sents.first, sec.sents.second)
sec.sents.df <- as.data.frame(sec.sents, stringsAsFactors = FALSE)
print(length(sec.sents.df$sec.sents))
# loops: re-join sentence fragments the tokenizer split after '?' or '!'.
# If a sentence ends with ?/! and the next fragment starts lowercase, the split
# was mid-utterance -- merge them. The sentence itself must start uppercase so
# runs like "ha! ha! ha!" are left alone.
bad_spots <- integer(0)
for (i in seq_along(sec.sents)) {
  last.char <- substr(sec.sents[i], nchar(sec.sents[i]), nchar(sec.sents[i]))
  next.first <- substr(sec.sents[i + 1], 1, 1)  # NA on the final iteration; %in% yields FALSE
  this.first <- substr(sec.sents[i], 1, 1)
  if (next.first %in% c(LETTERS, letters)) {
    if (last.char %in% c('?', '!') && next.first == tolower(next.first) && this.first != tolower(this.first)) {
      sec.sents[i] <- paste(sec.sents[i], sec.sents[i + 1])
      bad_spots <- append(bad_spots, i + 1)
    }
  }
}
# Drop the absorbed fragments. Guard the empty case: x[-integer(0)] returns an
# empty vector, which would silently wipe out every sentence when no merges occur.
if (length(bad_spots) > 0) sec.sents <- sec.sents[-bad_spots]
# Second pass: same merge for sentences ending in ?” or !” (closing smart quote
# after the punctuation, i.e. dialogue).
bad_spots <- integer(0)
for (i in seq_along(sec.sents)) {
  last.two <- substr(sec.sents[i], nchar(sec.sents[i]) - 1, nchar(sec.sents[i]))
  next.first <- substr(sec.sents[i + 1], 1, 1)
  if (next.first %in% c(LETTERS, letters)) {
    if (last.two %in% c('?”', '!”') && next.first == tolower(next.first)) {
      sec.sents[i] <- paste(sec.sents[i], sec.sents[i + 1])
      bad_spots <- append(bad_spots, i + 1)
    }
  }
}
sec.sents[bad_spots]  # eyeball the fragments being merged away
# Guard the empty case (x[-integer(0)] would empty the vector; the original's
# un-removed 0 seed made x[-0] empty it too)
if (length(bad_spots) > 0) sec.sents <- sec.sents[-bad_spots]
print(length(sec.sents))
sec.sents.df <- as.data.frame(sec.sents, stringsAsFactors = FALSE)
sec.sents <- sec.sents[sec.sents!="“..."]
sec.sents <- sec.sents[sec.sents!=""]
print(length(sec.sents))
sec.title <- rep("theSecretAdversary", 7809)
sec.sents.type <- rep("sentence", 7809)
sec.sents.counter<-seq(1, 7809)
sec.sents.id <- paste0("THE_SECRET_ADVERSARY_", "SENT_", sec.sents.counter)
sec.label <- rep("0", 7809)
print(length(sec.sents.id))
sec.sents.matrix <- cbind(sec.title, sec.sents.type, sec.sents.id, sec.sents, sec.label)
sec.sents.df <- as.data.frame(sec.sents.matrix, stringsAsFactors = FALSE)
stock <- c("Title", "Type", "ID", "Unit", "Label")
colnames(sec.sents.df) <- stock
con <- dbConnect(RSQLite::SQLite(), ":memory:", dbname="textTable.sqlite")
dbWriteTable(con, "textTable", sec.sents.df, append=TRUE, row.names=FALSE)
dbGetQuery(con, "SELECT Unit FROM textTable WHERE Type='sentence' AND Title='theSecretAdversary' LIMIT 2")
dbDisconnect(con)
# words.
sec.temp <- sec
sec.temp <- paste(sec.temp, collapse=" ")
sec.temp <-tolower(sec.temp)
# a better regex that is going to maintain contractions. important!
sec.temp <- unlist(strsplit(sec.temp, "[^\\w’]", perl=TRUE))
sec.not.blanks <- which(sec.temp != "")
sec.words <- sec.temp[sec.not.blanks]
print(length(sec.words))
sec.words<- sec.words[which(sec.words!="^’")]
sec.words<- sec.words[which(sec.words!="’")]
print(length(sec.words))
sec.words[9999:10099]
sec.title <- rep("theSecretAdversary", 76046)
sec.words.type <- rep("word", 76046)
sec.words.counter <- seq(1, 76046)
sec.words.id <- paste0("THE_SECRET_ADVERSARY_", "WORD_", sec.words.counter)
sec.label<- rep("0", 76046)
sec.words.matrix <- cbind(sec.title, sec.words.type, sec.words.id, sec.words, sec.label)
sec.words.df <- as.data.frame(sec.words.matrix, stringsAsFactors = FALSE)
stock <- c("Title", "Type", "ID", "Unit", "Label")
colnames(sec.words.df) <- c("Title", "Type", "ID", "Unit", "Label")
con <- dbConnect(RSQLite::SQLite(), ":memory:", dbname="textTable.sqlite")
dbWriteTable(con, "textTable", sec.words.df, append=TRUE, row.names=FALSE)
dbGetQuery(con, "SELECT * FROM textTable WHERE Type= 'word' AND Title='theSecretAdversary' LIMIT 10")
dbDisconnect(con)
# secret adversary finito.
|
# Plot 5: total PM2.5 emissions from motor vehicle sources in Baltimore City, 1999-2008
data1 <- readRDS("summarySCC_PM25.rds")              # NEI emissions observations
data2 <- readRDS("Source_Classification_Code.rds" )  # SCC lookup table
# Join on the shared "SCC" code column. The original merged by="data2",
# which is not a column in either table and errors at runtime.
both <- merge(data1, data2, by="SCC")
str(both)
library(ggplot2)
# Motor vehicles approximated as ON-ROAD sources within Baltimore City (fips 24510)
subsetdata1 <- data1[data1$fips=="24510" & data1$type=="ON-ROAD", ]
AggregatedTotalYear <- aggregate(Emissions ~ year, subsetdata1, sum)
png("Project.Plot5.png", width = 640, height = 640)
g <- ggplot(AggregatedTotalYear, aes(factor(year), Emissions))
g <- g + geom_bar(stat = "identity") + xlab("year") + ylab(expression("Total PM"[2.5]*" Emissions")) +
  ggtitle("Total Emissions from motor vehicle (type = ON-ROAD) in Baltimore City, Maryland (fips = 24510) from 1999 to 2008")
print(g)
dev.off()
|
/Project.Plot5.R
|
no_license
|
basakritu/Coursera_Exploratory_data_analysis_Project_2
|
R
| false
| false
| 696
|
r
|
# Plot 5: total PM2.5 emissions from motor vehicle sources in Baltimore City, 1999-2008
data1 <- readRDS("summarySCC_PM25.rds")              # NEI emissions observations
data2 <- readRDS("Source_Classification_Code.rds" )  # SCC lookup table
# Join on the shared "SCC" code column. The original merged by="data2",
# which is not a column in either table and errors at runtime.
both <- merge(data1, data2, by="SCC")
str(both)
library(ggplot2)
# Motor vehicles approximated as ON-ROAD sources within Baltimore City (fips 24510)
subsetdata1 <- data1[data1$fips=="24510" & data1$type=="ON-ROAD", ]
AggregatedTotalYear <- aggregate(Emissions ~ year, subsetdata1, sum)
png("Project.Plot5.png", width = 640, height = 640)
g <- ggplot(AggregatedTotalYear, aes(factor(year), Emissions))
g <- g + geom_bar(stat = "identity") + xlab("year") + ylab(expression("Total PM"[2.5]*" Emissions")) +
  ggtitle("Total Emissions from motor vehicle (type = ON-ROAD) in Baltimore City, Maryland (fips = 24510) from 1999 to 2008")
print(g)
dev.off()
|
#new file lalla
#and some more
|
/newfile.R
|
no_license
|
scelmendorf/test
|
R
| false
| false
| 31
|
r
|
#new file lalla
#and some more
|
#' Adaptive Maximum Margin Criterion
#'
#' Adaptive Maximum Margin Criterion (AMMC) is a supervised linear dimension reduction method.
#' The method uses different weights to characterize the different contributions of the
#' training samples embedded in MMC framework. With the choice of \code{a=0}, \code{b=0}, and
#' \code{lambda=1}, it is identical to standard MMC method.
#'
#' @param X an \eqn{(n\times p)} matrix or data frame whose rows are observations
#' and columns represent independent variables.
#' @param label a length-\eqn{n} vector of data class labels.
#' @param ndim an integer-valued target dimension.
#' @param preprocess an additional option for preprocessing the data.
#' Default is "center". See also \code{\link{aux.preprocess}} for more details.
#' @param a tuning parameter for between-class weight in \eqn{[0,\infty)}.
#' @param b tuning parameter for within-class weight in \eqn{[0,\infty)}.
#' @param lambda balance parameter for between-class and within-class scatter matrices in \eqn{(0,\infty)}.
#'
#' @return a named list containing
#' \describe{
#' \item{Y}{an \eqn{(n\times ndim)} matrix whose rows are embedded observations.}
#' \item{trfinfo}{a list containing information for out-of-sample prediction.}
#' \item{projection}{a \eqn{(p\times ndim)} whose columns are basis for projection.}
#' }
#'
#' @examples
#' ## load iris data
#' data(iris)
#' X     = as.matrix(iris[,1:4])
#' label = as.factor(iris$Species)
#'
#' ## try different lambda values
#' out1 = do.ammc(X, label, lambda=0.1)
#' out2 = do.ammc(X, label, lambda=1)
#' out3 = do.ammc(X, label, lambda=10)
#'
#' ## visualize
#' opar <- par(no.readonly=TRUE)
#' par(mfrow=c(1,3))
#' plot(out1$Y, main="AMMC::lambda=0.1", pch=19, cex=0.5, col=label)
#' plot(out2$Y, main="AMMC::lambda=1",   pch=19, cex=0.5, col=label)
#' plot(out3$Y, main="AMMC::lambda=10",  pch=19, cex=0.5, col=label)
#' par(opar)
#'
#' @references
#' \insertRef{lu_adaptive_2011}{Rdimtools}
#'
#' @seealso \code{\link{do.mmc}}
#' @author Kisung You
#' @rdname linear_AMMC
#' @export
do.ammc <- function(X, label, ndim=2, preprocess=c("center","scale","cscale","decorrelate","whiten"),
                    a=1.0, b=1.0, lambda=1.0){
  #------------------------------------------------------------------------
  ## PREPROCESSING
  # 1. data matrix
  aux.typecheck(X)
  n = nrow(X)
  p = ncol(X)
  # 2. label : check and return a de-factored vector
  #    For this method, no class may consist of a single observation.
  label  = check_label(label, n)
  ulabel = unique(label)
  for (i in seq_along(ulabel)){
    if (sum(label==ulabel[i])==1){
      # message typo fixed: "degerate" -> "degenerate"
      stop("* do.ammc : no degenerate class of size 1 is allowed.")
    }
  }
  if (any(is.na(label))||(any(is.infinite(label)))){
    stop("* Supervised Learning : any element of 'label' as NA or Inf will simply be considered as a class, not missing entries.")
  }
  # 3. ndim : target dimension must lie in [1, ncol(X))
  ndim = as.integer(ndim)
  if (!check_ndim(ndim,p)){stop("* do.ammc : 'ndim' is a positive integer in [1,#(covariates)).")}
  # 4. preprocess
  if (missing(preprocess)){
    algpreprocess = "center"
  } else {
    algpreprocess = match.arg(preprocess)
  }
  # 5. a, b, lambda : tuning parameters
  a      = as.double(a)
  b      = as.double(b)
  lambda = as.double(lambda)
  if (!check_NumMM(a,0,1e+10,compact=TRUE)){stop("* do.ammc : 'a' should be a nonnegative real number.")}
  if (!check_NumMM(b,0,1e+10,compact=TRUE)){stop("* do.ammc : 'b' should be a nonnegative real number.")}
  if (!check_NumMM(lambda,0,Inf,compact=FALSE)){stop("* do.ammc : 'lambda' should be a positive real number.")}

  #------------------------------------------------------------------------
  ## COMPUTATION : PRELIMINARY
  # 1. preprocess of data
  tmplist = aux.preprocess.hidden(X,type=algpreprocess,algtype="linear")
  trfinfo = tmplist$info
  pX      = tmplist$pX
  # 2. per-class and overall : mean vectors
  meanvectors   = ammc_meanvec(pX, label, ulabel)
  mean_Overall  = meanvectors$overall
  mean_PerClass = meanvectors$class
  # 3. adaptive scatter matrices
  adaSb = ammc_adaSb(mean_PerClass, a)
  adaSw = ammc_adaSw(pX, label, b)

  #------------------------------------------------------------------------
  ## COMPUTATION : MAIN COMPUTATION
  # top-ndim eigenvectors of (Sb - lambda*Sw) span the projection
  costS      = (adaSb - (lambda*adaSw))
  projection = aux.adjprojection(RSpectra::eigs(costS, ndim)$vectors)

  #------------------------------------------------------------------------
  ## RETURN THE RESULTS
  result = list()
  result$Y          = pX%*%projection
  result$trfinfo    = trfinfo
  result$projection = projection
  return(result)
}
# auxiliary for AMMC ------------------------------------------------------
#' Compute the overall and per-class mean vectors of a data matrix.
#' Returns a list with $overall (length-p vector) and $class
#' (nlabel x p matrix, one row per entry of ulabel).
#' @keywords internal
#' @noRd
ammc_meanvec <- function(X, label, ulabel){
  p      = ncol(X)
  nlabel = length(ulabel)
  # grand mean over all observations
  mean_Overall = as.vector(colMeans(X))
  mean_Class   = array(0,c(nlabel,p))
  for (i in seq_len(nlabel)){
    idxlabel = which(label==ulabel[i])
    # drop=FALSE keeps a one-row matrix so colMeans() also works for a
    # singleton class (the original X[idxlabel,] collapsed to a plain
    # vector and made colMeans() fail).
    mean_Class[i,] = as.vector(colMeans(X[idxlabel, , drop=FALSE]))
  }
  output = list()
  output$overall = mean_Overall
  output$class   = mean_Class
  return(output)
}
#' Adaptively weighted between-class scatter matrix: every ordered pair
#' of distinct class means contributes the outer product of their
#' difference, weighted by the pairwise distance raised to the power (-a).
#' @keywords internal
#' @noRd
ammc_adaSb <- function(mat, a){
  nclass = nrow(mat)
  pdim   = ncol(mat)
  Sb = array(0, c(pdim, pdim))
  for (ii in seq_len(nclass)){
    mi = as.vector(mat[ii,])
    for (jj in seq_len(nclass)){
      if (jj == ii){
        next  # a class mean is never paired with itself
      }
      mj   = as.vector(mat[jj,])
      dvec = mi - mj
      wval = (sqrt(sum(dvec*dvec)))^(-a)
      Sb   = Sb + wval*outer(dvec, dvec)
    }
  }
  return(Sb)
}
#' Adaptively weighted within-class scatter matrix: each observation
#' contributes the outer product of its deviation from its class mean,
#' weighted by the deviation norm raised to the power b.
#' @keywords internal
#' @noRd
ammc_adaSw <- function(X, label, b){
  n = nrow(X)
  p = ncol(X)
  if (length(label)!=n){
    # informative message instead of the original bare "* ammc_adaSw."
    stop("* ammc_adaSw : 'label' must have one entry per row of X.")
  }
  ulabel = unique(label)
  Sw = array(0,c(p,p))
  # iterate over classes directly; the original bound the class count to a
  # local named 'c', shadowing base::c
  for (i in seq_along(ulabel)){
    idxlabel = which(label==ulabel[i])
    ni = length(idxlabel)
    # drop=FALSE keeps a one-row matrix so a singleton class does not
    # collapse to a vector and break colMeans()
    mi = as.vector(colMeans(X[idxlabel, , drop=FALSE]))
    for (j in seq_len(ni)){
      cidx    = as.integer(idxlabel[j])
      cvec    = as.vector(X[cidx,])
      vecdiff = cvec-mi
      weight  = ((sqrt(sum(vecdiff*vecdiff)))^b)
      Sw = Sw + weight*outer(vecdiff,vecdiff)
    }
  }
  return(Sw)
}
|
/R/linear_AMMC.R
|
no_license
|
dungcv/Rdimtools
|
R
| false
| false
| 6,078
|
r
|
#' Adaptive Maximum Margin Criterion
#'
#' Adaptive Maximum Margin Criterion (AMMC) is a supervised linear dimension reduction method.
#' The method uses different weights to characterize the different contributions of the
#' training samples embedded in MMC framework. With the choice of \code{a=0}, \code{b=0}, and
#' \code{lambda=1}, it is identical to standard MMC method.
#'
#' @param X an \eqn{(n\times p)} matrix or data frame whose rows are observations
#' and columns represent independent variables.
#' @param label a length-\eqn{n} vector of data class labels.
#' @param ndim an integer-valued target dimension.
#' @param preprocess an additional option for preprocessing the data.
#' Default is "center". See also \code{\link{aux.preprocess}} for more details.
#' @param a tuning parameter for between-class weight in \eqn{[0,\infty)}.
#' @param b tuning parameter for within-class weight in \eqn{[0,\infty)}.
#' @param lambda balance parameter for between-class and within-class scatter matrices in \eqn{(0,\infty)}.
#'
#' @return a named list containing
#' \describe{
#' \item{Y}{an \eqn{(n\times ndim)} matrix whose rows are embedded observations.}
#' \item{trfinfo}{a list containing information for out-of-sample prediction.}
#' \item{projection}{a \eqn{(p\times ndim)} whose columns are basis for projection.}
#' }
#'
#' @examples
#' ## load iris data
#' data(iris)
#' X     = as.matrix(iris[,1:4])
#' label = as.factor(iris$Species)
#'
#' ## try different lambda values
#' out1 = do.ammc(X, label, lambda=0.1)
#' out2 = do.ammc(X, label, lambda=1)
#' out3 = do.ammc(X, label, lambda=10)
#'
#' ## visualize
#' opar <- par(no.readonly=TRUE)
#' par(mfrow=c(1,3))
#' plot(out1$Y, main="AMMC::lambda=0.1", pch=19, cex=0.5, col=label)
#' plot(out2$Y, main="AMMC::lambda=1",   pch=19, cex=0.5, col=label)
#' plot(out3$Y, main="AMMC::lambda=10",  pch=19, cex=0.5, col=label)
#' par(opar)
#'
#' @references
#' \insertRef{lu_adaptive_2011}{Rdimtools}
#'
#' @seealso \code{\link{do.mmc}}
#' @author Kisung You
#' @rdname linear_AMMC
#' @export
do.ammc <- function(X, label, ndim=2, preprocess=c("center","scale","cscale","decorrelate","whiten"),
                    a=1.0, b=1.0, lambda=1.0){
  #------------------------------------------------------------------------
  ## PREPROCESSING
  # 1. data matrix
  aux.typecheck(X)
  n = nrow(X)
  p = ncol(X)
  # 2. label : check and return a de-factored vector
  #    For this method, no class may consist of a single observation.
  label  = check_label(label, n)
  ulabel = unique(label)
  for (i in seq_along(ulabel)){
    if (sum(label==ulabel[i])==1){
      # message typo fixed: "degerate" -> "degenerate"
      stop("* do.ammc : no degenerate class of size 1 is allowed.")
    }
  }
  if (any(is.na(label))||(any(is.infinite(label)))){
    stop("* Supervised Learning : any element of 'label' as NA or Inf will simply be considered as a class, not missing entries.")
  }
  # 3. ndim : target dimension must lie in [1, ncol(X))
  ndim = as.integer(ndim)
  if (!check_ndim(ndim,p)){stop("* do.ammc : 'ndim' is a positive integer in [1,#(covariates)).")}
  # 4. preprocess
  if (missing(preprocess)){
    algpreprocess = "center"
  } else {
    algpreprocess = match.arg(preprocess)
  }
  # 5. a, b, lambda : tuning parameters
  a      = as.double(a)
  b      = as.double(b)
  lambda = as.double(lambda)
  if (!check_NumMM(a,0,1e+10,compact=TRUE)){stop("* do.ammc : 'a' should be a nonnegative real number.")}
  if (!check_NumMM(b,0,1e+10,compact=TRUE)){stop("* do.ammc : 'b' should be a nonnegative real number.")}
  if (!check_NumMM(lambda,0,Inf,compact=FALSE)){stop("* do.ammc : 'lambda' should be a positive real number.")}

  #------------------------------------------------------------------------
  ## COMPUTATION : PRELIMINARY
  # 1. preprocess of data
  tmplist = aux.preprocess.hidden(X,type=algpreprocess,algtype="linear")
  trfinfo = tmplist$info
  pX      = tmplist$pX
  # 2. per-class and overall : mean vectors
  meanvectors   = ammc_meanvec(pX, label, ulabel)
  mean_Overall  = meanvectors$overall
  mean_PerClass = meanvectors$class
  # 3. adaptive scatter matrices
  adaSb = ammc_adaSb(mean_PerClass, a)
  adaSw = ammc_adaSw(pX, label, b)

  #------------------------------------------------------------------------
  ## COMPUTATION : MAIN COMPUTATION
  # top-ndim eigenvectors of (Sb - lambda*Sw) span the projection
  costS      = (adaSb - (lambda*adaSw))
  projection = aux.adjprojection(RSpectra::eigs(costS, ndim)$vectors)

  #------------------------------------------------------------------------
  ## RETURN THE RESULTS
  result = list()
  result$Y          = pX%*%projection
  result$trfinfo    = trfinfo
  result$projection = projection
  return(result)
}
# auxiliary for AMMC ------------------------------------------------------
#' Compute the overall and per-class mean vectors of a data matrix.
#' Returns a list with $overall (length-p vector) and $class
#' (nlabel x p matrix, one row per entry of ulabel).
#' @keywords internal
#' @noRd
ammc_meanvec <- function(X, label, ulabel){
  p      = ncol(X)
  nlabel = length(ulabel)
  # grand mean over all observations
  mean_Overall = as.vector(colMeans(X))
  mean_Class   = array(0,c(nlabel,p))
  for (i in seq_len(nlabel)){
    idxlabel = which(label==ulabel[i])
    # drop=FALSE keeps a one-row matrix so colMeans() also works for a
    # singleton class (the original X[idxlabel,] collapsed to a plain
    # vector and made colMeans() fail).
    mean_Class[i,] = as.vector(colMeans(X[idxlabel, , drop=FALSE]))
  }
  output = list()
  output$overall = mean_Overall
  output$class   = mean_Class
  return(output)
}
#' Adaptively weighted between-class scatter matrix: every ordered pair
#' of distinct class means contributes the outer product of their
#' difference, weighted by the pairwise distance raised to the power (-a).
#' @keywords internal
#' @noRd
ammc_adaSb <- function(mat, a){
  nclass = nrow(mat)
  pdim   = ncol(mat)
  Sb = array(0, c(pdim, pdim))
  for (ii in seq_len(nclass)){
    mi = as.vector(mat[ii,])
    for (jj in seq_len(nclass)){
      if (jj == ii){
        next  # a class mean is never paired with itself
      }
      mj   = as.vector(mat[jj,])
      dvec = mi - mj
      wval = (sqrt(sum(dvec*dvec)))^(-a)
      Sb   = Sb + wval*outer(dvec, dvec)
    }
  }
  return(Sb)
}
#' Adaptively weighted within-class scatter matrix: each observation
#' contributes the outer product of its deviation from its class mean,
#' weighted by the deviation norm raised to the power b.
#' @keywords internal
#' @noRd
ammc_adaSw <- function(X, label, b){
  n = nrow(X)
  p = ncol(X)
  if (length(label)!=n){
    # informative message instead of the original bare "* ammc_adaSw."
    stop("* ammc_adaSw : 'label' must have one entry per row of X.")
  }
  ulabel = unique(label)
  Sw = array(0,c(p,p))
  # iterate over classes directly; the original bound the class count to a
  # local named 'c', shadowing base::c
  for (i in seq_along(ulabel)){
    idxlabel = which(label==ulabel[i])
    ni = length(idxlabel)
    # drop=FALSE keeps a one-row matrix so a singleton class does not
    # collapse to a vector and break colMeans()
    mi = as.vector(colMeans(X[idxlabel, , drop=FALSE]))
    for (j in seq_len(ni)){
      cidx    = as.integer(idxlabel[j])
      cvec    = as.vector(X[cidx,])
      vecdiff = cvec-mi
      weight  = ((sqrt(sum(vecdiff*vecdiff)))^b)
      Sw = Sw + weight*outer(vecdiff,vecdiff)
    }
  }
  return(Sw)
}
|
#' Brewery Location and Home/Rental Information
#'
#' This data provides a very small sample of different breweries in the U.S
#' with some home listing information and typical rental information in the
#' same areas as breweries. The home/rental information is all for the month
#' of October, 2019.
#'
#' @format The brewery sales rentals data frame contains 31 observations and 18 variables.
#' \describe{
#'   \item{Zipcode}{Zipcode of breweries and homes/rentals}
#'   \item{Address}{Address of breweries}
#'   \item{City}{City of breweries and homes/rentals}
#'   \item{State}{State of breweries and homes/rentals}
#'   \item{Brewery Type}{Type of brewery}
#'   \item{Brewery}{Brewery name}
#'   \item{Description}{Description of brewery}
#'   \item{Year Established}{Year the brewery was established}
#'   \item{Homes for Sale}{Number of homes for sale by zipcode}
#'   \item{Average Listing Price}{Average listing price of homes by zipcode}
#'   \item{Price Reduced Count}{Number of homes that reduced sale price}
#'   \item{Median Day on Market}{Median days a home is listed on the market}
#'   \item{Typical Rent Price}{Typical rent price by zipcode}
#'   \item{Phone}{Phone number of brewery}
#'   \item{Website}{Website of brewery}
#'   \item{Brewery Latitude}{Latitude of brewery location}
#'   \item{Brewery Longitude}{Longitude of brewery location}
#'   \item{Open to Public}{If the brewery is open to the public}
#' }
#'@source \url{https://www.brewerydb.com/developers/docs}
#'@source \url{https://www.zillow.com/research/data/}
#'@source \url{https://www.realtor.com/research/data/}
"brewery_sales_rentals"
|
/BrewHome/R/data.R
|
no_license
|
ecwalters112/BrewHome-Package
|
R
| false
| false
| 1,624
|
r
|
#' Brewery Location and Home/Rental Information
#'
#' This data provides a very small sample of different breweries in the U.S
#' with some home listing information and typical rental information in the
#' same areas as breweries. The home/rental information is all for the month
#' of October, 2019.
#'
#' @format The brewery sales rentals data frame contains 31 observations and 18 variables.
#' \describe{
#'   \item{Zipcode}{Zipcode of breweries and homes/rentals}
#'   \item{Address}{Address of breweries}
#'   \item{City}{City of breweries and homes/rentals}
#'   \item{State}{State of breweries and homes/rentals}
#'   \item{Brewery Type}{Type of brewery}
#'   \item{Brewery}{Brewery name}
#'   \item{Description}{Description of brewery}
#'   \item{Year Established}{Year the brewery was established}
#'   \item{Homes for Sale}{Number of homes for sale by zipcode}
#'   \item{Average Listing Price}{Average listing price of homes by zipcode}
#'   \item{Price Reduced Count}{Number of homes that reduced sale price}
#'   \item{Median Day on Market}{Median days a home is listed on the market}
#'   \item{Typical Rent Price}{Typical rent price by zipcode}
#'   \item{Phone}{Phone number of brewery}
#'   \item{Website}{Website of brewery}
#'   \item{Brewery Latitude}{Latitude of brewery location}
#'   \item{Brewery Longitude}{Longitude of brewery location}
#'   \item{Open to Public}{If the brewery is open to the public}
#' }
#'@source \url{https://www.brewerydb.com/developers/docs}
#'@source \url{https://www.zillow.com/research/data/}
#'@source \url{https://www.realtor.com/research/data/}
"brewery_sales_rentals"
|
# Load the data set and the manipulation/plotting packages.
library(gapminder)
library(dplyr)
library(ggplot2)

# Restrict the data to the 1952 observations.
gapminder_1952 <- filter(gapminder, year == 1952)

# Scatter plot comparing pop and lifeExp on a log-10 population axis,
# faceted by continent.
ggplot(gapminder_1952, aes(x = pop, y = lifeExp)) +
  geom_point() +
  scale_x_log10() +
  facet_wrap(~ continent)
|
/DCamp/Intro Tidyverse/Data Visualization/Creating a subgraph facet_wrap().R
|
no_license
|
shinichimatsuda/R_Training
|
R
| false
| false
| 293
|
r
|
# Load the data set and the manipulation/plotting packages.
library(gapminder)
library(dplyr)
library(ggplot2)

# Restrict the data to the 1952 observations.
gapminder_1952 <- filter(gapminder, year == 1952)

# Scatter plot comparing pop and lifeExp on a log-10 population axis,
# faceted by continent.
ggplot(gapminder_1952, aes(x = pop, y = lifeExp)) +
  geom_point() +
  scale_x_log10() +
  facet_wrap(~ continent)
|
make_CO2_ratios_of_A_and_gs_plots_DT <- function() {
### read input
pilDF<-read.csv("data/glasshouse2/Pilularis_Phys.csv",sep=",", header=TRUE)
popDF<-read.csv("data/glasshouse2/Populnea_Phys.csv",sep=",", header=TRUE)
### day list
d1 <- unique(pilDF$Day)
d2 <- unique(popDF$Day)
### water treatment
w <- c("D", "ND")
### plot DF
plotDF1 <- data.frame(rep(w, length(d1)),
rep(d1, each=2), NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA)
plotDF2 <- data.frame(rep(w, length(d2)),
rep(d2, each=2), NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA)
colnames(plotDF1) <- colnames(plotDF2) <- c("Trt", "Day",
"amb_Adaily", "ele_Adaily",
"amb_Aearly", "ele_Aearly",
"amb_Alate", "ele_Alate",
"amb_GSdaily", "ele_GSdaily",
"amb_GSearly", "ele_GSearly",
"amb_GSlate", "ele_GSlate",
"amb_AdailySD", "ele_AdailySD",
"amb_AearlySD", "ele_AearlySD",
"amb_AlateSD", "ele_AlateSD",
"amb_GSdailySD", "ele_GSdailySD",
"amb_GSearlySD", "ele_GSearlySD",
"amb_GSlateSD", "ele_GSlateSD",
"amb_AdailyN", "ele_AdailyN",
"amb_AearlyN", "ele_AearlyN",
"amb_AlateN", "ele_AlateN")
for (i in d1) {
# A
plotDF1$amb_Adaily[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$Adaily[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_Aearly[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$Aearly[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_Alate[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$Alate[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$ele_Adaily[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$Adaily[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_Aearly[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$Aearly[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_Alate[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$Alate[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$amb_Adaily[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$Adaily[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_Aearly[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$Aearly[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_Alate[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$Alate[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$ele_Adaily[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$Adaily[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_Aearly[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$Aearly[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_Alate[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$Alate[pilDF$Trt=="PILEND"&pilDF$Day==i]
### gs
plotDF1$amb_GSdaily[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gsDaily[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_GSearly[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gsearly[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_GSlate[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gslate[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$ele_GSdaily[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gsDaily[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_GSearly[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gsearly[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_GSlate[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gslate[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$amb_GSdaily[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gsDaily[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_GSearly[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gsearly[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_GSlate[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gslate[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$ele_GSdaily[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gsDaily[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_GSearly[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gsearly[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_GSlate[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gslate[pilDF$Trt=="PILEND"&pilDF$Day==i]
# A SD
plotDF1$amb_AdailySD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$AdailySD[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_AearlySD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$AearlySD[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_AlateSD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$AlateSD[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$ele_AdailySD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$AdailySD[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_AearlySD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$AearlySD[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_AlateSD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$AlateSD[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$amb_AdailySD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$AdailySD[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_AearlySD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$AearlySD[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_AlateSD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$AlateSD[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$ele_AdailySD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$AdailySD[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_AearlySD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$AearlySD[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_AlateSD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$AlateSD[pilDF$Trt=="PILEND"&pilDF$Day==i]
# A n
plotDF1$amb_AdailyN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.4[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_AearlyN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.5[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_AlateN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.6[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$ele_AdailyN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.4[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_AearlyN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.5[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_AlateN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.6[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$amb_AdailyN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.4[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_AearlyN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.5[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_AlateN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.6[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$ele_AdailyN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.4[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_AearlyN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.5[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_AlateN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.6[pilDF$Trt=="PILEND"&pilDF$Day==i]
# gs SD
plotDF1$amb_GSdailySD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gsDailySD[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_GSearlySD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gsearlySD[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_GSlateSD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gslateSD[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$ele_GSdailySD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gsDailySD[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_GSearlySD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gsearlySD[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_GSlateSD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gslateSD[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$amb_GSdailySD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gsDailySD[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_GSearlySD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gsearlySD[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_GSlateSD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gslateSD[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$ele_GSdailySD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gsDailySD[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_GSearlySD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gsearlySD[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_GSlateSD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gslateSD[pilDF$Trt=="PILEND"&pilDF$Day==i]
# gs n
plotDF1$amb_GSdailyN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.7[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_GSearlyN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.8[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_GSlateN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.9[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$ele_GSdailyN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.7[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_GSearlyN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.8[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_GSlateN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.9[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$amb_GSdailyN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.7[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_GSearlyN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.8[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_GSlateN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.9[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$ele_GSdailyN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.7[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_GSearlyN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.8[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_GSlateN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.9[pilDF$Trt=="PILEND"&pilDF$Day==i]
}
for (i in d2) {
# A
plotDF2$amb_Adaily[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$Adaily[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$Adaily[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$amb_Aearly[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$Aearly[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$Aearly[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$amb_Alate[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$Alate[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$Alate[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$ele_Adaily[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$Adaily[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$Adaily[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$ele_Aearly[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$Aearly[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$Aearly[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$ele_Alate[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$Alate[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$Alate[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$amb_Adaily[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$Adaily[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$Adaily[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$amb_Aearly[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$Aearly[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$Aearly[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$amb_Alate[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$Alate[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$Alate[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$ele_Adaily[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$Adaily[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$Adaily[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
plotDF2$ele_Aearly[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$Aearly[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$Aearly[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
plotDF2$ele_Alate[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$Alate[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$Alate[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
### gs
plotDF2$amb_GSdaily[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$gsDaily[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$gsDaily[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$amb_GSearly[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$gsearly[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$gsearly[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$amb_GSlate[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$gslate[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$gslate[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$ele_GSdaily[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$gsDaily[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$gsDaily[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$ele_GSearly[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$gsearly[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$gsearly[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$ele_GSlate[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$gslate[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$gslate[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$amb_GSdaily[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$gsDaily[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$gsDaily[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$amb_GSearly[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$gsearly[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$gsearly[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$amb_GSlate[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$gslate[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$gslate[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$ele_GSdaily[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$gsDaily[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$gsDaily[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
plotDF2$ele_GSearly[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$gsearly[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$gsearly[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
plotDF2$ele_GSlate[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$gslate[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$gslate[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
# A SD
plotDF2$amb_AdailySD[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$AdailySD[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$AdailySD[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$amb_AearlySD[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$AearlySD[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$AearlySD[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$amb_AlateSD[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$AlateSD[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$AlateSD[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$ele_AdailySD[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$AdailySD[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$AdailySD[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$ele_AearlySD[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$AearlySD[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$AearlySD[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$ele_AlateSD[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$AlateSD[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$AlateSD[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$amb_AdailySD[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$AdailySD[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$AdailySD[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$amb_AearlySD[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$AearlySD[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$AearlySD[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$amb_AlateSD[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$AlateSD[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$AlateSD[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$ele_AdailySD[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$AdailySD[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$AdailySD[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
plotDF2$ele_AearlySD[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$AearlySD[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$AearlySD[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
plotDF2$ele_AlateSD[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$AlateSD[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$AlateSD[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
# A n
plotDF2$amb_AdailyN[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$n.4[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$n.4[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$amb_AearlyN[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$n.5[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$n.5[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$amb_AlateN[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$n.6[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$n.6[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$ele_AdailyN[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$n.4[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$n.4[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$ele_AearlyN[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$n.5[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$n.5[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$ele_AlateN[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$n.6[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$n.6[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$amb_AdailyN[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$n.4[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$n.4[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$amb_AearlyN[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$n.5[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$n.5[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$amb_AlateN[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$n.6[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$n.6[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$ele_AdailyN[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$n.4[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$n.4[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
plotDF2$ele_AearlyN[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$n.5[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$n.5[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
plotDF2$ele_AlateN[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$n.6[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$n.6[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
# gs SD
plotDF2$amb_GSdailySD[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$gsDailySD[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$gsDailySD[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$amb_GSearlySD[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$gsearlySD[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$gsearlySD[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$amb_GSlateSD[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$gslateSD[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$gslateSD[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$ele_GSdailySD[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$gsDailySD[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$gsDailySD[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$ele_GSearlySD[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$gsearlySD[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$gsearlySD[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$ele_GSlateSD[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$gslateSD[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$gslateSD[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$amb_GSdailySD[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$gsDailySD[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$gsDailySD[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$amb_GSearlySD[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$gsearlySD[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$gsearlySD[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$amb_GSlateSD[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$gslateSD[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$gslateSD[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$ele_GSdailySD[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$gsDailySD[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$gsDailySD[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
plotDF2$ele_GSearlySD[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$gsearlySD[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$gsearlySD[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
plotDF2$ele_GSlateSD[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$gslateSD[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$gslateSD[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
# gs n
plotDF2$amb_GSdailyN[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$n.7[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$n.7[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$amb_GSearlyN[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$n.8[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$n.8[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$amb_GSlateN[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$n.9[popDF$Trt=="POPAD"&popDF$Day==i])==1,
as.numeric(popDF$n.9[popDF$Trt=="POPAD"&popDF$Day==i]),
NA)
plotDF2$ele_GSdailyN[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$n.7[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$n.7[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$ele_GSearlyN[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$n.8[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$n.8[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$ele_GSlateN[plotDF2$Trt=="D"&plotDF2$Day==i] <- ifelse(length(popDF$n.9[popDF$Trt=="POPED"&popDF$Day==i])==1,
as.numeric(popDF$n.9[popDF$Trt=="POPED"&popDF$Day==i]),
NA)
plotDF2$amb_GSdailyN[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$n.7[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$n.7[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$amb_GSearlyN[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$n.8[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$n.8[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$amb_GSlateN[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$n.9[popDF$Trt=="POPAND"&popDF$Day==i])==1,
as.numeric(popDF$n.9[popDF$Trt=="POPAND"&popDF$Day==i]),
NA)
plotDF2$ele_GSdailyN[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$n.7[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$n.7[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
plotDF2$ele_GSearlyN[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$n.8[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$n.8[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
plotDF2$ele_GSlateN[plotDF2$Trt=="ND"&plotDF2$Day==i] <- ifelse(length(popDF$n.9[popDF$Trt=="POPEND"&popDF$Day==i])==1,
as.numeric(popDF$n.9[popDF$Trt=="POPEND"&popDF$Day==i]),
NA)
}
### ignore NAs
### Drop day x treatment rows that still contain NA, i.e. days with no
### matching record in the source physiology data frames.
plotDF1 <- plotDF1[complete.cases(plotDF1),]
plotDF2 <- plotDF2[complete.cases(plotDF2),]
### Coerce every column of plotDF2 to numeric.
### NOTE(review): sapply(..., as.numeric) also hits the Trt column; if Trt
### is character ("D"/"ND") this produces NA, while the gsub() calls below
### expect the factor codes 1/2 instead -- confirm Trt's type at this point.
plotDF2 <- as.data.frame(sapply(plotDF2, as.numeric))
plotDF2$Trt <- gsub(1, "D", plotDF2$Trt)
plotDF2$Trt <- gsub(2, "ND", plotDF2$Trt)
plotDF2$Trt <- as.character(plotDF2$Trt)
### Calculate CO2 signal
### CO2 response ratio = elevated / ambient, computed per averaging window
### (daily, early morning, late/midday) for A (photosynthesis) and
### GS (stomatal conductance), for both species data frames.
plotDF1$CO2_Adaily <- plotDF1$ele_Adaily / plotDF1$amb_Adaily
plotDF1$CO2_Aearly <- plotDF1$ele_Aearly / plotDF1$amb_Aearly
plotDF1$CO2_Alate <- plotDF1$ele_Alate / plotDF1$amb_Alate
plotDF2$CO2_Adaily <- plotDF2$ele_Adaily / plotDF2$amb_Adaily
plotDF2$CO2_Aearly <- plotDF2$ele_Aearly / plotDF2$amb_Aearly
plotDF2$CO2_Alate <- plotDF2$ele_Alate / plotDF2$amb_Alate
plotDF1$CO2_GSdaily <- plotDF1$ele_GSdaily / plotDF1$amb_GSdaily
plotDF1$CO2_GSearly <- plotDF1$ele_GSearly / plotDF1$amb_GSearly
plotDF1$CO2_GSlate <- plotDF1$ele_GSlate / plotDF1$amb_GSlate
plotDF2$CO2_GSdaily <- plotDF2$ele_GSdaily / plotDF2$amb_GSdaily
plotDF2$CO2_GSearly <- plotDF2$ele_GSearly / plotDF2$amb_GSearly
plotDF2$CO2_GSlate <- plotDF2$ele_GSlate / plotDF2$amb_GSlate
### calculate standard error for the ratios
### (retained but disabled: pooled SE of each ratio via error propagation
### from the per-treatment SDs and sample sizes; the error bars that would
### use these values are also commented out in the plotting code below)
#plotDF1$CO2_AdailySD <- sqrt((plotDF1$amb_AdailySD^2)/(plotDF1$amb_AdailyN) +
# (plotDF1$ele_AdailySD^2)/(plotDF1$ele_AdailyN))
#plotDF1$CO2_AearlySD <- sqrt((plotDF1$amb_AearlySD^2)/(plotDF1$amb_AearlyN) +
# (plotDF1$ele_AearlySD^2)/(plotDF1$ele_AearlyN))
#plotDF1$CO2_AlateSD <- sqrt((plotDF1$amb_AlateSD^2)/(plotDF1$amb_AlateN) +
# (plotDF1$ele_AlateSD^2)/(plotDF1$ele_AlateN))
#
#plotDF1$CO2_GSdailySD <- sqrt((plotDF1$amb_GSdailySD^2)/(plotDF1$amb_GSdailyN) +
# (plotDF1$ele_GSdailySD^2)/(plotDF1$ele_GSdailyN))
#plotDF1$CO2_GSearlySD <- sqrt((plotDF1$amb_GSearlySD^2)/(plotDF1$amb_GSearlyN) +
# (plotDF1$ele_GSearlySD^2)/(plotDF1$ele_GSearlyN))
#plotDF1$CO2_GSlateSD <- sqrt((plotDF1$amb_GSlateSD^2)/(plotDF1$amb_GSlateN) +
# (plotDF1$ele_GSlateSD^2)/(plotDF1$ele_GSlateN))
#
#plotDF2$CO2_AdailySD <- sqrt((plotDF2$amb_AdailySD^2)/(plotDF2$amb_AdailyN) +
# (plotDF2$ele_AdailySD^2)/(plotDF2$ele_AdailyN))
#plotDF2$CO2_AearlySD <- sqrt((plotDF2$amb_AearlySD^2)/(plotDF2$amb_AearlyN) +
# (plotDF2$ele_AearlySD^2)/(plotDF2$ele_AearlyN))
#plotDF2$CO2_AlateSD <- sqrt((plotDF2$amb_AlateSD^2)/(plotDF2$amb_AlateN) +
# (plotDF2$ele_AlateSD^2)/(plotDF2$ele_AlateN))
#
#plotDF2$CO2_GSdailySD <- sqrt((plotDF2$amb_GSdailySD^2)/(plotDF2$amb_GSdailyN) +
# (plotDF2$ele_GSdailySD^2)/(plotDF2$ele_GSdailyN))
#plotDF2$CO2_GSearlySD <- sqrt((plotDF2$amb_GSearlySD^2)/(plotDF2$amb_GSearlyN) +
# (plotDF2$ele_GSearlySD^2)/(plotDF2$ele_GSearlyN))
#plotDF2$CO2_GSlateSD <- sqrt((plotDF2$amb_GSlateSD^2)/(plotDF2$amb_GSlateN) +
# (plotDF2$ele_GSlateSD^2)/(plotDF2$ele_GSlateN))
### plotting
### Panel p1: daily A-ratio for E. pilularis (plotDF1), one line per water
### treatment (D = droughted, ND = well-watered). Panels p2-p6 below reuse
### this exact template, varying only the y column, title, and y limits.
### A dashed horizontal line at y = 1 marks "no CO2 effect".
p1 <- ggplot(plotDF1, aes(x=Day, y=CO2_Adaily, group=Trt)) +
geom_point(aes(col=Trt, fill=Trt), pch=21, size=2)+
geom_line(aes(col=Trt))+
### error bars disabled together with the SE computation above
#geom_errorbar(aes(col=Trt, x=Day,
# ymin=CO2_Adaily-CO2_AdailySD, ymax=CO2_Adaily+CO2_AdailySD),
# width=0.2)+
theme_linedraw() +
geom_hline(yintercept=1, col="black", lty=2)+
theme(panel.grid.minor=element_blank(),
axis.text.x=element_text(size=12),
axis.title.x=element_blank(),
axis.text.y=element_text(size=12),
axis.title.y=element_text(size=14),
legend.text=element_text(size=14),
legend.title=element_text(size=16),
panel.grid.major=element_blank(),
# legend suppressed per-panel; a shared legend is assembled later
legend.position="none",
legend.box = 'horizontal',
legend.box.just = 'left',
plot.title = element_text(size=16, face="bold",
hjust = 0.5))+
ylab(expression(paste(CO[2]* " ratio " * A[sat])))+
scale_color_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
scale_fill_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
ggtitle("Daily")+
# NOTE(review): ylim() drops (not clips) observations outside [0, 8];
# ratios above 8 would silently vanish from the line -- confirm intended.
ylim(0, 8)+
xlab("Day")+
guides(fill = guide_legend(override.aes = list(shape = c(21, 21),
fill = c("red3", "blue2"),
col = c("red3", "blue2"))))+
scale_x_continuous(limits=c(0, 10),
breaks=c(0, 2, 4, 6, 8, 10))
p2 <- ggplot(plotDF1, aes(x=Day, y=CO2_Aearly, group=Trt)) +
geom_point(aes(col=Trt, fill=Trt), pch=21, size=2)+
geom_line(aes(col=Trt))+
theme_linedraw() +
geom_hline(yintercept=1, col="black", lty=2)+
theme(panel.grid.minor=element_blank(),
axis.text.x=element_text(size=12),
axis.title.x=element_blank(),
axis.text.y=element_text(size=12),
axis.title.y=element_blank(),
legend.text=element_text(size=14),
legend.title=element_text(size=16),
panel.grid.major=element_blank(),
legend.position="none",
legend.box = 'horizontal',
legend.box.just = 'left',
plot.title = element_text(size=16, face="bold",
hjust = 0.5))+
ylab(expression(paste(CO[2]* " ratio " * A[sat])))+
scale_color_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
scale_fill_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
ggtitle("Morning")+
ylim(0, 8)+
xlab("Day")+
guides(fill = guide_legend(override.aes = list(shape = c(21, 21),
fill = c("red3", "blue2"),
col = c("red3", "blue2"))))+
scale_x_continuous(limits=c(0, 10),
breaks=c(0, 2, 4, 6, 8, 10))
p3 <- ggplot(plotDF1, aes(x=Day, y=CO2_Alate, group=Trt)) +
geom_point(aes(col=Trt, fill=Trt), pch=21, size=2)+
geom_line(aes(col=Trt))+
theme_linedraw() +
geom_hline(yintercept=1, col="black", lty=2)+
theme(panel.grid.minor=element_blank(),
axis.text.x=element_text(size=12),
axis.title.x=element_blank(),
axis.text.y=element_text(size=12),
axis.title.y=element_blank(),
legend.text=element_text(size=14),
legend.title=element_text(size=16),
panel.grid.major=element_blank(),
legend.position="none",
legend.box = 'horizontal',
legend.box.just = 'left',
plot.title = element_text(size=16, face="bold",
hjust = 0.5))+
ylab(expression(paste(CO[2]* " ratio " * A[sat])))+
scale_color_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
scale_fill_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
ggtitle("Midday")+
ylim(0, 8)+
xlab("Day")+
guides(fill = guide_legend(override.aes = list(shape = c(21, 21),
fill = c("red3", "blue2"),
col = c("red3", "blue2"))))+
scale_x_continuous(limits=c(0, 10),
breaks=c(0, 2, 4, 6, 8, 10))
p4 <- ggplot(plotDF1, aes(x=Day, y=CO2_GSdaily, group=Trt)) +
geom_point(aes(col=Trt, fill=Trt), pch=21, size=2)+
geom_line(aes(col=Trt))+
geom_hline(yintercept=1, col="black", lty=2)+
theme_linedraw() +
theme(panel.grid.minor=element_blank(),
axis.text.x=element_text(size=12),
axis.title.x=element_blank(),
axis.text.y=element_text(size=12),
axis.title.y=element_text(size=14),
legend.text=element_text(size=14),
legend.title=element_text(size=16),
panel.grid.major=element_blank(),
legend.position="none",
legend.box = 'horizontal',
legend.box.just = 'left',
plot.title = element_text(size=16, face="bold",
hjust = 0.5))+
ylab(expression(paste(CO[2]* " ratio " * g[s])))+
scale_color_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
scale_fill_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
ylim(0, 5)+
xlab("Day")+
guides(fill = guide_legend(override.aes = list(shape = c(21, 21),
fill = c("red3", "blue2"),
col = c("red3", "blue2"))))+
scale_x_continuous(limits=c(0, 10),
breaks=c(0, 2, 4, 6, 8, 10))
p5 <- ggplot(plotDF1, aes(x=Day, y=CO2_GSearly, group=Trt)) +
geom_point(aes(col=Trt, fill=Trt), pch=21, size=2)+
geom_line(aes(col=Trt))+
geom_hline(yintercept=1, col="black", lty=2)+
theme_linedraw() +
theme(panel.grid.minor=element_blank(),
axis.text.x=element_text(size=12),
axis.title.x=element_blank(),
axis.text.y=element_text(size=12),
axis.title.y=element_blank(),
legend.text=element_text(size=14),
legend.title=element_text(size=16),
panel.grid.major=element_blank(),
legend.position="none",
legend.box = 'horizontal',
legend.box.just = 'left',
plot.title = element_text(size=16, face="bold",
hjust = 0.5))+
ylab(expression(paste(CO[2]* " ratio " * g[s])))+
scale_color_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
scale_fill_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
ylim(0, 5)+
xlab("Day")+
guides(fill = guide_legend(override.aes = list(shape = c(21, 21),
fill = c("red3", "blue2"),
col = c("red3", "blue2"))))+
scale_x_continuous(limits=c(0, 10),
breaks=c(0, 2, 4, 6, 8, 10))
p6 <- ggplot(plotDF1, aes(x=Day, y=CO2_GSlate, group=Trt)) +
geom_point(aes(col=Trt, fill=Trt), pch=21, size=2)+
geom_line(aes(col=Trt))+
theme_linedraw() +
geom_hline(yintercept=1, col="black", lty=2)+
theme(panel.grid.minor=element_blank(),
axis.text.x=element_text(size=12),
axis.title.x=element_blank(),
axis.text.y=element_text(size=12),
axis.title.y=element_blank(),
legend.text=element_text(size=14),
legend.title=element_text(size=16),
panel.grid.major=element_blank(),
legend.position="none",
legend.box = 'horizontal',
legend.box.just = 'left',
plot.title = element_text(size=16, face="bold",
hjust = 0.5))+
ylab(expression(paste(CO[2]* " ratio " * g[s])))+
scale_color_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
scale_fill_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
ylim(0, 5)+
xlab("Day")+
guides(fill = guide_legend(override.aes = list(shape = c(21, 21),
fill = c("red3", "blue2"),
col = c("red3", "blue2"))))+
scale_x_continuous(limits=c(0, 10),
breaks=c(0, 2, 4, 6, 8, 10))
### output
### Assemble a shared bottom legend (extracted from p1, which is otherwise
### legend-free) and a 2x3 grid of the six panels, then write the
### E. pilularis figure to <outdir>/F10.1.CO2_ratio_pilularis.pdf.
### get_legend()/plot_grid() presumably come from cowplot -- confirm imports.
combined_legend <- get_legend(p1 + theme(legend.position="bottom",
legend.box = 'vertical',
legend.box.just = 'left'))
combined_plots <- plot_grid(p1, p2, p3, p4, p5, p6,
labels=c("(a)", "(b)", "(c)", "(d)", "(e)", "(f)"),
ncol=3, align="h", axis = "l",
rel_widths=c(1,0.9),
label_x=0.85, label_y=0.85)
pdf(paste0(outdir, "F10.1.CO2_ratio_pilularis.pdf"), width=14, height=8)
# NOTE(review): inside a function a bare plot_grid() call is not
# auto-printed; wrap in print() or the PDF may come out empty -- confirm.
plot_grid(combined_plots, combined_legend,
ncol=1, rel_heights=c(1, 0.1))
dev.off()
### plotting
p1 <- ggplot(plotDF2, aes(x=Day, y=CO2_Adaily, group=Trt)) +
geom_point(aes(col=Trt, fill=Trt), pch=21, size=2)+
geom_line(aes(col=Trt))+
geom_hline(yintercept=1, col="black", lty=2)+
theme_linedraw() +
theme(panel.grid.minor=element_blank(),
axis.text.x=element_text(size=12),
axis.title.x=element_blank(),
axis.text.y=element_text(size=12),
axis.title.y=element_text(size=14),
legend.text=element_text(size=14),
legend.title=element_text(size=16),
panel.grid.major=element_blank(),
legend.position="none",
legend.box = 'horizontal',
legend.box.just = 'left',
plot.title = element_text(size=16, face="bold",
hjust = 0.5))+
ylab(expression(paste(CO[2]* " ratio " * A[sat])))+
scale_color_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
scale_fill_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
ggtitle("Daily")+
ylim(0, 4)+
xlab("Day")+
guides(fill = guide_legend(override.aes = list(shape = c(21, 21),
fill = c("red3", "blue2"),
col = c("red3", "blue2"))))+
scale_x_continuous(limits=c(0, 40),
breaks=c(0, 5, 10, 20, 30, 40))
p2 <- ggplot(plotDF2, aes(x=Day, y=CO2_Aearly, group=Trt)) +
geom_point(aes(col=Trt, fill=Trt), pch=21, size=2)+
geom_line(aes(col=Trt))+
theme_linedraw() +
geom_hline(yintercept=1, col="black", lty=2)+
theme(panel.grid.minor=element_blank(),
axis.text.x=element_text(size=12),
axis.title.x=element_blank(),
axis.text.y=element_text(size=12),
axis.title.y=element_blank(),
legend.text=element_text(size=14),
legend.title=element_text(size=16),
panel.grid.major=element_blank(),
legend.position="none",
legend.box = 'horizontal',
legend.box.just = 'left',
plot.title = element_text(size=16, face="bold",
hjust = 0.5))+
ylab(expression(paste(CO[2]* " ratio " * A[sat])))+
scale_color_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
scale_fill_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
ggtitle("Morning")+
ylim(0, 4)+
xlab("Day")+
guides(fill = guide_legend(override.aes = list(shape = c(21, 21),
fill = c("red3", "blue2"),
col = c("red3", "blue2"))))+
scale_x_continuous(limits=c(0, 40),
breaks=c(0, 5, 10, 20, 30, 40))
p3 <- ggplot(plotDF2, aes(x=Day, y=CO2_Alate, group=Trt)) +
geom_point(aes(col=Trt, fill=Trt), pch=21, size=2)+
geom_line(aes(col=Trt))+
theme_linedraw() +
geom_hline(yintercept=1, col="black", lty=2)+
theme(panel.grid.minor=element_blank(),
axis.text.x=element_text(size=12),
axis.title.x=element_blank(),
axis.text.y=element_text(size=12),
axis.title.y=element_blank(),
legend.text=element_text(size=14),
legend.title=element_text(size=16),
panel.grid.major=element_blank(),
legend.position="none",
legend.box = 'horizontal',
legend.box.just = 'left',
plot.title = element_text(size=16, face="bold",
hjust = 0.5))+
ylab(expression(paste(CO[2]* " ratio " * A[sat])))+
scale_color_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
scale_fill_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
ggtitle("Midday")+
ylim(0, 4)+
xlab("Day")+
guides(fill = guide_legend(override.aes = list(shape = c(21, 21),
fill = c("red3", "blue2"),
col = c("red3", "blue2"))))+
scale_x_continuous(limits=c(0, 40),
breaks=c(0, 5, 10, 20, 30, 40))
p4 <- ggplot(plotDF2, aes(x=Day, y=CO2_GSdaily, group=Trt)) +
geom_point(aes(col=Trt, fill=Trt), pch=21, size=2)+
geom_line(aes(col=Trt))+
theme_linedraw() +
geom_hline(yintercept=1, col="black", lty=2)+
theme(panel.grid.minor=element_blank(),
axis.text.x=element_text(size=12),
axis.title.x=element_blank(),
axis.text.y=element_text(size=12),
axis.title.y=element_text(size=14),
legend.text=element_text(size=14),
legend.title=element_text(size=16),
panel.grid.major=element_blank(),
legend.position="none",
legend.box = 'horizontal',
legend.box.just = 'left',
plot.title = element_text(size=16, face="bold",
hjust = 0.5))+
ylab(expression(paste(CO[2]* " ratio " * g[s])))+
scale_color_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
scale_fill_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
ylim(0, 3)+
xlab("Day")+
guides(fill = guide_legend(override.aes = list(shape = c(21, 21),
fill = c("red3", "blue2"),
col = c("red3", "blue2"))))+
scale_x_continuous(limits=c(0, 40),
breaks=c(0, 5, 10, 20, 30, 40))
p5 <- ggplot(plotDF2, aes(x=Day, y=CO2_GSearly, group=Trt)) +
geom_point(aes(col=Trt, fill=Trt), pch=21, size=2)+
geom_line(aes(col=Trt))+
theme_linedraw() +
geom_hline(yintercept=1, col="black", lty=2)+
theme(panel.grid.minor=element_blank(),
axis.text.x=element_text(size=12),
axis.title.x=element_blank(),
axis.text.y=element_text(size=12),
axis.title.y=element_blank(),
legend.text=element_text(size=14),
legend.title=element_text(size=16),
panel.grid.major=element_blank(),
legend.position="none",
legend.box = 'horizontal',
legend.box.just = 'left',
plot.title = element_text(size=16, face="bold",
hjust = 0.5))+
ylab(expression(paste(CO[2]* " ratio " * g[s])))+
scale_color_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
scale_fill_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
ylim(0, 3)+
xlab("Day")+
guides(fill = guide_legend(override.aes = list(shape = c(21, 21),
fill = c("red3", "blue2"),
col = c("red3", "blue2"))))+
scale_x_continuous(limits=c(0, 40),
breaks=c(0, 5, 10, 20, 30, 40))
p6 <- ggplot(plotDF2, aes(x=Day, y=CO2_GSlate, group=Trt)) +
geom_point(aes(col=Trt, fill=Trt), pch=21, size=2)+
geom_line(aes(col=Trt))+
theme_linedraw() +
geom_hline(yintercept=1, col="black", lty=2)+
theme(panel.grid.minor=element_blank(),
axis.text.x=element_text(size=12),
axis.title.x=element_blank(),
axis.text.y=element_text(size=12),
axis.title.y=element_blank(),
legend.text=element_text(size=14),
legend.title=element_text(size=16),
panel.grid.major=element_blank(),
legend.position="none",
legend.box = 'horizontal',
legend.box.just = 'left',
plot.title = element_text(size=16, face="bold",
hjust = 0.5))+
ylab(expression(paste(CO[2]* " ratio " * g[s])))+
scale_color_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
scale_fill_manual(name="",
limits=c("D", "ND"),
labels=c("Droughted", "Well-watered"),
values=c("red3", "blue2"),
guide=guide_legend(nrow=1))+
ylim(0, 3)+
xlab("Day")+
guides(fill = guide_legend(override.aes = list(shape = c(21, 21),
fill = c("red3", "blue2"),
col = c("red3", "blue2"))))+
scale_x_continuous(limits=c(0, 40),
breaks=c(0, 5, 10, 20, 30, 40))
### output
### Same assembly as for the pilularis figure: shared bottom legend plus a
### 2x3 panel grid, written to <outdir>/F10.2.CO2_ratio_populnea.pdf.
### get_legend()/plot_grid() presumably come from cowplot -- confirm imports.
combined_legend <- get_legend(p1 + theme(legend.position="bottom",
legend.box = 'vertical',
legend.box.just = 'left'))
combined_plots <- plot_grid(p1, p2, p3, p4, p5, p6,
labels=c("(a)", "(b)", "(c)", "(d)", "(e)", "(f)"),
ncol=3, align="h", axis = "l",
rel_widths=c(1,0.9),
label_x=0.85, label_y=0.85)
pdf(paste0(outdir, "F10.2.CO2_ratio_populnea.pdf"), width=14, height=8)
# NOTE(review): inside a function a bare plot_grid() call is not
# auto-printed; wrap in print() or the PDF may come out empty -- confirm.
plot_grid(combined_plots, combined_legend,
ncol=1, rel_heights=c(1, 0.1))
dev.off()
}
|
/scripts/DT/make_CO2_ratios_of_A_and_gs_plots_DT.R
|
no_license
|
mingkaijiang/CO2_x_Drought_Glasshouse_Experiment
|
R
| false
| false
| 62,914
|
r
|
### Build per-day elevated/ambient CO2 response ratios of photosynthesis (A)
### and stomatal conductance (gs) for two species (E. pilularis, E. populnea)
### under droughted (D) and well-watered (ND) treatments, and save one
### six-panel PDF figure per species (F10.1 / F10.2) under `outdir`.
### No arguments; reads the two Phys CSVs from data/glasshouse2/.
make_CO2_ratios_of_A_and_gs_plots_DT <- function() {
### read input
pilDF<-read.csv("data/glasshouse2/Pilularis_Phys.csv",sep=",", header=TRUE)
popDF<-read.csv("data/glasshouse2/Populnea_Phys.csv",sep=",", header=TRUE)
### day list
### Measurement days differ between the two species' data sets.
d1 <- unique(pilDF$Day)
d2 <- unique(popDF$Day)
### water treatment
w <- c("D", "ND")
### plot DF
### Skeletons: one row per (treatment, day) combination; 30 NA columns are
### filled in the per-day loops below.
plotDF1 <- data.frame(rep(w, length(d1)),
rep(d1, each=2), NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA)
plotDF2 <- data.frame(rep(w, length(d2)),
rep(d2, each=2), NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA)
### 32 names for the 32 pre-allocated columns: means and SDs for A and GS
### (daily/early/late, ambient/elevated) plus A sample sizes. The GS sample
### size columns (e.g. amb_GSdailyN) are not pre-named here; they are
### created later by direct assignment inside the loops.
colnames(plotDF1) <- colnames(plotDF2) <- c("Trt", "Day",
"amb_Adaily", "ele_Adaily",
"amb_Aearly", "ele_Aearly",
"amb_Alate", "ele_Alate",
"amb_GSdaily", "ele_GSdaily",
"amb_GSearly", "ele_GSearly",
"amb_GSlate", "ele_GSlate",
"amb_AdailySD", "ele_AdailySD",
"amb_AearlySD", "ele_AearlySD",
"amb_AlateSD", "ele_AlateSD",
"amb_GSdailySD", "ele_GSdailySD",
"amb_GSearlySD", "ele_GSearlySD",
"amb_GSlateSD", "ele_GSlateSD",
"amb_AdailyN", "ele_AdailyN",
"amb_AearlyN", "ele_AearlyN",
"amb_AlateN", "ele_AlateN")
for (i in d1) {
# A
plotDF1$amb_Adaily[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$Adaily[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_Aearly[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$Aearly[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_Alate[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$Alate[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$ele_Adaily[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$Adaily[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_Aearly[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$Aearly[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_Alate[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$Alate[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$amb_Adaily[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$Adaily[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_Aearly[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$Aearly[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_Alate[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$Alate[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$ele_Adaily[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$Adaily[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_Aearly[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$Aearly[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_Alate[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$Alate[pilDF$Trt=="PILEND"&pilDF$Day==i]
### gs
plotDF1$amb_GSdaily[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gsDaily[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_GSearly[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gsearly[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_GSlate[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gslate[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$ele_GSdaily[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gsDaily[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_GSearly[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gsearly[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_GSlate[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gslate[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$amb_GSdaily[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gsDaily[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_GSearly[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gsearly[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_GSlate[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gslate[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$ele_GSdaily[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gsDaily[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_GSearly[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gsearly[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_GSlate[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gslate[pilDF$Trt=="PILEND"&pilDF$Day==i]
# A SD
plotDF1$amb_AdailySD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$AdailySD[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_AearlySD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$AearlySD[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_AlateSD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$AlateSD[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$ele_AdailySD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$AdailySD[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_AearlySD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$AearlySD[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_AlateSD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$AlateSD[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$amb_AdailySD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$AdailySD[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_AearlySD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$AearlySD[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_AlateSD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$AlateSD[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$ele_AdailySD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$AdailySD[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_AearlySD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$AearlySD[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_AlateSD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$AlateSD[pilDF$Trt=="PILEND"&pilDF$Day==i]
# A n
plotDF1$amb_AdailyN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.4[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_AearlyN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.5[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_AlateN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.6[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$ele_AdailyN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.4[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_AearlyN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.5[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_AlateN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.6[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$amb_AdailyN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.4[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_AearlyN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.5[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_AlateN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.6[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$ele_AdailyN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.4[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_AearlyN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.5[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_AlateN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.6[pilDF$Trt=="PILEND"&pilDF$Day==i]
# gs SD
plotDF1$amb_GSdailySD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gsDailySD[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_GSearlySD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gsearlySD[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_GSlateSD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gslateSD[pilDF$Trt=="PILAD"&pilDF$Day==i]
# --- tail of the Day loop copying the pilularis summary (pilDF) into plotDF1 ---
# gs standard deviations: elevated/droughted ("PILED") and the two
# well-watered treatments ("PILAND" = ambient CO2, "PILEND" = elevated CO2).
plotDF1$ele_GSdailySD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gsDailySD[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_GSearlySD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gsearlySD[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_GSlateSD[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$gslateSD[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$amb_GSdailySD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gsDailySD[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_GSearlySD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gsearlySD[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_GSlateSD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gslateSD[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$ele_GSdailySD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gsDailySD[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_GSearlySD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gsearlySD[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_GSlateSD[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$gslateSD[pilDF$Trt=="PILEND"&pilDF$Day==i]
# gs n
# Sample sizes for the gs means; pilDF columns n.7/n.8/n.9 hold the counts for
# the daily / morning / midday gs summaries respectively (presumably produced
# by an upstream aggregation call -- TODO confirm column numbering).
plotDF1$amb_GSdailyN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.7[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_GSearlyN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.8[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$amb_GSlateN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.9[pilDF$Trt=="PILAD"&pilDF$Day==i]
plotDF1$ele_GSdailyN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.7[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_GSearlyN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.8[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$ele_GSlateN[plotDF1$Trt=="D"&plotDF1$Day==i] <- pilDF$n.9[pilDF$Trt=="PILED"&pilDF$Day==i]
plotDF1$amb_GSdailyN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.7[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_GSearlyN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.8[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$amb_GSlateN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.9[pilDF$Trt=="PILAND"&pilDF$Day==i]
plotDF1$ele_GSdailyN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.7[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_GSearlyN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.8[pilDF$Trt=="PILEND"&pilDF$Day==i]
plotDF1$ele_GSlateN[plotDF1$Trt=="ND"&plotDF1$Day==i] <- pilDF$n.9[pilDF$Trt=="PILEND"&pilDF$Day==i]
}
### Fill plotDF2 with the populnea (popDF) treatment means, SDs and sample
### sizes, matching on Day and treatment.
### popDF codes the combined treatment as e.g. "POPAD" (ambient CO2,
### droughted) or "POPEND" (elevated CO2, well-watered); plotDF2 splits this
### into a CO2 prefix on the column name ("amb"/"ele") and a water-treatment
### code in the Trt column ("D"/"ND").
### A value is copied only when exactly one matching popDF row exists for that
### Day x treatment combination; otherwise NA is stored -- this preserves the
### ifelse(length(...) == 1, as.numeric(...), NA) guard of the original
### copy-pasted version, which this loop replaces.
### NOTE(review): column names are matched exactly via [[ ]]; the original
### used `$`, which partial-matches -- assumed equivalent because the same
### exact names are used when pilDF is processed above.
trtMap2 <- list(c("amb", "D", "POPAD"),
                c("ele", "D", "POPED"),
                c("amb", "ND", "POPAND"),
                c("ele", "ND", "POPEND"))
### destination column suffix in plotDF2 -> source column in popDF
### (n.4..n.6 are the sample-size columns of the A summaries, n.7..n.9 those
### of the gs summaries, as in the pilDF loop above)
varMap2 <- c(Adaily="Adaily", Aearly="Aearly", Alate="Alate",
             AdailySD="AdailySD", AearlySD="AearlySD", AlateSD="AlateSD",
             AdailyN="n.4", AearlyN="n.5", AlateN="n.6",
             GSdaily="gsDaily", GSearly="gsearly", GSlate="gslate",
             GSdailySD="gsDailySD", GSearlySD="gsearlySD", GSlateSD="gslateSD",
             GSdailyN="n.7", GSearlyN="n.8", GSlateN="n.9")
for (i in d2) {
    for (tm in trtMap2) {
        destRows <- plotDF2$Trt==tm[2] & plotDF2$Day==i
        srcRows <- popDF$Trt==tm[3] & popDF$Day==i
        for (v in names(varMap2)) {
            srcVal <- popDF[[varMap2[[v]]]][srcRows]
            ## exactly one source row -> copy it; zero or several -> NA
            plotDF2[[paste0(tm[1], "_", v)]][destRows] <-
                if (length(srcVal)==1) as.numeric(srcVal) else NA
        }
    }
}
### ignore NAs
### Keep only complete rows (drops any Day x Trt row that is still missing a
### mean, SD or n value after the fill loops above).
plotDF1 <- plotDF1[complete.cases(plotDF1),]
plotDF2 <- plotDF2[complete.cases(plotDF2),]
### Coerce every plotDF2 column to numeric.  NOTE(review): this assumes Trt is
### a factor at this point, so as.numeric() yields its level codes (1 = "D",
### 2 = "ND"); if Trt were already character this would produce NAs -- confirm
### against the code that builds plotDF2.
plotDF2 <- as.data.frame(sapply(plotDF2, as.numeric))
### Map the level codes back to the treatment labels.
plotDF2$Trt <- gsub(1, "D", plotDF2$Trt)
plotDF2$Trt <- gsub(2, "ND", plotDF2$Trt)
plotDF2$Trt <- as.character(plotDF2$Trt)
### Calculate CO2 signal: elevated / ambient ratio for each A and gs metric,
### for both species data frames.  Column creation order (Adaily, Aearly,
### Alate, GSdaily, GSearly, GSlate) matches the original explicit version.
for (ratioVar in c("Adaily", "Aearly", "Alate",
                   "GSdaily", "GSearly", "GSlate")) {
    eleCol <- paste0("ele_", ratioVar)
    ambCol <- paste0("amb_", ratioVar)
    plotDF1[[paste0("CO2_", ratioVar)]] <- plotDF1[[eleCol]] / plotDF1[[ambCol]]
    plotDF2[[paste0("CO2_", ratioVar)]] <- plotDF2[[eleCol]] / plotDF2[[ambCol]]
}
### calculate standard error for the ratios
#plotDF1$CO2_AdailySD <- sqrt((plotDF1$amb_AdailySD^2)/(plotDF1$amb_AdailyN) +
# (plotDF1$ele_AdailySD^2)/(plotDF1$ele_AdailyN))
#plotDF1$CO2_AearlySD <- sqrt((plotDF1$amb_AearlySD^2)/(plotDF1$amb_AearlyN) +
# (plotDF1$ele_AearlySD^2)/(plotDF1$ele_AearlyN))
#plotDF1$CO2_AlateSD <- sqrt((plotDF1$amb_AlateSD^2)/(plotDF1$amb_AlateN) +
# (plotDF1$ele_AlateSD^2)/(plotDF1$ele_AlateN))
#
#plotDF1$CO2_GSdailySD <- sqrt((plotDF1$amb_GSdailySD^2)/(plotDF1$amb_GSdailyN) +
# (plotDF1$ele_GSdailySD^2)/(plotDF1$ele_GSdailyN))
#plotDF1$CO2_GSearlySD <- sqrt((plotDF1$amb_GSearlySD^2)/(plotDF1$amb_GSearlyN) +
# (plotDF1$ele_GSearlySD^2)/(plotDF1$ele_GSearlyN))
#plotDF1$CO2_GSlateSD <- sqrt((plotDF1$amb_GSlateSD^2)/(plotDF1$amb_GSlateN) +
# (plotDF1$ele_GSlateSD^2)/(plotDF1$ele_GSlateN))
#
#plotDF2$CO2_AdailySD <- sqrt((plotDF2$amb_AdailySD^2)/(plotDF2$amb_AdailyN) +
# (plotDF2$ele_AdailySD^2)/(plotDF2$ele_AdailyN))
#plotDF2$CO2_AearlySD <- sqrt((plotDF2$amb_AearlySD^2)/(plotDF2$amb_AearlyN) +
# (plotDF2$ele_AearlySD^2)/(plotDF2$ele_AearlyN))
#plotDF2$CO2_AlateSD <- sqrt((plotDF2$amb_AlateSD^2)/(plotDF2$amb_AlateN) +
# (plotDF2$ele_AlateSD^2)/(plotDF2$ele_AlateN))
#
#plotDF2$CO2_GSdailySD <- sqrt((plotDF2$amb_GSdailySD^2)/(plotDF2$amb_GSdailyN) +
# (plotDF2$ele_GSdailySD^2)/(plotDF2$ele_GSdailyN))
#plotDF2$CO2_GSearlySD <- sqrt((plotDF2$amb_GSearlySD^2)/(plotDF2$amb_GSearlyN) +
# (plotDF2$ele_GSearlySD^2)/(plotDF2$ele_GSearlyN))
#plotDF2$CO2_GSlateSD <- sqrt((plotDF2$amb_GSlateSD^2)/(plotDF2$amb_GSlateN) +
# (plotDF2$ele_GSlateSD^2)/(plotDF2$ele_GSlateN))
### plotting
### The six panels differ only in the plotted column, axis titles, panel
### title and axis limits, so build them with one helper instead of six
### copy-pasted ~40-line ggplot chains.
### makeRatioPlot: one panel of the elevated:ambient CO2 ratio time course.
### Draws one point+line series per water treatment plus a dashed reference
### line at ratio = 1; panel legends are suppressed (a shared legend is
### extracted from p1 below).  Requires ggplot2 >= 3.0 for the .data pronoun.
###   df         - plotting data, one row per Day x Trt
###   yvar       - name of the ratio column to plot on the y axis
###   ylabExpr   - y-axis title (plotmath expression)
###   plotTitle  - panel title, or NULL for no title
###   ymax       - upper y-axis limit (lower limit is 0)
###   xlimits    - x-axis range
###   xbreaks    - x-axis tick positions
###   showYTitle - display the y-axis title (TRUE for leftmost panels only)
makeRatioPlot <- function(df, yvar, ylabExpr, plotTitle, ymax,
                          xlimits, xbreaks, showYTitle) {
    yTitleTheme <- if (showYTitle) element_text(size=14) else element_blank()
    p <- ggplot(df, aes(x=Day, y=.data[[yvar]], group=Trt)) +
        geom_point(aes(col=Trt, fill=Trt), pch=21, size=2) +
        geom_line(aes(col=Trt)) +
        geom_hline(yintercept=1, col="black", lty=2) +
        theme_linedraw() +
        theme(panel.grid.minor=element_blank(),
              axis.text.x=element_text(size=12),
              axis.title.x=element_blank(),
              axis.text.y=element_text(size=12),
              axis.title.y=yTitleTheme,
              legend.text=element_text(size=14),
              legend.title=element_text(size=16),
              panel.grid.major=element_blank(),
              legend.position="none",
              legend.box='horizontal',
              legend.box.just='left',
              plot.title=element_text(size=16, face="bold", hjust=0.5)) +
        ylab(ylabExpr) +
        scale_color_manual(name="",
                           limits=c("D", "ND"),
                           labels=c("Droughted", "Well-watered"),
                           values=c("red3", "blue2"),
                           guide=guide_legend(nrow=1)) +
        scale_fill_manual(name="",
                          limits=c("D", "ND"),
                          labels=c("Droughted", "Well-watered"),
                          values=c("red3", "blue2"),
                          guide=guide_legend(nrow=1)) +
        ylim(0, ymax) +
        xlab("Day") +
        guides(fill=guide_legend(override.aes=list(shape=c(21, 21),
                                                   fill=c("red3", "blue2"),
                                                   col=c("red3", "blue2")))) +
        scale_x_continuous(limits=xlimits, breaks=xbreaks)
    if (!is.null(plotTitle)) {
        p <- p + ggtitle(plotTitle)
    }
    p
}
ylabA <- expression(paste(CO[2]* " ratio " * A[sat]))
ylabGS <- expression(paste(CO[2]* " ratio " * g[s]))
### Pilularis panels: x axis runs to day 10; A ratios capped at 8, gs at 5.
p1 <- makeRatioPlot(plotDF1, "CO2_Adaily", ylabA, "Daily", 8,
                    c(0, 10), c(0, 2, 4, 6, 8, 10), TRUE)
p2 <- makeRatioPlot(plotDF1, "CO2_Aearly", ylabA, "Morning", 8,
                    c(0, 10), c(0, 2, 4, 6, 8, 10), FALSE)
p3 <- makeRatioPlot(plotDF1, "CO2_Alate", ylabA, "Midday", 8,
                    c(0, 10), c(0, 2, 4, 6, 8, 10), FALSE)
p4 <- makeRatioPlot(plotDF1, "CO2_GSdaily", ylabGS, NULL, 5,
                    c(0, 10), c(0, 2, 4, 6, 8, 10), TRUE)
p5 <- makeRatioPlot(plotDF1, "CO2_GSearly", ylabGS, NULL, 5,
                    c(0, 10), c(0, 2, 4, 6, 8, 10), FALSE)
p6 <- makeRatioPlot(plotDF1, "CO2_GSlate", ylabGS, NULL, 5,
                    c(0, 10), c(0, 2, 4, 6, 8, 10), FALSE)
### output
### Extract one shared legend (the panels themselves have legends suppressed)
### and arrange the six panels in a 3 x 2 grid above it.
combined_legend <- get_legend(p1 + theme(legend.position="bottom",
                                         legend.box = 'vertical',
                                         legend.box.just = 'left'))
combined_plots <- plot_grid(p1, p2, p3, p4, p5, p6,
                            labels=c("(a)", "(b)", "(c)", "(d)", "(e)", "(f)"),
                            ncol=3, align="h", axis = "l",
                            rel_widths=c(1,0.9),
                            label_x=0.85, label_y=0.85)
pdf(paste0(outdir, "F10.1.CO2_ratio_pilularis.pdf"), width=14, height=8)
plot_grid(combined_plots, combined_legend,
          ncol=1, rel_heights=c(1, 0.1))
dev.off()
### plotting
### Same six-panel layout as the pilularis figure above, built with the same
### helper (the definition here is identical, so redefinition is harmless if
### both figure sections are present).
### makeRatioPlot: one panel of the elevated:ambient CO2 ratio time course.
### Draws one point+line series per water treatment plus a dashed reference
### line at ratio = 1; panel legends are suppressed (a shared legend is
### extracted from p1 below).  Requires ggplot2 >= 3.0 for the .data pronoun.
###   df         - plotting data, one row per Day x Trt
###   yvar       - name of the ratio column to plot on the y axis
###   ylabExpr   - y-axis title (plotmath expression)
###   plotTitle  - panel title, or NULL for no title
###   ymax       - upper y-axis limit (lower limit is 0)
###   xlimits    - x-axis range
###   xbreaks    - x-axis tick positions
###   showYTitle - display the y-axis title (TRUE for leftmost panels only)
makeRatioPlot <- function(df, yvar, ylabExpr, plotTitle, ymax,
                          xlimits, xbreaks, showYTitle) {
    yTitleTheme <- if (showYTitle) element_text(size=14) else element_blank()
    p <- ggplot(df, aes(x=Day, y=.data[[yvar]], group=Trt)) +
        geom_point(aes(col=Trt, fill=Trt), pch=21, size=2) +
        geom_line(aes(col=Trt)) +
        geom_hline(yintercept=1, col="black", lty=2) +
        theme_linedraw() +
        theme(panel.grid.minor=element_blank(),
              axis.text.x=element_text(size=12),
              axis.title.x=element_blank(),
              axis.text.y=element_text(size=12),
              axis.title.y=yTitleTheme,
              legend.text=element_text(size=14),
              legend.title=element_text(size=16),
              panel.grid.major=element_blank(),
              legend.position="none",
              legend.box='horizontal',
              legend.box.just='left',
              plot.title=element_text(size=16, face="bold", hjust=0.5)) +
        ylab(ylabExpr) +
        scale_color_manual(name="",
                           limits=c("D", "ND"),
                           labels=c("Droughted", "Well-watered"),
                           values=c("red3", "blue2"),
                           guide=guide_legend(nrow=1)) +
        scale_fill_manual(name="",
                          limits=c("D", "ND"),
                          labels=c("Droughted", "Well-watered"),
                          values=c("red3", "blue2"),
                          guide=guide_legend(nrow=1)) +
        ylim(0, ymax) +
        xlab("Day") +
        guides(fill=guide_legend(override.aes=list(shape=c(21, 21),
                                                   fill=c("red3", "blue2"),
                                                   col=c("red3", "blue2")))) +
        scale_x_continuous(limits=xlimits, breaks=xbreaks)
    if (!is.null(plotTitle)) {
        p <- p + ggtitle(plotTitle)
    }
    p
}
ylabA <- expression(paste(CO[2]* " ratio " * A[sat]))
ylabGS <- expression(paste(CO[2]* " ratio " * g[s]))
### Populnea panels: x axis runs to day 40; A ratios capped at 4, gs at 3.
p1 <- makeRatioPlot(plotDF2, "CO2_Adaily", ylabA, "Daily", 4,
                    c(0, 40), c(0, 5, 10, 20, 30, 40), TRUE)
p2 <- makeRatioPlot(plotDF2, "CO2_Aearly", ylabA, "Morning", 4,
                    c(0, 40), c(0, 5, 10, 20, 30, 40), FALSE)
p3 <- makeRatioPlot(plotDF2, "CO2_Alate", ylabA, "Midday", 4,
                    c(0, 40), c(0, 5, 10, 20, 30, 40), FALSE)
p4 <- makeRatioPlot(plotDF2, "CO2_GSdaily", ylabGS, NULL, 3,
                    c(0, 40), c(0, 5, 10, 20, 30, 40), TRUE)
p5 <- makeRatioPlot(plotDF2, "CO2_GSearly", ylabGS, NULL, 3,
                    c(0, 40), c(0, 5, 10, 20, 30, 40), FALSE)
p6 <- makeRatioPlot(plotDF2, "CO2_GSlate", ylabGS, NULL, 3,
                    c(0, 40), c(0, 5, 10, 20, 30, 40), FALSE)
### output
### Shared legend below the 3 x 2 panel grid, as for the pilularis figure.
combined_legend <- get_legend(p1 + theme(legend.position="bottom",
                                         legend.box = 'vertical',
                                         legend.box.just = 'left'))
combined_plots <- plot_grid(p1, p2, p3, p4, p5, p6,
                            labels=c("(a)", "(b)", "(c)", "(d)", "(e)", "(f)"),
                            ncol=3, align="h", axis = "l",
                            rel_widths=c(1,0.9),
                            label_x=0.85, label_y=0.85)
pdf(paste0(outdir, "F10.2.CO2_ratio_populnea.pdf"), width=14, height=8)
plot_grid(combined_plots, combined_legend,
          ncol=1, rel_heights=c(1, 0.1))
dev.off()
}
|
# Print the integers 1..10 as a comma-separated list terminated by a newline,
# e.g. "1, 2, 3, ..., 10".  Same output as the original element-by-element
# loop, built in one vectorized step.
cat(paste(1:10, collapse = ", "), "\n", sep = "")
|
/Programming Language Detection/Experiment-2/Dataset/Train/R/loops-n-plus-one-half-2.r
|
no_license
|
dlaststark/machine-learning-projects
|
R
| false
| false
| 93
|
r
|
# Print the integers 1..10 as a comma-separated list terminated by a newline,
# e.g. "1, 2, 3, ..., 10".  Same output as the original element-by-element
# loop, built in one vectorized step.
cat(paste(1:10, collapse = ", "), "\n", sep = "")
|
##
## sendmailR.r - send email from within R
##
## Author:
## Olaf Mersmann (OME) <olafm@datensplitter.net>
##
## Format a timestamp as an RFC 2822 date string, e.g.
## "Thu, 02 Jan 2020 03:04:05 -0000", for use in a Date: mail header.
##
## Day and month names must be English regardless of the user's locale, so
## LC_TIME is temporarily switched to "C" and restored on exit.
##
## time - a POSIXct timestamp; defaults to the current time.
## Returns a length-1 character vector; the time is rendered in UTC with the
## numeric zone "-0000" hard-coded in the format string.
.rfc2822_date <- function(time=Sys.time()) {
  lc <- Sys.getlocale("LC_TIME")
  on.exit(Sys.setlocale("LC_TIME", lc))
  Sys.setlocale("LC_TIME", "C")
  ## The original also passed use.tz=TRUE, which is not an argument of
  ## strftime (the real one is spelled "usetz") and was silently swallowed
  ## by "..."; it is dropped here as a misleading no-op.
  strftime(time, format="%a, %d %b %Y %H:%M:%S -0000", tz="UTC")
}
## Write an RFC 2822 message (headers plus a multipart/mixed MIME body) to
## the connection `sock`, using CRLF line endings throughout.
##
## `msg` may be a single part or a list of parts; each part is either a
## "mime_part" object (delegated to .write_mime_part) or a plain character
## vector, which is sent as text/plain for backward compatibility.  Other
## part types are silently skipped.
.write_mail <- function(headers, msg, sock) {
if (!is.list(msg))
msg <- list(msg)
## Generate MIME headers:
## boundary = 256 random bits packed into 32 raw bytes, rendered as hex.
boundary <- paste(packBits(sample(0:1, 256, TRUE)), collapse="")
headers$`MIME-Version` <- "1.0"
headers$`Content-Type` <- sprintf("multipart/mixed; boundary=\"%s\"", boundary)
## Header block, then the blank line separating headers from the body.
writeLines(paste(names(headers),
unlist(headers), sep=": "),
sock, sep="\r\n")
writeLines("", sock, sep="\r\n")
## Preamble shown by non-MIME-aware mail readers.
writeLines("This is a message with multiple parts in MIME format.", sock, sep="\r\n")
for (part in msg) {
writeLines(sprintf("--%s", boundary), sock, sep="\r\n")
if (inherits(part, "mime_part"))
.write_mime_part(part, sock)
else if (is.character(part)) { ## Legacy support for plain old string
## writeLines(sprintf("--%s", boundary), sock, sep="\r\n")
writeLines("Content-Type: text/plain; format=flowed\r\n", sock, sep="\r\n")
writeLines(part, sock, sep="\r\n")
}
}
## Closing "--boundary--" marker terminates the multipart body.
writeLines(sprintf("--%s--", boundary), sock, sep="\r\n")
}
## Submit a message to an SMTP server by speaking the protocol directly over
## a blocking socket: HELO, MAIL FROM, one RCPT TO per recipient, DATA (the
## message body from .write_mail), then QUIT.  The socket is always closed
## on exit via on.exit().
.smtp_submit_mail <- function(server, port, headers, msg, verbose=FALSE) {
stopifnot(is.character(headers$From), is.character(headers$To))
## Read server replies until one starts with the expected status code.
## NOTE(review): `code` is a character string, so comparisons against the
## numeric literals coerce the numbers to strings and compare
## lexicographically; this happens to be correct for 3-digit SMTP codes.
## The scalar conjunction would conventionally use `&&` rather than `&`.
wait_for <- function(lcode) {
done <- FALSE
while (!done) {
line <- readLines(con=sock, n=1)
if (verbose)
message("<< ", line)
code <- substring(line, 1, 3)
msg <- substring(line, 5)
if (code == lcode) {
done <- TRUE
} else {
if (code >= 500 & code <= 599)
stop("SMTP Error: ", msg)
else
message("Unknown SMTP code: ", code)
}
}
return(list(code=code, msg=msg))
}
## Send one command line (CRLF-terminated) and block until the expected
## reply code arrives.
send_command <- function(cmd, code) {
if (verbose)
message(">> ", cmd)
writeLines(cmd, sock, sep="\r\n")
wait_for(code)
}
## Element 4 of Sys.info() is the node (host) name, used in the greeting.
nodename <- Sys.info()[4]
sock <- socketConnection(host=server,
port=port,
blocking=TRUE)
if (!isOpen(sock))
stop(sprintf("Could not connect to smtp server '%s' on port '%i'.",
server, port))
on.exit(close(sock))
## << 220 <hostname> ESMTP
wait_for(220)
## >> HELO localhost
## << 250 mail.statistik.uni-dortmund.de
send_command(paste("HELO ", nodename), 250)
## >> MAIL FROM: <foo@bah.com>
## << 250 2.1.0 Ok
send_command(paste("MAIL FROM: ", headers$From), 250)
## >> RCPT TO: <bah@baz.org>
## << 250 2.1.5 Ok
lapply(headers$To, function(rcpt){
send_command(paste("RCPT TO: ", rcpt), 250)
})
## >> DATA
## << 354 blah fu
send_command("DATA", 354)
## >> <actual message + headers + .>
if (verbose)
message(">> <message data>")
.write_mail(headers, msg, sock)
## A lone "." on its own line terminates the DATA section.
writeLines(".", sock, sep="\r\n")
wait_for(250)
## << 250 2.0.0 Ok: queued as XXXXXXXX
## >> QUIT
## << 221 2.0.0 Bye
send_command("QUIT", 221)
}
#' Simplistic sendmail utility for R. Uses SMTP to submit a message
#' to a local SMTP server.
#'
#' @title Send mail from within R
#'
#' @param from From whom the mail message is (RFC2822 style address).
#' @param to Recipient of the message (valid RFC2822 style address).
#' @param subject Subject line of message.
#' @param msg Body text of message or a list containing
#'   \code{\link{mime_part}} objects.
#' @param \dots ...
#' @param headers Any other headers to include.
#' @param control List of SMTP server settings. Valid values are the
#'   possible options for \code{\link{sendmail_options}}.
#'
#' @seealso \code{\link{mime_part}} for a way to add attachments.
#' @keywords utilities
#'
#' @examples
#' \dontrun{
#' from <- sprintf("<sendmailR@@\\%s>", Sys.info()[4])
#' to <- "\"Olaf Mersmann\"<olafm@@datensplitter.net>"
#' subject <- "Hello from R"
#' body <- list("It works!", mime_part(iris))
#' sendmail(from, to, subject, body,
#'          control=list(smtpServer="ASPMX.L.GOOGLE.COM"))
#' }
#'
#' @export
sendmail <- function(from, to, subject, msg, ...,
                     headers=list(),
                     control=list()) {
  ## Argument checks:
  stopifnot(is.list(headers), is.list(control))
  if (length(from) != 1)
    stop("'from' must be a single address.")

  ## Resolve a setting: the per-call `control` list wins, then the
  ## package-wide options stored in .SendmailREnv, then the fallback.
  opt <- function(name, fallback = "") {
    if (name %in% names(control)) {
      control[[name]]
    } else if (name %in% names(.SendmailREnv$options)) {
      .SendmailREnv$options[[name]]
    } else {
      fallback
    }
  }

  headers$From <- from
  headers$To <- to
  headers$Subject <- subject
  ## Add a Date header if not explicitly set.  This fixes the annoyance
  ## that some clients (e.g. Thunderbird) do not sort undated mail
  ## correctly.
  if (is.null(headers$Date))
    headers$Date <- .rfc2822_date()

  transport <- opt("transport", "smtp")
  verbose <- opt("verbose", FALSE)
  if (transport == "smtp") {
    .smtp_submit_mail(opt("smtpServer", "localhost"),
                      opt("smtpPort", 25),
                      headers, msg, verbose)
  } else if (transport == "debug") {
    ## Dump the rendered message to stdout instead of a server.
    .write_mail(headers, msg, stdout())
  }
}
|
/B_analysts_sources_github/jeroen/sendmailR/sendmailR.r
|
no_license
|
Irbis3/crantasticScrapper
|
R
| false
| false
| 5,452
|
r
|
##
## sendmailR.r - send email from within R
##
## Author:
## Olaf Mersmann (OME) <olafm@datensplitter.net>
##
## Format a timestamp as an RFC 2822 date string, e.g.
## "Thu, 02 Jan 2020 03:04:05 -0000".
##
## LC_TIME is temporarily forced to "C" so weekday/month abbreviations come
## out in English as RFC 2822 requires; on.exit() restores the caller's
## locale even if strftime() errors.
.rfc2822_date <- function(time=Sys.time()) {
  lc <- Sys.getlocale("LC_TIME")
  on.exit(Sys.setlocale("LC_TIME", lc))
  Sys.setlocale("LC_TIME", "C")
  ## BUG FIX: the original passed `use.tz=TRUE`, but strftime()'s argument is
  ## spelled `usetz`, so the misspelled name was silently swallowed by `...`.
  ## The "-0000" offset is already hard-coded in the format string, so
  ## appending the timezone name would be wrong anyway; state `usetz=FALSE`
  ## explicitly.
  strftime(time, format="%a, %d %b %Y %H:%M:%S -0000",
           tz="UTC", usetz=FALSE)
}
## Write an RFC 2822 message (headers plus a multipart/mixed MIME body) to
## the connection `sock`, using CRLF line endings throughout.
##
## `msg` may be a single part or a list of parts; each part is either a
## "mime_part" object (delegated to .write_mime_part) or a plain character
## vector, which is sent as text/plain for backward compatibility.  Other
## part types are silently skipped.
.write_mail <- function(headers, msg, sock) {
if (!is.list(msg))
msg <- list(msg)
## Generate MIME headers:
## boundary = 256 random bits packed into 32 raw bytes, rendered as hex.
boundary <- paste(packBits(sample(0:1, 256, TRUE)), collapse="")
headers$`MIME-Version` <- "1.0"
headers$`Content-Type` <- sprintf("multipart/mixed; boundary=\"%s\"", boundary)
## Header block, then the blank line separating headers from the body.
writeLines(paste(names(headers),
unlist(headers), sep=": "),
sock, sep="\r\n")
writeLines("", sock, sep="\r\n")
## Preamble shown by non-MIME-aware mail readers.
writeLines("This is a message with multiple parts in MIME format.", sock, sep="\r\n")
for (part in msg) {
writeLines(sprintf("--%s", boundary), sock, sep="\r\n")
if (inherits(part, "mime_part"))
.write_mime_part(part, sock)
else if (is.character(part)) { ## Legacy support for plain old string
## writeLines(sprintf("--%s", boundary), sock, sep="\r\n")
writeLines("Content-Type: text/plain; format=flowed\r\n", sock, sep="\r\n")
writeLines(part, sock, sep="\r\n")
}
}
## Closing "--boundary--" marker terminates the multipart body.
writeLines(sprintf("--%s--", boundary), sock, sep="\r\n")
}
## Submit a message to an SMTP server by speaking the protocol directly over
## a blocking socket: HELO, MAIL FROM, one RCPT TO per recipient, DATA (the
## message body from .write_mail), then QUIT.  The socket is always closed
## on exit via on.exit().
.smtp_submit_mail <- function(server, port, headers, msg, verbose=FALSE) {
stopifnot(is.character(headers$From), is.character(headers$To))
## Read server replies until one starts with the expected status code.
## NOTE(review): `code` is a character string, so comparisons against the
## numeric literals coerce the numbers to strings and compare
## lexicographically; this happens to be correct for 3-digit SMTP codes.
## The scalar conjunction would conventionally use `&&` rather than `&`.
wait_for <- function(lcode) {
done <- FALSE
while (!done) {
line <- readLines(con=sock, n=1)
if (verbose)
message("<< ", line)
code <- substring(line, 1, 3)
msg <- substring(line, 5)
if (code == lcode) {
done <- TRUE
} else {
if (code >= 500 & code <= 599)
stop("SMTP Error: ", msg)
else
message("Unknown SMTP code: ", code)
}
}
return(list(code=code, msg=msg))
}
## Send one command line (CRLF-terminated) and block until the expected
## reply code arrives.
send_command <- function(cmd, code) {
if (verbose)
message(">> ", cmd)
writeLines(cmd, sock, sep="\r\n")
wait_for(code)
}
## Element 4 of Sys.info() is the node (host) name, used in the greeting.
nodename <- Sys.info()[4]
sock <- socketConnection(host=server,
port=port,
blocking=TRUE)
if (!isOpen(sock))
stop(sprintf("Could not connect to smtp server '%s' on port '%i'.",
server, port))
on.exit(close(sock))
## << 220 <hostname> ESMTP
wait_for(220)
## >> HELO localhost
## << 250 mail.statistik.uni-dortmund.de
send_command(paste("HELO ", nodename), 250)
## >> MAIL FROM: <foo@bah.com>
## << 250 2.1.0 Ok
send_command(paste("MAIL FROM: ", headers$From), 250)
## >> RCPT TO: <bah@baz.org>
## << 250 2.1.5 Ok
lapply(headers$To, function(rcpt){
send_command(paste("RCPT TO: ", rcpt), 250)
})
## >> DATA
## << 354 blah fu
send_command("DATA", 354)
## >> <actual message + headers + .>
if (verbose)
message(">> <message data>")
.write_mail(headers, msg, sock)
## A lone "." on its own line terminates the DATA section.
writeLines(".", sock, sep="\r\n")
wait_for(250)
## << 250 2.0.0 Ok: queued as XXXXXXXX
## >> QUIT
## << 221 2.0.0 Bye
send_command("QUIT", 221)
}
#' Simplistic sendmail utility for R. Uses SMTP to submit a message
#' to a local SMTP server.
#'
#' @title Send mail from within R
#'
#' @param from From whom the mail message is (RFC2822 style address).
#' @param to Recipient of the message (valid RFC2822 style address).
#' @param subject Subject line of message.
#' @param msg Body text of message or a list containing
#'   \code{\link{mime_part}} objects.
#' @param \dots ...
#' @param headers Any other headers to include.
#' @param control List of SMTP server settings. Valid values are the
#'   possible options for \code{\link{sendmail_options}}.
#'
#' @seealso \code{\link{mime_part}} for a way to add attachments.
#' @keywords utilities
#'
#' @examples
#' \dontrun{
#' from <- sprintf("<sendmailR@@\\%s>", Sys.info()[4])
#' to <- "\"Olaf Mersmann\"<olafm@@datensplitter.net>"
#' subject <- "Hello from R"
#' body <- list("It works!", mime_part(iris))
#' sendmail(from, to, subject, body,
#'          control=list(smtpServer="ASPMX.L.GOOGLE.COM"))
#' }
#'
#' @export
sendmail <- function(from, to, subject, msg, ...,
                     headers=list(),
                     control=list()) {
  ## Argument checks:
  stopifnot(is.list(headers), is.list(control))
  if (length(from) != 1)
    stop("'from' must be a single address.")

  ## Resolve a setting: the per-call `control` list wins, then the
  ## package-wide options stored in .SendmailREnv, then the fallback.
  opt <- function(name, fallback = "") {
    if (name %in% names(control)) {
      control[[name]]
    } else if (name %in% names(.SendmailREnv$options)) {
      .SendmailREnv$options[[name]]
    } else {
      fallback
    }
  }

  headers$From <- from
  headers$To <- to
  headers$Subject <- subject
  ## Add a Date header if not explicitly set.  This fixes the annoyance
  ## that some clients (e.g. Thunderbird) do not sort undated mail
  ## correctly.
  if (is.null(headers$Date))
    headers$Date <- .rfc2822_date()

  transport <- opt("transport", "smtp")
  verbose <- opt("verbose", FALSE)
  if (transport == "smtp") {
    .smtp_submit_mail(opt("smtpServer", "localhost"),
                      opt("smtpPort", 25),
                      headers, msg, verbose)
  } else if (transport == "debug") {
    ## Dump the rendered message to stdout instead of a server.
    .write_mail(headers, msg, stdout())
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_cells.R
\name{get_cells}
\alias{get_cells}
\title{Get cells of a tree}
\usage{
get_cells(tree, treeT = c("LT", "DT"), type = c("all", "nr", "inc"))
}
\arguments{
\item{tree}{The lineage or division tree, an object of class \code{"igraph"}.}
\item{treeT}{A character string naming the type of \code{tree}:
\itemize{
\item \code{"LT"} if \code{tree} is a lineage tree
\item \code{"DT"} if \code{tree} is a division tree
}
This argument is ignored in case \code{type = "all"}}
\item{type}{A character string naming the type of cells to be returned:
\itemize{
\item \code{"all"} for all cells (including any existing imaginary \emph{root} cells)
\item \code{"nr"} for all non-root cells (excluding any existing imaginary \emph{root} cells)
\item \code{"inc"} for all cells included in the analysis
}}
}
\value{
The labels of the corresponding cells, a vector of character strings.
}
\description{
Returns the labels of the cells in a lineage or division tree.
}
|
/man/get_cells.Rd
|
no_license
|
vicstefanou/ViSCA
|
R
| false
| true
| 1,042
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_cells.R
\name{get_cells}
\alias{get_cells}
\title{Get cells of a tree}
\usage{
get_cells(tree, treeT = c("LT", "DT"), type = c("all", "nr", "inc"))
}
\arguments{
\item{tree}{The lineage or division tree, an object of class \code{"igraph"}.}
\item{treeT}{A character string naming the type of \code{tree}:
\itemize{
\item \code{"LT"} if \code{tree} is a lineage tree
\item \code{"DT"} if \code{tree} is a division tree
}
This argument is ignored in case \code{type = "all"}}
\item{type}{A character string naming the type of cells to be returned:
\itemize{
\item \code{"all"} for all cells (including any existing imaginary \emph{root} cells)
\item \code{"nr"} for all non-root cells (excluding any existing imaginary \emph{root} cells)
\item \code{"inc"} for all cells included in the analysis
}}
}
\value{
The labels of the corresponding cells, a vector of character strings.
}
\description{
Returns the labels of the cells in a lineage or division tree.
}
|
#' The Rorschach protocol.
#'
#' This protocol is used to calibrate the eyes for variation due to sampling.
#' All plots are typically null data sets, data that is consistent with a null
#' hypothesis. The protocol is described in Buja, Cook, Hofmann, Lawrence,
#' Lee, Swayne, Wickham (2009) Statistical inference for exploratory data
#' analysis and model diagnostics, Phil. Trans. R. Soc. A, 367, 4361-4383.
#'
#' @export
#' @param method method for generating null data sets
#' @param true true data set. If \code{NULL}, \code{\link{find_plot_data}}
#' will attempt to extract it from the current ggplot2 plot.
#' @param n total number of samples to generate (including true data)
#' @param p probability of including true data with null data.
rorschach <- function(method, true = NULL, n = 20, p = 0) {
true <- find_plot_data(true)
# With probability p, slip the real data in among the null plots.
show_true <- rbinom(1, 1, p) == 1
if (show_true) {
n <- n - 1
}
# One call to method(true) per null data set, stacked into a single column.
# NOTE(review): data.frame() recycles `.n` (length n) to the length of the
# unlisted values, so `.n` labels replicates correctly only if each null
# data set contributes exactly one value -- confirm against the intended
# plyr::rdply() behaviour (see the commented-out call in lineup()).
samples <- data.frame(.n = seq_len(n),
V1 = unlist(purrr::rerun(n, method(true))))
if (show_true) {
# Random slot for the real data; the position is announced encrypted.
pos <- sample(n + 1, 1)
message(encrypt("True data in position ", pos+10))
samples <- add_true(samples, true, pos)
} else {
# All-null field: just rename the replicate index to .sample.
samples$.sample <- samples$.n
samples$.n <- NULL
}
samples
}
#' The line-up protocol.
#'
#' In this protocol the plot of the real data is embedded amongst a field of
#' plots of data generated to be consistent with some null hypothesis.
#' If the observer can pick the real data as different from the others, this
#' lends weight to the statistical significance of the structure in the plot.
#' The protocol is described in Buja, Cook, Hofmann, Lawrence,
#' Lee, Swayne, Wickham (2009) Statistical inference for exploratory data
#' analysis and model diagnostics, Phil. Trans. R. Soc. A, 367, 4361-4383.
#'
#' Generate n - 1 null datasets and randomly position the true data. If you
#' pick the real data as being noticeably different, then you have formally
#' established that it is different with p-value 1/n.
#'
#' @param method method for generating null data sets
#' @param true true data set. If \code{NULL}, \code{\link{find_plot_data}}
#' will attempt to extract it from the current ggplot2 plot.
#' @param n total number of samples to generate (including true data)
#' @param pos position of true data. Leave missing to pick position at
#' random. Encrypted position will be printed on the command line,
#' \code{\link{decrypt}} to understand.
#' @param samples samples generated under the null hypothesis. Only specify
#' this if you don't want lineup to generate the data for you.
#' @export
#' @examples
#' ggplot(lineup(null_permute('mpg'), mtcars), aes(mpg, wt)) +
#'   geom_point() +
#'   facet_wrap(~ .sample)
#' ggplot(lineup(null_permute('cyl'), mtcars),
#'        aes(mpg, .sample, colour = factor(cyl))) +
#'   geom_point()
lineup <- function(method, true = NULL, n = 20, pos = sample(n, 1), samples = NULL) {
true <- find_plot_data(true)
if (is.null(samples)) {
# Generate n - 1 null data sets; values are stacked into one column.
samples <- data.frame(.n = seq_len(n - 1),
V1 = unlist(purrr::rerun(n - 1, method(true))))
#samples <- plyr::rdply(n - 1, method(true))
}
if (missing(pos)) {
# Position was chosen at random: print it encrypted so the analyst can
# view the lineup blind and decrypt() the position afterwards.
message("decrypt(\"", encrypt("True data in position ", pos+10), "\")")
}
add_true(samples, true, pos)
}
#' Insert the true data set into a stack of null data sets.
#'
#' Opens slot \code{pos} by shifting every null replicate index at or past it
#' up by one, tags the true data with that slot, binds everything together and
#' returns the rows ordered by \code{.sample}.  The chosen position is also
#' recorded in the \code{"pos"} attribute of the result.
#' @keywords internal
add_true <- function(samples, true, pos) {
  # (.n >= pos) is 0/1 under arithmetic, so this is the branch-free form of
  # ifelse(.n >= pos, .n + 1, .n).
  samples$.sample <- samples$.n + (samples$.n >= pos)
  samples$.n <- NULL
  true$.sample <- pos
  combined <- dplyr::bind_rows(samples, true)
  attr(combined, "pos") <- pos
  combined[order(combined$.sample), ]
}
#' Find plot data.
#' If data is not specified, this function will attempt to find the data
#' corresponding to the last ggplot2 created or displayed. This works in most
#' situations where you create the plot and immediately display it, but may
#' not work otherwise; in those cases, pass the data explicitly.
#'
#' @keywords internal
#' @importFrom ggplot2 last_plot
find_plot_data <- function(data) {
  if (!is.null(data)) {
    return(data)
  }
  # Guard clause: without a recorded ggplot there is nothing to fall back on.
  if (!exists("last_plot") || is.null(last_plot())) {
    stop("Missing true dataset")
  }
  last_plot()$data
}
|
/R/protocols.r
|
no_license
|
ellisvalentiner/nullabor
|
R
| false
| false
| 4,352
|
r
|
#' The Rorschach protocol.
#'
#' This protocol is used to calibrate the eyes for variation due to sampling.
#' All plots are typically null data sets, data that is consistent with a null
#' hypothesis. The protocol is described in Buja, Cook, Hofmann, Lawrence,
#' Lee, Swayne, Wickham (2009) Statistical inference for exploratory data
#' analysis and model diagnostics, Phil. Trans. R. Soc. A, 367, 4361-4383.
#'
#' @export
#' @param method method for generating null data sets
#' @param true true data set. If \code{NULL}, \code{\link{find_plot_data}}
#' will attempt to extract it from the current ggplot2 plot.
#' @param n total number of samples to generate (including true data)
#' @param p probability of including true data with null data.
rorschach <- function(method, true = NULL, n = 20, p = 0) {
true <- find_plot_data(true)
# With probability p, slip the real data in among the null plots.
show_true <- rbinom(1, 1, p) == 1
if (show_true) {
n <- n - 1
}
# One call to method(true) per null data set, stacked into a single column.
# NOTE(review): data.frame() recycles `.n` (length n) to the length of the
# unlisted values, so `.n` labels replicates correctly only if each null
# data set contributes exactly one value -- confirm against the intended
# plyr::rdply() behaviour (see the commented-out call in lineup()).
samples <- data.frame(.n = seq_len(n),
V1 = unlist(purrr::rerun(n, method(true))))
if (show_true) {
# Random slot for the real data; the position is announced encrypted.
pos <- sample(n + 1, 1)
message(encrypt("True data in position ", pos+10))
samples <- add_true(samples, true, pos)
} else {
# All-null field: just rename the replicate index to .sample.
samples$.sample <- samples$.n
samples$.n <- NULL
}
samples
}
#' The line-up protocol.
#'
#' In this protocol the plot of the real data is embedded amongst a field of
#' plots of data generated to be consistent with some null hypothesis.
#' If the observer can pick the real data as different from the others, this
#' lends weight to the statistical significance of the structure in the plot.
#' The protocol is described in Buja, Cook, Hofmann, Lawrence,
#' Lee, Swayne, Wickham (2009) Statistical inference for exploratory data
#' analysis and model diagnostics, Phil. Trans. R. Soc. A, 367, 4361-4383.
#'
#' Generate n - 1 null datasets and randomly position the true data. If you
#' pick the real data as being noticeably different, then you have formally
#' established that it is different with p-value 1/n.
#'
#' @param method method for generating null data sets
#' @param true true data set. If \code{NULL}, \code{\link{find_plot_data}}
#' will attempt to extract it from the current ggplot2 plot.
#' @param n total number of samples to generate (including true data)
#' @param pos position of true data. Leave missing to pick position at
#' random. Encrypted position will be printed on the command line,
#' \code{\link{decrypt}} to understand.
#' @param samples samples generated under the null hypothesis. Only specify
#' this if you don't want lineup to generate the data for you.
#' @export
#' @examples
#' ggplot(lineup(null_permute('mpg'), mtcars), aes(mpg, wt)) +
#'   geom_point() +
#'   facet_wrap(~ .sample)
#' ggplot(lineup(null_permute('cyl'), mtcars),
#'        aes(mpg, .sample, colour = factor(cyl))) +
#'   geom_point()
lineup <- function(method, true = NULL, n = 20, pos = sample(n, 1), samples = NULL) {
true <- find_plot_data(true)
if (is.null(samples)) {
# Generate n - 1 null data sets; values are stacked into one column.
samples <- data.frame(.n = seq_len(n - 1),
V1 = unlist(purrr::rerun(n - 1, method(true))))
#samples <- plyr::rdply(n - 1, method(true))
}
if (missing(pos)) {
# Position was chosen at random: print it encrypted so the analyst can
# view the lineup blind and decrypt() the position afterwards.
message("decrypt(\"", encrypt("True data in position ", pos+10), "\")")
}
add_true(samples, true, pos)
}
#' Insert the true data set into a stack of null data sets.
#'
#' Opens slot \code{pos} by shifting every null replicate index at or past it
#' up by one, tags the true data with that slot, binds everything together and
#' returns the rows ordered by \code{.sample}.  The chosen position is also
#' recorded in the \code{"pos"} attribute of the result.
#' @keywords internal
add_true <- function(samples, true, pos) {
  # (.n >= pos) is 0/1 under arithmetic, so this is the branch-free form of
  # ifelse(.n >= pos, .n + 1, .n).
  samples$.sample <- samples$.n + (samples$.n >= pos)
  samples$.n <- NULL
  true$.sample <- pos
  combined <- dplyr::bind_rows(samples, true)
  attr(combined, "pos") <- pos
  combined[order(combined$.sample), ]
}
#' Find plot data.
#' If data is not specified, this function will attempt to find the data
#' corresponding to the last ggplot2 created or displayed. This works in most
#' situations where you create the plot and immediately display it, but may
#' not work otherwise; in those cases, pass the data explicitly.
#'
#' @keywords internal
#' @importFrom ggplot2 last_plot
find_plot_data <- function(data) {
  if (!is.null(data)) {
    return(data)
  }
  # Guard clause: without a recorded ggplot there is nothing to fall back on.
  if (!exists("last_plot") || is.null(last_plot())) {
    stop("Missing true dataset")
  }
  last_plot()$data
}
|
## Tests for round-tripping the DataPackageR YAML build configuration.
context("yaml config manipulation")
test_that("can remove a data item", {
## Two processing scripts shipped with the package as test fixtures.
file <- system.file("extdata", "tests", "subsetCars.Rmd",
package = "DataPackageR"
)
file2 <- system.file("extdata", "tests", "extra.rmd",
package = "DataPackageR"
)
## Scaffold a data package under tempdir() that builds both scripts;
## datapackage_skeleton() returns NULL invisibly on success.
expect_null(
datapackage_skeleton(
name = "subsetCars",
path = tempdir(),
code_files = c(file, file2),
force = TRUE,
r_object_names = c("cars_over_20", "pressure")
)
)
package_build(file.path(tempdir(), "subsetCars"))
# have we saved the new object?
## Disable the second script in the YAML config and rebuild; the data
## objects produced by the first build should survive.
config <- yml_find(file.path(tempdir(), "subsetCars"))
config <- yml_disable_compile(config, basename(file2))
yml_write(config)
package_build(file.path(tempdir(), "subsetCars"))
expect_equal(
list.files(file.path(tempdir(), "subsetCars", "data")),
c("cars_over_20.rda", "pressure.rda")
)
## The generated documentation should cover the package and both objects.
expect_true(all(
c("subsetCars", "cars_over_20", "pressure") %in%
names(DataPackageR:::.doc_parse(
list.files(file.path(tempdir(), "subsetCars", "R"),
full.names = TRUE
)
))
))
## Clean up the scaffolded package.
unlink(file.path(tempdir(), "subsetCars"),
recursive = TRUE,
force = TRUE
)
})
|
/tests/testthat/test-yaml-manipulation.R
|
no_license
|
cran/DataPackageR
|
R
| false
| false
| 1,214
|
r
|
## Tests for round-tripping the DataPackageR YAML build configuration.
context("yaml config manipulation")
test_that("can remove a data item", {
## Two processing scripts shipped with the package as test fixtures.
file <- system.file("extdata", "tests", "subsetCars.Rmd",
package = "DataPackageR"
)
file2 <- system.file("extdata", "tests", "extra.rmd",
package = "DataPackageR"
)
## Scaffold a data package under tempdir() that builds both scripts;
## datapackage_skeleton() returns NULL invisibly on success.
expect_null(
datapackage_skeleton(
name = "subsetCars",
path = tempdir(),
code_files = c(file, file2),
force = TRUE,
r_object_names = c("cars_over_20", "pressure")
)
)
package_build(file.path(tempdir(), "subsetCars"))
# have we saved the new object?
## Disable the second script in the YAML config and rebuild; the data
## objects produced by the first build should survive.
config <- yml_find(file.path(tempdir(), "subsetCars"))
config <- yml_disable_compile(config, basename(file2))
yml_write(config)
package_build(file.path(tempdir(), "subsetCars"))
expect_equal(
list.files(file.path(tempdir(), "subsetCars", "data")),
c("cars_over_20.rda", "pressure.rda")
)
## The generated documentation should cover the package and both objects.
expect_true(all(
c("subsetCars", "cars_over_20", "pressure") %in%
names(DataPackageR:::.doc_parse(
list.files(file.path(tempdir(), "subsetCars", "R"),
full.names = TRUE
)
))
))
## Clean up the scaffolded package.
unlink(file.path(tempdir(), "subsetCars"),
recursive = TRUE,
force = TRUE
)
})
|
library(tidyverse)
library(sva)
library(RUVSeq)
library(RColorBrewer)
library("factoextra")
library(FactoMineR)
require(patchwork)
require(limma)
library(peer)
require(edgeR)
library("ggsci")
##################################################################################################################################
#
# 8/24/2020 MPM Script is broken, files cannot be found. This script computed multiple normalization methods and plotted the results.
# Plots can be found in QC/normmethods.
##################################################################################################################################
## Remove surrogate-variable batch effects from an expression matrix.
##
## Runs svaseq() on `dat` (genes x samples) with full model `mmi` and null
## model `mm0`, then regresses the estimated surrogate variables out of the
## data.  The svaseq() result list is returned with an extra `corrected`
## element holding the adjusted genes x samples matrix.  When `n.sv` is NULL
## the number of surrogate variables is estimated via num.sv(method="leek").
svaBatchCor <- function(dat, mmi, mm0, n.sv = NULL) {
  dat <- as.matrix(dat)
  Y <- t(dat)
  if (is.null(n.sv)) {
    n.sv <- num.sv(dat, mmi, method = "leek")
  }
  fit <- svaseq(dat, mmi, mm0, n.sv = n.sv)
  sv <- fit$sv
  ## Ordinary least-squares projection of Y onto the surrogate variables;
  ## subtracting the fitted part leaves the batch-corrected expression.
  coefs <- solve(t(sv) %*% sv) %*% t(sv) %*% Y
  fit$corrected <- t(Y - sv %*% coefs)
  fit
}
## ---- Load counts and phenotype data, filter low-expression genes ----
exprs <- readRDS('data/subread_counts_allgood.RDS')
exprs$annotation
p<- read_csv('data/phenoData.csv')
dge <- DGEList(counts=exprs$counts)
cpms = cpm(dge)
## Keep genes with CPM > 1 in at least 25% of samples.
keep = rowSums(cpms>1)>=.25*dim(dge)[2]
dge <- dge[keep,]
dge <- calcNormFactors(dge)
cpms <- cpm(dge)
dim(dge)
## PCA of the uncorrected CPM matrix (samples in rows after t()).
cpms.pca <- PCA(t(cpms), graph = FALSE)
########################## PEERS #####################################
# 8/27/2020 Cannot install PEERS
mod <- model.matrix(~age+etiology+race+gender,p)
model = PEER()
PEER_setPhenoMean(model,t(cpms))
PEER_setCovariates(model, as.matrix(mod))
PEER_setAdd_mean(model, TRUE)
PEER_setNk(model,10)
PEER_getNk(model)
PEER_update(model)
factors = PEER_getX(model)
weights = PEER_getW(model)
precision = PEER_getAlpha(model)
residuals = PEER_getResiduals(model)
plot(precision)
write_tsv(as.data.frame(t(factors)),'data/PEERS_factors')
peer_res.pca <- PCA(residuals, graph = FALSE)
## Regress the PEER factors out of the CPM matrix.
Y <-t(cpms)
## NOTE(review): 10-18 evaluates to -8, which *drops column 8* rather than
## selecting factor columns 10 through 18; 10:18 was probably intended.
W <- as.matrix(factors[,10-18])
W
alpha <- solve(t(W) %*% W) %*% t(W) %*% Y
peer.cor <- t(Y - W %*% alpha)
peer_cor.pca <-PCA(t(peer.cor), graph = FALSE)
plot_grid(fviz_pca_ind(peer_res.pca, habillage = p$Library_Pool,geom='point')+ggtitle('PEER Residuals'),
fviz_pca_ind(peer_cor.pca, habillage = p$Library_Pool,geom='point')+ggtitle('PEER corrected'),
align='v',labels='AUTO',ncol=1
)
#########################################################
################### SVAseq ##############################
mod0 <- model.matrix(~1,data=p)
num.sv(cpms,mod,method="leek")
svseq = svaseq(cpms,mod,mod0,n.sv=10)
unsup_sva = svaseq(cpms,mod,mod0)
## Batch-corrected matrices with 5, 10 and 24 surrogate variables.
sva5 <- svaBatchCor(cpms,mod,mod0,n.sv=5)
sva10 <- svaBatchCor(cpms,mod,mod0,n.sv=10)
sva24 <- svaBatchCor(cpms,mod,mod0,n.sv=24)
sva24_factors <- sva24$sv
rownames(sva24_factors) <- p$Sample
saveRDS(sva24_factors,file = 'MAGnet_SVA24_factors.RDS')
sva5.pca <- PCA(t(sva5$corrected),graph=FALSE)
sva10.pca <- PCA(t(sva10$corrected),graph=FALSE)
sva24.pca <- PCA(t(sva24$corrected),graph=FALSE)
### Write factors and corrected matrix to disk.
write_tsv(as.data.frame(t(sva10$sv)),'data/SVA10_factors')
write_tsv(as.data.frame(t(sva24$sv)),'data/SVA24_factors')
saveRDS(sva24$corrected,file='data/sav24_corrected.RDS')
###############################################################################################
###################################### Limma remove batch effects ############################
batcheffects <- removeBatchEffect(cpms, batch=p$Library_Pool, design=mod,covariates = p$TIN.median.)
batcheffects.pca <- PCA(t(batcheffects),graph=FALSE)
#####################################################
#### RUVseq with empirical controls
### Using the previously defined dge
y <- calcNormFactors(dge, method="upperquartile")
y <- estimateGLMCommonDisp(y, mod)
y <- estimateGLMTagwiseDisp(y, mod)
fit <- glmFit(y, mod)
lrt <- glmLRT(fit, coef=2)
## Empirical controls: the 500 genes least associated with the model.
controls=rank(lrt$table$LR) <= 500
batch_ruv_emp <- RUVg(dge$counts, controls, k=10)
batch_ruv_emp$normalizedCounts
ruv.pca <- PCA(t(batch_ruv_emp$normalizedCounts), graph = FALSE)
############################# Plot all results ########################
## Original data.
## NOTE(review): the phenotype column is referred to both as p$Library_Pool
## (above) and p$Library.Pool (below) -- confirm which spelling
## phenoData.csv actually uses; one of the two forms will be NULL.
p$etiology <- as.factor(p$etiology)
p$Library.Pool <- as.factor(p$Library.Pool)
wrap_plots(
fviz_pca_ind(cpms.pca, habillage = p$etiology,geom='point')+ggtitle('Orig-Status'),
fviz_pca_ind(cpms.pca, habillage = p$Library.Pool,geom='point')+ggtitle('Orig-Library'),
ncol=1
)
ggsave('PCA_original.pdf')
plot_grid(fviz_pca_ind(cpms.pca, habillage = p$Library.Pool,geom='point')+ggtitle('Orig'),
fviz_pca_ind(batcheffects.pca, habillage = p$Library.Pool,geom='point')+ggtitle('Batch Correct')+theme(legend.position="none"),
fviz_pca_ind(sva10.pca, habillage = p$Library.Pool,geom='point')+ggtitle('SVA10')+ theme(legend.position="none"),
fviz_pca_ind(ruv.pca, habillage = p$Library.Pool,geom='point')+ggtitle('RUV')+theme(legend.position="none"),
align='v',labels='AUTO',ncol=1
)
ggsave('PCA_mmethods_lib.pdf')
plot_grid(fviz_pca_ind(cpms.pca, habillage = p$etiology,geom='point')+ggtitle('Orig'),
fviz_pca_ind(batcheffects.pca, habillage = p$etiology,geom='point')+ggtitle('Batch Correct')+theme(legend.position="none"),
fviz_pca_ind(sva24.pca, habillage = p$etiology,geom='point')+ggtitle('SVA10')+ theme(legend.position="none"),
#fviz_pca_ind(peer_res.pca, habillage = p$etiology,geom='point')+ggtitle('PEER10')+theme(legend.position="none"),
fviz_pca_ind(ruv.pca, habillage = p$etiology,geom='point')+ggtitle('RUV')+theme(legend.position="none"),
align='v',labels='AUTO',ncol=1,axis='l'
)
ggsave('PCA_mmethods_etiology.pdf')
# SVA 5,10,24
plot_grid(fviz_pca_ind(cpms.pca, habillage = p$etiology,geom='point')+ggtitle('Orig'),
fviz_pca_ind(sva5.pca, habillage = p$etiology,geom='point')+ggtitle('SVA5')+ theme(legend.position="none"),
fviz_pca_ind(sva10.pca, habillage = p$etiology,geom='point')+ggtitle('SVA10')+theme(legend.position="none"),
fviz_pca_ind(sva24.pca, habillage = p$etiology,geom='point')+ggtitle('SVA24')+theme(legend.position="none"),
align='v',labels='AUTO',ncol=1,axis='l'
)
ggsave('PCA_sva_etiology.pdf')
plot_grid(fviz_pca_ind(cpms.pca, habillage = p$Gender,geom='point')+ggtitle('Orig'),
fviz_pca_ind(sva5.pca, habillage = p$Gender,geom='point')+ggtitle('SVA5')+ theme(legend.position="none"),
fviz_pca_ind(sva10.pca, habillage = p$Gender,geom='point')+ggtitle('SVA10')+theme(legend.position="none"),
fviz_pca_ind(sva24.pca, habillage = p$Gender,geom='point')+ggtitle('SVA24')+theme(legend.position="none"),
align='v',labels='AUTO',ncol=1,axis='l'
)
|
/bin/NormMethods.R
|
no_license
|
mpmorley/MAGNet
|
R
| false
| false
| 6,759
|
r
|
library(tidyverse)
library(sva)
library(RUVSeq)
library(RColorBrewer)
library("factoextra")
library(FactoMineR)
require(patchwork)
require(limma)
library(peer)
require(edgeR)
library("ggsci")
##################################################################################################################################
#
# 8/24/2020 MPM Script is broken, files cannot be found. This script computed multiple normalization methods and plotted the results.
# Plots can be found in QC/normmethods.
##################################################################################################################################
## Remove surrogate-variable batch effects from an expression matrix.
##
## Runs svaseq() on `dat` (genes x samples) with full model `mmi` and null
## model `mm0`, then regresses the estimated surrogate variables out of the
## data.  The svaseq() result list is returned with an extra `corrected`
## element holding the adjusted genes x samples matrix.  When `n.sv` is NULL
## the number of surrogate variables is estimated via num.sv(method="leek").
svaBatchCor <- function(dat, mmi, mm0, n.sv = NULL) {
  dat <- as.matrix(dat)
  Y <- t(dat)
  if (is.null(n.sv)) {
    n.sv <- num.sv(dat, mmi, method = "leek")
  }
  fit <- svaseq(dat, mmi, mm0, n.sv = n.sv)
  sv <- fit$sv
  ## Ordinary least-squares projection of Y onto the surrogate variables;
  ## subtracting the fitted part leaves the batch-corrected expression.
  coefs <- solve(t(sv) %*% sv) %*% t(sv) %*% Y
  fit$corrected <- t(Y - sv %*% coefs)
  fit
}
## ---- Load counts and phenotype data, filter low-expression genes ----
exprs <- readRDS('data/subread_counts_allgood.RDS')
exprs$annotation
p<- read_csv('data/phenoData.csv')
dge <- DGEList(counts=exprs$counts)
cpms = cpm(dge)
## Keep genes with CPM > 1 in at least 25% of samples.
keep = rowSums(cpms>1)>=.25*dim(dge)[2]
dge <- dge[keep,]
dge <- calcNormFactors(dge)
cpms <- cpm(dge)
dim(dge)
## PCA of the uncorrected CPM matrix (samples in rows after t()).
cpms.pca <- PCA(t(cpms), graph = FALSE)
########################## PEERS #####################################
# 8/27/2020 Cannot install PEERS
mod <- model.matrix(~age+etiology+race+gender,p)
model = PEER()
PEER_setPhenoMean(model,t(cpms))
PEER_setCovariates(model, as.matrix(mod))
PEER_setAdd_mean(model, TRUE)
PEER_setNk(model,10)
PEER_getNk(model)
PEER_update(model)
factors = PEER_getX(model)
weights = PEER_getW(model)
precision = PEER_getAlpha(model)
residuals = PEER_getResiduals(model)
plot(precision)
write_tsv(as.data.frame(t(factors)),'data/PEERS_factors')
peer_res.pca <- PCA(residuals, graph = FALSE)
## Regress the PEER factors out of the CPM matrix.
Y <-t(cpms)
## NOTE(review): 10-18 evaluates to -8, which *drops column 8* rather than
## selecting factor columns 10 through 18; 10:18 was probably intended.
W <- as.matrix(factors[,10-18])
W
alpha <- solve(t(W) %*% W) %*% t(W) %*% Y
peer.cor <- t(Y - W %*% alpha)
peer_cor.pca <-PCA(t(peer.cor), graph = FALSE)
plot_grid(fviz_pca_ind(peer_res.pca, habillage = p$Library_Pool,geom='point')+ggtitle('PEER Residuals'),
fviz_pca_ind(peer_cor.pca, habillage = p$Library_Pool,geom='point')+ggtitle('PEER corrected'),
align='v',labels='AUTO',ncol=1
)
#########################################################
################### SVAseq ##############################
mod0 <- model.matrix(~1,data=p)
num.sv(cpms,mod,method="leek")
svseq = svaseq(cpms,mod,mod0,n.sv=10)
unsup_sva = svaseq(cpms,mod,mod0)
## Batch-corrected matrices with 5, 10 and 24 surrogate variables.
sva5 <- svaBatchCor(cpms,mod,mod0,n.sv=5)
sva10 <- svaBatchCor(cpms,mod,mod0,n.sv=10)
sva24 <- svaBatchCor(cpms,mod,mod0,n.sv=24)
sva24_factors <- sva24$sv
rownames(sva24_factors) <- p$Sample
saveRDS(sva24_factors,file = 'MAGnet_SVA24_factors.RDS')
sva5.pca <- PCA(t(sva5$corrected),graph=FALSE)
sva10.pca <- PCA(t(sva10$corrected),graph=FALSE)
sva24.pca <- PCA(t(sva24$corrected),graph=FALSE)
### Write factors and corrected matrix to disk.
write_tsv(as.data.frame(t(sva10$sv)),'data/SVA10_factors')
write_tsv(as.data.frame(t(sva24$sv)),'data/SVA24_factors')
saveRDS(sva24$corrected,file='data/sav24_corrected.RDS')
###############################################################################################
###################################### Limma remove batch effects ############################
batcheffects <- removeBatchEffect(cpms, batch=p$Library_Pool, design=mod,covariates = p$TIN.median.)
batcheffects.pca <- PCA(t(batcheffects),graph=FALSE)
#####################################################
#### RUVseq with empirical controls
### Using the previously defined dge
y <- calcNormFactors(dge, method="upperquartile")
y <- estimateGLMCommonDisp(y, mod)
y <- estimateGLMTagwiseDisp(y, mod)
fit <- glmFit(y, mod)
lrt <- glmLRT(fit, coef=2)
## Empirical controls: the 500 genes least associated with the model.
controls=rank(lrt$table$LR) <= 500
batch_ruv_emp <- RUVg(dge$counts, controls, k=10)
batch_ruv_emp$normalizedCounts
ruv.pca <- PCA(t(batch_ruv_emp$normalizedCounts), graph = FALSE)
############################# Plot all results ########################
## Original data.
## NOTE(review): the phenotype column is referred to both as p$Library_Pool
## (above) and p$Library.Pool (below) -- confirm which spelling
## phenoData.csv actually uses; one of the two forms will be NULL.
p$etiology <- as.factor(p$etiology)
p$Library.Pool <- as.factor(p$Library.Pool)
wrap_plots(
fviz_pca_ind(cpms.pca, habillage = p$etiology,geom='point')+ggtitle('Orig-Status'),
fviz_pca_ind(cpms.pca, habillage = p$Library.Pool,geom='point')+ggtitle('Orig-Library'),
ncol=1
)
ggsave('PCA_original.pdf')
plot_grid(fviz_pca_ind(cpms.pca, habillage = p$Library.Pool,geom='point')+ggtitle('Orig'),
fviz_pca_ind(batcheffects.pca, habillage = p$Library.Pool,geom='point')+ggtitle('Batch Correct')+theme(legend.position="none"),
fviz_pca_ind(sva10.pca, habillage = p$Library.Pool,geom='point')+ggtitle('SVA10')+ theme(legend.position="none"),
fviz_pca_ind(ruv.pca, habillage = p$Library.Pool,geom='point')+ggtitle('RUV')+theme(legend.position="none"),
align='v',labels='AUTO',ncol=1
)
ggsave('PCA_mmethods_lib.pdf')
plot_grid(fviz_pca_ind(cpms.pca, habillage = p$etiology,geom='point')+ggtitle('Orig'),
fviz_pca_ind(batcheffects.pca, habillage = p$etiology,geom='point')+ggtitle('Batch Correct')+theme(legend.position="none"),
fviz_pca_ind(sva24.pca, habillage = p$etiology,geom='point')+ggtitle('SVA10')+ theme(legend.position="none"),
#fviz_pca_ind(peer_res.pca, habillage = p$etiology,geom='point')+ggtitle('PEER10')+theme(legend.position="none"),
fviz_pca_ind(ruv.pca, habillage = p$etiology,geom='point')+ggtitle('RUV')+theme(legend.position="none"),
align='v',labels='AUTO',ncol=1,axis='l'
)
ggsave('PCA_mmethods_etiology.pdf')
# SVA 5,10,24
plot_grid(fviz_pca_ind(cpms.pca, habillage = p$etiology,geom='point')+ggtitle('Orig'),
fviz_pca_ind(sva5.pca, habillage = p$etiology,geom='point')+ggtitle('SVA5')+ theme(legend.position="none"),
fviz_pca_ind(sva10.pca, habillage = p$etiology,geom='point')+ggtitle('SVA10')+theme(legend.position="none"),
fviz_pca_ind(sva24.pca, habillage = p$etiology,geom='point')+ggtitle('SVA24')+theme(legend.position="none"),
align='v',labels='AUTO',ncol=1,axis='l'
)
ggsave('PCA_sva_etiology.pdf')
plot_grid(fviz_pca_ind(cpms.pca, habillage = p$Gender,geom='point')+ggtitle('Orig'),
fviz_pca_ind(sva5.pca, habillage = p$Gender,geom='point')+ggtitle('SVA5')+ theme(legend.position="none"),
fviz_pca_ind(sva10.pca, habillage = p$Gender,geom='point')+ggtitle('SVA10')+theme(legend.position="none"),
fviz_pca_ind(sva24.pca, habillage = p$Gender,geom='point')+ggtitle('SVA24')+theme(legend.position="none"),
align='v',labels='AUTO',ncol=1,axis='l'
)
|
# ANN -- regression example
# Our regression ANN uses the Yacht Hydrodynamics data set from UCI's Machine
# Learning Repository: results from 308 full-scale experiments performed at the
# Delft Ship Hydromechanics Laboratory on 22 different hull forms, measuring how
# hull geometry and the ship's Froude number affect the residuary resistance
# per unit weight of displacement.
library(tidyverse)
library(neuralnet)
library(GGally)
# Pull the raw data straight from the UCI repository and drop incomplete rows.
url <- 'http://archive.ics.uci.edu/ml/machine-learning-databases/00243/yacht_hydrodynamics.data'
Yacht_Data <- read_table(file = url, col_names = c('LongPos_COB', 'Prismatic_Coeff','Len_Disp_Ratio', 'Beam_Draut_Ratio', 'Length_Beam_Ratio','Froude_Num', 'Residuary_Resist')) %>% na.omit()
ggpairs(Yacht_Data, title = "Scatterplot Matrix of the Features of the Yacht Data Set")
# Min-max scale every column to [0, 1]; neural networks train best on
# normalised inputs.
scale01 <- function(x){
  (x - min(x)) / (max(x) - min(x))
}
Yacht_Data <- Yacht_Data %>%
  mutate_all(scale01)
# Split into test and train sets (80/20).
set.seed(12345)
Yacht_Data_Train <- sample_frac(tbl = Yacht_Data, replace = FALSE, size = 0.80)
Yacht_Data_Test <- anti_join(Yacht_Data, Yacht_Data_Train)
# NN1: neuralnet's default architecture (one hidden neuron, logistic activation).
set.seed(12321)
Yacht_NN1 <- neuralnet(Residuary_Resist ~ LongPos_COB + Prismatic_Coeff +
                         Len_Disp_Ratio + Beam_Draut_Ratio + Length_Beam_Ratio +
                         Froude_Num, data = Yacht_Data_Train)
plot(Yacht_NN1, rep = 'best')
# To manually compute the (half) sum of squared errors on the training set:
NN1_Train_SSE <- sum((Yacht_NN1$net.result - Yacht_Data_Train[, 7])^2)/2
paste("SSE: ", round(NN1_Train_SSE, 4))
# Test error; compute() is neuralnet's prediction helper.
Test_NN1_Output <- compute(Yacht_NN1, Yacht_Data_Test[, 1:6])$net.result
NN1_Test_SSE <- sum((Test_NN1_Output - Yacht_Data_Test[, 7])^2)/2
NN1_Test_SSE
# *** Regression Hyperparameters
# NN2: 2 hidden layers (4 neurons, then 1 neuron), logistic activation function.
set.seed(12321)
Yacht_NN2 <- neuralnet(Residuary_Resist ~ LongPos_COB + Prismatic_Coeff + Len_Disp_Ratio + Beam_Draut_Ratio + Length_Beam_Ratio + Froude_Num,
                       data = Yacht_Data_Train,
                       hidden = c(4, 1),
                       act.fct = "logistic")
## Training Error
NN2_Train_SSE <- sum((Yacht_NN2$net.result - Yacht_Data_Train[, 7])^2)/2
## Test Error
Test_NN2_Output <- compute(Yacht_NN2, Yacht_Data_Test[, 1:6])$net.result
NN2_Test_SSE <- sum((Test_NN2_Output - Yacht_Data_Test[, 7])^2)/2
# Rescale to [-1, 1] for the tanh activation function.
scale11 <- function(x) {
  (2 * ((x - min(x))/(max(x) - min(x)))) - 1
}
# NOTE(review): train and test sets are rescaled independently, each with its
# own min/max, so they end up on slightly different scales -- this mirrors the
# original tutorial but is worth confirming.
Yacht_Data_Train <- Yacht_Data_Train %>% mutate_all(scale11)
Yacht_Data_Test <- Yacht_Data_Test %>% mutate_all(scale11)
# NN3: 2 hidden layers (4 neurons, then 1 neuron), tanh activation function.
set.seed(12321)
Yacht_NN3 <- neuralnet(Residuary_Resist ~ LongPos_COB + Prismatic_Coeff + Len_Disp_Ratio + Beam_Draut_Ratio + Length_Beam_Ratio + Froude_Num,
                       data = Yacht_Data_Train,
                       hidden = c(4, 1),
                       act.fct = "tanh")
## Training Error
NN3_Train_SSE <- sum((Yacht_NN3$net.result - Yacht_Data_Train[, 7])^2)/2
## Test Error
Test_NN3_Output <- compute(Yacht_NN3, Yacht_Data_Test[, 1:6])$net.result
NN3_Test_SSE <- sum((Test_NN3_Output - Yacht_Data_Test[, 7])^2)/2
# NN4: default single hidden neuron, tanh activation function.
set.seed(12321)
Yacht_NN4 <- neuralnet(Residuary_Resist ~ LongPos_COB + Prismatic_Coeff + Len_Disp_Ratio + Beam_Draut_Ratio + Length_Beam_Ratio + Froude_Num,
                       data = Yacht_Data_Train,
                       act.fct = "tanh")
## Training Error
NN4_Train_SSE <- sum((Yacht_NN4$net.result - Yacht_Data_Train[, 7])^2)/2
## Test Error
Test_NN4_Output <- compute(Yacht_NN4, Yacht_Data_Test[, 1:6])$net.result
NN4_Test_SSE <- sum((Test_NN4_Output - Yacht_Data_Test[, 7])^2)/2
# Collect the train/test SSEs of all four networks for a side-by-side bar plot.
# (`times =` was previously spelled `time =`, silently relying on partial
# argument matching; spelled out in full here.)
Regression_NN_Errors <- tibble(Network = rep(c("NN1", "NN2", "NN3", "NN4"), each = 2),
                               DataSet = rep(c("Train", "Test"), times = 4),
                               SSE = c(NN1_Train_SSE, NN1_Test_SSE,
                                       NN2_Train_SSE, NN2_Test_SSE,
                                       NN3_Train_SSE, NN3_Test_SSE,
                                       NN4_Train_SSE, NN4_Test_SSE))
Regression_NN_Errors %>%
  ggplot(aes(Network, SSE, fill = DataSet)) +
  geom_col(position = "dodge") +
  ggtitle("Regression ANN's SSE")
# The best regression ANN found was Yacht_NN2, with training / test SSEs of
# 0.0188 and 0.0057; the determination is made on the SSE values alone.
plot(Yacht_NN2, rep = "best")
|
/Neural Networks/NN Regression - Yacht Hydrodynamics/NN Regression- Yacht Hydrodynamics.R
|
no_license
|
pmayav/Machine-Learning-in-R
|
R
| false
| false
| 4,719
|
r
|
#ANN
#Our regression ANN will use the Yacht Hydrodynamics data set from UCI’s Machine Learning Repository.
#This data set contains results from 308 full-scale experiments performed at the Delft
#Ship Hydromechanics Laboratory where they test 22 different hull forms. Their experiment tested the
#effect of variations in the hull geometry and the ship’s Froude number on the craft’s residuary
#resistance per unit weight of displacement.
library(tidyverse)
library(neuralnet)
library(GGally)
# Pull the raw data straight from the UCI repository and drop incomplete rows.
url <- 'http://archive.ics.uci.edu/ml/machine-learning-databases/00243/yacht_hydrodynamics.data'
Yacht_Data <- read_table(file = url, col_names = c('LongPos_COB', 'Prismatic_Coeff','Len_Disp_Ratio', 'Beam_Draut_Ratio', 'Length_Beam_Ratio','Froude_Num', 'Residuary_Resist')) %>% na.omit()
ggpairs(Yacht_Data, title = "Scatterplot Matrix of the Features of the Yacht Data Set")
# Scale the Data: min-max scale every column to [0, 1].
scale01 <- function(x){
  (x - min(x)) / (max(x) - min(x))
}
Yacht_Data <- Yacht_Data %>%
  mutate_all(scale01)
# Split into test and train sets (80/20).
set.seed(12345)
Yacht_Data_Train <- sample_frac(tbl = Yacht_Data, replace = FALSE, size = 0.80)
Yacht_Data_Test <- anti_join(Yacht_Data, Yacht_Data_Train)
# NN1: neuralnet's default architecture (one hidden neuron, logistic activation).
set.seed(12321)
Yacht_NN1 <- neuralnet(Residuary_Resist ~ LongPos_COB + Prismatic_Coeff +
                         Len_Disp_Ratio + Beam_Draut_Ratio + Length_Beam_Ratio +
                         Froude_Num, data = Yacht_Data_Train)
plot(Yacht_NN1, rep = 'best')
#manually compute the SSE you can use the following:
NN1_Train_SSE <- sum((Yacht_NN1$net.result - Yacht_Data_Train[, 7])^2)/2
paste("SSE: ", round(NN1_Train_SSE, 4))
# Test error; compute() is neuralnet's prediction helper.
Test_NN1_Output <- compute(Yacht_NN1, Yacht_Data_Test[, 1:6])$net.result
NN1_Test_SSE <- sum((Test_NN1_Output - Yacht_Data_Test[, 7])^2)/2
NN1_Test_SSE
# *** Regression Hyperparameters
# 2-Hidden Layers, Layer-1 4-neurons, Layer-2, 1-neuron, logistic activation
# function
set.seed(12321)
Yacht_NN2 <- neuralnet(Residuary_Resist ~ LongPos_COB + Prismatic_Coeff + Len_Disp_Ratio + Beam_Draut_Ratio + Length_Beam_Ratio + Froude_Num,
                       data = Yacht_Data_Train,
                       hidden = c(4, 1),
                       act.fct = "logistic")
## Training Error
NN2_Train_SSE <- sum((Yacht_NN2$net.result - Yacht_Data_Train[, 7])^2)/2
## Test Error
Test_NN2_Output <- compute(Yacht_NN2, Yacht_Data_Test[, 1:6])$net.result
NN2_Test_SSE <- sum((Test_NN2_Output - Yacht_Data_Test[, 7])^2)/2
# Rescale to [-1, 1] for the tanh activation function.
# NOTE(review): train and test are rescaled independently, each with its own
# min/max, so the two sets end up on slightly different scales.
scale11 <- function(x) {
  (2 * ((x - min(x))/(max(x) - min(x)))) - 1
}
Yacht_Data_Train <- Yacht_Data_Train %>% mutate_all(scale11)
Yacht_Data_Test <- Yacht_Data_Test %>% mutate_all(scale11)
# 2-Hidden Layers, Layer-1 4-neurons, Layer-2, 1-neuron, tanh activation
# function
set.seed(12321)
Yacht_NN3 <- neuralnet(Residuary_Resist ~ LongPos_COB + Prismatic_Coeff + Len_Disp_Ratio + Beam_Draut_Ratio + Length_Beam_Ratio + Froude_Num,
                       data = Yacht_Data_Train,
                       hidden = c(4, 1),
                       act.fct = "tanh")
## Training Error
NN3_Train_SSE <- sum((Yacht_NN3$net.result - Yacht_Data_Train[, 7])^2)/2
## Test Error
Test_NN3_Output <- compute(Yacht_NN3, Yacht_Data_Test[, 1:6])$net.result
NN3_Test_SSE <- sum((Test_NN3_Output - Yacht_Data_Test[, 7])^2)/2
# 1-Hidden Layer, 1-neuron, tanh activation function
set.seed(12321)
Yacht_NN4 <- neuralnet(Residuary_Resist ~ LongPos_COB + Prismatic_Coeff + Len_Disp_Ratio + Beam_Draut_Ratio + Length_Beam_Ratio + Froude_Num,
                       data = Yacht_Data_Train,
                       act.fct = "tanh")
## Training Error
NN4_Train_SSE <- sum((Yacht_NN4$net.result - Yacht_Data_Train[, 7])^2)/2
## Test Error
Test_NN4_Output <- compute(Yacht_NN4, Yacht_Data_Test[, 1:6])$net.result
NN4_Test_SSE <- sum((Test_NN4_Output - Yacht_Data_Test[, 7])^2)/2
# Bar plot of results.
# NOTE(review): `time = 4` below relies on partial matching of rep()'s
# `times` argument; it works, but `times = 4` would be explicit.
Regression_NN_Errors <- tibble(Network = rep(c("NN1", "NN2", "NN3", "NN4"), each = 2),
                               DataSet = rep(c("Train", "Test"), time = 4),
                               SSE = c(NN1_Train_SSE, NN1_Test_SSE,
                                       NN2_Train_SSE, NN2_Test_SSE,
                                       NN3_Train_SSE, NN3_Test_SSE,
                                       NN4_Train_SSE, NN4_Test_SSE))
Regression_NN_Errors %>%
  ggplot(aes(Network, SSE, fill = DataSet)) +
  geom_col(position = "dodge") +
  ggtitle("Regression ANN's SSE")
#As evident from the plot, we see that the best regression ANN we found was Yacht_NN2 with a training and
#test SSE of 0.0188 and 0.0057. We make this determination by the value of the training and test SSEs only.
plot(Yacht_NN2, rep = "best")
|
library(mvbutils)
### Name: do.in.envir
### Title: Modify a function's scope
### Aliases: do.in.envir
### Keywords: programming utilities
### ** Examples
# do.in.envir() makes ffdie's body evaluate in fff's frame, so the free
# variable `abcdef` resolves to fff's argument instead of being unbound.
fff <- function( abcdef) ffdie( 3)
ffdie <- function( x) do.in.envir( { x+abcdef} )
fff( 9) # 12; ffdie wouldn't know about abcdef without the do.in.envir call
# Show sys.call issues
# Note that the "envir" argument in this case makes the
# "do.in.envir" call completely superfluous!
# ffe evaluates sys.call(-5) in its own frame, so the call echoes back verbatim.
ffe <- function(...) do.in.envir( envir=sys.frame( sys.nframe()), sys.call( -5))
ffe( 27, b=4) # ffe( 27, b=4)
|
/data/genthat_extracted_code/mvbutils/examples/do.in.envir.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 556
|
r
|
library(mvbutils)
### Name: do.in.envir
### Title: Modify a function's scope
### Aliases: do.in.envir
### Keywords: programming utilities
### ** Examples
# do.in.envir() makes ffdie's body evaluate in fff's frame, so the free
# variable `abcdef` resolves to fff's argument instead of being unbound.
fff <- function( abcdef) ffdie( 3)
ffdie <- function( x) do.in.envir( { x+abcdef} )
fff( 9) # 12; ffdie wouldn't know about abcdef without the do.in.envir call
# Show sys.call issues
# Note that the "envir" argument in this case makes the
# "do.in.envir" call completely superfluous!
# ffe evaluates sys.call(-5) in its own frame, so the call echoes back verbatim.
ffe <- function(...) do.in.envir( envir=sys.frame( sys.nframe()), sys.call( -5))
ffe( 27, b=4) # ffe( 27, b=4)
|
/weekly_R/REmap.R
|
no_license
|
tuqiang2014/R-Programming
|
R
| false
| false
| 5,994
|
r
| ||
## Re-format a raw DAVID reply table according to the DAVID tool that produced it.
##
## The first row of the returned table is made to hold the original column names
## (the tool-specific formatters expect the header as row one).  On any
## formatting failure the unformatted (header-prepended) table is returned
## instead, and the DAVID bookkeeping attributes are copied onto the result.
##
## Args:
##   result  data frame downloaded from DAVID, carrying "tool"/"annot"/"ids"/
##           "type" attributes set by .DAVIDQuery()
##   verbose print which tool-specific formatter is being dispatched
## Returns: the formatted (or, on error, unformatted) table with attributes.
## NOTE(review): rbind() below may drop the data frame's custom attributes, in
## which case attr(result, "tool") would be NULL -- confirm against a live
## DAVID reply object.
.formatDAVIDResult <- function(result, verbose=FALSE) {
  ### we always use read.delim(... header=TRUE) but formatting expects the first
  ### row to be the column names; prepend the header as a data row
  result <- rbind(colnames(result), result)
  tool <- attr(result, "tool")
  if (verbose)
    cat("formatDAVIDResult: tool=", tool,
        ifelse(tool == "geneReportFull",
               " (invisible return)", ""),
        "\n")
  ### formatting depends on which DAVID tool produced the reply
  if (tool == "geneReportFull") {
    returnval <- try(formatGeneReportFull(result))
  } else if (tool == "geneReport") {
    returnval <- try(formatGeneReport(result))
  } else if (tool == "list") {
    returnval <- try(formatList(result))
  } else if (tool == "gene2gene") {
    returnval <- try(formatGene2Gene(result))
  } else if (tool == "annotationReport") {
    returnval <- try(formatAnnotationReport(result))
  } else {
    returnval <- result ### unknown tool: leave unformatted for now
  }
  # inherits() is the robust test for a try() failure; comparing
  # class(x) == "try-error" misbehaves if the object carries multiple classes
  if (inherits(returnval, "try-error"))
    returnval <- result
  # carry the DAVID bookkeeping attributes across the reformatting
  for (attname in c("annot", "ids", "tool", "type"))
    attr(returnval, attname) <- attr(result, attname)
  returnval
}
## Extract the substring(s) of `s` delimited by the regexes `before` and `after`.
##
## For a vector `s`, recurses element-wise and returns a list (optionally named
## by the inputs).  Empty and (by default) NA hits are dropped, as are hits
## whose delimiters were not found; by default a warning is issued when more
## than one delimited substring remains.
##
## Args:
##   s            character scalar (or vector, handled recursively)
##   before,after regular expressions delimiting the wanted substring
##   verbose      print match offsets for debugging
##   addNames     name the returned list by the input strings (vector case)
##   drop.na      drop NA results
##   warn.if.gt.1 warn when more than one substring is found
.bracketedStrings <- function(s, before, after, verbose=FALSE, addNames=FALSE, drop.na=TRUE, warn.if.gt.1=TRUE) {
  # vector input: handle each element independently
  if (length(s) > 1) {
    result <- lapply(s, .bracketedStrings,
                     before=before, after=after, verbose=FALSE)
    if (addNames) names(result) <- s
    return(result)
  }
  # position just past each `before` match ...
  starts <- (valStrings <- gregexpr(before, s)[[1]]) + attr(valStrings, "match.length")
  # ... and the offset of the `after` match within the remainder of the string
  ends <- regexpr(after, (vStrings <- substring(s, starts, 1e6))) - 2
  result <- substring(s, starts, starts + ends)
  if (verbose)
    cat(paste("=>", starts, starts + ends, result, sep=":", collapse="\n"))
  result <- result[result != ""]
  if (drop.na) result <- result[!is.na(result)]
  # NOTE(review): `starts`/`ends` index the unfiltered matches, while `result`
  # was filtered above; the lengths only line up when no element was dropped.
  result <- result[starts >= 0 & ends >= 0]
  # scalar condition: use the short-circuiting && rather than element-wise &
  if ((length(result) > 1) && warn.if.gt.1)
    warning("More than one substring found.")
  return(result)
}
## A little hack replacing the deprecated DAVIDQuery() function.
##
## Submits a gene-id list to the DAVID web service via its two-stage HTML API
## (api.jsp returns a form that must be scraped and re-submitted), downloads
## the resulting report file, and optionally re-formats it with
## .formatDAVIDResult().
##
## Args:
##   ids             gene identifiers (vector or comma-separated string)
##   type            DAVID id type (e.g. "UNIPROT_ACCESSION")
##   annot           annotation category; appended to the query when non-NULL
##   tool            DAVID tool name (e.g. "geneReport", "annotationReport")
##   URLlengthLimit  maximum URL length accepted by the server
##   details         if TRUE return a list of all intermediate artefacts,
##                   otherwise just the parsed result table
##   verbose, writeHTML   debugging aids (print URLs / dump reply HTML)
##   testMe, graphicMenu  unused; kept for interface compatibility with the
##                        original DAVIDQuery()
##   formatIt        apply .formatDAVIDResult() to the downloaded table
.DAVIDQuery <- function (ids = "O00161,O75396", type = "UNIPROT_ACCESSION",
    annot, tool, URLlengthLimit = 2048, details = TRUE, verbose = FALSE,
    writeHTML = FALSE, testMe = FALSE, graphicMenu = FALSE, formatIt = TRUE)
{
    DAVIDURLBase <- "https://david.abcc.ncifcrf.gov/"
    # collapse the ids into a single comma-separated token with no spaces
    ids <- paste(ids, collapse = ",")
    ids <- paste(strsplit(ids, " ")[[1]], sep = "", collapse = "")
    # drop trailing ids until the first-stage URL fits the server's length limit
    firstURLOK <- FALSE
    while (!firstURLOK) {
        firstURL <- paste(DAVIDURLBase, "api.jsp?", "type=",
            type, "&ids=", ids, "&tool=", tool, sep = "")
        if (!is.null(annot))
            firstURL <- paste(firstURL, "&annot=", annot, sep = "")
        if (verbose)
            cat("DAVIDQuery: firstURL = ", firstURL, "\n")
        if (nchar(firstURL) < URLlengthLimit)
            firstURLOK <- TRUE
        else ids <- ids[-length(ids)]
    }
    DAVIDQueryResult <- try({
        myCurlHandle <- RCurl::getCurlHandle(cookiefile = "DAVIDCookiefile.txt")
        # stage 1: api.jsp replies with an HTML form that must be re-submitted
        firstStageResult <- RCurl::getURL(firstURL, curl = myCurlHandle,
            verbose = FALSE, ssl.verifypeer = FALSE)
        if (writeHTML)
            writeChar(firstStageResult, "firstStageResult.html")
        # scrape the form action and its field/value pairs out of the reply HTML
        DAVIDaction <- .bracketedStrings(firstStageResult, "document.apiForm.action = \"",
            "\"")
        DAVIDvalues <- .bracketedStrings(firstStageResult, "document.apiForm.[a-z]*.value=\"",
            "\"", warn.if.gt.1 = FALSE)
        # Keep at most the first 40 ids for the second-stage request.
        # (Bug fix: the original tested length(tmp >= 40) -- the length of a
        # logical vector, true for any non-empty tmp -- so lists shorter than
        # 40 ids were padded with NA by tmp[1:40].)
        if (DAVIDvalues[1] != "") {
            tmp <- unlist(strsplit(DAVIDvalues[1], split = ","))
            if (length(tmp) >= 40) {
                DAVIDvalues[1] <- paste(tmp[1:40], collapse = ",")
            }
        }
        DAVIDfields <- .bracketedStrings(firstStageResult, "document.apiForm.",
            ".value=\"", warn.if.gt.1 = FALSE)
        secondURL <- paste(DAVIDURLBase, DAVIDaction, "?",
            paste(DAVIDfields, "=", DAVIDvalues, sep = "", collapse = "&"),
            sep = "")
        if (verbose)
            cat("DAVIDQuery: secondURL = ", secondURL, "\n")
        if (nchar(secondURL) > URLlengthLimit)
            stop(paste("nchar(secondURL) too long; ", nchar(secondURL),
                ">", URLlengthLimit))
        # stage 2: submit the scraped form; the reply links to a download file
        secondStageResult <- RCurl::getURL(secondURL, curl = myCurlHandle,
            verbose = FALSE, ssl.verifypeer = FALSE)
        # (original computed length(grep(...) > 0); same truthiness, clearer form)
        hasSessionEnded <- length(grep("Your session has ended",
            secondStageResult)) > 0
        if (hasSessionEnded)
            warning("Warning: Session ended")
        if (writeHTML)
            writeChar(secondStageResult, "secondStageResult.html")
        downloadFileName <- .bracketedStrings(secondStageResult,
            "href=\"data/download/", "\" target=")
        if (length(downloadFileName) == 0)
            warning("Warning: downloadFileName is not found in reply html. \n")
        downloadURL <- paste(DAVIDURLBase, "data/download/",
            downloadFileName, sep = "")
        if (verbose)
            cat("downloadURL = ", downloadURL, "\n")
        if (tool == "geneReport") {
            # work around the format in which the file for 'geneReport' is returned by DAVID
            read.delim(downloadURL, stringsAsFactors = FALSE, header = TRUE, nrows = 0)
        } else {
            read.delim(downloadURL, header = TRUE, check.names = FALSE, stringsAsFactors = FALSE)
        }
    })
    # a directory listing in column 1 means the result file was never created
    try(if (is.data.frame(DAVIDQueryResult) && length(DAVIDQueryResult) > 0) {
        if (length(grep("<title>Directory Listing For /data/download/</title>",
            DAVIDQueryResult[[1]])) > 0) {
            DAVIDQueryResult <- paste("No result file was found. URL = ",
                DAVIDQueryResult$firstURL)
            class(DAVIDQueryResult) <- "try-error"
        }
    })
    # stamp the query metadata onto the result so the formatters can recover it
    attr(DAVIDQueryResult, "ids") <- ids
    attr(DAVIDQueryResult, "tool") <- tool
    attr(DAVIDQueryResult, "annot") <- annot
    attr(DAVIDQueryResult, "type") <- type
    if (formatIt && !inherits(DAVIDQueryResult, "try-error")) {
        DAVIDQueryResult <- .formatDAVIDResult(DAVIDQueryResult)
    }
    if (details)
        return(list(ids = ids, firstURL = firstURL, firstStageResult = firstStageResult,
            DAVIDaction = DAVIDaction, secondURL = secondURL,
            secondStageResult = secondStageResult, hasSessionEnded = hasSessionEnded,
            downloadFileName = downloadFileName, downloadURL = downloadURL,
            DAVIDQueryResult = DAVIDQueryResult))
    return(DAVIDQueryResult)
}
|
/R/DAVIDQuery.r
|
no_license
|
sirusb/R3CPET
|
R
| false
| false
| 6,345
|
r
|
## Re-formats a raw DAVID reply for the given tool; on any formatting error the
## unformatted (header-prepended) table is returned, with the DAVID bookkeeping
## attributes copied across.
## NOTE(review): rbind() below may drop the data frame's custom attributes, in
## which case attr(result, "tool") would be NULL -- confirm against a live
## DAVID reply object.
.formatDAVIDResult <- function(result, verbose=FALSE) {
### we always use read.delim(...header=TRUE) but formatting expects the first row to be the column names
### in order to make formatting work we add the top row
result<-rbind(colnames(result),result);
tool <- attr(result,"tool")
if(verbose)
cat("formatDAVIDResult: tool=", tool,
ifelse(tool=="geneReportFull",
" (invisible return)", ""),
"\n")
### formatting depends on which is done
if(tool=="geneReportFull") {
returnval <- try(formatGeneReportFull(result))
} else if(tool=="geneReport") {
returnval <- try(formatGeneReport(result))
} else if(tool=="list") {
returnval <- try(formatList(result))
} else if(tool=="gene2gene") {
returnval <- try(formatGene2Gene(result))
} else if(tool=="annotationReport") {
returnval <- try(formatAnnotationReport(result))
} else
returnval <- result ### Unformatted for now.
# NOTE(review): inherits(returnval, "try-error") would be the more robust test
if(class(returnval) == "try-error")
returnval <- result
# carry the DAVID bookkeeping attributes across the reformatting
for(attname in c("annot", "ids", "tool", "type"))
attr(returnval, attname) <- attr(result, attname)
returnval
}
## Extracts the substring(s) of `s` delimited by the regexes `before` and
## `after`; vectors are handled recursively and returned as a (optionally
## named) list.  Empty/NA/unmatched hits are dropped.
.bracketedStrings <- function(s, before, after, verbose=FALSE, addNames=FALSE, drop.na=TRUE, warn.if.gt.1=TRUE) {
# vector input: handle each element independently
if(length(s) > 1) {
result <- lapply(s, .bracketedStrings,
before=before, after=after, verbose=FALSE)
if(addNames) names(result) = s
return(result)
}
# position just past the `before` match, and the offset of the `after`
# match within the remainder of the string
starts <- (valStrings <- gregexpr(before, s)[[1]]) + attr(valStrings, "match.length")
ends <- regexpr(after, (vStrings <- substring(s, starts,1e6)) ) - 2
result <- substring(s, starts, starts+ends)
if(verbose)
cat(paste("=>",starts, starts+ends, result, sep=":", collapse="\n"))
result <- result[result != ""]
if(drop.na) result <- result[!is.na(result)]
# NOTE(review): starts/ends index the unfiltered matches, while `result` was
# filtered above; the lengths only line up when no element was dropped.
result <- result[starts>=0 & ends>=0]
if((length(result) > 1) & (warn.if.gt.1))
warning("More than one substring found.")
return(result)
}
## A little hack to DAVIDQuery as the DAVIDQuery got deprecated.
## Submits a gene-id list to the DAVID web service via its two-stage HTML API,
## downloads the resulting report, and optionally re-formats it with
## .formatDAVIDResult().  `testMe` and `graphicMenu` are unused; kept for
## interface compatibility with the original DAVIDQuery().
.DAVIDQuery<-function (ids = "O00161,O75396", type = "UNIPROT_ACCESSION",
    annot, tool, URLlengthLimit = 2048, details = TRUE, verbose = FALSE,
    writeHTML = FALSE, testMe = FALSE, graphicMenu = FALSE, formatIt = TRUE)
{
    DAVIDURLBase <- "https://david.abcc.ncifcrf.gov/"
    # collapse the ids into a single comma-separated token with no spaces
    ids <- paste(ids, collapse = ",")
    ids <- paste(strsplit(ids, " ")[[1]], sep = "", collapse = "")
    # drop trailing ids until the first-stage URL fits the server's limit
    firstURLOK <- FALSE
    while (firstURLOK == FALSE) {
        firstURL <- paste(DAVIDURLBase, "api.jsp?", "type=",
            type, "&ids=", ids, "&tool=", tool, sep = "")
        if (!is.null(annot))
            firstURL <- paste(firstURL, "&annot=", annot, sep = "")
        if (verbose)
            cat("DAVIDQuery: firstURL = ", firstURL, "\n")
        if (nchar(firstURL) < URLlengthLimit)
            firstURLOK <- TRUE
        else ids <- ids[-length(ids)]
    }
    DAVIDQueryResult <- try({
        myCurlHandle <- RCurl::getCurlHandle(cookiefile = "DAVIDCookiefile.txt")
        # stage 1: api.jsp replies with an HTML form that must be re-submitted
        firstStageResult <- RCurl::getURL(firstURL, curl = myCurlHandle,
            verbose = FALSE,ssl.verifypeer=FALSE)
        if (writeHTML)
            writeChar(firstStageResult, "firstStageResult.html")
        # scrape the form action and its field/value pairs out of the reply HTML
        DAVIDaction <- .bracketedStrings(firstStageResult, "document.apiForm.action = \"",
            "\"")
        DAVIDvalues <- .bracketedStrings(firstStageResult, "document.apiForm.[a-z]*.value=\"",
            "\"", warn.if.gt.1 = FALSE)
        # NOTE(review): `length(tmp >=40)` is the length of a logical vector --
        # true for ANY non-empty tmp -- so id lists shorter than 40 get padded
        # with NA by tmp[1:40].  `length(tmp) >= 40` is almost certainly meant.
        if(DAVIDvalues[1] != ""){
            tmp <- unlist(strsplit(DAVIDvalues[1],split=","))
            if(length(tmp >=40)){
                DAVIDvalues[1] <- paste(tmp[1:40],collapse=",")
            }
        }
        DAVIDfields <- .bracketedStrings(firstStageResult, "document.apiForm.",
            ".value=\"", warn.if.gt.1 = FALSE)
        secondURL <- paste(DAVIDURLBase, DAVIDaction, "?",
            paste(DAVIDfields, "=", DAVIDvalues, sep = "", collapse = "&"),
            sep = "")
        if (verbose)
            cat("DAVIDQuery: secondURL = ", secondURL, "\n")
        if (nchar(secondURL) > URLlengthLimit)
            stop(paste("nchar(secondURL) too long; ", nchar(secondURL),
                ">", URLlengthLimit))
        # stage 2: submit the scraped form; the reply links to a download file
        secondStageResult <- RCurl::getURL(secondURL, curl = myCurlHandle,
            verbose = FALSE, ssl.verifypeer=FALSE)
        hasSessionEnded <- length(grep("Your session has ended",
            secondStageResult) > 0)
        if (hasSessionEnded)
            warning("Warning: Session ended")
        if (writeHTML)
            writeChar(secondStageResult, "secondStageResult.html")
        downloadFileName <- .bracketedStrings(secondStageResult,
            "href=\"data/download/", "\" target=")
        if (length(downloadFileName) == 0)
            warning("Warning: downloadFileName is not found in reply html. \n")
        downloadURL <- paste(DAVIDURLBase, "data/download/",
            downloadFileName, sep = "")
        if (verbose)
            cat("downloadURL = ", downloadURL, "\n")
        if (tool=="geneReport"){
            # work around the format in which the file for 'geneReport' is returned by DAVID
            read.delim(downloadURL,stringsAsFactors=FALSE,header=TRUE,nrows=0);
        } else {
            read.delim(downloadURL, header = TRUE, check.names = FALSE, stringsAsFactors = FALSE);
        }
    })
    # a directory listing in column 1 means the result file was never created
    try(if (is.data.frame(DAVIDQueryResult) & (length(DAVIDQueryResult) >
        0)) {
        if (length(grep("<title>Directory Listing For /data/download/</title>",
            DAVIDQueryResult[[1]])) > 0) {
            DAVIDQueryResult <- paste("No result file was found. URL = ",
                DAVIDQueryResult$firstURL)
            class(DAVIDQueryResult) <- "try-error"
        }
    })
    # stamp the query metadata onto the result so the formatters can recover it
    attr(DAVIDQueryResult, "ids") <- ids
    attr(DAVIDQueryResult, "tool") <- tool
    attr(DAVIDQueryResult, "annot") <- annot
    attr(DAVIDQueryResult, "type") <- type
    if (formatIt & (class(DAVIDQueryResult) != "try-error")) {
        DAVIDQueryResult <- .formatDAVIDResult(DAVIDQueryResult)
    }
    if (details)
        return(list(ids = ids, firstURL = firstURL, firstStageResult = firstStageResult,
            DAVIDaction = DAVIDaction, secondURL = secondURL,
            secondStageResult = secondStageResult, hasSessionEnded = hasSessionEnded,
            downloadFileName = downloadFileName, downloadURL = downloadURL,
            DAVIDQueryResult = DAVIDQueryResult))
    return(DAVIDQueryResult)
}
|
#' Adds the global p-value for a categorical variables
#'
#' This function uses [car::Anova] with argument
#' `type = "III"` to calculate global p-values for categorical variables.
#' Output from `tbl_regression` and `tbl_uvregression` objects supported.
#'
#' @section Note:
#' If a needed class of model is not supported by
#' [car::Anova], please create a
#' [GitHub Issue](https://github.com/ddsjoberg/gtsummary/issues) to request support.
#'
#' @param x `tbl_regression` or `tbl_uvregression` object
#' @param ... Further arguments passed to or from other methods.
#' @seealso \code{\link{add_global_p.tbl_regression}},
#' \code{\link{add_global_p.tbl_uvregression}}
#' @author Daniel D. Sjoberg
#' @export
# S3 generic: dispatches on the class of `x` (tbl_regression or tbl_uvregression)
add_global_p <- function(x, ...) {
  # must have car package installed to use this function
  assert_package("car", "add_global_p")
  UseMethod("add_global_p")
}
#' Adds the global p-value for categorical variables
#'
#' This function uses [car::Anova] with argument
#' `type = "III"` to calculate global p-values for categorical variables.
#'
#' @section Note:
#' If a needed class of model is not supported by
#' [car::Anova], please create a
#' [GitHub Issue](https://github.com/ddsjoberg/gtsummary/issues) to request support.
#'
#'
#' @param x Object with class `tbl_regression` from the
#' [tbl_regression] function
#' @param keep Logical argument indicating whether to also retain the individual
#' p-values in the table output for each level of the categorical variable.
#' Default is `FALSE`
#' @param include Variables to calculate global p-value for. Input may be a vector of
#' quoted or unquoted variable names. tidyselect and gtsummary select helper
#' functions are also accepted. Default is `NULL`, which adds global p-values
#' for all categorical and interaction terms.
#' @param quiet Logical indicating whether to print messages in console. Default is
#' `FALSE`
#' @param terms DEPRECATED. Use `include=` argument instead.
#' @param type Type argument passed to [car::Anova]. Default is `"III"`
#' @param ... Additional arguments to be passed to [car::Anova]
#' @author Daniel D. Sjoberg
#' @family tbl_regression tools
#' @examples
#' tbl_lm_global_ex1 <-
#' lm(marker ~ age + grade, trial) %>%
#' tbl_regression() %>%
#' add_global_p()
#' @export
#' @return A `tbl_regression` object
#' @section Example Output:
#' \if{html}{\figure{tbl_lm_global_ex1.png}{options: width=50\%}}
add_global_p.tbl_regression <- function(x,
                                        include = x$table_body$variable[x$table_body$var_type %in% c("categorical", "interaction")],
                                        type = NULL, keep = FALSE, quiet = NULL, ..., terms = NULL) {
  # deprecated arguments -------------------------------------------------------
  # `terms=` was renamed to `include=` in v1.2.5; keep accepting it with a warning
  if (!is.null(terms)) {
    lifecycle::deprecate_warn(
      "1.2.5", "gtsummary::add_global_p.tbl_regression(terms = )",
      "add_global_p.tbl_regression(include = )"
    )
    include <- terms
  }
  # setting defaults -----------------------------------------------------------
  # theme elements take precedence over the hard-coded fallbacks
  quiet <- quiet %||% get_theme_element("pkgwide-lgl:quiet") %||% FALSE
  type <- type %||% get_theme_element("add_global_p-str:type", default = "III")
  # converting to character vector ---------------------------------------------
  # resolve tidyselect/quoted/unquoted input into plain variable names
  include <- var_input_to_string(data = vctr_2_tibble(unique(x$table_body$variable)),
                                 select_input = !!rlang::enquo(include))
  # if no terms are provided, stop and return x
  if (length(include) == 0) {
    if (quiet == FALSE)
      paste("No terms were selected, and no global p-values were added to the table.",
            "The default behaviour is to add global p-values for categorical and ",
            "interaction terms. To obtain p-values for other terms,",
            "update the `include=` argument.") %>%
      stringr::str_wrap() %>%
      message()
    return(x)
  }
  # vetted model geeglm not supported here.
  if (inherits(x$inputs$x, "geeglm")) {
    rlang::abort(paste(
      "Model class `geeglm` not supported by `car::Anova()`,",
      "and function could not calculate requested p-value."
    ))
  }
  # printing analysis performed
  if (quiet == FALSE) {
    expr_car <-
      rlang::expr(car::Anova(x$model_obj, type = !!type, !!!list(...))) %>%
      deparse()
    paste("Global p-values for variable(s)",
          glue("`include = {deparse(include) %>% paste(collapse = '')}`"),
          glue("were calculated with")) %>%
      stringr::str_wrap() %>%
      paste(glue("`{expr_car}`"), sep = "\n  ") %>%
      rlang::inform()
  }
  # calculating global pvalues
  # NB: the assignment inside tryCatch() evaluates in this frame, so
  # `car_Anova` is available below when car::Anova() succeeds
  tryCatch(
    {
      car_Anova <-
        x$model_obj %>%
        car::Anova(type = type, ...)
    },
    error = function(e) {
      usethis::ui_oops(paste0(
        "{usethis::ui_code('add_global_p()')} uses ",
        "{usethis::ui_code('car::Anova()')} to calculate the global p-value,\n",
        "and the function returned an error while calculating the p-values.\n",
        "Is your model type supported by {usethis::ui_code('car::Anova()')}?"
      ))
      stop(e)
    }
  )
  # one row per requested variable: its name and its global p-value, flagged
  # as a "label" row so it merges onto the variable's header row below
  global_p <-
    car_Anova %>%
    as.data.frame() %>%
    tibble::rownames_to_column(var = "variable") %>%
    filter(.data$variable %in% !!include) %>%
    select(c("variable", starts_with("Pr(>"))) %>% # selecting the pvalue column
    set_names(c("variable", "p.value_global")) %>%
    mutate(row_type = "label")
  # merging in global pvalue ---------------------------------------------------
  # adding p-value column, if it is not already there
  if (!"p.value" %in% names(x$table_body)) {
    # adding p.value to table_body
    x$table_body <- mutate(x$table_body, p.value = NA_real_)
    # adding to table_header
    x$table_header <-
      tibble(column = names(x$table_body)) %>%
      left_join(x$table_header, by = "column") %>%
      table_header_fill_missing() %>%
      table_header_fmt_fun(
        p.value = x$inputs$pvalue_fun %||%
          getOption("gtsummary.pvalue_fun", default = style_pvalue)
      )
    x <- modify_header_internal(x, p.value = "**p-value**")
  }
  # adding global p-values; existing per-term p-values win only where no
  # global p-value was computed (coalesce order)
  x$table_body <-
    x$table_body %>%
    left_join(
      global_p,
      by = c("row_type", "variable")
    ) %>%
    mutate(
      p.value = coalesce(.data$p.value_global, .data$p.value)
    ) %>%
    select(-c("p.value_global"))
  # if keep == FALSE, then deleting variable-level p-values
  if (keep == FALSE) {
    x$table_body <-
      x$table_body %>%
      mutate(
        p.value = if_else(.data$variable %in% !!include & .data$row_type == "level",
          NA_real_, .data$p.value
        )
      )
  }
  # record this call for print/reproducibility bookkeeping
  x$call_list <- c(x$call_list, list(add_global_p = match.call()))
  return(x)
}
#' Adds the global p-value for categorical variables
#'
#' This function uses [car::Anova] with argument
#' `type = "III"` to calculate global p-values for categorical variables.
#'
#' @param x Object with class `tbl_uvregression` from the
#' [tbl_uvregression] function
#' @param ... Additional arguments to be passed to [car::Anova].
#' @inheritParams add_global_p.tbl_regression
#' @param include Variables to calculate global p-value for. Input may be a vector of
#' quoted or unquoted variable names. tidyselect and gtsummary select helper
#' functions are also accepted. Default is `everything()`.
#' @author Daniel D. Sjoberg
#' @family tbl_uvregression tools
#' @examples
#' tbl_uv_global_ex2 <-
#' trial[c("response", "trt", "age", "grade")] %>%
#' tbl_uvregression(
#' method = glm,
#' y = response,
#' method.args = list(family = binomial),
#' exponentiate = TRUE
#' ) %>%
#' add_global_p()
#' @export
#' @return A `tbl_uvregression` object
#' @section Example Output:
#' \if{html}{\figure{tbl_uv_global_ex2.png}{options: width=50\%}}
#'
add_global_p.tbl_uvregression <- function(x, type = NULL, include = everything(),
                                          keep = FALSE, quiet = NULL, ...) {
  # setting defaults -----------------------------------------------------------
  # `%||%` falls through NULL: explicit argument > theme element > hard default
  quiet <- quiet %||% get_theme_element("pkgwide-lgl:quiet") %||% FALSE
  type <- type %||% get_theme_element("add_global_p-str:type", default = "III")
  # converting to character vector ---------------------------------------------
  # resolves tidyselect input (e.g. everything(), starts_with()) against the
  # variables present in the table body, yielding plain variable-name strings
  include <- var_input_to_string(data = vctr_2_tibble(unique(x$table_body$variable)),
                                 select_input = !!rlang::enquo(include))
  # capturing dots in expression
  # dots are captured unevaluated so they can be spliced into the car::Anova()
  # call for each univariate model below
  dots <- rlang::enexprs(...)
  # printing analysis performed
  if (quiet == FALSE) {
    expr_car <-
      rlang::expr(car::Anova(mod = x$model_obj, type = !!type, !!!list(...))) %>%
      deparse()
    paste("Global p-values for variable(s)",
          glue("`include = {deparse(include) %>% paste(collapse = '')}`"),
          glue("were calculated with")) %>%
      stringr::str_wrap() %>%
      paste(glue("`{expr_car}`"), sep = "\n ") %>%
      rlang::inform()
  }
  # calculating global pvalues
  # one car::Anova() run per selected univariate model; `y` is the variable
  # name, `x` (shadowing the outer `x`) is that variable's tbl_regression
  global_p <-
    imap_dfr(
      x$tbls[include],
      function(x, y) {
        tryCatch(
          {
            car_Anova <-
              rlang::call2(
                car::Anova, mod = x[["model_obj"]], type = type, !!!dots
              ) %>%
              rlang::eval_tidy()
          },
          error = function(e) {
            # report which variable failed before re-raising the original error
            usethis::ui_oops(paste0(
              "{usethis::ui_code('add_global_p()')} uses ",
              "{usethis::ui_code('car::Anova()')} to calculate the global p-value,\n",
              "and the function returned an error while calculating the p-value ",
              "for {usethis::ui_value(y)}."
            ))
            stop(e)
          }
        )
        car_Anova %>%
          as.data.frame() %>%
          tibble::rownames_to_column(var = "variable") %>%
          filter(.data$variable == y) %>%
          select(c(
            "variable", starts_with("Pr(>")
          )) %>% # selecting the pvalue column
          set_names(c("variable", "p.value_global"))
      }
    ) %>%
    select(c("variable", "p.value_global"))
  # adding global p-value to meta_data object
  x$meta_data <-
    x$meta_data %>%
    left_join(
      global_p,
      by = "variable"
    )
  # merging in global pvalue ---------------------------------------------------
  # adding p-value column, if it is not already there
  if (!"p.value" %in% names(x$table_body)) {
    # adding p.value to table_body
    x$table_body <- mutate(x$table_body, p.value = NA_real_)
    # adding to table_header
    x$table_header <-
      tibble(column = names(x$table_body)) %>%
      left_join(x$table_header, by = "column") %>%
      table_header_fill_missing() %>%
      table_header_fmt_fun(p.value = x$inputs$pvalue_fun)
    x <- modify_header_internal(x, p.value = "**p-value**")
  }
  # adding global p-values
  # the global p-value is attached to the variable's label row only, then
  # coalesce() keeps any pre-existing p-value where no global one was computed
  x$table_body <-
    x$table_body %>%
    left_join(
      global_p %>% mutate(row_type = "label"),
      by = c("row_type", "variable")
    ) %>%
    mutate(
      p.value = coalesce(.data$p.value_global, .data$p.value)
    ) %>%
    select(-c("p.value_global"))
  # if keep == FALSE, then deleting variable-level p-values
  if (keep == FALSE) {
    x$table_body <-
      x$table_body %>%
      mutate(
        p.value = if_else(.data$variable %in% !!include & .data$row_type == "level",
          NA_real_, .data$p.value
        )
      )
  }
  # record the call for reproducibility/printing of the gtsummary pipeline
  x$call_list <- c(x$call_list, list(add_global_p = match.call()))
  return(x)
}
|
/R/add_global_p.R
|
permissive
|
shijianasdf/gtsummary
|
R
| false
| false
| 11,463
|
r
|
#' Adds the global p-value for categorical variables
#'
#' This function uses [car::Anova] with argument
#' `type = "III"` to calculate global p-values for categorical variables.
#' Output from `tbl_regression` and `tbl_uvregression` objects supported.
#'
#' @section Note:
#' If a needed class of model is not supported by
#' [car::Anova], please create a
#' [GitHub Issue](https://github.com/ddsjoberg/gtsummary/issues) to request support.
#'
#' @param x `tbl_regression` or `tbl_uvregression` object
#' @param ... Further arguments passed to or from other methods.
#' @seealso \code{\link{add_global_p.tbl_regression}},
#' \code{\link{add_global_p.tbl_uvregression}}
#' @author Daniel D. Sjoberg
#' @export
add_global_p <- function(x, ...) {
  # must have car package installed to use this function
  # (car::Anova() performs the underlying global tests; error early with a
  # clear message rather than failing inside a method)
  assert_package("car", "add_global_p")
  # S3 dispatch to the tbl_regression or tbl_uvregression method
  UseMethod("add_global_p")
}
#' Adds the global p-value for categorical variables
#'
#' This function uses [car::Anova] with argument
#' `type = "III"` to calculate global p-values for categorical variables.
#'
#' @section Note:
#' If a needed class of model is not supported by
#' [car::Anova], please create a
#' [GitHub Issue](https://github.com/ddsjoberg/gtsummary/issues) to request support.
#'
#'
#' @param x Object with class `tbl_regression` from the
#' [tbl_regression] function
#' @param keep Logical argument indicating whether to also retain the individual
#' p-values in the table output for each level of the categorical variable.
#' Default is `FALSE`
#' @param include Variables to calculate global p-value for. Input may be a vector of
#' quoted or unquoted variable names. tidyselect and gtsummary select helper
#' functions are also accepted. Default is `NULL`, which adds global p-values
#' for all categorical and interaction terms.
#' @param quiet Logical indicating whether to print messages in console. Default is
#' `FALSE`
#' @param terms DEPRECATED. Use `include=` argument instead.
#' @param type Type argument passed to [car::Anova]. Default is `"III"`
#' @param ... Additional arguments to be passed to [car::Anova]
#' @author Daniel D. Sjoberg
#' @family tbl_regression tools
#' @examples
#' tbl_lm_global_ex1 <-
#' lm(marker ~ age + grade, trial) %>%
#' tbl_regression() %>%
#' add_global_p()
#' @export
#' @return A `tbl_regression` object
#' @section Example Output:
#' \if{html}{\figure{tbl_lm_global_ex1.png}{options: width=50\%}}
add_global_p.tbl_regression <- function(x,
                                        include = x$table_body$variable[x$table_body$var_type %in% c("categorical", "interaction")],
                                        type = NULL, keep = FALSE, quiet = NULL, ..., terms = NULL) {
  # deprecated arguments -------------------------------------------------------
  # `terms=` was renamed to `include=` in v1.2.5; warn but honor the old name
  if (!is.null(terms)) {
    lifecycle::deprecate_warn(
      "1.2.5", "gtsummary::add_global_p.tbl_regression(terms = )",
      "add_global_p.tbl_regression(include = )"
    )
    include <- terms
  }
  # setting defaults -----------------------------------------------------------
  # `%||%` falls through NULL: explicit argument > theme element > hard default
  quiet <- quiet %||% get_theme_element("pkgwide-lgl:quiet") %||% FALSE
  type <- type %||% get_theme_element("add_global_p-str:type", default = "III")
  # converting to character vector ---------------------------------------------
  # resolves tidyselect input against the variables in the table body,
  # yielding plain variable-name strings
  include <- var_input_to_string(data = vctr_2_tibble(unique(x$table_body$variable)),
                                 select_input = !!rlang::enquo(include))
  # if no terms are provided, stop and return x
  if (length(include) == 0) {
    if (quiet == FALSE)
      paste("No terms were selected, and no global p-values were added to the table.",
            "The default behaviour is to add global p-values for categorical and ",
            "interaction terms. To obtain p-values for other terms,",
            "update the `include=` argument.") %>%
      stringr::str_wrap() %>%
      message()
    return(x)
  }
  # vetted model geeglm not supported here.
  # geeglm models are otherwise supported by gtsummary, but car::Anova() has
  # no method for them, so abort with an informative message
  if (inherits(x$inputs$x, "geeglm")) {
    rlang::abort(paste(
      "Model class `geeglm` not supported by `car::Anova()`,",
      "and function could not calculate requested p-value."
    ))
  }
  # printing analysis performed
  if (quiet == FALSE) {
    expr_car <-
      rlang::expr(car::Anova(x$model_obj, type = !!type, !!!list(...))) %>%
      deparse()
    paste("Global p-values for variable(s)",
          glue("`include = {deparse(include) %>% paste(collapse = '')}`"),
          glue("were calculated with")) %>%
      stringr::str_wrap() %>%
      paste(glue("`{expr_car}`"), sep = "\n ") %>%
      rlang::inform()
  }
  # calculating global pvalues
  # a failure here typically means the model class lacks a car::Anova()
  # method; surface a hint, then re-raise the original error
  tryCatch(
    {
      car_Anova <-
        x$model_obj %>%
        car::Anova(type = type, ...)
    },
    error = function(e) {
      usethis::ui_oops(paste0(
        "{usethis::ui_code('add_global_p()')} uses ",
        "{usethis::ui_code('car::Anova()')} to calculate the global p-value,\n",
        "and the function returned an error while calculating the p-values.\n",
        "Is your model type supported by {usethis::ui_code('car::Anova()')}?"
      ))
      stop(e)
    }
  )
  # keep only the selected variables' rows and the Pr(>...) p-value column;
  # `row_type = "label"` targets the variable's label row when merging below
  global_p <-
    car_Anova %>%
    as.data.frame() %>%
    tibble::rownames_to_column(var = "variable") %>%
    filter(.data$variable %in% !!include) %>%
    select(c("variable", starts_with("Pr(>"))) %>% # selecting the pvalue column
    set_names(c("variable", "p.value_global")) %>%
    mutate(row_type = "label")
  # merging in global pvalue ---------------------------------------------------
  # adding p-value column, if it is not already there
  if (!"p.value" %in% names(x$table_body)) {
    # adding p.value to table_body
    x$table_body <- mutate(x$table_body, p.value = NA_real_)
    # adding to table_header
    x$table_header <-
      tibble(column = names(x$table_body)) %>%
      left_join(x$table_header, by = "column") %>%
      table_header_fill_missing() %>%
      table_header_fmt_fun(
        p.value = x$inputs$pvalue_fun %||%
          getOption("gtsummary.pvalue_fun", default = style_pvalue)
      )
    x <- modify_header_internal(x, p.value = "**p-value**")
  }
  # adding global p-values
  # coalesce() keeps any pre-existing p-value where no global one was computed
  x$table_body <-
    x$table_body %>%
    left_join(
      global_p,
      by = c("row_type", "variable")
    ) %>%
    mutate(
      p.value = coalesce(.data$p.value_global, .data$p.value)
    ) %>%
    select(-c("p.value_global"))
  # if keep == FALSE, then deleting variable-level p-values
  if (keep == FALSE) {
    x$table_body <-
      x$table_body %>%
      mutate(
        p.value = if_else(.data$variable %in% !!include & .data$row_type == "level",
          NA_real_, .data$p.value
        )
      )
  }
  # record the call for reproducibility/printing of the gtsummary pipeline
  x$call_list <- c(x$call_list, list(add_global_p = match.call()))
  return(x)
}
#' Adds the global p-value for categorical variables
#'
#' This function uses [car::Anova] with argument
#' `type = "III"` to calculate global p-values for categorical variables.
#'
#' @param x Object with class `tbl_uvregression` from the
#' [tbl_uvregression] function
#' @param ... Additional arguments to be passed to [car::Anova].
#' @inheritParams add_global_p.tbl_regression
#' @param include Variables to calculate global p-value for. Input may be a vector of
#' quoted or unquoted variable names. tidyselect and gtsummary select helper
#' functions are also accepted. Default is `everything()`.
#' @author Daniel D. Sjoberg
#' @family tbl_uvregression tools
#' @examples
#' tbl_uv_global_ex2 <-
#' trial[c("response", "trt", "age", "grade")] %>%
#' tbl_uvregression(
#' method = glm,
#' y = response,
#' method.args = list(family = binomial),
#' exponentiate = TRUE
#' ) %>%
#' add_global_p()
#' @export
#' @return A `tbl_uvregression` object
#' @section Example Output:
#' \if{html}{\figure{tbl_uv_global_ex2.png}{options: width=50\%}}
#'
add_global_p.tbl_uvregression <- function(x, type = NULL, include = everything(),
                                          keep = FALSE, quiet = NULL, ...) {
  # setting defaults -----------------------------------------------------------
  # `%||%` falls through NULL: explicit argument > theme element > hard default
  quiet <- quiet %||% get_theme_element("pkgwide-lgl:quiet") %||% FALSE
  type <- type %||% get_theme_element("add_global_p-str:type", default = "III")
  # converting to character vector ---------------------------------------------
  # resolves tidyselect input (e.g. everything(), starts_with()) against the
  # variables present in the table body, yielding plain variable-name strings
  include <- var_input_to_string(data = vctr_2_tibble(unique(x$table_body$variable)),
                                 select_input = !!rlang::enquo(include))
  # capturing dots in expression
  # dots are captured unevaluated so they can be spliced into the car::Anova()
  # call for each univariate model below
  dots <- rlang::enexprs(...)
  # printing analysis performed
  if (quiet == FALSE) {
    expr_car <-
      rlang::expr(car::Anova(mod = x$model_obj, type = !!type, !!!list(...))) %>%
      deparse()
    paste("Global p-values for variable(s)",
          glue("`include = {deparse(include) %>% paste(collapse = '')}`"),
          glue("were calculated with")) %>%
      stringr::str_wrap() %>%
      paste(glue("`{expr_car}`"), sep = "\n ") %>%
      rlang::inform()
  }
  # calculating global pvalues
  # one car::Anova() run per selected univariate model; `y` is the variable
  # name, `x` (shadowing the outer `x`) is that variable's tbl_regression
  global_p <-
    imap_dfr(
      x$tbls[include],
      function(x, y) {
        tryCatch(
          {
            car_Anova <-
              rlang::call2(
                car::Anova, mod = x[["model_obj"]], type = type, !!!dots
              ) %>%
              rlang::eval_tidy()
          },
          error = function(e) {
            # report which variable failed before re-raising the original error
            usethis::ui_oops(paste0(
              "{usethis::ui_code('add_global_p()')} uses ",
              "{usethis::ui_code('car::Anova()')} to calculate the global p-value,\n",
              "and the function returned an error while calculating the p-value ",
              "for {usethis::ui_value(y)}."
            ))
            stop(e)
          }
        )
        car_Anova %>%
          as.data.frame() %>%
          tibble::rownames_to_column(var = "variable") %>%
          filter(.data$variable == y) %>%
          select(c(
            "variable", starts_with("Pr(>")
          )) %>% # selecting the pvalue column
          set_names(c("variable", "p.value_global"))
      }
    ) %>%
    select(c("variable", "p.value_global"))
  # adding global p-value to meta_data object
  x$meta_data <-
    x$meta_data %>%
    left_join(
      global_p,
      by = "variable"
    )
  # merging in global pvalue ---------------------------------------------------
  # adding p-value column, if it is not already there
  if (!"p.value" %in% names(x$table_body)) {
    # adding p.value to table_body
    x$table_body <- mutate(x$table_body, p.value = NA_real_)
    # adding to table_header
    x$table_header <-
      tibble(column = names(x$table_body)) %>%
      left_join(x$table_header, by = "column") %>%
      table_header_fill_missing() %>%
      table_header_fmt_fun(p.value = x$inputs$pvalue_fun)
    x <- modify_header_internal(x, p.value = "**p-value**")
  }
  # adding global p-values
  # the global p-value is attached to the variable's label row only, then
  # coalesce() keeps any pre-existing p-value where no global one was computed
  x$table_body <-
    x$table_body %>%
    left_join(
      global_p %>% mutate(row_type = "label"),
      by = c("row_type", "variable")
    ) %>%
    mutate(
      p.value = coalesce(.data$p.value_global, .data$p.value)
    ) %>%
    select(-c("p.value_global"))
  # if keep == FALSE, then deleting variable-level p-values
  if (keep == FALSE) {
    x$table_body <-
      x$table_body %>%
      mutate(
        p.value = if_else(.data$variable %in% !!include & .data$row_type == "level",
          NA_real_, .data$p.value
        )
      )
  }
  # record the call for reproducibility/printing of the gtsummary pipeline
  x$call_list <- c(x$call_list, list(add_global_p = match.call()))
  return(x)
}
|
##' One-line description
##'
##' Short description
##'
##' @details detailed description
##'
##' @return An object of class \code{wcr_data}, which is a list of resampled datasets, each of which consists of independent data points.
##'
##' @references Hoffman EB, Sen PK, Weinberg CR. (2001). Within-cluster resampling. \emph{Biometrika}, \bold{88}, 1121-1134.
##'
##' @author Kazuki Yoshida
##' @seealso
##' \code{\link{print.wcr_data}}
##' @examples
##'
##' ## Load
##' library(wcr)
##'
##' @export
WcrData <- function(data, cluster_id, Q) {
  ## Step 1: determine the size of each cluster in the data.
  cluster_sizes <- ClusterSizes(data = data, cluster_id = cluster_id)
  ## Step 2: draw Q within-cluster resampling indices from those sizes.
  resampled_ids <- ResampleId(n_k_vec = cluster_sizes, Q = Q)
  ## Step 3: materialize the Q resampled datasets (one independent
  ## observation per cluster in each).
  resampled_datasets <- ResampleDatasets(data = data,
                                         cluster_id = cluster_id,
                                         resample_id_df = resampled_ids)
    ## Return the list tagged with the S3 class used by print.wcr_data().
  structure(resampled_datasets, class = "wcr_data")
}
|
/R/WcrData.R
|
no_license
|
kaz-yos/mouse
|
R
| false
| false
| 976
|
r
|
##' One-line description
##'
##' Short description
##'
##' @details detailed description
##'
##' @return An object of class \code{wcr_data}, which is a list of resampled datasets, each of which consists of independent data points.
##'
##' @references Hoffman EB, Sen PK, Weinberg CR. (2001). Within-cluster resampling. \emph{Biometrika}, \bold{88}, 1121-1134.
##'
##' @author Kazuki Yoshida
##' @seealso
##' \code{\link{print.wcr_data}}
##' @examples
##'
##' ## Load
##' library(wcr)
##'
##' @export
WcrData <- function(data, cluster_id, Q) {
  ## Step 1: determine the size of each cluster in the data.
  cluster_sizes <- ClusterSizes(data = data, cluster_id = cluster_id)
  ## Step 2: draw Q within-cluster resampling indices from those sizes.
  resampled_ids <- ResampleId(n_k_vec = cluster_sizes, Q = Q)
  ## Step 3: materialize the Q resampled datasets (one independent
  ## observation per cluster in each).
  resampled_datasets <- ResampleDatasets(data = data,
                                         cluster_id = cluster_id,
                                         resample_id_df = resampled_ids)
  ## Return the list tagged with the S3 class used by print.wcr_data().
  structure(resampled_datasets, class = "wcr_data")
}
|
# Fit a cross-validated elastic-net (alpha = 0.8) gaussian regression on the
# soft-tissue training set and append the fitted path summary to a text file.
library(glmnet)
# `header = TRUE` (was the partially-matched `head = T`): spell out the
# argument name and use TRUE rather than the reassignable alias T.
mydata <- read.table("../../../../TrainingSet/FullSet/Classifier/soft_tissue.csv",
                     header = TRUE, sep = ",")
# Predictors are columns 4..end; the response is column 1.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
# `fit` (was `glm`): avoid masking stats::glm().
fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.8,
                 family = "gaussian", standardize = TRUE)
# Append the fitted glmnet path (df, %dev, lambda per step) to the log file.
sink('./soft_tissue_083.txt', append = TRUE)
print(fit$glmnet.fit)
sink()
|
/Model/EN/Classifier/soft_tissue/soft_tissue_083.R
|
no_license
|
esbgkannan/QSMART
|
R
| false
| false
| 358
|
r
|
# Fit a cross-validated elastic-net (alpha = 0.8) gaussian regression on the
# soft-tissue training set and append the fitted path summary to a text file.
library(glmnet)
# `header = TRUE` (was the partially-matched `head = T`): spell out the
# argument name and use TRUE rather than the reassignable alias T.
mydata <- read.table("../../../../TrainingSet/FullSet/Classifier/soft_tissue.csv",
                     header = TRUE, sep = ",")
# Predictors are columns 4..end; the response is column 1.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
# `fit` (was `glm`): avoid masking stats::glm().
fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.8,
                 family = "gaussian", standardize = TRUE)
# Append the fitted glmnet path (df, %dev, lambda per step) to the log file.
sink('./soft_tissue_083.txt', append = TRUE)
print(fit$glmnet.fit)
sink()
|
### load a set of core functions
# provides countToTpm() used below -- TODO confirm its signature
source('ntx_deseq_functions.R')
### load the feature counts data and generate count matrix (rawcounts)
# featureCounts output: '#' lines are the program header, first six columns
# are Geneid/Chr/Start/End/Strand/Length, remaining columns are per-library counts
PE <- read.csv('raw_gene_counts/PE_featurecounts.txt',sep = "\t",comment.char = "#")
SE <- read.csv('raw_gene_counts/SE_featurecounts.txt',sep = "\t",comment.char = "#")
# inner join on the six annotation columns keeps genes present in both runs
rawcounts <- merge(PE,SE,by=c("Geneid","Chr","Start","End","Strand","Length"))
### generate or load gene annotations
# generate
#source('DESeq_annotation_merge.R')
# or load
gene_annotations <- read.csv('annotations/AaegL2.1RUv2_annotations.csv')
row.names(gene_annotations) <- gene_annotations$internal.gene_id
# read in library information
# assumes library_key.csv column `y` holds the replacement column names, in the
# same order as rawcounts' columns, with the gene-id column renamed "gene" and
# the length column renamed "len" -- TODO confirm against the key file
columns <- read.csv('annotations/library_key.csv')
colnames(rawcounts) <- columns$y
row.names(rawcounts) <- rawcounts$gene
# generate merged file with annotation information and raw counts
# columns 7+ are the per-library counts (first six are annotation columns)
rawcounts_only <- rawcounts[,7:length(colnames(rawcounts))]
rawcounts_with_annotation <- merge(rawcounts_only,gene_annotations,by="row.names")
row.names(rawcounts_with_annotation) <- rawcounts_with_annotation$internal.gene_id
# convert counts to TPM
# countToTpm() is applied per library column, using gene lengths from `len`
tpm_all <- apply(rawcounts_only,2,countToTpm,rawcounts$len)
tpm_all_with_annotation <- merge(tpm_all,gene_annotations,by="row.names")
##### read in library information from CSV file and add info
libprop <- read.csv('annotations/library_info.csv')
row.names(libprop) <- libprop$library
# pull out counts and TPM for each library
# reorder the count/TPM columns to match the library-info row order
rawcounts_with_annotation_reordered <- rawcounts_with_annotation[row.names(libprop)]
tpm_all_with_annotation_reordered <- tpm_all_with_annotation[row.names(libprop)]
|
/DESeq_initialize.R
|
no_license
|
bnmtthws/ntx_deseq
|
R
| false
| false
| 1,592
|
r
|
### load a set of core functions
# provides countToTpm() used below -- TODO confirm its signature
source('ntx_deseq_functions.R')
### load the feature counts data and generate count matrix (rawcounts)
# featureCounts output: '#' lines are the program header, first six columns
# are Geneid/Chr/Start/End/Strand/Length, remaining columns are per-library counts
PE <- read.csv('raw_gene_counts/PE_featurecounts.txt',sep = "\t",comment.char = "#")
SE <- read.csv('raw_gene_counts/SE_featurecounts.txt',sep = "\t",comment.char = "#")
# inner join on the six annotation columns keeps genes present in both runs
rawcounts <- merge(PE,SE,by=c("Geneid","Chr","Start","End","Strand","Length"))
### generate or load gene annotations
# generate
#source('DESeq_annotation_merge.R')
# or load
gene_annotations <- read.csv('annotations/AaegL2.1RUv2_annotations.csv')
row.names(gene_annotations) <- gene_annotations$internal.gene_id
# read in library information
# assumes library_key.csv column `y` holds the replacement column names, in the
# same order as rawcounts' columns, with the gene-id column renamed "gene" and
# the length column renamed "len" -- TODO confirm against the key file
columns <- read.csv('annotations/library_key.csv')
colnames(rawcounts) <- columns$y
row.names(rawcounts) <- rawcounts$gene
# generate merged file with annotation information and raw counts
# columns 7+ are the per-library counts (first six are annotation columns)
rawcounts_only <- rawcounts[,7:length(colnames(rawcounts))]
rawcounts_with_annotation <- merge(rawcounts_only,gene_annotations,by="row.names")
row.names(rawcounts_with_annotation) <- rawcounts_with_annotation$internal.gene_id
# convert counts to TPM
# countToTpm() is applied per library column, using gene lengths from `len`
tpm_all <- apply(rawcounts_only,2,countToTpm,rawcounts$len)
tpm_all_with_annotation <- merge(tpm_all,gene_annotations,by="row.names")
##### read in library information from CSV file and add info
libprop <- read.csv('annotations/library_info.csv')
row.names(libprop) <- libprop$library
# pull out counts and TPM for each library
# reorder the count/TPM columns to match the library-info row order
rawcounts_with_annotation_reordered <- rawcounts_with_annotation[row.names(libprop)]
tpm_all_with_annotation_reordered <- tpm_all_with_annotation[row.names(libprop)]
|
#' @title Plot heatmap
#'
#' @description This function plots a heatmap for direct visualisation of results
#'
#' @details This function will plot a heatmap directly from the count data, an annotation bar at the top of the heatmap will offer information about the plot at a glance. A side bar indicating the pvalue will allow determination of statistical significance at a glance as well.
#'
#' @return A lovely looking heatmap which is interactive
#'
#' @param DGElist A DGElist containing the count and sampling data
#' @param variable The selected variable of interest, should be a character string or vector.
#' @param metadata A dataframe with different variables on the x axis and samples on the y axis.
#' @param nGenes Specify the number of genes you'd like to get data for in the top table at the end
#'
#' @export
plotHeatmap <- function(DGElist, variable, metadata, nGenes = 30) {
  # Build the design matrix for the variable of interest, pull the scaled
  # CPM values for the top `nGenes` genes, then hand both to the plotting
  # routine that renders the interactive heatmap.
  design <- getDesignMatrix(variable, metadata)
  scaled_cpm <- getScaledCPM(DGElist = DGElist,
                             designMatrix = design,
                             nGenes = nGenes)
  plottingHeatmap(ScaledCPM = scaled_cpm,
                  variable = variable,
                  metadata = metadata)
}
|
/InteGRAPE_current_buggy/R/plotHeatmap.R
|
no_license
|
UofABioinformaticsHub/InteGRAPE
|
R
| false
| false
| 1,128
|
r
|
#' @title Plot heatmap
#'
#' @description This function plots a heatmap for direct visualisation of results
#'
#' @details This function will plot a heatmap directly from the count data, an annotation bar at the top of the heatmap will offer information about the plot at a glance. A side bar indicating the pvalue will allow determination of statistical significance at a glance as well.
#'
#' @return A lovely looking heatmap which is interactive
#'
#' @param DGElist A DGElist containing the count and sampling data
#' @param variable The selected variable of interest, should be a character string or vector.
#' @param metadata A dataframe with different variables on the x axis and samples on the y axis.
#' @param nGenes Specify the number of genes you'd like to get data for in the top table at the end
#'
#' @export
plotHeatmap <- function(DGElist, variable, metadata, nGenes = 30) {
  # Build the design matrix for the variable of interest, pull the scaled
  # CPM values for the top `nGenes` genes, then hand both to the plotting
  # routine that renders the interactive heatmap.
  design <- getDesignMatrix(variable, metadata)
  scaled_cpm <- getScaledCPM(DGElist = DGElist,
                             designMatrix = design,
                             nGenes = nGenes)
  plottingHeatmap(ScaledCPM = scaled_cpm,
                  variable = variable,
                  metadata = metadata)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.